From 7ee78603eea8f2c8792089ff0e5a3446aaf678f8 Mon Sep 17 00:00:00 2001 From: Brandon Farmer Date: Thu, 22 Mar 2018 14:54:06 -0700 Subject: [PATCH] Update revision for ifql and fix tests --- Gopkg.lock | 188 +- Gopkg.toml | 4 +- integrations/server_test.go | 8 + server/routes_test.go | 6 +- vendor/github.com/beorn7/perks/.gitignore | 2 + vendor/github.com/beorn7/perks/LICENSE | 20 + vendor/github.com/beorn7/perks/README.md | 31 + .../beorn7/perks/quantile/bench_test.go | 63 + .../beorn7/perks/quantile/example_test.go | 121 + .../beorn7/perks/quantile/exampledata.txt | 2388 +++++ .../beorn7/perks/quantile/stream.go | 316 + .../beorn7/perks/quantile/stream_test.go | 215 + vendor/github.com/gogo/protobuf/.gitignore | 2 + vendor/github.com/gogo/protobuf/.travis.yml | 13 +- vendor/github.com/gogo/protobuf/AUTHORS | 15 + vendor/github.com/gogo/protobuf/CONTRIBUTORS | 8 + vendor/github.com/gogo/protobuf/LICENSE | 6 +- vendor/github.com/gogo/protobuf/Makefile | 47 +- vendor/github.com/gogo/protobuf/README | 48 +- vendor/github.com/gogo/protobuf/Readme.md | 142 +- vendor/github.com/gogo/protobuf/bench.md | 190 + .../github.com/gogo/protobuf/codec/codec.go | 91 + .../gogo/protobuf/codec/codec_test.go | 54 + .../github.com/gogo/protobuf/custom_types.md | 68 + vendor/github.com/gogo/protobuf/extensions.md | 162 + .../gogo/protobuf/gogoproto/Makefile | 9 +- .../github.com/gogo/protobuf/gogoproto/doc.go | 7 +- .../gogo/protobuf/gogoproto/gogo.pb.go | 283 +- .../gogo/protobuf/gogoproto/gogo.proto | 17 +- .../gogo/protobuf/gogoproto/helper.go | 51 +- .../gogo/protobuf/install-protobuf.sh | 19 +- .../github.com/gogo/protobuf/jsonpb/jsonpb.go | 611 +- .../gogo/protobuf/jsonpb/jsonpb_test.go | 416 +- .../gogo/protobuf/plugin/compare/compare.go | 6 +- .../protobuf/plugin/compare/comparetest.go | 19 +- .../plugin/defaultcheck/defaultcheck.go | 4 +- .../plugin/description/description.go | 4 +- .../plugin/description/descriptiontest.go | 4 +- .../protobuf/plugin/embedcheck/embedcheck.go | 
4 +- .../plugin/enumstringer/enumstringer.go | 6 +- .../gogo/protobuf/plugin/equal/equal.go | 90 +- .../gogo/protobuf/plugin/equal/equaltest.go | 21 +- .../gogo/protobuf/plugin/face/face.go | 4 +- .../gogo/protobuf/plugin/face/facetest.go | 4 +- .../gogo/protobuf/plugin/gostring/gostring.go | 113 +- .../protobuf/plugin/gostring/gostringtest.go | 6 +- .../protobuf/plugin/marshalto/marshalto.go | 750 +- .../protobuf/plugin/oneofcheck/oneofcheck.go | 4 +- .../gogo/protobuf/plugin/populate/populate.go | 143 +- .../gogo/protobuf/plugin/size/size.go | 103 +- .../gogo/protobuf/plugin/size/sizetest.go | 10 +- .../gogo/protobuf/plugin/stringer/stringer.go | 6 +- .../protobuf/plugin/stringer/stringertest.go | 4 +- .../gogo/protobuf/plugin/testgen/testgen.go | 70 +- .../gogo/protobuf/plugin/union/union.go | 4 +- .../gogo/protobuf/plugin/union/uniontest.go | 4 +- .../protobuf/plugin/unmarshal/unmarshal.go | 686 +- .../github.com/gogo/protobuf/proto/Makefile | 2 +- .../gogo/protobuf/proto/all_test.go | 109 +- .../gogo/protobuf/proto/any_test.go | 300 + .../github.com/gogo/protobuf/proto/decode.go | 112 +- .../gogo/protobuf/proto/decode_gogo.go | 11 +- .../gogo/protobuf/proto/decode_test.go | 262 + .../github.com/gogo/protobuf/proto/discard.go | 151 + .../gogo/protobuf/proto/duration.go | 100 + .../gogo/protobuf/proto/duration_gogo.go | 203 + .../github.com/gogo/protobuf/proto/encode.go | 36 +- .../gogo/protobuf/proto/encode_gogo.go | 22 +- .../gogo/protobuf/proto/encode_test.go | 84 + .../github.com/gogo/protobuf/proto/equal.go | 19 +- .../gogo/protobuf/proto/equal_test.go | 12 + .../gogo/protobuf/proto/extensions.go | 32 + .../gogo/protobuf/proto/extensions_gogo.go | 10 +- .../gogo/protobuf/proto/extensions_test.go | 87 +- vendor/github.com/gogo/protobuf/proto/lib.go | 3 +- .../gogo/protobuf/proto/lib_gogo.go | 6 +- .../gogo/protobuf/proto/map_test.go | 46 + .../protobuf/proto/pointer_reflect_gogo.go | 85 + .../protobuf/proto/pointer_unsafe_gogo.go | 44 +- 
.../gogo/protobuf/proto/properties.go | 61 +- .../gogo/protobuf/proto/properties_gogo.go | 51 +- .../gogo/protobuf/proto/proto3_test.go | 10 + .../gogo/protobuf/proto/skip_gogo.go | 6 +- vendor/github.com/gogo/protobuf/proto/text.go | 194 +- .../gogo/protobuf/proto/text_gogo.go | 12 +- .../gogo/protobuf/proto/text_parser.go | 203 +- .../gogo/protobuf/proto/text_parser_test.go | 102 +- .../gogo/protobuf/proto/text_test.go | 8 +- .../gogo/protobuf/proto/timestamp.go | 113 + .../gogo/protobuf/proto/timestamp_gogo.go | 229 + .../protoc-gen-gogo/descriptor/Makefile | 3 + .../protoc-gen-gogo/descriptor/descriptor.go | 118 + .../descriptor/descriptor.pb.go | 722 +- ...gostring.go => descriptor_gostring.gen.go} | 182 +- .../descriptor/descriptor_test.go | 31 + .../protoc-gen-gogo/descriptor/helper.go | 37 +- .../protoc-gen-gogo/generator/generator.go | 567 +- .../protoc-gen-gogo/generator/helper.go | 65 +- .../protoc-gen-gogo/generator/name_test.go | 29 + .../protobuf/protoc-gen-gogo/grpc/grpc.go | 15 +- .../protobuf/protoc-gen-gogo/plugin/Makefile | 2 +- .../protoc-gen-gogo/plugin/plugin.pb.go | 116 +- .../gogo/protobuf/sortkeys/sortkeys.go | 101 + vendor/github.com/gogo/protobuf/types/any.go | 138 + .../github.com/gogo/protobuf/types/any.pb.go | 691 ++ .../gogo/protobuf/types/any_test.go | 112 + .../github.com/gogo/protobuf/types/api.pb.go | 1892 ++++ vendor/github.com/gogo/protobuf/types/doc.go | 35 + .../gogo/protobuf/types/duration.go | 100 + .../gogo/protobuf/types/duration.pb.go | 474 + .../gogo/protobuf/types/duration_gogo.go | 100 + .../gogo/protobuf/types/duration_test.go | 120 + .../gogo/protobuf/types/empty.pb.go | 417 + .../gogo/protobuf/types/field_mask.pb.go | 704 ++ .../gogo/protobuf/types/source_context.pb.go | 473 + .../gogo/protobuf/types/struct.pb.go | 1797 ++++ .../gogo/protobuf/types/timestamp.go | 132 + .../gogo/protobuf/types/timestamp.pb.go | 492 ++ .../gogo/protobuf/types/timestamp_gogo.go | 94 + .../gogo/protobuf/types/timestamp_test.go | 152 + 
.../github.com/gogo/protobuf/types/type.pb.go | 2950 ++++++ .../gogo/protobuf/types/wrappers.pb.go | 2157 +++++ .../gogo/protobuf/vanity/command/command.go | 103 +- .../github.com/gogo/protobuf/vanity/enum.go | 6 +- .../github.com/gogo/protobuf/vanity/field.go | 13 +- .../github.com/gogo/protobuf/vanity/file.go | 18 +- .../gogo/protobuf/vanity/foreach.go | 6 +- vendor/github.com/gogo/protobuf/vanity/msg.go | 10 +- vendor/github.com/influxdata/ifql/.gitignore | 8 + .../influxdata/ifql/.goreleaser.yml | 54 + .../github.com/influxdata/ifql/CHANGELOG.md | 52 + vendor/github.com/influxdata/ifql/Dockerfile | 14 + .../influxdata/ifql/Dockerfile_build | 42 + vendor/github.com/influxdata/ifql/Gopkg.lock | 433 + vendor/github.com/influxdata/ifql/Gopkg.toml | 17 + vendor/github.com/influxdata/ifql/LICENSE | 684 ++ vendor/github.com/influxdata/ifql/Makefile | 66 + vendor/github.com/influxdata/ifql/README.md | 498 ++ .../github.com/influxdata/ifql/ast/Makefile | 9 + vendor/github.com/influxdata/ifql/ast/ast.go | 850 ++ vendor/github.com/influxdata/ifql/ast/json.go | 894 ++ .../influxdata/ifql/ast/json_test.go | 255 + .../github.com/influxdata/ifql/circle-test.sh | 26 + .../influxdata/ifql/compiler/compiler.go | 228 + .../influxdata/ifql/compiler/compiler_test.go | 183 + .../influxdata/ifql/compiler/doc.go | 9 + .../influxdata/ifql/compiler/runtime.go | 1866 ++++ .../influxdata/ifql/complete/complete.go | 97 + .../influxdata/ifql/complete/complete_test.go | 87 + .../influxdata/ifql/docker-compose.yml | 23 + .../influxdata/ifql/functions/count.go | 135 + .../influxdata/ifql/functions/count_test.go | 131 + .../influxdata/ifql/functions/covariance.go | 239 + .../ifql/functions/covariance_test.go | 378 + .../influxdata/ifql/functions/data_test.go | 78 + .../influxdata/ifql/functions/derivative.go | 303 + .../ifql/functions/derivative_test.go | 533 ++ .../influxdata/ifql/functions/difference.go | 291 + .../ifql/functions/difference_test.go | 415 + 
.../influxdata/ifql/functions/distinct.go | 221 + .../ifql/functions/distinct_test.go | 173 + .../influxdata/ifql/functions/filter.go | 280 + .../influxdata/ifql/functions/filter_test.go | 911 ++ .../influxdata/ifql/functions/first.go | 181 + .../influxdata/ifql/functions/first_test.go | 130 + .../influxdata/ifql/functions/from.go | 187 + .../influxdata/ifql/functions/from_test.go | 83 + .../influxdata/ifql/functions/group.go | 449 + .../influxdata/ifql/functions/group_test.go | 482 + .../influxdata/ifql/functions/integral.go | 214 + .../ifql/functions/integral_test.go | 212 + .../influxdata/ifql/functions/join.go | 799 ++ .../influxdata/ifql/functions/join_test.go | 968 ++ .../influxdata/ifql/functions/last.go | 187 + .../influxdata/ifql/functions/last_test.go | 132 + .../influxdata/ifql/functions/limit.go | 202 + .../influxdata/ifql/functions/limit_test.go | 183 + .../influxdata/ifql/functions/map.go | 192 + .../influxdata/ifql/functions/map_test.go | 268 + .../influxdata/ifql/functions/max.go | 178 + .../influxdata/ifql/functions/max_test.go | 125 + .../influxdata/ifql/functions/mean.go | 119 + .../influxdata/ifql/functions/mean_test.go | 65 + .../influxdata/ifql/functions/min.go | 178 + .../influxdata/ifql/functions/min_test.go | 125 + .../influxdata/ifql/functions/percentile.go | 260 + .../ifql/functions/percentile_test.go | 150 + .../influxdata/ifql/functions/range.go | 114 + .../influxdata/ifql/functions/range_test.go | 120 + .../influxdata/ifql/functions/sample.go | 188 + .../influxdata/ifql/functions/sample_test.go | 336 + .../influxdata/ifql/functions/set.go | 211 + .../influxdata/ifql/functions/set_test.go | 288 + .../influxdata/ifql/functions/shift.go | 166 + .../influxdata/ifql/functions/shift_test.go | 119 + .../influxdata/ifql/functions/skew.go | 138 + .../influxdata/ifql/functions/skew_test.go | 75 + .../influxdata/ifql/functions/sort.go | 157 + .../influxdata/ifql/functions/sort_test.go | 366 + .../influxdata/ifql/functions/spread.go | 176 + 
.../influxdata/ifql/functions/spread_test.go | 41 + .../ifql/functions/state_tracking.go | 302 + .../ifql/functions/state_tracking_test.go | 201 + .../influxdata/ifql/functions/stddev.go | 130 + .../influxdata/ifql/functions/stddev_test.go | 65 + .../influxdata/ifql/functions/sum.go | 152 + .../influxdata/ifql/functions/sum_test.go | 82 + .../influxdata/ifql/functions/top_bottom.go | 103 + .../influxdata/ifql/functions/window.go | 254 + .../influxdata/ifql/functions/window_test.go | 645 ++ .../influxdata/ifql/functions/yield.go | 81 + .../influxdata/ifql/interpreter/doc.go | 2 + .../ifql/interpreter/interpreter.go | 1441 +++ .../ifql/interpreter/interpreter_test.go | 308 + .../influxdata/ifql/parser/Makefile | 10 + .../github.com/influxdata/ifql/parser/doc.go | 3 + .../github.com/influxdata/ifql/parser/ifql.go | 7868 +++++++++++++++++ .../influxdata/ifql/parser/ifql.peg | 483 + .../influxdata/ifql/parser/parser.go | 18 + .../influxdata/ifql/parser/parser_debug.go | 21 + .../influxdata/ifql/parser/parser_test.go | 1609 ++++ .../influxdata/ifql/parser/types.go | 363 + vendor/github.com/influxdata/ifql/query.go | 63 + .../influxdata/ifql/query/compile.go | 432 + .../ifql/query/control/controller.go | 554 ++ .../influxdata/ifql/query/control/metrics.go | 62 + .../influxdata/ifql/query/control/queue.go | 67 + .../ifql/query/execute/aggergate_test.go | 379 + .../ifql/query/execute/aggregate.go | 190 + .../ifql/query/execute/allocator.go | 170 + .../influxdata/ifql/query/execute/block.go | 1303 +++ .../influxdata/ifql/query/execute/bounds.go | 33 + .../influxdata/ifql/query/execute/dataset.go | 189 + .../ifql/query/execute/dispatcher.go | 119 + .../influxdata/ifql/query/execute/executor.go | 227 + .../ifql/query/execute/executor_test.go | 401 + .../query/execute/expression_internal_test.go | 285 + .../ifql/query/execute/expression_test.go | 266 + .../influxdata/ifql/query/execute/format.go | 293 + .../influxdata/ifql/query/execute/queue.go | 64 + 
.../influxdata/ifql/query/execute/result.go | 118 + .../influxdata/ifql/query/execute/row_fn.go | 264 + .../influxdata/ifql/query/execute/selector.go | 309 + .../ifql/query/execute/selector_test.go | 703 ++ .../influxdata/ifql/query/execute/source.go | 122 + .../influxdata/ifql/query/execute/storage.go | 1044 +++ .../query/execute/storage/predicate.pb.go | 1344 +++ .../ifql/query/execute/storage/storage.pb.go | 3871 ++++++++ .../query/execute/storage/storage.yarpc.go | 191 + .../influxdata/ifql/query/execute/time.go | 66 + .../ifql/query/execute/transformation.go | 35 + .../ifql/query/execute/transport.go | 314 + .../influxdata/ifql/query/execute/trigger.go | 148 + .../influxdata/ifql/query/execute/window.go | 8 + .../influxdata/ifql/query/format.go | 55 + .../influxdata/ifql/query/operation.go | 95 + .../influxdata/ifql/query/plan/format.go | 64 + .../influxdata/ifql/query/plan/logical.go | 98 + .../ifql/query/plan/logical_test.go | 254 + .../influxdata/ifql/query/plan/physical.go | 377 + .../ifql/query/plan/physical_test.go | 792 ++ .../influxdata/ifql/query/plan/procedure.go | 141 + .../influxdata/ifql/query/plan/rules.go | 12 + .../influxdata/ifql/query/plan/storage.go | 20 + .../github.com/influxdata/ifql/query/query.go | 175 + .../influxdata/ifql/query/query_test.go | 268 + .../ifql/query/resource_management.go | 62 + .../github.com/influxdata/ifql/query/time.go | 98 + .../influxdata/ifql/query/trigger.go | 57 + vendor/github.com/influxdata/ifql/release.sh | 37 + .../influxdata/ifql/semantic/binary_types.go | 118 + .../influxdata/ifql/semantic/doc.go | 12 + .../influxdata/ifql/semantic/graph.go | 1275 +++ .../influxdata/ifql/semantic/graph_test.go | 192 + .../influxdata/ifql/semantic/json.go | 866 ++ .../influxdata/ifql/semantic/json_test.go | 233 + .../influxdata/ifql/semantic/types.go | 492 ++ .../influxdata/ifql/semantic/types_test.go | 218 + .../influxdata/ifql/semantic/walk.go | 150 + .../github.com/influxdata/tdigest/.gitignore | 1 + 
vendor/github.com/influxdata/tdigest/LICENSE | 202 + .../github.com/influxdata/tdigest/README.md | 42 + .../github.com/influxdata/tdigest/centroid.go | 59 + .../influxdata/tdigest/centroid_test.go | 122 + .../github.com/influxdata/tdigest/tdigest.go | 229 + .../influxdata/tdigest/tdigest_test.go | 243 + vendor/github.com/influxdata/yamux/.gitignore | 23 + vendor/github.com/influxdata/yamux/LICENSE | 362 + vendor/github.com/influxdata/yamux/README.md | 86 + vendor/github.com/influxdata/yamux/addr.go | 60 + .../github.com/influxdata/yamux/bench_test.go | 124 + vendor/github.com/influxdata/yamux/const.go | 161 + .../github.com/influxdata/yamux/const_test.go | 72 + vendor/github.com/influxdata/yamux/mux.go | 87 + vendor/github.com/influxdata/yamux/session.go | 623 ++ .../influxdata/yamux/session_test.go | 1298 +++ vendor/github.com/influxdata/yamux/spec.md | 140 + vendor/github.com/influxdata/yamux/stream.go | 466 + vendor/github.com/influxdata/yamux/util.go | 43 + .../github.com/influxdata/yamux/util_test.go | 50 + vendor/github.com/influxdata/yarpc/.gitignore | 1 + vendor/github.com/influxdata/yarpc/Godeps | 2 + vendor/github.com/influxdata/yarpc/LICENSE | 20 + vendor/github.com/influxdata/yarpc/README.md | 8 + vendor/github.com/influxdata/yarpc/call.go | 54 + .../github.com/influxdata/yarpc/call_test.go | 1 + .../github.com/influxdata/yarpc/clientconn.go | 65 + vendor/github.com/influxdata/yarpc/codec.go | 116 + .../influxdata/yarpc/codes/codes.pb.go | 86 + .../influxdata/yarpc/codes/codes.proto | 23 + vendor/github.com/influxdata/yarpc/rpc.go | 9 + vendor/github.com/influxdata/yarpc/server.go | 288 + .../influxdata/yarpc/status/status.go | 28 + .../influxdata/yarpc/status/status.pb.go | 362 + .../influxdata/yarpc/status/status.proto | 10 + vendor/github.com/influxdata/yarpc/stream.go | 183 + .../influxdata/yarpc/yarpcproto/helper.go | 33 + .../influxdata/yarpc/yarpcproto/yarpc.pb.go | 69 + .../influxdata/yarpc/yarpcproto/yarpc.proto | 12 + 
.../golang_protobuf_extensions/.travis.yml | 2 + .../golang_protobuf_extensions/LICENSE | 201 + .../golang_protobuf_extensions/NOTICE | 1 + .../golang_protobuf_extensions/README.md | 20 + .../pbutil/all_test.go | 177 + .../pbutil/decode.go | 75 + .../pbutil/decode_test.go | 99 + .../golang_protobuf_extensions/pbutil/doc.go | 16 + .../pbutil/encode.go | 46 + .../pbutil/encode_test.go | 67 + .../pbutil/fixtures_test.go | 103 + .../opentracing/opentracing-go/.gitignore | 13 + .../opentracing/opentracing-go/.travis.yml | 14 + .../opentracing/opentracing-go/CHANGELOG.md | 14 + .../opentracing/opentracing-go/LICENSE | 21 + .../opentracing/opentracing-go/Makefile | 32 + .../opentracing/opentracing-go/README.md | 147 + .../opentracing-go/globaltracer.go | 32 + .../opentracing/opentracing-go/gocontext.go | 57 + .../opentracing-go/gocontext_test.go | 81 + .../opentracing/opentracing-go/log/field.go | 245 + .../opentracing-go/log/field_test.go | 39 + .../opentracing/opentracing-go/log/util.go | 54 + .../opentracing/opentracing-go/noop.go | 64 + .../opentracing-go/options_test.go | 31 + .../opentracing/opentracing-go/propagation.go | 176 + .../opentracing-go/propagation_test.go | 93 + .../opentracing/opentracing-go/span.go | 185 + .../opentracing-go/testtracer_test.go | 138 + .../opentracing/opentracing-go/tracer.go | 305 + .../prometheus/client_golang/.gitignore | 26 + .../prometheus/client_golang/.travis.yml | 9 + .../prometheus/client_golang/AUTHORS.md | 18 + .../prometheus/client_golang/CHANGELOG.md | 109 + .../prometheus/client_golang/CONTRIBUTING.md | 18 + .../prometheus/client_golang/LICENSE | 201 + .../prometheus/client_golang/NOTICE | 23 + .../prometheus/client_golang/README.md | 45 + .../prometheus/client_golang/VERSION | 1 + .../client_golang/prometheus/.gitignore | 1 + .../client_golang/prometheus/README.md | 1 + .../prometheus/benchmark_test.go | 183 + .../client_golang/prometheus/collector.go | 75 + .../client_golang/prometheus/counter.go | 172 + 
.../client_golang/prometheus/counter_test.go | 58 + .../client_golang/prometheus/desc.go | 205 + .../client_golang/prometheus/doc.go | 181 + .../prometheus/example_clustermanager_test.go | 118 + .../client_golang/prometheus/examples_test.go | 751 ++ .../prometheus/expvar_collector.go | 119 + .../prometheus/expvar_collector_test.go | 97 + .../client_golang/prometheus/fnv.go | 29 + .../client_golang/prometheus/gauge.go | 140 + .../client_golang/prometheus/gauge_test.go | 182 + .../client_golang/prometheus/go_collector.go | 263 + .../prometheus/go_collector_test.go | 123 + .../client_golang/prometheus/histogram.go | 444 + .../prometheus/histogram_test.go | 326 + .../client_golang/prometheus/http.go | 490 + .../client_golang/prometheus/http_test.go | 121 + .../client_golang/prometheus/metric.go | 166 + .../client_golang/prometheus/metric_test.go | 35 + .../prometheus/process_collector.go | 142 + .../prometheus/process_collector_test.go | 58 + .../client_golang/prometheus/registry.go | 806 ++ .../client_golang/prometheus/registry_test.go | 545 ++ .../client_golang/prometheus/summary.go | 534 ++ .../client_golang/prometheus/summary_test.go | 347 + .../client_golang/prometheus/untyped.go | 138 + .../client_golang/prometheus/value.go | 234 + .../client_golang/prometheus/vec.go | 404 + .../client_golang/prometheus/vec_test.go | 312 + .../prometheus/client_model/.gitignore | 1 + .../prometheus/client_model/CONTRIBUTING.md | 18 + .../prometheus/client_model/LICENSE | 201 + .../prometheus/client_model/MAINTAINERS.md | 1 + .../prometheus/client_model/Makefile | 62 + .../github.com/prometheus/client_model/NOTICE | 5 + .../prometheus/client_model/README.md | 26 + .../prometheus/client_model/go/metrics.pb.go | 364 + .../prometheus/client_model/metrics.proto | 81 + .../prometheus/client_model/pom.xml | 130 + .../prometheus/client_model/setup.py | 23 + .../github.com/prometheus/common/.travis.yml | 6 + .../prometheus/common/CONTRIBUTING.md | 18 + 
vendor/github.com/prometheus/common/LICENSE | 201 + .../prometheus/common/MAINTAINERS.md | 1 + vendor/github.com/prometheus/common/NOTICE | 5 + vendor/github.com/prometheus/common/README.md | 12 + .../prometheus/common/expfmt/bench_test.go | 167 + .../prometheus/common/expfmt/decode.go | 429 + .../prometheus/common/expfmt/decode_test.go | 435 + .../prometheus/common/expfmt/encode.go | 88 + .../prometheus/common/expfmt/expfmt.go | 38 + .../prometheus/common/expfmt/fuzz.go | 36 + .../prometheus/common/expfmt/text_create.go | 303 + .../common/expfmt/text_create_test.go | 443 + .../prometheus/common/expfmt/text_parse.go | 757 ++ .../common/expfmt/text_parse_test.go | 593 ++ .../bitbucket.org/ww/goautoneg/README.txt | 67 + .../bitbucket.org/ww/goautoneg/autoneg.go | 162 + .../ww/goautoneg/autoneg_test.go | 33 + .../prometheus/common/model/alert.go | 136 + .../prometheus/common/model/alert_test.go | 118 + .../prometheus/common/model/fingerprinting.go | 105 + .../github.com/prometheus/common/model/fnv.go | 42 + .../prometheus/common/model/labels.go | 210 + .../prometheus/common/model/labels_test.go | 140 + .../prometheus/common/model/labelset.go | 169 + .../prometheus/common/model/metric.go | 103 + .../prometheus/common/model/metric_test.go | 132 + .../prometheus/common/model/model.go | 16 + .../prometheus/common/model/signature.go | 144 + .../prometheus/common/model/signature_test.go | 314 + .../prometheus/common/model/silence.go | 106 + .../prometheus/common/model/silence_test.go | 228 + .../prometheus/common/model/time.go | 264 + .../prometheus/common/model/time_test.go | 132 + .../prometheus/common/model/value.go | 416 + .../prometheus/common/model/value_test.go | 468 + .../github.com/prometheus/procfs/.gitignore | 1 + .../github.com/prometheus/procfs/.travis.yml | 15 + .../prometheus/procfs/CONTRIBUTING.md | 18 + vendor/github.com/prometheus/procfs/LICENSE | 201 + .../prometheus/procfs/MAINTAINERS.md | 1 + vendor/github.com/prometheus/procfs/Makefile | 71 + 
vendor/github.com/prometheus/procfs/NOTICE | 7 + vendor/github.com/prometheus/procfs/README.md | 11 + .../github.com/prometheus/procfs/buddyinfo.go | 95 + .../prometheus/procfs/buddyinfo_test.go | 64 + vendor/github.com/prometheus/procfs/doc.go | 45 + .../prometheus/procfs/fixtures.ttar | 446 + vendor/github.com/prometheus/procfs/fs.go | 82 + .../github.com/prometheus/procfs/fs_test.go | 39 + .../prometheus/procfs/internal/util/parse.go | 46 + vendor/github.com/prometheus/procfs/ipvs.go | 259 + .../github.com/prometheus/procfs/ipvs_test.go | 250 + vendor/github.com/prometheus/procfs/mdstat.go | 151 + .../prometheus/procfs/mdstat_test.go | 44 + .../prometheus/procfs/mountstats.go | 569 ++ .../prometheus/procfs/mountstats_test.go | 286 + .../github.com/prometheus/procfs/net_dev.go | 216 + .../prometheus/procfs/net_dev_test.go | 86 + .../github.com/prometheus/procfs/nfs/nfs.go | 263 + .../github.com/prometheus/procfs/nfs/parse.go | 317 + .../prometheus/procfs/nfs/parse_nfs.go | 67 + .../prometheus/procfs/nfs/parse_nfs_test.go | 305 + .../prometheus/procfs/nfs/parse_nfsd.go | 89 + .../prometheus/procfs/nfs/parse_nfsd_test.go | 196 + vendor/github.com/prometheus/procfs/proc.go | 238 + .../github.com/prometheus/procfs/proc_io.go | 65 + .../prometheus/procfs/proc_io_test.go | 46 + .../prometheus/procfs/proc_limits.go | 150 + .../prometheus/procfs/proc_limits_test.go | 44 + .../github.com/prometheus/procfs/proc_ns.go | 68 + .../prometheus/procfs/proc_ns_test.go | 44 + .../github.com/prometheus/procfs/proc_stat.go | 188 + .../prometheus/procfs/proc_stat_test.go | 123 + .../github.com/prometheus/procfs/proc_test.go | 174 + vendor/github.com/prometheus/procfs/stat.go | 232 + .../github.com/prometheus/procfs/stat_test.go | 74 + vendor/github.com/prometheus/procfs/ttar | 389 + vendor/github.com/prometheus/procfs/xfrm.go | 187 + .../github.com/prometheus/procfs/xfrm_test.go | 66 + .../github.com/prometheus/procfs/xfs/parse.go | 330 + .../prometheus/procfs/xfs/parse_test.go | 442 
+ .../github.com/prometheus/procfs/xfs/xfs.go | 163 + 486 files changed, 108584 insertions(+), 1861 deletions(-) create mode 100644 vendor/github.com/beorn7/perks/.gitignore create mode 100644 vendor/github.com/beorn7/perks/LICENSE create mode 100644 vendor/github.com/beorn7/perks/README.md create mode 100644 vendor/github.com/beorn7/perks/quantile/bench_test.go create mode 100644 vendor/github.com/beorn7/perks/quantile/example_test.go create mode 100644 vendor/github.com/beorn7/perks/quantile/exampledata.txt create mode 100644 vendor/github.com/beorn7/perks/quantile/stream.go create mode 100644 vendor/github.com/beorn7/perks/quantile/stream_test.go create mode 100644 vendor/github.com/gogo/protobuf/AUTHORS create mode 100644 vendor/github.com/gogo/protobuf/bench.md create mode 100644 vendor/github.com/gogo/protobuf/codec/codec.go create mode 100644 vendor/github.com/gogo/protobuf/codec/codec_test.go create mode 100644 vendor/github.com/gogo/protobuf/custom_types.md create mode 100644 vendor/github.com/gogo/protobuf/extensions.md create mode 100644 vendor/github.com/gogo/protobuf/proto/any_test.go create mode 100644 vendor/github.com/gogo/protobuf/proto/decode_test.go create mode 100644 vendor/github.com/gogo/protobuf/proto/discard.go create mode 100644 vendor/github.com/gogo/protobuf/proto/duration.go create mode 100644 vendor/github.com/gogo/protobuf/proto/duration_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/encode_test.go create mode 100644 vendor/github.com/gogo/protobuf/proto/map_test.go create mode 100644 vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp.go create mode 100644 vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go rename vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/{gostring.go => descriptor_gostring.gen.go} (79%) create mode 100644 
vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_test.go create mode 100644 vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go create mode 100644 vendor/github.com/gogo/protobuf/types/any.go create mode 100644 vendor/github.com/gogo/protobuf/types/any.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/any_test.go create mode 100644 vendor/github.com/gogo/protobuf/types/api.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/doc.go create mode 100644 vendor/github.com/gogo/protobuf/types/duration.go create mode 100644 vendor/github.com/gogo/protobuf/types/duration.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/duration_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/types/duration_test.go create mode 100644 vendor/github.com/gogo/protobuf/types/empty.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/field_mask.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/source_context.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/struct.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/timestamp.go create mode 100644 vendor/github.com/gogo/protobuf/types/timestamp.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/timestamp_gogo.go create mode 100644 vendor/github.com/gogo/protobuf/types/timestamp_test.go create mode 100644 vendor/github.com/gogo/protobuf/types/type.pb.go create mode 100644 vendor/github.com/gogo/protobuf/types/wrappers.pb.go create mode 100644 vendor/github.com/influxdata/ifql/.gitignore create mode 100644 vendor/github.com/influxdata/ifql/.goreleaser.yml create mode 100644 vendor/github.com/influxdata/ifql/CHANGELOG.md create mode 100644 vendor/github.com/influxdata/ifql/Dockerfile create mode 100644 vendor/github.com/influxdata/ifql/Dockerfile_build create mode 100644 vendor/github.com/influxdata/ifql/Gopkg.lock create mode 100644 vendor/github.com/influxdata/ifql/Gopkg.toml create mode 100644 
vendor/github.com/influxdata/ifql/LICENSE create mode 100644 vendor/github.com/influxdata/ifql/Makefile create mode 100644 vendor/github.com/influxdata/ifql/README.md create mode 100644 vendor/github.com/influxdata/ifql/ast/Makefile create mode 100644 vendor/github.com/influxdata/ifql/ast/ast.go create mode 100644 vendor/github.com/influxdata/ifql/ast/json.go create mode 100644 vendor/github.com/influxdata/ifql/ast/json_test.go create mode 100755 vendor/github.com/influxdata/ifql/circle-test.sh create mode 100644 vendor/github.com/influxdata/ifql/compiler/compiler.go create mode 100644 vendor/github.com/influxdata/ifql/compiler/compiler_test.go create mode 100644 vendor/github.com/influxdata/ifql/compiler/doc.go create mode 100644 vendor/github.com/influxdata/ifql/compiler/runtime.go create mode 100644 vendor/github.com/influxdata/ifql/complete/complete.go create mode 100644 vendor/github.com/influxdata/ifql/complete/complete_test.go create mode 100644 vendor/github.com/influxdata/ifql/docker-compose.yml create mode 100644 vendor/github.com/influxdata/ifql/functions/count.go create mode 100644 vendor/github.com/influxdata/ifql/functions/count_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/covariance.go create mode 100644 vendor/github.com/influxdata/ifql/functions/covariance_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/data_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/derivative.go create mode 100644 vendor/github.com/influxdata/ifql/functions/derivative_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/difference.go create mode 100644 vendor/github.com/influxdata/ifql/functions/difference_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/distinct.go create mode 100644 vendor/github.com/influxdata/ifql/functions/distinct_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/filter.go create mode 100644 
vendor/github.com/influxdata/ifql/functions/filter_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/first.go create mode 100644 vendor/github.com/influxdata/ifql/functions/first_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/from.go create mode 100644 vendor/github.com/influxdata/ifql/functions/from_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/group.go create mode 100644 vendor/github.com/influxdata/ifql/functions/group_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/integral.go create mode 100644 vendor/github.com/influxdata/ifql/functions/integral_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/join.go create mode 100644 vendor/github.com/influxdata/ifql/functions/join_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/last.go create mode 100644 vendor/github.com/influxdata/ifql/functions/last_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/limit.go create mode 100644 vendor/github.com/influxdata/ifql/functions/limit_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/map.go create mode 100644 vendor/github.com/influxdata/ifql/functions/map_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/max.go create mode 100644 vendor/github.com/influxdata/ifql/functions/max_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/mean.go create mode 100644 vendor/github.com/influxdata/ifql/functions/mean_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/min.go create mode 100644 vendor/github.com/influxdata/ifql/functions/min_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/percentile.go create mode 100644 vendor/github.com/influxdata/ifql/functions/percentile_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/range.go create mode 100644 vendor/github.com/influxdata/ifql/functions/range_test.go create 
mode 100644 vendor/github.com/influxdata/ifql/functions/sample.go create mode 100644 vendor/github.com/influxdata/ifql/functions/sample_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/set.go create mode 100644 vendor/github.com/influxdata/ifql/functions/set_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/shift.go create mode 100644 vendor/github.com/influxdata/ifql/functions/shift_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/skew.go create mode 100644 vendor/github.com/influxdata/ifql/functions/skew_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/sort.go create mode 100644 vendor/github.com/influxdata/ifql/functions/sort_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/spread.go create mode 100644 vendor/github.com/influxdata/ifql/functions/spread_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/state_tracking.go create mode 100644 vendor/github.com/influxdata/ifql/functions/state_tracking_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/stddev.go create mode 100644 vendor/github.com/influxdata/ifql/functions/stddev_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/sum.go create mode 100644 vendor/github.com/influxdata/ifql/functions/sum_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/top_bottom.go create mode 100644 vendor/github.com/influxdata/ifql/functions/window.go create mode 100644 vendor/github.com/influxdata/ifql/functions/window_test.go create mode 100644 vendor/github.com/influxdata/ifql/functions/yield.go create mode 100644 vendor/github.com/influxdata/ifql/interpreter/doc.go create mode 100644 vendor/github.com/influxdata/ifql/interpreter/interpreter.go create mode 100644 vendor/github.com/influxdata/ifql/interpreter/interpreter_test.go create mode 100644 vendor/github.com/influxdata/ifql/parser/Makefile create mode 100644 
vendor/github.com/influxdata/ifql/parser/doc.go create mode 100644 vendor/github.com/influxdata/ifql/parser/ifql.go create mode 100644 vendor/github.com/influxdata/ifql/parser/ifql.peg create mode 100644 vendor/github.com/influxdata/ifql/parser/parser.go create mode 100644 vendor/github.com/influxdata/ifql/parser/parser_debug.go create mode 100644 vendor/github.com/influxdata/ifql/parser/parser_test.go create mode 100644 vendor/github.com/influxdata/ifql/parser/types.go create mode 100644 vendor/github.com/influxdata/ifql/query.go create mode 100644 vendor/github.com/influxdata/ifql/query/compile.go create mode 100644 vendor/github.com/influxdata/ifql/query/control/controller.go create mode 100644 vendor/github.com/influxdata/ifql/query/control/metrics.go create mode 100644 vendor/github.com/influxdata/ifql/query/control/queue.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/aggergate_test.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/aggregate.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/allocator.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/block.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/bounds.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/dataset.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/dispatcher.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/executor.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/executor_test.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/expression_internal_test.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/expression_test.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/format.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/queue.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/result.go create mode 100644 
vendor/github.com/influxdata/ifql/query/execute/row_fn.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/selector.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/selector_test.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/source.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/storage.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/storage/predicate.pb.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/storage/storage.pb.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/storage/storage.yarpc.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/time.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/transformation.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/transport.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/trigger.go create mode 100644 vendor/github.com/influxdata/ifql/query/execute/window.go create mode 100644 vendor/github.com/influxdata/ifql/query/format.go create mode 100644 vendor/github.com/influxdata/ifql/query/operation.go create mode 100644 vendor/github.com/influxdata/ifql/query/plan/format.go create mode 100644 vendor/github.com/influxdata/ifql/query/plan/logical.go create mode 100644 vendor/github.com/influxdata/ifql/query/plan/logical_test.go create mode 100644 vendor/github.com/influxdata/ifql/query/plan/physical.go create mode 100644 vendor/github.com/influxdata/ifql/query/plan/physical_test.go create mode 100644 vendor/github.com/influxdata/ifql/query/plan/procedure.go create mode 100644 vendor/github.com/influxdata/ifql/query/plan/rules.go create mode 100644 vendor/github.com/influxdata/ifql/query/plan/storage.go create mode 100644 vendor/github.com/influxdata/ifql/query/query.go create mode 100644 vendor/github.com/influxdata/ifql/query/query_test.go create mode 100644 
vendor/github.com/influxdata/ifql/query/resource_management.go create mode 100644 vendor/github.com/influxdata/ifql/query/time.go create mode 100644 vendor/github.com/influxdata/ifql/query/trigger.go create mode 100755 vendor/github.com/influxdata/ifql/release.sh create mode 100644 vendor/github.com/influxdata/ifql/semantic/binary_types.go create mode 100644 vendor/github.com/influxdata/ifql/semantic/doc.go create mode 100644 vendor/github.com/influxdata/ifql/semantic/graph.go create mode 100644 vendor/github.com/influxdata/ifql/semantic/graph_test.go create mode 100644 vendor/github.com/influxdata/ifql/semantic/json.go create mode 100644 vendor/github.com/influxdata/ifql/semantic/json_test.go create mode 100644 vendor/github.com/influxdata/ifql/semantic/types.go create mode 100644 vendor/github.com/influxdata/ifql/semantic/types_test.go create mode 100644 vendor/github.com/influxdata/ifql/semantic/walk.go create mode 100644 vendor/github.com/influxdata/tdigest/.gitignore create mode 100644 vendor/github.com/influxdata/tdigest/LICENSE create mode 100644 vendor/github.com/influxdata/tdigest/README.md create mode 100644 vendor/github.com/influxdata/tdigest/centroid.go create mode 100644 vendor/github.com/influxdata/tdigest/centroid_test.go create mode 100644 vendor/github.com/influxdata/tdigest/tdigest.go create mode 100644 vendor/github.com/influxdata/tdigest/tdigest_test.go create mode 100644 vendor/github.com/influxdata/yamux/.gitignore create mode 100644 vendor/github.com/influxdata/yamux/LICENSE create mode 100644 vendor/github.com/influxdata/yamux/README.md create mode 100644 vendor/github.com/influxdata/yamux/addr.go create mode 100644 vendor/github.com/influxdata/yamux/bench_test.go create mode 100644 vendor/github.com/influxdata/yamux/const.go create mode 100644 vendor/github.com/influxdata/yamux/const_test.go create mode 100644 vendor/github.com/influxdata/yamux/mux.go create mode 100644 vendor/github.com/influxdata/yamux/session.go create mode 100644 
vendor/github.com/influxdata/yamux/session_test.go create mode 100644 vendor/github.com/influxdata/yamux/spec.md create mode 100644 vendor/github.com/influxdata/yamux/stream.go create mode 100644 vendor/github.com/influxdata/yamux/util.go create mode 100644 vendor/github.com/influxdata/yamux/util_test.go create mode 100644 vendor/github.com/influxdata/yarpc/.gitignore create mode 100644 vendor/github.com/influxdata/yarpc/Godeps create mode 100644 vendor/github.com/influxdata/yarpc/LICENSE create mode 100644 vendor/github.com/influxdata/yarpc/README.md create mode 100644 vendor/github.com/influxdata/yarpc/call.go create mode 100644 vendor/github.com/influxdata/yarpc/call_test.go create mode 100644 vendor/github.com/influxdata/yarpc/clientconn.go create mode 100644 vendor/github.com/influxdata/yarpc/codec.go create mode 100644 vendor/github.com/influxdata/yarpc/codes/codes.pb.go create mode 100644 vendor/github.com/influxdata/yarpc/codes/codes.proto create mode 100644 vendor/github.com/influxdata/yarpc/rpc.go create mode 100644 vendor/github.com/influxdata/yarpc/server.go create mode 100644 vendor/github.com/influxdata/yarpc/status/status.go create mode 100644 vendor/github.com/influxdata/yarpc/status/status.pb.go create mode 100644 vendor/github.com/influxdata/yarpc/status/status.proto create mode 100644 vendor/github.com/influxdata/yarpc/stream.go create mode 100644 vendor/github.com/influxdata/yarpc/yarpcproto/helper.go create mode 100644 vendor/github.com/influxdata/yarpc/yarpcproto/yarpc.pb.go create mode 100644 vendor/github.com/influxdata/yarpc/yarpcproto/yarpc.proto create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/.travis.yml create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/README.md create mode 100644 
vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode_test.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode_test.go create mode 100644 vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/fixtures_test.go create mode 100644 vendor/github.com/opentracing/opentracing-go/.gitignore create mode 100644 vendor/github.com/opentracing/opentracing-go/.travis.yml create mode 100644 vendor/github.com/opentracing/opentracing-go/CHANGELOG.md create mode 100644 vendor/github.com/opentracing/opentracing-go/LICENSE create mode 100644 vendor/github.com/opentracing/opentracing-go/Makefile create mode 100644 vendor/github.com/opentracing/opentracing-go/README.md create mode 100644 vendor/github.com/opentracing/opentracing-go/globaltracer.go create mode 100644 vendor/github.com/opentracing/opentracing-go/gocontext.go create mode 100644 vendor/github.com/opentracing/opentracing-go/gocontext_test.go create mode 100644 vendor/github.com/opentracing/opentracing-go/log/field.go create mode 100644 vendor/github.com/opentracing/opentracing-go/log/field_test.go create mode 100644 vendor/github.com/opentracing/opentracing-go/log/util.go create mode 100644 vendor/github.com/opentracing/opentracing-go/noop.go create mode 100644 vendor/github.com/opentracing/opentracing-go/options_test.go create mode 100644 vendor/github.com/opentracing/opentracing-go/propagation.go create mode 100644 vendor/github.com/opentracing/opentracing-go/propagation_test.go create mode 100644 vendor/github.com/opentracing/opentracing-go/span.go create mode 100644 
vendor/github.com/opentracing/opentracing-go/testtracer_test.go create mode 100644 vendor/github.com/opentracing/opentracing-go/tracer.go create mode 100644 vendor/github.com/prometheus/client_golang/.gitignore create mode 100644 vendor/github.com/prometheus/client_golang/.travis.yml create mode 100644 vendor/github.com/prometheus/client_golang/AUTHORS.md create mode 100644 vendor/github.com/prometheus/client_golang/CHANGELOG.md create mode 100644 vendor/github.com/prometheus/client_golang/CONTRIBUTING.md create mode 100644 vendor/github.com/prometheus/client_golang/LICENSE create mode 100644 vendor/github.com/prometheus/client_golang/NOTICE create mode 100644 vendor/github.com/prometheus/client_golang/README.md create mode 100644 vendor/github.com/prometheus/client_golang/VERSION create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/.gitignore create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/README.md create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/counter.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/counter_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/desc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/doc.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/examples_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/fnv.go create mode 100644 
vendor/github.com/prometheus/client_golang/prometheus/gauge.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/histogram.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/http.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/http_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/metric.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/metric_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/registry.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/registry_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/summary.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/summary_test.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/untyped.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/value.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vec.go create mode 100644 vendor/github.com/prometheus/client_golang/prometheus/vec_test.go create mode 100644 vendor/github.com/prometheus/client_model/.gitignore create mode 100644 vendor/github.com/prometheus/client_model/CONTRIBUTING.md create mode 100644 vendor/github.com/prometheus/client_model/LICENSE create mode 100644 
vendor/github.com/prometheus/client_model/MAINTAINERS.md create mode 100644 vendor/github.com/prometheus/client_model/Makefile create mode 100644 vendor/github.com/prometheus/client_model/NOTICE create mode 100644 vendor/github.com/prometheus/client_model/README.md create mode 100644 vendor/github.com/prometheus/client_model/go/metrics.pb.go create mode 100644 vendor/github.com/prometheus/client_model/metrics.proto create mode 100644 vendor/github.com/prometheus/client_model/pom.xml create mode 100644 vendor/github.com/prometheus/client_model/setup.py create mode 100644 vendor/github.com/prometheus/common/.travis.yml create mode 100644 vendor/github.com/prometheus/common/CONTRIBUTING.md create mode 100644 vendor/github.com/prometheus/common/LICENSE create mode 100644 vendor/github.com/prometheus/common/MAINTAINERS.md create mode 100644 vendor/github.com/prometheus/common/NOTICE create mode 100644 vendor/github.com/prometheus/common/README.md create mode 100644 vendor/github.com/prometheus/common/expfmt/bench_test.go create mode 100644 vendor/github.com/prometheus/common/expfmt/decode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/decode_test.go create mode 100644 vendor/github.com/prometheus/common/expfmt/encode.go create mode 100644 vendor/github.com/prometheus/common/expfmt/expfmt.go create mode 100644 vendor/github.com/prometheus/common/expfmt/fuzz.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_create.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_create_test.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_parse.go create mode 100644 vendor/github.com/prometheus/common/expfmt/text_parse_test.go create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt create mode 100644 vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go create mode 100644 
vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go create mode 100644 vendor/github.com/prometheus/common/model/alert.go create mode 100644 vendor/github.com/prometheus/common/model/alert_test.go create mode 100644 vendor/github.com/prometheus/common/model/fingerprinting.go create mode 100644 vendor/github.com/prometheus/common/model/fnv.go create mode 100644 vendor/github.com/prometheus/common/model/labels.go create mode 100644 vendor/github.com/prometheus/common/model/labels_test.go create mode 100644 vendor/github.com/prometheus/common/model/labelset.go create mode 100644 vendor/github.com/prometheus/common/model/metric.go create mode 100644 vendor/github.com/prometheus/common/model/metric_test.go create mode 100644 vendor/github.com/prometheus/common/model/model.go create mode 100644 vendor/github.com/prometheus/common/model/signature.go create mode 100644 vendor/github.com/prometheus/common/model/signature_test.go create mode 100644 vendor/github.com/prometheus/common/model/silence.go create mode 100644 vendor/github.com/prometheus/common/model/silence_test.go create mode 100644 vendor/github.com/prometheus/common/model/time.go create mode 100644 vendor/github.com/prometheus/common/model/time_test.go create mode 100644 vendor/github.com/prometheus/common/model/value.go create mode 100644 vendor/github.com/prometheus/common/model/value_test.go create mode 100644 vendor/github.com/prometheus/procfs/.gitignore create mode 100644 vendor/github.com/prometheus/procfs/.travis.yml create mode 100644 vendor/github.com/prometheus/procfs/CONTRIBUTING.md create mode 100644 vendor/github.com/prometheus/procfs/LICENSE create mode 100644 vendor/github.com/prometheus/procfs/MAINTAINERS.md create mode 100644 vendor/github.com/prometheus/procfs/Makefile create mode 100644 vendor/github.com/prometheus/procfs/NOTICE create mode 100644 vendor/github.com/prometheus/procfs/README.md create mode 100644 
vendor/github.com/prometheus/procfs/buddyinfo.go create mode 100644 vendor/github.com/prometheus/procfs/buddyinfo_test.go create mode 100644 vendor/github.com/prometheus/procfs/doc.go create mode 100644 vendor/github.com/prometheus/procfs/fixtures.ttar create mode 100644 vendor/github.com/prometheus/procfs/fs.go create mode 100644 vendor/github.com/prometheus/procfs/fs_test.go create mode 100644 vendor/github.com/prometheus/procfs/internal/util/parse.go create mode 100644 vendor/github.com/prometheus/procfs/ipvs.go create mode 100644 vendor/github.com/prometheus/procfs/ipvs_test.go create mode 100644 vendor/github.com/prometheus/procfs/mdstat.go create mode 100644 vendor/github.com/prometheus/procfs/mdstat_test.go create mode 100644 vendor/github.com/prometheus/procfs/mountstats.go create mode 100644 vendor/github.com/prometheus/procfs/mountstats_test.go create mode 100644 vendor/github.com/prometheus/procfs/net_dev.go create mode 100644 vendor/github.com/prometheus/procfs/net_dev_test.go create mode 100644 vendor/github.com/prometheus/procfs/nfs/nfs.go create mode 100644 vendor/github.com/prometheus/procfs/nfs/parse.go create mode 100644 vendor/github.com/prometheus/procfs/nfs/parse_nfs.go create mode 100644 vendor/github.com/prometheus/procfs/nfs/parse_nfs_test.go create mode 100644 vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go create mode 100644 vendor/github.com/prometheus/procfs/nfs/parse_nfsd_test.go create mode 100644 vendor/github.com/prometheus/procfs/proc.go create mode 100644 vendor/github.com/prometheus/procfs/proc_io.go create mode 100644 vendor/github.com/prometheus/procfs/proc_io_test.go create mode 100644 vendor/github.com/prometheus/procfs/proc_limits.go create mode 100644 vendor/github.com/prometheus/procfs/proc_limits_test.go create mode 100644 vendor/github.com/prometheus/procfs/proc_ns.go create mode 100644 vendor/github.com/prometheus/procfs/proc_ns_test.go create mode 100644 vendor/github.com/prometheus/procfs/proc_stat.go create mode 
100644 vendor/github.com/prometheus/procfs/proc_stat_test.go create mode 100644 vendor/github.com/prometheus/procfs/proc_test.go create mode 100644 vendor/github.com/prometheus/procfs/stat.go create mode 100644 vendor/github.com/prometheus/procfs/stat_test.go create mode 100755 vendor/github.com/prometheus/procfs/ttar create mode 100644 vendor/github.com/prometheus/procfs/xfrm.go create mode 100644 vendor/github.com/prometheus/procfs/xfrm_test.go create mode 100644 vendor/github.com/prometheus/procfs/xfs/parse.go create mode 100644 vendor/github.com/prometheus/procfs/xfs/parse_test.go create mode 100644 vendor/github.com/prometheus/procfs/xfs/xfs.go diff --git a/Gopkg.lock b/Gopkg.lock index 390d6fffa..54b0a0bda 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -11,6 +11,12 @@ packages = ["."] revision = "3ec0642a7fb6488f65b06f9040adc67e3990296a" +[[projects]] + branch = "master" + name = "github.com/beorn7/perks" + packages = ["quantile"] + revision = "3a771d992973f24aa725d07868b467d1ddfceafb" + [[projects]] name = "github.com/boltdb/bolt" packages = ["."] @@ -39,8 +45,38 @@ [[projects]] name = "github.com/gogo/protobuf" - packages = ["gogoproto","jsonpb","plugin/compare","plugin/defaultcheck","plugin/description","plugin/embedcheck","plugin/enumstringer","plugin/equal","plugin/face","plugin/gostring","plugin/marshalto","plugin/oneofcheck","plugin/populate","plugin/size","plugin/stringer","plugin/testgen","plugin/union","plugin/unmarshal","proto","protoc-gen-gogo","protoc-gen-gogo/descriptor","protoc-gen-gogo/generator","protoc-gen-gogo/grpc","protoc-gen-gogo/plugin","vanity","vanity/command"] - revision = "6abcf94fd4c97dcb423fdafd42fe9f96ca7e421b" + packages = [ + "codec", + "gogoproto", + "jsonpb", + "plugin/compare", + "plugin/defaultcheck", + "plugin/description", + "plugin/embedcheck", + "plugin/enumstringer", + "plugin/equal", + "plugin/face", + "plugin/gostring", + "plugin/marshalto", + "plugin/oneofcheck", + "plugin/populate", + "plugin/size", + 
"plugin/stringer", + "plugin/testgen", + "plugin/union", + "plugin/unmarshal", + "proto", + "protoc-gen-gogo", + "protoc-gen-gogo/descriptor", + "protoc-gen-gogo/generator", + "protoc-gen-gogo/grpc", + "protoc-gen-gogo/plugin", + "sortkeys", + "types", + "vanity", + "vanity/command" + ] + revision = "49944b4a4b085da44c43d4b233ea40787396371f" [[projects]] name = "github.com/golang/protobuf" @@ -50,7 +86,13 @@ [[projects]] name = "github.com/google/go-cmp" - packages = ["cmp","cmp/cmpopts","cmp/internal/diff","cmp/internal/function","cmp/internal/value"] + packages = [ + "cmp", + "cmp/cmpopts", + "cmp/internal/diff", + "cmp/internal/function", + "cmp/internal/value" + ] revision = "8099a9787ce5dc5984ed879a3bda47dc730a8e97" version = "v0.1.0" @@ -65,22 +107,79 @@ packages = ["query"] revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a" +[[projects]] + name = "github.com/influxdata/ifql" + packages = [ + ".", + "ast", + "compiler", + "complete", + "functions", + "interpreter", + "parser", + "query", + "query/control", + "query/execute", + "query/execute/storage", + "query/plan", + "semantic" + ] + revision = "9445c4494d4421db2ab1aaaf6f4069446f11752e" + [[projects]] name = "github.com/influxdata/influxdb" - packages = ["influxql","influxql/internal","influxql/neldermead","models","pkg/escape"] + packages = [ + "influxql", + "influxql/internal", + "influxql/neldermead", + "models", + "pkg/escape" + ] revision = "cd9363b52cac452113b95554d98a6be51beda24e" version = "v1.1.5" [[projects]] name = "github.com/influxdata/kapacitor" - packages = ["client/v1","pipeline","pipeline/tick","services/k8s/client","tick","tick/ast","tick/stateful","udf/agent"] + packages = [ + "client/v1", + "pipeline", + "pipeline/tick", + "services/k8s/client", + "tick", + "tick/ast", + "tick/stateful", + "udf/agent" + ] revision = "6de30070b39afde111fea5e041281126fe8aae31" +[[projects]] + branch = "master" + name = "github.com/influxdata/tdigest" + packages = ["."] + revision = 
"617b83f940fd9acd207f712561a8a0590277fb38" + [[projects]] name = "github.com/influxdata/usage-client" packages = ["v1"] revision = "6d3895376368aa52a3a81d2a16e90f0f52371967" +[[projects]] + branch = "master" + name = "github.com/influxdata/yamux" + packages = ["."] + revision = "1f58ded512de5feabbe30b60c7d33a7a896c5f16" + +[[projects]] + branch = "master" + name = "github.com/influxdata/yarpc" + packages = [ + ".", + "codes", + "status", + "yarpcproto" + ] + revision = "f0da2db138cad2fb425541938fc28dd5a5bc6918" + [[projects]] name = "github.com/jessevdk/go-flags" packages = ["."] @@ -91,12 +190,60 @@ packages = ["."] revision = "46eb4c183bfc1ebb527d9d19bcded39476302eb8" +[[projects]] + name = "github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + revision = "3247c84500bff8d9fb6d579d800f20b3e091582c" + version = "v1.0.0" + +[[projects]] + name = "github.com/opentracing/opentracing-go" + packages = [ + ".", + "log" + ] + revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38" + version = "v1.0.2" + [[projects]] name = "github.com/pkg/errors" packages = ["."] revision = "645ef00459ed84a119197bfb8d8205042c6df63d" version = "v0.8.0" +[[projects]] + name = "github.com/prometheus/client_golang" + packages = ["prometheus"] + revision = "c5b7fccd204277076155f10851dad72b76a49317" + version = "v0.8.0" + +[[projects]] + branch = "master" + name = "github.com/prometheus/client_model" + packages = ["go"] + revision = "99fa1f4be8e564e8a6b613da7fa6f46c9edafc6c" + +[[projects]] + branch = "master" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model" + ] + revision = "e4aa40a9169a88835b849a6efb71e05dc04b88f0" + +[[projects]] + branch = "master" + name = "github.com/prometheus/procfs" + packages = [ + ".", + "internal/util", + "nfs", + "xfs" + ] + revision = "780932d4fbbe0e69b84c34c20f5c8d0981e109ea" + [[projects]] name = "github.com/satori/go.uuid" packages = ["."] @@ -115,12 +262,20 @@ [[projects]] 
name = "golang.org/x/net" - packages = ["context","context/ctxhttp"] + packages = [ + "context", + "context/ctxhttp" + ] revision = "749a502dd1eaf3e5bfd4f8956748c502357c0bbe" [[projects]] name = "golang.org/x/oauth2" - packages = [".","github","heroku","internal"] + packages = [ + ".", + "github", + "heroku", + "internal" + ] revision = "2f32c3ac0fa4fb807a0fcefb0b6f2468a0d99bd0" [[projects]] @@ -131,18 +286,31 @@ [[projects]] name = "google.golang.org/api" - packages = ["gensupport","googleapi","googleapi/internal/uritemplates","oauth2/v2"] + packages = [ + "gensupport", + "googleapi", + "googleapi/internal/uritemplates", + "oauth2/v2" + ] revision = "bc20c61134e1d25265dd60049f5735381e79b631" [[projects]] name = "google.golang.org/appengine" - packages = ["internal","internal/base","internal/datastore","internal/log","internal/remote_api","internal/urlfetch","urlfetch"] + packages = [ + "internal", + "internal/base", + "internal/datastore", + "internal/log", + "internal/remote_api", + "internal/urlfetch", + "urlfetch" + ] revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a" version = "v1.0.0" [solve-meta] analyzer-name = "dep" analyzer-version = 1 - inputs-digest = "a4df1b0953349e64a89581f4b83ac3a2f40e17681e19f8de3cbf828b6375a3ba" + inputs-digest = "3dee5534e81013d8f9d3b8cf80ad614dd8f0168114e63e1f1a5ba60733891e83" solver-name = "gps-cdcl" solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml index d593ba545..bf8892207 100644 --- a/Gopkg.toml +++ b/Gopkg.toml @@ -26,7 +26,7 @@ required = ["github.com/kevinburke/go-bindata","github.com/gogo/protobuf/proto", [[constraint]] name = "github.com/gogo/protobuf" - revision = "6abcf94fd4c97dcb423fdafd42fe9f96ca7e421b" + revision = "49944b4a4b085da44c43d4b233ea40787396371f" [[constraint]] name = "github.com/google/go-github" @@ -74,7 +74,7 @@ required = ["github.com/kevinburke/go-bindata","github.com/gogo/protobuf/proto", [[constraint]] name = "github.com/influxdata/ifql" - revision = "master" + revision = 
"9445c4494d4421db2ab1aaaf6f4069446f11752e" [[constraint]] name = "github.com/influxdata/kapacitor" diff --git a/integrations/server_test.go b/integrations/server_test.go index 7620d6a94..71044bd3f 100644 --- a/integrations/server_test.go +++ b/integrations/server_test.go @@ -2715,6 +2715,10 @@ func TestServer(t *testing.T) { "logout": "/oauth/logout", "external": { "statusFeed": "" + }, + "ifql": { + "self": "/chronograf/v1/ifql", + "suggestions": "/chronograf/v1/ifql/suggestions" } } `, @@ -2798,6 +2802,10 @@ func TestServer(t *testing.T) { "logout": "/oauth/logout", "external": { "statusFeed": "" + }, + "ifql": { + "self": "/chronograf/v1/ifql", + "suggestions": "/chronograf/v1/ifql/suggestions" } } `, diff --git a/server/routes_test.go b/server/routes_test.go index 38ff7b8b3..96c3b77c7 100644 --- a/server/routes_test.go +++ b/server/routes_test.go @@ -29,7 +29,7 @@ func TestAllRoutes(t *testing.T) { if err := json.Unmarshal(body, &routes); err != nil { t.Error("TestAllRoutes not able to unmarshal JSON response") } - want := `{"layouts":"/chronograf/v1/layouts","users":"/chronograf/v1/organizations/default/users","allUsers":"/chronograf/v1/users","organizations":"/chronograf/v1/organizations","mappings":"/chronograf/v1/mappings","sources":"/chronograf/v1/sources","me":"/chronograf/v1/me","environment":"/chronograf/v1/env","dashboards":"/chronograf/v1/dashboards","config":{"self":"/chronograf/v1/config","auth":"/chronograf/v1/config/auth"},"auth":[],"external":{"statusFeed":""}} + want := 
`{"layouts":"/chronograf/v1/layouts","users":"/chronograf/v1/organizations/default/users","allUsers":"/chronograf/v1/users","organizations":"/chronograf/v1/organizations","mappings":"/chronograf/v1/mappings","sources":"/chronograf/v1/sources","me":"/chronograf/v1/me","environment":"/chronograf/v1/env","dashboards":"/chronograf/v1/dashboards","config":{"self":"/chronograf/v1/config","auth":"/chronograf/v1/config/auth"},"auth":[],"external":{"statusFeed":""},"ifql":{"self":"/chronograf/v1/ifql","suggestions":"/chronograf/v1/ifql/suggestions"}} ` if want != string(body) { t.Errorf("TestAllRoutes\nwanted\n*%s*\ngot\n*%s*", want, string(body)) @@ -67,7 +67,7 @@ func TestAllRoutesWithAuth(t *testing.T) { if err := json.Unmarshal(body, &routes); err != nil { t.Error("TestAllRoutesWithAuth not able to unmarshal JSON response") } - want := `{"layouts":"/chronograf/v1/layouts","users":"/chronograf/v1/organizations/default/users","allUsers":"/chronograf/v1/users","organizations":"/chronograf/v1/organizations","mappings":"/chronograf/v1/mappings","sources":"/chronograf/v1/sources","me":"/chronograf/v1/me","environment":"/chronograf/v1/env","dashboards":"/chronograf/v1/dashboards","config":{"self":"/chronograf/v1/config","auth":"/chronograf/v1/config/auth"},"auth":[{"name":"github","label":"GitHub","login":"/oauth/github/login","logout":"/oauth/github/logout","callback":"/oauth/github/callback"}],"logout":"/oauth/logout","external":{"statusFeed":""}} + want := 
`{"layouts":"/chronograf/v1/layouts","users":"/chronograf/v1/organizations/default/users","allUsers":"/chronograf/v1/users","organizations":"/chronograf/v1/organizations","mappings":"/chronograf/v1/mappings","sources":"/chronograf/v1/sources","me":"/chronograf/v1/me","environment":"/chronograf/v1/env","dashboards":"/chronograf/v1/dashboards","config":{"self":"/chronograf/v1/config","auth":"/chronograf/v1/config/auth"},"auth":[{"name":"github","label":"GitHub","login":"/oauth/github/login","logout":"/oauth/github/logout","callback":"/oauth/github/callback"}],"logout":"/oauth/logout","external":{"statusFeed":""},"ifql":{"self":"/chronograf/v1/ifql","suggestions":"/chronograf/v1/ifql/suggestions"}} ` if want != string(body) { t.Errorf("TestAllRoutesWithAuth\nwanted\n*%s*\ngot\n*%s*", want, string(body)) @@ -100,7 +100,7 @@ func TestAllRoutesWithExternalLinks(t *testing.T) { if err := json.Unmarshal(body, &routes); err != nil { t.Error("TestAllRoutesWithExternalLinks not able to unmarshal JSON response") } - want := `{"layouts":"/chronograf/v1/layouts","users":"/chronograf/v1/organizations/default/users","allUsers":"/chronograf/v1/users","organizations":"/chronograf/v1/organizations","mappings":"/chronograf/v1/mappings","sources":"/chronograf/v1/sources","me":"/chronograf/v1/me","environment":"/chronograf/v1/env","dashboards":"/chronograf/v1/dashboards","config":{"self":"/chronograf/v1/config","auth":"/chronograf/v1/config/auth"},"auth":[],"external":{"statusFeed":"http://pineapple.life/feed.json","custom":[{"name":"cubeapple","url":"https://cube.apple"}]}} + want := 
`{"layouts":"/chronograf/v1/layouts","users":"/chronograf/v1/organizations/default/users","allUsers":"/chronograf/v1/users","organizations":"/chronograf/v1/organizations","mappings":"/chronograf/v1/mappings","sources":"/chronograf/v1/sources","me":"/chronograf/v1/me","environment":"/chronograf/v1/env","dashboards":"/chronograf/v1/dashboards","config":{"self":"/chronograf/v1/config","auth":"/chronograf/v1/config/auth"},"auth":[],"external":{"statusFeed":"http://pineapple.life/feed.json","custom":[{"name":"cubeapple","url":"https://cube.apple"}]},"ifql":{"self":"/chronograf/v1/ifql","suggestions":"/chronograf/v1/ifql/suggestions"}} ` if want != string(body) { t.Errorf("TestAllRoutesWithExternalLinks\nwanted\n*%s*\ngot\n*%s*", want, string(body)) diff --git a/vendor/github.com/beorn7/perks/.gitignore b/vendor/github.com/beorn7/perks/.gitignore new file mode 100644 index 000000000..1bd9209aa --- /dev/null +++ b/vendor/github.com/beorn7/perks/.gitignore @@ -0,0 +1,2 @@ +*.test +*.prof diff --git a/vendor/github.com/beorn7/perks/LICENSE b/vendor/github.com/beorn7/perks/LICENSE new file mode 100644 index 000000000..339177be6 --- /dev/null +++ b/vendor/github.com/beorn7/perks/LICENSE @@ -0,0 +1,20 @@ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining +a copy of this software and associated documentation files (the +"Software"), to deal in the Software without restriction, including +without limitation the rights to use, copy, modify, merge, publish, +distribute, sublicense, and/or sell copies of the Software, and to +permit persons to whom the Software is furnished to do so, subject to +the following conditions: + +The above copyright notice and this permission notice shall be +included in all copies or substantial portions of the Software. 
+ +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, +EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND +NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE +LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION +OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION +WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. diff --git a/vendor/github.com/beorn7/perks/README.md b/vendor/github.com/beorn7/perks/README.md new file mode 100644 index 000000000..fc0577770 --- /dev/null +++ b/vendor/github.com/beorn7/perks/README.md @@ -0,0 +1,31 @@ +# Perks for Go (golang.org) + +Perks contains the Go package quantile that computes approximate quantiles over +an unbounded data stream within low memory and CPU bounds. + +For more information and examples, see: +http://godoc.org/github.com/bmizerany/perks + +A very special thank you and shout out to Graham Cormode (Rutgers University), +Flip Korn (AT&T Labs–Research), S. Muthukrishnan (Rutgers University), and +Divesh Srivastava (AT&T Labs–Research) for their research and publication of +[Effective Computation of Biased Quantiles over Data Streams](http://www.cs.rutgers.edu/~muthu/bquant.pdf) + +Thank you, also: +* Armon Dadgar (@armon) +* Andrew Gerrand (@nf) +* Brad Fitzpatrick (@bradfitz) +* Keith Rarick (@kr) + +FAQ: + +Q: Why not move the quantile package into the project root? +A: I want to add more packages to perks later. 
+ +Copyright (C) 2013 Blake Mizerany + +Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
diff --git a/vendor/github.com/beorn7/perks/quantile/bench_test.go b/vendor/github.com/beorn7/perks/quantile/bench_test.go new file mode 100644 index 000000000..0bd0e4e77 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/bench_test.go @@ -0,0 +1,63 @@ +package quantile + +import ( + "testing" +) + +func BenchmarkInsertTargeted(b *testing.B) { + b.ReportAllocs() + + s := NewTargeted(Targets) + b.ResetTimer() + for i := float64(0); i < float64(b.N); i++ { + s.Insert(i) + } +} + +func BenchmarkInsertTargetedSmallEpsilon(b *testing.B) { + s := NewTargeted(TargetsSmallEpsilon) + b.ResetTimer() + for i := float64(0); i < float64(b.N); i++ { + s.Insert(i) + } +} + +func BenchmarkInsertBiased(b *testing.B) { + s := NewLowBiased(0.01) + b.ResetTimer() + for i := float64(0); i < float64(b.N); i++ { + s.Insert(i) + } +} + +func BenchmarkInsertBiasedSmallEpsilon(b *testing.B) { + s := NewLowBiased(0.0001) + b.ResetTimer() + for i := float64(0); i < float64(b.N); i++ { + s.Insert(i) + } +} + +func BenchmarkQuery(b *testing.B) { + s := NewTargeted(Targets) + for i := float64(0); i < 1e6; i++ { + s.Insert(i) + } + b.ResetTimer() + n := float64(b.N) + for i := float64(0); i < n; i++ { + s.Query(i / n) + } +} + +func BenchmarkQuerySmallEpsilon(b *testing.B) { + s := NewTargeted(TargetsSmallEpsilon) + for i := float64(0); i < 1e6; i++ { + s.Insert(i) + } + b.ResetTimer() + n := float64(b.N) + for i := float64(0); i < n; i++ { + s.Query(i / n) + } +} diff --git a/vendor/github.com/beorn7/perks/quantile/example_test.go b/vendor/github.com/beorn7/perks/quantile/example_test.go new file mode 100644 index 000000000..ab3293aaf --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/example_test.go @@ -0,0 +1,121 @@ +// +build go1.1 + +package quantile_test + +import ( + "bufio" + "fmt" + "log" + "os" + "strconv" + "time" + + "github.com/beorn7/perks/quantile" +) + +func Example_simple() { + ch := make(chan float64) + go sendFloats(ch) + + // Compute the 50th, 90th, and 99th 
percentile. + q := quantile.NewTargeted(map[float64]float64{ + 0.50: 0.005, + 0.90: 0.001, + 0.99: 0.0001, + }) + for v := range ch { + q.Insert(v) + } + + fmt.Println("perc50:", q.Query(0.50)) + fmt.Println("perc90:", q.Query(0.90)) + fmt.Println("perc99:", q.Query(0.99)) + fmt.Println("count:", q.Count()) + // Output: + // perc50: 5 + // perc90: 16 + // perc99: 223 + // count: 2388 +} + +func Example_mergeMultipleStreams() { + // Scenario: + // We have multiple database shards. On each shard, there is a process + // collecting query response times from the database logs and inserting + // them into a Stream (created via NewTargeted(0.90)), much like the + // Simple example. These processes expose a network interface for us to + // ask them to serialize and send us the results of their + // Stream.Samples so we may Merge and Query them. + // + // NOTES: + // * These sample sets are small, allowing us to get them + // across the network much faster than sending the entire list of data + // points. + // + // * For this to work correctly, we must supply the same quantiles + // a priori the process collecting the samples supplied to NewTargeted, + // even if we do not plan to query them all here. + ch := make(chan quantile.Samples) + getDBQuerySamples(ch) + q := quantile.NewTargeted(map[float64]float64{0.90: 0.001}) + for samples := range ch { + q.Merge(samples) + } + fmt.Println("perc90:", q.Query(0.90)) +} + +func Example_window() { + // Scenario: We want the 90th, 95th, and 99th percentiles for each + // minute. 
+ + ch := make(chan float64) + go sendStreamValues(ch) + + tick := time.NewTicker(1 * time.Minute) + q := quantile.NewTargeted(map[float64]float64{ + 0.90: 0.001, + 0.95: 0.0005, + 0.99: 0.0001, + }) + for { + select { + case t := <-tick.C: + flushToDB(t, q.Samples()) + q.Reset() + case v := <-ch: + q.Insert(v) + } + } +} + +func sendStreamValues(ch chan float64) { + // Use your imagination +} + +func flushToDB(t time.Time, samples quantile.Samples) { + // Use your imagination +} + +// This is a stub for the above example. In reality this would hit the remote +// servers via http or something like it. +func getDBQuerySamples(ch chan quantile.Samples) {} + +func sendFloats(ch chan<- float64) { + f, err := os.Open("exampledata.txt") + if err != nil { + log.Fatal(err) + } + sc := bufio.NewScanner(f) + for sc.Scan() { + b := sc.Bytes() + v, err := strconv.ParseFloat(string(b), 64) + if err != nil { + log.Fatal(err) + } + ch <- v + } + if sc.Err() != nil { + log.Fatal(sc.Err()) + } + close(ch) +} diff --git a/vendor/github.com/beorn7/perks/quantile/exampledata.txt b/vendor/github.com/beorn7/perks/quantile/exampledata.txt new file mode 100644 index 000000000..1602287d7 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/exampledata.txt @@ -0,0 +1,2388 @@ +8 +5 +26 +12 +5 +235 +13 +6 +28 +30 +3 +3 +3 +3 +5 +2 +33 +7 +2 +4 +7 +12 +14 +5 +8 +3 +10 +4 +5 +3 +6 +6 +209 +20 +3 +10 +14 +3 +4 +6 +8 +5 +11 +7 +3 +2 +3 +3 +212 +5 +222 +4 +10 +10 +5 +6 +3 +8 +3 +10 +254 +220 +2 +3 +5 +24 +5 +4 +222 +7 +3 +3 +223 +8 +15 +12 +14 +14 +3 +2 +2 +3 +13 +3 +11 +4 +4 +6 +5 +7 +13 +5 +3 +5 +2 +5 +3 +5 +2 +7 +15 +17 +14 +3 +6 +6 +3 +17 +5 +4 +7 +6 +4 +4 +8 +6 +8 +3 +9 +3 +6 +3 +4 +5 +3 +3 +660 +4 +6 +10 +3 +6 +3 +2 +5 +13 +2 +4 +4 +10 +4 +8 +4 +3 +7 +9 +9 +3 +10 +37 +3 +13 +4 +12 +3 +6 +10 +8 +5 +21 +2 +3 +8 +3 +2 +3 +3 +4 +12 +2 +4 +8 +8 +4 +3 +2 +20 +1 +6 +32 +2 +11 +6 +18 +3 +8 +11 +3 +212 +3 +4 +2 +6 +7 +12 +11 +3 +2 +16 +10 +6 +4 +6 +3 +2 +7 +3 +2 +2 +2 +2 +5 +6 +4 +3 +10 +3 +4 
+6 +5 +3 +4 +4 +5 +6 +4 +3 +4 +4 +5 +7 +5 +5 +3 +2 +7 +2 +4 +12 +4 +5 +6 +2 +4 +4 +8 +4 +15 +13 +7 +16 +5 +3 +23 +5 +5 +7 +3 +2 +9 +8 +7 +5 +8 +11 +4 +10 +76 +4 +47 +4 +3 +2 +7 +4 +2 +3 +37 +10 +4 +2 +20 +5 +4 +4 +10 +10 +4 +3 +7 +23 +240 +7 +13 +5 +5 +3 +3 +2 +5 +4 +2 +8 +7 +19 +2 +23 +8 +7 +2 +5 +3 +8 +3 +8 +13 +5 +5 +5 +2 +3 +23 +4 +9 +8 +4 +3 +3 +5 +220 +2 +3 +4 +6 +14 +3 +53 +6 +2 +5 +18 +6 +3 +219 +6 +5 +2 +5 +3 +6 +5 +15 +4 +3 +17 +3 +2 +4 +7 +2 +3 +3 +4 +4 +3 +2 +664 +6 +3 +23 +5 +5 +16 +5 +8 +2 +4 +2 +24 +12 +3 +2 +3 +5 +8 +3 +5 +4 +3 +14 +3 +5 +8 +2 +3 +7 +9 +4 +2 +3 +6 +8 +4 +3 +4 +6 +5 +3 +3 +6 +3 +19 +4 +4 +6 +3 +6 +3 +5 +22 +5 +4 +4 +3 +8 +11 +4 +9 +7 +6 +13 +4 +4 +4 +6 +17 +9 +3 +3 +3 +4 +3 +221 +5 +11 +3 +4 +2 +12 +6 +3 +5 +7 +5 +7 +4 +9 +7 +14 +37 +19 +217 +16 +3 +5 +2 +2 +7 +19 +7 +6 +7 +4 +24 +5 +11 +4 +7 +7 +9 +13 +3 +4 +3 +6 +28 +4 +4 +5 +5 +2 +5 +6 +4 +4 +6 +10 +5 +4 +3 +2 +3 +3 +6 +5 +5 +4 +3 +2 +3 +7 +4 +6 +18 +16 +8 +16 +4 +5 +8 +6 +9 +13 +1545 +6 +215 +6 +5 +6 +3 +45 +31 +5 +2 +2 +4 +3 +3 +2 +5 +4 +3 +5 +7 +7 +4 +5 +8 +5 +4 +749 +2 +31 +9 +11 +2 +11 +5 +4 +4 +7 +9 +11 +4 +5 +4 +7 +3 +4 +6 +2 +15 +3 +4 +3 +4 +3 +5 +2 +13 +5 +5 +3 +3 +23 +4 +4 +5 +7 +4 +13 +2 +4 +3 +4 +2 +6 +2 +7 +3 +5 +5 +3 +29 +5 +4 +4 +3 +10 +2 +3 +79 +16 +6 +6 +7 +7 +3 +5 +5 +7 +4 +3 +7 +9 +5 +6 +5 +9 +6 +3 +6 +4 +17 +2 +10 +9 +3 +6 +2 +3 +21 +22 +5 +11 +4 +2 +17 +2 +224 +2 +14 +3 +4 +4 +2 +4 +4 +4 +4 +5 +3 +4 +4 +10 +2 +6 +3 +3 +5 +7 +2 +7 +5 +6 +3 +218 +2 +2 +5 +2 +6 +3 +5 +222 +14 +6 +33 +3 +2 +5 +3 +3 +3 +9 +5 +3 +3 +2 +7 +4 +3 +4 +3 +5 +6 +5 +26 +4 +13 +9 +7 +3 +221 +3 +3 +4 +4 +4 +4 +2 +18 +5 +3 +7 +9 +6 +8 +3 +10 +3 +11 +9 +5 +4 +17 +5 +5 +6 +6 +3 +2 +4 +12 +17 +6 +7 +218 +4 +2 +4 +10 +3 +5 +15 +3 +9 +4 +3 +3 +6 +29 +3 +3 +4 +5 +5 +3 +8 +5 +6 +6 +7 +5 +3 +5 +3 +29 +2 +31 +5 +15 +24 +16 +5 +207 +4 +3 +3 +2 +15 +4 +4 +13 +5 +5 +4 +6 +10 +2 +7 +8 +4 +6 +20 +5 +3 +4 +3 +12 +12 +5 +17 +7 +3 +3 +3 +6 +10 +3 +5 +25 +80 +4 +9 +3 +2 +11 +3 +3 +2 +3 +8 +7 +5 +5 +19 +5 +3 +3 
+12 +11 +2 +6 +5 +5 +5 +3 +3 +3 +4 +209 +14 +3 +2 +5 +19 +4 +4 +3 +4 +14 +5 +6 +4 +13 +9 +7 +4 +7 +10 +2 +9 +5 +7 +2 +8 +4 +6 +5 +5 +222 +8 +7 +12 +5 +216 +3 +4 +4 +6 +3 +14 +8 +7 +13 +4 +3 +3 +3 +3 +17 +5 +4 +3 +33 +6 +6 +33 +7 +5 +3 +8 +7 +5 +2 +9 +4 +2 +233 +24 +7 +4 +8 +10 +3 +4 +15 +2 +16 +3 +3 +13 +12 +7 +5 +4 +207 +4 +2 +4 +27 +15 +2 +5 +2 +25 +6 +5 +5 +6 +13 +6 +18 +6 +4 +12 +225 +10 +7 +5 +2 +2 +11 +4 +14 +21 +8 +10 +3 +5 +4 +232 +2 +5 +5 +3 +7 +17 +11 +6 +6 +23 +4 +6 +3 +5 +4 +2 +17 +3 +6 +5 +8 +3 +2 +2 +14 +9 +4 +4 +2 +5 +5 +3 +7 +6 +12 +6 +10 +3 +6 +2 +2 +19 +5 +4 +4 +9 +2 +4 +13 +3 +5 +6 +3 +6 +5 +4 +9 +6 +3 +5 +7 +3 +6 +6 +4 +3 +10 +6 +3 +221 +3 +5 +3 +6 +4 +8 +5 +3 +6 +4 +4 +2 +54 +5 +6 +11 +3 +3 +4 +4 +4 +3 +7 +3 +11 +11 +7 +10 +6 +13 +223 +213 +15 +231 +7 +3 +7 +228 +2 +3 +4 +4 +5 +6 +7 +4 +13 +3 +4 +5 +3 +6 +4 +6 +7 +2 +4 +3 +4 +3 +3 +6 +3 +7 +3 +5 +18 +5 +6 +8 +10 +3 +3 +3 +2 +4 +2 +4 +4 +5 +6 +6 +4 +10 +13 +3 +12 +5 +12 +16 +8 +4 +19 +11 +2 +4 +5 +6 +8 +5 +6 +4 +18 +10 +4 +2 +216 +6 +6 +6 +2 +4 +12 +8 +3 +11 +5 +6 +14 +5 +3 +13 +4 +5 +4 +5 +3 +28 +6 +3 +7 +219 +3 +9 +7 +3 +10 +6 +3 +4 +19 +5 +7 +11 +6 +15 +19 +4 +13 +11 +3 +7 +5 +10 +2 +8 +11 +2 +6 +4 +6 +24 +6 +3 +3 +3 +3 +6 +18 +4 +11 +4 +2 +5 +10 +8 +3 +9 +5 +3 +4 +5 +6 +2 +5 +7 +4 +4 +14 +6 +4 +4 +5 +5 +7 +2 +4 +3 +7 +3 +3 +6 +4 +5 +4 +4 +4 +3 +3 +3 +3 +8 +14 +2 +3 +5 +3 +2 +4 +5 +3 +7 +3 +3 +18 +3 +4 +4 +5 +7 +3 +3 +3 +13 +5 +4 +8 +211 +5 +5 +3 +5 +2 +5 +4 +2 +655 +6 +3 +5 +11 +2 +5 +3 +12 +9 +15 +11 +5 +12 +217 +2 +6 +17 +3 +3 +207 +5 +5 +4 +5 +9 +3 +2 +8 +5 +4 +3 +2 +5 +12 +4 +14 +5 +4 +2 +13 +5 +8 +4 +225 +4 +3 +4 +5 +4 +3 +3 +6 +23 +9 +2 +6 +7 +233 +4 +4 +6 +18 +3 +4 +6 +3 +4 +4 +2 +3 +7 +4 +13 +227 +4 +3 +5 +4 +2 +12 +9 +17 +3 +7 +14 +6 +4 +5 +21 +4 +8 +9 +2 +9 +25 +16 +3 +6 +4 +7 +8 +5 +2 +3 +5 +4 +3 +3 +5 +3 +3 +3 +2 +3 +19 +2 +4 +3 +4 +2 +3 +4 +4 +2 +4 +3 +3 +3 +2 +6 +3 +17 +5 +6 +4 +3 +13 +5 +3 +3 +3 +4 +9 +4 +2 +14 +12 +4 +5 +24 +4 +3 +37 +12 +11 +21 +3 +4 +3 +13 +4 +2 +3 +15 +4 
+11 +4 +4 +3 +8 +3 +4 +4 +12 +8 +5 +3 +3 +4 +2 +220 +3 +5 +223 +3 +3 +3 +10 +3 +15 +4 +241 +9 +7 +3 +6 +6 +23 +4 +13 +7 +3 +4 +7 +4 +9 +3 +3 +4 +10 +5 +5 +1 +5 +24 +2 +4 +5 +5 +6 +14 +3 +8 +2 +3 +5 +13 +13 +3 +5 +2 +3 +15 +3 +4 +2 +10 +4 +4 +4 +5 +5 +3 +5 +3 +4 +7 +4 +27 +3 +6 +4 +15 +3 +5 +6 +6 +5 +4 +8 +3 +9 +2 +6 +3 +4 +3 +7 +4 +18 +3 +11 +3 +3 +8 +9 +7 +24 +3 +219 +7 +10 +4 +5 +9 +12 +2 +5 +4 +4 +4 +3 +3 +19 +5 +8 +16 +8 +6 +22 +3 +23 +3 +242 +9 +4 +3 +3 +5 +7 +3 +3 +5 +8 +3 +7 +5 +14 +8 +10 +3 +4 +3 +7 +4 +6 +7 +4 +10 +4 +3 +11 +3 +7 +10 +3 +13 +6 +8 +12 +10 +5 +7 +9 +3 +4 +7 +7 +10 +8 +30 +9 +19 +4 +3 +19 +15 +4 +13 +3 +215 +223 +4 +7 +4 +8 +17 +16 +3 +7 +6 +5 +5 +4 +12 +3 +7 +4 +4 +13 +4 +5 +2 +5 +6 +5 +6 +6 +7 +10 +18 +23 +9 +3 +3 +6 +5 +2 +4 +2 +7 +3 +3 +2 +5 +5 +14 +10 +224 +6 +3 +4 +3 +7 +5 +9 +3 +6 +4 +2 +5 +11 +4 +3 +3 +2 +8 +4 +7 +4 +10 +7 +3 +3 +18 +18 +17 +3 +3 +3 +4 +5 +3 +3 +4 +12 +7 +3 +11 +13 +5 +4 +7 +13 +5 +4 +11 +3 +12 +3 +6 +4 +4 +21 +4 +6 +9 +5 +3 +10 +8 +4 +6 +4 +4 +6 +5 +4 +8 +6 +4 +6 +4 +4 +5 +9 +6 +3 +4 +2 +9 +3 +18 +2 +4 +3 +13 +3 +6 +6 +8 +7 +9 +3 +2 +16 +3 +4 +6 +3 +2 +33 +22 +14 +4 +9 +12 +4 +5 +6 +3 +23 +9 +4 +3 +5 +5 +3 +4 +5 +3 +5 +3 +10 +4 +5 +5 +8 +4 +4 +6 +8 +5 +4 +3 +4 +6 +3 +3 +3 +5 +9 +12 +6 +5 +9 +3 +5 +3 +2 +2 +2 +18 +3 +2 +21 +2 +5 +4 +6 +4 +5 +10 +3 +9 +3 +2 +10 +7 +3 +6 +6 +4 +4 +8 +12 +7 +3 +7 +3 +3 +9 +3 +4 +5 +4 +4 +5 +5 +10 +15 +4 +4 +14 +6 +227 +3 +14 +5 +216 +22 +5 +4 +2 +2 +6 +3 +4 +2 +9 +9 +4 +3 +28 +13 +11 +4 +5 +3 +3 +2 +3 +3 +5 +3 +4 +3 +5 +23 +26 +3 +4 +5 +6 +4 +6 +3 +5 +5 +3 +4 +3 +2 +2 +2 +7 +14 +3 +6 +7 +17 +2 +2 +15 +14 +16 +4 +6 +7 +13 +6 +4 +5 +6 +16 +3 +3 +28 +3 +6 +15 +3 +9 +2 +4 +6 +3 +3 +22 +4 +12 +6 +7 +2 +5 +4 +10 +3 +16 +6 +9 +2 +5 +12 +7 +5 +5 +5 +5 +2 +11 +9 +17 +4 +3 +11 +7 +3 +5 +15 +4 +3 +4 +211 +8 +7 +5 +4 +7 +6 +7 +6 +3 +6 +5 +6 +5 +3 +4 +4 +26 +4 +6 +10 +4 +4 +3 +2 +3 +3 +4 +5 +9 +3 +9 +4 +4 +5 +5 +8 +2 +4 +2 +3 +8 +4 +11 +19 +5 +8 +6 +3 +5 +6 +12 +3 +2 +4 +16 +12 +3 +4 +4 +8 +6 +5 +6 
+6 +219 +8 +222 +6 +16 +3 +13 +19 +5 +4 +3 +11 +6 +10 +4 +7 +7 +12 +5 +3 +3 +5 +6 +10 +3 +8 +2 +5 +4 +7 +2 +4 +4 +2 +12 +9 +6 +4 +2 +40 +2 +4 +10 +4 +223 +4 +2 +20 +6 +7 +24 +5 +4 +5 +2 +20 +16 +6 +5 +13 +2 +3 +3 +19 +3 +2 +4 +5 +6 +7 +11 +12 +5 +6 +7 +7 +3 +5 +3 +5 +3 +14 +3 +4 +4 +2 +11 +1 +7 +3 +9 +6 +11 +12 +5 +8 +6 +221 +4 +2 +12 +4 +3 +15 +4 +5 +226 +7 +218 +7 +5 +4 +5 +18 +4 +5 +9 +4 +4 +2 +9 +18 +18 +9 +5 +6 +6 +3 +3 +7 +3 +5 +4 +4 +4 +12 +3 +6 +31 +5 +4 +7 +3 +6 +5 +6 +5 +11 +2 +2 +11 +11 +6 +7 +5 +8 +7 +10 +5 +23 +7 +4 +3 +5 +34 +2 +5 +23 +7 +3 +6 +8 +4 +4 +4 +2 +5 +3 +8 +5 +4 +8 +25 +2 +3 +17 +8 +3 +4 +8 +7 +3 +15 +6 +5 +7 +21 +9 +5 +6 +6 +5 +3 +2 +3 +10 +3 +6 +3 +14 +7 +4 +4 +8 +7 +8 +2 +6 +12 +4 +213 +6 +5 +21 +8 +2 +5 +23 +3 +11 +2 +3 +6 +25 +2 +3 +6 +7 +6 +6 +4 +4 +6 +3 +17 +9 +7 +6 +4 +3 +10 +7 +2 +3 +3 +3 +11 +8 +3 +7 +6 +4 +14 +36 +3 +4 +3 +3 +22 +13 +21 +4 +2 +7 +4 +4 +17 +15 +3 +7 +11 +2 +4 +7 +6 +209 +6 +3 +2 +2 +24 +4 +9 +4 +3 +3 +3 +29 +2 +2 +4 +3 +3 +5 +4 +6 +3 +3 +2 +4 diff --git a/vendor/github.com/beorn7/perks/quantile/stream.go b/vendor/github.com/beorn7/perks/quantile/stream.go new file mode 100644 index 000000000..d7d14f8eb --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/stream.go @@ -0,0 +1,316 @@ +// Package quantile computes approximate quantiles over an unbounded data +// stream within low memory and CPU bounds. +// +// A small amount of accuracy is traded to achieve the above properties. +// +// Multiple streams can be merged before calling Query to generate a single set +// of results. This is meaningful when the streams represent the same type of +// data. See Merge and Samples. +// +// For more detailed information about the algorithm used, see: +// +// Effective Computation of Biased Quantiles over Data Streams +// +// http://www.cs.rutgers.edu/~muthu/bquant.pdf +package quantile + +import ( + "math" + "sort" +) + +// Sample holds an observed value and meta information for compression. 
JSON +// tags have been added for convenience. +type Sample struct { + Value float64 `json:",string"` + Width float64 `json:",string"` + Delta float64 `json:",string"` +} + +// Samples represents a slice of samples. It implements sort.Interface. +type Samples []Sample + +func (a Samples) Len() int { return len(a) } +func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value } +func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] } + +type invariant func(s *stream, r float64) float64 + +// NewLowBiased returns an initialized Stream for low-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the lower ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within (1±Epsilon)*Quantile. +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewLowBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * r + } + return newStream(ƒ) +} + +// NewHighBiased returns an initialized Stream for high-biased quantiles +// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but +// error guarantees can still be given even for the higher ranks of the data +// distribution. +// +// The provided epsilon is a relative error, i.e. the true quantile of a value +// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error +// properties. +func NewHighBiased(epsilon float64) *Stream { + ƒ := func(s *stream, r float64) float64 { + return 2 * epsilon * (s.n - r) + } + return newStream(ƒ) +} + +// NewTargeted returns an initialized Stream concerned with a particular set of +// quantile values that are supplied a priori. 
Knowing these a priori reduces +// space and computation time. The targets map maps the desired quantiles to +// their absolute errors, i.e. the true quantile of a value returned by a query +// is guaranteed to be within (Quantile±Epsilon). +// +// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties. +func NewTargeted(targetMap map[float64]float64) *Stream { + // Convert map to slice to avoid slow iterations on a map. + // ƒ is called on the hot path, so converting the map to a slice + // beforehand results in significant CPU savings. + targets := targetMapToSlice(targetMap) + + ƒ := func(s *stream, r float64) float64 { + var m = math.MaxFloat64 + var f float64 + for _, t := range targets { + if t.quantile*s.n <= r { + f = (2 * t.epsilon * r) / t.quantile + } else { + f = (2 * t.epsilon * (s.n - r)) / (1 - t.quantile) + } + if f < m { + m = f + } + } + return m + } + return newStream(ƒ) +} + +type target struct { + quantile float64 + epsilon float64 +} + +func targetMapToSlice(targetMap map[float64]float64) []target { + targets := make([]target, 0, len(targetMap)) + + for quantile, epsilon := range targetMap { + t := target{ + quantile: quantile, + epsilon: epsilon, + } + targets = append(targets, t) + } + + return targets +} + +// Stream computes quantiles for a stream of float64s. It is not thread-safe by +// design. Take care when using across multiple goroutines. +type Stream struct { + *stream + b Samples + sorted bool +} + +func newStream(ƒ invariant) *Stream { + x := &stream{ƒ: ƒ} + return &Stream{x, make(Samples, 0, 500), true} +} + +// Insert inserts v into the stream. +func (s *Stream) Insert(v float64) { + s.insert(Sample{Value: v, Width: 1}) +} + +func (s *Stream) insert(sample Sample) { + s.b = append(s.b, sample) + s.sorted = false + if len(s.b) == cap(s.b) { + s.flush() + } +} + +// Query returns the computed qth percentiles value. 
If s was created with +// NewTargeted, and q is not in the set of quantiles provided a priori, Query +// will return an unspecified result. +func (s *Stream) Query(q float64) float64 { + if !s.flushed() { + // Fast path when there hasn't been enough data for a flush; + // this also yields better accuracy for small sets of data. + l := len(s.b) + if l == 0 { + return 0 + } + i := int(math.Ceil(float64(l) * q)) + if i > 0 { + i -= 1 + } + s.maybeSort() + return s.b[i].Value + } + s.flush() + return s.stream.query(q) +} + +// Merge merges samples into the underlying streams samples. This is handy when +// merging multiple streams from separate threads, database shards, etc. +// +// ATTENTION: This method is broken and does not yield correct results. The +// underlying algorithm is not capable of merging streams correctly. +func (s *Stream) Merge(samples Samples) { + sort.Sort(samples) + s.stream.merge(samples) +} + +// Reset reinitializes and clears the list reusing the samples buffer memory. +func (s *Stream) Reset() { + s.stream.reset() + s.b = s.b[:0] +} + +// Samples returns stream samples held by s. +func (s *Stream) Samples() Samples { + if !s.flushed() { + return s.b + } + s.flush() + return s.stream.samples() +} + +// Count returns the total number of samples observed in the stream +// since initialization. +func (s *Stream) Count() int { + return len(s.b) + s.stream.count() +} + +func (s *Stream) flush() { + s.maybeSort() + s.stream.merge(s.b) + s.b = s.b[:0] +} + +func (s *Stream) maybeSort() { + if !s.sorted { + s.sorted = true + sort.Sort(s.b) + } +} + +func (s *Stream) flushed() bool { + return len(s.stream.l) > 0 +} + +type stream struct { + n float64 + l []Sample + ƒ invariant +} + +func (s *stream) reset() { + s.l = s.l[:0] + s.n = 0 +} + +func (s *stream) insert(v float64) { + s.merge(Samples{{v, 1, 0}}) +} + +func (s *stream) merge(samples Samples) { + // TODO(beorn7): This tries to merge not only individual samples, but + // whole summaries. 
The paper doesn't mention merging summaries at + // all. Unittests show that the merging is inaccurate. Find out how to + // do merges properly. + var r float64 + i := 0 + for _, sample := range samples { + for ; i < len(s.l); i++ { + c := s.l[i] + if c.Value > sample.Value { + // Insert at position i. + s.l = append(s.l, Sample{}) + copy(s.l[i+1:], s.l[i:]) + s.l[i] = Sample{ + sample.Value, + sample.Width, + math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1), + // TODO(beorn7): How to calculate delta correctly? + } + i++ + goto inserted + } + r += c.Width + } + s.l = append(s.l, Sample{sample.Value, sample.Width, 0}) + i++ + inserted: + s.n += sample.Width + r += sample.Width + } + s.compress() +} + +func (s *stream) count() int { + return int(s.n) +} + +func (s *stream) query(q float64) float64 { + t := math.Ceil(q * s.n) + t += math.Ceil(s.ƒ(s, t) / 2) + p := s.l[0] + var r float64 + for _, c := range s.l[1:] { + r += p.Width + if r+c.Width+c.Delta > t { + return p.Value + } + p = c + } + return p.Value +} + +func (s *stream) compress() { + if len(s.l) < 2 { + return + } + x := s.l[len(s.l)-1] + xi := len(s.l) - 1 + r := s.n - 1 - x.Width + + for i := len(s.l) - 2; i >= 0; i-- { + c := s.l[i] + if c.Width+x.Width+x.Delta <= s.ƒ(s, r) { + x.Width += c.Width + s.l[xi] = x + // Remove element at i. 
+ copy(s.l[i:], s.l[i+1:]) + s.l = s.l[:len(s.l)-1] + xi -= 1 + } else { + x = c + xi = i + } + r -= c.Width + } +} + +func (s *stream) samples() Samples { + samples := make(Samples, len(s.l)) + copy(samples, s.l) + return samples +} diff --git a/vendor/github.com/beorn7/perks/quantile/stream_test.go b/vendor/github.com/beorn7/perks/quantile/stream_test.go new file mode 100644 index 000000000..855195097 --- /dev/null +++ b/vendor/github.com/beorn7/perks/quantile/stream_test.go @@ -0,0 +1,215 @@ +package quantile + +import ( + "math" + "math/rand" + "sort" + "testing" +) + +var ( + Targets = map[float64]float64{ + 0.01: 0.001, + 0.10: 0.01, + 0.50: 0.05, + 0.90: 0.01, + 0.99: 0.001, + } + TargetsSmallEpsilon = map[float64]float64{ + 0.01: 0.0001, + 0.10: 0.001, + 0.50: 0.005, + 0.90: 0.001, + 0.99: 0.0001, + } + LowQuantiles = []float64{0.01, 0.1, 0.5} + HighQuantiles = []float64{0.99, 0.9, 0.5} +) + +const RelativeEpsilon = 0.01 + +func verifyPercsWithAbsoluteEpsilon(t *testing.T, a []float64, s *Stream) { + sort.Float64s(a) + for quantile, epsilon := range Targets { + n := float64(len(a)) + k := int(quantile * n) + if k < 1 { + k = 1 + } + lower := int((quantile - epsilon) * n) + if lower < 1 { + lower = 1 + } + upper := int(math.Ceil((quantile + epsilon) * n)) + if upper > len(a) { + upper = len(a) + } + w, min, max := a[k-1], a[lower-1], a[upper-1] + if g := s.Query(quantile); g < min || g > max { + t.Errorf("q=%f: want %v [%f,%f], got %v", quantile, w, min, max, g) + } + } +} + +func verifyLowPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) { + sort.Float64s(a) + for _, qu := range LowQuantiles { + n := float64(len(a)) + k := int(qu * n) + + lowerRank := int((1 - RelativeEpsilon) * qu * n) + upperRank := int(math.Ceil((1 + RelativeEpsilon) * qu * n)) + w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1] + if g := s.Query(qu); g < min || g > max { + t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g) + } + } +} + +func 
verifyHighPercsWithRelativeEpsilon(t *testing.T, a []float64, s *Stream) { + sort.Float64s(a) + for _, qu := range HighQuantiles { + n := float64(len(a)) + k := int(qu * n) + + lowerRank := int((1 - (1+RelativeEpsilon)*(1-qu)) * n) + upperRank := int(math.Ceil((1 - (1-RelativeEpsilon)*(1-qu)) * n)) + w, min, max := a[k-1], a[lowerRank-1], a[upperRank-1] + if g := s.Query(qu); g < min || g > max { + t.Errorf("q=%f: want %v [%f,%f], got %v", qu, w, min, max, g) + } + } +} + +func populateStream(s *Stream) []float64 { + a := make([]float64, 0, 1e5+100) + for i := 0; i < cap(a); i++ { + v := rand.NormFloat64() + // Add 5% asymmetric outliers. + if i%20 == 0 { + v = v*v + 1 + } + s.Insert(v) + a = append(a, v) + } + return a +} + +func TestTargetedQuery(t *testing.T) { + rand.Seed(42) + s := NewTargeted(Targets) + a := populateStream(s) + verifyPercsWithAbsoluteEpsilon(t, a, s) +} + +func TestTargetedQuerySmallSampleSize(t *testing.T) { + rand.Seed(42) + s := NewTargeted(TargetsSmallEpsilon) + a := []float64{1, 2, 3, 4, 5} + for _, v := range a { + s.Insert(v) + } + verifyPercsWithAbsoluteEpsilon(t, a, s) + // If not yet flushed, results should be precise: + if !s.flushed() { + for φ, want := range map[float64]float64{ + 0.01: 1, + 0.10: 1, + 0.50: 3, + 0.90: 5, + 0.99: 5, + } { + if got := s.Query(φ); got != want { + t.Errorf("want %f for φ=%f, got %f", want, φ, got) + } + } + } +} + +func TestLowBiasedQuery(t *testing.T) { + rand.Seed(42) + s := NewLowBiased(RelativeEpsilon) + a := populateStream(s) + verifyLowPercsWithRelativeEpsilon(t, a, s) +} + +func TestHighBiasedQuery(t *testing.T) { + rand.Seed(42) + s := NewHighBiased(RelativeEpsilon) + a := populateStream(s) + verifyHighPercsWithRelativeEpsilon(t, a, s) +} + +// BrokenTestTargetedMerge is broken, see Merge doc comment. +func BrokenTestTargetedMerge(t *testing.T) { + rand.Seed(42) + s1 := NewTargeted(Targets) + s2 := NewTargeted(Targets) + a := populateStream(s1) + a = append(a, populateStream(s2)...) 
+ s1.Merge(s2.Samples()) + verifyPercsWithAbsoluteEpsilon(t, a, s1) +} + +// BrokenTestLowBiasedMerge is broken, see Merge doc comment. +func BrokenTestLowBiasedMerge(t *testing.T) { + rand.Seed(42) + s1 := NewLowBiased(RelativeEpsilon) + s2 := NewLowBiased(RelativeEpsilon) + a := populateStream(s1) + a = append(a, populateStream(s2)...) + s1.Merge(s2.Samples()) + verifyLowPercsWithRelativeEpsilon(t, a, s2) +} + +// BrokenTestHighBiasedMerge is broken, see Merge doc comment. +func BrokenTestHighBiasedMerge(t *testing.T) { + rand.Seed(42) + s1 := NewHighBiased(RelativeEpsilon) + s2 := NewHighBiased(RelativeEpsilon) + a := populateStream(s1) + a = append(a, populateStream(s2)...) + s1.Merge(s2.Samples()) + verifyHighPercsWithRelativeEpsilon(t, a, s2) +} + +func TestUncompressed(t *testing.T) { + q := NewTargeted(Targets) + for i := 100; i > 0; i-- { + q.Insert(float64(i)) + } + if g := q.Count(); g != 100 { + t.Errorf("want count 100, got %d", g) + } + // Before compression, Query should have 100% accuracy. 
+ for quantile := range Targets { + w := quantile * 100 + if g := q.Query(quantile); g != w { + t.Errorf("want %f, got %f", w, g) + } + } +} + +func TestUncompressedSamples(t *testing.T) { + q := NewTargeted(map[float64]float64{0.99: 0.001}) + for i := 1; i <= 100; i++ { + q.Insert(float64(i)) + } + if g := q.Samples().Len(); g != 100 { + t.Errorf("want count 100, got %d", g) + } +} + +func TestUncompressedOne(t *testing.T) { + q := NewTargeted(map[float64]float64{0.99: 0.01}) + q.Insert(3.14) + if g := q.Query(0.90); g != 3.14 { + t.Error("want PI, got", g) + } +} + +func TestDefaults(t *testing.T) { + if g := NewTargeted(map[float64]float64{0.99: 0.001}).Query(0.99); g != 0 { + t.Errorf("want 0, got %f", g) + } +} diff --git a/vendor/github.com/gogo/protobuf/.gitignore b/vendor/github.com/gogo/protobuf/.gitignore index ea050e9b8..76009479d 100644 --- a/vendor/github.com/gogo/protobuf/.gitignore +++ b/vendor/github.com/gogo/protobuf/.gitignore @@ -1 +1,3 @@ ._* +*.js +*.js.map diff --git a/vendor/github.com/gogo/protobuf/.travis.yml b/vendor/github.com/gogo/protobuf/.travis.yml index 5cebf1091..c2db66798 100644 --- a/vendor/github.com/gogo/protobuf/.travis.yml +++ b/vendor/github.com/gogo/protobuf/.travis.yml @@ -1,7 +1,7 @@ env: - - PROTOBUF_VERSION=2.5.0 - PROTOBUF_VERSION=2.6.1 - - PROTOBUF_VERSION=3.0.0-beta-2 + - PROTOBUF_VERSION=3.0.2 + - PROTOBUF_VERSION=3.5.1 before_install: - ./install-protobuf.sh @@ -10,12 +10,11 @@ before_install: script: - PATH=/home/travis/bin:$PATH make buildserverall - echo $TRAVIS_GO_VERSION - - if [ "$TRAVIS_GO_VERSION" == 1.6 ] && [[ "$PROTOBUF_VERSION" == 3.0.0* ]]; then ! git status --porcelain | read || (git status; git diff; exit 1); fi + - if [[ "$PROTOBUF_VERSION" == "3.5.1" ]] && [[ "$TRAVIS_GO_VERSION" =~ ^1\.10\.[0-9]+$ ]]; then ! 
git status --porcelain | read || (git status; git diff; exit 1); fi language: go go: - - 1.3.3 - - 1.4.2 - - 1.5.3 - - 1.6 + - 1.8.x + - 1.9.x + - 1.10.x diff --git a/vendor/github.com/gogo/protobuf/AUTHORS b/vendor/github.com/gogo/protobuf/AUTHORS new file mode 100644 index 000000000..3d97fc7a2 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/AUTHORS @@ -0,0 +1,15 @@ +# This is the official list of GoGo authors for copyright purposes. +# This file is distinct from the CONTRIBUTORS file, which +# lists people. For example, employees are listed in CONTRIBUTORS, +# but not in AUTHORS, because the employer holds the copyright. + +# Names should be added to this file as one of +# Organization's name +# Individual's name +# Individual's name + +# Please keep the list sorted. + +Sendgrid, Inc +Vastech SA (PTY) LTD +Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/CONTRIBUTORS b/vendor/github.com/gogo/protobuf/CONTRIBUTORS index d2c3b418f..1b4f6c208 100644 --- a/vendor/github.com/gogo/protobuf/CONTRIBUTORS +++ b/vendor/github.com/gogo/protobuf/CONTRIBUTORS @@ -1,15 +1,23 @@ Anton Povarov +Brian Goff Clayton Coleman Denis Smirnov DongYun Kang Dwayne Schultz Georg Apitz Gustav Paul +Johan Brandhorst +John Shahid John Tuley Laurent Patrick Lee +Peter Edge +Roger Johansson +Sam Nguyen +Sergio Arbeo Stephen J Day Tamir Duberstein Todd Eisenberger Tormod Erevik Lea +Vyacheslav Kim Walter Schulze diff --git a/vendor/github.com/gogo/protobuf/LICENSE b/vendor/github.com/gogo/protobuf/LICENSE index 335e38e19..7be0cc7b6 100644 --- a/vendor/github.com/gogo/protobuf/LICENSE +++ b/vendor/github.com/gogo/protobuf/LICENSE @@ -1,7 +1,7 @@ -Extensions for Protocol Buffers to create more go like structures. +Protocol Buffers for Go with Gadgets -Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -http://github.com/gogo/protobuf/gogoproto +Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+http://github.com/gogo/protobuf Go support for Protocol Buffers - Google's data interchange format diff --git a/vendor/github.com/gogo/protobuf/Makefile b/vendor/github.com/gogo/protobuf/Makefile index 095ab8805..ae13345e5 100644 --- a/vendor/github.com/gogo/protobuf/Makefile +++ b/vendor/github.com/gogo/protobuf/Makefile @@ -1,7 +1,7 @@ -# Extensions for Protocol Buffers to create more go like structures. +# Protocol Buffers for Go with Gadgets # -# Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -# http://github.com/gogo/protobuf/gogoproto +# Copyright (c) 2013, The GoGo Authors. All rights reserved. +# http://github.com/gogo/protobuf # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are @@ -26,11 +26,13 @@ # (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. +GO_VERSION:=$(shell go version) + .PHONY: nuke regenerate tests clean install gofmt vet contributors all: clean install regenerate install tests errcheck vet -buildserverall: clean install regenerate install tests vet +buildserverall: clean install regenerate install tests vet js install: go install ./proto @@ -41,8 +43,10 @@ install: go install ./protoc-gen-gogofast go install ./protoc-gen-gogofaster go install ./protoc-gen-gogoslick + go install ./protoc-gen-gostring go install ./protoc-min-version go install ./protoc-gen-combo + go install ./gogoreplace clean: go clean ./... 
@@ -60,6 +64,8 @@ regenerate: make -C gogoproto regenerate make -C proto/testdata regenerate make -C jsonpb/jsonpb_test_proto regenerate + make -C _conformance regenerate + make -C protobuf regenerate make -C test regenerate make -C test/example regenerate make -C test/unrecognized regenerate @@ -77,10 +83,12 @@ regenerate: make -C test/oneof regenerate make -C test/oneof3 regenerate make -C test/theproto3 regenerate + make -C test/mapdefaults regenerate make -C test/mapsproto2 regenerate make -C test/issue42order regenerate make -C proto generate-test-pbs make -C test/importdedup regenerate + make -C test/importduplicate regenerate make -C test/custombytesnonstruct regenerate make -C test/required regenerate make -C test/casttype regenerate @@ -95,18 +103,35 @@ regenerate: make -C test/asymetric-issue125 regenerate make -C test/filedotname regenerate make -C test/nopackage regenerate + make -C test/types regenerate + make -C test/proto3extension regenerate + make -C test/stdtypes regenerate + make -C test/data regenerate + make -C test/typedecl regenerate + make -C test/issue260 regenerate + make -C test/issue261 regenerate + make -C test/issue262 regenerate + make -C test/issue312 regenerate + make -C test/enumdecl regenerate + make -C test/typedecl_all regenerate + make -C test/enumdecl_all regenerate + make -C test/int64support regenerate + make -C test/issue322 regenerate + make -C test/issue330 regenerate + make -C test/importcustom-issue389 regenerate make gofmt tests: go build ./test/enumprefix go test ./... + (cd test/stdtypes && make test) vet: go vet ./... go tool vet --shadow . errcheck: - go get -u github.com/kisielk/errcheck + go get github.com/kisielk/errcheck errcheck ./test/... 
drone: @@ -114,15 +139,25 @@ drone: (cd $(GOPATH)/src/github.com/gogo/protobuf && make buildserverall) testall: + go get -u github.com/golang/protobuf/proto make -C protoc-gen-gogo/testdata test make -C vanity/test test + make -C test/registration test make tests bench: + go get golang.org/x/tools/cmd/benchcmp (cd test/mixbench && go build .) - (cd test/mixbench && ./mixbench) + ./test/mixbench/mixbench contributors: git log --format='%aN <%aE>' | sort -fu > CONTRIBUTORS +js: +ifeq (go1.10, $(findstring go1.10, $(GO_VERSION))) + go get -u github.com/gopherjs/gopherjs + gopherjs build github.com/gogo/protobuf/protoc-gen-gogo +endif +update: + (cd protobuf && make update) diff --git a/vendor/github.com/gogo/protobuf/README b/vendor/github.com/gogo/protobuf/README index b4accc0c0..035426df5 100644 --- a/vendor/github.com/gogo/protobuf/README +++ b/vendor/github.com/gogo/protobuf/README @@ -25,7 +25,7 @@ To use this software, you must: for details or, if you are using gccgo, follow the instructions at https://golang.org/doc/install/gccgo - Grab the code from the repository and install the proto package. - The simplest way is to run `go get -u github.com/golang/protobuf/{proto,protoc-gen-go}`. + The simplest way is to run `go get -u github.com/golang/protobuf/protoc-gen-go`. The compiler plugin, protoc-gen-go, will be installed in $GOBIN, defaulting to $GOPATH/bin. It must be in your $PATH for the protocol compiler, protoc, to find it. @@ -118,12 +118,12 @@ for a protocol buffer variable v: When the .proto file specifies `syntax="proto3"`, there are some differences: - Non-repeated fields of non-message type are values instead of pointers. - - Getters are only generated for message and oneof fields. - Enum types do not get an Enum method. Consider file test.proto, containing ```proto + syntax = "proto2"; package example; enum FOO { X = 17; }; @@ -207,6 +207,50 @@ the --go_out argument to protoc: protoc --gogo_out=plugins=grpc:. 
*.proto +## Compatibility ## + +The library and the generated code are expected to be stable over time. +However, we reserve the right to make breaking changes without notice for the +following reasons: + +- Security. A security issue in the specification or implementation may come to + light whose resolution requires breaking compatibility. We reserve the right + to address such security issues. +- Unspecified behavior. There are some aspects of the Protocol Buffers + specification that are undefined. Programs that depend on such unspecified + behavior may break in future releases. +- Specification errors or changes. If it becomes necessary to address an + inconsistency, incompleteness, or change in the Protocol Buffers + specification, resolving the issue could affect the meaning or legality of + existing programs. We reserve the right to address such issues, including + updating the implementations. +- Bugs. If the library has a bug that violates the specification, a program + that depends on the buggy behavior may break if the bug is fixed. We reserve + the right to fix such bugs. +- Adding methods or fields to generated structs. These may conflict with field + names that already exist in a schema, causing applications to break. When the + code generator encounters a field in the schema that would collide with a + generated field or method name, the code generator will append an underscore + to the generated field or method name. +- Adding, removing, or changing methods or fields in generated structs that + start with `XXX`. These parts of the generated code are exported out of + necessity, but should not be considered part of the public API. +- Adding, removing, or changing unexported symbols in generated code. + +Any breaking changes outside of these will be announced 6 months in advance to +protobuf@googlegroups.com. + +You should, whenever possible, use generated code created by the `protoc-gen-go` +tool built at the same commit as the `proto` package. 
The `proto` package +declares package-level constants in the form `ProtoPackageIsVersionX`. +Application code and generated code may depend on one of these constants to +ensure that compilation will fail if the available version of the proto library +is too old. Whenever we make a change to the generated code that requires newer +library support, in the same commit we will increment the version number of the +generated code and declare a new package-level constant whose name incorporates +the latest version number. Removing a compatibility constant is considered a +breaking change and would be subject to the announcement policy stated above. + ## Plugins ## The `protoc-gen-go/generator` package exposes a plugin interface, diff --git a/vendor/github.com/gogo/protobuf/Readme.md b/vendor/github.com/gogo/protobuf/Readme.md index 293da37b2..16d8e5e8b 100644 --- a/vendor/github.com/gogo/protobuf/Readme.md +++ b/vendor/github.com/gogo/protobuf/Readme.md @@ -1,8 +1,82 @@ # Protocol Buffers for Go with Gadgets -Travis CI Matrix Builds: [![Build Status](https://travis-ci.org/gogo/protobuf.svg?branch=master)](https://travis-ci.org/gogo/protobuf) +[![Build Status](https://travis-ci.org/gogo/protobuf.svg?branch=master)](https://travis-ci.org/gogo/protobuf) -### Getting Started (Give me the speed I don't care about the rest) +gogoprotobuf is a fork of golang/protobuf with extra code generation features. 
+ +This code generation is used to achieve: + + - fast marshalling and unmarshalling + - more canonical Go structures + - goprotobuf compatibility + - less typing by optionally generating extra helper code + - peace of mind by optionally generating test and benchmark code + - other serialization formats + +Keeping track of how up to date gogoprotobuf is relative to golang/protobuf is done in this +issue + +## Users + +These projects use gogoprotobuf: + + - etcd - blog - sample proto file + - spacemonkey - blog + - badoo - sample proto file + - mesos-go - sample proto file + - heka - the switch from golang/protobuf to gogo/protobuf when it was still on code.google.com + - cockroachdb - sample proto file + - go-ipfs - sample proto file + - rkive-go - sample proto file + - dropbox + - srclib - sample proto file + - adyoulike + - cloudfoundry - sample proto file + - kubernetes - go2idl built on top of gogoprotobuf + - dgraph - release notes - benchmarks + - centrifugo - release notes - blog + - docker swarmkit - sample proto file + - nats.io - go-nats-streaming + - tidb - Communication between tidb and tikv + - protoactor-go - vanity command that also generates actors from service definitions + - containerd - vanity command with custom field names that conforms to the golang convention. + - nakama + - proteus + - carbonzipper stack + - sendgrid + - zero-os/0-stor + - go-spacemesh + +Please let us know if you are using gogoprotobuf by posting on our GoogleGroup. 
+ +### Mentioned + + - Cloudflare - go serialization talk - Albert Strasheim + - GopherCon 2014 Writing High Performance Databases in Go by Ben Johnson + - alecthomas' go serialization benchmarks + - Go faster with gogoproto - Agniva De Sarker + - Evolution of protobuf (Gource Visualization) - Landon Wilkins + - Creating GopherJS Apps with gRPC-Web - Johan Brandhorst + - So you want to use GoGo Protobuf - Johan Brandhorst + - Advanced gRPC Error Usage - Johan Brandhorst + +## Getting Started + +There are several ways to use gogoprotobuf, but for all you need to install go and protoc. +After that you can choose: + + - Speed + - More Speed and more generated code + - Most Speed and most customization + +### Installation + +To install it, you must first have Go (at least version 1.6.3) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)). Latest patch versions of Go 1.8, 1.9 and 1.10 are continuously tested. + +Next, install the standard protocol buffer implementation from [https://github.com/google/protobuf](https://github.com/google/protobuf). +Most versions from 2.3.1 should not give any problems, but 2.6.1, 3.0.2 and 3.5.1 are continuously tested. + +### Speed Install the protoc-gen-gofast binary @@ -12,7 +86,12 @@ Use it to generate faster marshaling and unmarshaling go code for your protocol protoc --gofast_out=. myproto.proto -### Getting started (I have heard about fields without pointers and more code generation) +This does not allow you to use any of the other gogoprotobuf [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md). + +### More Speed and more generated code + +Fields without pointers cause less time in the garbage collector. +More code generation results in more convenient methods. 
Other binaries are also included: @@ -20,31 +99,52 @@ Other binaries are also included: protoc-gen-gogofaster (same as gogofast, without XXX_unrecognized, less pointer fields) protoc-gen-gogoslick (same as gogofaster, but with generated string, gostring and equal methods) -### Getting started (I want more customization power over fields, speed, other serialization formats and tests, etc.) - -Please visit the [homepage](http://gogo.github.io) for more documentation. - -### Installation - -To install it, you must first have Go (at least version 1.3.3) installed (see [http://golang.org/doc/install](http://golang.org/doc/install)). Go 1.3.3, 1.4.2, 1.5.3 and 1.6 are continiuosly tested. - -Next, install the standard protocol buffer implementation from [https://github.com/google/protobuf](https://github.com/google/protobuf). -Most versions from 2.3.1 should not give any problems, but 2.5.0, 2.6.1 and 3 alpha are continuously tested. - -Finally run: +Installing any of these binaries is easy. Simply run: go get github.com/gogo/protobuf/proto + go get github.com/gogo/protobuf/{binary} + go get github.com/gogo/protobuf/gogoproto + +These binaries allow you to use gogoprotobuf [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md). You can also use your own binary. + +To generate the code, you also need to set the include path properly. + + protoc -I=. -I=$GOPATH/src -I=$GOPATH/src/github.com/gogo/protobuf/protobuf --{binary}_out=. myproto.proto + +To use proto files from "google/protobuf" you need to add additional args to protoc. + + protoc -I=. -I=$GOPATH/src -I=$GOPATH/src/github.com/gogo/protobuf/protobuf --{binary}_out=\ + Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types,\ + Mgoogle/protobuf/duration.proto=github.com/gogo/protobuf/types,\ + Mgoogle/protobuf/struct.proto=github.com/gogo/protobuf/types,\ + Mgoogle/protobuf/timestamp.proto=github.com/gogo/protobuf/types,\ + Mgoogle/protobuf/wrappers.proto=github.com/gogo/protobuf/types:. 
\ + myproto.proto + +Note that in the protoc command, {binary} does not contain the initial prefix of "protoc-gen". + +### Most Speed and most customization + +Customizing the fields of the messages to be the fields that you actually want to use removes the need to copy between the structs you use and structs you use to serialize. +gogoprotobuf also offers more serialization formats and generation of tests and even more methods. + +Please visit the [extensions](https://github.com/gogo/protobuf/blob/master/extensions.md) page for more documentation. + +Install protoc-gen-gogo: + + go get github.com/gogo/protobuf/proto + go get github.com/gogo/protobuf/jsonpb go get github.com/gogo/protobuf/protoc-gen-gogo go get github.com/gogo/protobuf/gogoproto -### Proto3 - -Proto3 is supported, but most of the new native types are not supported yet. -[See Proto3 Issue](https://github.com/gogo/protobuf/issues/57) for more details. - -### GRPC +## GRPC It works the same as golang/protobuf, simply specify the plugin. Here is an example using gofast: protoc --gofast_out=plugins=grpc:. my.proto + +See [https://github.com/gogo/grpc-example](https://github.com/gogo/grpc-example) for an example of using gRPC with gogoprotobuf and the wider grpc-ecosystem. 
+ + + diff --git a/vendor/github.com/gogo/protobuf/bench.md b/vendor/github.com/gogo/protobuf/bench.md new file mode 100644 index 000000000..16da66ad2 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/bench.md @@ -0,0 +1,190 @@ +# Benchmarks + +## How to reproduce + +For a comparison run: + + make bench + +followed by [benchcmp](http://code.google.com/p/go/source/browse/misc/benchcmp benchcmp) on the resulting files: + + $GOROOT/misc/benchcmp $GOPATH/src/github.com/gogo/protobuf/test/mixbench/marshal.txt $GOPATH/src/github.com/gogo/protobuf/test/mixbench/marshaler.txt + $GOROOT/misc/benchcmp $GOPATH/src/github.com/gogo/protobuf/test/mixbench/unmarshal.txt $GOPATH/src/github.com/gogo/protobuf/test/mixbench/unmarshaler.txt + +Benchmarks ran on Revision: 11c56be39364 + +June 2013 + +Processor 2,66 GHz Intel Core i7 + +Memory 8 GB 1067 MHz DDR3 + +## Marshaler + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
benchmarkold ns/opnew ns/opdelta
BenchmarkNidOptNativeProtoMarshal2656889-66.53%
BenchmarkNinOptNativeProtoMarshal26511015-61.71%
BenchmarkNidRepNativeProtoMarshal4266112519-70.65%
BenchmarkNinRepNativeProtoMarshal4230612354-70.80%
BenchmarkNidRepPackedNativeProtoMarshal3414811902-65.15%
BenchmarkNinRepPackedNativeProtoMarshal3337511969-64.14%
BenchmarkNidOptStructProtoMarshal71483727-47.86%
BenchmarkNinOptStructProtoMarshal69563481-49.96%
BenchmarkNidRepStructProtoMarshal4655119492-58.13%
BenchmarkNinRepStructProtoMarshal4671519043-59.24%
BenchmarkNidEmbeddedStructProtoMarshal52312050-60.81%
BenchmarkNinEmbeddedStructProtoMarshal46652000-57.13%
BenchmarkNidNestedStructProtoMarshal181106103604-42.79%
BenchmarkNinNestedStructProtoMarshal182053102069-43.93%
BenchmarkNidOptCustomProtoMarshal1209310-74.36%
BenchmarkNinOptCustomProtoMarshal1435277-80.70%
BenchmarkNidRepCustomProtoMarshal4126763-81.51%
BenchmarkNinRepCustomProtoMarshal3972769-80.64%
BenchmarkNinOptNativeUnionProtoMarshal973303-68.86%
BenchmarkNinOptStructUnionProtoMarshal1536521-66.08%
BenchmarkNinEmbeddedStructUnionProtoMarshal2327884-62.01%
BenchmarkNinNestedStructUnionProtoMarshal2070743-64.11%
BenchmarkTreeProtoMarshal1554838-46.07%
BenchmarkOrBranchProtoMarshal31562012-36.25%
BenchmarkAndBranchProtoMarshal31831996-37.29%
BenchmarkLeafProtoMarshal965606-37.20%
BenchmarkDeepTreeProtoMarshal23161283-44.60%
BenchmarkADeepBranchProtoMarshal27191492-45.13%
BenchmarkAndDeepBranchProtoMarshal46632922-37.34%
BenchmarkDeepLeafProtoMarshal18491016-45.05%
BenchmarkNilProtoMarshal43976-82.53%
BenchmarkNidOptEnumProtoMarshal514152-70.43%
BenchmarkNinOptEnumProtoMarshal550158-71.27%
BenchmarkNidRepEnumProtoMarshal647207-68.01%
BenchmarkNinRepEnumProtoMarshal662213-67.82%
BenchmarkTimerProtoMarshal934271-70.99%
BenchmarkMyExtendableProtoMarshal608185-69.57%
BenchmarkOtherExtenableProtoMarshal1112332-70.14%
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
benchmarkold MB/snew MB/sspeedup
BenchmarkNidOptNativeProtoMarshal126.86378.862.99x
BenchmarkNinOptNativeProtoMarshal114.27298.422.61x
BenchmarkNidRepNativeProtoMarshal164.25561.203.42x
BenchmarkNinRepNativeProtoMarshal166.10568.233.42x
BenchmarkNidRepPackedNativeProtoMarshal99.10283.972.87x
BenchmarkNinRepPackedNativeProtoMarshal101.30282.312.79x
BenchmarkNidOptStructProtoMarshal176.83339.071.92x
BenchmarkNinOptStructProtoMarshal163.59326.572.00x
BenchmarkNidRepStructProtoMarshal178.84427.492.39x
BenchmarkNinRepStructProtoMarshal178.70437.692.45x
BenchmarkNidEmbeddedStructProtoMarshal124.24317.562.56x
BenchmarkNinEmbeddedStructProtoMarshal132.03307.992.33x
BenchmarkNidNestedStructProtoMarshal192.91337.861.75x
BenchmarkNinNestedStructProtoMarshal192.44344.451.79x
BenchmarkNidOptCustomProtoMarshal29.77116.033.90x
BenchmarkNinOptCustomProtoMarshal22.29115.385.18x
BenchmarkNidRepCustomProtoMarshal35.14189.805.40x
BenchmarkNinRepCustomProtoMarshal36.50188.405.16x
BenchmarkNinOptNativeUnionProtoMarshal32.87105.393.21x
BenchmarkNinOptStructUnionProtoMarshal66.40195.762.95x
BenchmarkNinEmbeddedStructUnionProtoMarshal93.24245.262.63x
BenchmarkNinNestedStructUnionProtoMarshal57.49160.062.78x
BenchmarkTreeProtoMarshal137.64255.121.85x
BenchmarkOrBranchProtoMarshal137.80216.101.57x
BenchmarkAndBranchProtoMarshal136.64217.891.59x
BenchmarkLeafProtoMarshal214.48341.531.59x
BenchmarkDeepTreeProtoMarshal95.85173.031.81x
BenchmarkADeepBranchProtoMarshal82.73150.781.82x
BenchmarkAndDeepBranchProtoMarshal96.72153.981.59x
BenchmarkDeepLeafProtoMarshal117.34213.411.82x
BenchmarkNidOptEnumProtoMarshal3.8913.163.38x
BenchmarkNinOptEnumProtoMarshal1.826.303.46x
BenchmarkNidRepEnumProtoMarshal12.3638.503.11x
BenchmarkNinRepEnumProtoMarshal12.0837.533.11x
BenchmarkTimerProtoMarshal73.81253.873.44x
BenchmarkMyExtendableProtoMarshal13.1543.083.28x
BenchmarkOtherExtenableProtoMarshal24.2881.093.34x
+ +## Unmarshaler + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
benchmarkold ns/opnew ns/opdelta
BenchmarkNidOptNativeProtoUnmarshal25211006-60.10%
BenchmarkNinOptNativeProtoUnmarshal25291750-30.80%
BenchmarkNidRepNativeProtoUnmarshal4906735299-28.06%
BenchmarkNinRepNativeProtoUnmarshal4799035456-26.12%
BenchmarkNidRepPackedNativeProtoUnmarshal2645623950-9.47%
BenchmarkNinRepPackedNativeProtoUnmarshal2649924037-9.29%
BenchmarkNidOptStructProtoUnmarshal68033873-43.07%
BenchmarkNinOptStructProtoUnmarshal67864154-38.79%
BenchmarkNidRepStructProtoUnmarshal5627631970-43.19%
BenchmarkNinRepStructProtoUnmarshal4875031832-34.70%
BenchmarkNidEmbeddedStructProtoUnmarshal45561973-56.69%
BenchmarkNinEmbeddedStructProtoUnmarshal44851975-55.96%
BenchmarkNidNestedStructProtoUnmarshal223395135844-39.19%
BenchmarkNinNestedStructProtoUnmarshal226446134022-40.82%
BenchmarkNidOptCustomProtoUnmarshal1859300-83.86%
BenchmarkNinOptCustomProtoUnmarshal1486402-72.95%
BenchmarkNidRepCustomProtoUnmarshal82291669-79.72%
BenchmarkNinRepCustomProtoUnmarshal82531649-80.02%
BenchmarkNinOptNativeUnionProtoUnmarshal840307-63.45%
BenchmarkNinOptStructUnionProtoUnmarshal1395639-54.19%
BenchmarkNinEmbeddedStructUnionProtoUnmarshal22971167-49.19%
BenchmarkNinNestedStructUnionProtoUnmarshal1820889-51.15%
BenchmarkTreeProtoUnmarshal1521720-52.66%
BenchmarkOrBranchProtoUnmarshal26691385-48.11%
BenchmarkAndBranchProtoUnmarshal26671420-46.76%
BenchmarkLeafProtoUnmarshal1171584-50.13%
BenchmarkDeepTreeProtoUnmarshal20651081-47.65%
BenchmarkADeepBranchProtoUnmarshal26951178-56.29%
BenchmarkAndDeepBranchProtoUnmarshal40551918-52.70%
BenchmarkDeepLeafProtoUnmarshal1758865-50.80%
BenchmarkNilProtoUnmarshal56463-88.79%
BenchmarkNidOptEnumProtoUnmarshal76273-90.34%
BenchmarkNinOptEnumProtoUnmarshal764163-78.66%
BenchmarkNidRepEnumProtoUnmarshal1078447-58.53%
BenchmarkNinRepEnumProtoUnmarshal1071479-55.28%
BenchmarkTimerProtoUnmarshal1128362-67.91%
BenchmarkMyExtendableProtoUnmarshal808217-73.14%
BenchmarkOtherExtenableProtoUnmarshal1233517-58.07%
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + +
benchmarkold MB/snew MB/sspeedup
BenchmarkNidOptNativeProtoUnmarshal133.67334.982.51x
BenchmarkNinOptNativeProtoUnmarshal119.77173.081.45x
BenchmarkNidRepNativeProtoUnmarshal143.23199.121.39x
BenchmarkNinRepNativeProtoUnmarshal146.07198.161.36x
BenchmarkNidRepPackedNativeProtoUnmarshal127.80141.041.10x
BenchmarkNinRepPackedNativeProtoUnmarshal127.55140.781.10x
BenchmarkNidOptStructProtoUnmarshal185.79326.311.76x
BenchmarkNinOptStructProtoUnmarshal167.68273.661.63x
BenchmarkNidRepStructProtoUnmarshal147.88260.391.76x
BenchmarkNinRepStructProtoUnmarshal171.20261.971.53x
BenchmarkNidEmbeddedStructProtoUnmarshal142.86329.422.31x
BenchmarkNinEmbeddedStructProtoUnmarshal137.33311.832.27x
BenchmarkNidNestedStructProtoUnmarshal154.97259.471.67x
BenchmarkNinNestedStructProtoUnmarshal154.32258.421.67x
BenchmarkNidOptCustomProtoUnmarshal19.36119.666.18x
BenchmarkNinOptCustomProtoUnmarshal21.5279.503.69x
BenchmarkNidRepCustomProtoUnmarshal17.6286.864.93x
BenchmarkNinRepCustomProtoUnmarshal17.5787.925.00x
BenchmarkNinOptNativeUnionProtoUnmarshal38.07104.122.73x
BenchmarkNinOptStructUnionProtoUnmarshal73.08159.542.18x
BenchmarkNinEmbeddedStructUnionProtoUnmarshal94.00185.921.98x
BenchmarkNinNestedStructUnionProtoUnmarshal65.35133.752.05x
BenchmarkTreeProtoUnmarshal141.28297.132.10x
BenchmarkOrBranchProtoUnmarshal162.56313.961.93x
BenchmarkAndBranchProtoUnmarshal163.06306.151.88x
BenchmarkLeafProtoUnmarshal176.72354.192.00x
BenchmarkDeepTreeProtoUnmarshal107.50205.301.91x
BenchmarkADeepBranchProtoUnmarshal83.48190.882.29x
BenchmarkAndDeepBranchProtoUnmarshal110.97234.602.11x
BenchmarkDeepLeafProtoUnmarshal123.40250.732.03x
BenchmarkNidOptEnumProtoUnmarshal2.6227.1610.37x
BenchmarkNinOptEnumProtoUnmarshal1.316.114.66x
BenchmarkNidRepEnumProtoUnmarshal7.4217.882.41x
BenchmarkNinRepEnumProtoUnmarshal7.4716.692.23x
BenchmarkTimerProtoUnmarshal61.12190.343.11x
BenchmarkMyExtendableProtoUnmarshal9.9036.713.71x
BenchmarkOtherExtenableProtoUnmarshal21.9052.132.38x
\ No newline at end of file diff --git a/vendor/github.com/gogo/protobuf/codec/codec.go b/vendor/github.com/gogo/protobuf/codec/codec.go new file mode 100644 index 000000000..91d10fe7f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/codec/codec.go @@ -0,0 +1,91 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2015, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package codec + +import ( + "github.com/gogo/protobuf/proto" +) + +type Codec interface { + Marshal(v interface{}) ([]byte, error) + Unmarshal(data []byte, v interface{}) error + String() string +} + +type marshaler interface { + MarshalTo(data []byte) (n int, err error) +} + +func getSize(v interface{}) (int, bool) { + if sz, ok := v.(interface { + Size() (n int) + }); ok { + return sz.Size(), true + } else if sz, ok := v.(interface { + ProtoSize() (n int) + }); ok { + return sz.ProtoSize(), true + } else { + return 0, false + } +} + +type codec struct { + buf []byte +} + +func (this *codec) String() string { + return "proto" +} + +func New(size int) Codec { + return &codec{make([]byte, size)} +} + +func (this *codec) Marshal(v interface{}) ([]byte, error) { + if m, ok := v.(marshaler); ok { + n, ok := getSize(v) + if !ok { + return proto.Marshal(v.(proto.Message)) + } + if n > len(this.buf) { + this.buf = make([]byte, n) + } + _, err := m.MarshalTo(this.buf) + if err != nil { + return nil, err + } + return this.buf[:n], nil + } + return proto.Marshal(v.(proto.Message)) +} + +func (this *codec) Unmarshal(data []byte, v interface{}) error { + return proto.Unmarshal(data, v.(proto.Message)) +} diff --git a/vendor/github.com/gogo/protobuf/codec/codec_test.go b/vendor/github.com/gogo/protobuf/codec/codec_test.go new file mode 100644 index 000000000..de2c9bc4b --- /dev/null +++ b/vendor/github.com/gogo/protobuf/codec/codec_test.go @@ -0,0 +1,54 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2015, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package codec + +import ( + "github.com/gogo/protobuf/test" + "math/rand" + "testing" + "time" +) + +func TestCodec(t *testing.T) { + r := rand.New(rand.NewSource(time.Now().UnixNano())) + in := test.NewPopulatedNinOptStruct(r, true) + c := New(r.Intn(1024)) + data, err := c.Marshal(in) + if err != nil { + t.Fatal(err) + } + out := &test.NinOptStruct{} + err = c.Unmarshal(data, out) + if err != nil { + t.Fatal(err) + } + if err := in.VerboseEqual(out); err != nil { + t.Fatal(err) + } +} diff --git a/vendor/github.com/gogo/protobuf/custom_types.md b/vendor/github.com/gogo/protobuf/custom_types.md new file mode 100644 index 000000000..3eed249b5 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/custom_types.md @@ -0,0 +1,68 @@ +# Custom types + +Custom types is a gogo protobuf extensions that allows for using a custom +struct type to decorate the underlying structure of the protocol message. 
+ +# How to use + +## Defining the protobuf message + +```proto +message CustomType { + optional ProtoType Field = 1 [(gogoproto.customtype) = "T"]; +} + +message ProtoType { + optional string Field = 1; +} +``` + +or alternatively you can declare the field type in the protocol message to be +`bytes`: + +```proto +message BytesCustomType { + optional bytes Field = 1 [(gogoproto.customtype) = "T"]; +} +``` + +The downside of using `bytes` is that it makes it harder to generate protobuf +code in other languages. In either case, it is the user responsibility to +ensure that the custom type marshals and unmarshals to the expected wire +format. That is, in the first example, gogo protobuf will not attempt to ensure +that the wire format of `ProtoType` and `T` are wire compatible. + +## Custom type method signatures + +The custom type must define the following methods with the given +signatures. Assuming the custom type is called `T`: + +```go +func (t T) Marshal() ([]byte, error) {} +func (t *T) MarshalTo(data []byte) (n int, err error) {} +func (t *T) Unmarshal(data []byte) error {} + +func (t T) MarshalJSON() ([]byte, error) {} +func (t *T) UnmarshalJSON(data []byte) error {} + +// only required if the compare option is set +func (t T) Compare(other T) int {} +// only required if the equal option is set +func (t T) Equal(other T) bool {} +// only required if populate option is set +func NewPopulatedT(r randyThetest) *T {} +``` + +Check [t.go](test/t.go) for a full example + +# Warnings and issues + +`Warning about customtype: It is your responsibility to test all cases of your marshaling, unmarshaling and size methods implemented for your custom type.` + +Issues with customtype include: + * A Bytes method is not allowed. + * Defining a customtype as a fake proto message is broken. + * proto.Clone is broken. + * Using a proto message as a customtype is not allowed. 
+ * cusomtype of type map can not UnmarshalText + * customtype of type struct cannot jsonpb unmarshal diff --git a/vendor/github.com/gogo/protobuf/extensions.md b/vendor/github.com/gogo/protobuf/extensions.md new file mode 100644 index 000000000..35dfee16f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/extensions.md @@ -0,0 +1,162 @@ +# gogoprotobuf Extensions + +Here is an [example.proto](https://github.com/gogo/protobuf/blob/master/test/example/example.proto) which uses most of the gogoprotobuf code generation plugins. + +Please also look at the example [Makefile](https://github.com/gogo/protobuf/blob/master/test/example/Makefile) which shows how to specify the `descriptor.proto` and `gogo.proto` in your proto_path + +The documentation at [http://godoc.org/github.com/gogo/protobuf/gogoproto](http://godoc.org/github.com/gogo/protobuf/gogoproto) describes the extensions made to goprotobuf in more detail. + +Also see [http://godoc.org/github.com/gogo/protobuf/plugin/](http://godoc.org/github.com/gogo/protobuf/plugin/) for documentation of each of the extensions which have their own plugins. + +# Fast Marshalling and Unmarshalling + +Generating a `Marshal`, `MarshalTo`, `Size` (or `ProtoSize`) and `Unmarshal` method for a struct results in faster marshalling and unmarshalling than when using reflect. + +See [BenchComparison](https://github.com/gogo/protobuf/blob/master/bench.md) for a comparison between reflect and generated code used for marshalling and unmarshalling. + + + + + + + + + + + +
NameOptionTypeDescriptionDefault
marshalerMessageboolif true, a Marshal and MarshalTo method is generated for the specific messagefalse
sizerMessageboolif true, a Size method is generated for the specific messagefalse
unmarshaler Message bool if true, an Unmarshal method is generated for the specific message false
protosizerMessageboolif true, a ProtoSize method is generated for the specific messagefalse
unsafe_marshaler (deprecated) Message bool if true, a Marshal and MarshalTo method is generated. false
unsafe_unmarshaler (deprecated) Message bool if true, an Unmarshal method is generated. false
stable_marshaler Message bool if true, a Marshal and MarshalTo method is generated for the specific message, but unlike marshaler the output is guaranteed to be deterministic, at the sacrifice of some speed false
typedecl (beta) Message bool if false, type declaration of the message is excluded from the generated output. Requires the marshaler and unmarshaler to be generated. true
+ +# More Canonical Go Structures + +Lots of times working with a goprotobuf struct will lead you to a place where you create another struct that is easier to work with and then have a function to copy the values between the two structs. + +You might also find that basic structs that started their life as part of an API need to be sent over the wire. With gob, you could just send it. With goprotobuf, you need to make a new struct. + +`gogoprotobuf` tries to fix these problems with the nullable, embed, customtype, customname, casttype, castkey and castvalue field extensions. + + + + + + + + + + + + + + + +
NameOptionTypeDescriptionDefault
nullable Field bool if false, a field is generated without a pointer (see warning below). true
embed Field bool if true, the field is generated as an embedded field. false
customtype Field string It works with the Marshal and Unmarshal methods, to allow you to have your own types in your struct, but marshal to bytes. For example, custom.Uuid or custom.Fixed128. For more information please refer to the CustomTypes document goprotobuf type
customname (beta) Field string Changes the generated fieldname. This is especially useful when generated methods conflict with fieldnames. goprotobuf field name
casttype (beta) Field string Changes the generated field type. It assumes that this type is castable to the original goprotobuf field type. It currently does not support maps, structs or enums. goprotobuf field type
castkey (beta) Field string Changes the generated fieldtype for a map key. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. goprotobuf field type
castvalue (beta) Field string Changes the generated fieldtype for a map value. All generated code assumes that this type is castable to the protocol buffer field type. Only supported on maps. goprotobuf field type
enum_customname (beta) Enum string Sets the type name of an enum. If goproto_enum_prefix is enabled, this value will be used as a prefix when generating enum values.goprotobuf enum type name. Helps with golint issues.
enumdecl (beta) Enum bool if false, type declaration of the enum is excluded from the generated output. Requires the marshaler and unmarshaler to be generated. true
enumvalue_customname (beta) Enum Value string Changes the generated enum name. Helps with golint issues.goprotobuf enum value name
stdtime Timestamp Field bool Changes the Well Known Timestamp Type to time.TimeTimestamp
stdduration Duration Field bool Changes the Well Known Duration Type to time.DurationDuration
+ +`Warning about nullable: according to the Protocol Buffer specification, you should be able to tell whether a field is set or unset. With the option nullable=false this feature is lost, since your non-nullable fields will always be set.` + +# Goprotobuf Compatibility + +Gogoprotobuf is compatible with Goprotobuf, because it is compatible with protocol buffers (see the section on tests below). + +Gogoprotobuf generates the same code as goprotobuf if no extensions are used. + +The enumprefix, getters and stringer extensions can be used to remove some of the unnecessary code generated by goprotobuf. + + + + + + + + + + + +
NameOptionTypeDescriptionDefault
gogoproto_import File bool if false, the generated code imports github.com/golang/protobuf/proto instead of github.com/gogo/protobuf/proto. true
goproto_enum_prefix Enum bool if false, generates the enum constant names without the messagetype prefix true
goproto_getters Message bool if false, the message is generated without get methods, this is useful when you would rather want to use face true
goproto_stringer Message bool if false, the message is generated without the default string method, this is useful for rather using stringer true
goproto_enum_stringer (experimental) Enum bool if false, the enum is generated without the default string method, this is useful for rather using enum_stringer true
goproto_extensions_map (beta) Message bool if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension true
goproto_unrecognized (beta) Message bool if false, XXX_unrecognized field is not generated. This is useful to reduce GC pressure at the cost of losing information about unrecognized fields. true
goproto_registration (beta) File bool if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). false
+ +# Less Typing + +The Protocol Buffer language is very parseable and extra code can be easily generated for structures. + +Helper methods, functions and interfaces can be generated by triggering certain extensions like gostring. + + + + + + + + + + + + + +
NameOptionTypeDescriptionDefault
gostring Message bool if true, a `GoString` method is generated. This returns a string representing valid go code to reproduce the current state of the struct. false
onlyone Message bool if true, all fields must be nullable and only one of the fields may be set, like a union. Two methods are generated: `GetValue() interface{}` and `SetValue(v interface{}) (set bool)`. These provide easier interaction with a union. false
equal Message bool if true, an Equal method is generated false
compare Message bool if true, a Compare method is generated. This is very useful for quickly implementing sort on a list of protobuf structs false
verbose_equal Message bool if true, a verbose equal method is generated for the message. This returns an error which describes the exact element which is not equal to the exact element in the other struct. false
stringer Message bool if true, a String method is generated for the message. false
face Message bool if true, a function will be generated which can convert a structure which satisfies an interface (face) to the specified structure. This interface contains getters for each of the fields in the struct. The specified struct is also generated with the getters. This allows it to satisfy its own face. false
description (beta) Message bool if true, a Description method is generated for the message. false
populate Message bool if true, a `NewPopulated` function is generated. This is necessary for generated tests. false
enum_stringer (experimental) Enum bool if true, a String method is generated for an Enum false
+ +Issues with Compare include: + * Oneof is not supported yet + * Not all Well Known Types are supported yet + * Maps are not supported + +#Peace of Mind + +Test and Benchmark generation is done with the following extensions: + + + + +
testgen Message bool if true, tests are generated for proto, json and prototext marshalling as well as for some of the other enabled plugins false
benchgen Message bool if true, benchmarks are generated for proto, json and prototext marshalling as well as for some of the other enabled plugins false
+ +# More Serialization Formats + +Other serialization formats like xml and json typically use reflect to marshal and unmarshal structured data. Manipulating these structs into something other than the default Go requires editing tags. The following extensions provide ways of editing these tags for the generated protobuf structs. + + + + +
jsontag (beta) Field string if set, the json tag value between the double quotes is replaced with this string fieldname
moretags (beta) Field string if set, this string is appended to the tag string empty
+ +Here is a longer explanation of jsontag and moretags + +# File Options + +Each of the boolean message and enum extensions also have a file extension: + + * `marshaler_all` + * `sizer_all` + * `protosizer_all` + * `unmarshaler_all` + * `unsafe_marshaler_all` + * `unsafe_unmarshaler_all` + * `stable_marshaler_all` + * `goproto_enum_prefix_all` + * `goproto_getters_all` + * `goproto_stringer_all` + * `goproto_enum_stringer_all` + * `goproto_extensions_map_all` + * `goproto_unrecognized_all` + * `gostring_all` + * `onlyone_all` + * `equal_all` + * `compare_all` + * `verbose_equal_all` + * `stringer_all` + * `enum_stringer_all` + * `face_all` + * `description_all` + * `populate_all` + * `testgen_all` + * `benchgen_all` + * `enumdecl_all` + * `typedecl_all` + +Each of these are the same as their Message Option counterparts, except they apply to all messages in the file. Their Message option counterparts can also be used to overwrite their effect. + +# Tests + + * The normal barrage of tests are run with: `make tests` + * A few weird tests: `make testall` + * Tests for compatibility with [golang/protobuf](https://github.com/golang/protobuf) are handled by a different project [harmonytests](https://github.com/gogo/harmonytests), since it requires goprotobuf. + * Cross version tests are made with [Travis CI](https://travis-ci.org/gogo/protobuf). + * GRPC Tests are also handled by a different project [grpctests](https://github.com/gogo/grpctests), since it depends on a lot of grpc libraries. + * Thanks to [go-fuzz](https://github.com/dvyukov/go-fuzz/) we have proper [fuzztests](https://github.com/gogo/fuzztests). + diff --git a/vendor/github.com/gogo/protobuf/gogoproto/Makefile b/vendor/github.com/gogo/protobuf/gogoproto/Makefile index 557f29493..0b4659b73 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/Makefile +++ b/vendor/github.com/gogo/protobuf/gogoproto/Makefile @@ -1,7 +1,7 @@ -# Extensions for Protocol Buffers to create more go like structures. 
+# Protocol Buffers for Go with Gadgets # -# Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -# http://github.com/gogo/protobuf/gogoproto +# Copyright (c) 2013, The GoGo Authors. All rights reserved. +# http://github.com/gogo/protobuf # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are @@ -27,7 +27,8 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. regenerate: - protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:. --proto_path=../../../../:../protobuf/:. *.proto + go install github.com/gogo/protobuf/protoc-gen-gogo + protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:../../../../ --proto_path=../../../../:../protobuf/:. *.proto restore: cp gogo.pb.golden gogo.pb.go diff --git a/vendor/github.com/gogo/protobuf/gogoproto/doc.go b/vendor/github.com/gogo/protobuf/gogoproto/doc.go index f0424d4f8..147b5ecc6 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/doc.go +++ b/vendor/github.com/gogo/protobuf/gogoproto/doc.go @@ -1,7 +1,7 @@ -// Extensions for Protocol Buffers to create more go like structures. +// Protocol Buffers for Go with Gadgets // -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -148,6 +148,7 @@ The enumprefix, getters and stringer extensions can be used to remove some of th - goproto_stringer, if false, the message is generated without the default string method, this is useful for rather using stringer, or allowing you to write your own string method. 
- goproto_extensions_map (beta), if false, the extensions field is generated as type []byte instead of type map[int32]proto.Extension - goproto_unrecognized (beta), if false, XXX_unrecognized field is not generated. This is useful in conjunction with gogoproto.nullable=false, to generate structures completely devoid of pointers and reduce GC pressure at the cost of losing information about unrecognized fields. + - goproto_registration (beta), if true, the generated files will register all messages and types against both gogo/protobuf and golang/protobuf. This is necessary when using third-party packages which read registrations from golang/protobuf (such as the grpc-gateway). Less Typing and Peace of Mind is explained in their specific plugin folders godoc: diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go index ff355b9be..5765acb15 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: gogo.proto -// DO NOT EDIT! /* Package gogoproto is a generated protocol buffer package. 
@@ -34,6 +33,7 @@ var E_GoprotoEnumPrefix = &proto.ExtensionDesc{ Field: 62001, Name: "gogoproto.goproto_enum_prefix", Tag: "varint,62001,opt,name=goproto_enum_prefix,json=goprotoEnumPrefix", + Filename: "gogo.proto", } var E_GoprotoEnumStringer = &proto.ExtensionDesc{ @@ -42,6 +42,7 @@ var E_GoprotoEnumStringer = &proto.ExtensionDesc{ Field: 62021, Name: "gogoproto.goproto_enum_stringer", Tag: "varint,62021,opt,name=goproto_enum_stringer,json=goprotoEnumStringer", + Filename: "gogo.proto", } var E_EnumStringer = &proto.ExtensionDesc{ @@ -50,6 +51,7 @@ var E_EnumStringer = &proto.ExtensionDesc{ Field: 62022, Name: "gogoproto.enum_stringer", Tag: "varint,62022,opt,name=enum_stringer,json=enumStringer", + Filename: "gogo.proto", } var E_EnumCustomname = &proto.ExtensionDesc{ @@ -58,6 +60,16 @@ var E_EnumCustomname = &proto.ExtensionDesc{ Field: 62023, Name: "gogoproto.enum_customname", Tag: "bytes,62023,opt,name=enum_customname,json=enumCustomname", + Filename: "gogo.proto", +} + +var E_Enumdecl = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.EnumOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 62024, + Name: "gogoproto.enumdecl", + Tag: "varint,62024,opt,name=enumdecl", + Filename: "gogo.proto", } var E_EnumvalueCustomname = &proto.ExtensionDesc{ @@ -66,6 +78,7 @@ var E_EnumvalueCustomname = &proto.ExtensionDesc{ Field: 66001, Name: "gogoproto.enumvalue_customname", Tag: "bytes,66001,opt,name=enumvalue_customname,json=enumvalueCustomname", + Filename: "gogo.proto", } var E_GoprotoGettersAll = &proto.ExtensionDesc{ @@ -74,6 +87,7 @@ var E_GoprotoGettersAll = &proto.ExtensionDesc{ Field: 63001, Name: "gogoproto.goproto_getters_all", Tag: "varint,63001,opt,name=goproto_getters_all,json=goprotoGettersAll", + Filename: "gogo.proto", } var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ @@ -82,6 +96,7 @@ var E_GoprotoEnumPrefixAll = &proto.ExtensionDesc{ Field: 63002, Name: "gogoproto.goproto_enum_prefix_all", Tag: 
"varint,63002,opt,name=goproto_enum_prefix_all,json=goprotoEnumPrefixAll", + Filename: "gogo.proto", } var E_GoprotoStringerAll = &proto.ExtensionDesc{ @@ -90,6 +105,7 @@ var E_GoprotoStringerAll = &proto.ExtensionDesc{ Field: 63003, Name: "gogoproto.goproto_stringer_all", Tag: "varint,63003,opt,name=goproto_stringer_all,json=goprotoStringerAll", + Filename: "gogo.proto", } var E_VerboseEqualAll = &proto.ExtensionDesc{ @@ -98,6 +114,7 @@ var E_VerboseEqualAll = &proto.ExtensionDesc{ Field: 63004, Name: "gogoproto.verbose_equal_all", Tag: "varint,63004,opt,name=verbose_equal_all,json=verboseEqualAll", + Filename: "gogo.proto", } var E_FaceAll = &proto.ExtensionDesc{ @@ -106,6 +123,7 @@ var E_FaceAll = &proto.ExtensionDesc{ Field: 63005, Name: "gogoproto.face_all", Tag: "varint,63005,opt,name=face_all,json=faceAll", + Filename: "gogo.proto", } var E_GostringAll = &proto.ExtensionDesc{ @@ -114,6 +132,7 @@ var E_GostringAll = &proto.ExtensionDesc{ Field: 63006, Name: "gogoproto.gostring_all", Tag: "varint,63006,opt,name=gostring_all,json=gostringAll", + Filename: "gogo.proto", } var E_PopulateAll = &proto.ExtensionDesc{ @@ -122,6 +141,7 @@ var E_PopulateAll = &proto.ExtensionDesc{ Field: 63007, Name: "gogoproto.populate_all", Tag: "varint,63007,opt,name=populate_all,json=populateAll", + Filename: "gogo.proto", } var E_StringerAll = &proto.ExtensionDesc{ @@ -130,6 +150,7 @@ var E_StringerAll = &proto.ExtensionDesc{ Field: 63008, Name: "gogoproto.stringer_all", Tag: "varint,63008,opt,name=stringer_all,json=stringerAll", + Filename: "gogo.proto", } var E_OnlyoneAll = &proto.ExtensionDesc{ @@ -138,6 +159,7 @@ var E_OnlyoneAll = &proto.ExtensionDesc{ Field: 63009, Name: "gogoproto.onlyone_all", Tag: "varint,63009,opt,name=onlyone_all,json=onlyoneAll", + Filename: "gogo.proto", } var E_EqualAll = &proto.ExtensionDesc{ @@ -146,6 +168,7 @@ var E_EqualAll = &proto.ExtensionDesc{ Field: 63013, Name: "gogoproto.equal_all", Tag: "varint,63013,opt,name=equal_all,json=equalAll", + 
Filename: "gogo.proto", } var E_DescriptionAll = &proto.ExtensionDesc{ @@ -154,6 +177,7 @@ var E_DescriptionAll = &proto.ExtensionDesc{ Field: 63014, Name: "gogoproto.description_all", Tag: "varint,63014,opt,name=description_all,json=descriptionAll", + Filename: "gogo.proto", } var E_TestgenAll = &proto.ExtensionDesc{ @@ -162,6 +186,7 @@ var E_TestgenAll = &proto.ExtensionDesc{ Field: 63015, Name: "gogoproto.testgen_all", Tag: "varint,63015,opt,name=testgen_all,json=testgenAll", + Filename: "gogo.proto", } var E_BenchgenAll = &proto.ExtensionDesc{ @@ -170,6 +195,7 @@ var E_BenchgenAll = &proto.ExtensionDesc{ Field: 63016, Name: "gogoproto.benchgen_all", Tag: "varint,63016,opt,name=benchgen_all,json=benchgenAll", + Filename: "gogo.proto", } var E_MarshalerAll = &proto.ExtensionDesc{ @@ -178,6 +204,7 @@ var E_MarshalerAll = &proto.ExtensionDesc{ Field: 63017, Name: "gogoproto.marshaler_all", Tag: "varint,63017,opt,name=marshaler_all,json=marshalerAll", + Filename: "gogo.proto", } var E_UnmarshalerAll = &proto.ExtensionDesc{ @@ -186,6 +213,7 @@ var E_UnmarshalerAll = &proto.ExtensionDesc{ Field: 63018, Name: "gogoproto.unmarshaler_all", Tag: "varint,63018,opt,name=unmarshaler_all,json=unmarshalerAll", + Filename: "gogo.proto", } var E_StableMarshalerAll = &proto.ExtensionDesc{ @@ -194,6 +222,7 @@ var E_StableMarshalerAll = &proto.ExtensionDesc{ Field: 63019, Name: "gogoproto.stable_marshaler_all", Tag: "varint,63019,opt,name=stable_marshaler_all,json=stableMarshalerAll", + Filename: "gogo.proto", } var E_SizerAll = &proto.ExtensionDesc{ @@ -202,6 +231,7 @@ var E_SizerAll = &proto.ExtensionDesc{ Field: 63020, Name: "gogoproto.sizer_all", Tag: "varint,63020,opt,name=sizer_all,json=sizerAll", + Filename: "gogo.proto", } var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ @@ -210,6 +240,7 @@ var E_GoprotoEnumStringerAll = &proto.ExtensionDesc{ Field: 63021, Name: "gogoproto.goproto_enum_stringer_all", Tag: 
"varint,63021,opt,name=goproto_enum_stringer_all,json=goprotoEnumStringerAll", + Filename: "gogo.proto", } var E_EnumStringerAll = &proto.ExtensionDesc{ @@ -218,6 +249,7 @@ var E_EnumStringerAll = &proto.ExtensionDesc{ Field: 63022, Name: "gogoproto.enum_stringer_all", Tag: "varint,63022,opt,name=enum_stringer_all,json=enumStringerAll", + Filename: "gogo.proto", } var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ @@ -226,6 +258,7 @@ var E_UnsafeMarshalerAll = &proto.ExtensionDesc{ Field: 63023, Name: "gogoproto.unsafe_marshaler_all", Tag: "varint,63023,opt,name=unsafe_marshaler_all,json=unsafeMarshalerAll", + Filename: "gogo.proto", } var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ @@ -234,6 +267,7 @@ var E_UnsafeUnmarshalerAll = &proto.ExtensionDesc{ Field: 63024, Name: "gogoproto.unsafe_unmarshaler_all", Tag: "varint,63024,opt,name=unsafe_unmarshaler_all,json=unsafeUnmarshalerAll", + Filename: "gogo.proto", } var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ @@ -242,6 +276,7 @@ var E_GoprotoExtensionsMapAll = &proto.ExtensionDesc{ Field: 63025, Name: "gogoproto.goproto_extensions_map_all", Tag: "varint,63025,opt,name=goproto_extensions_map_all,json=goprotoExtensionsMapAll", + Filename: "gogo.proto", } var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ @@ -250,6 +285,7 @@ var E_GoprotoUnrecognizedAll = &proto.ExtensionDesc{ Field: 63026, Name: "gogoproto.goproto_unrecognized_all", Tag: "varint,63026,opt,name=goproto_unrecognized_all,json=goprotoUnrecognizedAll", + Filename: "gogo.proto", } var E_GogoprotoImport = &proto.ExtensionDesc{ @@ -258,6 +294,7 @@ var E_GogoprotoImport = &proto.ExtensionDesc{ Field: 63027, Name: "gogoproto.gogoproto_import", Tag: "varint,63027,opt,name=gogoproto_import,json=gogoprotoImport", + Filename: "gogo.proto", } var E_ProtosizerAll = &proto.ExtensionDesc{ @@ -266,6 +303,7 @@ var E_ProtosizerAll = &proto.ExtensionDesc{ Field: 63028, Name: "gogoproto.protosizer_all", Tag: 
"varint,63028,opt,name=protosizer_all,json=protosizerAll", + Filename: "gogo.proto", } var E_CompareAll = &proto.ExtensionDesc{ @@ -274,6 +312,34 @@ var E_CompareAll = &proto.ExtensionDesc{ Field: 63029, Name: "gogoproto.compare_all", Tag: "varint,63029,opt,name=compare_all,json=compareAll", + Filename: "gogo.proto", +} + +var E_TypedeclAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63030, + Name: "gogoproto.typedecl_all", + Tag: "varint,63030,opt,name=typedecl_all,json=typedeclAll", + Filename: "gogo.proto", +} + +var E_EnumdeclAll = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63031, + Name: "gogoproto.enumdecl_all", + Tag: "varint,63031,opt,name=enumdecl_all,json=enumdeclAll", + Filename: "gogo.proto", +} + +var E_GoprotoRegistration = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FileOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 63032, + Name: "gogoproto.goproto_registration", + Tag: "varint,63032,opt,name=goproto_registration,json=goprotoRegistration", + Filename: "gogo.proto", } var E_GoprotoGetters = &proto.ExtensionDesc{ @@ -282,6 +348,7 @@ var E_GoprotoGetters = &proto.ExtensionDesc{ Field: 64001, Name: "gogoproto.goproto_getters", Tag: "varint,64001,opt,name=goproto_getters,json=goprotoGetters", + Filename: "gogo.proto", } var E_GoprotoStringer = &proto.ExtensionDesc{ @@ -290,6 +357,7 @@ var E_GoprotoStringer = &proto.ExtensionDesc{ Field: 64003, Name: "gogoproto.goproto_stringer", Tag: "varint,64003,opt,name=goproto_stringer,json=goprotoStringer", + Filename: "gogo.proto", } var E_VerboseEqual = &proto.ExtensionDesc{ @@ -298,6 +366,7 @@ var E_VerboseEqual = &proto.ExtensionDesc{ Field: 64004, Name: "gogoproto.verbose_equal", Tag: "varint,64004,opt,name=verbose_equal,json=verboseEqual", + Filename: "gogo.proto", } var E_Face = &proto.ExtensionDesc{ @@ -306,6 +375,7 @@ var E_Face = 
&proto.ExtensionDesc{ Field: 64005, Name: "gogoproto.face", Tag: "varint,64005,opt,name=face", + Filename: "gogo.proto", } var E_Gostring = &proto.ExtensionDesc{ @@ -314,6 +384,7 @@ var E_Gostring = &proto.ExtensionDesc{ Field: 64006, Name: "gogoproto.gostring", Tag: "varint,64006,opt,name=gostring", + Filename: "gogo.proto", } var E_Populate = &proto.ExtensionDesc{ @@ -322,6 +393,7 @@ var E_Populate = &proto.ExtensionDesc{ Field: 64007, Name: "gogoproto.populate", Tag: "varint,64007,opt,name=populate", + Filename: "gogo.proto", } var E_Stringer = &proto.ExtensionDesc{ @@ -330,6 +402,7 @@ var E_Stringer = &proto.ExtensionDesc{ Field: 67008, Name: "gogoproto.stringer", Tag: "varint,67008,opt,name=stringer", + Filename: "gogo.proto", } var E_Onlyone = &proto.ExtensionDesc{ @@ -338,6 +411,7 @@ var E_Onlyone = &proto.ExtensionDesc{ Field: 64009, Name: "gogoproto.onlyone", Tag: "varint,64009,opt,name=onlyone", + Filename: "gogo.proto", } var E_Equal = &proto.ExtensionDesc{ @@ -346,6 +420,7 @@ var E_Equal = &proto.ExtensionDesc{ Field: 64013, Name: "gogoproto.equal", Tag: "varint,64013,opt,name=equal", + Filename: "gogo.proto", } var E_Description = &proto.ExtensionDesc{ @@ -354,6 +429,7 @@ var E_Description = &proto.ExtensionDesc{ Field: 64014, Name: "gogoproto.description", Tag: "varint,64014,opt,name=description", + Filename: "gogo.proto", } var E_Testgen = &proto.ExtensionDesc{ @@ -362,6 +438,7 @@ var E_Testgen = &proto.ExtensionDesc{ Field: 64015, Name: "gogoproto.testgen", Tag: "varint,64015,opt,name=testgen", + Filename: "gogo.proto", } var E_Benchgen = &proto.ExtensionDesc{ @@ -370,6 +447,7 @@ var E_Benchgen = &proto.ExtensionDesc{ Field: 64016, Name: "gogoproto.benchgen", Tag: "varint,64016,opt,name=benchgen", + Filename: "gogo.proto", } var E_Marshaler = &proto.ExtensionDesc{ @@ -378,6 +456,7 @@ var E_Marshaler = &proto.ExtensionDesc{ Field: 64017, Name: "gogoproto.marshaler", Tag: "varint,64017,opt,name=marshaler", + Filename: "gogo.proto", } var E_Unmarshaler 
= &proto.ExtensionDesc{ @@ -386,6 +465,7 @@ var E_Unmarshaler = &proto.ExtensionDesc{ Field: 64018, Name: "gogoproto.unmarshaler", Tag: "varint,64018,opt,name=unmarshaler", + Filename: "gogo.proto", } var E_StableMarshaler = &proto.ExtensionDesc{ @@ -394,6 +474,7 @@ var E_StableMarshaler = &proto.ExtensionDesc{ Field: 64019, Name: "gogoproto.stable_marshaler", Tag: "varint,64019,opt,name=stable_marshaler,json=stableMarshaler", + Filename: "gogo.proto", } var E_Sizer = &proto.ExtensionDesc{ @@ -402,6 +483,7 @@ var E_Sizer = &proto.ExtensionDesc{ Field: 64020, Name: "gogoproto.sizer", Tag: "varint,64020,opt,name=sizer", + Filename: "gogo.proto", } var E_UnsafeMarshaler = &proto.ExtensionDesc{ @@ -410,6 +492,7 @@ var E_UnsafeMarshaler = &proto.ExtensionDesc{ Field: 64023, Name: "gogoproto.unsafe_marshaler", Tag: "varint,64023,opt,name=unsafe_marshaler,json=unsafeMarshaler", + Filename: "gogo.proto", } var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ @@ -418,6 +501,7 @@ var E_UnsafeUnmarshaler = &proto.ExtensionDesc{ Field: 64024, Name: "gogoproto.unsafe_unmarshaler", Tag: "varint,64024,opt,name=unsafe_unmarshaler,json=unsafeUnmarshaler", + Filename: "gogo.proto", } var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ @@ -426,6 +510,7 @@ var E_GoprotoExtensionsMap = &proto.ExtensionDesc{ Field: 64025, Name: "gogoproto.goproto_extensions_map", Tag: "varint,64025,opt,name=goproto_extensions_map,json=goprotoExtensionsMap", + Filename: "gogo.proto", } var E_GoprotoUnrecognized = &proto.ExtensionDesc{ @@ -434,6 +519,7 @@ var E_GoprotoUnrecognized = &proto.ExtensionDesc{ Field: 64026, Name: "gogoproto.goproto_unrecognized", Tag: "varint,64026,opt,name=goproto_unrecognized,json=goprotoUnrecognized", + Filename: "gogo.proto", } var E_Protosizer = &proto.ExtensionDesc{ @@ -442,6 +528,7 @@ var E_Protosizer = &proto.ExtensionDesc{ Field: 64028, Name: "gogoproto.protosizer", Tag: "varint,64028,opt,name=protosizer", + Filename: "gogo.proto", } var E_Compare = &proto.ExtensionDesc{ @@ 
-450,6 +537,16 @@ var E_Compare = &proto.ExtensionDesc{ Field: 64029, Name: "gogoproto.compare", Tag: "varint,64029,opt,name=compare", + Filename: "gogo.proto", +} + +var E_Typedecl = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MessageOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 64030, + Name: "gogoproto.typedecl", + Tag: "varint,64030,opt,name=typedecl", + Filename: "gogo.proto", } var E_Nullable = &proto.ExtensionDesc{ @@ -458,6 +555,7 @@ var E_Nullable = &proto.ExtensionDesc{ Field: 65001, Name: "gogoproto.nullable", Tag: "varint,65001,opt,name=nullable", + Filename: "gogo.proto", } var E_Embed = &proto.ExtensionDesc{ @@ -466,6 +564,7 @@ var E_Embed = &proto.ExtensionDesc{ Field: 65002, Name: "gogoproto.embed", Tag: "varint,65002,opt,name=embed", + Filename: "gogo.proto", } var E_Customtype = &proto.ExtensionDesc{ @@ -474,6 +573,7 @@ var E_Customtype = &proto.ExtensionDesc{ Field: 65003, Name: "gogoproto.customtype", Tag: "bytes,65003,opt,name=customtype", + Filename: "gogo.proto", } var E_Customname = &proto.ExtensionDesc{ @@ -482,6 +582,7 @@ var E_Customname = &proto.ExtensionDesc{ Field: 65004, Name: "gogoproto.customname", Tag: "bytes,65004,opt,name=customname", + Filename: "gogo.proto", } var E_Jsontag = &proto.ExtensionDesc{ @@ -490,6 +591,7 @@ var E_Jsontag = &proto.ExtensionDesc{ Field: 65005, Name: "gogoproto.jsontag", Tag: "bytes,65005,opt,name=jsontag", + Filename: "gogo.proto", } var E_Moretags = &proto.ExtensionDesc{ @@ -498,6 +600,7 @@ var E_Moretags = &proto.ExtensionDesc{ Field: 65006, Name: "gogoproto.moretags", Tag: "bytes,65006,opt,name=moretags", + Filename: "gogo.proto", } var E_Casttype = &proto.ExtensionDesc{ @@ -506,6 +609,7 @@ var E_Casttype = &proto.ExtensionDesc{ Field: 65007, Name: "gogoproto.casttype", Tag: "bytes,65007,opt,name=casttype", + Filename: "gogo.proto", } var E_Castkey = &proto.ExtensionDesc{ @@ -514,6 +618,7 @@ var E_Castkey = &proto.ExtensionDesc{ Field: 65008, Name: "gogoproto.castkey", Tag: 
"bytes,65008,opt,name=castkey", + Filename: "gogo.proto", } var E_Castvalue = &proto.ExtensionDesc{ @@ -522,6 +627,25 @@ var E_Castvalue = &proto.ExtensionDesc{ Field: 65009, Name: "gogoproto.castvalue", Tag: "bytes,65009,opt,name=castvalue", + Filename: "gogo.proto", +} + +var E_Stdtime = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65010, + Name: "gogoproto.stdtime", + Tag: "varint,65010,opt,name=stdtime", + Filename: "gogo.proto", +} + +var E_Stdduration = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.FieldOptions)(nil), + ExtensionType: (*bool)(nil), + Field: 65011, + Name: "gogoproto.stdduration", + Tag: "varint,65011,opt,name=stdduration", + Filename: "gogo.proto", } func init() { @@ -529,6 +653,7 @@ func init() { proto.RegisterExtension(E_GoprotoEnumStringer) proto.RegisterExtension(E_EnumStringer) proto.RegisterExtension(E_EnumCustomname) + proto.RegisterExtension(E_Enumdecl) proto.RegisterExtension(E_EnumvalueCustomname) proto.RegisterExtension(E_GoprotoGettersAll) proto.RegisterExtension(E_GoprotoEnumPrefixAll) @@ -556,6 +681,9 @@ func init() { proto.RegisterExtension(E_GogoprotoImport) proto.RegisterExtension(E_ProtosizerAll) proto.RegisterExtension(E_CompareAll) + proto.RegisterExtension(E_TypedeclAll) + proto.RegisterExtension(E_EnumdeclAll) + proto.RegisterExtension(E_GoprotoRegistration) proto.RegisterExtension(E_GoprotoGetters) proto.RegisterExtension(E_GoprotoStringer) proto.RegisterExtension(E_VerboseEqual) @@ -578,6 +706,7 @@ func init() { proto.RegisterExtension(E_GoprotoUnrecognized) proto.RegisterExtension(E_Protosizer) proto.RegisterExtension(E_Compare) + proto.RegisterExtension(E_Typedecl) proto.RegisterExtension(E_Nullable) proto.RegisterExtension(E_Embed) proto.RegisterExtension(E_Customtype) @@ -587,79 +716,89 @@ func init() { proto.RegisterExtension(E_Casttype) proto.RegisterExtension(E_Castkey) proto.RegisterExtension(E_Castvalue) + 
proto.RegisterExtension(E_Stdtime) + proto.RegisterExtension(E_Stdduration) } func init() { proto.RegisterFile("gogo.proto", fileDescriptorGogo) } var fileDescriptorGogo = []byte{ - // 1096 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x94, 0x97, 0xcb, 0x6f, 0xdc, 0x54, - 0x14, 0x87, 0x85, 0x48, 0x95, 0x99, 0x93, 0x17, 0x99, 0x84, 0x50, 0x2a, 0x10, 0xed, 0x8e, 0x55, - 0xba, 0x42, 0xa8, 0xae, 0x10, 0x6a, 0xab, 0x34, 0x2a, 0x22, 0x10, 0x05, 0x52, 0x40, 0x2c, 0x46, - 0x9e, 0xc9, 0x8d, 0x3b, 0xe0, 0xf1, 0x35, 0xbe, 0x76, 0xd5, 0xb0, 0x43, 0xe5, 0x21, 0x84, 0x78, - 0x23, 0x41, 0x4b, 0xcb, 0x63, 0xc1, 0xfb, 0x59, 0x1e, 0x7b, 0x36, 0xc0, 0x9a, 0xff, 0x81, 0x0d, - 0x10, 0x5e, 0x52, 0x76, 0xd9, 0xf4, 0x1e, 0xfb, 0x1c, 0xcf, 0xb5, 0x67, 0xa4, 0x7b, 0x67, 0xe7, - 0x64, 0xee, 0xf7, 0xcd, 0xf5, 0x39, 0xbe, 0xe7, 0x37, 0x06, 0x08, 0x64, 0x20, 0x97, 0xe3, 0x44, - 0xa6, 0xb2, 0xd5, 0xc4, 0xeb, 0xfc, 0xf2, 0xd0, 0xe1, 0x40, 0xca, 0x20, 0x14, 0x47, 0xf3, 0xbf, - 0x3a, 0xd9, 0xf6, 0xd1, 0x2d, 0xa1, 0xba, 0x49, 0x2f, 0x4e, 0x65, 0x52, 0x2c, 0xf6, 0x1e, 0x80, - 0x05, 0x5a, 0xdc, 0x16, 0x51, 0xd6, 0x6f, 0xc7, 0x89, 0xd8, 0xee, 0x5d, 0x68, 0xdd, 0xb6, 0x5c, - 0x90, 0xcb, 0x4c, 0x2e, 0xaf, 0xe8, 0x4f, 0x1f, 0x8c, 0xd3, 0x9e, 0x8c, 0xd4, 0xc1, 0x6b, 0xbf, - 0xdf, 0x78, 0xf8, 0x86, 0x3b, 0x1b, 0x1b, 0xf3, 0x84, 0xe2, 0x67, 0xeb, 0x39, 0xe8, 0x6d, 0xc0, - 0xcd, 0x15, 0x9f, 0x4a, 0x93, 0x5e, 0x14, 0x88, 0xc4, 0x62, 0xfc, 0x99, 0x8c, 0x0b, 0x86, 0xf1, - 0x21, 0x42, 0xbd, 0x53, 0x30, 0x33, 0x8e, 0xeb, 0x17, 0x72, 0x4d, 0x0b, 0x53, 0xb2, 0x0a, 0x73, - 0xb9, 0xa4, 0x9b, 0xa9, 0x54, 0xf6, 0x23, 0xbf, 0x2f, 0x2c, 0x9a, 0x5f, 0x73, 0x4d, 0x73, 0x63, - 0x16, 0xb1, 0x53, 0x25, 0xe5, 0x9d, 0x85, 0x45, 0xfc, 0xcf, 0x79, 0x3f, 0xcc, 0x84, 0x69, 0x3b, - 0x32, 0xd2, 0x76, 0x16, 0x97, 0xb1, 0xf2, 0xb7, 0x8b, 0x13, 0xb9, 0x72, 0xa1, 0x14, 0x18, 0x5e, - 0xa3, 0x13, 0x81, 0x48, 0x53, 0x91, 0xa8, 0xb6, 0x1f, 0x86, 0x23, 0x36, 0x79, 0xba, 0x17, 0x96, - 
0xc6, 0x4b, 0xbb, 0xd5, 0x4e, 0xac, 0x16, 0xe4, 0x89, 0x30, 0xf4, 0x36, 0xe1, 0x96, 0x11, 0x9d, - 0x75, 0x70, 0x5e, 0x26, 0xe7, 0xe2, 0x50, 0x77, 0x51, 0xbb, 0x0e, 0xfc, 0xff, 0xb2, 0x1f, 0x0e, - 0xce, 0x77, 0xc9, 0xd9, 0x22, 0x96, 0xdb, 0x82, 0xc6, 0xfb, 0x60, 0xfe, 0xbc, 0x48, 0x3a, 0x52, - 0x89, 0xb6, 0x78, 0x2a, 0xf3, 0x43, 0x07, 0xdd, 0x15, 0xd2, 0xcd, 0x11, 0xb8, 0x82, 0x1c, 0xba, - 0x8e, 0x41, 0x63, 0xdb, 0xef, 0x0a, 0x07, 0xc5, 0x55, 0x52, 0x4c, 0xe2, 0x7a, 0x44, 0x4f, 0xc0, - 0x74, 0x20, 0x8b, 0x5b, 0x72, 0xc0, 0xdf, 0x23, 0x7c, 0x8a, 0x19, 0x52, 0xc4, 0x32, 0xce, 0x42, - 0x3f, 0x75, 0xd9, 0xc1, 0xfb, 0xac, 0x60, 0x86, 0x14, 0x63, 0x94, 0xf5, 0x03, 0x56, 0x28, 0xa3, - 0x9e, 0xf7, 0xc2, 0x94, 0x8c, 0xc2, 0x1d, 0x19, 0xb9, 0x6c, 0xe2, 0x43, 0x32, 0x00, 0x21, 0x28, - 0x38, 0x0e, 0x4d, 0xd7, 0x46, 0x7c, 0x44, 0x78, 0x43, 0x70, 0x07, 0xf4, 0x39, 0xe3, 0x21, 0xa3, - 0x57, 0x38, 0x28, 0x3e, 0x26, 0xc5, 0xac, 0x81, 0xd1, 0x6d, 0xa4, 0x42, 0xa5, 0x81, 0x70, 0x91, - 0x7c, 0xc2, 0xb7, 0x41, 0x08, 0x95, 0xb2, 0x23, 0xa2, 0xee, 0x39, 0x37, 0xc3, 0xa7, 0x5c, 0x4a, - 0x66, 0x50, 0xa1, 0x27, 0x4f, 0xdf, 0x4f, 0xd4, 0x39, 0x3f, 0x74, 0x6a, 0xc7, 0x67, 0xe4, 0x98, - 0x2e, 0x21, 0xaa, 0x48, 0x16, 0x8d, 0xa3, 0xf9, 0x9c, 0x2b, 0x62, 0x60, 0x74, 0xf4, 0x54, 0xea, - 0x77, 0x42, 0xd1, 0x1e, 0xc7, 0xf6, 0x05, 0x1f, 0xbd, 0x82, 0x5d, 0x33, 0x8d, 0xba, 0xd3, 0xaa, - 0xf7, 0xb4, 0x93, 0xe6, 0x4b, 0xee, 0x74, 0x0e, 0x20, 0xfc, 0x18, 0xdc, 0x3a, 0x72, 0xd4, 0x3b, - 0xc8, 0xbe, 0x22, 0xd9, 0xd2, 0x88, 0x71, 0x4f, 0x23, 0x61, 0x5c, 0xe5, 0xd7, 0x3c, 0x12, 0x44, - 0xcd, 0xa5, 0xab, 0x96, 0x45, 0xca, 0xdf, 0x1e, 0xaf, 0x6a, 0xdf, 0x70, 0xd5, 0x0a, 0xb6, 0x52, - 0xb5, 0x87, 0x61, 0x89, 0x8c, 0xe3, 0xf5, 0xf5, 0x5b, 0x1e, 0xac, 0x05, 0xbd, 0x59, 0xed, 0xee, - 0xe3, 0x70, 0xa8, 0x2c, 0xe7, 0x85, 0x54, 0x44, 0x0a, 0x19, 0xbd, 0xe7, 0xd8, 0xc1, 0x7c, 0x8d, - 0xcc, 0x3c, 0xf1, 0x57, 0x4a, 0xc1, 0x9a, 0x1f, 0xa3, 0xfc, 0x51, 0x38, 0xc8, 0xf2, 0x2c, 0x4a, - 0x44, 0x57, 0x06, 0x91, 0x6e, 0xe3, 
0x96, 0x83, 0xfa, 0xbb, 0x5a, 0xab, 0x36, 0x0d, 0x1c, 0xcd, - 0x67, 0xe0, 0xa6, 0xf2, 0xf7, 0x46, 0xbb, 0xd7, 0x8f, 0x65, 0x92, 0x5a, 0x8c, 0xdf, 0x73, 0xa7, - 0x4a, 0xee, 0x4c, 0x8e, 0x79, 0x2b, 0x30, 0x9b, 0xff, 0xe9, 0xfa, 0x48, 0xfe, 0x40, 0xa2, 0x99, - 0x01, 0x45, 0x83, 0xa3, 0x2b, 0xfb, 0xb1, 0x9f, 0xb8, 0xcc, 0xbf, 0x1f, 0x79, 0x70, 0x10, 0x52, - 0x3c, 0x7d, 0x73, 0xb5, 0x24, 0x6e, 0xdd, 0x31, 0x24, 0x59, 0x13, 0x4a, 0xf9, 0x41, 0xe9, 0x79, - 0x66, 0x8f, 0xce, 0x6c, 0x35, 0x88, 0xbd, 0xfb, 0xb1, 0x3c, 0xd5, 0xb8, 0xb4, 0xcb, 0x2e, 0xee, - 0x95, 0x15, 0xaa, 0xa4, 0xa5, 0x77, 0x1a, 0x66, 0x2a, 0x51, 0x69, 0x57, 0x3d, 0x4b, 0xaa, 0x69, - 0x33, 0x29, 0xbd, 0xbb, 0x60, 0x02, 0x63, 0xcf, 0x8e, 0x3f, 0x47, 0x78, 0xbe, 0xdc, 0xbb, 0x07, - 0x1a, 0x1c, 0x77, 0x76, 0xf4, 0x79, 0x42, 0x4b, 0x04, 0x71, 0x8e, 0x3a, 0x3b, 0xfe, 0x02, 0xe3, - 0x8c, 0x20, 0xee, 0x5e, 0xc2, 0x9f, 0x5e, 0x9a, 0xa0, 0x71, 0xc5, 0xb5, 0x3b, 0x0e, 0x93, 0x94, - 0x71, 0x76, 0xfa, 0x45, 0xfa, 0x72, 0x26, 0xbc, 0xbb, 0xe1, 0x80, 0x63, 0xc1, 0x5f, 0x26, 0xb4, - 0x58, 0xaf, 0x13, 0x64, 0xca, 0xc8, 0x35, 0x3b, 0xfe, 0x0a, 0xe1, 0x26, 0x85, 0x5b, 0xa7, 0x5c, - 0xb3, 0x0b, 0x5e, 0xe5, 0xad, 0x13, 0x81, 0x65, 0xe3, 0x48, 0xb3, 0xd3, 0xaf, 0x71, 0xd5, 0x19, - 0xd1, 0xa7, 0xa9, 0x59, 0x8e, 0x29, 0x3b, 0xff, 0x3a, 0xf1, 0x03, 0x06, 0x2b, 0x60, 0x8c, 0x49, - 0xbb, 0xe2, 0x0d, 0xae, 0x80, 0x41, 0xe1, 0x31, 0xaa, 0x47, 0x9f, 0xdd, 0xf4, 0x26, 0x1f, 0xa3, - 0x5a, 0xf2, 0x61, 0x37, 0xf3, 0x69, 0x61, 0x57, 0xbc, 0xc5, 0xdd, 0xcc, 0xd7, 0xe3, 0x36, 0xea, - 0x59, 0x62, 0x77, 0xbc, 0xcd, 0xdb, 0xa8, 0x45, 0x89, 0x4e, 0xa6, 0xd6, 0x70, 0x8e, 0xd8, 0x7d, - 0xef, 0x90, 0x6f, 0x7e, 0x28, 0x46, 0xbc, 0x47, 0x60, 0x69, 0x74, 0x86, 0xd8, 0xad, 0x97, 0xf6, - 0x6a, 0xbf, 0xfa, 0xcd, 0x08, 0xd1, 0x91, 0xb7, 0x38, 0x2a, 0x3f, 0xec, 0xda, 0xcb, 0x7b, 0xd5, - 0x17, 0x3b, 0x33, 0x3e, 0xf4, 0x2f, 0x34, 0x18, 0x8c, 0x6e, 0xbb, 0xeb, 0x0a, 0xb9, 0x0c, 0x08, - 0x8f, 0x06, 0x4d, 0x6e, 0x3b, 0x7f, 0x95, 0x8f, 0x06, 0x11, 0x1a, 0x6e, 
0x44, 0x59, 0x18, 0xe2, - 0xc3, 0xd1, 0xba, 0x7d, 0x44, 0x4c, 0x88, 0x70, 0x8b, 0xd9, 0x3f, 0xf6, 0xe9, 0x60, 0x30, 0xa0, - 0x67, 0xe8, 0x01, 0xd1, 0xef, 0xe8, 0x1a, 0x58, 0xc8, 0x3f, 0xf7, 0x79, 0x20, 0xe0, 0x6a, 0x7d, - 0x9e, 0xa0, 0x78, 0x69, 0x4c, 0x77, 0x62, 0xeb, 0xb7, 0xfe, 0xb5, 0x5f, 0xbc, 0x83, 0x1a, 0xc8, - 0x40, 0x90, 0xbf, 0x75, 0x5a, 0x04, 0xbb, 0x55, 0x41, 0xfe, 0xa2, 0x79, 0x0c, 0x26, 0x9f, 0x50, - 0x32, 0x4a, 0xfd, 0xc0, 0x46, 0xff, 0x4d, 0x34, 0xaf, 0xc7, 0x82, 0xf5, 0x65, 0x22, 0xf4, 0xa5, - 0xb2, 0xb1, 0xff, 0x10, 0x5b, 0x02, 0x08, 0x77, 0x7d, 0x95, 0xba, 0xdc, 0xf7, 0xbf, 0x0c, 0x33, - 0x80, 0x9b, 0xc6, 0xeb, 0x27, 0xc5, 0x8e, 0x8d, 0xfd, 0x8f, 0x37, 0x4d, 0xeb, 0xf5, 0x00, 0x6c, - 0xe2, 0x65, 0xfe, 0xbe, 0x6d, 0x83, 0xff, 0x27, 0x78, 0x40, 0x9c, 0x3c, 0x02, 0x0b, 0xfa, 0x79, - 0xa9, 0x63, 0x27, 0x61, 0x55, 0xae, 0xca, 0xf5, 0xfc, 0x41, 0xbc, 0x1e, 0x00, 0x00, 0xff, 0xff, - 0x87, 0x5c, 0xee, 0x2b, 0x7e, 0x11, 0x00, 0x00, + // 1220 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x98, 0x4b, 0x6f, 0x1c, 0x45, + 0x10, 0x80, 0x85, 0x48, 0x14, 0x6f, 0xd9, 0x8e, 0xf1, 0xda, 0x98, 0x10, 0x81, 0x08, 0x9c, 0x38, + 0xd9, 0xa7, 0x08, 0xa5, 0xad, 0xc8, 0x72, 0x2c, 0xc7, 0x4a, 0x84, 0xc1, 0x98, 0x38, 0xbc, 0x0e, + 0xab, 0xd9, 0xdd, 0xf6, 0x78, 0x60, 0x66, 0x7a, 0x98, 0xe9, 0x89, 0xe2, 0xdc, 0x50, 0x78, 0x08, + 0x21, 0xde, 0x48, 0x90, 0x90, 0x04, 0x38, 0xf0, 0x7e, 0x86, 0xf7, 0x91, 0x0b, 0x8f, 0x2b, 0xff, + 0x81, 0x0b, 0x60, 0xde, 0xbe, 0xf9, 0x82, 0x6a, 0xb6, 0x6a, 0xb6, 0x67, 0xbd, 0x52, 0xf7, 0xde, + 0xc6, 0xeb, 0xfe, 0xbe, 0xad, 0xa9, 0x9a, 0xae, 0xea, 0x59, 0x00, 0x5f, 0xf9, 0x6a, 0x3a, 0x49, + 0x95, 0x56, 0xf5, 0x1a, 0x5e, 0x17, 0x97, 0x07, 0x0f, 0xf9, 0x4a, 0xf9, 0xa1, 0x9c, 0x29, 0xfe, + 0x6a, 0xe6, 0xeb, 0x33, 0x6d, 0x99, 0xb5, 0xd2, 0x20, 0xd1, 0x2a, 0xed, 0x2c, 0x16, 0x77, 0xc1, + 0x04, 0x2d, 0x6e, 0xc8, 0x38, 0x8f, 0x1a, 0x49, 0x2a, 0xd7, 0x83, 0xb3, 0xf5, 0x9b, 0xa6, 0x3b, + 0xe4, 0x34, 
0x93, 0xd3, 0x8b, 0x71, 0x1e, 0xdd, 0x9d, 0xe8, 0x40, 0xc5, 0xd9, 0x81, 0xab, 0xbf, + 0x5c, 0x7b, 0xe8, 0x9a, 0xdb, 0x87, 0x56, 0xc7, 0x09, 0xc5, 0xff, 0xad, 0x14, 0xa0, 0x58, 0x85, + 0xeb, 0x2b, 0xbe, 0x4c, 0xa7, 0x41, 0xec, 0xcb, 0xd4, 0x62, 0xfc, 0x9e, 0x8c, 0x13, 0x86, 0xf1, + 0x5e, 0x42, 0xc5, 0x02, 0x8c, 0x0e, 0xe2, 0xfa, 0x81, 0x5c, 0x23, 0xd2, 0x94, 0x2c, 0xc1, 0x58, + 0x21, 0x69, 0xe5, 0x99, 0x56, 0x51, 0xec, 0x45, 0xd2, 0xa2, 0xf9, 0xb1, 0xd0, 0xd4, 0x56, 0xf7, + 0x23, 0xb6, 0x50, 0x52, 0x42, 0xc0, 0x10, 0x7e, 0xd2, 0x96, 0xad, 0xd0, 0x62, 0xf8, 0x89, 0x02, + 0x29, 0xd7, 0x8b, 0xd3, 0x30, 0x89, 0xd7, 0x67, 0xbc, 0x30, 0x97, 0x66, 0x24, 0xb7, 0xf6, 0xf5, + 0x9c, 0xc6, 0x65, 0x2c, 0xfb, 0xf9, 0xfc, 0x9e, 0x22, 0x9c, 0x89, 0x52, 0x60, 0xc4, 0x64, 0x54, + 0xd1, 0x97, 0x5a, 0xcb, 0x34, 0x6b, 0x78, 0x61, 0xbf, 0xf0, 0x8e, 0x07, 0x61, 0x69, 0xbc, 0xb0, + 0x55, 0xad, 0xe2, 0x52, 0x87, 0x9c, 0x0f, 0x43, 0xb1, 0x06, 0x37, 0xf4, 0x79, 0x2a, 0x1c, 0x9c, + 0x17, 0xc9, 0x39, 0xb9, 0xeb, 0xc9, 0x40, 0xed, 0x0a, 0xf0, 0xe7, 0x65, 0x2d, 0x1d, 0x9c, 0xaf, + 0x93, 0xb3, 0x4e, 0x2c, 0x97, 0x14, 0x8d, 0x27, 0x61, 0xfc, 0x8c, 0x4c, 0x9b, 0x2a, 0x93, 0x0d, + 0xf9, 0x68, 0xee, 0x85, 0x0e, 0xba, 0x4b, 0xa4, 0x1b, 0x23, 0x70, 0x11, 0x39, 0x74, 0x1d, 0x81, + 0xa1, 0x75, 0xaf, 0x25, 0x1d, 0x14, 0x97, 0x49, 0xb1, 0x0f, 0xd7, 0x23, 0x3a, 0x0f, 0x23, 0xbe, + 0xea, 0xdc, 0x92, 0x03, 0x7e, 0x85, 0xf0, 0x61, 0x66, 0x48, 0x91, 0xa8, 0x24, 0x0f, 0x3d, 0xed, + 0x12, 0xc1, 0x1b, 0xac, 0x60, 0x86, 0x14, 0x03, 0xa4, 0xf5, 0x4d, 0x56, 0x64, 0x46, 0x3e, 0xe7, + 0x60, 0x58, 0xc5, 0xe1, 0xa6, 0x8a, 0x5d, 0x82, 0x78, 0x8b, 0x0c, 0x40, 0x08, 0x0a, 0x66, 0xa1, + 0xe6, 0x5a, 0x88, 0xb7, 0xb7, 0x78, 0x7b, 0x70, 0x05, 0x96, 0x60, 0x8c, 0x1b, 0x54, 0xa0, 0x62, + 0x07, 0xc5, 0x3b, 0xa4, 0xd8, 0x6f, 0x60, 0x74, 0x1b, 0x5a, 0x66, 0xda, 0x97, 0x2e, 0x92, 0x77, + 0xf9, 0x36, 0x08, 0xa1, 0x54, 0x36, 0x65, 0xdc, 0xda, 0x70, 0x33, 0xbc, 0xc7, 0xa9, 0x64, 0x06, + 0x15, 0x0b, 0x30, 0x1a, 0x79, 0x69, 0xb6, 0xe1, 
0x85, 0x4e, 0xe5, 0x78, 0x9f, 0x1c, 0x23, 0x25, + 0x44, 0x19, 0xc9, 0xe3, 0x41, 0x34, 0x1f, 0x70, 0x46, 0x0c, 0x8c, 0xb6, 0x5e, 0xa6, 0xbd, 0x66, + 0x28, 0x1b, 0x83, 0xd8, 0x3e, 0xe4, 0xad, 0xd7, 0x61, 0x97, 0x4d, 0xe3, 0x2c, 0xd4, 0xb2, 0xe0, + 0x9c, 0x93, 0xe6, 0x23, 0xae, 0x74, 0x01, 0x20, 0xfc, 0x00, 0xdc, 0xd8, 0x77, 0x4c, 0x38, 0xc8, + 0x3e, 0x26, 0xd9, 0x54, 0x9f, 0x51, 0x41, 0x2d, 0x61, 0x50, 0xe5, 0x27, 0xdc, 0x12, 0x64, 0x8f, + 0x6b, 0x05, 0x26, 0xf3, 0x38, 0xf3, 0xd6, 0x07, 0xcb, 0xda, 0xa7, 0x9c, 0xb5, 0x0e, 0x5b, 0xc9, + 0xda, 0x29, 0x98, 0x22, 0xe3, 0x60, 0x75, 0xfd, 0x8c, 0x1b, 0x6b, 0x87, 0x5e, 0xab, 0x56, 0xf7, + 0x21, 0x38, 0x58, 0xa6, 0xf3, 0xac, 0x96, 0x71, 0x86, 0x4c, 0x23, 0xf2, 0x12, 0x07, 0xf3, 0x55, + 0x32, 0x73, 0xc7, 0x5f, 0x2c, 0x05, 0xcb, 0x5e, 0x82, 0xf2, 0xfb, 0xe1, 0x00, 0xcb, 0xf3, 0x38, + 0x95, 0x2d, 0xe5, 0xc7, 0xc1, 0x39, 0xd9, 0x76, 0x50, 0x7f, 0xde, 0x53, 0xaa, 0x35, 0x03, 0x47, + 0xf3, 0x09, 0xb8, 0xae, 0x3c, 0xab, 0x34, 0x82, 0x28, 0x51, 0xa9, 0xb6, 0x18, 0xbf, 0xe0, 0x4a, + 0x95, 0xdc, 0x89, 0x02, 0x13, 0x8b, 0xb0, 0xbf, 0xf8, 0xd3, 0xf5, 0x91, 0xfc, 0x92, 0x44, 0xa3, + 0x5d, 0x8a, 0x1a, 0x47, 0x4b, 0x45, 0x89, 0x97, 0xba, 0xf4, 0xbf, 0xaf, 0xb8, 0x71, 0x10, 0x42, + 0x8d, 0x43, 0x6f, 0x26, 0x12, 0xa7, 0xbd, 0x83, 0xe1, 0x6b, 0x6e, 0x1c, 0xcc, 0x90, 0x82, 0x0f, + 0x0c, 0x0e, 0x8a, 0x6f, 0x58, 0xc1, 0x0c, 0x2a, 0xee, 0xe9, 0x0e, 0xda, 0x54, 0xfa, 0x41, 0xa6, + 0x53, 0x0f, 0x57, 0x5b, 0x54, 0xdf, 0x6e, 0x55, 0x0f, 0x61, 0xab, 0x06, 0x2a, 0x4e, 0xc2, 0x58, + 0xcf, 0x11, 0xa3, 0x7e, 0xcb, 0x2e, 0xdb, 0xb2, 0xcc, 0x32, 0xcf, 0x2f, 0x85, 0x8f, 0x6d, 0x53, + 0x33, 0xaa, 0x9e, 0x30, 0xc4, 0x9d, 0x58, 0xf7, 0xea, 0x39, 0xc0, 0x2e, 0x3b, 0xbf, 0x5d, 0x96, + 0xbe, 0x72, 0x0c, 0x10, 0xc7, 0x61, 0xb4, 0x72, 0x06, 0xb0, 0xab, 0x1e, 0x27, 0xd5, 0x88, 0x79, + 0x04, 0x10, 0x87, 0x61, 0x0f, 0xce, 0x73, 0x3b, 0xfe, 0x04, 0xe1, 0xc5, 0x72, 0x71, 0x14, 0x86, + 0x78, 0x8e, 0xdb, 0xd1, 0x27, 0x09, 0x2d, 0x11, 0xc4, 0x79, 0x86, 0xdb, 0xf1, 0xa7, 
0x18, 0x67, + 0x04, 0x71, 0xf7, 0x14, 0x7e, 0xf7, 0xcc, 0x1e, 0xea, 0xc3, 0x9c, 0xbb, 0x59, 0xd8, 0x47, 0xc3, + 0xdb, 0x4e, 0x3f, 0x4d, 0x5f, 0xce, 0x84, 0xb8, 0x03, 0xf6, 0x3a, 0x26, 0xfc, 0x59, 0x42, 0x3b, + 0xeb, 0xc5, 0x02, 0x0c, 0x1b, 0x03, 0xdb, 0x8e, 0x3f, 0x47, 0xb8, 0x49, 0x61, 0xe8, 0x34, 0xb0, + 0xed, 0x82, 0xe7, 0x39, 0x74, 0x22, 0x30, 0x6d, 0x3c, 0xab, 0xed, 0xf4, 0x0b, 0x9c, 0x75, 0x46, + 0xc4, 0x1c, 0xd4, 0xca, 0xfe, 0x6b, 0xe7, 0x5f, 0x24, 0xbe, 0xcb, 0x60, 0x06, 0x8c, 0xfe, 0x6f, + 0x57, 0xbc, 0xc4, 0x19, 0x30, 0x28, 0xdc, 0x46, 0xbd, 0x33, 0xdd, 0x6e, 0x7a, 0x99, 0xb7, 0x51, + 0xcf, 0x48, 0xc7, 0x6a, 0x16, 0x6d, 0xd0, 0xae, 0x78, 0x85, 0xab, 0x59, 0xac, 0xc7, 0x30, 0x7a, + 0x87, 0xa4, 0xdd, 0xf1, 0x2a, 0x87, 0xd1, 0x33, 0x23, 0xc5, 0x0a, 0xd4, 0x77, 0x0f, 0x48, 0xbb, + 0xef, 0x35, 0xf2, 0x8d, 0xef, 0x9a, 0x8f, 0xe2, 0x3e, 0x98, 0xea, 0x3f, 0x1c, 0xed, 0xd6, 0x0b, + 0xdb, 0x3d, 0xaf, 0x33, 0xe6, 0x6c, 0x14, 0xa7, 0xba, 0x5d, 0xd6, 0x1c, 0x8c, 0x76, 0xed, 0xc5, + 0xed, 0x6a, 0xa3, 0x35, 0xe7, 0xa2, 0x98, 0x07, 0xe8, 0xce, 0x24, 0xbb, 0xeb, 0x12, 0xb9, 0x0c, + 0x08, 0xb7, 0x06, 0x8d, 0x24, 0x3b, 0x7f, 0x99, 0xb7, 0x06, 0x11, 0xb8, 0x35, 0x78, 0x1a, 0xd9, + 0xe9, 0x2b, 0xbc, 0x35, 0x18, 0x11, 0xb3, 0x30, 0x14, 0xe7, 0x61, 0x88, 0xcf, 0x56, 0xfd, 0xe6, + 0x3e, 0xe3, 0x46, 0x86, 0x6d, 0x86, 0x7f, 0xdd, 0x21, 0x98, 0x01, 0x71, 0x18, 0xf6, 0xca, 0xa8, + 0x29, 0xdb, 0x36, 0xf2, 0xb7, 0x1d, 0xee, 0x27, 0xb8, 0x5a, 0xcc, 0x01, 0x74, 0x5e, 0xa6, 0x31, + 0x0a, 0x1b, 0xfb, 0xfb, 0x4e, 0xe7, 0xbd, 0xde, 0x40, 0xba, 0x82, 0xe2, 0x6d, 0xdc, 0x22, 0xd8, + 0xaa, 0x0a, 0x8a, 0x17, 0xf0, 0x23, 0xb0, 0xef, 0xe1, 0x4c, 0xc5, 0xda, 0xf3, 0x6d, 0xf4, 0x1f, + 0x44, 0xf3, 0x7a, 0x4c, 0x58, 0xa4, 0x52, 0xa9, 0x3d, 0x3f, 0xb3, 0xb1, 0x7f, 0x12, 0x5b, 0x02, + 0x08, 0xb7, 0xbc, 0x4c, 0xbb, 0xdc, 0xf7, 0x5f, 0x0c, 0x33, 0x80, 0x41, 0xe3, 0xf5, 0x23, 0x72, + 0xd3, 0xc6, 0xfe, 0xcd, 0x41, 0xd3, 0x7a, 0x71, 0x14, 0x6a, 0x78, 0x59, 0xfc, 0x0e, 0x61, 0x83, + 0xff, 0x21, 0xb8, 0x4b, 
0xe0, 0x37, 0x67, 0xba, 0xad, 0x03, 0x7b, 0xb2, 0xff, 0xa5, 0x4a, 0xf3, + 0x7a, 0x31, 0x0f, 0xc3, 0x99, 0x6e, 0xb7, 0x73, 0x3a, 0xd1, 0x58, 0xf0, 0xff, 0x76, 0xca, 0x97, + 0xdc, 0x92, 0x39, 0xb6, 0x08, 0x13, 0x2d, 0x15, 0xf5, 0x82, 0xc7, 0x60, 0x49, 0x2d, 0xa9, 0x95, + 0x62, 0x17, 0x3d, 0x78, 0x9b, 0x1f, 0xe8, 0x8d, 0xbc, 0x39, 0xdd, 0x52, 0xd1, 0x0c, 0x1e, 0x35, + 0xbb, 0xbf, 0xa0, 0x95, 0x07, 0xcf, 0xff, 0x03, 0x00, 0x00, 0xff, 0xff, 0xed, 0x5f, 0x6c, 0x20, + 0x74, 0x13, 0x00, 0x00, } diff --git a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto index e8e3edb61..7f0997935 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto +++ b/vendor/github.com/gogo/protobuf/gogoproto/gogo.proto @@ -1,5 +1,7 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -31,12 +33,14 @@ import "google/protobuf/descriptor.proto"; option java_package = "com.google.protobuf"; option java_outer_classname = "GoGoProtos"; +option go_package = "github.com/gogo/protobuf/gogoproto"; extend google.protobuf.EnumOptions { optional bool goproto_enum_prefix = 62001; optional bool goproto_enum_stringer = 62021; optional bool enum_stringer = 62022; optional string enum_customname = 62023; + optional bool enumdecl = 62024; } extend google.protobuf.EnumValueOptions { @@ -75,6 +79,10 @@ extend google.protobuf.FileOptions { optional bool gogoproto_import = 63027; optional bool protosizer_all = 63028; optional bool compare_all = 63029; + optional bool typedecl_all = 63030; + optional bool enumdecl_all = 63031; + + optional bool goproto_registration = 63032; } extend google.protobuf.MessageOptions { @@ -105,6 +113,8 @@ extend google.protobuf.MessageOptions { optional bool protosizer = 64028; optional bool compare = 64029; + + optional bool typedecl = 64030; } extend google.protobuf.FieldOptions { @@ -117,4 +127,7 @@ extend google.protobuf.FieldOptions { optional string casttype = 65007; optional string castkey = 65008; optional string castvalue = 65009; + + optional bool stdtime = 65010; + optional bool stdduration = 65011; } diff --git a/vendor/github.com/gogo/protobuf/gogoproto/helper.go b/vendor/github.com/gogo/protobuf/gogoproto/helper.go index 8c29dbc0e..6b851c562 100644 --- a/vendor/github.com/gogo/protobuf/gogoproto/helper.go +++ b/vendor/github.com/gogo/protobuf/gogoproto/helper.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -37,6 +39,14 @@ func IsNullable(field *google_protobuf.FieldDescriptorProto) bool { return proto.GetBoolExtension(field.Options, E_Nullable, true) } +func IsStdTime(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdtime, false) +} + +func IsStdDuration(field *google_protobuf.FieldDescriptorProto) bool { + return proto.GetBoolExtension(field.Options, E_Stdduration, false) +} + func NeedsNilCheck(proto3 bool, field *google_protobuf.FieldDescriptorProto) bool { nullable := IsNullable(field) if field.IsMessage() || IsCustomType(field) { @@ -80,7 +90,18 @@ func IsCastValue(field *google_protobuf.FieldDescriptorProto) bool { return false } +func HasEnumDecl(file *google_protobuf.FileDescriptorProto, enum *google_protobuf.EnumDescriptorProto) bool { + return proto.GetBoolExtension(enum.Options, E_Enumdecl, proto.GetBoolExtension(file.Options, E_EnumdeclAll, true)) +} + +func HasTypeDecl(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { + return proto.GetBoolExtension(message.Options, E_Typedecl, proto.GetBoolExtension(file.Options, E_TypedeclAll, true)) +} + func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Customtype) if err == nil && v.(*string) != nil { @@ -91,6 +112,9 @@ func GetCustomType(field *google_protobuf.FieldDescriptorProto) string { } func GetCastType(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Casttype) if err == nil && v.(*string) != nil { @@ -101,6 +125,9 @@ func GetCastType(field *google_protobuf.FieldDescriptorProto) string { } func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { + if 
field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Castkey) if err == nil && v.(*string) != nil { @@ -111,6 +138,9 @@ func GetCastKey(field *google_protobuf.FieldDescriptorProto) string { } func GetCastValue(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Castvalue) if err == nil && v.(*string) != nil { @@ -145,6 +175,9 @@ func IsEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) bool } func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Customname) if err == nil && v.(*string) != nil { @@ -155,6 +188,9 @@ func GetCustomName(field *google_protobuf.FieldDescriptorProto) string { } func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_EnumCustomname) if err == nil && v.(*string) != nil { @@ -165,6 +201,9 @@ func GetEnumCustomName(field *google_protobuf.EnumDescriptorProto) string { } func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) string { + if field == nil { + return "" + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_EnumvalueCustomname) if err == nil && v.(*string) != nil { @@ -175,6 +214,9 @@ func GetEnumValueCustomName(field *google_protobuf.EnumValueDescriptorProto) str } func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { + if field == nil { + return nil + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Jsontag) if err == nil && v.(*string) != nil { @@ -185,6 +227,9 @@ func GetJsonTag(field *google_protobuf.FieldDescriptorProto) *string { } func GetMoreTags(field *google_protobuf.FieldDescriptorProto) *string { + if field == 
nil { + return nil + } if field.Options != nil { v, err := proto.GetExtension(field.Options, E_Moretags) if err == nil && v.(*string) != nil { @@ -306,3 +351,7 @@ func ImportsGoGoProto(file *google_protobuf.FileDescriptorProto) bool { func HasCompare(file *google_protobuf.FileDescriptorProto, message *google_protobuf.DescriptorProto) bool { return proto.GetBoolExtension(message.Options, E_Compare, proto.GetBoolExtension(file.Options, E_CompareAll, false)) } + +func RegistersGolangProto(file *google_protobuf.FileDescriptorProto) bool { + return proto.GetBoolExtension(file.Options, E_GoprotoRegistration, false) +} diff --git a/vendor/github.com/gogo/protobuf/install-protobuf.sh b/vendor/github.com/gogo/protobuf/install-protobuf.sh index 10c9320ea..f42fc9e63 100755 --- a/vendor/github.com/gogo/protobuf/install-protobuf.sh +++ b/vendor/github.com/gogo/protobuf/install-protobuf.sh @@ -7,23 +7,22 @@ die() { exit 1 } +cd /home/travis + case "$PROTOBUF_VERSION" in 2*) basename=protobuf-$PROTOBUF_VERSION + wget https://github.com/google/protobuf/releases/download/v$PROTOBUF_VERSION/$basename.tar.gz + tar xzf $basename.tar.gz + cd protobuf-$PROTOBUF_VERSION + ./configure --prefix=/home/travis && make -j2 && make install ;; 3*) - basename=protobuf-cpp-$PROTOBUF_VERSION + basename=protoc-$PROTOBUF_VERSION-linux-x86_64 + wget https://github.com/google/protobuf/releases/download/v$PROTOBUF_VERSION/$basename.zip + unzip $basename.zip ;; *) die "unknown protobuf version: $PROTOBUF_VERSION" ;; esac - -cd /home/travis - -wget https://github.com/google/protobuf/releases/download/v$PROTOBUF_VERSION/$basename.tar.gz -tar xzf $basename.tar.gz - -cd protobuf-$PROTOBUF_VERSION - -./configure --prefix=/home/travis && make -j2 && make install diff --git a/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go b/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go index 13b561603..57a3a9147 100644 --- a/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go +++ 
b/vendor/github.com/gogo/protobuf/jsonpb/jsonpb.go @@ -41,14 +41,18 @@ package jsonpb import ( "bytes" "encoding/json" + "errors" "fmt" "io" + "math" "reflect" "sort" "strconv" "strings" + "time" "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/types" ) // Marshaler is a configurable object for converting between @@ -68,12 +72,53 @@ type Marshaler struct { // Whether to use the original (.proto) name for fields. OrigName bool + + // A custom URL resolver to use when marshaling Any messages to JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver +} + +// AnyResolver takes a type URL, present in an Any message, and resolves it into +// an instance of the associated message. +type AnyResolver interface { + Resolve(typeUrl string) (proto.Message, error) +} + +func defaultResolveAny(typeUrl string) (proto.Message, error) { + // Only the part of typeUrl after the last slash is relevant. + mname := typeUrl + if slash := strings.LastIndex(mname, "/"); slash >= 0 { + mname = mname[slash+1:] + } + mt := proto.MessageType(mname) + if mt == nil { + return nil, fmt.Errorf("unknown message type %q", mname) + } + return reflect.New(mt.Elem()).Interface().(proto.Message), nil +} + +// JSONPBMarshaler is implemented by protobuf messages that customize the +// way they are marshaled to JSON. Messages that implement this should +// also implement JSONPBUnmarshaler so that the custom format can be +// parsed. +type JSONPBMarshaler interface { + MarshalJSONPB(*Marshaler) ([]byte, error) +} + +// JSONPBUnmarshaler is implemented by protobuf messages that customize +// the way they are unmarshaled from JSON. Messages that implement this +// should also implement JSONPBMarshaler so that the custom format can be +// produced. 
+type JSONPBUnmarshaler interface { + UnmarshalJSONPB(*Unmarshaler, []byte) error } // Marshal marshals a protocol buffer into JSON. func (m *Marshaler) Marshal(out io.Writer, pb proto.Message) error { writer := &errWriter{writer: out} - return m.marshalObject(writer, pb, "") + return m.marshalObject(writer, pb, "", "") } // MarshalToString converts a protocol buffer object to JSON string. @@ -87,20 +132,119 @@ func (m *Marshaler) MarshalToString(pb proto.Message) (string, error) { type int32Slice []int32 +var nonFinite = map[string]float64{ + `"NaN"`: math.NaN(), + `"Infinity"`: math.Inf(1), + `"-Infinity"`: math.Inf(-1), +} + // For sorting extensions ids to ensure stable output. func (s int32Slice) Len() int { return len(s) } func (s int32Slice) Less(i, j int) bool { return s[i] < s[j] } func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } +type isWkt interface { + XXX_WellKnownType() string +} + // marshalObject writes a struct to the Writer. -func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent string) error { +func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent, typeURL string) error { + if jsm, ok := v.(JSONPBMarshaler); ok { + b, err := jsm.MarshalJSONPB(m) + if err != nil { + return err + } + if typeURL != "" { + // we are marshaling this object to an Any type + var js map[string]*json.RawMessage + if err = json.Unmarshal(b, &js); err != nil { + return fmt.Errorf("type %T produced invalid JSON: %v", v, err) + } + turl, err := json.Marshal(typeURL) + if err != nil { + return fmt.Errorf("failed to marshal type URL %q to JSON: %v", typeURL, err) + } + js["@type"] = (*json.RawMessage)(&turl) + if b, err = json.Marshal(js); err != nil { + return err + } + } + + out.write(string(b)) + return out.err + } + + s := reflect.ValueOf(v).Elem() + + // Handle well-known types. 
+ if wkt, ok := v.(isWkt); ok { + switch wkt.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + // "Wrappers use the same representation in JSON + // as the wrapped primitive type, ..." + sprop := proto.GetProperties(s.Type()) + return m.marshalValue(out, sprop.Prop[0], s.Field(0), indent) + case "Any": + // Any is a bit more involved. + return m.marshalAny(out, v, indent) + case "Duration": + // "Generated output always contains 3, 6, or 9 fractional digits, + // depending on required precision." + s, ns := s.Field(0).Int(), s.Field(1).Int() + d := time.Duration(s)*time.Second + time.Duration(ns)*time.Nanosecond + x := fmt.Sprintf("%.9f", d.Seconds()) + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + out.write(`"`) + out.write(x) + out.write(`s"`) + return out.err + case "Struct", "ListValue": + // Let marshalValue handle the `Struct.fields` map or the `ListValue.values` slice. + // TODO: pass the correct Properties if needed. + return m.marshalValue(out, &proto.Properties{}, s.Field(0), indent) + case "Timestamp": + // "RFC 3339, where generated output will always be Z-normalized + // and uses 3, 6 or 9 fractional digits." + s, ns := s.Field(0).Int(), s.Field(1).Int() + t := time.Unix(s, ns).UTC() + // time.RFC3339Nano isn't exactly right (we need to get 3/6/9 fractional digits). + x := t.Format("2006-01-02T15:04:05.000000000") + x = strings.TrimSuffix(x, "000") + x = strings.TrimSuffix(x, "000") + out.write(`"`) + out.write(x) + out.write(`Z"`) + return out.err + case "Value": + // Value has a single oneof. + kind := s.Field(0) + if kind.IsNil() { + // "absence of any variant indicates an error" + return errors.New("nil Value") + } + // oneof -> *T -> T -> T.F + x := kind.Elem().Elem().Field(0) + // TODO: pass the correct Properties if needed. 
+ return m.marshalValue(out, &proto.Properties{}, x, indent) + } + } + out.write("{") if m.Indent != "" { out.write("\n") } - s := reflect.ValueOf(v).Elem() firstField := true + + if typeURL != "" { + if err := m.marshalTypeURL(out, indent, typeURL); err != nil { + return err + } + firstField = false + } + for i := 0; i < s.NumField(); i++ { value := s.Field(i) valueField := s.Type().Field(i) @@ -108,9 +252,14 @@ func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent string continue } + //this is not a protobuf field + if valueField.Tag.Get("protobuf") == "" && valueField.Tag.Get("protobuf_oneof") == "" { + continue + } + // IsNil will panic on most value kinds. switch value.Kind() { - case reflect.Chan, reflect.Func, reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice: + case reflect.Chan, reflect.Func, reflect.Interface: if value.IsNil() { continue } @@ -138,6 +287,10 @@ func (m *Marshaler) marshalObject(out *errWriter, v proto.Message, indent string if value.Len() == 0 { continue } + case reflect.Map, reflect.Ptr, reflect.Slice: + if value.IsNil() { + continue + } } } @@ -227,6 +380,77 @@ func (m *Marshaler) writeSep(out *errWriter) { } } +func (m *Marshaler) marshalAny(out *errWriter, any proto.Message, indent string) error { + // "If the Any contains a value that has a special JSON mapping, + // it will be converted as follows: {"@type": xxx, "value": yyy}. + // Otherwise, the value will be converted into a JSON object, + // and the "@type" field will be inserted to indicate the actual data type." 
+ v := reflect.ValueOf(any).Elem() + turl := v.Field(0).String() + val := v.Field(1).Bytes() + + var msg proto.Message + var err error + if m.AnyResolver != nil { + msg, err = m.AnyResolver.Resolve(turl) + } else { + msg, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if err := proto.Unmarshal(val, msg); err != nil { + return err + } + + if _, ok := msg.(isWkt); ok { + out.write("{") + if m.Indent != "" { + out.write("\n") + } + if err := m.marshalTypeURL(out, indent, turl); err != nil { + return err + } + m.writeSep(out) + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + out.write(`"value": `) + } else { + out.write(`"value":`) + } + if err := m.marshalObject(out, msg, indent+m.Indent, ""); err != nil { + return err + } + if m.Indent != "" { + out.write("\n") + out.write(indent) + } + out.write("}") + return out.err + } + + return m.marshalObject(out, msg, indent, turl) +} + +func (m *Marshaler) marshalTypeURL(out *errWriter, indent, typeURL string) error { + if m.Indent != "" { + out.write(indent) + out.write(m.Indent) + } + out.write(`"@type":`) + if m.Indent != "" { + out.write(" ") + } + b, err := json.Marshal(typeURL) + if err != nil { + return err + } + out.write(string(b)) + return out.err +} + // marshalField writes field description and value to the Writer. func (m *Marshaler) marshalField(out *errWriter, prop *proto.Properties, v reflect.Value, indent string) error { if m.Indent != "" { @@ -250,6 +474,12 @@ func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v refle v = reflect.Indirect(v) + // Handle nil pointer + if v.Kind() == reflect.Invalid { + out.write("null") + return out.err + } + // Handle repeated elements. if v.Kind() == reflect.Slice && v.Type().Elem().Kind() != reflect.Uint8 { out.write("[") @@ -277,6 +507,29 @@ func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v refle return out.err } + // Handle well-known types. 
+ // Most are handled up in marshalObject (because 99% are messages). + if wkt, ok := v.Interface().(isWkt); ok { + switch wkt.XXX_WellKnownType() { + case "NullValue": + out.write("null") + return out.err + } + } + + if t, ok := v.Interface().(time.Time); ok { + ts, err := types.TimestampProto(t) + if err != nil { + return err + } + return m.marshalValue(out, prop, reflect.ValueOf(ts), indent) + } + + if d, ok := v.Interface().(time.Duration); ok { + dur := types.DurationProto(d) + return m.marshalValue(out, prop, reflect.ValueOf(dur), indent) + } + // Handle enumerations. if !m.EnumsAsInts && prop.Enum != "" { // Unknown enum values will are stringified by the proto library as their @@ -330,6 +583,18 @@ func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v refle out.write(`null`) return out.err } + + if m, ok := v.Interface().(interface { + MarshalJSON() ([]byte, error) + }); ok { + data, err := m.MarshalJSON() + if err != nil { + return err + } + out.write(string(data)) + return nil + } + pm, ok := iface.(proto.Message) if !ok { if prop.CustomType == "" { @@ -341,7 +606,7 @@ func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v refle } pm = i.Convert(t).Interface().(proto.Message) } - return m.marshalObject(out, pm, indent+m.Indent) + return m.marshalObject(out, pm, indent+m.Indent, "") } // Handle maps. @@ -395,6 +660,24 @@ func (m *Marshaler) marshalValue(out *errWriter, prop *proto.Properties, v refle return out.err } + // Handle non-finite floats, e.g. NaN, Infinity and -Infinity. + if v.Kind() == reflect.Float32 || v.Kind() == reflect.Float64 { + f := v.Float() + var sval string + switch { + case math.IsInf(f, 1): + sval = `"Infinity"` + case math.IsInf(f, -1): + sval = `"-Infinity"` + case math.IsNaN(f): + sval = `"NaN"` + } + if sval != "" { + out.write(sval) + return out.err + } + } + // Default handling defers to the encoding/json library. 
b, err := json.Marshal(v.Interface()) if err != nil { @@ -417,6 +700,12 @@ type Unmarshaler struct { // Whether to allow messages to contain unknown fields, as opposed to // failing to unmarshal. AllowUnknownFields bool + + // A custom URL resolver to use when unmarshaling Any messages from JSON. + // If unset, the default resolution strategy is to extract the + // fully-qualified type name from the type URL and pass that to + // proto.MessageType(string). + AnyResolver AnyResolver } // UnmarshalNext unmarshals the next protocol buffer from a JSON object stream. @@ -466,10 +755,195 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe // Allocate memory for pointer fields. if targetType.Kind() == reflect.Ptr { + // If input value is "null" and target is a pointer type, then the field should be treated as not set + // UNLESS the target is structpb.Value, in which case it should be set to structpb.NullValue. + _, isJSONPBUnmarshaler := target.Interface().(JSONPBUnmarshaler) + if string(inputValue) == "null" && targetType != reflect.TypeOf(&types.Value{}) && !isJSONPBUnmarshaler { + return nil + } target.Set(reflect.New(targetType.Elem())) + return u.unmarshalValue(target.Elem(), inputValue, prop) } + if jsu, ok := target.Addr().Interface().(JSONPBUnmarshaler); ok { + return jsu.UnmarshalJSONPB(u, []byte(inputValue)) + } + + // Handle well-known types that are not pointers. + if w, ok := target.Addr().Interface().(isWkt); ok { + switch w.XXX_WellKnownType() { + case "DoubleValue", "FloatValue", "Int64Value", "UInt64Value", + "Int32Value", "UInt32Value", "BoolValue", "StringValue", "BytesValue": + return u.unmarshalValue(target.Field(0), inputValue, prop) + case "Any": + // Use json.RawMessage pointer type instead of value to support pre-1.8 version. 
+ // 1.8 changed RawMessage.MarshalJSON from pointer type to value type, see + // https://github.com/golang/go/issues/14493 + var jsonFields map[string]*json.RawMessage + if err := json.Unmarshal(inputValue, &jsonFields); err != nil { + return err + } + + val, ok := jsonFields["@type"] + if !ok || val == nil { + return errors.New("Any JSON doesn't have '@type'") + } + + var turl string + if err := json.Unmarshal([]byte(*val), &turl); err != nil { + return fmt.Errorf("can't unmarshal Any's '@type': %q", *val) + } + target.Field(0).SetString(turl) + + var m proto.Message + var err error + if u.AnyResolver != nil { + m, err = u.AnyResolver.Resolve(turl) + } else { + m, err = defaultResolveAny(turl) + } + if err != nil { + return err + } + + if _, ok := m.(isWkt); ok { + val, ok := jsonFields["value"] + if !ok { + return errors.New("Any JSON doesn't have 'value'") + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), *val, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } else { + delete(jsonFields, "@type") + nestedProto, uerr := json.Marshal(jsonFields) + if uerr != nil { + return fmt.Errorf("can't generate JSON for Any's nested proto to be unmarshaled: %v", uerr) + } + + if err = u.unmarshalValue(reflect.ValueOf(m).Elem(), nestedProto, nil); err != nil { + return fmt.Errorf("can't unmarshal Any nested proto %T: %v", m, err) + } + } + + b, err := proto.Marshal(m) + if err != nil { + return fmt.Errorf("can't marshal proto %T into Any.Value: %v", m, err) + } + target.Field(1).SetBytes(b) + + return nil + case "Duration": + unq, err := strconv.Unquote(string(inputValue)) + if err != nil { + return err + } + + d, err := time.ParseDuration(unq) + if err != nil { + return fmt.Errorf("bad Duration: %v", err) + } + + ns := d.Nanoseconds() + s := ns / 1e9 + ns %= 1e9 + target.Field(0).SetInt(s) + target.Field(1).SetInt(ns) + return nil + case "Timestamp": + unq, err := strconv.Unquote(string(inputValue)) + if err != nil { 
+ return err + } + + t, err := time.Parse(time.RFC3339Nano, unq) + if err != nil { + return fmt.Errorf("bad Timestamp: %v", err) + } + + target.Field(0).SetInt(t.Unix()) + target.Field(1).SetInt(int64(t.Nanosecond())) + return nil + case "Struct": + var m map[string]json.RawMessage + if err := json.Unmarshal(inputValue, &m); err != nil { + return fmt.Errorf("bad StructValue: %v", err) + } + target.Field(0).Set(reflect.ValueOf(map[string]*types.Value{})) + for k, jv := range m { + pv := &types.Value{} + if err := u.unmarshalValue(reflect.ValueOf(pv).Elem(), jv, prop); err != nil { + return fmt.Errorf("bad value in StructValue for key %q: %v", k, err) + } + target.Field(0).SetMapIndex(reflect.ValueOf(k), reflect.ValueOf(pv)) + } + return nil + case "ListValue": + var s []json.RawMessage + if err := json.Unmarshal(inputValue, &s); err != nil { + return fmt.Errorf("bad ListValue: %v", err) + } + + target.Field(0).Set(reflect.ValueOf(make([]*types.Value, len(s), len(s)))) + for i, sv := range s { + if err := u.unmarshalValue(target.Field(0).Index(i), sv, prop); err != nil { + return err + } + } + return nil + case "Value": + ivStr := string(inputValue) + if ivStr == "null" { + target.Field(0).Set(reflect.ValueOf(&types.Value_NullValue{})) + } else if v, err := strconv.ParseFloat(ivStr, 0); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_NumberValue{NumberValue: v})) + } else if v, err := strconv.Unquote(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_StringValue{StringValue: v})) + } else if v, err := strconv.ParseBool(ivStr); err == nil { + target.Field(0).Set(reflect.ValueOf(&types.Value_BoolValue{BoolValue: v})) + } else if err := json.Unmarshal(inputValue, &[]json.RawMessage{}); err == nil { + lv := &types.ListValue{} + target.Field(0).Set(reflect.ValueOf(&types.Value_ListValue{ListValue: lv})) + return u.unmarshalValue(reflect.ValueOf(lv).Elem(), inputValue, prop) + } else if err := json.Unmarshal(inputValue, 
&map[string]json.RawMessage{}); err == nil { + sv := &types.Struct{} + target.Field(0).Set(reflect.ValueOf(&types.Value_StructValue{StructValue: sv})) + return u.unmarshalValue(reflect.ValueOf(sv).Elem(), inputValue, prop) + } else { + return fmt.Errorf("unrecognized type for Value %q", ivStr) + } + return nil + } + } + + if t, ok := target.Addr().Interface().(*time.Time); ok { + ts := &types.Timestamp{} + if err := u.unmarshalValue(reflect.ValueOf(ts).Elem(), inputValue, prop); err != nil { + return err + } + tt, err := types.TimestampFromProto(ts) + if err != nil { + return err + } + *t = tt + return nil + } + + if d, ok := target.Addr().Interface().(*time.Duration); ok { + dur := &types.Duration{} + if err := u.unmarshalValue(reflect.ValueOf(dur).Elem(), inputValue, prop); err != nil { + return err + } + dd, err := types.DurationFromProto(dur) + if err != nil { + return err + } + *d = dd + return nil + } + // Handle enums, which have an underlying type of int32, // and may appear as strings. // The case of an enum appearing as a number is handled @@ -493,6 +967,14 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe // Handle nested messages. if targetType.Kind() == reflect.Struct { + if prop != nil && len(prop.CustomType) > 0 && target.CanAddr() { + if m, ok := target.Addr().Interface().(interface { + UnmarshalJSON([]byte) error + }); ok { + return json.Unmarshal(inputValue, m) + } + } + var jsonFields map[string]json.RawMessage if err := json.Unmarshal(inputValue, &jsonFields); err != nil { return err @@ -549,6 +1031,26 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe } } } + // Handle proto2 extensions. 
+ if len(jsonFields) > 0 { + if ep, ok := target.Addr().Interface().(proto.Message); ok { + for _, ext := range proto.RegisteredExtensions(ep) { + name := fmt.Sprintf("[%s]", ext.Name) + raw, ok := jsonFields[name] + if !ok { + continue + } + delete(jsonFields, name) + nv := reflect.New(reflect.TypeOf(ext.ExtensionType).Elem()) + if err := u.unmarshalValue(nv.Elem(), raw, nil); err != nil { + return err + } + if err := proto.SetExtension(ep, ext, nv.Interface()); err != nil { + return err + } + } + } + } if !u.AllowUnknownFields && len(jsonFields) > 0 { // Pick any field to be the scapegoat. var f string @@ -563,11 +1065,23 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe // Handle arrays if targetType.Kind() == reflect.Slice { - // Special case for encoded bytes. Pre-go1.5 doesn't support unmarshalling - // strings into aliased []byte types. - // https://github.com/golang/go/commit/4302fd0409da5e4f1d71471a6770dacdc3301197 - // https://github.com/golang/go/commit/c60707b14d6be26bf4213114d13070bff00d0b0a if targetType.Elem().Kind() == reflect.Uint8 { + outRef := reflect.New(targetType) + outVal := outRef.Interface() + //CustomType with underlying type []byte + if _, ok := outVal.(interface { + UnmarshalJSON([]byte) error + }); ok { + if err := json.Unmarshal(inputValue, outVal); err != nil { + return err + } + target.Set(outRef.Elem()) + return nil + } + // Special case for encoded bytes. Pre-go1.5 doesn't support unmarshalling + // strings into aliased []byte types. 
+ // https://github.com/golang/go/commit/4302fd0409da5e4f1d71471a6770dacdc3301197 + // https://github.com/golang/go/commit/c60707b14d6be26bf4213114d13070bff00d0b0a var out []byte if err := json.Unmarshal(inputValue, &out); err != nil { return err @@ -580,11 +1094,13 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe if err := json.Unmarshal(inputValue, &slc); err != nil { return err } - len := len(slc) - target.Set(reflect.MakeSlice(targetType, len, len)) - for i := 0; i < len; i++ { - if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { - return err + if slc != nil { + l := len(slc) + target.Set(reflect.MakeSlice(targetType, l, l)) + for i := 0; i < l; i++ { + if err := u.unmarshalValue(target.Index(i), slc[i], prop); err != nil { + return err + } } } return nil @@ -596,37 +1112,39 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe if err := json.Unmarshal(inputValue, &mp); err != nil { return err } - target.Set(reflect.MakeMap(targetType)) - var keyprop, valprop *proto.Properties - if prop != nil { - // These could still be nil if the protobuf metadata is broken somehow. - // TODO: This won't work because the fields are unexported. - // We should probably just reparse them. - //keyprop, valprop = prop.mkeyprop, prop.mvalprop - } - for ks, raw := range mp { - // Unmarshal map key. The core json library already decoded the key into a - // string, so we handle that specially. Other types were quoted post-serialization. - var k reflect.Value - if targetType.Key().Kind() == reflect.String { - k = reflect.ValueOf(ks) - } else { - k = reflect.New(targetType.Key()).Elem() - if err := u.unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil { + if mp != nil { + target.Set(reflect.MakeMap(targetType)) + var keyprop, valprop *proto.Properties + if prop != nil { + // These could still be nil if the protobuf metadata is broken somehow. 
+ // TODO: This won't work because the fields are unexported. + // We should probably just reparse them. + //keyprop, valprop = prop.mkeyprop, prop.mvalprop + } + for ks, raw := range mp { + // Unmarshal map key. The core json library already decoded the key into a + // string, so we handle that specially. Other types were quoted post-serialization. + var k reflect.Value + if targetType.Key().Kind() == reflect.String { + k = reflect.ValueOf(ks) + } else { + k = reflect.New(targetType.Key()).Elem() + if err := u.unmarshalValue(k, json.RawMessage(ks), keyprop); err != nil { + return err + } + } + + if !k.Type().AssignableTo(targetType.Key()) { + k = k.Convert(targetType.Key()) + } + + // Unmarshal map value. + v := reflect.New(targetType.Elem()).Elem() + if err := u.unmarshalValue(v, raw, valprop); err != nil { return err } + target.SetMapIndex(k, v) } - - if !k.Type().AssignableTo(targetType.Key()) { - k = k.Convert(targetType.Key()) - } - - // Unmarshal map value. - v := reflect.New(targetType.Elem()).Elem() - if err := u.unmarshalValue(v, raw, valprop); err != nil { - return err - } - target.SetMapIndex(k, v) } return nil } @@ -638,6 +1156,15 @@ func (u *Unmarshaler) unmarshalValue(target reflect.Value, inputValue json.RawMe inputValue = inputValue[1 : len(inputValue)-1] } + // Non-finite numbers can be encoded as strings. + isFloat := targetType.Kind() == reflect.Float32 || targetType.Kind() == reflect.Float64 + if isFloat { + if num, ok := nonFinite[string(inputValue)]; ok { + target.SetFloat(num) + return nil + } + } + // Use the encoding/json for parsing other value types. 
return json.Unmarshal(inputValue, target.Addr().Interface()) } diff --git a/vendor/github.com/gogo/protobuf/jsonpb/jsonpb_test.go b/vendor/github.com/gogo/protobuf/jsonpb/jsonpb_test.go index 0dcfcc576..8d585f2ed 100644 --- a/vendor/github.com/gogo/protobuf/jsonpb/jsonpb_test.go +++ b/vendor/github.com/gogo/protobuf/jsonpb/jsonpb_test.go @@ -35,6 +35,7 @@ import ( "bytes" "encoding/json" "io" + "math" "reflect" "strings" "testing" @@ -42,6 +43,7 @@ import ( pb "github.com/gogo/protobuf/jsonpb/jsonpb_test_proto" "github.com/gogo/protobuf/proto" proto3pb "github.com/gogo/protobuf/proto/proto3_proto" + "github.com/gogo/protobuf/types" ) var ( @@ -105,7 +107,7 @@ var ( RSint32: []int32{-1, -2, -3}, RSint64: []int64{-6789012345, -3456789012}, RFloat: []float32{3.14, 6.28}, - RDouble: []float64{299792458, 6.62606957e-34}, + RDouble: []float64{299792458 * 1e20, 6.62606957e-34}, RString: []string{"happy", "days"}, RBytes: [][]byte{[]byte("skittles"), []byte("m&m's")}, } @@ -119,7 +121,7 @@ var ( `"rSint32":[-1,-2,-3],` + `"rSint64":["-6789012345","-3456789012"],` + `"rFloat":[3.14,6.28],` + - `"rDouble":[2.99792458e+08,6.62606957e-34],` + + `"rDouble":[2.99792458e+28,6.62606957e-34],` + `"rString":["happy","days"],` + `"rBytes":["c2tpdHRsZXM=","bSZtJ3M="]` + `}` @@ -162,7 +164,7 @@ var ( 6.28 ], "rDouble": [ - 2.99792458e+08, + 2.99792458e+28, 6.62606957e-34 ], "rString": [ @@ -269,6 +271,58 @@ var ( `"[jsonpb.Complex.real_extension]":{"imaginary":0.5772156649},` + `"[jsonpb.name]":"Pi"` + `}` + + anySimple = &pb.KnownTypes{ + An: &types.Any{ + TypeUrl: "something.example.com/jsonpb.Simple", + Value: []byte{ + // &pb.Simple{OBool:true} + 1 << 3, 1, + }, + }, + } + anySimpleJSON = `{"an":{"@type":"something.example.com/jsonpb.Simple","oBool":true}}` + anySimplePrettyJSON = `{ + "an": { + "@type": "something.example.com/jsonpb.Simple", + "oBool": true + } +}` + + anyWellKnown = &pb.KnownTypes{ + An: &types.Any{ + TypeUrl: "type.googleapis.com/google.protobuf.Duration", + 
Value: []byte{ + // &durpb.Duration{Seconds: 1, Nanos: 212000000 } + 1 << 3, 1, // seconds + 2 << 3, 0x80, 0xba, 0x8b, 0x65, // nanos + }, + }, + } + anyWellKnownJSON = `{"an":{"@type":"type.googleapis.com/google.protobuf.Duration","value":"1.212s"}}` + anyWellKnownPrettyJSON = `{ + "an": { + "@type": "type.googleapis.com/google.protobuf.Duration", + "value": "1.212s" + } +}` + + nonFinites = &pb.NonFinites{ + FNan: proto.Float32(float32(math.NaN())), + FPinf: proto.Float32(float32(math.Inf(1))), + FNinf: proto.Float32(float32(math.Inf(-1))), + DNan: proto.Float64(float64(math.NaN())), + DPinf: proto.Float64(float64(math.Inf(1))), + DNinf: proto.Float64(float64(math.Inf(-1))), + } + nonFinitesJSON = `{` + + `"fNan":"NaN",` + + `"fPinf":"Infinity",` + + `"fNinf":"-Infinity",` + + `"dNan":"NaN",` + + `"dPinf":"Infinity",` + + `"dNinf":"-Infinity"` + + `}` ) func init() { @@ -288,6 +342,7 @@ var marshalingTests = []struct { }{ {"simple flat object", marshaler, simpleObject, simpleObjectJSON}, {"simple pretty object", marshalerAllOptions, simpleObject, simpleObjectPrettyJSON}, + {"non-finite floats fields object", marshaler, nonFinites, nonFinitesJSON}, {"repeated fields flat object", marshaler, repeatsObject, repeatsObjectJSON}, {"repeated fields pretty object", marshalerAllOptions, repeatsObject, repeatsObjectPrettyJSON}, {"nested message/enum flat object", marshaler, complexObject, complexObjectJSON}, @@ -312,6 +367,9 @@ var marshalingTests = []struct { `{"rFunny":[1,2]}`}, {"empty value", marshaler, &pb.Simple3{}, `{}`}, {"empty value emitted", Marshaler{EmitDefaults: true}, &pb.Simple3{}, `{"dub":0}`}, + {"empty repeated emitted", Marshaler{EmitDefaults: true}, &pb.SimpleSlice3{}, `{"slices":[]}`}, + {"empty map emitted", Marshaler{EmitDefaults: true}, &pb.SimpleMap3{}, `{"stringy":{}}`}, + {"nested struct null", Marshaler{EmitDefaults: true}, &pb.SimpleNull3{}, `{"simple":null}`}, {"map", marshaler, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}, 
`{"nummy":{"1":2,"3":4}}`}, {"map", marshalerAllOptions, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}, nummyPrettyJSON}, {"map", marshaler, @@ -341,6 +399,52 @@ var marshalingTests = []struct { {"force orig_name", Marshaler{OrigName: true}, &pb.Simple{OInt32: proto.Int32(4)}, `{"o_int32":4}`}, {"proto2 extension", marshaler, realNumber, realNumberJSON}, + {"Any with message", marshaler, anySimple, anySimpleJSON}, + {"Any with message and indent", marshalerAllOptions, anySimple, anySimplePrettyJSON}, + {"Any with WKT", marshaler, anyWellKnown, anyWellKnownJSON}, + {"Any with WKT and indent", marshalerAllOptions, anyWellKnown, anyWellKnownPrettyJSON}, + {"Duration", marshaler, &pb.KnownTypes{Dur: &types.Duration{Seconds: 3}}, `{"dur":"3.000s"}`}, + {"Struct", marshaler, &pb.KnownTypes{St: &types.Struct{ + Fields: map[string]*types.Value{ + "one": {Kind: &types.Value_StringValue{StringValue: "loneliest number"}}, + "two": {Kind: &types.Value_NullValue{NullValue: types.NULL_VALUE}}, + }, + }}, `{"st":{"one":"loneliest number","two":null}}`}, + {"empty ListValue", marshaler, &pb.KnownTypes{Lv: &types.ListValue{}}, `{"lv":[]}`}, + {"basic ListValue", marshaler, &pb.KnownTypes{Lv: &types.ListValue{Values: []*types.Value{ + {Kind: &types.Value_StringValue{StringValue: "x"}}, + {Kind: &types.Value_NullValue{}}, + {Kind: &types.Value_NumberValue{NumberValue: 3}}, + {Kind: &types.Value_BoolValue{BoolValue: true}}, + }}}, `{"lv":["x",null,3,true]}`}, + {"Timestamp", marshaler, &pb.KnownTypes{Ts: &types.Timestamp{Seconds: 14e8, Nanos: 21e6}}, `{"ts":"2014-05-13T16:53:20.021Z"}`}, + {"number Value", marshaler, &pb.KnownTypes{Val: &types.Value{Kind: &types.Value_NumberValue{NumberValue: 1}}}, `{"val":1}`}, + {"null Value", marshaler, &pb.KnownTypes{Val: &types.Value{Kind: &types.Value_NullValue{NullValue: types.NULL_VALUE}}}, `{"val":null}`}, + {"string number value", marshaler, &pb.KnownTypes{Val: &types.Value{Kind: &types.Value_StringValue{StringValue: 
"9223372036854775807"}}}, `{"val":"9223372036854775807"}`}, + {"list of lists Value", marshaler, &pb.KnownTypes{Val: &types.Value{ + Kind: &types.Value_ListValue{ListValue: &types.ListValue{ + Values: []*types.Value{ + {Kind: &types.Value_StringValue{StringValue: "x"}}, + {Kind: &types.Value_ListValue{ListValue: &types.ListValue{ + Values: []*types.Value{ + {Kind: &types.Value_ListValue{ListValue: &types.ListValue{ + Values: []*types.Value{{Kind: &types.Value_StringValue{StringValue: "y"}}}, + }}}, + {Kind: &types.Value_StringValue{StringValue: "z"}}, + }, + }}}, + }, + }}, + }}, `{"val":["x",[["y"],"z"]]}`}, + {"DoubleValue", marshaler, &pb.KnownTypes{Dbl: &types.DoubleValue{Value: 1.2}}, `{"dbl":1.2}`}, + {"FloatValue", marshaler, &pb.KnownTypes{Flt: &types.FloatValue{Value: 1.2}}, `{"flt":1.2}`}, + {"Int64Value", marshaler, &pb.KnownTypes{I64: &types.Int64Value{Value: -3}}, `{"i64":"-3"}`}, + {"UInt64Value", marshaler, &pb.KnownTypes{U64: &types.UInt64Value{Value: 3}}, `{"u64":"3"}`}, + {"Int32Value", marshaler, &pb.KnownTypes{I32: &types.Int32Value{Value: -4}}, `{"i32":-4}`}, + {"UInt32Value", marshaler, &pb.KnownTypes{U32: &types.UInt32Value{Value: 4}}, `{"u32":4}`}, + {"BoolValue", marshaler, &pb.KnownTypes{Bool: &types.BoolValue{Value: true}}, `{"bool":true}`}, + {"StringValue", marshaler, &pb.KnownTypes{Str: &types.StringValue{Value: "plush"}}, `{"str":"plush"}`}, + {"BytesValue", marshaler, &pb.KnownTypes{Bytes: &types.BytesValue{Value: []byte("wow")}}, `{"bytes":"d293"}`}, } func TestMarshaling(t *testing.T) { @@ -354,6 +458,36 @@ func TestMarshaling(t *testing.T) { } } +func TestMarshalJSONPBMarshaler(t *testing.T) { + rawJson := `{ "foo": "bar", "baz": [0, 1, 2, 3] }` + msg := dynamicMessage{rawJson: rawJson} + str, err := new(Marshaler).MarshalToString(&msg) + if err != nil { + t.Errorf("an unexpected error occurred when marshalling JSONPBMarshaler: %v", err) + } + if str != rawJson { + t.Errorf("marshalling JSON produced incorrect output: got %s, 
wanted %s", str, rawJson) + } +} + +func TestMarshalAnyJSONPBMarshaler(t *testing.T) { + msg := dynamicMessage{rawJson: `{ "foo": "bar", "baz": [0, 1, 2, 3] }`} + a, err := types.MarshalAny(&msg) + if err != nil { + t.Errorf("an unexpected error occurred when marshalling to Any: %v", err) + } + str, err := new(Marshaler).MarshalToString(a) + if err != nil { + t.Errorf("an unexpected error occurred when marshalling Any to JSON: %v", err) + } + // after custom marshaling, it's round-tripped through JSON decoding/encoding already, + // so the keys are sorted, whitespace is compacted, and "@type" key has been added + expected := `{"@type":"type.googleapis.com/` + dynamicMessageName + `","baz":[0,1,2,3],"foo":"bar"}` + if str != expected { + t.Errorf("marshalling JSON produced incorrect output: got %s, wanted %s", str, expected) + } +} + var unmarshalingTests = []struct { desc string unmarshaler Unmarshaler @@ -392,17 +526,97 @@ var unmarshalingTests = []struct { }}}, {"unquoted int64 object", Unmarshaler{}, `{"oInt64":-314}`, &pb.Simple{OInt64: proto.Int64(-314)}}, {"unquoted uint64 object", Unmarshaler{}, `{"oUint64":123}`, &pb.Simple{OUint64: proto.Uint64(123)}}, + {"NaN", Unmarshaler{}, `{"oDouble":"NaN"}`, &pb.Simple{ODouble: proto.Float64(math.NaN())}}, + {"Inf", Unmarshaler{}, `{"oFloat":"Infinity"}`, &pb.Simple{OFloat: proto.Float32(float32(math.Inf(1)))}}, + {"-Inf", Unmarshaler{}, `{"oDouble":"-Infinity"}`, &pb.Simple{ODouble: proto.Float64(math.Inf(-1))}}, {"map", Unmarshaler{}, `{"nummy":{"1":2,"3":4}}`, &pb.Mappy{Nummy: map[int64]int32{1: 2, 3: 4}}}, {"map", Unmarshaler{}, `{"strry":{"\"one\"":"two","three":"four"}}`, &pb.Mappy{Strry: map[string]string{`"one"`: "two", "three": "four"}}}, {"map", Unmarshaler{}, `{"objjy":{"1":{"dub":1}}}`, &pb.Mappy{Objjy: map[int32]*pb.Simple3{1: {Dub: 1}}}}, + {"proto2 extension", Unmarshaler{}, realNumberJSON, realNumber}, + // TODO does not work with go version 1.7, but works with go version 1.8 {"Any with message", 
Unmarshaler{}, anySimpleJSON, anySimple}, + // TODO does not work with go version 1.7, but works with go version 1.8 {"Any with message and indent", Unmarshaler{}, anySimplePrettyJSON, anySimple}, + {"Any with WKT", Unmarshaler{}, anyWellKnownJSON, anyWellKnown}, + {"Any with WKT and indent", Unmarshaler{}, anyWellKnownPrettyJSON, anyWellKnown}, // TODO: This is broken. //{"map", Unmarshaler{}, `{"enumy":{"XIV":"ROMAN"}`, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}}, {"map", Unmarshaler{}, `{"enumy":{"XIV":2}}`, &pb.Mappy{Enumy: map[string]pb.Numeral{"XIV": pb.Numeral_ROMAN}}}, {"oneof", Unmarshaler{}, `{"salary":31000}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Salary{Salary: 31000}}}, - {"oneof spec name", Unmarshaler{}, `{"country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{Country: "Australia"}}}, + {"oneof spec name", Unmarshaler{}, `{"Country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{Country: "Australia"}}}, {"oneof orig_name", Unmarshaler{}, `{"Country":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_Country{Country: "Australia"}}}, + {"oneof spec name2", Unmarshaler{}, `{"homeAddress":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_HomeAddress{HomeAddress: "Australia"}}}, + {"oneof orig_name2", Unmarshaler{}, `{"home_address":"Australia"}`, &pb.MsgWithOneof{Union: &pb.MsgWithOneof_HomeAddress{HomeAddress: "Australia"}}}, {"orig_name input", Unmarshaler{}, `{"o_bool":true}`, &pb.Simple{OBool: proto.Bool(true)}}, {"camelName input", Unmarshaler{}, `{"oBool":true}`, &pb.Simple{OBool: proto.Bool(true)}}, + {"Duration", Unmarshaler{}, `{"dur":"3.000s"}`, &pb.KnownTypes{Dur: &types.Duration{Seconds: 3}}}, + {"null Duration", Unmarshaler{}, `{"dur":null}`, &pb.KnownTypes{Dur: nil}}, + {"Timestamp", Unmarshaler{}, `{"ts":"2014-05-13T16:53:20.021Z"}`, &pb.KnownTypes{Ts: &types.Timestamp{Seconds: 14e8, Nanos: 21e6}}}, + {"PreEpochTimestamp", Unmarshaler{}, 
`{"ts":"1969-12-31T23:59:58.999999995Z"}`, &pb.KnownTypes{Ts: &types.Timestamp{Seconds: -2, Nanos: 999999995}}}, + {"ZeroTimeTimestamp", Unmarshaler{}, `{"ts":"0001-01-01T00:00:00Z"}`, &pb.KnownTypes{Ts: &types.Timestamp{Seconds: -62135596800, Nanos: 0}}}, + {"null Timestamp", Unmarshaler{}, `{"ts":null}`, &pb.KnownTypes{Ts: nil}}, + {"null Struct", Unmarshaler{}, `{"st": null}`, &pb.KnownTypes{St: nil}}, + {"empty Struct", Unmarshaler{}, `{"st": {}}`, &pb.KnownTypes{St: &types.Struct{}}}, + {"basic Struct", Unmarshaler{}, `{"st": {"a": "x", "b": null, "c": 3, "d": true}}`, &pb.KnownTypes{St: &types.Struct{Fields: map[string]*types.Value{ + "a": {Kind: &types.Value_StringValue{StringValue: "x"}}, + "b": {Kind: &types.Value_NullValue{}}, + "c": {Kind: &types.Value_NumberValue{NumberValue: 3}}, + "d": {Kind: &types.Value_BoolValue{BoolValue: true}}, + }}}}, + {"nested Struct", Unmarshaler{}, `{"st": {"a": {"b": 1, "c": [{"d": true}, "f"]}}}`, &pb.KnownTypes{St: &types.Struct{Fields: map[string]*types.Value{ + "a": {Kind: &types.Value_StructValue{StructValue: &types.Struct{Fields: map[string]*types.Value{ + "b": {Kind: &types.Value_NumberValue{NumberValue: 1}}, + "c": {Kind: &types.Value_ListValue{ListValue: &types.ListValue{Values: []*types.Value{ + {Kind: &types.Value_StructValue{StructValue: &types.Struct{Fields: map[string]*types.Value{"d": {Kind: &types.Value_BoolValue{BoolValue: true}}}}}}, + {Kind: &types.Value_StringValue{StringValue: "f"}}, + }}}}, + }}}}, + }}}}, + {"null ListValue", Unmarshaler{}, `{"lv": null}`, &pb.KnownTypes{Lv: nil}}, + {"empty ListValue", Unmarshaler{}, `{"lv": []}`, &pb.KnownTypes{Lv: &types.ListValue{}}}, + {"basic ListValue", Unmarshaler{}, `{"lv": ["x", null, 3, true]}`, &pb.KnownTypes{Lv: &types.ListValue{Values: []*types.Value{ + {Kind: &types.Value_StringValue{StringValue: "x"}}, + {Kind: &types.Value_NullValue{}}, + {Kind: &types.Value_NumberValue{NumberValue: 3}}, + {Kind: &types.Value_BoolValue{BoolValue: true}}, + }}}}, + 
{"number Value", Unmarshaler{}, `{"val":1}`, &pb.KnownTypes{Val: &types.Value{Kind: &types.Value_NumberValue{NumberValue: 1}}}}, + {"null Value", Unmarshaler{}, `{"val":null}`, &pb.KnownTypes{Val: &types.Value{Kind: &types.Value_NullValue{NullValue: types.NULL_VALUE}}}}, + {"bool Value", Unmarshaler{}, `{"val":true}`, &pb.KnownTypes{Val: &types.Value{Kind: &types.Value_BoolValue{BoolValue: true}}}}, + {"string Value", Unmarshaler{}, `{"val":"x"}`, &pb.KnownTypes{Val: &types.Value{Kind: &types.Value_StringValue{StringValue: "x"}}}}, + {"string number value", Unmarshaler{}, `{"val":"9223372036854775807"}`, &pb.KnownTypes{Val: &types.Value{Kind: &types.Value_StringValue{StringValue: "9223372036854775807"}}}}, + {"list of lists Value", Unmarshaler{}, `{"val":["x", [["y"], "z"]]}`, &pb.KnownTypes{Val: &types.Value{ + Kind: &types.Value_ListValue{ListValue: &types.ListValue{ + Values: []*types.Value{ + {Kind: &types.Value_StringValue{StringValue: "x"}}, + {Kind: &types.Value_ListValue{ListValue: &types.ListValue{ + Values: []*types.Value{ + {Kind: &types.Value_ListValue{ListValue: &types.ListValue{ + Values: []*types.Value{{Kind: &types.Value_StringValue{StringValue: "y"}}}, + }}}, + {Kind: &types.Value_StringValue{StringValue: "z"}}, + }, + }}}, + }, + }}}}}, + + {"DoubleValue", Unmarshaler{}, `{"dbl":1.2}`, &pb.KnownTypes{Dbl: &types.DoubleValue{Value: 1.2}}}, + {"FloatValue", Unmarshaler{}, `{"flt":1.2}`, &pb.KnownTypes{Flt: &types.FloatValue{Value: 1.2}}}, + {"Int64Value", Unmarshaler{}, `{"i64":"-3"}`, &pb.KnownTypes{I64: &types.Int64Value{Value: -3}}}, + {"UInt64Value", Unmarshaler{}, `{"u64":"3"}`, &pb.KnownTypes{U64: &types.UInt64Value{Value: 3}}}, + {"Int32Value", Unmarshaler{}, `{"i32":-4}`, &pb.KnownTypes{I32: &types.Int32Value{Value: -4}}}, + {"UInt32Value", Unmarshaler{}, `{"u32":4}`, &pb.KnownTypes{U32: &types.UInt32Value{Value: 4}}}, + {"BoolValue", Unmarshaler{}, `{"bool":true}`, &pb.KnownTypes{Bool: &types.BoolValue{Value: true}}}, + {"StringValue", 
Unmarshaler{}, `{"str":"plush"}`, &pb.KnownTypes{Str: &types.StringValue{Value: "plush"}}}, + {"BytesValue", Unmarshaler{}, `{"bytes":"d293"}`, &pb.KnownTypes{Bytes: &types.BytesValue{Value: []byte("wow")}}}, + // Ensure that `null` as a value ends up with a nil pointer instead of a [type]Value struct. + {"null DoubleValue", Unmarshaler{}, `{"dbl":null}`, &pb.KnownTypes{Dbl: nil}}, + {"null FloatValue", Unmarshaler{}, `{"flt":null}`, &pb.KnownTypes{Flt: nil}}, + {"null Int64Value", Unmarshaler{}, `{"i64":null}`, &pb.KnownTypes{I64: nil}}, + {"null UInt64Value", Unmarshaler{}, `{"u64":null}`, &pb.KnownTypes{U64: nil}}, + {"null Int32Value", Unmarshaler{}, `{"i32":null}`, &pb.KnownTypes{I32: nil}}, + {"null UInt32Value", Unmarshaler{}, `{"u32":null}`, &pb.KnownTypes{U32: nil}}, + {"null BoolValue", Unmarshaler{}, `{"bool":null}`, &pb.KnownTypes{Bool: nil}}, + {"null StringValue", Unmarshaler{}, `{"str":null}`, &pb.KnownTypes{Str: nil}}, + {"null BytesValue", Unmarshaler{}, `{"bytes":null}`, &pb.KnownTypes{Bytes: nil}}, } func TestUnmarshaling(t *testing.T) { @@ -425,6 +639,26 @@ func TestUnmarshaling(t *testing.T) { } } +func TestUnmarshalNullArray(t *testing.T) { + var repeats pb.Repeats + if err := UnmarshalString(`{"rBool":null}`, &repeats); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(repeats, pb.Repeats{}) { + t.Errorf("got non-nil fields in [%#v]", repeats) + } +} + +func TestUnmarshalNullObject(t *testing.T) { + var maps pb.Maps + if err := UnmarshalString(`{"mInt64Str":null}`, &maps); err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(maps, pb.Maps{}) { + t.Errorf("got non-nil fields in [%#v]", maps) + } +} + func TestUnmarshalNext(t *testing.T) { // We only need to check against a few, not all of them. 
tests := unmarshalingTests[:5] @@ -480,3 +714,177 @@ func TestUnmarshalingBadInput(t *testing.T) { } } } + +type funcResolver func(turl string) (proto.Message, error) + +func (fn funcResolver) Resolve(turl string) (proto.Message, error) { + return fn(turl) +} + +func TestAnyWithCustomResolver(t *testing.T) { + var resolvedTypeUrls []string + resolver := funcResolver(func(turl string) (proto.Message, error) { + resolvedTypeUrls = append(resolvedTypeUrls, turl) + return new(pb.Simple), nil + }) + msg := &pb.Simple{ + OBytes: []byte{1, 2, 3, 4}, + OBool: proto.Bool(true), + OString: proto.String("foobar"), + OInt64: proto.Int64(1020304), + } + msgBytes, err := proto.Marshal(msg) + if err != nil { + t.Errorf("an unexpected error occurred when marshaling message: %v", err) + } + // make an Any with a type URL that won't resolve w/out custom resolver + any := &types.Any{ + TypeUrl: "https://foobar.com/some.random.MessageKind", + Value: msgBytes, + } + + m := Marshaler{AnyResolver: resolver} + js, err := m.MarshalToString(any) + if err != nil { + t.Errorf("an unexpected error occurred when marshaling any to JSON: %v", err) + } + if len(resolvedTypeUrls) != 1 { + t.Errorf("custom resolver was not invoked during marshaling") + } else if resolvedTypeUrls[0] != "https://foobar.com/some.random.MessageKind" { + t.Errorf("custom resolver was invoked with wrong URL: got %q, wanted %q", resolvedTypeUrls[0], "https://foobar.com/some.random.MessageKind") + } + wanted := `{"@type":"https://foobar.com/some.random.MessageKind","oBool":true,"oInt64":"1020304","oString":"foobar","oBytes":"AQIDBA=="}` + if js != wanted { + t.Errorf("marshalling JSON produced incorrect output: got %s, wanted %s", js, wanted) + } + + u := Unmarshaler{AnyResolver: resolver} + roundTrip := &types.Any{} + err = u.Unmarshal(bytes.NewReader([]byte(js)), roundTrip) + if err != nil { + t.Errorf("an unexpected error occurred when unmarshaling any from JSON: %v", err) + } + if len(resolvedTypeUrls) != 2 { + 
t.Errorf("custom resolver was not invoked during marshaling") + } else if resolvedTypeUrls[1] != "https://foobar.com/some.random.MessageKind" { + t.Errorf("custom resolver was invoked with wrong URL: got %q, wanted %q", resolvedTypeUrls[1], "https://foobar.com/some.random.MessageKind") + } + if !proto.Equal(any, roundTrip) { + t.Errorf("message contents not set correctly after unmarshalling JSON: got %s, wanted %s", roundTrip, any) + } +} + +func TestUnmarshalJSONPBUnmarshaler(t *testing.T) { + rawJson := `{ "foo": "bar", "baz": [0, 1, 2, 3] }` + var msg dynamicMessage + if err := Unmarshal(strings.NewReader(rawJson), &msg); err != nil { + t.Errorf("an unexpected error occurred when parsing into JSONPBUnmarshaler: %v", err) + } + if msg.rawJson != rawJson { + t.Errorf("message contents not set correctly after unmarshalling JSON: got %s, wanted %s", msg.rawJson, rawJson) + } +} + +func TestUnmarshalNullWithJSONPBUnmarshaler(t *testing.T) { + rawJson := `{"stringField":null}` + var ptrFieldMsg ptrFieldMessage + if err := Unmarshal(strings.NewReader(rawJson), &ptrFieldMsg); err != nil { + t.Errorf("unmarshal error: %v", err) + } + + want := ptrFieldMessage{StringField: &stringField{IsSet: true, StringValue: "null"}} + if !proto.Equal(&ptrFieldMsg, &want) { + t.Errorf("unmarshal result StringField: got %v, want %v", ptrFieldMsg, want) + } +} + +func TestUnmarshalAnyJSONPBUnmarshaler(t *testing.T) { + rawJson := `{ "@type": "blah.com/` + dynamicMessageName + `", "foo": "bar", "baz": [0, 1, 2, 3] }` + var got types.Any + if err := Unmarshal(strings.NewReader(rawJson), &got); err != nil { + t.Errorf("an unexpected error occurred when parsing into JSONPBUnmarshaler: %v", err) + } + + dm := &dynamicMessage{rawJson: `{"baz":[0,1,2,3],"foo":"bar"}`} + var want types.Any + if b, err := proto.Marshal(dm); err != nil { + t.Errorf("an unexpected error occurred when marshaling message: %v", err) + } else { + want.TypeUrl = "blah.com/" + dynamicMessageName + want.Value = b + } + + 
if !proto.Equal(&got, &want) { + t.Errorf("message contents not set correctly after unmarshalling JSON: got %s, wanted %s", got, want) + } +} + +const ( + dynamicMessageName = "google.protobuf.jsonpb.testing.dynamicMessage" +) + +func init() { + // we register the custom type below so that we can use it in Any types + proto.RegisterType((*dynamicMessage)(nil), dynamicMessageName) +} + +type ptrFieldMessage struct { + StringField *stringField `protobuf:"bytes,1,opt,name=stringField"` +} + +func (m *ptrFieldMessage) Reset() { +} + +func (m *ptrFieldMessage) String() string { + return m.StringField.StringValue +} + +func (m *ptrFieldMessage) ProtoMessage() { +} + +type stringField struct { + IsSet bool `protobuf:"varint,1,opt,name=isSet"` + StringValue string `protobuf:"bytes,2,opt,name=stringValue"` +} + +func (s *stringField) Reset() { +} + +func (s *stringField) String() string { + return s.StringValue +} + +func (s *stringField) ProtoMessage() { +} + +func (s *stringField) UnmarshalJSONPB(jum *Unmarshaler, js []byte) error { + s.IsSet = true + s.StringValue = string(js) + return nil +} + +// dynamicMessage implements protobuf.Message but is not a normal generated message type. +// It provides implementations of JSONPBMarshaler and JSONPBUnmarshaler for JSON support. 
+type dynamicMessage struct { + rawJson string `protobuf:"bytes,1,opt,name=rawJson"` +} + +func (m *dynamicMessage) Reset() { + m.rawJson = "{}" +} + +func (m *dynamicMessage) String() string { + return m.rawJson +} + +func (m *dynamicMessage) ProtoMessage() { +} + +func (m *dynamicMessage) MarshalJSONPB(jm *Marshaler) ([]byte, error) { + return []byte(m.rawJson), nil +} + +func (m *dynamicMessage) UnmarshalJSONPB(jum *Unmarshaler, js []byte) error { + m.rawJson = string(js) + return nil +} diff --git a/vendor/github.com/gogo/protobuf/plugin/compare/compare.go b/vendor/github.com/gogo/protobuf/plugin/compare/compare.go index f861f7944..97d0a4a9a 100644 --- a/vendor/github.com/gogo/protobuf/plugin/compare/compare.go +++ b/vendor/github.com/gogo/protobuf/plugin/compare/compare.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -510,7 +512,7 @@ func (p *plugin) generateMessage(file *generator.FileDescriptor, message *genera p.In() p.generateMsgNullAndTypeCheck(ccTypeName) - vanity.TurnOffNullableForNativeTypesWithoutDefaultsOnly(field) + vanity.TurnOffNullableForNativeTypes(field) p.generateField(file, message, field) p.P(`return 0`) diff --git a/vendor/github.com/gogo/protobuf/plugin/compare/comparetest.go b/vendor/github.com/gogo/protobuf/plugin/compare/comparetest.go index db7157179..4fbdbc633 100644 --- a/vendor/github.com/gogo/protobuf/plugin/compare/comparetest.go +++ b/vendor/github.com/gogo/protobuf/plugin/compare/comparetest.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -46,6 +48,7 @@ func (p *test) Generate(imports generator.PluginImports, file *generator.FileDes timePkg := imports.NewImport("time") testingPkg := imports.NewImport("testing") protoPkg := imports.NewImport("github.com/gogo/protobuf/proto") + unsafePkg := imports.NewImport("unsafe") if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { protoPkg = imports.NewImport("github.com/golang/protobuf/proto") } @@ -60,18 +63,28 @@ func (p *test) Generate(imports generator.PluginImports, file *generator.FileDes if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { used = true + hasUnsafe := gogoproto.IsUnsafeMarshaler(file.FileDescriptorProto, message.DescriptorProto) || + gogoproto.IsUnsafeUnmarshaler(file.FileDescriptorProto, message.DescriptorProto) p.P(`func Test`, ccTypeName, `Compare(t *`, testingPkg.Use(), `.T) {`) p.In() + if hasUnsafe { + p.P(`var bigendian uint32 = 0x01020304`) + p.P(`if *(*byte)(`, unsafePkg.Use(), `.Pointer(&bigendian)) == 1 {`) + p.In() + p.P(`t.Skip("unsafe does not work on big endian architectures")`) + p.Out() + p.P(`}`) + } p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(`, timePkg.Use(), `.Now().UnixNano()))`) p.P(`p := NewPopulated`, ccTypeName, `(popr, false)`) - p.P(`data, err := `, protoPkg.Use(), `.Marshal(p)`) + p.P(`dAtA, err := `, protoPkg.Use(), `.Marshal(p)`) p.P(`if err != nil {`) p.In() p.P(`panic(err)`) p.Out() p.P(`}`) p.P(`msg := &`, ccTypeName, `{}`) - p.P(`if err := `, protoPkg.Use(), `.Unmarshal(data, msg); err != nil {`) + p.P(`if err := `, protoPkg.Use(), `.Unmarshal(dAtA, msg); err != nil {`) p.In() p.P(`panic(err)`) p.Out() diff --git a/vendor/github.com/gogo/protobuf/plugin/defaultcheck/defaultcheck.go b/vendor/github.com/gogo/protobuf/plugin/defaultcheck/defaultcheck.go index 690ad0df3..486f28771 100644 --- 
a/vendor/github.com/gogo/protobuf/plugin/defaultcheck/defaultcheck.go +++ b/vendor/github.com/gogo/protobuf/plugin/defaultcheck/defaultcheck.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/gogo/protobuf/plugin/description/description.go b/vendor/github.com/gogo/protobuf/plugin/description/description.go index a2f08aa07..f72efba61 100644 --- a/vendor/github.com/gogo/protobuf/plugin/description/description.go +++ b/vendor/github.com/gogo/protobuf/plugin/description/description.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/gogo/protobuf/plugin/description/descriptiontest.go b/vendor/github.com/gogo/protobuf/plugin/description/descriptiontest.go index 7feaf3209..babcd311d 100644 --- a/vendor/github.com/gogo/protobuf/plugin/description/descriptiontest.go +++ b/vendor/github.com/gogo/protobuf/plugin/description/descriptiontest.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/gogo/protobuf/plugin/embedcheck/embedcheck.go b/vendor/github.com/gogo/protobuf/plugin/embedcheck/embedcheck.go index af8fd9681..1cb77cacb 100644 --- a/vendor/github.com/gogo/protobuf/plugin/embedcheck/embedcheck.go +++ b/vendor/github.com/gogo/protobuf/plugin/embedcheck/embedcheck.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/gogo/protobuf/plugin/enumstringer/enumstringer.go b/vendor/github.com/gogo/protobuf/plugin/enumstringer/enumstringer.go index 7feb8be18..04d6e547f 100644 --- a/vendor/github.com/gogo/protobuf/plugin/enumstringer/enumstringer.go +++ b/vendor/github.com/gogo/protobuf/plugin/enumstringer/enumstringer.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -74,7 +76,7 @@ func (p *enumstringer) Generate(file *generator.FileDescriptor) { continue } if gogoproto.IsGoEnumStringer(file.FileDescriptorProto, enum.EnumDescriptorProto) { - panic("old enum string method needs to be disabled, please use gogoproto.old_enum_stringer or gogoproto.old_enum_string_all and set it to false") + panic("Go enum stringer conflicts with new enumstringer plugin: please use gogoproto.goproto_enum_stringer or gogoproto.goproto_enum_string_all and set it to false") } p.atleastOne = true ccTypeName := generator.CamelCaseSlice(enum.TypeName()) diff --git a/vendor/github.com/gogo/protobuf/plugin/equal/equal.go b/vendor/github.com/gogo/protobuf/plugin/equal/equal.go index e0b004146..41a2c9704 100644 --- a/vendor/github.com/gogo/protobuf/plugin/equal/equal.go +++ b/vendor/github.com/gogo/protobuf/plugin/equal/equal.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -105,10 +107,7 @@ given to the equal plugin, will generate the following code: func (this *B) Equal(that interface{}) bool { if that == nil { - if this == nil { - return true - } - return false + return this == nil } that1, ok := that.(*B) @@ -116,10 +115,7 @@ given to the equal plugin, will generate the following code: return false } if that1 == nil { - if this == nil { - return true - } - return false + return this == nil } else if this == nil { return false } @@ -145,12 +141,12 @@ and the following test code: func TestBVerboseEqual(t *testing8.T) { popr := math_rand8.New(math_rand8.NewSource(time8.Now().UnixNano())) p := NewPopulatedB(popr, false) - data, err := github_com_gogo_protobuf_proto2.Marshal(p) + dAtA, err := github_com_gogo_protobuf_proto2.Marshal(p) if err != nil { panic(err) } msg := &B{} - if err := github_com_gogo_protobuf_proto2.Unmarshal(data, msg); err != nil { + if err := github_com_gogo_protobuf_proto2.Unmarshal(dAtA, msg); err != nil { panic(err) } if err := p.VerboseEqual(msg); err != nil { @@ -234,19 +230,15 @@ func (p *plugin) generateNullableField(fieldname string, verbose bool) { func (p *plugin) generateMsgNullAndTypeCheck(ccTypeName string, verbose bool) { p.P(`if that == nil {`) p.In() - p.P(`if this == nil {`) - p.In() if verbose { + p.P(`if this == nil {`) + p.In() p.P(`return nil`) - } else { - p.P(`return true`) - } - p.Out() - p.P(`}`) - if verbose { + p.Out() + p.P(`}`) p.P(`return `, p.fmtPkg.Use(), `.Errorf("that == nil && this != nil")`) } else { - p.P(`return false`) + p.P(`return this == nil`) } p.Out() p.P(`}`) @@ -272,19 +264,15 @@ func (p *plugin) generateMsgNullAndTypeCheck(ccTypeName string, verbose bool) { p.P(`}`) p.P(`if that1 == nil {`) p.In() - p.P(`if this == nil {`) - p.In() if verbose { + p.P(`if this == nil {`) + p.In() p.P(`return nil`) - } else { - p.P(`return true`) - } - p.Out() - p.P(`}`) - if 
verbose { + p.Out() + p.P(`}`) p.P(`return `, p.fmtPkg.Use(), `.Errorf("that is type *`, ccTypeName, ` but is nil && this != nil")`) } else { - p.P(`return false`) + p.P(`return this == nil`) } p.Out() p.P(`} else if this == nil {`) @@ -304,9 +292,11 @@ func (p *plugin) generateField(file *generator.FileDescriptor, message *generato repeated := field.IsRepeated() ctype := gogoproto.IsCustomType(field) nullable := gogoproto.IsNullable(field) + isDuration := gogoproto.IsStdDuration(field) + isTimestamp := gogoproto.IsStdTime(field) // oneof := field.OneofIndex != nil if !repeated { - if ctype { + if ctype || isTimestamp { if nullable { p.P(`if that1.`, fieldname, ` == nil {`) p.In() @@ -332,6 +322,20 @@ func (p *plugin) generateField(file *generator.FileDescriptor, message *generato } p.Out() p.P(`}`) + } else if isDuration { + if nullable { + p.generateNullableField(fieldname, verbose) + } else { + p.P(`if this.`, fieldname, ` != that1.`, fieldname, `{`) + } + p.In() + if verbose { + p.P(`return `, p.fmtPkg.Use(), `.Errorf("`, fieldname, ` this(%v) Not Equal that(%v)", this.`, fieldname, `, that1.`, fieldname, `)`) + } else { + p.P(`return false`) + } + p.Out() + p.P(`}`) } else { if field.IsMessage() || p.IsGroup(field) { if nullable { @@ -375,8 +379,20 @@ func (p *plugin) generateField(file *generator.FileDescriptor, message *generato p.P(`}`) p.P(`for i := range this.`, fieldname, ` {`) p.In() - if ctype { + if ctype && !p.IsMap(field) { p.P(`if !this.`, fieldname, `[i].Equal(that1.`, fieldname, `[i]) {`) + } else if isTimestamp { + if nullable { + p.P(`if !this.`, fieldname, `[i].Equal(*that1.`, fieldname, `[i]) {`) + } else { + p.P(`if !this.`, fieldname, `[i].Equal(that1.`, fieldname, `[i]) {`) + } + } else if isDuration { + if nullable { + p.P(`if dthis, dthat := this.`, fieldname, `[i], that1.`, fieldname, `[i]; (dthis != nil && dthat != nil && *dthis != *dthat) || (dthis != nil && dthat == nil) || (dthis == nil && dthat != nil) {`) + } else { + p.P(`if 
this.`, fieldname, `[i] != that1.`, fieldname, `[i] {`) + } } else { if p.IsMap(field) { m := p.GoMapType(nil, field) @@ -406,7 +422,15 @@ func (p *plugin) generateField(file *generator.FileDescriptor, message *generato } } } else if mapValue.IsBytes() { - p.P(`if !`, p.bytesPkg.Use(), `.Equal(this.`, fieldname, `[i], that1.`, fieldname, `[i]) {`) + if ctype { + if nullable { + p.P(`if !this.`, fieldname, `[i].Equal(*that1.`, fieldname, `[i]) { //nullable`) + } else { + p.P(`if !this.`, fieldname, `[i].Equal(that1.`, fieldname, `[i]) { //not nullable`) + } + } else { + p.P(`if !`, p.bytesPkg.Use(), `.Equal(this.`, fieldname, `[i], that1.`, fieldname, `[i]) {`) + } } else if mapValue.IsString() { p.P(`if this.`, fieldname, `[i] != that1.`, fieldname, `[i] {`) } else { @@ -589,7 +613,7 @@ func (p *plugin) generateMessage(file *generator.FileDescriptor, message *genera p.In() p.generateMsgNullAndTypeCheck(ccTypeName, verbose) - vanity.TurnOffNullableForNativeTypesWithoutDefaultsOnly(field) + vanity.TurnOffNullableForNativeTypes(field) p.generateField(file, message, field, verbose) if verbose { diff --git a/vendor/github.com/gogo/protobuf/plugin/equal/equaltest.go b/vendor/github.com/gogo/protobuf/plugin/equal/equaltest.go index 2fec83551..1233647a5 100644 --- a/vendor/github.com/gogo/protobuf/plugin/equal/equaltest.go +++ b/vendor/github.com/gogo/protobuf/plugin/equal/equaltest.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -46,6 +48,7 @@ func (p *test) Generate(imports generator.PluginImports, file *generator.FileDes timePkg := imports.NewImport("time") testingPkg := imports.NewImport("testing") protoPkg := imports.NewImport("github.com/gogo/protobuf/proto") + unsafePkg := imports.NewImport("unsafe") if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { protoPkg = imports.NewImport("github.com/golang/protobuf/proto") } @@ -60,18 +63,30 @@ func (p *test) Generate(imports generator.PluginImports, file *generator.FileDes if gogoproto.HasTestGen(file.FileDescriptorProto, message.DescriptorProto) { used = true + hasUnsafe := gogoproto.IsUnsafeMarshaler(file.FileDescriptorProto, message.DescriptorProto) || + gogoproto.IsUnsafeUnmarshaler(file.FileDescriptorProto, message.DescriptorProto) p.P(`func Test`, ccTypeName, `VerboseEqual(t *`, testingPkg.Use(), `.T) {`) p.In() + if hasUnsafe { + if hasUnsafe { + p.P(`var bigendian uint32 = 0x01020304`) + p.P(`if *(*byte)(`, unsafePkg.Use(), `.Pointer(&bigendian)) == 1 {`) + p.In() + p.P(`t.Skip("unsafe does not work on big endian architectures")`) + p.Out() + p.P(`}`) + } + } p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(`, timePkg.Use(), `.Now().UnixNano()))`) p.P(`p := NewPopulated`, ccTypeName, `(popr, false)`) - p.P(`data, err := `, protoPkg.Use(), `.Marshal(p)`) + p.P(`dAtA, err := `, protoPkg.Use(), `.Marshal(p)`) p.P(`if err != nil {`) p.In() p.P(`panic(err)`) p.Out() p.P(`}`) p.P(`msg := &`, ccTypeName, `{}`) - p.P(`if err := `, protoPkg.Use(), `.Unmarshal(data, msg); err != nil {`) + p.P(`if err := `, protoPkg.Use(), `.Unmarshal(dAtA, msg); err != nil {`) p.In() p.P(`panic(err)`) p.Out() diff --git a/vendor/github.com/gogo/protobuf/plugin/face/face.go b/vendor/github.com/gogo/protobuf/plugin/face/face.go index 3c0c25b3e..a02934526 100644 --- a/vendor/github.com/gogo/protobuf/plugin/face/face.go +++ 
b/vendor/github.com/gogo/protobuf/plugin/face/face.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/gogo/protobuf/plugin/face/facetest.go b/vendor/github.com/gogo/protobuf/plugin/face/facetest.go index 305e092ef..467cc0a66 100644 --- a/vendor/github.com/gogo/protobuf/plugin/face/facetest.go +++ b/vendor/github.com/gogo/protobuf/plugin/face/facetest.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/gogo/protobuf/plugin/gostring/gostring.go b/vendor/github.com/gogo/protobuf/plugin/gostring/gostring.go index cb38af9ec..2b439469f 100644 --- a/vendor/github.com/gogo/protobuf/plugin/gostring/gostring.go +++ b/vendor/github.com/gogo/protobuf/plugin/gostring/gostring.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -95,10 +97,13 @@ not print their values, while the generated GoString method will always print al package gostring import ( - "github.com/gogo/protobuf/gogoproto" - "github.com/gogo/protobuf/protoc-gen-gogo/generator" + "fmt" + "os" "strconv" "strings" + + "github.com/gogo/protobuf/gogoproto" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" ) type gostring struct { @@ -106,6 +111,7 @@ type gostring struct { generator.PluginImports atleastOne bool localName string + overwrite bool } func NewGoString() *gostring { @@ -116,6 +122,10 @@ func (p *gostring) Name() string { return "gostring" } +func (p *gostring) Overwrite() { + p.overwrite = true +} + func (p *gostring) Init(g *generator.Generator) { p.Generator = g } @@ -138,8 +148,9 @@ func (p *gostring) Generate(file *generator.FileDescriptor) { reflectPkg := p.NewImport("reflect") sortKeysPkg := p.NewImport("github.com/gogo/protobuf/sortkeys") + extensionToGoStringUsed := false for _, message := range file.Messages() { - if !gogoproto.HasGoString(file.FileDescriptorProto, message.DescriptorProto) { + if !p.overwrite && !gogoproto.HasGoString(file.FileDescriptorProto, message.DescriptorProto) { continue } if message.DescriptorProto.GetOptions().GetMapEntry() { @@ -214,13 +225,27 @@ func (p *gostring) Generate(file *generator.FileDescriptor) { p.P(`s = append(s, "`, fieldname, `: " + `, mapName, `+ ",\n")`) p.Out() p.P(`}`) - } else if field.IsMessage() || p.IsGroup(field) { + } else if (field.IsMessage() && !gogoproto.IsCustomType(field) && !gogoproto.IsStdTime(field) && !gogoproto.IsStdDuration(field)) || p.IsGroup(field) { if nullable || repeated { p.P(`if this.`, fieldname, ` != nil {`) p.In() } - if nullable || repeated { + if nullable { p.P(`s = append(s, "`, fieldname, `: " + `, fmtPkg.Use(), `.Sprintf("%#v", this.`, fieldname, `) + ",\n")`) + } else if repeated { + if nullable { + p.P(`s = 
append(s, "`, fieldname, `: " + `, fmtPkg.Use(), `.Sprintf("%#v", this.`, fieldname, `) + ",\n")`) + } else { + goTyp, _ := p.GoType(message, field) + goTyp = strings.Replace(goTyp, "[]", "", 1) + p.P("vs := make([]*", goTyp, ", len(this.", fieldname, "))") + p.P("for i := range vs {") + p.In() + p.P("vs[i] = &this.", fieldname, "[i]") + p.Out() + p.P("}") + p.P(`s = append(s, "`, fieldname, `: " + `, fmtPkg.Use(), `.Sprintf("%#v", vs) + ",\n")`) + } } else { p.P(`s = append(s, "`, fieldname, `: " + `, stringsPkg.Use(), `.Replace(this.`, fieldname, `.GoString()`, ",`&`,``,1)", ` + ",\n")`) } @@ -236,7 +261,7 @@ func (p *gostring) Generate(file *generator.FileDescriptor) { if field.IsEnum() { if nullable && !repeated && !proto3 { goTyp, _ := p.GoType(message, field) - p.P(`s = append(s, "`, fieldname, `: " + valueToGoString`, p.localName, `(this.`, fieldname, `,"`, packageName, ".", generator.GoTypeToName(goTyp), `"`, `) + ",\n")`) + p.P(`s = append(s, "`, fieldname, `: " + valueToGoString`, p.localName, `(this.`, fieldname, `,"`, generator.GoTypeToName(goTyp), `"`, `) + ",\n")`) } else { p.P(`s = append(s, "`, fieldname, `: " + `, fmtPkg.Use(), `.Sprintf("%#v", this.`, fieldname, `) + ",\n")`) } @@ -257,6 +282,7 @@ func (p *gostring) Generate(file *generator.FileDescriptor) { if message.DescriptorProto.HasExtension() { if gogoproto.HasExtensionsMap(file.FileDescriptorProto, message.DescriptorProto) { p.P(`s = append(s, "XXX_InternalExtensions: " + extensionToGoString`, p.localName, `(this) + ",\n")`) + extensionToGoStringUsed = true } else { p.P(`if this.XXX_extensions != nil {`) p.In() @@ -274,7 +300,6 @@ func (p *gostring) Generate(file *generator.FileDescriptor) { } p.P(`s = append(s, "}")`) - //outStr += strings.Join([]string{" + `}`", `}`, `,", "`, ")"}, "") p.P(`return `, stringsPkg.Use(), `.Join(s, "")`) p.Out() p.P(`}`) @@ -293,20 +318,15 @@ func (p *gostring) Generate(file *generator.FileDescriptor) { p.P(`return "nil"`) p.Out() p.P(`}`) - outFlds := 
[]string{} fieldname := p.GetOneOfFieldName(message, field) - if field.IsMessage() || p.IsGroup(field) { - tmp := strings.Join([]string{"`", fieldname, ":` + "}, "") - tmp += strings.Join([]string{fmtPkg.Use(), `.Sprintf("%#v", this.`, fieldname, `)`}, "") - outFlds = append(outFlds, tmp) - } else { - tmp := strings.Join([]string{"`", fieldname, ":` + "}, "") - tmp += strings.Join([]string{fmtPkg.Use(), `.Sprintf("%#v", this.`, fieldname, ")"}, "") - outFlds = append(outFlds, tmp) - } - outStr := strings.Join([]string{"s := ", stringsPkg.Use(), ".Join([]string{`&", packageName, ".", ccTypeName, "{` + \n"}, "") - outStr += strings.Join(outFlds, ",\n") - outStr += strings.Join([]string{" + `}`", `}`, `,", "`, ")"}, "") + outStr := strings.Join([]string{ + "s := ", + stringsPkg.Use(), ".Join([]string{`&", packageName, ".", ccTypeName, "{` + \n", + "`", fieldname, ":` + ", fmtPkg.Use(), `.Sprintf("%#v", this.`, fieldname, `)`, + " + `}`", + `}`, + `,", "`, + `)`}, "") p.P(outStr) p.P(`return s`) p.Out() @@ -331,29 +351,34 @@ func (p *gostring) Generate(file *generator.FileDescriptor) { p.Out() p.P(`}`) - p.P(`func extensionToGoString`, p.localName, `(m `, protoPkg.Use(), `.Message) string {`) - p.In() - p.P(`e := `, protoPkg.Use(), `.GetUnsafeExtensionsMap(m)`) - p.P(`if e == nil { return "nil" }`) - p.P(`s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"`) - p.P(`keys := make([]int, 0, len(e))`) - p.P(`for k := range e {`) - p.In() - p.P(`keys = append(keys, int(k))`) - p.Out() - p.P(`}`) - p.P(sortPkg.Use(), `.Ints(keys)`) - p.P(`ss := []string{}`) - p.P(`for _, k := range keys {`) - p.In() - p.P(`ss = append(ss, `, strconvPkg.Use(), `.Itoa(k) + ": " + e[int32(k)].GoString())`) - p.Out() - p.P(`}`) - p.P(`s+=`, stringsPkg.Use(), `.Join(ss, ",") + "})"`) - p.P(`return s`) - p.Out() - p.P(`}`) - + if extensionToGoStringUsed { + if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { + fmt.Fprintf(os.Stderr, "The GoString plugin for messages with 
extensions requires importing gogoprotobuf. Please see file %s", file.GetName()) + os.Exit(1) + } + p.P(`func extensionToGoString`, p.localName, `(m `, protoPkg.Use(), `.Message) string {`) + p.In() + p.P(`e := `, protoPkg.Use(), `.GetUnsafeExtensionsMap(m)`) + p.P(`if e == nil { return "nil" }`) + p.P(`s := "proto.NewUnsafeXXX_InternalExtensions(map[int32]proto.Extension{"`) + p.P(`keys := make([]int, 0, len(e))`) + p.P(`for k := range e {`) + p.In() + p.P(`keys = append(keys, int(k))`) + p.Out() + p.P(`}`) + p.P(sortPkg.Use(), `.Ints(keys)`) + p.P(`ss := []string{}`) + p.P(`for _, k := range keys {`) + p.In() + p.P(`ss = append(ss, `, strconvPkg.Use(), `.Itoa(k) + ": " + e[int32(k)].GoString())`) + p.Out() + p.P(`}`) + p.P(`s+=`, stringsPkg.Use(), `.Join(ss, ",") + "})"`) + p.P(`return s`) + p.Out() + p.P(`}`) + } } func init() { diff --git a/vendor/github.com/gogo/protobuf/plugin/gostring/gostringtest.go b/vendor/github.com/gogo/protobuf/plugin/gostring/gostringtest.go index 539774905..c790e5908 100644 --- a/vendor/github.com/gogo/protobuf/plugin/gostring/gostringtest.go +++ b/vendor/github.com/gogo/protobuf/plugin/gostring/gostringtest.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -72,7 +74,7 @@ func (p *test) Generate(imports generator.PluginImports, file *generator.FileDes p.P(`_, err := `, parserPkg.Use(), `.ParseExpr(s1)`) p.P(`if err != nil {`) p.In() - p.P(`panic(err)`) + p.P(`t.Fatal(err)`) p.Out() p.P(`}`) p.Out() diff --git a/vendor/github.com/gogo/protobuf/plugin/marshalto/marshalto.go b/vendor/github.com/gogo/protobuf/plugin/marshalto/marshalto.go index 82f7b1ffd..24110cb44 100644 --- a/vendor/github.com/gogo/protobuf/plugin/marshalto/marshalto.go +++ b/vendor/github.com/gogo/protobuf/plugin/marshalto/marshalto.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -73,35 +75,35 @@ message B { given to the marshalto plugin, will generate the following code: - func (m *B) Marshal() (data []byte, err error) { + func (m *B) Marshal() (dAtA []byte, err error) { size := m.Size() - data = make([]byte, size) - n, err := m.MarshalTo(data) + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) if err != nil { return nil, err } - return data[:n], nil + return dAtA[:n], nil } - func (m *B) MarshalTo(data []byte) (int, error) { + func (m *B) MarshalTo(dAtA []byte) (int, error) { var i int _ = i var l int _ = l - data[i] = 0xa + dAtA[i] = 0xa i++ - i = encodeVarintExample(data, i, uint64(m.A.Size())) - n2, err := m.A.MarshalTo(data[i:]) + i = encodeVarintExample(dAtA, i, uint64(m.A.Size())) + n2, err := m.A.MarshalTo(dAtA[i:]) if err != nil { return 0, err } i += n2 if len(m.G) > 0 { for _, msg := range m.G { - data[i] = 0x12 + dAtA[i] = 0x12 i++ - i = encodeVarintExample(data, i, uint64(msg.Size())) - n, err := msg.MarshalTo(data[i:]) + i = encodeVarintExample(dAtA, i, 
uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) if err != nil { return 0, err } @@ -109,7 +111,7 @@ given to the marshalto plugin, will generate the following code: } } if m.XXX_unrecognized != nil { - i += copy(data[i:], m.XXX_unrecognized) + i += copy(dAtA[i:], m.XXX_unrecognized) } return i, nil } @@ -170,27 +172,20 @@ type marshalto struct { *generator.Generator generator.PluginImports atleastOne bool - unsafePkg generator.Single errorsPkg generator.Single protoPkg generator.Single sortKeysPkg generator.Single mathPkg generator.Single + typesPkg generator.Single + binaryPkg generator.Single localName string - unsafe bool } func NewMarshal() *marshalto { return &marshalto{} } -func NewUnsafeMarshal() *marshalto { - return &marshalto{unsafe: true} -} - func (p *marshalto) Name() string { - if p.unsafe { - return "unsafemarshaler" - } return "marshalto" } @@ -199,69 +194,31 @@ func (p *marshalto) Init(g *generator.Generator) { } func (p *marshalto) callFixed64(varName ...string) { - p.P(`i = encodeFixed64`, p.localName, `(data, i, uint64(`, strings.Join(varName, ""), `))`) + p.P(p.binaryPkg.Use(), `.LittleEndian.PutUint64(dAtA[i:], uint64(`, strings.Join(varName, ""), `))`) + p.P(`i += 8`) } func (p *marshalto) callFixed32(varName ...string) { - p.P(`i = encodeFixed32`, p.localName, `(data, i, uint32(`, strings.Join(varName, ""), `))`) + p.P(p.binaryPkg.Use(), `.LittleEndian.PutUint32(dAtA[i:], uint32(`, strings.Join(varName, ""), `))`) + p.P(`i += 4`) } func (p *marshalto) callVarint(varName ...string) { - p.P(`i = encodeVarint`, p.localName, `(data, i, uint64(`, strings.Join(varName, ""), `))`) + p.P(`i = encodeVarint`, p.localName, `(dAtA, i, uint64(`, strings.Join(varName, ""), `))`) } func (p *marshalto) encodeVarint(varName string) { p.P(`for `, varName, ` >= 1<<7 {`) p.In() - p.P(`data[i] = uint8(uint64(`, varName, `)&0x7f|0x80)`) + p.P(`dAtA[i] = uint8(uint64(`, varName, `)&0x7f|0x80)`) p.P(varName, ` >>= 7`) p.P(`i++`) p.Out() p.P(`}`) - 
p.P(`data[i] = uint8(`, varName, `)`) + p.P(`dAtA[i] = uint8(`, varName, `)`) p.P(`i++`) } -func (p *marshalto) encodeFixed64(varName string) { - p.P(`data[i] = uint8(`, varName, `)`) - p.P(`i++`) - p.P(`data[i] = uint8(`, varName, ` >> 8)`) - p.P(`i++`) - p.P(`data[i] = uint8(`, varName, ` >> 16)`) - p.P(`i++`) - p.P(`data[i] = uint8(`, varName, ` >> 24)`) - p.P(`i++`) - p.P(`data[i] = uint8(`, varName, ` >> 32)`) - p.P(`i++`) - p.P(`data[i] = uint8(`, varName, ` >> 40)`) - p.P(`i++`) - p.P(`data[i] = uint8(`, varName, ` >> 48)`) - p.P(`i++`) - p.P(`data[i] = uint8(`, varName, ` >> 56)`) - p.P(`i++`) -} - -func (p *marshalto) unsafeFixed64(varName string, someType string) { - p.P(`*(*`, someType, `)(`, p.unsafePkg.Use(), `.Pointer(&data[i])) = `, varName) - p.P(`i+=8`) -} - -func (p *marshalto) encodeFixed32(varName string) { - p.P(`data[i] = uint8(`, varName, `)`) - p.P(`i++`) - p.P(`data[i] = uint8(`, varName, ` >> 8)`) - p.P(`i++`) - p.P(`data[i] = uint8(`, varName, ` >> 16)`) - p.P(`i++`) - p.P(`data[i] = uint8(`, varName, ` >> 24)`) - p.P(`i++`) -} - -func (p *marshalto) unsafeFixed32(varName string, someType string) { - p.P(`*(*`, someType, `)(`, p.unsafePkg.Use(), `.Pointer(&data[i])) = `, varName) - p.P(`i+=4`) -} - func (p *marshalto) encodeKey(fieldNumber int32, wireType int) { x := uint32(fieldNumber)<<3 | uint32(wireType) i := 0 @@ -272,7 +229,7 @@ func (p *marshalto) encodeKey(fieldNumber int32, wireType int) { } keybuf = append(keybuf, uint8(x)) for _, b := range keybuf { - p.P(`data[i] = `, fmt.Sprintf("%#v", b)) + p.P(`dAtA[i] = `, fmt.Sprintf("%#v", b)) p.P(`i++`) } } @@ -307,8 +264,8 @@ func wireToType(wire string) int { panic("unreachable") } -func (p *marshalto) mapField(numGen NumGen, fieldTyp descriptor.FieldDescriptorProto_Type, varName string, protoSizer bool) { - switch fieldTyp { +func (p *marshalto) mapField(numGen NumGen, field *descriptor.FieldDescriptorProto, kvField *descriptor.FieldDescriptorProto, varName string, protoSizer bool) { 
+ switch kvField.GetType() { case descriptor.FieldDescriptorProto_TYPE_DOUBLE: p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(`, varName, `))`) case descriptor.FieldDescriptorProto_TYPE_FLOAT: @@ -328,29 +285,47 @@ func (p *marshalto) mapField(numGen NumGen, fieldTyp descriptor.FieldDescriptorP case descriptor.FieldDescriptorProto_TYPE_BOOL: p.P(`if `, varName, ` {`) p.In() - p.P(`data[i] = 1`) + p.P(`dAtA[i] = 1`) p.Out() p.P(`} else {`) p.In() - p.P(`data[i] = 0`) + p.P(`dAtA[i] = 0`) p.Out() p.P(`}`) p.P(`i++`) case descriptor.FieldDescriptorProto_TYPE_STRING, descriptor.FieldDescriptorProto_TYPE_BYTES: - p.callVarint(`len(`, varName, `)`) - p.P(`i+=copy(data[i:], `, varName, `)`) + if gogoproto.IsCustomType(field) && kvField.IsBytes() { + p.callVarint(varName, `.Size()`) + p.P(`n`, numGen.Next(), `, err := `, varName, `.MarshalTo(dAtA[i:])`) + p.P(`if err != nil {`) + p.In() + p.P(`return 0, err`) + p.Out() + p.P(`}`) + p.P(`i+=n`, numGen.Current()) + } else { + p.callVarint(`len(`, varName, `)`) + p.P(`i+=copy(dAtA[i:], `, varName, `)`) + } case descriptor.FieldDescriptorProto_TYPE_SINT32: p.callVarint(`(uint32(`, varName, `) << 1) ^ uint32((`, varName, ` >> 31))`) case descriptor.FieldDescriptorProto_TYPE_SINT64: p.callVarint(`(uint64(`, varName, `) << 1) ^ uint64((`, varName, ` >> 63))`) case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - if protoSizer { + if gogoproto.IsStdTime(field) { + p.callVarint(p.typesPkg.Use(), `.SizeOfStdTime(*`, varName, `)`) + p.P(`n`, numGen.Next(), `, err := `, p.typesPkg.Use(), `.StdTimeMarshalTo(*`, varName, `, dAtA[i:])`) + } else if gogoproto.IsStdDuration(field) { + p.callVarint(p.typesPkg.Use(), `.SizeOfStdDuration(*`, varName, `)`) + p.P(`n`, numGen.Next(), `, err := `, p.typesPkg.Use(), `.StdDurationMarshalTo(*`, varName, `, dAtA[i:])`) + } else if protoSizer { p.callVarint(varName, `.ProtoSize()`) + p.P(`n`, numGen.Next(), `, err := `, varName, `.MarshalTo(dAtA[i:])`) } else { p.callVarint(varName, `.Size()`) + 
p.P(`n`, numGen.Next(), `, err := `, varName, `.MarshalTo(dAtA[i:])`) } - p.P(`n`, numGen.Next(), `, err := `, varName, `.MarshalTo(data[i:])`) p.P(`if err != nil {`) p.In() p.P(`return 0, err`) @@ -399,7 +374,7 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi p.P(`if m.`, fieldname, ` != nil {`) p.In() } - packed := field.IsPacked() + packed := field.IsPacked() || (proto3 && field.IsPacked3()) wireType := field.WireType() fieldNumber := field.GetNumber() if packed { @@ -407,132 +382,68 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi } switch *field.Type { case descriptor.FieldDescriptorProto_TYPE_DOUBLE: - if !p.unsafe || gogoproto.IsCastType(field) { - if packed { - p.encodeKey(fieldNumber, wireType) - p.callVarint(`len(m.`, fieldname, `) * 8`) - p.P(`for _, num := range m.`, fieldname, ` {`) - p.In() - p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float64bits(float64(num))`) - p.encodeFixed64("f" + numGen.Current()) - p.Out() - p.P(`}`) - } else if repeated { - p.P(`for _, num := range m.`, fieldname, ` {`) - p.In() - p.encodeKey(fieldNumber, wireType) - p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float64bits(float64(num))`) - p.encodeFixed64("f" + numGen.Current()) - p.Out() - p.P(`}`) - } else if proto3 { - p.P(`if m.`, fieldname, ` != 0 {`) - p.In() - p.encodeKey(fieldNumber, wireType) - p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(m.`+fieldname, `))`) - p.Out() - p.P(`}`) - } else if !nullable { - p.encodeKey(fieldNumber, wireType) - p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(m.`+fieldname, `))`) - } else { - p.encodeKey(fieldNumber, wireType) - p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(*m.`+fieldname, `))`) - } + if packed { + p.encodeKey(fieldNumber, wireType) + p.callVarint(`len(m.`, fieldname, `) * 8`) + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float64bits(float64(num))`) + 
p.callFixed64("f" + numGen.Current()) + p.Out() + p.P(`}`) + } else if repeated { + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float64bits(float64(num))`) + p.callFixed64("f" + numGen.Current()) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(m.`+fieldname, `))`) + p.Out() + p.P(`}`) + } else if !nullable { + p.encodeKey(fieldNumber, wireType) + p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(m.`+fieldname, `))`) } else { - if packed { - p.encodeKey(fieldNumber, wireType) - p.callVarint(`len(m.`, fieldname, `) * 8`) - p.P(`for _, num := range m.`, fieldname, ` {`) - p.In() - p.unsafeFixed64("num", "float64") - p.Out() - p.P(`}`) - } else if repeated { - p.P(`for _, num := range m.`, fieldname, ` {`) - p.In() - p.encodeKey(fieldNumber, wireType) - p.unsafeFixed64("num", "float64") - p.Out() - p.P(`}`) - } else if proto3 { - p.P(`if m.`, fieldname, ` != 0 {`) - p.In() - p.encodeKey(fieldNumber, wireType) - p.unsafeFixed64(`m.`+fieldname, "float64") - p.Out() - p.P(`}`) - } else if !nullable { - p.encodeKey(fieldNumber, wireType) - p.unsafeFixed64(`m.`+fieldname, "float64") - } else { - p.encodeKey(fieldNumber, wireType) - p.unsafeFixed64(`*m.`+fieldname, `float64`) - } + p.encodeKey(fieldNumber, wireType) + p.callFixed64(p.mathPkg.Use(), `.Float64bits(float64(*m.`+fieldname, `))`) } case descriptor.FieldDescriptorProto_TYPE_FLOAT: - if !p.unsafe || gogoproto.IsCastType(field) { - if packed { - p.encodeKey(fieldNumber, wireType) - p.callVarint(`len(m.`, fieldname, `) * 4`) - p.P(`for _, num := range m.`, fieldname, ` {`) - p.In() - p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float32bits(float32(num))`) - p.encodeFixed32("f" + numGen.Current()) - p.Out() - p.P(`}`) - } else if repeated { - p.P(`for _, num := range m.`, 
fieldname, ` {`) - p.In() - p.encodeKey(fieldNumber, wireType) - p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float32bits(float32(num))`) - p.encodeFixed32("f" + numGen.Current()) - p.Out() - p.P(`}`) - } else if proto3 { - p.P(`if m.`, fieldname, ` != 0 {`) - p.In() - p.encodeKey(fieldNumber, wireType) - p.callFixed32(p.mathPkg.Use(), `.Float32bits(float32(m.`+fieldname, `))`) - p.Out() - p.P(`}`) - } else if !nullable { - p.encodeKey(fieldNumber, wireType) - p.callFixed32(p.mathPkg.Use(), `.Float32bits(float32(m.`+fieldname, `))`) - } else { - p.encodeKey(fieldNumber, wireType) - p.callFixed32(p.mathPkg.Use(), `.Float32bits(float32(*m.`+fieldname, `))`) - } + if packed { + p.encodeKey(fieldNumber, wireType) + p.callVarint(`len(m.`, fieldname, `) * 4`) + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float32bits(float32(num))`) + p.callFixed32("f" + numGen.Current()) + p.Out() + p.P(`}`) + } else if repeated { + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.P(`f`, numGen.Next(), ` := `, p.mathPkg.Use(), `.Float32bits(float32(num))`) + p.callFixed32("f" + numGen.Current()) + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.callFixed32(p.mathPkg.Use(), `.Float32bits(float32(m.`+fieldname, `))`) + p.Out() + p.P(`}`) + } else if !nullable { + p.encodeKey(fieldNumber, wireType) + p.callFixed32(p.mathPkg.Use(), `.Float32bits(float32(m.`+fieldname, `))`) } else { - if packed { - p.encodeKey(fieldNumber, wireType) - p.callVarint(`len(m.`, fieldname, `) * 4`) - p.P(`for _, num := range m.`, fieldname, ` {`) - p.In() - p.unsafeFixed32("num", "float32") - p.Out() - p.P(`}`) - } else if repeated { - p.P(`for _, num := range m.`, fieldname, ` {`) - p.In() - p.encodeKey(fieldNumber, wireType) - p.unsafeFixed32("num", "float32") - p.Out() - p.P(`}`) - } else if proto3 { - p.P(`if m.`, 
fieldname, ` != 0 {`) - p.In() - p.encodeKey(fieldNumber, wireType) - p.unsafeFixed32(`m.`+fieldname, `float32`) - p.Out() - p.P(`}`) - } else if !nullable { - p.encodeKey(fieldNumber, wireType) - p.unsafeFixed32(`m.`+fieldname, `float32`) - } else { - p.encodeKey(fieldNumber, wireType) - p.unsafeFixed32(`*m.`+fieldname, "float32") - } + p.encodeKey(fieldNumber, wireType) + p.callFixed32(p.mathPkg.Use(), `.Float32bits(float32(*m.`+fieldname, `))`) } case descriptor.FieldDescriptorProto_TYPE_INT64, descriptor.FieldDescriptorProto_TYPE_UINT64, @@ -541,7 +452,7 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi descriptor.FieldDescriptorProto_TYPE_ENUM: if packed { jvar := "j" + numGen.Next() - p.P(`data`, numGen.Next(), ` := make([]byte, len(m.`, fieldname, `)*10)`) + p.P(`dAtA`, numGen.Next(), ` := make([]byte, len(m.`, fieldname, `)*10)`) p.P(`var `, jvar, ` int`) if *field.Type == descriptor.FieldDescriptorProto_TYPE_INT64 || *field.Type == descriptor.FieldDescriptorProto_TYPE_INT32 { @@ -554,18 +465,18 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi } p.P(`for num >= 1<<7 {`) p.In() - p.P(`data`, numGen.Current(), `[`, jvar, `] = uint8(uint64(num)&0x7f|0x80)`) + p.P(`dAtA`, numGen.Current(), `[`, jvar, `] = uint8(uint64(num)&0x7f|0x80)`) p.P(`num >>= 7`) p.P(jvar, `++`) p.Out() p.P(`}`) - p.P(`data`, numGen.Current(), `[`, jvar, `] = uint8(num)`) + p.P(`dAtA`, numGen.Current(), `[`, jvar, `] = uint8(num)`) p.P(jvar, `++`) p.Out() p.P(`}`) p.encodeKey(fieldNumber, wireType) p.callVarint(jvar) - p.P(`i += copy(data[i:], data`, numGen.Current(), `[:`, jvar, `])`) + p.P(`i += copy(dAtA[i:], dAtA`, numGen.Current(), `[:`, jvar, `])`) } else if repeated { p.P(`for _, num := range m.`, fieldname, ` {`) p.In() @@ -589,137 +500,65 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi } case descriptor.FieldDescriptorProto_TYPE_FIXED64, 
descriptor.FieldDescriptorProto_TYPE_SFIXED64: - if !p.unsafe { - if packed { - p.encodeKey(fieldNumber, wireType) - p.callVarint(`len(m.`, fieldname, `) * 8`) - p.P(`for _, num := range m.`, fieldname, ` {`) - p.In() - p.encodeFixed64("num") - p.Out() - p.P(`}`) - } else if repeated { - p.P(`for _, num := range m.`, fieldname, ` {`) - p.In() - p.encodeKey(fieldNumber, wireType) - p.encodeFixed64("num") - p.Out() - p.P(`}`) - } else if proto3 { - p.P(`if m.`, fieldname, ` != 0 {`) - p.In() - p.encodeKey(fieldNumber, wireType) - p.callFixed64("m." + fieldname) - p.Out() - p.P(`}`) - } else if !nullable { - p.encodeKey(fieldNumber, wireType) - p.callFixed64("m." + fieldname) - } else { - p.encodeKey(fieldNumber, wireType) - p.callFixed64("*m." + fieldname) - } + if packed { + p.encodeKey(fieldNumber, wireType) + p.callVarint(`len(m.`, fieldname, `) * 8`) + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.callFixed64("num") + p.Out() + p.P(`}`) + } else if repeated { + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.callFixed64("num") + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.callFixed64("m." + fieldname) + p.Out() + p.P(`}`) + } else if !nullable { + p.encodeKey(fieldNumber, wireType) + p.callFixed64("m." 
+ fieldname) } else { - typeName := "int64" - if *field.Type == descriptor.FieldDescriptorProto_TYPE_FIXED64 { - typeName = "uint64" - } - if packed { - p.encodeKey(fieldNumber, wireType) - p.callVarint(`len(m.`, fieldname, `) * 8`) - p.P(`for _, num := range m.`, fieldname, ` {`) - p.In() - p.unsafeFixed64("num", typeName) - p.Out() - p.P(`}`) - } else if repeated { - p.P(`for _, num := range m.`, fieldname, ` {`) - p.In() - p.encodeKey(fieldNumber, wireType) - p.unsafeFixed64("num", typeName) - p.Out() - p.P(`}`) - } else if proto3 { - p.P(`if m.`, fieldname, ` != 0 {`) - p.In() - p.encodeKey(fieldNumber, wireType) - p.unsafeFixed64("m."+fieldname, typeName) - p.Out() - p.P(`}`) - } else if !nullable { - p.encodeKey(fieldNumber, wireType) - p.unsafeFixed64("m."+fieldname, typeName) - } else { - p.encodeKey(fieldNumber, wireType) - p.unsafeFixed64("*m."+fieldname, typeName) - } + p.encodeKey(fieldNumber, wireType) + p.callFixed64("*m." + fieldname) } case descriptor.FieldDescriptorProto_TYPE_FIXED32, descriptor.FieldDescriptorProto_TYPE_SFIXED32: - if !p.unsafe { - if packed { - p.encodeKey(fieldNumber, wireType) - p.callVarint(`len(m.`, fieldname, `) * 4`) - p.P(`for _, num := range m.`, fieldname, ` {`) - p.In() - p.encodeFixed32("num") - p.Out() - p.P(`}`) - } else if repeated { - p.P(`for _, num := range m.`, fieldname, ` {`) - p.In() - p.encodeKey(fieldNumber, wireType) - p.encodeFixed32("num") - p.Out() - p.P(`}`) - } else if proto3 { - p.P(`if m.`, fieldname, ` != 0 {`) - p.In() - p.encodeKey(fieldNumber, wireType) - p.callFixed32("m." + fieldname) - p.Out() - p.P(`}`) - } else if !nullable { - p.encodeKey(fieldNumber, wireType) - p.callFixed32("m." + fieldname) - } else { - p.encodeKey(fieldNumber, wireType) - p.callFixed32("*m." 
+ fieldname) - } + if packed { + p.encodeKey(fieldNumber, wireType) + p.callVarint(`len(m.`, fieldname, `) * 4`) + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.callFixed32("num") + p.Out() + p.P(`}`) + } else if repeated { + p.P(`for _, num := range m.`, fieldname, ` {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.callFixed32("num") + p.Out() + p.P(`}`) + } else if proto3 { + p.P(`if m.`, fieldname, ` != 0 {`) + p.In() + p.encodeKey(fieldNumber, wireType) + p.callFixed32("m." + fieldname) + p.Out() + p.P(`}`) + } else if !nullable { + p.encodeKey(fieldNumber, wireType) + p.callFixed32("m." + fieldname) } else { - typeName := "int32" - if *field.Type == descriptor.FieldDescriptorProto_TYPE_FIXED32 { - typeName = "uint32" - } - if packed { - p.encodeKey(fieldNumber, wireType) - p.callVarint(`len(m.`, fieldname, `) * 4`) - p.P(`for _, num := range m.`, fieldname, ` {`) - p.In() - p.unsafeFixed32("num", typeName) - p.Out() - p.P(`}`) - } else if repeated { - p.P(`for _, num := range m.`, fieldname, ` {`) - p.In() - p.encodeKey(fieldNumber, wireType) - p.unsafeFixed32("num", typeName) - p.Out() - p.P(`}`) - } else if proto3 { - p.P(`if m.`, fieldname, ` != 0 {`) - p.In() - p.encodeKey(fieldNumber, wireType) - p.unsafeFixed32("m."+fieldname, typeName) - p.Out() - p.P(`}`) - } else if !nullable { - p.encodeKey(fieldNumber, wireType) - p.unsafeFixed32("m."+fieldname, typeName) - } else { - p.encodeKey(fieldNumber, wireType) - p.unsafeFixed32("*m."+fieldname, typeName) - } + p.encodeKey(fieldNumber, wireType) + p.callFixed32("*m." 
+ fieldname) } case descriptor.FieldDescriptorProto_TYPE_BOOL: if packed { @@ -729,11 +568,11 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi p.In() p.P(`if b {`) p.In() - p.P(`data[i] = 1`) + p.P(`dAtA[i] = 1`) p.Out() p.P(`} else {`) p.In() - p.P(`data[i] = 0`) + p.P(`dAtA[i] = 0`) p.Out() p.P(`}`) p.P(`i++`) @@ -745,11 +584,11 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi p.encodeKey(fieldNumber, wireType) p.P(`if b {`) p.In() - p.P(`data[i] = 1`) + p.P(`dAtA[i] = 1`) p.Out() p.P(`} else {`) p.In() - p.P(`data[i] = 0`) + p.P(`dAtA[i] = 0`) p.Out() p.P(`}`) p.P(`i++`) @@ -761,11 +600,11 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi p.encodeKey(fieldNumber, wireType) p.P(`if m.`, fieldname, ` {`) p.In() - p.P(`data[i] = 1`) + p.P(`dAtA[i] = 1`) p.Out() p.P(`} else {`) p.In() - p.P(`data[i] = 0`) + p.P(`dAtA[i] = 0`) p.Out() p.P(`}`) p.P(`i++`) @@ -775,11 +614,11 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi p.encodeKey(fieldNumber, wireType) p.P(`if m.`, fieldname, ` {`) p.In() - p.P(`data[i] = 1`) + p.P(`dAtA[i] = 1`) p.Out() p.P(`} else {`) p.In() - p.P(`data[i] = 0`) + p.P(`dAtA[i] = 0`) p.Out() p.P(`}`) p.P(`i++`) @@ -787,11 +626,11 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi p.encodeKey(fieldNumber, wireType) p.P(`if *m.`, fieldname, ` {`) p.In() - p.P(`data[i] = 1`) + p.P(`dAtA[i] = 1`) p.Out() p.P(`} else {`) p.In() - p.P(`data[i] = 0`) + p.P(`dAtA[i] = 0`) p.Out() p.P(`}`) p.P(`i++`) @@ -803,7 +642,7 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi p.encodeKey(fieldNumber, wireType) p.P(`l = len(s)`) p.encodeVarint("l") - p.P(`i+=copy(data[i:], s)`) + p.P(`i+=copy(dAtA[i:], s)`) p.Out() p.P(`}`) } else if proto3 { @@ -811,17 +650,17 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi p.In() 
p.encodeKey(fieldNumber, wireType) p.callVarint(`len(m.`, fieldname, `)`) - p.P(`i+=copy(data[i:], m.`, fieldname, `)`) + p.P(`i+=copy(dAtA[i:], m.`, fieldname, `)`) p.Out() p.P(`}`) } else if !nullable { p.encodeKey(fieldNumber, wireType) p.callVarint(`len(m.`, fieldname, `)`) - p.P(`i+=copy(data[i:], m.`, fieldname, `)`) + p.P(`i+=copy(dAtA[i:], m.`, fieldname, `)`) } else { p.encodeKey(fieldNumber, wireType) p.callVarint(`len(*m.`, fieldname, `)`) - p.P(`i+=copy(data[i:], *m.`, fieldname, `)`) + p.P(`i+=copy(dAtA[i:], *m.`, fieldname, `)`) } case descriptor.FieldDescriptorProto_TYPE_GROUP: panic(fmt.Errorf("marshaler does not support group %v", fieldname)) @@ -885,39 +724,65 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi p.P(`v := m.`, fieldname, `[k]`) } accessor := `v` - sum = append(sum, strconv.Itoa(valueKeySize)) switch m.ValueField.GetType() { case descriptor.FieldDescriptorProto_TYPE_DOUBLE, descriptor.FieldDescriptorProto_TYPE_FIXED64, descriptor.FieldDescriptorProto_TYPE_SFIXED64: + sum = append(sum, strconv.Itoa(valueKeySize)) sum = append(sum, strconv.Itoa(8)) case descriptor.FieldDescriptorProto_TYPE_FLOAT, descriptor.FieldDescriptorProto_TYPE_FIXED32, descriptor.FieldDescriptorProto_TYPE_SFIXED32: + sum = append(sum, strconv.Itoa(valueKeySize)) sum = append(sum, strconv.Itoa(4)) case descriptor.FieldDescriptorProto_TYPE_INT64, descriptor.FieldDescriptorProto_TYPE_UINT64, descriptor.FieldDescriptorProto_TYPE_UINT32, descriptor.FieldDescriptorProto_TYPE_ENUM, descriptor.FieldDescriptorProto_TYPE_INT32: + sum = append(sum, strconv.Itoa(valueKeySize)) sum = append(sum, `sov`+p.localName+`(uint64(v))`) case descriptor.FieldDescriptorProto_TYPE_BOOL: + sum = append(sum, strconv.Itoa(valueKeySize)) sum = append(sum, `1`) - case descriptor.FieldDescriptorProto_TYPE_STRING, - descriptor.FieldDescriptorProto_TYPE_BYTES: + case descriptor.FieldDescriptorProto_TYPE_STRING: + sum = append(sum, strconv.Itoa(valueKeySize)) sum 
= append(sum, `len(v)+sov`+p.localName+`(uint64(len(v)))`) - case descriptor.FieldDescriptorProto_TYPE_SINT32, - descriptor.FieldDescriptorProto_TYPE_SINT64: - sum = append(sum, `soz`+p.localName+`(uint64(v))`) - case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - if nullable { - p.P(`if v == nil {`) + case descriptor.FieldDescriptorProto_TYPE_BYTES: + if gogoproto.IsCustomType(field) { + p.P(`cSize := 0`) + if gogoproto.IsNullable(field) { + p.P(`if `, accessor, ` != nil {`) + p.In() + } + p.P(`cSize = `, accessor, `.Size()`) + p.P(`cSize += `, strconv.Itoa(valueKeySize), ` + sov`+p.localName+`(uint64(cSize))`) + if gogoproto.IsNullable(field) { + p.Out() + p.P(`}`) + } + sum = append(sum, `cSize`) + } else { + p.P(`byteSize := 0`) + if proto3 { + p.P(`if len(v) > 0 {`) + } else { + p.P(`if v != nil {`) + } p.In() - p.P(`return 0, `, p.errorsPkg.Use(), `.New("proto: map has nil element")`) + p.P(`byteSize = `, strconv.Itoa(valueKeySize), ` + len(v)+sov`+p.localName+`(uint64(len(v)))`) p.Out() p.P(`}`) + sum = append(sum, `byteSize`) } - if valuegoTyp != valuegoAliasTyp { + case descriptor.FieldDescriptorProto_TYPE_SINT32, + descriptor.FieldDescriptorProto_TYPE_SINT64: + sum = append(sum, strconv.Itoa(valueKeySize)) + sum = append(sum, `soz`+p.localName+`(uint64(v))`) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + if valuegoTyp != valuegoAliasTyp && + !gogoproto.IsStdTime(field) && + !gogoproto.IsStdDuration(field) { if nullable { // cast back to the type that has the generated methods on it accessor = `((` + valuegoTyp + `)(` + accessor + `))` @@ -927,31 +792,73 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi } else if !nullable { accessor = `(&v)` } - if protoSizer { - p.P(`msgSize := `, accessor, `.ProtoSize()`) + p.P(`msgSize := 0`) + p.P(`if `, accessor, ` != nil {`) + p.In() + if gogoproto.IsStdTime(field) { + p.P(`msgSize = `, p.typesPkg.Use(), `.SizeOfStdTime(*`, accessor, `)`) + } else if 
gogoproto.IsStdDuration(field) { + p.P(`msgSize = `, p.typesPkg.Use(), `.SizeOfStdDuration(*`, accessor, `)`) + } else if protoSizer { + p.P(`msgSize = `, accessor, `.ProtoSize()`) } else { - p.P(`msgSize := `, accessor, `.Size()`) + p.P(`msgSize = `, accessor, `.Size()`) } - sum = append(sum, `msgSize + sov`+p.localName+`(uint64(msgSize))`) + p.P(`msgSize += `, strconv.Itoa(valueKeySize), ` + sov`+p.localName+`(uint64(msgSize))`) + p.Out() + p.P(`}`) + sum = append(sum, `msgSize`) } p.P(`mapSize := `, strings.Join(sum, " + ")) p.callVarint("mapSize") p.encodeKey(1, wireToType(keywire)) - p.mapField(numGen, m.KeyField.GetType(), "k", protoSizer) + p.mapField(numGen, field, m.KeyField, "k", protoSizer) + nullableMsg := nullable && (m.ValueField.GetType() == descriptor.FieldDescriptorProto_TYPE_MESSAGE || + gogoproto.IsCustomType(field) && m.ValueField.IsBytes()) + plainBytes := m.ValueField.IsBytes() && !gogoproto.IsCustomType(field) + if nullableMsg { + p.P(`if `, accessor, ` != nil { `) + p.In() + } else if plainBytes { + if proto3 { + p.P(`if len(`, accessor, `) > 0 {`) + } else { + p.P(`if `, accessor, ` != nil {`) + } + p.In() + } p.encodeKey(2, wireToType(valuewire)) - p.mapField(numGen, m.ValueField.GetType(), accessor, protoSizer) + p.mapField(numGen, field, m.ValueField, accessor, protoSizer) + if nullableMsg || plainBytes { + p.Out() + p.P(`}`) + } p.Out() p.P(`}`) } else if repeated { p.P(`for _, msg := range m.`, fieldname, ` {`) p.In() p.encodeKey(fieldNumber, wireType) - if protoSizer { - p.callVarint("msg.ProtoSize()") + varName := "msg" + if gogoproto.IsStdTime(field) { + if gogoproto.IsNullable(field) { + varName = "*" + varName + } + p.callVarint(p.typesPkg.Use(), `.SizeOfStdTime(`, varName, `)`) + p.P(`n, err := `, p.typesPkg.Use(), `.StdTimeMarshalTo(`, varName, `, dAtA[i:])`) + } else if gogoproto.IsStdDuration(field) { + if gogoproto.IsNullable(field) { + varName = "*" + varName + } + p.callVarint(p.typesPkg.Use(), `.SizeOfStdDuration(`, 
varName, `)`) + p.P(`n, err := `, p.typesPkg.Use(), `.StdDurationMarshalTo(`, varName, `, dAtA[i:])`) + } else if protoSizer { + p.callVarint(varName, ".ProtoSize()") + p.P(`n, err := `, varName, `.MarshalTo(dAtA[i:])`) } else { - p.callVarint("msg.Size()") + p.callVarint(varName, ".Size()") + p.P(`n, err := `, varName, `.MarshalTo(dAtA[i:])`) } - p.P(`n, err := msg.MarshalTo(data[i:])`) p.P(`if err != nil {`) p.In() p.P(`return 0, err`) @@ -962,12 +869,26 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi p.P(`}`) } else { p.encodeKey(fieldNumber, wireType) - if protoSizer { - p.callVarint(`m.`, fieldname, `.ProtoSize()`) + varName := `m.` + fieldname + if gogoproto.IsStdTime(field) { + if gogoproto.IsNullable(field) { + varName = "*" + varName + } + p.callVarint(p.typesPkg.Use(), `.SizeOfStdTime(`, varName, `)`) + p.P(`n`, numGen.Next(), `, err := `, p.typesPkg.Use(), `.StdTimeMarshalTo(`, varName, `, dAtA[i:])`) + } else if gogoproto.IsStdDuration(field) { + if gogoproto.IsNullable(field) { + varName = "*" + varName + } + p.callVarint(p.typesPkg.Use(), `.SizeOfStdDuration(`, varName, `)`) + p.P(`n`, numGen.Next(), `, err := `, p.typesPkg.Use(), `.StdDurationMarshalTo(`, varName, `, dAtA[i:])`) + } else if protoSizer { + p.callVarint(varName, `.ProtoSize()`) + p.P(`n`, numGen.Next(), `, err := `, varName, `.MarshalTo(dAtA[i:])`) } else { - p.callVarint(`m.`, fieldname, `.Size()`) + p.callVarint(varName, `.Size()`) + p.P(`n`, numGen.Next(), `, err := `, varName, `.MarshalTo(dAtA[i:])`) } - p.P(`n`, numGen.Next(), `, err := m.`, fieldname, `.MarshalTo(data[i:])`) p.P(`if err != nil {`) p.In() p.P(`return 0, err`) @@ -982,7 +903,7 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi p.In() p.encodeKey(fieldNumber, wireType) p.callVarint("len(b)") - p.P(`i+=copy(data[i:], b)`) + p.P(`i+=copy(dAtA[i:], b)`) p.Out() p.P(`}`) } else if proto3 { @@ -990,13 +911,13 @@ func (p *marshalto) generateField(proto3 
bool, numGen NumGen, file *generator.Fi p.In() p.encodeKey(fieldNumber, wireType) p.callVarint(`len(m.`, fieldname, `)`) - p.P(`i+=copy(data[i:], m.`, fieldname, `)`) + p.P(`i+=copy(dAtA[i:], m.`, fieldname, `)`) p.Out() p.P(`}`) } else { p.encodeKey(fieldNumber, wireType) p.callVarint(`len(m.`, fieldname, `)`) - p.P(`i+=copy(data[i:], m.`, fieldname, `)`) + p.P(`i+=copy(dAtA[i:], m.`, fieldname, `)`) } } else { if repeated { @@ -1008,7 +929,7 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi } else { p.callVarint(`msg.Size()`) } - p.P(`n, err := msg.MarshalTo(data[i:])`) + p.P(`n, err := msg.MarshalTo(dAtA[i:])`) p.P(`if err != nil {`) p.In() p.P(`return 0, err`) @@ -1024,7 +945,7 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi } else { p.callVarint(`m.`, fieldname, `.Size()`) } - p.P(`n`, numGen.Next(), `, err := m.`, fieldname, `.MarshalTo(data[i:])`) + p.P(`n`, numGen.Next(), `, err := m.`, fieldname, `.MarshalTo(dAtA[i:])`) p.P(`if err != nil {`) p.In() p.P(`return 0, err`) @@ -1035,7 +956,7 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi } case descriptor.FieldDescriptorProto_TYPE_SINT32: if packed { - datavar := "data" + numGen.Next() + datavar := "dAtA" + numGen.Next() jvar := "j" + numGen.Next() p.P(datavar, ` := make([]byte, len(m.`, fieldname, ")*5)") p.P(`var `, jvar, ` int`) @@ -1056,7 +977,7 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi p.P(`}`) p.encodeKey(fieldNumber, wireType) p.callVarint(jvar) - p.P(`i+=copy(data[i:], `, datavar, `[:`, jvar, `])`) + p.P(`i+=copy(dAtA[i:], `, datavar, `[:`, jvar, `])`) } else if repeated { p.P(`for _, num := range m.`, fieldname, ` {`) p.In() @@ -1083,7 +1004,7 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi if packed { jvar := "j" + numGen.Next() xvar := "x" + numGen.Next() - datavar := "data" + numGen.Next() + datavar := "dAtA" + 
numGen.Next() p.P(`var `, jvar, ` int`) p.P(datavar, ` := make([]byte, len(m.`, fieldname, `)*10)`) p.P(`for _, num := range m.`, fieldname, ` {`) @@ -1102,7 +1023,7 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi p.P(`}`) p.encodeKey(fieldNumber, wireType) p.callVarint(jvar) - p.P(`i+=copy(data[i:], `, datavar, `[:`, jvar, `])`) + p.P(`i+=copy(dAtA[i:], `, datavar, `[:`, jvar, `])`) } else if repeated { p.P(`for _, num := range m.`, fieldname, ` {`) p.In() @@ -1137,6 +1058,7 @@ func (p *marshalto) generateField(proto3 bool, numGen NumGen, file *generator.Fi func (p *marshalto) Generate(file *generator.FileDescriptor) { numGen := NewNumGen() p.PluginImports = generator.NewPluginImports(p.Generator) + p.atleastOne = false p.localName = generator.FileName(file) @@ -1146,51 +1068,40 @@ func (p *marshalto) Generate(file *generator.FileDescriptor) { if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { p.protoPkg = p.NewImport("github.com/golang/protobuf/proto") } - p.unsafePkg = p.NewImport("unsafe") p.errorsPkg = p.NewImport("errors") + p.binaryPkg = p.NewImport("encoding/binary") + p.typesPkg = p.NewImport("github.com/gogo/protobuf/types") for _, message := range file.Messages() { if message.DescriptorProto.GetOptions().GetMapEntry() { continue } ccTypeName := generator.CamelCaseSlice(message.TypeName()) - if p.unsafe { - if !gogoproto.IsUnsafeMarshaler(file.FileDescriptorProto, message.DescriptorProto) { - continue - } - if gogoproto.IsMarshaler(file.FileDescriptorProto, message.DescriptorProto) { - panic(fmt.Sprintf("unsafe_marshaler and marshalto enabled for %v", ccTypeName)) - } - } - if !p.unsafe { - if !gogoproto.IsMarshaler(file.FileDescriptorProto, message.DescriptorProto) { - continue - } - if gogoproto.IsUnsafeMarshaler(file.FileDescriptorProto, message.DescriptorProto) { - panic(fmt.Sprintf("unsafe_marshaler and marshalto enabled for %v", ccTypeName)) - } + if !gogoproto.IsMarshaler(file.FileDescriptorProto, 
message.DescriptorProto) && + !gogoproto.IsUnsafeMarshaler(file.FileDescriptorProto, message.DescriptorProto) { + continue } p.atleastOne = true - p.P(`func (m *`, ccTypeName, `) Marshal() (data []byte, err error) {`) + p.P(`func (m *`, ccTypeName, `) Marshal() (dAtA []byte, err error) {`) p.In() if gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) { p.P(`size := m.ProtoSize()`) } else { p.P(`size := m.Size()`) } - p.P(`data = make([]byte, size)`) - p.P(`n, err := m.MarshalTo(data)`) + p.P(`dAtA = make([]byte, size)`) + p.P(`n, err := m.MarshalTo(dAtA)`) p.P(`if err != nil {`) p.In() p.P(`return nil, err`) p.Out() p.P(`}`) - p.P(`return data[:n], nil`) + p.P(`return dAtA[:n], nil`) p.Out() p.P(`}`) p.P(``) - p.P(`func (m *`, ccTypeName, `) MarshalTo(data []byte) (int, error) {`) + p.P(`func (m *`, ccTypeName, `) MarshalTo(dAtA []byte) (int, error) {`) p.In() p.P(`var i int`) p.P(`_ = i`) @@ -1210,7 +1121,7 @@ func (p *marshalto) Generate(file *generator.FileDescriptor) { oneofs[fieldname] = struct{}{} p.P(`if m.`, fieldname, ` != nil {`) p.In() - p.P(`nn`, numGen.Next(), `, err := m.`, fieldname, `.MarshalTo(data[i:])`) + p.P(`nn`, numGen.Next(), `, err := m.`, fieldname, `.MarshalTo(dAtA[i:])`) p.P(`if err != nil {`) p.In() p.P(`return 0, err`) @@ -1224,7 +1135,7 @@ func (p *marshalto) Generate(file *generator.FileDescriptor) { } if message.DescriptorProto.HasExtension() { if gogoproto.HasExtensionsMap(file.FileDescriptorProto, message.DescriptorProto) { - p.P(`n, err := `, p.protoPkg.Use(), `.EncodeInternalExtension(m, data[i:])`) + p.P(`n, err := `, p.protoPkg.Use(), `.EncodeInternalExtension(m, dAtA[i:])`) p.P(`if err != nil {`) p.In() p.P(`return 0, err`) @@ -1234,7 +1145,7 @@ func (p *marshalto) Generate(file *generator.FileDescriptor) { } else { p.P(`if m.XXX_extensions != nil {`) p.In() - p.P(`i+=copy(data[i:], m.XXX_extensions)`) + p.P(`i+=copy(dAtA[i:], m.XXX_extensions)`) p.Out() p.P(`}`) } @@ -1242,7 +1153,7 @@ func (p 
*marshalto) Generate(file *generator.FileDescriptor) { if gogoproto.HasUnrecognized(file.FileDescriptorProto, message.DescriptorProto) { p.P(`if m.XXX_unrecognized != nil {`) p.In() - p.P(`i+=copy(data[i:], m.XXX_unrecognized)`) + p.P(`i+=copy(dAtA[i:], m.XXX_unrecognized)`) p.Out() p.P(`}`) } @@ -1260,10 +1171,10 @@ func (p *marshalto) Generate(file *generator.FileDescriptor) { continue } ccTypeName := p.OneOfTypeName(message, field) - p.P(`func (m *`, ccTypeName, `) MarshalTo(data []byte) (int, error) {`) + p.P(`func (m *`, ccTypeName, `) MarshalTo(dAtA []byte) (int, error) {`) p.In() p.P(`i := 0`) - vanity.TurnOffNullableForNativeTypesWithoutDefaultsOnly(field) + vanity.TurnOffNullableForNativeTypes(field) p.generateField(false, numGen, file, message, field) p.P(`return i, nil`) p.Out() @@ -1272,40 +1183,16 @@ func (p *marshalto) Generate(file *generator.FileDescriptor) { } if p.atleastOne { - p.P(`func encodeFixed64`, p.localName, `(data []byte, offset int, v uint64) int {`) - p.In() - p.P(`data[offset] = uint8(v)`) - p.P(`data[offset+1] = uint8(v >> 8)`) - p.P(`data[offset+2] = uint8(v >> 16)`) - p.P(`data[offset+3] = uint8(v >> 24)`) - p.P(`data[offset+4] = uint8(v >> 32)`) - p.P(`data[offset+5] = uint8(v >> 40)`) - p.P(`data[offset+6] = uint8(v >> 48)`) - p.P(`data[offset+7] = uint8(v >> 56)`) - p.P(`return offset+8`) - p.Out() - p.P(`}`) - - p.P(`func encodeFixed32`, p.localName, `(data []byte, offset int, v uint32) int {`) - p.In() - p.P(`data[offset] = uint8(v)`) - p.P(`data[offset+1] = uint8(v >> 8)`) - p.P(`data[offset+2] = uint8(v >> 16)`) - p.P(`data[offset+3] = uint8(v >> 24)`) - p.P(`return offset+4`) - p.Out() - p.P(`}`) - - p.P(`func encodeVarint`, p.localName, `(data []byte, offset int, v uint64) int {`) + p.P(`func encodeVarint`, p.localName, `(dAtA []byte, offset int, v uint64) int {`) p.In() p.P(`for v >= 1<<7 {`) p.In() - p.P(`data[offset] = uint8(v&0x7f|0x80)`) + p.P(`dAtA[offset] = uint8(v&0x7f|0x80)`) p.P(`v >>= 7`) p.P(`offset++`) p.Out() 
p.P(`}`) - p.P(`data[offset] = uint8(v)`) + p.P(`dAtA[offset] = uint8(v)`) p.P(`return offset+1`) p.Out() p.P(`}`) @@ -1315,5 +1202,4 @@ func (p *marshalto) Generate(file *generator.FileDescriptor) { func init() { generator.RegisterPlugin(NewMarshal()) - generator.RegisterPlugin(NewUnsafeMarshal()) } diff --git a/vendor/github.com/gogo/protobuf/plugin/oneofcheck/oneofcheck.go b/vendor/github.com/gogo/protobuf/plugin/oneofcheck/oneofcheck.go index cd0d19a77..0f822e8a8 100644 --- a/vendor/github.com/gogo/protobuf/plugin/oneofcheck/oneofcheck.go +++ b/vendor/github.com/gogo/protobuf/plugin/oneofcheck/oneofcheck.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/gogo/protobuf/plugin/populate/populate.go b/vendor/github.com/gogo/protobuf/plugin/populate/populate.go index c95d9f28e..cf61fe9b0 100644 --- a/vendor/github.com/gogo/protobuf/plugin/populate/populate.go +++ b/vendor/github.com/gogo/protobuf/plugin/populate/populate.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -83,14 +85,15 @@ package populate import ( "fmt" + "math" + "strconv" + "strings" + "github.com/gogo/protobuf/gogoproto" "github.com/gogo/protobuf/proto" descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" "github.com/gogo/protobuf/protoc-gen-gogo/generator" "github.com/gogo/protobuf/vanity" - "math" - "strconv" - "strings" ) type VarGen interface { @@ -121,6 +124,7 @@ type plugin struct { varGen VarGen atleastOne bool localName string + typesPkg generator.Single } func NewPlugin() *plugin { @@ -178,7 +182,7 @@ func negative(fieldType descriptor.FieldDescriptorProto_Type) bool { return true } -func getFuncName(goTypName string) string { +func (p *plugin) getFuncName(goTypName string) string { funcName := "NewPopulated" + goTypName goTypNames := strings.Split(goTypName, ".") if len(goTypNames) == 2 { @@ -186,17 +190,23 @@ func getFuncName(goTypName string) string { } else if len(goTypNames) != 1 { panic(fmt.Errorf("unreachable: too many dots in %v", goTypName)) } + switch funcName { + case "time.NewPopulatedTime": + funcName = p.typesPkg.Use() + ".NewPopulatedStdTime" + case "time.NewPopulatedDuration": + funcName = p.typesPkg.Use() + ".NewPopulatedStdDuration" + } return funcName } -func getFuncCall(goTypName string) string { - funcName := getFuncName(goTypName) +func (p *plugin) getFuncCall(goTypName string) string { + funcName := p.getFuncName(goTypName) funcCall := funcName + "(r, easy)" return funcCall } -func getCustomFuncCall(goTypName string) string { - funcName := getFuncName(goTypName) +func (p *plugin) getCustomFuncCall(goTypName string) string { + funcName := p.getFuncName(goTypName) funcCall := funcName + "(r)" return funcCall } @@ -246,10 +256,17 @@ func (p *plugin) GenerateField(file *generator.FileDescriptor, message *generato if keygoAliasTyp != keygoTyp { keyval = keygoAliasTyp + `(` + keyval + `)` } - if 
m.ValueField.IsMessage() || p.IsGroup(field) { + if m.ValueField.IsMessage() || p.IsGroup(field) || + (m.ValueField.IsBytes() && gogoproto.IsCustomType(field)) { s := `this.` + fieldname + `[` + keyval + `] = ` - goTypName = generator.GoTypeToName(valuegoTyp) - funcCall := getFuncCall(goTypName) + if gogoproto.IsStdTime(field) || gogoproto.IsStdDuration(field) { + valuegoTyp = valuegoAliasTyp + } + funcCall := p.getCustomFuncCall(goTypName) + if !gogoproto.IsCustomType(field) { + goTypName = generator.GoTypeToName(valuegoTyp) + funcCall = p.getFuncCall(goTypName) + } if !nullable { funcCall = `*` + funcCall } @@ -287,8 +304,25 @@ func (p *plugin) GenerateField(file *generator.FileDescriptor, message *generato } p.Out() p.P(`}`) + } else if gogoproto.IsCustomType(field) { + funcCall := p.getCustomFuncCall(goTypName) + if field.IsRepeated() { + p.P(p.varGen.Next(), ` := r.Intn(10)`) + p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`) + p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) + p.In() + p.P(p.varGen.Next(), `:= `, funcCall) + p.P(`this.`, fieldname, `[i] = *`, p.varGen.Current()) + p.Out() + p.P(`}`) + } else if gogoproto.IsNullable(field) { + p.P(`this.`, fieldname, ` = `, funcCall) + } else { + p.P(p.varGen.Next(), `:= `, funcCall) + p.P(`this.`, fieldname, ` = *`, p.varGen.Current()) + } } else if field.IsMessage() || p.IsGroup(field) { - funcCall := getFuncCall(goTypName) + funcCall := p.getFuncCall(goTypName) if field.IsRepeated() { p.P(p.varGen.Next(), ` := r.Intn(5)`) p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`) @@ -327,23 +361,6 @@ func (p *plugin) GenerateField(file *generator.FileDescriptor, message *generato p.P(p.varGen.Next(), ` := `, val) p.P(`this.`, fieldname, ` = &`, p.varGen.Current()) } - } else if gogoproto.IsCustomType(field) { - funcCall := getCustomFuncCall(goTypName) - if field.IsRepeated() { - p.P(p.varGen.Next(), ` := r.Intn(10)`) - p.P(`this.`, fieldname, ` = make(`, goTyp, 
`, `, p.varGen.Current(), `)`) - p.P(`for i := 0; i < `, p.varGen.Current(), `; i++ {`) - p.In() - p.P(p.varGen.Next(), `:= `, funcCall) - p.P(`this.`, fieldname, `[i] = *`, p.varGen.Current()) - p.Out() - p.P(`}`) - } else if gogoproto.IsNullable(field) { - p.P(`this.`, fieldname, ` = `, funcCall) - } else { - p.P(p.varGen.Next(), `:= `, funcCall) - p.P(`this.`, fieldname, ` = *`, p.varGen.Current()) - } } else if field.IsBytes() { if field.IsRepeated() { p.P(p.varGen.Next(), ` := r.Intn(10)`) @@ -369,7 +386,8 @@ func (p *plugin) GenerateField(file *generator.FileDescriptor, message *generato p.P(`}`) } } else if field.IsString() { - val := fmt.Sprintf("randString%v(r)", p.localName) + typName := generator.GoTypeToName(goTyp) + val := fmt.Sprintf("%s(randString%v(r))", typName, p.localName) if field.IsRepeated() { p.P(p.varGen.Next(), ` := r.Intn(10)`) p.P(`this.`, fieldname, ` = make(`, goTyp, `, `, p.varGen.Current(), `)`) @@ -425,7 +443,7 @@ func (p *plugin) GenerateField(file *generator.FileDescriptor, message *generato } } -func (p *plugin) hasLoop(field *descriptor.FieldDescriptorProto, visited []*generator.Descriptor, excludes []*generator.Descriptor) *generator.Descriptor { +func (p *plugin) hasLoop(pkg string, field *descriptor.FieldDescriptorProto, visited []*generator.Descriptor, excludes []*generator.Descriptor) *generator.Descriptor { if field.IsMessage() || p.IsGroup(field) || p.IsMap(field) { var fieldMessage *generator.Descriptor if p.IsMap(field) { @@ -449,24 +467,27 @@ func (p *plugin) hasLoop(field *descriptor.FieldDescriptorProto, visited []*gene return fieldMessage } } + for _, f := range fieldMessage.Field { - visited = append(visited, fieldMessage) - loopTo := p.hasLoop(f, visited, excludes) - if loopTo != nil { - return loopTo + if strings.HasPrefix(f.GetTypeName(), "."+pkg) { + visited = append(visited, fieldMessage) + loopTo := p.hasLoop(pkg, f, visited, excludes) + if loopTo != nil { + return loopTo + } } } } return nil } -func (p 
*plugin) loops(field *descriptor.FieldDescriptorProto, message *generator.Descriptor) int { +func (p *plugin) loops(pkg string, field *descriptor.FieldDescriptorProto, message *generator.Descriptor) int { //fmt.Fprintf(os.Stderr, "loops %v %v\n", field.GetTypeName(), generator.CamelCaseSlice(message.TypeName())) excludes := []*generator.Descriptor{} loops := 0 for { visited := []*generator.Descriptor{} - loopTo := p.hasLoop(field, visited, excludes) + loopTo := p.hasLoop(pkg, field, visited, excludes) if loopTo == nil { break } @@ -482,7 +503,7 @@ func (p *plugin) Generate(file *generator.FileDescriptor) { p.PluginImports = generator.NewPluginImports(p.Generator) p.varGen = NewVarGen() proto3 := gogoproto.IsProto3(file.FileDescriptorProto) - + p.typesPkg = p.NewImport("github.com/gogo/protobuf/types") p.localName = generator.FileName(file) protoPkg := p.NewImport("github.com/gogo/protobuf/proto") if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { @@ -501,7 +522,7 @@ func (p *plugin) Generate(file *generator.FileDescriptor) { loopLevels := make([]int, len(message.Field)) maxLoopLevel := 0 for i, field := range message.Field { - loopLevels[i] = p.loops(field, message) + loopLevels[i] = p.loops(file.GetPackage(), field, message) if loopLevels[i] > maxLoopLevel { maxLoopLevel = loopLevels[i] } @@ -612,8 +633,8 @@ func (p *plugin) Generate(file *generator.FileDescriptor) { } p.P(`wire := r.Intn(4)`) p.P(`if wire == 3 { wire = 5 }`) - p.P(`data := randField`, p.localName, `(nil, r, fieldNumber, wire)`) - p.P(protoPkg.Use(), `.SetRawExtension(this, int32(fieldNumber), data)`) + p.P(`dAtA := randField`, p.localName, `(nil, r, fieldNumber, wire)`) + p.P(protoPkg.Use(), `.SetRawExtension(this, int32(fieldNumber), dAtA)`) p.Out() p.P(`}`) p.Out() @@ -646,7 +667,7 @@ func (p *plugin) Generate(file *generator.FileDescriptor) { p.P(`func NewPopulated`, ccTypeName, `(r randy`, p.localName, `, easy bool) *`, ccTypeName, ` {`) p.In() p.P(`this := &`, ccTypeName, `{}`) - 
vanity.TurnOffNullableForNativeTypesWithoutDefaultsOnly(f) + vanity.TurnOffNullableForNativeTypes(f) p.GenerateField(file, message, f) p.P(`return this`) p.Out() @@ -698,7 +719,7 @@ func (p *plugin) Generate(file *generator.FileDescriptor) { p.Out() p.P(`}`) - p.P(`func randUnrecognized`, p.localName, `(r randy`, p.localName, `, maxFieldNumber int) (data []byte) {`) + p.P(`func randUnrecognized`, p.localName, `(r randy`, p.localName, `, maxFieldNumber int) (dAtA []byte) {`) p.In() p.P(`l := r.Intn(5)`) p.P(`for i := 0; i < l; i++ {`) @@ -706,64 +727,64 @@ func (p *plugin) Generate(file *generator.FileDescriptor) { p.P(`wire := r.Intn(4)`) p.P(`if wire == 3 { wire = 5 }`) p.P(`fieldNumber := maxFieldNumber + r.Intn(100)`) - p.P(`data = randField`, p.localName, `(data, r, fieldNumber, wire)`) + p.P(`dAtA = randField`, p.localName, `(dAtA, r, fieldNumber, wire)`) p.Out() p.P(`}`) - p.P(`return data`) + p.P(`return dAtA`) p.Out() p.P(`}`) - p.P(`func randField`, p.localName, `(data []byte, r randy`, p.localName, `, fieldNumber int, wire int) []byte {`) + p.P(`func randField`, p.localName, `(dAtA []byte, r randy`, p.localName, `, fieldNumber int, wire int) []byte {`) p.In() p.P(`key := uint32(fieldNumber)<<3 | uint32(wire)`) p.P(`switch wire {`) p.P(`case 0:`) p.In() - p.P(`data = encodeVarintPopulate`, p.localName, `(data, uint64(key))`) + p.P(`dAtA = encodeVarintPopulate`, p.localName, `(dAtA, uint64(key))`) p.P(p.varGen.Next(), ` := r.Int63()`) p.P(`if r.Intn(2) == 0 {`) p.In() p.P(p.varGen.Current(), ` *= -1`) p.Out() p.P(`}`) - p.P(`data = encodeVarintPopulate`, p.localName, `(data, uint64(`, p.varGen.Current(), `))`) + p.P(`dAtA = encodeVarintPopulate`, p.localName, `(dAtA, uint64(`, p.varGen.Current(), `))`) p.Out() p.P(`case 1:`) p.In() - p.P(`data = encodeVarintPopulate`, p.localName, `(data, uint64(key))`) - p.P(`data = append(data, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), 
byte(r.Intn(256)), byte(r.Intn(256)))`) + p.P(`dAtA = encodeVarintPopulate`, p.localName, `(dAtA, uint64(key))`) + p.P(`dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))`) p.Out() p.P(`case 2:`) p.In() - p.P(`data = encodeVarintPopulate`, p.localName, `(data, uint64(key))`) + p.P(`dAtA = encodeVarintPopulate`, p.localName, `(dAtA, uint64(key))`) p.P(`ll := r.Intn(100)`) - p.P(`data = encodeVarintPopulate`, p.localName, `(data, uint64(ll))`) + p.P(`dAtA = encodeVarintPopulate`, p.localName, `(dAtA, uint64(ll))`) p.P(`for j := 0; j < ll; j++ {`) p.In() - p.P(`data = append(data, byte(r.Intn(256)))`) + p.P(`dAtA = append(dAtA, byte(r.Intn(256)))`) p.Out() p.P(`}`) p.Out() p.P(`default:`) p.In() - p.P(`data = encodeVarintPopulate`, p.localName, `(data, uint64(key))`) - p.P(`data = append(data, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))`) + p.P(`dAtA = encodeVarintPopulate`, p.localName, `(dAtA, uint64(key))`) + p.P(`dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)))`) p.Out() p.P(`}`) - p.P(`return data`) + p.P(`return dAtA`) p.Out() p.P(`}`) - p.P(`func encodeVarintPopulate`, p.localName, `(data []byte, v uint64) []byte {`) + p.P(`func encodeVarintPopulate`, p.localName, `(dAtA []byte, v uint64) []byte {`) p.In() p.P(`for v >= 1<<7 {`) p.In() - p.P(`data = append(data, uint8(uint64(v)&0x7f|0x80))`) + p.P(`dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80))`) p.P(`v >>= 7`) p.Out() p.P(`}`) - p.P(`data = append(data, uint8(v))`) - p.P(`return data`) + p.P(`dAtA = append(dAtA, uint8(v))`) + p.P(`return dAtA`) p.Out() p.P(`}`) diff --git a/vendor/github.com/gogo/protobuf/plugin/size/size.go b/vendor/github.com/gogo/protobuf/plugin/size/size.go index ef207bd85..79cd403be 100644 --- a/vendor/github.com/gogo/protobuf/plugin/size/size.go +++ 
b/vendor/github.com/gogo/protobuf/plugin/size/size.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -88,13 +90,13 @@ and the following test code: func TestBSize(t *testing5.T) { popr := math_rand5.New(math_rand5.NewSource(time5.Now().UnixNano())) p := NewPopulatedB(popr, true) - data, err := github_com_gogo_protobuf_proto2.Marshal(p) + dAtA, err := github_com_gogo_protobuf_proto2.Marshal(p) if err != nil { panic(err) } size := p.Size() - if len(data) != size { - t.Fatalf("size %v != marshalled size %v", size, len(data)) + if len(dAtA) != size { + t.Fatalf("size %v != marshalled size %v", size, len(dAtA)) } } @@ -119,6 +121,7 @@ package size import ( "fmt" + "os" "strconv" "strings" @@ -134,6 +137,7 @@ type size struct { generator.PluginImports atleastOne bool localName string + typesPkg generator.Single } func NewSize() *size { @@ -198,6 +202,23 @@ func (p *size) sizeZigZag() { }`) } +func (p *size) std(field *descriptor.FieldDescriptorProto, name string) (string, bool) { + if gogoproto.IsStdTime(field) { + if gogoproto.IsNullable(field) { + return p.typesPkg.Use() + `.SizeOfStdTime(*` + name + `)`, true + } else { + return p.typesPkg.Use() + `.SizeOfStdTime(` + name + `)`, true + } + } else if gogoproto.IsStdDuration(field) { + if gogoproto.IsNullable(field) { + return p.typesPkg.Use() + `.SizeOfStdDuration(*` + name + `)`, true + } else { + return p.typesPkg.Use() + `.SizeOfStdDuration(` + name + `)`, true + } + } + return "", false +} + func (p *size) generateField(proto3 bool, file *generator.FileDescriptor, message *generator.Descriptor, field *descriptor.FieldDescriptorProto, sizeName string) { fieldname := p.GetOneOfFieldName(message, field) nullable := gogoproto.IsNullable(field) @@ -210,7 +231,7 @@ func 
(p *size) generateField(proto3 bool, file *generator.FileDescriptor, messag p.P(`if m.`, fieldname, ` != nil {`) p.In() } - packed := field.IsPacked() + packed := field.IsPacked() || (proto3 && field.IsPacked3()) _, wire := p.GoType(message, field) wireType := wireToType(wire) fieldNumber := field.GetNumber() @@ -367,50 +388,89 @@ func (p *size) generateField(proto3 bool, file *generator.FileDescriptor, messag descriptor.FieldDescriptorProto_TYPE_SINT64: sum = append(sum, `soz`+p.localName+`(uint64(k))`) } - sum = append(sum, strconv.Itoa(valueKeySize)) switch m.ValueField.GetType() { case descriptor.FieldDescriptorProto_TYPE_DOUBLE, descriptor.FieldDescriptorProto_TYPE_FIXED64, descriptor.FieldDescriptorProto_TYPE_SFIXED64: + sum = append(sum, strconv.Itoa(valueKeySize)) sum = append(sum, strconv.Itoa(8)) case descriptor.FieldDescriptorProto_TYPE_FLOAT, descriptor.FieldDescriptorProto_TYPE_FIXED32, descriptor.FieldDescriptorProto_TYPE_SFIXED32: + sum = append(sum, strconv.Itoa(valueKeySize)) sum = append(sum, strconv.Itoa(4)) case descriptor.FieldDescriptorProto_TYPE_INT64, descriptor.FieldDescriptorProto_TYPE_UINT64, descriptor.FieldDescriptorProto_TYPE_UINT32, descriptor.FieldDescriptorProto_TYPE_ENUM, descriptor.FieldDescriptorProto_TYPE_INT32: + sum = append(sum, strconv.Itoa(valueKeySize)) sum = append(sum, `sov`+p.localName+`(uint64(v))`) case descriptor.FieldDescriptorProto_TYPE_BOOL: + sum = append(sum, strconv.Itoa(valueKeySize)) sum = append(sum, `1`) - case descriptor.FieldDescriptorProto_TYPE_STRING, - descriptor.FieldDescriptorProto_TYPE_BYTES: + case descriptor.FieldDescriptorProto_TYPE_STRING: + sum = append(sum, strconv.Itoa(valueKeySize)) sum = append(sum, `len(v)+sov`+p.localName+`(uint64(len(v)))`) + case descriptor.FieldDescriptorProto_TYPE_BYTES: + if gogoproto.IsCustomType(field) { + p.P(`l = 0`) + if nullable { + p.P(`if v != nil {`) + p.In() + } + p.P(`l = v.`, sizeName, `()`) + p.P(`l += `, strconv.Itoa(valueKeySize), ` + 
sov`+p.localName+`(uint64(l))`) + if nullable { + p.Out() + p.P(`}`) + } + sum = append(sum, `l`) + } else { + p.P(`l = 0`) + if proto3 { + p.P(`if len(v) > 0 {`) + } else { + p.P(`if v != nil {`) + } + p.In() + p.P(`l = `, strconv.Itoa(valueKeySize), ` + len(v)+sov`+p.localName+`(uint64(len(v)))`) + p.Out() + p.P(`}`) + sum = append(sum, `l`) + } case descriptor.FieldDescriptorProto_TYPE_SINT32, descriptor.FieldDescriptorProto_TYPE_SINT64: + sum = append(sum, strconv.Itoa(valueKeySize)) sum = append(sum, `soz`+p.localName+`(uint64(v))`) case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + stdSizeCall, stdOk := p.std(field, "v") if nullable { p.P(`l = 0`) p.P(`if v != nil {`) p.In() - if valuegoTyp != valuegoAliasTyp { + if stdOk { + p.P(`l = `, stdSizeCall) + } else if valuegoTyp != valuegoAliasTyp { p.P(`l = ((`, valuegoTyp, `)(v)).`, sizeName, `()`) } else { p.P(`l = v.`, sizeName, `()`) } + p.P(`l += `, strconv.Itoa(valueKeySize), ` + sov`+p.localName+`(uint64(l))`) p.Out() p.P(`}`) + sum = append(sum, `l`) } else { - if valuegoTyp != valuegoAliasTyp { + if stdOk { + p.P(`l = `, stdSizeCall) + } else if valuegoTyp != valuegoAliasTyp { p.P(`l = ((*`, valuegoTyp, `)(&v)).`, sizeName, `()`) } else { p.P(`l = v.`, sizeName, `()`) } + sum = append(sum, strconv.Itoa(valueKeySize)) + sum = append(sum, `l+sov`+p.localName+`(uint64(l))`) } - sum = append(sum, `l+sov`+p.localName+`(uint64(l))`) } p.P(`mapEntrySize := `, strings.Join(sum, "+")) p.P(`n+=mapEntrySize+`, fieldKeySize, `+sov`, p.localName, `(uint64(mapEntrySize))`) @@ -419,12 +479,22 @@ func (p *size) generateField(proto3 bool, file *generator.FileDescriptor, messag } else if repeated { p.P(`for _, e := range m.`, fieldname, ` { `) p.In() - p.P(`l=e.`, sizeName, `()`) + stdSizeCall, stdOk := p.std(field, "e") + if stdOk { + p.P(`l=`, stdSizeCall) + } else { + p.P(`l=e.`, sizeName, `()`) + } p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) p.Out() p.P(`}`) } else { - p.P(`l=m.`, fieldname, 
`.`, sizeName, `()`) + stdSizeCall, stdOk := p.std(field, "m."+fieldname) + if stdOk { + p.P(`l=`, stdSizeCall) + } else { + p.P(`l=m.`, fieldname, `.`, sizeName, `()`) + } p.P(`n+=`, strconv.Itoa(key), `+l+sov`, p.localName, `(uint64(l))`) } case descriptor.FieldDescriptorProto_TYPE_BYTES: @@ -500,12 +570,17 @@ func (p *size) Generate(file *generator.FileDescriptor) { p.PluginImports = generator.NewPluginImports(p.Generator) p.atleastOne = false p.localName = generator.FileName(file) + p.typesPkg = p.NewImport("github.com/gogo/protobuf/types") protoPkg := p.NewImport("github.com/gogo/protobuf/proto") if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { protoPkg = p.NewImport("github.com/golang/protobuf/proto") } for _, message := range file.Messages() { sizeName := "" + if gogoproto.IsSizer(file.FileDescriptorProto, message.DescriptorProto) && gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) { + fmt.Fprintf(os.Stderr, "ERROR: message %v cannot support both sizer and protosizer plugins\n", generator.CamelCase(*message.Name)) + os.Exit(1) + } if gogoproto.IsSizer(file.FileDescriptorProto, message.DescriptorProto) { sizeName = "Size" } else if gogoproto.IsProtoSizer(file.FileDescriptorProto, message.DescriptorProto) { @@ -577,7 +652,7 @@ func (p *size) Generate(file *generator.FileDescriptor) { p.In() p.P(`var l int`) p.P(`_ = l`) - vanity.TurnOffNullableForNativeTypesWithoutDefaultsOnly(f) + vanity.TurnOffNullableForNativeTypes(f) p.generateField(false, file, message, f, sizeName) p.P(`return n`) p.Out() diff --git a/vendor/github.com/gogo/protobuf/plugin/size/sizetest.go b/vendor/github.com/gogo/protobuf/plugin/size/sizetest.go index 4fa946e57..1df987300 100644 --- a/vendor/github.com/gogo/protobuf/plugin/size/sizetest.go +++ b/vendor/github.com/gogo/protobuf/plugin/size/sizetest.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. 
+// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -71,16 +73,16 @@ func (p *test) Generate(imports generator.PluginImports, file *generator.FileDes p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(seed))`) p.P(`p := NewPopulated`, ccTypeName, `(popr, true)`) p.P(`size2 := `, protoPkg.Use(), `.Size(p)`) - p.P(`data, err := `, protoPkg.Use(), `.Marshal(p)`) + p.P(`dAtA, err := `, protoPkg.Use(), `.Marshal(p)`) p.P(`if err != nil {`) p.In() p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) p.Out() p.P(`}`) p.P(`size := p.`, sizeName, `()`) - p.P(`if len(data) != size {`) + p.P(`if len(dAtA) != size {`) p.In() - p.P(`t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(data))`) + p.P(`t.Errorf("seed = %d, size %v != marshalled size %v", seed, size, len(dAtA))`) p.Out() p.P(`}`) p.P(`if size2 != size {`) diff --git a/vendor/github.com/gogo/protobuf/plugin/stringer/stringer.go b/vendor/github.com/gogo/protobuf/plugin/stringer/stringer.go index 231523802..098a9db77 100644 --- a/vendor/github.com/gogo/protobuf/plugin/stringer/stringer.go +++ b/vendor/github.com/gogo/protobuf/plugin/stringer/stringer.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -201,7 +203,7 @@ func (p *stringer) Generate(file *generator.FileDescriptor) { } else if p.IsMap(field) { mapName := `mapStringFor` + fieldname p.P("`", fieldname, ":`", ` + `, mapName, " + `,", "`,") - } else if field.IsMessage() || p.IsGroup(field) { + } else if (field.IsMessage() && !gogoproto.IsCustomType(field)) || p.IsGroup(field) { desc := p.ObjectNamed(field.GetTypeName()) msgname := p.TypeName(desc) msgnames := strings.Split(msgname, ".") diff --git a/vendor/github.com/gogo/protobuf/plugin/stringer/stringertest.go b/vendor/github.com/gogo/protobuf/plugin/stringer/stringertest.go index df615ba78..0912a22df 100644 --- a/vendor/github.com/gogo/protobuf/plugin/stringer/stringertest.go +++ b/vendor/github.com/gogo/protobuf/plugin/stringer/stringertest.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/gogo/protobuf/plugin/testgen/testgen.go b/vendor/github.com/gogo/protobuf/plugin/testgen/testgen.go index a48a1c2cc..e0a9287e5 100644 --- a/vendor/github.com/gogo/protobuf/plugin/testgen/testgen.go +++ b/vendor/github.com/gogo/protobuf/plugin/testgen/testgen.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -61,16 +63,16 @@ given to the testgen plugin, will generate the following test code: func TestAProto(t *testing.T) { popr := math_rand.New(math_rand.NewSource(time.Now().UnixNano())) p := NewPopulatedA(popr, false) - data, err := github_com_gogo_protobuf_proto.Marshal(p) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(p) if err != nil { panic(err) } msg := &A{} - if err := github_com_gogo_protobuf_proto.Unmarshal(data, msg); err != nil { + if err := github_com_gogo_protobuf_proto.Unmarshal(dAtA, msg); err != nil { panic(err) } - for i := range data { - data[i] = byte(popr.Intn(256)) + for i := range dAtA { + dAtA[i] = byte(popr.Intn(256)) } if err := p.VerboseEqual(msg); err != nil { t.Fatalf("%#v !VerboseProto %#v, since %v", msg, p, err) @@ -89,11 +91,11 @@ given to the testgen plugin, will generate the following test code: } b.ResetTimer() for i := 0; i < b.N; i++ { - data, err := github_com_gogo_protobuf_proto.Marshal(pops[i%10000]) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(pops[i%10000]) if err != nil { panic(err) } - total += len(data) + total += len(dAtA) } b.SetBytes(int64(total / b.N)) } @@ -103,11 +105,11 @@ given to the testgen plugin, will generate the following test code: total := 0 datas := make([][]byte, 10000) for i := 0; i < 10000; i++ { - data, err := github_com_gogo_protobuf_proto.Marshal(NewPopulatedA(popr, false)) + dAtA, err := github_com_gogo_protobuf_proto.Marshal(NewPopulatedA(popr, false)) if err != nil { panic(err) } - datas[i] = data + datas[i] = dAtA } msg := &A{} b.ResetTimer() @@ -144,9 +146,9 @@ given to the testgen plugin, will generate the following test code: func TestAProtoText(t *testing2.T) { popr := math_rand2.New(math_rand2.NewSource(time2.Now().UnixNano())) p := NewPopulatedA(popr, true) - data := github_com_gogo_protobuf_proto1.MarshalTextString(p) + dAtA := 
github_com_gogo_protobuf_proto1.MarshalTextString(p) msg := &A{} - if err := github_com_gogo_protobuf_proto1.UnmarshalText(data, msg); err != nil { + if err := github_com_gogo_protobuf_proto1.UnmarshalText(dAtA, msg); err != nil { panic(err) } if err := p.VerboseEqual(msg); err != nil { @@ -160,9 +162,9 @@ given to the testgen plugin, will generate the following test code: func TestAProtoCompactText(t *testing2.T) { popr := math_rand2.New(math_rand2.NewSource(time2.Now().UnixNano())) p := NewPopulatedA(popr, true) - data := github_com_gogo_protobuf_proto1.CompactTextString(p) + dAtA := github_com_gogo_protobuf_proto1.CompactTextString(p) msg := &A{} - if err := github_com_gogo_protobuf_proto1.UnmarshalText(data, msg); err != nil { + if err := github_com_gogo_protobuf_proto1.UnmarshalText(dAtA, msg); err != nil { panic(err) } if err := p.VerboseEqual(msg); err != nil { @@ -285,23 +287,23 @@ func (p *testProto) Generate(imports generator.PluginImports, file *generator.Fi p.P(`seed := `, timePkg.Use(), `.Now().UnixNano()`) p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(seed))`) p.P(`p := NewPopulated`, ccTypeName, `(popr, false)`) - p.P(`data, err := `, protoPkg.Use(), `.Marshal(p)`) + p.P(`dAtA, err := `, protoPkg.Use(), `.Marshal(p)`) p.P(`if err != nil {`) p.In() p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) p.Out() p.P(`}`) p.P(`msg := &`, ccTypeName, `{}`) - p.P(`if err := `, protoPkg.Use(), `.Unmarshal(data, msg); err != nil {`) + p.P(`if err := `, protoPkg.Use(), `.Unmarshal(dAtA, msg); err != nil {`) p.In() p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) p.Out() p.P(`}`) - p.P(`littlefuzz := make([]byte, len(data))`) - p.P(`copy(littlefuzz, data)`) - p.P(`for i := range data {`) + p.P(`littlefuzz := make([]byte, len(dAtA))`) + p.P(`copy(littlefuzz, dAtA)`) + p.P(`for i := range dAtA {`) p.In() - p.P(`data[i] = byte(popr.Intn(256))`) + p.P(`dAtA[i] = byte(popr.Intn(256))`) p.Out() p.P(`}`) if 
gogoproto.HasVerboseEqual(file.FileDescriptorProto, message.DescriptorProto) { @@ -346,27 +348,27 @@ func (p *testProto) Generate(imports generator.PluginImports, file *generator.Fi } else { p.P(`size := p.Size()`) } - p.P(`data := make([]byte, size)`) - p.P(`for i := range data {`) + p.P(`dAtA := make([]byte, size)`) + p.P(`for i := range dAtA {`) p.In() - p.P(`data[i] = byte(popr.Intn(256))`) + p.P(`dAtA[i] = byte(popr.Intn(256))`) p.Out() p.P(`}`) - p.P(`_, err := p.MarshalTo(data)`) + p.P(`_, err := p.MarshalTo(dAtA)`) p.P(`if err != nil {`) p.In() p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) p.Out() p.P(`}`) p.P(`msg := &`, ccTypeName, `{}`) - p.P(`if err := `, protoPkg.Use(), `.Unmarshal(data, msg); err != nil {`) + p.P(`if err := `, protoPkg.Use(), `.Unmarshal(dAtA, msg); err != nil {`) p.In() p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) p.Out() p.P(`}`) - p.P(`for i := range data {`) + p.P(`for i := range dAtA {`) p.In() - p.P(`data[i] = byte(popr.Intn(256))`) + p.P(`dAtA[i] = byte(popr.Intn(256))`) p.Out() p.P(`}`) if gogoproto.HasVerboseEqual(file.FileDescriptorProto, message.DescriptorProto) { @@ -402,13 +404,13 @@ func (p *testProto) Generate(imports generator.PluginImports, file *generator.Fi p.P(`b.ResetTimer()`) p.P(`for i := 0; i < b.N; i++ {`) p.In() - p.P(`data, err := `, protoPkg.Use(), `.Marshal(pops[i%10000])`) + p.P(`dAtA, err := `, protoPkg.Use(), `.Marshal(pops[i%10000])`) p.P(`if err != nil {`) p.In() p.P(`panic(err)`) p.Out() p.P(`}`) - p.P(`total += len(data)`) + p.P(`total += len(dAtA)`) p.Out() p.P(`}`) p.P(`b.SetBytes(int64(total / b.N))`) @@ -423,13 +425,13 @@ func (p *testProto) Generate(imports generator.PluginImports, file *generator.Fi p.P(`datas := make([][]byte, 10000)`) p.P(`for i := 0; i < 10000; i++ {`) p.In() - p.P(`data, err := `, protoPkg.Use(), `.Marshal(NewPopulated`, ccTypeName, `(popr, false))`) + p.P(`dAtA, err := `, protoPkg.Use(), `.Marshal(NewPopulated`, ccTypeName, `(popr, false))`) p.P(`if err != nil 
{`) p.In() p.P(`panic(err)`) p.Out() p.P(`}`) - p.P(`datas[i] = data`) + p.P(`datas[i] = dAtA`) p.Out() p.P(`}`) p.P(`msg := &`, ccTypeName, `{}`) @@ -543,9 +545,9 @@ func (p *testText) Generate(imports generator.PluginImports, file *generator.Fil p.P(`seed := `, timePkg.Use(), `.Now().UnixNano()`) p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(seed))`) p.P(`p := NewPopulated`, ccTypeName, `(popr, true)`) - p.P(`data := `, protoPkg.Use(), `.MarshalTextString(p)`) + p.P(`dAtA := `, protoPkg.Use(), `.MarshalTextString(p)`) p.P(`msg := &`, ccTypeName, `{}`) - p.P(`if err := `, protoPkg.Use(), `.UnmarshalText(data, msg); err != nil {`) + p.P(`if err := `, protoPkg.Use(), `.UnmarshalText(dAtA, msg); err != nil {`) p.In() p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) p.Out() @@ -571,9 +573,9 @@ func (p *testText) Generate(imports generator.PluginImports, file *generator.Fil p.P(`seed := `, timePkg.Use(), `.Now().UnixNano()`) p.P(`popr := `, randPkg.Use(), `.New(`, randPkg.Use(), `.NewSource(seed))`) p.P(`p := NewPopulated`, ccTypeName, `(popr, true)`) - p.P(`data := `, protoPkg.Use(), `.CompactTextString(p)`) + p.P(`dAtA := `, protoPkg.Use(), `.CompactTextString(p)`) p.P(`msg := &`, ccTypeName, `{}`) - p.P(`if err := `, protoPkg.Use(), `.UnmarshalText(data, msg); err != nil {`) + p.P(`if err := `, protoPkg.Use(), `.UnmarshalText(dAtA, msg); err != nil {`) p.In() p.P(`t.Fatalf("seed = %d, err = %v", seed, err)`) p.Out() diff --git a/vendor/github.com/gogo/protobuf/plugin/union/union.go b/vendor/github.com/gogo/protobuf/plugin/union/union.go index 684047770..72edb2498 100644 --- a/vendor/github.com/gogo/protobuf/plugin/union/union.go +++ b/vendor/github.com/gogo/protobuf/plugin/union/union.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/gogo/protobuf/plugin/union/uniontest.go b/vendor/github.com/gogo/protobuf/plugin/union/uniontest.go index 75e68ed57..949cf8338 100644 --- a/vendor/github.com/gogo/protobuf/plugin/union/uniontest.go +++ b/vendor/github.com/gogo/protobuf/plugin/union/uniontest.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. // http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without diff --git a/vendor/github.com/gogo/protobuf/plugin/unmarshal/unmarshal.go b/vendor/github.com/gogo/protobuf/plugin/unmarshal/unmarshal.go index e87f4726a..b5d9613df 100644 --- a/vendor/github.com/gogo/protobuf/plugin/unmarshal/unmarshal.go +++ b/vendor/github.com/gogo/protobuf/plugin/unmarshal/unmarshal.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -73,8 +75,8 @@ The following message: given to the unmarshal plugin, will generate the following code: - func (m *B) Unmarshal(data []byte) error { - l := len(data) + func (m *B) Unmarshal(dAtA []byte) error { + l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 @@ -82,7 +84,7 @@ given to the unmarshal plugin, will generate the following code: if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -101,7 +103,7 @@ given to the unmarshal plugin, will generate the following code: if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ msglen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -112,7 +114,7 @@ given to the unmarshal plugin, will generate the following code: if postIndex > l { return io.ErrUnexpectedEOF } - if err := m.A.Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.A.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -125,7 +127,7 @@ given to the unmarshal plugin, will generate the following code: if iNdEx >= l { return io.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ byteLen |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -137,7 +139,7 @@ given to the unmarshal plugin, will generate the following code: return io.ErrUnexpectedEOF } m.G = append(m.G, github_com_gogo_protobuf_test_custom.Uint128{}) - if err := m.G[len(m.G)-1].Unmarshal(data[iNdEx:postIndex]); err != nil { + if err := m.G[len(m.G)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { return err } iNdEx = postIndex @@ -151,14 +153,14 @@ given to the unmarshal plugin, will generate the following code: } } iNdEx -= sizeOfWire - skippy, err := skip(data[iNdEx:]) + skippy, err := skip(dAtA[iNdEx:]) if err != nil { return err } if (iNdEx + skippy) > l { return io.ErrUnexpectedEOF } - m.XXX_unrecognized = 
append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...) + m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...) iNdEx += skippy } } @@ -185,12 +187,12 @@ import ( type unmarshal struct { *generator.Generator - unsafe bool generator.PluginImports atleastOne bool ioPkg generator.Single mathPkg generator.Single - unsafePkg generator.Single + typesPkg generator.Single + binaryPkg generator.Single localName string } @@ -198,14 +200,7 @@ func NewUnmarshal() *unmarshal { return &unmarshal{} } -func NewUnsafeUnmarshal() *unmarshal { - return &unmarshal{unsafe: true} -} - func (p *unmarshal) Name() string { - if p.unsafe { - return "unsafeunmarshaler" - } return "unmarshal" } @@ -226,7 +221,7 @@ func (p *unmarshal) decodeVarint(varName string, typName string) { p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) p.Out() p.P(`}`) - p.P(`b := data[iNdEx]`) + p.P(`b := dAtA[iNdEx]`) p.P(`iNdEx++`) p.P(varName, ` |= (`, typName, `(b) & 0x7F) << shift`) p.P(`if b < 0x80 {`) @@ -244,20 +239,7 @@ func (p *unmarshal) decodeFixed32(varName string, typeName string) { p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) p.Out() p.P(`}`) - p.P(`iNdEx += 4`) - p.P(varName, ` = `, typeName, `(data[iNdEx-4])`) - p.P(varName, ` |= `, typeName, `(data[iNdEx-3]) << 8`) - p.P(varName, ` |= `, typeName, `(data[iNdEx-2]) << 16`) - p.P(varName, ` |= `, typeName, `(data[iNdEx-1]) << 24`) -} - -func (p *unmarshal) unsafeFixed32(varName string, typeName string) { - p.P(`if iNdEx + 4 > l {`) - p.In() - p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) - p.Out() - p.P(`}`) - p.P(varName, ` = *(*`, typeName, `)(`, p.unsafePkg.Use(), `.Pointer(&data[iNdEx]))`) + p.P(varName, ` = `, typeName, `(`, p.binaryPkg.Use(), `.LittleEndian.Uint32(dAtA[iNdEx:]))`) p.P(`iNdEx += 4`) } @@ -267,56 +249,97 @@ func (p *unmarshal) decodeFixed64(varName string, typeName string) { p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) p.Out() p.P(`}`) - p.P(`iNdEx += 8`) - p.P(varName, ` = `, typeName, 
`(data[iNdEx-8])`) - p.P(varName, ` |= `, typeName, `(data[iNdEx-7]) << 8`) - p.P(varName, ` |= `, typeName, `(data[iNdEx-6]) << 16`) - p.P(varName, ` |= `, typeName, `(data[iNdEx-5]) << 24`) - p.P(varName, ` |= `, typeName, `(data[iNdEx-4]) << 32`) - p.P(varName, ` |= `, typeName, `(data[iNdEx-3]) << 40`) - p.P(varName, ` |= `, typeName, `(data[iNdEx-2]) << 48`) - p.P(varName, ` |= `, typeName, `(data[iNdEx-1]) << 56`) -} - -func (p *unmarshal) unsafeFixed64(varName string, typeName string) { - p.P(`if iNdEx + 8 > l {`) - p.In() - p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) - p.Out() - p.P(`}`) - p.P(varName, ` = *(*`, typeName, `)(`, p.unsafePkg.Use(), `.Pointer(&data[iNdEx]))`) + p.P(varName, ` = `, typeName, `(`, p.binaryPkg.Use(), `.LittleEndian.Uint64(dAtA[iNdEx:]))`) p.P(`iNdEx += 8`) } -func (p *unmarshal) mapField(varName string, field *descriptor.FieldDescriptorProto) { +func (p *unmarshal) declareMapField(varName string, nullable bool, customType bool, field *descriptor.FieldDescriptorProto) { + switch field.GetType() { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE: + p.P(`var `, varName, ` float64`) + case descriptor.FieldDescriptorProto_TYPE_FLOAT: + p.P(`var `, varName, ` float32`) + case descriptor.FieldDescriptorProto_TYPE_INT64: + p.P(`var `, varName, ` int64`) + case descriptor.FieldDescriptorProto_TYPE_UINT64: + p.P(`var `, varName, ` uint64`) + case descriptor.FieldDescriptorProto_TYPE_INT32: + p.P(`var `, varName, ` int32`) + case descriptor.FieldDescriptorProto_TYPE_FIXED64: + p.P(`var `, varName, ` uint64`) + case descriptor.FieldDescriptorProto_TYPE_FIXED32: + p.P(`var `, varName, ` uint32`) + case descriptor.FieldDescriptorProto_TYPE_BOOL: + p.P(`var `, varName, ` bool`) + case descriptor.FieldDescriptorProto_TYPE_STRING: + cast, _ := p.GoType(nil, field) + cast = strings.Replace(cast, "*", "", 1) + p.P(`var `, varName, ` `, cast) + case descriptor.FieldDescriptorProto_TYPE_MESSAGE: + if gogoproto.IsStdTime(field) { + 
p.P(varName, ` := new(time.Time)`) + } else if gogoproto.IsStdDuration(field) { + p.P(varName, ` := new(time.Duration)`) + } else { + desc := p.ObjectNamed(field.GetTypeName()) + msgname := p.TypeName(desc) + if nullable { + p.P(`var `, varName, ` *`, msgname) + } else { + p.P(varName, ` := &`, msgname, `{}`) + } + } + case descriptor.FieldDescriptorProto_TYPE_BYTES: + if customType { + _, ctyp, err := generator.GetCustomType(field) + if err != nil { + panic(err) + } + p.P(`var `, varName, `1 `, ctyp) + p.P(`var `, varName, ` = &`, varName, `1`) + } else { + p.P(varName, ` := []byte{}`) + } + case descriptor.FieldDescriptorProto_TYPE_UINT32: + p.P(`var `, varName, ` uint32`) + case descriptor.FieldDescriptorProto_TYPE_ENUM: + typName := p.TypeName(p.ObjectNamed(field.GetTypeName())) + p.P(`var `, varName, ` `, typName) + case descriptor.FieldDescriptorProto_TYPE_SFIXED32: + p.P(`var `, varName, ` int32`) + case descriptor.FieldDescriptorProto_TYPE_SFIXED64: + p.P(`var `, varName, ` int64`) + case descriptor.FieldDescriptorProto_TYPE_SINT32: + p.P(`var `, varName, ` int32`) + case descriptor.FieldDescriptorProto_TYPE_SINT64: + p.P(`var `, varName, ` int64`) + } +} + +func (p *unmarshal) mapField(varName string, customType bool, field *descriptor.FieldDescriptorProto) { switch field.GetType() { case descriptor.FieldDescriptorProto_TYPE_DOUBLE: p.P(`var `, varName, `temp uint64`) p.decodeFixed64(varName+"temp", "uint64") - p.P(varName, ` := `, p.mathPkg.Use(), `.Float64frombits(`, varName, `temp)`) + p.P(varName, ` = `, p.mathPkg.Use(), `.Float64frombits(`, varName, `temp)`) case descriptor.FieldDescriptorProto_TYPE_FLOAT: p.P(`var `, varName, `temp uint32`) p.decodeFixed32(varName+"temp", "uint32") - p.P(varName, ` := `, p.mathPkg.Use(), `.Float32frombits(`, varName, `temp)`) + p.P(varName, ` = `, p.mathPkg.Use(), `.Float32frombits(`, varName, `temp)`) case descriptor.FieldDescriptorProto_TYPE_INT64: - p.P(`var `, varName, ` int64`) p.decodeVarint(varName, "int64") 
case descriptor.FieldDescriptorProto_TYPE_UINT64: - p.P(`var `, varName, ` uint64`) p.decodeVarint(varName, "uint64") case descriptor.FieldDescriptorProto_TYPE_INT32: - p.P(`var `, varName, ` int32`) p.decodeVarint(varName, "int32") case descriptor.FieldDescriptorProto_TYPE_FIXED64: - p.P(`var `, varName, ` uint64`) p.decodeFixed64(varName, "uint64") case descriptor.FieldDescriptorProto_TYPE_FIXED32: - p.P(`var `, varName, ` uint32`) p.decodeFixed32(varName, "uint32") case descriptor.FieldDescriptorProto_TYPE_BOOL: p.P(`var `, varName, `temp int`) p.decodeVarint(varName+"temp", "int") - p.P(varName, ` := bool(`, varName, `temp != 0)`) + p.P(varName, ` = bool(`, varName, `temp != 0)`) case descriptor.FieldDescriptorProto_TYPE_STRING: p.P(`var stringLen`, varName, ` uint64`) p.decodeVarint("stringLen"+varName, "uint64") @@ -334,7 +357,7 @@ func (p *unmarshal) mapField(varName string, field *descriptor.FieldDescriptorPr p.P(`}`) cast, _ := p.GoType(nil, field) cast = strings.Replace(cast, "*", "", 1) - p.P(varName, ` := `, cast, `(data[iNdEx:postStringIndex`, varName, `])`) + p.P(varName, ` = `, cast, `(dAtA[iNdEx:postStringIndex`, varName, `])`) p.P(`iNdEx = postStringIndex`, varName) case descriptor.FieldDescriptorProto_TYPE_MESSAGE: p.P(`var mapmsglen int`) @@ -355,10 +378,17 @@ func (p *unmarshal) mapField(varName string, field *descriptor.FieldDescriptorPr p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) p.Out() p.P(`}`) - desc := p.ObjectNamed(field.GetTypeName()) - msgname := p.TypeName(desc) - p.P(varName, ` := &`, msgname, `{}`) - p.P(`if err := `, varName, `.Unmarshal(data[iNdEx:postmsgIndex]); err != nil {`) + buf := `dAtA[iNdEx:postmsgIndex]` + if gogoproto.IsStdTime(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdTimeUnmarshal(`, varName, `, `, buf, `); err != nil {`) + } else if gogoproto.IsStdDuration(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdDurationUnmarshal(`, varName, `, `, buf, `); err != nil {`) + } else { + desc := 
p.ObjectNamed(field.GetTypeName()) + msgname := p.TypeName(desc) + p.P(varName, ` = &`, msgname, `{}`) + p.P(`if err := `, varName, `.Unmarshal(`, buf, `); err != nil {`) + } p.In() p.P(`return err`) p.Out() @@ -379,32 +409,36 @@ func (p *unmarshal) mapField(varName string, field *descriptor.FieldDescriptorPr p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) p.Out() p.P(`}`) - p.P(varName, ` := make([]byte, mapbyteLen)`) - p.P(`copy(`, varName, `, data[iNdEx:postbytesIndex])`) + if customType { + p.P(`if err := `, varName, `.Unmarshal(dAtA[iNdEx:postbytesIndex]); err != nil {`) + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + } else { + p.P(varName, ` = make([]byte, mapbyteLen)`) + p.P(`copy(`, varName, `, dAtA[iNdEx:postbytesIndex])`) + } p.P(`iNdEx = postbytesIndex`) case descriptor.FieldDescriptorProto_TYPE_UINT32: - p.P(`var `, varName, ` uint32`) p.decodeVarint(varName, "uint32") case descriptor.FieldDescriptorProto_TYPE_ENUM: typName := p.TypeName(p.ObjectNamed(field.GetTypeName())) - p.P(`var `, varName, ` `, typName) p.decodeVarint(varName, typName) case descriptor.FieldDescriptorProto_TYPE_SFIXED32: - p.P(`var `, varName, ` int32`) p.decodeFixed32(varName, "int32") case descriptor.FieldDescriptorProto_TYPE_SFIXED64: - p.P(`var `, varName, ` int64`) p.decodeFixed64(varName, "int64") case descriptor.FieldDescriptorProto_TYPE_SINT32: p.P(`var `, varName, `temp int32`) p.decodeVarint(varName+"temp", "int32") p.P(varName, `temp = int32((uint32(`, varName, `temp) >> 1) ^ uint32(((`, varName, `temp&1)<<31)>>31))`) - p.P(varName, ` := int32(`, varName, `temp)`) + p.P(varName, ` = int32(`, varName, `temp)`) case descriptor.FieldDescriptorProto_TYPE_SINT64: p.P(`var `, varName, `temp uint64`) p.decodeVarint(varName+"temp", "uint64") p.P(varName, `temp = (`, varName, `temp >> 1) ^ uint64((int64(`, varName, `temp&1)<<63)>>63)`) - p.P(varName, ` := int64(`, varName, `temp)`) + p.P(varName, ` = int64(`, varName, `temp)`) } } @@ -426,68 +460,32 @@ func (p *unmarshal) 
field(file *generator.FileDescriptor, msg *generator.Descrip oneof := field.OneofIndex != nil switch *field.Type { case descriptor.FieldDescriptorProto_TYPE_DOUBLE: - if !p.unsafe || gogoproto.IsCastType(field) { - p.P(`var v uint64`) - p.decodeFixed64("v", "uint64") - if oneof { - p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{`, typ, "(", p.mathPkg.Use(), `.Float64frombits(v))}`) - } else if repeated { - p.P(`v2 := `, typ, "(", p.mathPkg.Use(), `.Float64frombits(v))`) - p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v2)`) - } else if proto3 || !nullable { - p.P(`m.`, fieldname, ` = `, typ, "(", p.mathPkg.Use(), `.Float64frombits(v))`) - } else { - p.P(`v2 := `, typ, "(", p.mathPkg.Use(), `.Float64frombits(v))`) - p.P(`m.`, fieldname, ` = &v2`) - } + p.P(`var v uint64`) + p.decodeFixed64("v", "uint64") + if oneof { + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{`, typ, "(", p.mathPkg.Use(), `.Float64frombits(v))}`) + } else if repeated { + p.P(`v2 := `, typ, "(", p.mathPkg.Use(), `.Float64frombits(v))`) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v2)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = `, typ, "(", p.mathPkg.Use(), `.Float64frombits(v))`) } else { - if oneof { - p.P(`var v float64`) - p.unsafeFixed64("v", "float64") - p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) - } else if repeated { - p.P(`var v float64`) - p.unsafeFixed64("v", "float64") - p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) - } else if proto3 || !nullable { - p.unsafeFixed64(`m.`+fieldname, "float64") - } else { - p.P(`var v float64`) - p.unsafeFixed64("v", "float64") - p.P(`m.`, fieldname, ` = &v`) - } + p.P(`v2 := `, typ, "(", p.mathPkg.Use(), `.Float64frombits(v))`) + p.P(`m.`, fieldname, ` = &v2`) } case descriptor.FieldDescriptorProto_TYPE_FLOAT: - if !p.unsafe || gogoproto.IsCastType(field) { - p.P(`var v uint32`) - p.decodeFixed32("v", "uint32") - if oneof { - p.P(`m.`, fieldname, ` = &`, 
p.OneOfTypeName(msg, field), `{`, typ, "(", p.mathPkg.Use(), `.Float32frombits(v))}`) - } else if repeated { - p.P(`v2 := `, typ, "(", p.mathPkg.Use(), `.Float32frombits(v))`) - p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v2)`) - } else if proto3 || !nullable { - p.P(`m.`, fieldname, ` = `, typ, "(", p.mathPkg.Use(), `.Float32frombits(v))`) - } else { - p.P(`v2 := `, typ, "(", p.mathPkg.Use(), `.Float32frombits(v))`) - p.P(`m.`, fieldname, ` = &v2`) - } + p.P(`var v uint32`) + p.decodeFixed32("v", "uint32") + if oneof { + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{`, typ, "(", p.mathPkg.Use(), `.Float32frombits(v))}`) + } else if repeated { + p.P(`v2 := `, typ, "(", p.mathPkg.Use(), `.Float32frombits(v))`) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v2)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = `, typ, "(", p.mathPkg.Use(), `.Float32frombits(v))`) } else { - if oneof { - p.P(`var v float32`) - p.unsafeFixed32("v", "float32") - p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) - } else if repeated { - p.P(`var v float32`) - p.unsafeFixed32("v", "float32") - p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) - } else if proto3 || !nullable { - p.unsafeFixed32("m."+fieldname, "float32") - } else { - p.P(`var v float32`) - p.unsafeFixed32("v", "float32") - p.P(`m.`, fieldname, ` = &v`) - } + p.P(`v2 := `, typ, "(", p.mathPkg.Use(), `.Float32frombits(v))`) + p.P(`m.`, fieldname, ` = &v2`) } case descriptor.FieldDescriptorProto_TYPE_INT64: if oneof { @@ -541,74 +539,38 @@ func (p *unmarshal) field(file *generator.FileDescriptor, msg *generator.Descrip p.P(`m.`, fieldname, ` = &v`) } case descriptor.FieldDescriptorProto_TYPE_FIXED64: - if !p.unsafe || gogoproto.IsCastType(field) { - if oneof { - p.P(`var v `, typ) - p.decodeFixed64("v", typ) - p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) - } else if repeated { - p.P(`var v `, typ) - p.decodeFixed64("v", typ) - p.P(`m.`, 
fieldname, ` = append(m.`, fieldname, `, v)`) - } else if proto3 || !nullable { - p.P(`m.`, fieldname, ` = 0`) - p.decodeFixed64("m."+fieldname, typ) - } else { - p.P(`var v `, typ) - p.decodeFixed64("v", typ) - p.P(`m.`, fieldname, ` = &v`) - } + if oneof { + p.P(`var v `, typ) + p.decodeFixed64("v", typ) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v `, typ) + p.decodeFixed64("v", typ) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = 0`) + p.decodeFixed64("m."+fieldname, typ) } else { - if oneof { - p.P(`var v uint64`) - p.unsafeFixed64("v", "uint64") - p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) - } else if repeated { - p.P(`var v uint64`) - p.unsafeFixed64("v", "uint64") - p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) - } else if proto3 || !nullable { - p.unsafeFixed64("m."+fieldname, "uint64") - } else { - p.P(`var v uint64`) - p.unsafeFixed64("v", "uint64") - p.P(`m.`, fieldname, ` = &v`) - } + p.P(`var v `, typ) + p.decodeFixed64("v", typ) + p.P(`m.`, fieldname, ` = &v`) } case descriptor.FieldDescriptorProto_TYPE_FIXED32: - if !p.unsafe || gogoproto.IsCastType(field) { - if oneof { - p.P(`var v `, typ) - p.decodeFixed32("v", typ) - p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) - } else if repeated { - p.P(`var v `, typ) - p.decodeFixed32("v", typ) - p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) - } else if proto3 || !nullable { - p.P(`m.`, fieldname, ` = 0`) - p.decodeFixed32("m."+fieldname, typ) - } else { - p.P(`var v `, typ) - p.decodeFixed32("v", typ) - p.P(`m.`, fieldname, ` = &v`) - } + if oneof { + p.P(`var v `, typ) + p.decodeFixed32("v", typ) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v `, typ) + p.decodeFixed32("v", typ) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || 
!nullable { + p.P(`m.`, fieldname, ` = 0`) + p.decodeFixed32("m."+fieldname, typ) } else { - if oneof { - p.P(`var v uint32`) - p.unsafeFixed32("v", "uint32") - p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) - } else if repeated { - p.P(`var v uint32`) - p.unsafeFixed32("v", "uint32") - p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) - } else if proto3 || !nullable { - p.unsafeFixed32("m."+fieldname, "uint32") - } else { - p.P(`var v uint32`) - p.unsafeFixed32("v", "uint32") - p.P(`m.`, fieldname, ` = &v`) - } + p.P(`var v `, typ) + p.decodeFixed32("v", typ) + p.P(`m.`, fieldname, ` = &v`) } case descriptor.FieldDescriptorProto_TYPE_BOOL: p.P(`var v int`) @@ -640,13 +602,13 @@ func (p *unmarshal) field(file *generator.FileDescriptor, msg *generator.Descrip p.Out() p.P(`}`) if oneof { - p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{`, typ, `(data[iNdEx:postIndex])}`) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{`, typ, `(dAtA[iNdEx:postIndex])}`) } else if repeated { - p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, `, typ, `(data[iNdEx:postIndex]))`) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, `, typ, `(dAtA[iNdEx:postIndex]))`) } else if proto3 || !nullable { - p.P(`m.`, fieldname, ` = `, typ, `(data[iNdEx:postIndex])`) + p.P(`m.`, fieldname, ` = `, typ, `(dAtA[iNdEx:postIndex])`) } else { - p.P(`s := `, typ, `(data[iNdEx:postIndex])`) + p.P(`s := `, typ, `(dAtA[iNdEx:postIndex])`) p.P(`m.`, fieldname, ` = &s`) } p.P(`iNdEx = postIndex`) @@ -669,8 +631,27 @@ func (p *unmarshal) field(file *generator.FileDescriptor, msg *generator.Descrip p.Out() p.P(`}`) if oneof { - p.P(`v := &`, msgname, `{}`) - p.P(`if err := v.Unmarshal(data[iNdEx:postIndex]); err != nil {`) + buf := `dAtA[iNdEx:postIndex]` + if gogoproto.IsStdTime(field) { + if nullable { + p.P(`v := new(time.Time)`) + p.P(`if err := `, p.typesPkg.Use(), `.StdTimeUnmarshal(v, `, buf, `); err != nil {`) + } else { + p.P(`v := time.Time{}`) + 
p.P(`if err := `, p.typesPkg.Use(), `.StdTimeUnmarshal(&v, `, buf, `); err != nil {`) + } + } else if gogoproto.IsStdDuration(field) { + if nullable { + p.P(`v := new(time.Duration)`) + p.P(`if err := `, p.typesPkg.Use(), `.StdDurationUnmarshal(v, `, buf, `); err != nil {`) + } else { + p.P(`v := time.Duration(0)`) + p.P(`if err := `, p.typesPkg.Use(), `.StdDurationUnmarshal(&v, `, buf, `); err != nil {`) + } + } else { + p.P(`v := &`, msgname, `{}`) + p.P(`if err := v.Unmarshal(`, buf, `); err != nil {`) + } p.In() p.P(`return err`) p.Out() @@ -690,43 +671,122 @@ func (p *unmarshal) field(file *generator.FileDescriptor, msg *generator.Descrip // if the map type is an alias and key or values are aliases (type Foo map[Bar]Baz), // we need to explicitly record their use here. - p.RecordTypeUse(m.KeyAliasField.GetTypeName()) - p.RecordTypeUse(m.ValueAliasField.GetTypeName()) + if gogoproto.IsCastKey(field) { + p.RecordTypeUse(m.KeyAliasField.GetTypeName()) + } + if gogoproto.IsCastValue(field) { + p.RecordTypeUse(m.ValueAliasField.GetTypeName()) + } nullable, valuegoTyp, valuegoAliasTyp = generator.GoMapValueTypes(field, m.ValueField, valuegoTyp, valuegoAliasTyp) + if gogoproto.IsStdTime(field) || gogoproto.IsStdDuration(field) { + valuegoTyp = valuegoAliasTyp + } - p.P(`var keykey uint64`) - p.decodeVarint("keykey", "uint64") - p.mapField("mapkey", m.KeyAliasField) - p.P(`var valuekey uint64`) - p.decodeVarint("valuekey", "uint64") - p.mapField("mapvalue", m.ValueAliasField) p.P(`if m.`, fieldname, ` == nil {`) p.In() p.P(`m.`, fieldname, ` = make(`, m.GoType, `)`) p.Out() p.P(`}`) + + p.declareMapField("mapkey", false, false, m.KeyAliasField) + p.declareMapField("mapvalue", nullable, gogoproto.IsCustomType(field), m.ValueAliasField) + p.P(`for iNdEx < postIndex {`) + p.In() + + p.P(`entryPreIndex := iNdEx`) + p.P(`var wire uint64`) + p.decodeVarint("wire", "uint64") + p.P(`fieldNum := int32(wire >> 3)`) + + p.P(`if fieldNum == 1 {`) + p.In() + p.mapField("mapkey", 
false, m.KeyAliasField) + p.Out() + p.P(`} else if fieldNum == 2 {`) + p.In() + p.mapField("mapvalue", gogoproto.IsCustomType(field), m.ValueAliasField) + p.Out() + p.P(`} else {`) + p.In() + p.P(`iNdEx = entryPreIndex`) + p.P(`skippy, err := skip`, p.localName, `(dAtA[iNdEx:])`) + p.P(`if err != nil {`) + p.In() + p.P(`return err`) + p.Out() + p.P(`}`) + p.P(`if skippy < 0 {`) + p.In() + p.P(`return ErrInvalidLength`, p.localName) + p.Out() + p.P(`}`) + p.P(`if (iNdEx + skippy) > postIndex {`) + p.In() + p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) + p.Out() + p.P(`}`) + p.P(`iNdEx += skippy`) + p.Out() + p.P(`}`) + + p.Out() + p.P(`}`) + s := `m.` + fieldname if keygoTyp == keygoAliasTyp { s += `[mapkey]` } else { s += `[` + keygoAliasTyp + `(mapkey)]` } + v := `mapvalue` - if m.ValueField.IsMessage() && !nullable { + if (m.ValueField.IsMessage() || gogoproto.IsCustomType(field)) && !nullable { v = `*` + v } if valuegoTyp != valuegoAliasTyp { v = `((` + valuegoAliasTyp + `)(` + v + `))` } + p.P(s, ` = `, v) } else if repeated { - if nullable { + if gogoproto.IsStdTime(field) { + if nullable { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, new(time.Time))`) + } else { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, time.Time{})`) + } + } else if gogoproto.IsStdDuration(field) { + if nullable { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, new(time.Duration))`) + } else { + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, time.Duration(0))`) + } + } else if nullable && !gogoproto.IsCustomType(field) { p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, &`, msgname, `{})`) } else { - p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, `, msgname, `{})`) + goType, _ := p.GoType(nil, field) + // remove the slice from the type, i.e. 
[]*T -> *T + goType = goType[2:] + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, `, goType, `{})`) + } + varName := `m.` + fieldname + `[len(m.` + fieldname + `)-1]` + buf := `dAtA[iNdEx:postIndex]` + if gogoproto.IsStdTime(field) { + if nullable { + p.P(`if err := `, p.typesPkg.Use(), `.StdTimeUnmarshal(`, varName, `,`, buf, `); err != nil {`) + } else { + p.P(`if err := `, p.typesPkg.Use(), `.StdTimeUnmarshal(&(`, varName, `),`, buf, `); err != nil {`) + } + } else if gogoproto.IsStdDuration(field) { + if nullable { + p.P(`if err := `, p.typesPkg.Use(), `.StdDurationUnmarshal(`, varName, `,`, buf, `); err != nil {`) + } else { + p.P(`if err := `, p.typesPkg.Use(), `.StdDurationUnmarshal(&(`, varName, `),`, buf, `); err != nil {`) + } + } else { + p.P(`if err := `, varName, `.Unmarshal(`, buf, `); err != nil {`) } - p.P(`if err := m.`, fieldname, `[len(m.`, fieldname, `)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {`) p.In() p.P(`return err`) p.Out() @@ -734,22 +794,43 @@ func (p *unmarshal) field(file *generator.FileDescriptor, msg *generator.Descrip } else if nullable { p.P(`if m.`, fieldname, ` == nil {`) p.In() - p.P(`m.`, fieldname, ` = &`, msgname, `{}`) + if gogoproto.IsStdTime(field) { + p.P(`m.`, fieldname, ` = new(time.Time)`) + } else if gogoproto.IsStdDuration(field) { + p.P(`m.`, fieldname, ` = new(time.Duration)`) + } else { + goType, _ := p.GoType(nil, field) + // remove the star from the type + p.P(`m.`, fieldname, ` = &`, goType[1:], `{}`) + } p.Out() p.P(`}`) - p.P(`if err := m.`, fieldname, `.Unmarshal(data[iNdEx:postIndex]); err != nil {`) + if gogoproto.IsStdTime(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdTimeUnmarshal(m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else if gogoproto.IsStdDuration(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdDurationUnmarshal(m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else { + p.P(`if err := m.`, fieldname, `.Unmarshal(dAtA[iNdEx:postIndex]); err != nil 
{`) + } p.In() p.P(`return err`) p.Out() p.P(`}`) } else { - p.P(`if err := m.`, fieldname, `.Unmarshal(data[iNdEx:postIndex]); err != nil {`) + if gogoproto.IsStdTime(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdTimeUnmarshal(&m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else if gogoproto.IsStdDuration(field) { + p.P(`if err := `, p.typesPkg.Use(), `.StdDurationUnmarshal(&m.`, fieldname, `, dAtA[iNdEx:postIndex]); err != nil {`) + } else { + p.P(`if err := m.`, fieldname, `.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {`) + } p.In() p.P(`return err`) p.Out() p.P(`}`) } p.P(`iNdEx = postIndex`) + case descriptor.FieldDescriptorProto_TYPE_BYTES: p.P(`var byteLen int`) p.decodeVarint("byteLen", "int") @@ -767,13 +848,13 @@ func (p *unmarshal) field(file *generator.FileDescriptor, msg *generator.Descrip if !gogoproto.IsCustomType(field) { if oneof { p.P(`v := make([]byte, postIndex-iNdEx)`) - p.P(`copy(v, data[iNdEx:postIndex])`) + p.P(`copy(v, dAtA[iNdEx:postIndex])`) p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) } else if repeated { p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, make([]byte, postIndex-iNdEx))`) - p.P(`copy(m.`, fieldname, `[len(m.`, fieldname, `)-1], data[iNdEx:postIndex])`) + p.P(`copy(m.`, fieldname, `[len(m.`, fieldname, `)-1], dAtA[iNdEx:postIndex])`) } else { - p.P(`m.`, fieldname, ` = append(m.`, fieldname, `[:0] , data[iNdEx:postIndex]...)`) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `[:0] , dAtA[iNdEx:postIndex]...)`) p.P(`if m.`, fieldname, ` == nil {`) p.In() p.P(`m.`, fieldname, ` = []byte{}`) @@ -788,7 +869,7 @@ func (p *unmarshal) field(file *generator.FileDescriptor, msg *generator.Descrip if oneof { p.P(`var vv `, ctyp) p.P(`v := &vv`) - p.P(`if err := v.Unmarshal(data[iNdEx:postIndex]); err != nil {`) + p.P(`if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {`) p.In() p.P(`return err`) p.Out() @@ -797,7 +878,7 @@ func (p *unmarshal) field(file *generator.FileDescriptor, msg 
*generator.Descrip } else if repeated { p.P(`var v `, ctyp) p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) - p.P(`if err := m.`, fieldname, `[len(m.`, fieldname, `)-1].Unmarshal(data[iNdEx:postIndex]); err != nil {`) + p.P(`if err := m.`, fieldname, `[len(m.`, fieldname, `)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil {`) p.In() p.P(`return err`) p.Out() @@ -805,13 +886,13 @@ func (p *unmarshal) field(file *generator.FileDescriptor, msg *generator.Descrip } else if nullable { p.P(`var v `, ctyp) p.P(`m.`, fieldname, ` = &v`) - p.P(`if err := m.`, fieldname, `.Unmarshal(data[iNdEx:postIndex]); err != nil {`) + p.P(`if err := m.`, fieldname, `.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {`) p.In() p.P(`return err`) p.Out() p.P(`}`) } else { - p.P(`if err := m.`, fieldname, `.Unmarshal(data[iNdEx:postIndex]); err != nil {`) + p.P(`if err := m.`, fieldname, `.Unmarshal(dAtA[iNdEx:postIndex]); err != nil {`) p.In() p.P(`return err`) p.Out() @@ -855,74 +936,38 @@ func (p *unmarshal) field(file *generator.FileDescriptor, msg *generator.Descrip p.P(`m.`, fieldname, ` = &v`) } case descriptor.FieldDescriptorProto_TYPE_SFIXED32: - if !p.unsafe || gogoproto.IsCastType(field) { - if oneof { - p.P(`var v `, typ) - p.decodeFixed32("v", typ) - p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) - } else if repeated { - p.P(`var v `, typ) - p.decodeFixed32("v", typ) - p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) - } else if proto3 || !nullable { - p.P(`m.`, fieldname, ` = 0`) - p.decodeFixed32("m."+fieldname, typ) - } else { - p.P(`var v `, typ) - p.decodeFixed32("v", typ) - p.P(`m.`, fieldname, ` = &v`) - } + if oneof { + p.P(`var v `, typ) + p.decodeFixed32("v", typ) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v `, typ) + p.decodeFixed32("v", typ) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = 0`) + 
p.decodeFixed32("m."+fieldname, typ) } else { - if oneof { - p.P(`var v int32`) - p.unsafeFixed32("v", "int32") - p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) - } else if repeated { - p.P(`var v int32`) - p.unsafeFixed32("v", "int32") - p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) - } else if proto3 || !nullable { - p.unsafeFixed32("m."+fieldname, "int32") - } else { - p.P(`var v int32`) - p.unsafeFixed32("v", "int32") - p.P(`m.`, fieldname, ` = &v`) - } + p.P(`var v `, typ) + p.decodeFixed32("v", typ) + p.P(`m.`, fieldname, ` = &v`) } case descriptor.FieldDescriptorProto_TYPE_SFIXED64: - if !p.unsafe || gogoproto.IsCastType(field) { - if oneof { - p.P(`var v `, typ) - p.decodeFixed64("v", typ) - p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) - } else if repeated { - p.P(`var v `, typ) - p.decodeFixed64("v", typ) - p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) - } else if proto3 || !nullable { - p.P(`m.`, fieldname, ` = 0`) - p.decodeFixed64("m."+fieldname, typ) - } else { - p.P(`var v `, typ) - p.decodeFixed64("v", typ) - p.P(`m.`, fieldname, ` = &v`) - } + if oneof { + p.P(`var v `, typ) + p.decodeFixed64("v", typ) + p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) + } else if repeated { + p.P(`var v `, typ) + p.decodeFixed64("v", typ) + p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) + } else if proto3 || !nullable { + p.P(`m.`, fieldname, ` = 0`) + p.decodeFixed64("m."+fieldname, typ) } else { - if oneof { - p.P(`var v int64`) - p.unsafeFixed64("v", "int64") - p.P(`m.`, fieldname, ` = &`, p.OneOfTypeName(msg, field), `{v}`) - } else if repeated { - p.P(`var v int64`) - p.unsafeFixed64("v", "int64") - p.P(`m.`, fieldname, ` = append(m.`, fieldname, `, v)`) - } else if proto3 || !nullable { - p.unsafeFixed64("m."+fieldname, "int64") - } else { - p.P(`var v int64`) - p.unsafeFixed64("v", "int64") - p.P(`m.`, fieldname, ` = &v`) - } + p.P(`var v `, typ) + p.decodeFixed64("v", typ) + 
p.P(`m.`, fieldname, ` = &v`) } case descriptor.FieldDescriptorProto_TYPE_SINT32: p.P(`var v `, typ) @@ -961,13 +1006,11 @@ func (p *unmarshal) Generate(file *generator.FileDescriptor) { p.PluginImports = generator.NewPluginImports(p.Generator) p.atleastOne = false p.localName = generator.FileName(file) - if p.unsafe { - p.localName += "Unsafe" - } p.ioPkg = p.NewImport("io") p.mathPkg = p.NewImport("math") - p.unsafePkg = p.NewImport("unsafe") + p.typesPkg = p.NewImport("github.com/gogo/protobuf/types") + p.binaryPkg = p.NewImport("encoding/binary") fmtPkg := p.NewImport("fmt") protoPkg := p.NewImport("github.com/gogo/protobuf/proto") if !gogoproto.ImportsGoGoProto(file.FileDescriptorProto) { @@ -976,21 +1019,9 @@ func (p *unmarshal) Generate(file *generator.FileDescriptor) { for _, message := range file.Messages() { ccTypeName := generator.CamelCaseSlice(message.TypeName()) - if p.unsafe { - if !gogoproto.IsUnsafeUnmarshaler(file.FileDescriptorProto, message.DescriptorProto) { - continue - } - if gogoproto.IsUnmarshaler(file.FileDescriptorProto, message.DescriptorProto) { - panic(fmt.Sprintf("unsafe_unmarshaler and unmarshaler enabled for %v", ccTypeName)) - } - } - if !p.unsafe { - if !gogoproto.IsUnmarshaler(file.FileDescriptorProto, message.DescriptorProto) { - continue - } - if gogoproto.IsUnsafeUnmarshaler(file.FileDescriptorProto, message.DescriptorProto) { - panic(fmt.Sprintf("unsafe_unmarshaler and unmarshaler enabled for %v", ccTypeName)) - } + if !gogoproto.IsUnmarshaler(file.FileDescriptorProto, message.DescriptorProto) && + !gogoproto.IsUnsafeUnmarshaler(file.FileDescriptorProto, message.DescriptorProto) { + continue } if message.DescriptorProto.GetOptions().GetMapEntry() { continue @@ -1008,12 +1039,12 @@ func (p *unmarshal) Generate(file *generator.FileDescriptor) { } rfCount := len(rfMap) - p.P(`func (m *`, ccTypeName, `) Unmarshal(data []byte) error {`) + p.P(`func (m *`, ccTypeName, `) Unmarshal(dAtA []byte) error {`) p.In() if rfCount > 0 { 
p.P(`var hasFields [`, strconv.Itoa(1+(rfCount-1)/64), `]uint64`) } - p.P(`l := len(data)`) + p.P(`l := len(dAtA)`) p.P(`iNdEx := 0`) p.P(`for iNdEx < l {`) p.In() @@ -1044,12 +1075,16 @@ func (p *unmarshal) Generate(file *generator.FileDescriptor) { if field.OneofIndex != nil { errFieldname = p.GetOneOfFieldName(message, field) } - packed := field.IsPacked() + possiblyPacked := field.IsScalar() && field.IsRepeated() p.P(`case `, strconv.Itoa(int(field.GetNumber())), `:`) p.In() wireType := field.WireType() - if packed { - p.P(`if wireType == `, strconv.Itoa(proto.WireBytes), `{`) + if possiblyPacked { + p.P(`if wireType == `, strconv.Itoa(wireType), `{`) + p.In() + p.field(file, message, field, fieldname, false) + p.Out() + p.P(`} else if wireType == `, strconv.Itoa(proto.WireBytes), `{`) p.In() p.P(`var packedLen int`) p.decodeVarint("packedLen", "int") @@ -1070,10 +1105,6 @@ func (p *unmarshal) Generate(file *generator.FileDescriptor) { p.Out() p.P(`}`) p.Out() - p.P(`} else if wireType == `, strconv.Itoa(wireType), `{`) - p.In() - p.field(file, message, field, fieldname, false) - p.Out() p.P(`} else {`) p.In() p.P(`return ` + fmtPkg.Use() + `.Errorf("proto: wrong wireType = %d for field ` + errFieldname + `", wireType)`) @@ -1119,7 +1150,7 @@ func (p *unmarshal) Generate(file *generator.FileDescriptor) { p.Out() p.P(`}`) p.P(`iNdEx-=sizeOfWire`) - p.P(`skippy, err := skip`, p.localName+`(data[iNdEx:])`) + p.P(`skippy, err := skip`, p.localName+`(dAtA[iNdEx:])`) p.P(`if err != nil {`) p.In() p.P(`return err`) @@ -1135,14 +1166,14 @@ func (p *unmarshal) Generate(file *generator.FileDescriptor) { p.P(`return `, p.ioPkg.Use(), `.ErrUnexpectedEOF`) p.Out() p.P(`}`) - p.P(protoPkg.Use(), `.AppendExtension(m, int32(fieldNum), data[iNdEx:iNdEx+skippy])`) + p.P(protoPkg.Use(), `.AppendExtension(m, int32(fieldNum), dAtA[iNdEx:iNdEx+skippy])`) p.P(`iNdEx += skippy`) p.Out() p.P(`} else {`) p.In() } p.P(`iNdEx=preIndex`) - p.P(`skippy, err := skip`, p.localName, 
`(data[iNdEx:])`) + p.P(`skippy, err := skip`, p.localName, `(dAtA[iNdEx:])`) p.P(`if err != nil {`) p.In() p.P(`return err`) @@ -1159,7 +1190,7 @@ func (p *unmarshal) Generate(file *generator.FileDescriptor) { p.Out() p.P(`}`) if gogoproto.HasUnrecognized(file.FileDescriptorProto, message.DescriptorProto) { - p.P(`m.XXX_unrecognized = append(m.XXX_unrecognized, data[iNdEx:iNdEx+skippy]...)`) + p.P(`m.XXX_unrecognized = append(m.XXX_unrecognized, dAtA[iNdEx:iNdEx+skippy]...)`) } p.P(`iNdEx += skippy`) p.Out() @@ -1206,8 +1237,8 @@ func (p *unmarshal) Generate(file *generator.FileDescriptor) { return } - p.P(`func skip` + p.localName + `(data []byte) (n int, err error) { - l := len(data) + p.P(`func skip` + p.localName + `(dAtA []byte) (n int, err error) { + l := len(dAtA) iNdEx := 0 for iNdEx < l { var wire uint64 @@ -1218,7 +1249,7 @@ func (p *unmarshal) Generate(file *generator.FileDescriptor) { if iNdEx >= l { return 0, ` + p.ioPkg.Use() + `.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ wire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1236,7 +1267,7 @@ func (p *unmarshal) Generate(file *generator.FileDescriptor) { return 0, ` + p.ioPkg.Use() + `.ErrUnexpectedEOF } iNdEx++ - if data[iNdEx-1] < 0x80 { + if dAtA[iNdEx-1] < 0x80 { break } } @@ -1253,7 +1284,7 @@ func (p *unmarshal) Generate(file *generator.FileDescriptor) { if iNdEx >= l { return 0, ` + p.ioPkg.Use() + `.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ length |= (int(b) & 0x7F) << shift if b < 0x80 { @@ -1276,7 +1307,7 @@ func (p *unmarshal) Generate(file *generator.FileDescriptor) { if iNdEx >= l { return 0, ` + p.ioPkg.Use() + `.ErrUnexpectedEOF } - b := data[iNdEx] + b := dAtA[iNdEx] iNdEx++ innerWire |= (uint64(b) & 0x7F) << shift if b < 0x80 { @@ -1287,7 +1318,7 @@ func (p *unmarshal) Generate(file *generator.FileDescriptor) { if innerWireType == 4 { break } - next, err := skip` + p.localName + `(data[start:]) + next, err := skip` + p.localName + 
`(dAtA[start:]) if err != nil { return 0, err } @@ -1315,5 +1346,4 @@ func (p *unmarshal) Generate(file *generator.FileDescriptor) { func init() { generator.RegisterPlugin(NewUnmarshal()) - generator.RegisterPlugin(NewUnsafeUnmarshal()) } diff --git a/vendor/github.com/gogo/protobuf/proto/Makefile b/vendor/github.com/gogo/protobuf/proto/Makefile index 23a6b1734..41c717573 100644 --- a/vendor/github.com/gogo/protobuf/proto/Makefile +++ b/vendor/github.com/gogo/protobuf/proto/Makefile @@ -39,5 +39,5 @@ test: install generate-test-pbs generate-test-pbs: make install make -C testdata - protoc-min-version --version="3.0.0" --proto_path=.:../../../../ --gogo_out=. proto3_proto/proto3.proto + protoc-min-version --version="3.0.0" --proto_path=.:../../../../:../protobuf --gogo_out=Mtestdata/test.proto=github.com/gogo/protobuf/proto/testdata,Mgoogle/protobuf/any.proto=github.com/gogo/protobuf/types:. proto3_proto/proto3.proto make diff --git a/vendor/github.com/gogo/protobuf/proto/all_test.go b/vendor/github.com/gogo/protobuf/proto/all_test.go index 8c97b2b5e..b5f8709d8 100644 --- a/vendor/github.com/gogo/protobuf/proto/all_test.go +++ b/vendor/github.com/gogo/protobuf/proto/all_test.go @@ -420,7 +420,7 @@ func TestMarshalerEncoding(t *testing.T) { name string m Message want []byte - wantErr error + errType reflect.Type }{ { name: "Marshaler that fails", @@ -428,9 +428,11 @@ func TestMarshalerEncoding(t *testing.T) { err: errors.New("some marshal err"), b: []byte{5, 6, 7}, }, - // Since there's an error, nothing should be written to buffer. - want: nil, - wantErr: errors.New("some marshal err"), + // Since the Marshal method returned bytes, they should be written to the + // buffer. (For efficiency, we assume that Marshal implementations are + // always correct w.r.t. RequiredNotSetError and output.) 
+ want: []byte{5, 6, 7}, + errType: reflect.TypeOf(errors.New("some marshal err")), }, { name: "Marshaler that fails with RequiredNotSetError", @@ -446,30 +448,37 @@ func TestMarshalerEncoding(t *testing.T) { 10, 3, // for &msgWithFakeMarshaler 5, 6, 7, // for &fakeMarshaler }, - wantErr: &RequiredNotSetError{}, + errType: reflect.TypeOf(&RequiredNotSetError{}), }, { name: "Marshaler that succeeds", m: &fakeMarshaler{ b: []byte{0, 1, 2, 3, 4, 127, 255}, }, - want: []byte{0, 1, 2, 3, 4, 127, 255}, - wantErr: nil, + want: []byte{0, 1, 2, 3, 4, 127, 255}, }, } for _, test := range tests { b := NewBuffer(nil) err := b.Marshal(test.m) - if _, ok := err.(*RequiredNotSetError); ok { - // We're not in package proto, so we can only assert the type in this case. - err = &RequiredNotSetError{} - } - if !reflect.DeepEqual(test.wantErr, err) { - t.Errorf("%s: got err %v wanted %v", test.name, err, test.wantErr) + if reflect.TypeOf(err) != test.errType { + t.Errorf("%s: got err %T(%v) wanted %T", test.name, err, err, test.errType) } if !reflect.DeepEqual(test.want, b.Bytes()) { t.Errorf("%s: got bytes %v wanted %v", test.name, b.Bytes(), test.want) } + if size := Size(test.m); size != len(b.Bytes()) { + t.Errorf("%s: Size(_) = %v, but marshaled to %v bytes", test.name, size, len(b.Bytes())) + } + + m, mErr := Marshal(test.m) + if !bytes.Equal(b.Bytes(), m) { + t.Errorf("%s: Marshal returned %v, but (*Buffer).Marshal wrote %v", test.name, m, b.Bytes()) + } + if !reflect.DeepEqual(err, mErr) { + t.Errorf("%s: Marshal err = %q, but (*Buffer).Marshal returned %q", + test.name, fmt.Sprint(mErr), fmt.Sprint(err)) + } } } @@ -1354,7 +1363,7 @@ func TestTypedNilMarshal(t *testing.T) { } { - m := &Communique{Union: &Communique_Msg{nil}} + m := &Communique{Union: &Communique_Msg{Msg: nil}} if _, err := Marshal(m); err == nil || err == ErrNil { t.Errorf("Marshal(%#v): got %v, want errOneofHasNil", m, err) } @@ -1839,42 +1848,42 @@ func TestRequiredNotSetError(t *testing.T) { "b8067f" // 
field 103, encoding 0, 0x7f zigzag64 o := old() - bytes, err := Marshal(pb) + mbytes, err := Marshal(pb) if _, ok := err.(*RequiredNotSetError); !ok { fmt.Printf("marshal-1 err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) + o.DebugPrint("", mbytes) t.Fatalf("expected = %s", expected) } if strings.Index(err.Error(), "RequiredField.Label") < 0 { t.Errorf("marshal-1 wrong err msg: %v", err) } - if !equal(bytes, expected, t) { - o.DebugPrint("neq 1", bytes) + if !equal(mbytes, expected, t) { + o.DebugPrint("neq 1", mbytes) t.Fatalf("expected = %s", expected) } // Now test Unmarshal by recreating the original buffer. pbd := new(GoTest) - err = Unmarshal(bytes, pbd) + err = Unmarshal(mbytes, pbd) if _, ok := err.(*RequiredNotSetError); !ok { t.Fatalf("unmarshal err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) + o.DebugPrint("", mbytes) t.Fatalf("string = %s", expected) } if strings.Index(err.Error(), "RequiredField.{Unknown}") < 0 { t.Errorf("unmarshal wrong err msg: %v", err) } - bytes, err = Marshal(pbd) + mbytes, err = Marshal(pbd) if _, ok := err.(*RequiredNotSetError); !ok { t.Errorf("marshal-2 err = %v, want *RequiredNotSetError", err) - o.DebugPrint("", bytes) + o.DebugPrint("", mbytes) t.Fatalf("string = %s", expected) } if strings.Index(err.Error(), "RequiredField.Label") < 0 { t.Errorf("marshal-2 wrong err msg: %v", err) } - if !equal(bytes, expected, t) { - o.DebugPrint("neq 2", bytes) + if !equal(mbytes, expected, t) { + o.DebugPrint("neq 2", mbytes) t.Fatalf("string = %s", expected) } } @@ -1973,14 +1982,54 @@ func TestMapFieldRoundTrips(t *testing.T) { } func TestMapFieldWithNil(t *testing.T) { - m := &MessageWithMap{ + m1 := &MessageWithMap{ MsgMapping: map[int64]*FloatingPoint{ 1: nil, }, } - b, err := Marshal(m) - if err == nil { - t.Fatalf("Marshal of bad map should have failed, got these bytes: %v", b) + b, err := Marshal(m1) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + m2 := new(MessageWithMap) + if 
err := Unmarshal(b, m2); err != nil { + t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b) + } + if v, ok := m2.MsgMapping[1]; !ok { + t.Error("msg_mapping[1] not present") + } else if v != nil { + t.Errorf("msg_mapping[1] not nil: %v", v) + } +} + +func TestMapFieldWithNilBytes(t *testing.T) { + m1 := &MessageWithMap{ + ByteMapping: map[bool][]byte{ + false: {}, + true: nil, + }, + } + n := Size(m1) + b, err := Marshal(m1) + if err != nil { + t.Fatalf("Marshal: %v", err) + } + if n != len(b) { + t.Errorf("Size(m1) = %d; want len(Marshal(m1)) = %d", n, len(b)) + } + m2 := new(MessageWithMap) + if err := Unmarshal(b, m2); err != nil { + t.Fatalf("Unmarshal: %v, got these bytes: %v", err, b) + } + if v, ok := m2.ByteMapping[false]; !ok { + t.Error("byte_mapping[false] not present") + } else if len(v) != 0 { + t.Errorf("byte_mapping[false] not empty: %#v", v) + } + if v, ok := m2.ByteMapping[true]; !ok { + t.Error("byte_mapping[true] not present") + } else if len(v) != 0 { + t.Errorf("byte_mapping[true] not empty: %#v", v) } } @@ -2029,7 +2078,7 @@ func TestOneof(t *testing.T) { } m = &Communique{ - Union: &Communique_Name{"Barry"}, + Union: &Communique_Name{Name: "Barry"}, } // Round-trip. @@ -2052,7 +2101,7 @@ func TestOneof(t *testing.T) { } // Let's try with a message in the oneof. - m.Union = &Communique_Msg{&Strings{StringField: String("deep deep string")}} + m.Union = &Communique_Msg{Msg: &Strings{StringField: String("deep deep string")}} b, err = Marshal(m) if err != nil { t.Fatalf("Marshal of message with oneof set to message: %v", err) diff --git a/vendor/github.com/gogo/protobuf/proto/any_test.go b/vendor/github.com/gogo/protobuf/proto/any_test.go new file mode 100644 index 000000000..f098d8287 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/any_test.go @@ -0,0 +1,300 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto_test + +import ( + "strings" + "testing" + + "github.com/gogo/protobuf/proto" + + pb "github.com/gogo/protobuf/proto/proto3_proto" + testpb "github.com/gogo/protobuf/proto/testdata" + "github.com/gogo/protobuf/types" +) + +var ( + expandedMarshaler = proto.TextMarshaler{ExpandAny: true} + expandedCompactMarshaler = proto.TextMarshaler{Compact: true, ExpandAny: true} +) + +// anyEqual reports whether two messages which may be google.protobuf.Any or may +// contain google.protobuf.Any fields are equal. We can't use proto.Equal for +// comparison, because semantically equivalent messages may be marshaled to +// binary in different tag order. Instead, trust that TextMarshaler with +// ExpandAny option works and compare the text marshaling results. +func anyEqual(got, want proto.Message) bool { + // if messages are proto.Equal, no need to marshal. + if proto.Equal(got, want) { + return true + } + g := expandedMarshaler.Text(got) + w := expandedMarshaler.Text(want) + return g == w +} + +type golden struct { + m proto.Message + t, c string +} + +var goldenMessages = makeGolden() + +func makeGolden() []golden { + nested := &pb.Nested{Bunny: "Monty"} + nb, err := proto.Marshal(nested) + if err != nil { + panic(err) + } + m1 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &types.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb}, + } + m2 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &types.Any{TypeUrl: "http://[::1]/type.googleapis.com/" + proto.MessageName(nested), Value: nb}, + } + m3 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &types.Any{TypeUrl: `type.googleapis.com/"/` + proto.MessageName(nested), Value: nb}, + } + m4 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &types.Any{TypeUrl: "type.googleapis.com/a/path/" + proto.MessageName(nested), Value: nb}, + } + m5 := &types.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(nested), Value: nb} + + any1 := 
&testpb.MyMessage{Count: proto.Int32(47), Name: proto.String("David")} + proto.SetExtension(any1, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("foo")}) + proto.SetExtension(any1, testpb.E_Ext_Text, proto.String("bar")) + any1b, err := proto.Marshal(any1) + if err != nil { + panic(err) + } + any2 := &testpb.MyMessage{Count: proto.Int32(42), Bikeshed: testpb.MyMessage_GREEN.Enum(), RepBytes: [][]byte{[]byte("roboto")}} + proto.SetExtension(any2, testpb.E_Ext_More, &testpb.Ext{Data: proto.String("baz")}) + any2b, err := proto.Marshal(any2) + if err != nil { + panic(err) + } + m6 := &pb.Message{ + Name: "David", + ResultCount: 47, + Anything: &types.Any{TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b}, + ManyThings: []*types.Any{ + {TypeUrl: "type.googleapis.com/" + proto.MessageName(any2), Value: any2b}, + {TypeUrl: "type.googleapis.com/" + proto.MessageName(any1), Value: any1b}, + }, + } + + const ( + m1Golden = ` +name: "David" +result_count: 47 +anything: < + [type.googleapis.com/proto3_proto.Nested]: < + bunny: "Monty" + > +> +` + m2Golden = ` +name: "David" +result_count: 47 +anything: < + ["http://[::1]/type.googleapis.com/proto3_proto.Nested"]: < + bunny: "Monty" + > +> +` + m3Golden = ` +name: "David" +result_count: 47 +anything: < + ["type.googleapis.com/\"/proto3_proto.Nested"]: < + bunny: "Monty" + > +> +` + m4Golden = ` +name: "David" +result_count: 47 +anything: < + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Monty" + > +> +` + m5Golden = ` +[type.googleapis.com/proto3_proto.Nested]: < + bunny: "Monty" +> +` + m6Golden = ` +name: "David" +result_count: 47 +anything: < + [type.googleapis.com/testdata.MyMessage]: < + count: 47 + name: "David" + [testdata.Ext.more]: < + data: "foo" + > + [testdata.Ext.text]: "bar" + > +> +many_things: < + [type.googleapis.com/testdata.MyMessage]: < + count: 42 + bikeshed: GREEN + rep_bytes: "roboto" + [testdata.Ext.more]: < + data: "baz" + > + > +> +many_things: < + 
[type.googleapis.com/testdata.MyMessage]: < + count: 47 + name: "David" + [testdata.Ext.more]: < + data: "foo" + > + [testdata.Ext.text]: "bar" + > +> +` + ) + return []golden{ + {m1, strings.TrimSpace(m1Golden) + "\n", strings.TrimSpace(compact(m1Golden)) + " "}, + {m2, strings.TrimSpace(m2Golden) + "\n", strings.TrimSpace(compact(m2Golden)) + " "}, + {m3, strings.TrimSpace(m3Golden) + "\n", strings.TrimSpace(compact(m3Golden)) + " "}, + {m4, strings.TrimSpace(m4Golden) + "\n", strings.TrimSpace(compact(m4Golden)) + " "}, + {m5, strings.TrimSpace(m5Golden) + "\n", strings.TrimSpace(compact(m5Golden)) + " "}, + {m6, strings.TrimSpace(m6Golden) + "\n", strings.TrimSpace(compact(m6Golden)) + " "}, + } +} + +func TestMarshalGolden(t *testing.T) { + for _, tt := range goldenMessages { + if got, want := expandedMarshaler.Text(tt.m), tt.t; got != want { + t.Errorf("message %v: got:\n%s\nwant:\n%s", tt.m, got, want) + } + if got, want := expandedCompactMarshaler.Text(tt.m), tt.c; got != want { + t.Errorf("message %v: got:\n`%s`\nwant:\n`%s`", tt.m, got, want) + } + } +} + +func TestUnmarshalGolden(t *testing.T) { + for _, tt := range goldenMessages { + want := tt.m + got := proto.Clone(tt.m) + got.Reset() + if err := proto.UnmarshalText(tt.t, got); err != nil { + t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.t, err) + } + if !anyEqual(got, want) { + t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.t, got, want) + } + got.Reset() + if err := proto.UnmarshalText(tt.c, got); err != nil { + t.Errorf("failed to unmarshal\n%s\nerror: %v", tt.c, err) + } + if !anyEqual(got, want) { + t.Errorf("message:\n%s\ngot:\n%s\nwant:\n%s", tt.c, got, want) + } + } +} + +func TestMarshalUnknownAny(t *testing.T) { + m := &pb.Message{ + Anything: &types.Any{ + TypeUrl: "foo", + Value: []byte("bar"), + }, + } + want := `anything: < + type_url: "foo" + value: "bar" +> +` + got := expandedMarshaler.Text(m) + if got != want { + t.Errorf("got\n`%s`\nwant\n`%s`", got, want) + } +} + +func 
TestAmbiguousAny(t *testing.T) { + pb := &types.Any{} + err := proto.UnmarshalText(` + type_url: "ttt/proto3_proto.Nested" + value: "\n\x05Monty" + `, pb) + t.Logf("result: %v (error: %v)", expandedMarshaler.Text(pb), err) + if err != nil { + t.Errorf("failed to parse ambiguous Any message: %v", err) + } +} + +func TestUnmarshalOverwriteAny(t *testing.T) { + pb := &types.Any{} + err := proto.UnmarshalText(` + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Monty" + > + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Rabbit of Caerbannog" + > + `, pb) + want := `line 7: Any message unpacked multiple times, or "type_url" already set` + if err.Error() != want { + t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want) + } +} + +func TestUnmarshalAnyMixAndMatch(t *testing.T) { + pb := &types.Any{} + err := proto.UnmarshalText(` + value: "\n\x05Monty" + [type.googleapis.com/a/path/proto3_proto.Nested]: < + bunny: "Rabbit of Caerbannog" + > + `, pb) + want := `line 5: Any message unpacked multiple times, or "value" already set` + if err.Error() != want { + t.Errorf("incorrect error.\nHave: %v\nWant: %v", err.Error(), want) + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/decode.go b/vendor/github.com/gogo/protobuf/proto/decode.go index 0d6634cc0..737f2731d 100644 --- a/vendor/github.com/gogo/protobuf/proto/decode.go +++ b/vendor/github.com/gogo/protobuf/proto/decode.go @@ -61,7 +61,6 @@ var ErrInternalBadWireType = errors.New("proto: internal error: bad wiretype for // int32, int64, uint32, uint64, bool, and enum // protocol buffer types. func DecodeVarint(buf []byte) (x uint64, n int) { - // x, n already 0 for shift := uint(0); shift < 64; shift += 7 { if n >= len(buf) { return 0, 0 @@ -78,13 +77,7 @@ func DecodeVarint(buf []byte) (x uint64, n int) { return 0, 0 } -// DecodeVarint reads a varint-encoded integer from the Buffer. 
-// This is the format for the -// int32, int64, uint32, uint64, bool, and enum -// protocol buffer types. -func (p *Buffer) DecodeVarint() (x uint64, err error) { - // x, err already 0 - +func (p *Buffer) decodeVarintSlow() (x uint64, err error) { i := p.index l := len(p.buf) @@ -107,6 +100,107 @@ func (p *Buffer) DecodeVarint() (x uint64, err error) { return } +// DecodeVarint reads a varint-encoded integer from the Buffer. +// This is the format for the +// int32, int64, uint32, uint64, bool, and enum +// protocol buffer types. +func (p *Buffer) DecodeVarint() (x uint64, err error) { + i := p.index + buf := p.buf + + if i >= len(buf) { + return 0, io.ErrUnexpectedEOF + } else if buf[i] < 0x80 { + p.index++ + return uint64(buf[i]), nil + } else if len(buf)-i < 10 { + return p.decodeVarintSlow() + } + + var b uint64 + // we already checked the first byte + x = uint64(buf[i]) - 0x80 + i++ + + b = uint64(buf[i]) + i++ + x += b << 7 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 7 + + b = uint64(buf[i]) + i++ + x += b << 14 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 14 + + b = uint64(buf[i]) + i++ + x += b << 21 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 21 + + b = uint64(buf[i]) + i++ + x += b << 28 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 28 + + b = uint64(buf[i]) + i++ + x += b << 35 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 35 + + b = uint64(buf[i]) + i++ + x += b << 42 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 42 + + b = uint64(buf[i]) + i++ + x += b << 49 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 49 + + b = uint64(buf[i]) + i++ + x += b << 56 + if b&0x80 == 0 { + goto done + } + x -= 0x80 << 56 + + b = uint64(buf[i]) + i++ + x += b << 63 + if b&0x80 == 0 { + goto done + } + // x -= 0x80 << 63 // Always zero. + + return 0, errOverflow + +done: + p.index = i + return x, nil +} + // DecodeFixed64 reads a 64-bit integer from the Buffer. 
// This is the format for the // fixed64, sfixed64, and double protocol buffer types. @@ -340,6 +434,8 @@ func (p *Buffer) DecodeGroup(pb Message) error { // Buffer and places the decoded result in pb. If the struct // underlying pb does not match the data in the buffer, the results can be // unpredictable. +// +// Unlike proto.Unmarshal, this does not reset pb before starting to unmarshal. func (p *Buffer) Unmarshal(pb Message) error { // If the object can unmarshal itself, let it. if u, ok := pb.(Unmarshaler); ok { diff --git a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go index 603dabec3..6fb74de4c 100644 --- a/vendor/github.com/gogo/protobuf/proto/decode_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/decode_gogo.go @@ -1,5 +1,7 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -96,7 +98,7 @@ func setPtrCustomType(base structPointer, f field, v interface{}) { if v == nil { return } - structPointer_SetStructPointer(base, f, structPointer(reflect.ValueOf(v).Pointer())) + structPointer_SetStructPointer(base, f, toStructPointer(reflect.ValueOf(v))) } func setCustomType(base structPointer, f field, value interface{}) { @@ -163,7 +165,8 @@ func (o *Buffer) dec_custom_slice_bytes(p *Properties, base structPointer) error } newBas := appendStructPointer(base, p.field, p.ctype) - setCustomType(newBas, 0, custom) + var zero field + setCustomType(newBas, zero, custom) return nil } diff --git a/vendor/github.com/gogo/protobuf/proto/decode_test.go b/vendor/github.com/gogo/protobuf/proto/decode_test.go new file mode 100644 index 000000000..64d4decd9 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/decode_test.go @@ -0,0 +1,262 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build go1.7 + +package proto_test + +import ( + "testing" + + "github.com/gogo/protobuf/proto" + tpb "github.com/gogo/protobuf/proto/proto3_proto" +) + +var ( + bytesBlackhole []byte + msgBlackhole = new(tpb.Message) +) + +// Disabled this Benchmark because it is using features (b.Run) from go1.7 and gogoprotobuf still have compatibility with go1.5 +// BenchmarkVarint32ArraySmall shows the performance on an array of small int32 fields (1 and +// 2 bytes long). +// func BenchmarkVarint32ArraySmall(b *testing.B) { +// for i := uint(1); i <= 10; i++ { +// dist := genInt32Dist([7]int{0, 3, 1}, 1< maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. + if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. 
DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. +func durationFromProto(p *duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration. +func durationProto(d time.Duration) *duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/duration_gogo.go b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go new file mode 100644 index 000000000..18e2a5f77 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/duration_gogo.go @@ -0,0 +1,203 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +import ( + "reflect" + "time" +) + +var durationType = reflect.TypeOf((*time.Duration)(nil)).Elem() + +type duration struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *duration) Reset() { *m = duration{} } +func (*duration) ProtoMessage() {} +func (*duration) String() string { return "duration" } + +func init() { + RegisterType((*duration)(nil), "gogo.protobuf.proto.duration") +} + +func (o *Buffer) decDuration() (time.Duration, error) { + b, err := o.DecodeRawBytes(true) + if err != nil { + return 0, err + } + dproto := &duration{} + if err := Unmarshal(b, dproto); err != nil { + return 0, err + } + return durationFromProto(dproto) +} + +func (o *Buffer) dec_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + word64_Set(structPointer_Word64(base, p.field), o, uint64(d)) + return nil +} + +func (o *Buffer) dec_ref_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + 
word64Val_Set(structPointer_Word64Val(base, p.field), o, uint64(d)) + return nil +} + +func (o *Buffer) dec_slice_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))) + var zero field + setPtrCustomType(newBas, zero, &d) + return nil +} + +func (o *Buffer) dec_slice_ref_duration(p *Properties, base structPointer) error { + d, err := o.decDuration() + if err != nil { + return err + } + structPointer_Word64Slice(base, p.field).Append(uint64(d)) + return nil +} + +func size_duration(p *Properties, base structPointer) (n int) { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + dur := structPointer_Interface(structp, durationType).(*time.Duration) + d := durationProto(*dur) + size := Size(d) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_duration(p *Properties, base structPointer) error { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + dur := structPointer_Interface(structp, durationType).(*time.Duration) + d := durationProto(*dur) + data, err := Marshal(d) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_ref_duration(p *Properties, base structPointer) (n int) { + dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) + d := durationProto(*dur) + size := Size(d) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_ref_duration(p *Properties, base structPointer) error { + dur := structPointer_InterfaceAt(base, p.field, durationType).(*time.Duration) + d := durationProto(*dur) + data, err := Marshal(d) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeRawBytes(data) + return nil +} + +func size_slice_duration(p *Properties, base structPointer) (n int) { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + if durs[i] == nil { + return 0 + } + dproto := durationProto(*durs[i]) + size := Size(dproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_duration(p *Properties, base structPointer) error { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(durationType))).(*[]*time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + if durs[i] == nil { + return errRepeatedHasNil + } + dproto := durationProto(*durs[i]) + data, err := Marshal(dproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_slice_ref_duration(p *Properties, base structPointer) (n int) { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + dproto := durationProto(durs[i]) + size := Size(dproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_ref_duration(p *Properties, base structPointer) error { + pdurs := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(durationType)).(*[]time.Duration) + durs := *pdurs + for i := 0; i < len(durs); i++ { + dproto := durationProto(durs[i]) + data, err := Marshal(dproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeRawBytes(data) + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/encode.go b/vendor/github.com/gogo/protobuf/proto/encode.go index 1b5578d9d..8b84d1b22 100644 --- a/vendor/github.com/gogo/protobuf/proto/encode.go +++ b/vendor/github.com/gogo/protobuf/proto/encode.go @@ -174,11 +174,11 @@ func sizeFixed32(x uint64) int { // This is the format used for the sint64 protocol buffer type. func (p *Buffer) EncodeZigzag64(x uint64) error { // use signed number to get arithmetic right shift. - return p.EncodeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return p.EncodeVarint((x << 1) ^ uint64((int64(x) >> 63))) } func sizeZigzag64(x uint64) int { - return sizeVarint(uint64((x << 1) ^ uint64((int64(x) >> 63)))) + return sizeVarint((x << 1) ^ uint64((int64(x) >> 63))) } // EncodeZigzag32 writes a zigzag-encoded 32-bit integer @@ -234,10 +234,6 @@ func Marshal(pb Message) ([]byte, error) { } p := NewBuffer(nil) err := p.Marshal(pb) - var state errorState - if err != nil && !state.shouldContinue(err, nil) { - return nil, err - } if p.buf == nil && err == nil { // Return a non-nil slice on success. return []byte{}, nil @@ -266,11 +262,8 @@ func (p *Buffer) Marshal(pb Message) error { // Can the object marshal itself? if m, ok := pb.(Marshaler); ok { data, err := m.Marshal() - if err != nil { - return err - } p.buf = append(p.buf, data...) - return nil + return err } t, base, err := getbase(pb) @@ -282,7 +275,7 @@ func (p *Buffer) Marshal(pb Message) error { } if collectStats { - stats.Encode++ + (stats).Encode++ // Parens are to work around a goimports bug. } if len(p.buf) > maxMarshalSize { @@ -309,7 +302,7 @@ func Size(pb Message) (n int) { } if collectStats { - stats.Size++ + (stats).Size++ // Parens are to work around a goimports bug. 
} return @@ -1014,7 +1007,6 @@ func size_slice_struct_message(p *Properties, base structPointer) (n int) { if p.isMarshaler { m := structPointer_Interface(structp, p.stype).(Marshaler) data, _ := m.Marshal() - n += len(p.tagcode) n += sizeRawBytes(data) continue } @@ -1083,10 +1075,17 @@ func (o *Buffer) enc_map(p *Properties, base structPointer) error { func (o *Buffer) enc_exts(p *Properties, base structPointer) error { exts := structPointer_Extensions(base, p.field) - if err := encodeExtensions(exts); err != nil { + + v, mu := exts.extensionsRead() + if v == nil { + return nil + } + + mu.Lock() + defer mu.Unlock() + if err := encodeExtensionsMap(v); err != nil { return err } - v, _ := exts.extensionsRead() return o.enc_map_body(v) } @@ -1149,7 +1148,7 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { if err := p.mkeyprop.enc(o, p.mkeyprop, keybase); err != nil { return err } - if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil { + if err := p.mvalprop.enc(o, p.mvalprop, valbase); err != nil && err != ErrNil { return err } return nil @@ -1159,11 +1158,6 @@ func (o *Buffer) enc_new_map(p *Properties, base structPointer) error { for _, key := range v.MapKeys() { val := v.MapIndex(key) - // The only illegal map entry values are nil message pointers. - if val.Kind() == reflect.Ptr && val.IsNil() { - return errors.New("proto: map has nil element") - } - keycopy.Set(key) valcopy.Set(val) diff --git a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go index f77cfb1ee..32111b7f4 100644 --- a/vendor/github.com/gogo/protobuf/proto/encode_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/encode_gogo.go @@ -1,7 +1,7 @@ -// Extensions for Protocol Buffers to create more go like structures. +// Protocol Buffers for Go with Gadgets // -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. 
-// http://github.com/gogo/protobuf/gogoproto +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf // // Go support for Protocol Buffers - Google's data interchange format // @@ -196,12 +196,10 @@ func size_ref_struct_message(p *Properties, base structPointer) int { // Encode a slice of references to message struct pointers ([]struct). func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) error { var state errorState - ss := structPointer_GetStructPointer(base, p.field) - ss1 := structPointer_GetRefStructPointer(ss, field(0)) - size := p.stype.Size() - l := structPointer_Len(base, p.field) + ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) + l := ss.Len() for i := 0; i < l; i++ { - structp := structPointer_Add(ss1, field(uintptr(i)*size)) + structp := ss.Index(i) if structPointer_IsNil(structp) { return errRepeatedHasNil } @@ -233,13 +231,11 @@ func (o *Buffer) enc_slice_ref_struct_message(p *Properties, base structPointer) //TODO this is only copied, please fix this func size_slice_ref_struct_message(p *Properties, base structPointer) (n int) { - ss := structPointer_GetStructPointer(base, p.field) - ss1 := structPointer_GetRefStructPointer(ss, field(0)) - size := p.stype.Size() - l := structPointer_Len(base, p.field) + ss := structPointer_StructRefSlice(base, p.field, p.stype.Size()) + l := ss.Len() n += l * len(p.tagcode) for i := 0; i < l; i++ { - structp := structPointer_Add(ss1, field(uintptr(i)*size)) + structp := ss.Index(i) if structPointer_IsNil(structp) { return // return the size up to this point } diff --git a/vendor/github.com/gogo/protobuf/proto/encode_test.go b/vendor/github.com/gogo/protobuf/proto/encode_test.go new file mode 100644 index 000000000..2176b894d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/encode_test.go @@ -0,0 +1,84 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2010 The Go Authors. 
All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// +build go1.7 + +package proto_test + +import ( + "testing" + + "github.com/gogo/protobuf/proto" + tpb "github.com/gogo/protobuf/proto/proto3_proto" +) + +var ( + blackhole []byte +) + +// Disabled this Benchmark because it is using features (b.Run) from go1.7 and gogoprotobuf still have compatibility with go1.5 +// BenchmarkAny creates increasingly large arbitrary Any messages. The type is always the +// same. 
+// func BenchmarkAny(b *testing.B) { +// data := make([]byte, 1<<20) +// quantum := 1 << 10 +// for i := uint(0); i <= 10; i++ { +// b.Run(strconv.Itoa(quantum< 0; n-- { + go func() { + _, err := proto.Marshal(m) + errChan <- err + }() + } + for i := 0; i < 3; i++ { + err := <-errChan + if err != nil { + t.Fatal(err) + } + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/lib.go b/vendor/github.com/gogo/protobuf/proto/lib.go index 2c30d7095..c98d73da4 100644 --- a/vendor/github.com/gogo/protobuf/proto/lib.go +++ b/vendor/github.com/gogo/protobuf/proto/lib.go @@ -73,7 +73,6 @@ for a protocol buffer variable v: When the .proto file specifies `syntax="proto3"`, there are some differences: - Non-repeated fields of non-message type are values instead of pointers. - - Getters are only generated for message and oneof fields. - Enum types do not get an Enum method. The simplest way to describe this is to see an example. @@ -308,7 +307,7 @@ func GetStats() Stats { return stats } // temporary Buffer and are fine for most applications. type Buffer struct { buf []byte // encode/decode byte stream - index int // write point + index int // read point // pools of basic types to amortize allocation. bools []bool diff --git a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go index a6c2c06b2..4b4f7c909 100644 --- a/vendor/github.com/gogo/protobuf/proto/lib_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/lib_gogo.go @@ -1,5 +1,7 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/gogo/protobuf/proto/map_test.go b/vendor/github.com/gogo/protobuf/proto/map_test.go new file mode 100644 index 000000000..18b946d00 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/map_test.go @@ -0,0 +1,46 @@ +package proto_test + +import ( + "fmt" + "testing" + + "github.com/gogo/protobuf/proto" + ppb "github.com/gogo/protobuf/proto/proto3_proto" +) + +func marshalled() []byte { + m := &ppb.IntMaps{} + for i := 0; i < 1000; i++ { + m.Maps = append(m.Maps, &ppb.IntMap{ + Rtt: map[int32]int32{1: 2}, + }) + } + b, err := proto.Marshal(m) + if err != nil { + panic(fmt.Sprintf("Can't marshal %+v: %v", m, err)) + } + return b +} + +func BenchmarkConcurrentMapUnmarshal(b *testing.B) { + in := marshalled() + b.RunParallel(func(pb *testing.PB) { + for pb.Next() { + var out ppb.IntMaps + if err := proto.Unmarshal(in, &out); err != nil { + b.Errorf("Can't unmarshal ppb.IntMaps: %v", err) + } + } + }) +} + +func BenchmarkSequentialMapUnmarshal(b *testing.B) { + in := marshalled() + b.ResetTimer() + for i := 0; i < b.N; i++ { + var out ppb.IntMaps + if err := proto.Unmarshal(in, &out); err != nil { + b.Errorf("Can't unmarshal ppb.IntMaps: %v", err) + } + } +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go new file mode 100644 index 000000000..1763a5f22 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/pointer_reflect_gogo.go @@ -0,0 +1,85 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +// +build appengine js + +package proto + +import ( + "reflect" +) + +func structPointer_FieldPointer(p structPointer, f field) structPointer { + panic("not implemented") +} + +func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { + panic("not implemented") +} + +func structPointer_InterfaceAt(p structPointer, f field, t reflect.Type) interface{} { + panic("not implemented") +} + +func structPointer_InterfaceRef(p structPointer, f field, t reflect.Type) interface{} { + panic("not implemented") +} + +func structPointer_GetRefStructPointer(p structPointer, f field) structPointer { + panic("not implemented") +} + +func structPointer_Add(p structPointer, size field) structPointer { + panic("not implemented") +} + +func structPointer_Len(p structPointer, f field) int { + panic("not implemented") +} + +func structPointer_GetSliceHeader(p structPointer, f field) *reflect.SliceHeader { + panic("not implemented") +} + +func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { + panic("not implemented") +} + +func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { + panic("not implemented") +} + +type structRefSlice struct{} + +func (v *structRefSlice) Len() int { + panic("not implemented") +} + +func (v *structRefSlice) Index(i int) structPointer { + panic("not implemented") +} diff --git a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go index 6bc85fa98..f156a29f0 100644 --- a/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/pointer_unsafe_gogo.go @@ -1,5 +1,7 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -24,7 +26,7 @@ // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. -// +build !appengine +// +build !appengine,!js // This file contains the implementation of the proto field accesses using package unsafe. @@ -70,16 +72,13 @@ func structPointer_Copy(oldptr structPointer, newptr structPointer, size int) { func appendStructPointer(base structPointer, f field, typ reflect.Type) structPointer { size := typ.Elem().Size() - oldHeader := structPointer_GetSliceHeader(base, f) - newLen := oldHeader.Len + 1 - slice := reflect.MakeSlice(typ, newLen, newLen) - bas := toStructPointer(slice) - for i := 0; i < oldHeader.Len; i++ { - newElemptr := uintptr(bas) + uintptr(i)*size - oldElemptr := oldHeader.Data + uintptr(i)*size - copyUintPtr(oldElemptr, newElemptr, int(size)) - } + oldHeader := structPointer_GetSliceHeader(base, f) + oldSlice := reflect.NewAt(typ, unsafe.Pointer(oldHeader)).Elem() + newLen := oldHeader.Len + 1 + newSlice := reflect.MakeSlice(typ, newLen, newLen) + reflect.Copy(newSlice, oldSlice) + bas := toStructPointer(newSlice) oldHeader.Data = uintptr(bas) oldHeader.Len = newLen oldHeader.Cap = newLen @@ -106,3 +105,24 @@ func structPointer_Add(p structPointer, size field) structPointer { func structPointer_Len(p structPointer, f field) int { return len(*(*[]interface{})(unsafe.Pointer(structPointer_GetRefStructPointer(p, f)))) } + +func structPointer_StructRefSlice(p structPointer, f field, size uintptr) *structRefSlice { + return &structRefSlice{p: p, f: f, size: size} +} + +// A structRefSlice represents a slice of structs (themselves submessages or groups). 
+type structRefSlice struct { + p structPointer + f field + size uintptr +} + +func (v *structRefSlice) Len() int { + return structPointer_Len(v.p, v.f) +} + +func (v *structRefSlice) Index(i int) structPointer { + ss := structPointer_GetStructPointer(v.p, v.f) + ss1 := structPointer_GetRefStructPointer(ss, 0) + return structPointer_Add(ss1, field(uintptr(i)*v.size)) +} diff --git a/vendor/github.com/gogo/protobuf/proto/properties.go b/vendor/github.com/gogo/protobuf/proto/properties.go index 4212d77e8..2a69e8862 100644 --- a/vendor/github.com/gogo/protobuf/proto/properties.go +++ b/vendor/github.com/gogo/protobuf/proto/properties.go @@ -1,7 +1,7 @@ -// Extensions for Protocol Buffers to create more go like structures. +// Protocol Buffers for Go with Gadgets // -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf // // Go support for Protocol Buffers - Google's data interchange format // @@ -190,10 +190,12 @@ type Properties struct { proto3 bool // whether this is known to be a proto3 field; set for []byte only oneof bool // whether this is a oneof field - Default string // default value - HasDefault bool // whether an explicit default was provided - CustomType string - def_uint64 uint64 + Default string // default value + HasDefault bool // whether an explicit default was provided + CustomType string + CastType string + StdTime bool + StdDuration bool enc encoder valEnc valueEncoder // set for bool and numeric types only @@ -340,6 +342,12 @@ func (p *Properties) Parse(s string) { p.OrigName = strings.Split(f, "=")[1] case strings.HasPrefix(f, "customtype="): p.CustomType = strings.Split(f, "=")[1] + case strings.HasPrefix(f, "casttype="): + p.CastType = strings.Split(f, "=")[1] + case f == "stdtime": + p.StdTime = true + case f == "stdduration": + p.StdDuration = true } } } @@ -355,11 +363,22 @@ func (p 
*Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock p.enc = nil p.dec = nil p.size = nil - if len(p.CustomType) > 0 { + isMap := typ.Kind() == reflect.Map + if len(p.CustomType) > 0 && !isMap { p.setCustomEncAndDec(typ) p.setTag(lockGetProp) return } + if p.StdTime && !isMap { + p.setTimeEncAndDec(typ) + p.setTag(lockGetProp) + return + } + if p.StdDuration && !isMap { + p.setDurationEncAndDec(typ) + p.setTag(lockGetProp) + return + } switch t1 := typ; t1.Kind() { default: fmt.Fprintf(os.Stderr, "proto: no coders for %v\n", t1) @@ -542,17 +561,13 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock p.dec = (*Buffer).dec_slice_int64 p.packedDec = (*Buffer).dec_slice_packed_int64 case reflect.Uint8: - p.enc = (*Buffer).enc_slice_byte p.dec = (*Buffer).dec_slice_byte - p.size = size_slice_byte - // This is a []byte, which is either a bytes field, - // or the value of a map field. In the latter case, - // we always encode an empty []byte, so we should not - // use the proto3 enc/size funcs. - // f == nil iff this is the key/value of a map field. - if p.proto3 && f != nil { + if p.proto3 { p.enc = (*Buffer).enc_proto3_slice_byte p.size = size_proto3_slice_byte + } else { + p.enc = (*Buffer).enc_slice_byte + p.size = size_slice_byte } case reflect.Float32, reflect.Float64: switch t2.Bits() { @@ -634,6 +649,10 @@ func (p *Properties) setEncAndDec(typ reflect.Type, f *reflect.StructField, lock // so we need encoders for the pointer to this type. vtype = reflect.PtrTo(vtype) } + + p.mvalprop.CustomType = p.CustomType + p.mvalprop.StdDuration = p.StdDuration + p.mvalprop.StdTime = p.StdTime p.mvalprop.init(vtype, "Value", f.Tag.Get("protobuf_val"), nil, lockGetProp) } p.setTag(lockGetProp) @@ -924,7 +943,15 @@ func RegisterType(x Message, name string) { } // MessageName returns the fully-qualified proto name for the given message type. 
-func MessageName(x Message) string { return revProtoTypes[reflect.TypeOf(x)] } +func MessageName(x Message) string { + type xname interface { + XXX_MessageName() string + } + if m, ok := x.(xname); ok { + return m.XXX_MessageName() + } + return revProtoTypes[reflect.TypeOf(x)] +} // MessageType returns the message type (pointer to struct) for a named message. func MessageType(name string) reflect.Type { return protoTypes[name] } diff --git a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go index 8daf9f776..b6b7176c5 100644 --- a/vendor/github.com/gogo/protobuf/proto/properties_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/properties_gogo.go @@ -1,5 +1,7 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -49,6 +51,51 @@ func (p *Properties) setCustomEncAndDec(typ reflect.Type) { } } +func (p *Properties) setDurationEncAndDec(typ reflect.Type) { + if p.Repeated { + if typ.Elem().Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_slice_duration + p.dec = (*Buffer).dec_slice_duration + p.size = size_slice_duration + } else { + p.enc = (*Buffer).enc_slice_ref_duration + p.dec = (*Buffer).dec_slice_ref_duration + p.size = size_slice_ref_duration + } + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_duration + p.dec = (*Buffer).dec_duration + p.size = size_duration + } else { + p.enc = (*Buffer).enc_ref_duration + p.dec = (*Buffer).dec_ref_duration + p.size = size_ref_duration + } +} + +func (p *Properties) setTimeEncAndDec(typ reflect.Type) { + if p.Repeated { + if typ.Elem().Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_slice_time + 
p.dec = (*Buffer).dec_slice_time + p.size = size_slice_time + } else { + p.enc = (*Buffer).enc_slice_ref_time + p.dec = (*Buffer).dec_slice_ref_time + p.size = size_slice_ref_time + } + } else if typ.Kind() == reflect.Ptr { + p.enc = (*Buffer).enc_time + p.dec = (*Buffer).dec_time + p.size = size_time + } else { + p.enc = (*Buffer).enc_ref_time + p.dec = (*Buffer).dec_ref_time + p.size = size_ref_time + } + +} + func (p *Properties) setSliceOfNonPointerStructs(typ reflect.Type) { t2 := typ.Elem() p.sstype = typ diff --git a/vendor/github.com/gogo/protobuf/proto/proto3_test.go b/vendor/github.com/gogo/protobuf/proto/proto3_test.go index 6f9cddc3f..75b66c179 100644 --- a/vendor/github.com/gogo/protobuf/proto/proto3_test.go +++ b/vendor/github.com/gogo/protobuf/proto/proto3_test.go @@ -93,6 +93,16 @@ func TestRoundTripProto3(t *testing.T) { } } +func TestGettersForBasicTypesExist(t *testing.T) { + var m pb.Message + if got := m.GetNested().GetBunny(); got != "" { + t.Errorf("m.GetNested().GetBunny() = %q, want empty string", got) + } + if got := m.GetNested().GetCute(); got { + t.Errorf("m.GetNested().GetCute() = %t, want false", got) + } +} + func TestProto3SetDefaults(t *testing.T) { in := &pb.Message{ Terrain: map[string]*pb.Nested{ diff --git a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go index 4fe7e0815..5a5fd93f7 100644 --- a/vendor/github.com/gogo/protobuf/proto/skip_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/skip_gogo.go @@ -1,5 +1,7 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/gogo/protobuf/proto/text.go b/vendor/github.com/gogo/protobuf/proto/text.go index ec7c75421..f609d1d45 100644 --- a/vendor/github.com/gogo/protobuf/proto/text.go +++ b/vendor/github.com/gogo/protobuf/proto/text.go @@ -1,7 +1,7 @@ -// Extensions for Protocol Buffers to create more go like structures. +// Protocol Buffers for Go with Gadgets // -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf // // Go support for Protocol Buffers - Google's data interchange format // @@ -51,6 +51,7 @@ import ( "sort" "strings" "sync" + "time" ) var ( @@ -181,7 +182,93 @@ type raw interface { Bytes() []byte } -func writeStruct(w *textWriter, sv reflect.Value) error { +func requiresQuotes(u string) bool { + // When type URL contains any characters except [0-9A-Za-z./\-]*, it must be quoted. + for _, ch := range u { + switch { + case ch == '.' || ch == '/' || ch == '_': + continue + case '0' <= ch && ch <= '9': + continue + case 'A' <= ch && ch <= 'Z': + continue + case 'a' <= ch && ch <= 'z': + continue + default: + return true + } + } + return false +} + +// isAny reports whether sv is a google.protobuf.Any message +func isAny(sv reflect.Value) bool { + type wkt interface { + XXX_WellKnownType() string + } + t, ok := sv.Addr().Interface().(wkt) + return ok && t.XXX_WellKnownType() == "Any" +} + +// writeProto3Any writes an expanded google.protobuf.Any message. +// +// It returns (false, nil) if sv value can't be unmarshaled (e.g. because +// required messages are not linked in). +// +// It returns (true, error) when sv was written in expanded format or an error +// was encountered. 
+func (tm *TextMarshaler) writeProto3Any(w *textWriter, sv reflect.Value) (bool, error) { + turl := sv.FieldByName("TypeUrl") + val := sv.FieldByName("Value") + if !turl.IsValid() || !val.IsValid() { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + b, ok := val.Interface().([]byte) + if !ok { + return true, errors.New("proto: invalid google.protobuf.Any message") + } + + parts := strings.Split(turl.String(), "/") + mt := MessageType(parts[len(parts)-1]) + if mt == nil { + return false, nil + } + m := reflect.New(mt.Elem()) + if err := Unmarshal(b, m.Interface().(Message)); err != nil { + return false, nil + } + w.Write([]byte("[")) + u := turl.String() + if requiresQuotes(u) { + writeString(w, u) + } else { + w.Write([]byte(u)) + } + if w.compact { + w.Write([]byte("]:<")) + } else { + w.Write([]byte("]: <\n")) + w.ind++ + } + if err := tm.writeStruct(w, m.Elem()); err != nil { + return true, err + } + if w.compact { + w.Write([]byte("> ")) + } else { + w.ind-- + w.Write([]byte(">\n")) + } + return true, nil +} + +func (tm *TextMarshaler) writeStruct(w *textWriter, sv reflect.Value) error { + if tm.ExpandAny && isAny(sv) { + if canExpand, err := tm.writeProto3Any(w, sv); canExpand { + return err + } + } st := sv.Type() sprops := GetProperties(st) for i := 0; i < sv.NumField(); i++ { @@ -234,10 +321,10 @@ func writeStruct(w *textWriter, sv reflect.Value) error { continue } if len(props.Enum) > 0 { - if err := writeEnum(w, v, props); err != nil { + if err := tm.writeEnum(w, v, props); err != nil { return err } - } else if err := writeAny(w, v, props); err != nil { + } else if err := tm.writeAny(w, v, props); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -279,7 +366,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := writeAny(w, key, props.mkeyprop); err != nil { + if err := tm.writeAny(w, key, props.mkeyprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil 
{ @@ -296,7 +383,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error { return err } } - if err := writeAny(w, val, props.mvalprop); err != nil { + if err := tm.writeAny(w, val, props.mvalprop); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -368,10 +455,10 @@ func writeStruct(w *textWriter, sv reflect.Value) error { } if len(props.Enum) > 0 { - if err := writeEnum(w, fv, props); err != nil { + if err := tm.writeEnum(w, fv, props); err != nil { return err } - } else if err := writeAny(w, fv, props); err != nil { + } else if err := tm.writeAny(w, fv, props); err != nil { return err } @@ -389,7 +476,7 @@ func writeStruct(w *textWriter, sv reflect.Value) error { pv.Elem().Set(sv) } if pv.Type().Implements(extensionRangeType) { - if err := writeExtensions(w, pv); err != nil { + if err := tm.writeExtensions(w, pv); err != nil { return err } } @@ -419,20 +506,56 @@ func writeRaw(w *textWriter, b []byte) error { } // writeAny writes an arbitrary field. -func writeAny(w *textWriter, v reflect.Value, props *Properties) error { +func (tm *TextMarshaler) writeAny(w *textWriter, v reflect.Value, props *Properties) error { v = reflect.Indirect(v) - if props != nil && len(props.CustomType) > 0 { - custom, ok := v.Interface().(Marshaler) - if ok { - data, err := custom.Marshal() + if props != nil { + if len(props.CustomType) > 0 { + custom, ok := v.Interface().(Marshaler) + if ok { + data, err := custom.Marshal() + if err != nil { + return err + } + if err := writeString(w, string(data)); err != nil { + return err + } + return nil + } + } else if len(props.CastType) > 0 { + if _, ok := v.Interface().(interface { + String() string + }); ok { + switch v.Kind() { + case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, + reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64: + _, err := fmt.Fprintf(w, "%d", v.Interface()) + return err + } + } + } else if props.StdTime { + t, ok := v.Interface().(time.Time) + 
if !ok { + return fmt.Errorf("stdtime is not time.Time, but %T", v.Interface()) + } + tproto, err := timestampProto(t) if err != nil { return err } - if err := writeString(w, string(data)); err != nil { - return err + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdTime = false + err = tm.writeAny(w, reflect.ValueOf(tproto), &propsCopy) + return err + } else if props.StdDuration { + d, ok := v.Interface().(time.Duration) + if !ok { + return fmt.Errorf("stdtime is not time.Duration, but %T", v.Interface()) } - return nil + dproto := durationProto(d) + propsCopy := *props // Make a copy so that this is goroutine-safe + propsCopy.StdDuration = false + err := tm.writeAny(w, reflect.ValueOf(dproto), &propsCopy) + return err } } @@ -482,15 +605,15 @@ func writeAny(w *textWriter, v reflect.Value, props *Properties) error { } } w.indent() - if tm, ok := v.Interface().(encoding.TextMarshaler); ok { - text, err := tm.MarshalText() + if etm, ok := v.Interface().(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() if err != nil { return err } if _, err = w.Write(text); err != nil { return err } - } else if err := writeStruct(w, v); err != nil { + } else if err := tm.writeStruct(w, v); err != nil { return err } w.unindent() @@ -634,7 +757,7 @@ func (s int32Slice) Swap(i, j int) { s[i], s[j] = s[j], s[i] } // writeExtensions writes all the extensions in pv. // pv is assumed to be a pointer to a protocol message struct that is extendable. -func writeExtensions(w *textWriter, pv reflect.Value) error { +func (tm *TextMarshaler) writeExtensions(w *textWriter, pv reflect.Value) error { emap := extensionMaps[pv.Type().Elem()] e := pv.Interface().(Message) @@ -689,13 +812,13 @@ func writeExtensions(w *textWriter, pv reflect.Value) error { // Repeated extensions will appear as a slice. 
if !desc.repeated() { - if err := writeExtension(w, desc.Name, pb); err != nil { + if err := tm.writeExtension(w, desc.Name, pb); err != nil { return err } } else { v := reflect.ValueOf(pb) for i := 0; i < v.Len(); i++ { - if err := writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { + if err := tm.writeExtension(w, desc.Name, v.Index(i).Interface()); err != nil { return err } } @@ -704,7 +827,7 @@ func writeExtensions(w *textWriter, pv reflect.Value) error { return nil } -func writeExtension(w *textWriter, name string, pb interface{}) error { +func (tm *TextMarshaler) writeExtension(w *textWriter, name string, pb interface{}) error { if _, err := fmt.Fprintf(w, "[%s]:", name); err != nil { return err } @@ -713,7 +836,7 @@ func writeExtension(w *textWriter, name string, pb interface{}) error { return err } } - if err := writeAny(w, reflect.ValueOf(pb), nil); err != nil { + if err := tm.writeAny(w, reflect.ValueOf(pb), nil); err != nil { return err } if err := w.WriteByte('\n'); err != nil { @@ -740,12 +863,13 @@ func (w *textWriter) writeIndent() { // TextMarshaler is a configurable text format marshaler. type TextMarshaler struct { - Compact bool // use compact text format (one line). + Compact bool // use compact text format (one line). + ExpandAny bool // expand google.protobuf.Any messages of known types } // Marshal writes a given protocol buffer in text format. // The only errors returned are from w. 
-func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error { +func (tm *TextMarshaler) Marshal(w io.Writer, pb Message) error { val := reflect.ValueOf(pb) if pb == nil || val.IsNil() { w.Write([]byte("")) @@ -760,11 +884,11 @@ func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error { aw := &textWriter{ w: ww, complete: true, - compact: m.Compact, + compact: tm.Compact, } - if tm, ok := pb.(encoding.TextMarshaler); ok { - text, err := tm.MarshalText() + if etm, ok := pb.(encoding.TextMarshaler); ok { + text, err := etm.MarshalText() if err != nil { return err } @@ -778,7 +902,7 @@ func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error { } // Dereference the received pointer so we don't have outer < and >. v := reflect.Indirect(val) - if err := writeStruct(aw, v); err != nil { + if err := tm.writeStruct(aw, v); err != nil { return err } if bw != nil { @@ -788,9 +912,9 @@ func (m *TextMarshaler) Marshal(w io.Writer, pb Message) error { } // Text is the same as Marshal, but returns the string directly. -func (m *TextMarshaler) Text(pb Message) string { +func (tm *TextMarshaler) Text(pb Message) string { var buf bytes.Buffer - m.Marshal(&buf, pb) + tm.Marshal(&buf, pb) return buf.String() } diff --git a/vendor/github.com/gogo/protobuf/proto/text_gogo.go b/vendor/github.com/gogo/protobuf/proto/text_gogo.go index cdb23373c..1d6c6aa0e 100644 --- a/vendor/github.com/gogo/protobuf/proto/text_gogo.go +++ b/vendor/github.com/gogo/protobuf/proto/text_gogo.go @@ -1,5 +1,7 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -31,10 +33,10 @@ import ( "reflect" ) -func writeEnum(w *textWriter, v reflect.Value, props *Properties) error { +func (tm *TextMarshaler) writeEnum(w *textWriter, v reflect.Value, props *Properties) error { m, ok := enumStringMaps[props.Enum] if !ok { - if err := writeAny(w, v, props); err != nil { + if err := tm.writeAny(w, v, props); err != nil { return err } } @@ -46,7 +48,7 @@ func writeEnum(w *textWriter, v reflect.Value, props *Properties) error { } s, ok := m[key] if !ok { - if err := writeAny(w, v, props); err != nil { + if err := tm.writeAny(w, v, props); err != nil { return err } } diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser.go b/vendor/github.com/gogo/protobuf/proto/text_parser.go index fd214b8f1..f1276729a 100644 --- a/vendor/github.com/gogo/protobuf/proto/text_parser.go +++ b/vendor/github.com/gogo/protobuf/proto/text_parser.go @@ -1,7 +1,7 @@ -// Extensions for Protocol Buffers to create more go like structures. +// Protocol Buffers for Go with Gadgets // -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf // // Go support for Protocol Buffers - Google's data interchange format // @@ -46,9 +46,13 @@ import ( "reflect" "strconv" "strings" + "time" "unicode/utf8" ) +// Error string emitted when deserializing Any and fields are already set +const anyRepeatedlyUnpacked = "Any message unpacked multiple times, or %q already set" + type ParseError struct { Message string Line int // 1-based line number @@ -168,7 +172,7 @@ func (p *textParser) advance() { p.cur.offset, p.cur.line = p.offset, p.line p.cur.unquoted = "" switch p.s[0] { - case '<', '>', '{', '}', ':', '[', ']', ';', ',': + case '<', '>', '{', '}', ':', '[', ']', ';', ',', '/': // Single symbol p.cur.value, p.s = p.s[0:1], p.s[1:len(p.s)] case '"', '\'': @@ -456,7 +460,10 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { fieldSet := make(map[string]bool) // A struct is a sequence of "name: value", terminated by one of // '>' or '}', or the end of the input. A name may also be - // "[extension]". + // "[extension]" or "[type/url]". + // + // The whole struct can also be an expanded Any message, like: + // [type/url] < ... struct contents ... > for { tok := p.next() if tok.err != nil { @@ -466,33 +473,74 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { break } if tok.value == "[" { - // Looks like an extension. + // Looks like an extension or an Any. // // TODO: Check whether we need to handle // namespace rooted names (e.g. ".something.Foo"). - tok = p.next() - if tok.err != nil { - return tok.err + extName, err := p.consumeExtName() + if err != nil { + return err } + + if s := strings.LastIndex(extName, "/"); s >= 0 { + // If it contains a slash, it's an Any type URL. 
+ messageName := extName[s+1:] + mt := MessageType(messageName) + if mt == nil { + return p.errorf("unrecognized message %q in google.protobuf.Any", messageName) + } + tok = p.next() + if tok.err != nil { + return tok.err + } + // consume an optional colon + if tok.value == ":" { + tok = p.next() + if tok.err != nil { + return tok.err + } + } + var terminator string + switch tok.value { + case "<": + terminator = ">" + case "{": + terminator = "}" + default: + return p.errorf("expected '{' or '<', found %q", tok.value) + } + v := reflect.New(mt.Elem()) + if pe := p.readStruct(v.Elem(), terminator); pe != nil { + return pe + } + b, err := Marshal(v.Interface().(Message)) + if err != nil { + return p.errorf("failed to marshal message of type %q: %v", messageName, err) + } + if fieldSet["type_url"] { + return p.errorf(anyRepeatedlyUnpacked, "type_url") + } + if fieldSet["value"] { + return p.errorf(anyRepeatedlyUnpacked, "value") + } + sv.FieldByName("TypeUrl").SetString(extName) + sv.FieldByName("Value").SetBytes(b) + fieldSet["type_url"] = true + fieldSet["value"] = true + continue + } + var desc *ExtensionDesc // This could be faster, but it's functional. // TODO: Do something smarter than a linear scan. for _, d := range RegisteredExtensions(reflect.New(st).Interface().(Message)) { - if d.Name == tok.value { + if d.Name == extName { desc = d break } } if desc == nil { - return p.errorf("unrecognized extension %q", tok.value) - } - // Check the extension terminator. 
- tok = p.next() - if tok.err != nil { - return tok.err - } - if tok.value != "]" { - return p.errorf("unrecognized extension terminator %q", tok.value) + return p.errorf("unrecognized extension %q", extName) } props := &Properties{} @@ -550,7 +598,11 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { props = oop.Prop nv := reflect.New(oop.Type.Elem()) dst = nv.Elem().Field(0) - sv.Field(oop.Field).Set(nv) + field := sv.Field(oop.Field) + if !field.IsNil() { + return p.errorf("field '%s' would overwrite already parsed oneof '%s'", name, sv.Type().Field(oop.Field).Name) + } + field.Set(nv) } if !dst.IsValid() { return p.errorf("unknown field name %q in %v", name, st) @@ -657,6 +709,35 @@ func (p *textParser) readStruct(sv reflect.Value, terminator string) error { return reqFieldErr } +// consumeExtName consumes extension name or expanded Any type URL and the +// following ']'. It returns the name or URL consumed. +func (p *textParser) consumeExtName() (string, error) { + tok := p.next() + if tok.err != nil { + return "", tok.err + } + + // If extension name or type url is quoted, it's a single token. + if len(tok.value) > 2 && isQuote(tok.value[0]) && tok.value[len(tok.value)-1] == tok.value[0] { + name, err := unquoteC(tok.value[1:len(tok.value)-1], rune(tok.value[0])) + if err != nil { + return "", err + } + return name, p.consumeToken("]") + } + + // Consume everything up to "]" + var parts []string + for tok.value != "]" { + parts = append(parts, tok.value) + tok = p.next() + if tok.err != nil { + return "", p.errorf("unrecognized type_url or extension name: %s", tok.err) + } + } + return strings.Join(parts, ""), nil +} + // consumeOptionalSeparator consumes an optional semicolon or comma. // It is used in readStruct to provide backward compatibility. 
func (p *textParser) consumeOptionalSeparator() error { @@ -717,6 +798,80 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { } return nil } + if props.StdTime { + fv := v + p.back() + props.StdTime = false + tproto := &timestamp{} + err := p.readAny(reflect.ValueOf(tproto).Elem(), props) + props.StdTime = true + if err != nil { + return err + } + tim, err := timestampFromProto(tproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ts := fv.Interface().([]*time.Time) + ts = append(ts, &tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } else { + ts := fv.Interface().([]time.Time) + ts = append(ts, tim) + fv.Set(reflect.ValueOf(ts)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&tim)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&tim))) + } + return nil + } + if props.StdDuration { + fv := v + p.back() + props.StdDuration = false + dproto := &duration{} + err := p.readAny(reflect.ValueOf(dproto).Elem(), props) + props.StdDuration = true + if err != nil { + return err + } + dur, err := durationFromProto(dproto) + if err != nil { + return err + } + if props.Repeated { + t := reflect.TypeOf(v.Interface()) + if t.Kind() == reflect.Slice { + if t.Elem().Kind() == reflect.Ptr { + ds := fv.Interface().([]*time.Duration) + ds = append(ds, &dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } else { + ds := fv.Interface().([]time.Duration) + ds = append(ds, dur) + fv.Set(reflect.ValueOf(ds)) + return nil + } + } + } + if reflect.TypeOf(v.Interface()).Kind() == reflect.Ptr { + v.Set(reflect.ValueOf(&dur)) + } else { + v.Set(reflect.Indirect(reflect.ValueOf(&dur))) + } + return nil + } switch fv := v; fv.Kind() { case reflect.Slice: at := v.Type() @@ -759,12 +914,12 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { fv.Set(reflect.Append(fv, 
reflect.New(at.Elem()).Elem())) return p.readAny(fv.Index(fv.Len()-1), props) case reflect.Bool: - // Either "true", "false", 1 or 0. + // true/1/t/True or false/f/0/False. switch tok.value { - case "true", "1": + case "true", "1", "t", "True": fv.SetBool(true) return nil - case "false", "0": + case "false", "0", "f", "False": fv.SetBool(false) return nil } @@ -828,7 +983,7 @@ func (p *textParser) readAny(v reflect.Value, props *Properties) error { return p.readStruct(fv, terminator) case reflect.Uint32: if x, err := strconv.ParseUint(tok.value, 0, 32); err == nil { - fv.SetUint(uint64(x)) + fv.SetUint(x) return nil } case reflect.Uint64: diff --git a/vendor/github.com/gogo/protobuf/proto/text_parser_test.go b/vendor/github.com/gogo/protobuf/proto/text_parser_test.go index 2ba725348..9a3a447ce 100644 --- a/vendor/github.com/gogo/protobuf/proto/text_parser_test.go +++ b/vendor/github.com/gogo/protobuf/proto/text_parser_test.go @@ -380,6 +380,95 @@ var unMarshalTextTests = []UnmarshalTextTest{ }, }, + // Boolean false + { + in: `count:42 inner { host: "example.com" connected: false }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(false), + }, + }, + }, + // Boolean true + { + in: `count:42 inner { host: "example.com" connected: true }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(true), + }, + }, + }, + // Boolean 0 + { + in: `count:42 inner { host: "example.com" connected: 0 }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(false), + }, + }, + }, + // Boolean 1 + { + in: `count:42 inner { host: "example.com" connected: 1 }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(true), + }, + }, + }, + // Boolean f + { + in: `count:42 inner { host: "example.com" connected: f }`, + out: &MyMessage{ + Count: 
Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(false), + }, + }, + }, + // Boolean t + { + in: `count:42 inner { host: "example.com" connected: t }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(true), + }, + }, + }, + // Boolean False + { + in: `count:42 inner { host: "example.com" connected: False }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(false), + }, + }, + }, + // Boolean True + { + in: `count:42 inner { host: "example.com" connected: True }`, + out: &MyMessage{ + Count: Int32(42), + Inner: &InnerMessage{ + Host: String("example.com"), + Connected: Bool(true), + }, + }, + }, + // Extension buildExtStructTest(`count: 42 [testdata.Ext.more]:`), buildExtStructTest(`count: 42 [testdata.Ext.more] {data:"Hello, world!"}`), @@ -539,13 +628,24 @@ func TestMapParsing(t *testing.T) { func TestOneofParsing(t *testing.T) { const in = `name:"Shrek"` m := new(Communique) - want := &Communique{Union: &Communique_Name{"Shrek"}} + want := &Communique{Union: &Communique_Name{Name: "Shrek"}} if err := UnmarshalText(in, m); err != nil { t.Fatal(err) } if !Equal(m, want) { t.Errorf("\n got %v\nwant %v", m, want) } + + const inOverwrite = `name:"Shrek" number:42` + m = new(Communique) + testErr := "line 1.13: field 'number' would overwrite already parsed oneof 'Union'" + if err := UnmarshalText(inOverwrite, m); err == nil { + t.Errorf("TestOneofParsing: Didn't get expected error: %v", testErr) + } else if err.Error() != testErr { + t.Errorf("TestOneofParsing: Incorrect error.\nHave: %v\nWant: %v", + err.Error(), testErr) + } + } var benchInput string diff --git a/vendor/github.com/gogo/protobuf/proto/text_test.go b/vendor/github.com/gogo/protobuf/proto/text_test.go index 652404842..27df6cb9b 100644 --- a/vendor/github.com/gogo/protobuf/proto/text_test.go +++ 
b/vendor/github.com/gogo/protobuf/proto/text_test.go @@ -339,13 +339,13 @@ func TestStringEscaping(t *testing.T) { } // Check round-trip. - pb := new(pb.Strings) - if err := proto.UnmarshalText(s, pb); err != nil { + pbStrings := new(pb.Strings) + if err := proto.UnmarshalText(s, pbStrings); err != nil { t.Errorf("#%d: UnmarshalText: %v", i, err) continue } - if !proto.Equal(pb, tc.in) { - t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pb) + if !proto.Equal(pbStrings, tc.in) { + t.Errorf("#%d: Round-trip failed:\nstart: %v\n end: %v", i, tc.in, pbStrings) } } } diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp.go b/vendor/github.com/gogo/protobuf/proto/timestamp.go new file mode 100644 index 000000000..9324f6542 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp.go @@ -0,0 +1,113 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package proto + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. +func validateTimestamp(ts *timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. 
+// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func timestampFromProto(ts *timestamp) (time.Time, error) { + // Don't return the zero value on error, because that corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. +func timestampProto(t time.Time) (*timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} diff --git a/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go new file mode 100644 index 000000000..d42764743 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/proto/timestamp_gogo.go @@ -0,0 +1,229 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package proto + +import ( + "reflect" + "time" +) + +var timeType = reflect.TypeOf((*time.Time)(nil)).Elem() + +type timestamp struct { + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *timestamp) Reset() { *m = timestamp{} } +func (*timestamp) ProtoMessage() {} +func (*timestamp) String() string { return "timestamp" } + +func init() { + RegisterType((*timestamp)(nil), "gogo.protobuf.proto.timestamp") +} + +func (o *Buffer) decTimestamp() (time.Time, error) { + b, err := o.DecodeRawBytes(true) + if err != nil { + return time.Time{}, err + } + tproto := &timestamp{} + if err := Unmarshal(b, tproto); err != nil { + return time.Time{}, err + } + return timestampFromProto(tproto) +} + +func (o *Buffer) dec_time(p *Properties, base structPointer) error { + t, err := o.decTimestamp() + if err != nil { + return err + } + setPtrCustomType(base, p.field, &t) + return nil +} + +func (o *Buffer) dec_ref_time(p *Properties, base structPointer) error { + t, err := o.decTimestamp() + if err != nil { + return err + } + setCustomType(base, p.field, &t) + return nil +} + +func (o *Buffer) dec_slice_time(p *Properties, base structPointer) error { + t, err := o.decTimestamp() + if err != nil { + return err + } + newBas := appendStructPointer(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))) + var zero field + setPtrCustomType(newBas, zero, &t) + return nil +} + +func (o *Buffer) dec_slice_ref_time(p *Properties, base structPointer) error { + t, err := o.decTimestamp() + if err != nil { + return err + } + newBas := appendStructPointer(base, p.field, reflect.SliceOf(timeType)) + var zero field + setCustomType(newBas, zero, &t) + return nil +} + +func size_time(p *Properties, base structPointer) (n int) { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return 0 + } + tim := 
structPointer_Interface(structp, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return 0 + } + size := Size(t) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_time(p *Properties, base structPointer) error { + structp := structPointer_GetStructPointer(base, p.field) + if structPointer_IsNil(structp) { + return ErrNil + } + tim := structPointer_Interface(structp, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return err + } + data, err := Marshal(t) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + return nil +} + +func size_ref_time(p *Properties, base structPointer) (n int) { + tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return 0 + } + size := Size(t) + return size + sizeVarint(uint64(size)) + len(p.tagcode) +} + +func (o *Buffer) enc_ref_time(p *Properties, base structPointer) error { + tim := structPointer_InterfaceAt(base, p.field, timeType).(*time.Time) + t, err := timestampProto(*tim) + if err != nil { + return err + } + data, err := Marshal(t) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeRawBytes(data) + return nil +} + +func size_slice_time(p *Properties, base structPointer) (n int) { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + if tims[i] == nil { + return 0 + } + tproto, err := timestampProto(*tims[i]) + if err != nil { + return 0 + } + size := Size(tproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_time(p *Properties, base structPointer) error { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(reflect.PtrTo(timeType))).(*[]*time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + if tims[i] == nil { + return errRepeatedHasNil + } + tproto, err := timestampProto(*tims[i]) + if err != nil { + return err + } + data, err := Marshal(tproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) + o.EncodeRawBytes(data) + } + return nil +} + +func size_slice_ref_time(p *Properties, base structPointer) (n int) { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + tproto, err := timestampProto(tims[i]) + if err != nil { + return 0 + } + size := Size(tproto) + n += len(p.tagcode) + size + sizeVarint(uint64(size)) + } + return n +} + +func (o *Buffer) enc_slice_ref_time(p *Properties, base structPointer) error { + ptims := structPointer_InterfaceAt(base, p.field, reflect.SliceOf(timeType)).(*[]time.Time) + tims := *ptims + for i := 0; i < len(tims); i++ { + tproto, err := timestampProto(tims[i]) + if err != nil { + return err + } + data, err := Marshal(tproto) + if err != nil { + return err + } + o.buf = append(o.buf, p.tagcode...) 
+ o.EncodeRawBytes(data) + } + return nil +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile index d80ceffee..3496dc99d 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/Makefile @@ -30,4 +30,7 @@ # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo + go install github.com/gogo/protobuf/protoc-gen-gostring protoc --gogo_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto + protoc --gostring_out=. -I=../../protobuf/google/protobuf ../../protobuf/google/protobuf/descriptor.proto diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go new file mode 100644 index 000000000..a85bf1984 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.go @@ -0,0 +1,118 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. 
nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +// Package descriptor provides functions for obtaining protocol buffer +// descriptors for generated Go types. +// +// These functions cannot go in package proto because they depend on the +// generated protobuf descriptor messages, which themselves depend on proto. +package descriptor + +import ( + "bytes" + "compress/gzip" + "fmt" + "io/ioutil" + + "github.com/gogo/protobuf/proto" +) + +// extractFile extracts a FileDescriptorProto from a gzip'd buffer. 
+func extractFile(gz []byte) (*FileDescriptorProto, error) { + r, err := gzip.NewReader(bytes.NewReader(gz)) + if err != nil { + return nil, fmt.Errorf("failed to open gzip reader: %v", err) + } + defer r.Close() + + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, fmt.Errorf("failed to uncompress descriptor: %v", err) + } + + fd := new(FileDescriptorProto) + if err := proto.Unmarshal(b, fd); err != nil { + return nil, fmt.Errorf("malformed FileDescriptorProto: %v", err) + } + + return fd, nil +} + +// Message is a proto.Message with a method to return its descriptor. +// +// Message types generated by the protocol compiler always satisfy +// the Message interface. +type Message interface { + proto.Message + Descriptor() ([]byte, []int) +} + +// ForMessage returns a FileDescriptorProto and a DescriptorProto from within it +// describing the given message. +func ForMessage(msg Message) (fd *FileDescriptorProto, md *DescriptorProto) { + gz, path := msg.Descriptor() + fd, err := extractFile(gz) + if err != nil { + panic(fmt.Sprintf("invalid FileDescriptorProto for %T: %v", msg, err)) + } + + md = fd.MessageType[path[0]] + for _, i := range path[1:] { + md = md.NestedType[i] + } + return fd, md +} + +// Is this field a scalar numeric type? 
+func (field *FieldDescriptorProto) IsScalar() bool { + if field.Type == nil { + return false + } + switch *field.Type { + case FieldDescriptorProto_TYPE_DOUBLE, + FieldDescriptorProto_TYPE_FLOAT, + FieldDescriptorProto_TYPE_INT64, + FieldDescriptorProto_TYPE_UINT64, + FieldDescriptorProto_TYPE_INT32, + FieldDescriptorProto_TYPE_FIXED64, + FieldDescriptorProto_TYPE_FIXED32, + FieldDescriptorProto_TYPE_BOOL, + FieldDescriptorProto_TYPE_UINT32, + FieldDescriptorProto_TYPE_ENUM, + FieldDescriptorProto_TYPE_SFIXED32, + FieldDescriptorProto_TYPE_SFIXED64, + FieldDescriptorProto_TYPE_SINT32, + FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go index 6b361d6a8..a63db3c4b 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: descriptor.proto -// DO NOT EDIT! /* Package descriptor is a generated protocol buffer package. @@ -12,6 +11,7 @@ It has these top-level messages: FileDescriptorSet FileDescriptorProto DescriptorProto + ExtensionRangeOptions FieldDescriptorProto OneofDescriptorProto EnumDescriptorProto @@ -21,12 +21,14 @@ It has these top-level messages: FileOptions MessageOptions FieldOptions + OneofOptions EnumOptions EnumValueOptions ServiceOptions MethodOptions UninterpretedOption SourceCodeInfo + GeneratedCodeInfo */ package descriptor @@ -63,6 +65,10 @@ const ( FieldDescriptorProto_TYPE_FIXED32 FieldDescriptorProto_Type = 7 FieldDescriptorProto_TYPE_BOOL FieldDescriptorProto_Type = 8 FieldDescriptorProto_TYPE_STRING FieldDescriptorProto_Type = 9 + // Tag-delimited aggregate. + // Group type is deprecated and not supported in proto3. 
However, Proto3 + // implementations should still be able to parse the group wire format and + // treat group fields as unknown fields. FieldDescriptorProto_TYPE_GROUP FieldDescriptorProto_Type = 10 FieldDescriptorProto_TYPE_MESSAGE FieldDescriptorProto_Type = 11 // New in version 2. @@ -133,7 +139,7 @@ func (x *FieldDescriptorProto_Type) UnmarshalJSON(data []byte) error { return nil } func (FieldDescriptorProto_Type) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{3, 0} + return fileDescriptorDescriptor, []int{4, 0} } type FieldDescriptorProto_Label int32 @@ -173,7 +179,7 @@ func (x *FieldDescriptorProto_Label) UnmarshalJSON(data []byte) error { return nil } func (FieldDescriptorProto_Label) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{3, 1} + return fileDescriptorDescriptor, []int{4, 1} } // Generated classes can be optimized for speed or code size. @@ -214,7 +220,7 @@ func (x *FileOptions_OptimizeMode) UnmarshalJSON(data []byte) error { return nil } func (FileOptions_OptimizeMode) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{9, 0} + return fileDescriptorDescriptor, []int{10, 0} } type FieldOptions_CType int32 @@ -254,7 +260,7 @@ func (x *FieldOptions_CType) UnmarshalJSON(data []byte) error { return nil } func (FieldOptions_CType) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{11, 0} + return fileDescriptorDescriptor, []int{12, 0} } type FieldOptions_JSType int32 @@ -296,7 +302,49 @@ func (x *FieldOptions_JSType) UnmarshalJSON(data []byte) error { return nil } func (FieldOptions_JSType) EnumDescriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{11, 1} + return fileDescriptorDescriptor, []int{12, 1} +} + +// Is this method side-effect-free (or safe in HTTP parlance), or idempotent, +// or neither? 
HTTP based RPC implementation may choose GET verb for safe +// methods, and PUT verb for idempotent methods instead of the default POST. +type MethodOptions_IdempotencyLevel int32 + +const ( + MethodOptions_IDEMPOTENCY_UNKNOWN MethodOptions_IdempotencyLevel = 0 + MethodOptions_NO_SIDE_EFFECTS MethodOptions_IdempotencyLevel = 1 + MethodOptions_IDEMPOTENT MethodOptions_IdempotencyLevel = 2 +) + +var MethodOptions_IdempotencyLevel_name = map[int32]string{ + 0: "IDEMPOTENCY_UNKNOWN", + 1: "NO_SIDE_EFFECTS", + 2: "IDEMPOTENT", +} +var MethodOptions_IdempotencyLevel_value = map[string]int32{ + "IDEMPOTENCY_UNKNOWN": 0, + "NO_SIDE_EFFECTS": 1, + "IDEMPOTENT": 2, +} + +func (x MethodOptions_IdempotencyLevel) Enum() *MethodOptions_IdempotencyLevel { + p := new(MethodOptions_IdempotencyLevel) + *p = x + return p +} +func (x MethodOptions_IdempotencyLevel) String() string { + return proto.EnumName(MethodOptions_IdempotencyLevel_name, int32(x)) +} +func (x *MethodOptions_IdempotencyLevel) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MethodOptions_IdempotencyLevel_value, data, "MethodOptions_IdempotencyLevel") + if err != nil { + return err + } + *x = MethodOptions_IdempotencyLevel(value) + return nil +} +func (MethodOptions_IdempotencyLevel) EnumDescriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{17, 0} } // The protocol compiler can output a FileDescriptorSet containing the .proto @@ -528,9 +576,10 @@ func (m *DescriptorProto) GetReservedName() []string { } type DescriptorProto_ExtensionRange struct { - Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` - End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` - XXX_unrecognized []byte `json:"-"` + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + Options *ExtensionRangeOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + 
XXX_unrecognized []byte `json:"-"` } func (m *DescriptorProto_ExtensionRange) Reset() { *m = DescriptorProto_ExtensionRange{} } @@ -554,6 +603,13 @@ func (m *DescriptorProto_ExtensionRange) GetEnd() int32 { return 0 } +func (m *DescriptorProto_ExtensionRange) GetOptions() *ExtensionRangeOptions { + if m != nil { + return m.Options + } + return nil +} + // Range of reserved tag numbers. Reserved tag numbers may not be used by // fields or extension ranges in the same message. Reserved ranges may // not overlap. @@ -584,6 +640,33 @@ func (m *DescriptorProto_ReservedRange) GetEnd() int32 { return 0 } +type ExtensionRangeOptions struct { + // The parser stores options it doesn't recognize here. See above. + UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *ExtensionRangeOptions) Reset() { *m = ExtensionRangeOptions{} } +func (m *ExtensionRangeOptions) String() string { return proto.CompactTextString(m) } +func (*ExtensionRangeOptions) ProtoMessage() {} +func (*ExtensionRangeOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{3} } + +var extRange_ExtensionRangeOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*ExtensionRangeOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_ExtensionRangeOptions +} + +func (m *ExtensionRangeOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + // Describes a field within a message. 
type FieldDescriptorProto struct { Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` @@ -622,7 +705,7 @@ type FieldDescriptorProto struct { func (m *FieldDescriptorProto) Reset() { *m = FieldDescriptorProto{} } func (m *FieldDescriptorProto) String() string { return proto.CompactTextString(m) } func (*FieldDescriptorProto) ProtoMessage() {} -func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{3} } +func (*FieldDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{4} } func (m *FieldDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -696,14 +779,15 @@ func (m *FieldDescriptorProto) GetOptions() *FieldOptions { // Describes a oneof. type OneofDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Options *OneofOptions `protobuf:"bytes,2,opt,name=options" json:"options,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *OneofDescriptorProto) Reset() { *m = OneofDescriptorProto{} } func (m *OneofDescriptorProto) String() string { return proto.CompactTextString(m) } func (*OneofDescriptorProto) ProtoMessage() {} -func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{4} } +func (*OneofDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{5} } func (m *OneofDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -712,18 +796,32 @@ func (m *OneofDescriptorProto) GetName() string { return "" } +func (m *OneofDescriptorProto) GetOptions() *OneofOptions { + if m != nil { + return m.Options + } + return nil +} + // Describes an enum type. 
type EnumDescriptorProto struct { - Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` - Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` - Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` - XXX_unrecognized []byte `json:"-"` + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value []*EnumValueDescriptorProto `protobuf:"bytes,2,rep,name=value" json:"value,omitempty"` + Options *EnumOptions `protobuf:"bytes,3,opt,name=options" json:"options,omitempty"` + // Range of reserved numeric values. Reserved numeric values may not be used + // by enum values in the same enum declaration. Reserved ranges may not + // overlap. + ReservedRange []*EnumDescriptorProto_EnumReservedRange `protobuf:"bytes,4,rep,name=reserved_range,json=reservedRange" json:"reserved_range,omitempty"` + // Reserved enum value names, which may not be reused. A given name may only + // be reserved once. + ReservedName []string `protobuf:"bytes,5,rep,name=reserved_name,json=reservedName" json:"reserved_name,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *EnumDescriptorProto) Reset() { *m = EnumDescriptorProto{} } func (m *EnumDescriptorProto) String() string { return proto.CompactTextString(m) } func (*EnumDescriptorProto) ProtoMessage() {} -func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{5} } +func (*EnumDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{6} } func (m *EnumDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -746,6 +844,53 @@ func (m *EnumDescriptorProto) GetOptions() *EnumOptions { return nil } +func (m *EnumDescriptorProto) GetReservedRange() []*EnumDescriptorProto_EnumReservedRange { + if m != nil { + return m.ReservedRange + } + return nil +} + +func (m *EnumDescriptorProto) GetReservedName() []string { + if m != nil { + return m.ReservedName + } + 
return nil +} + +// Range of reserved numeric values. Reserved values may not be used by +// entries in the same enum. Reserved ranges may not overlap. +// +// Note that this is distinct from DescriptorProto.ReservedRange in that it +// is inclusive such that it can appropriately represent the entire int32 +// domain. +type EnumDescriptorProto_EnumReservedRange struct { + Start *int32 `protobuf:"varint,1,opt,name=start" json:"start,omitempty"` + End *int32 `protobuf:"varint,2,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *EnumDescriptorProto_EnumReservedRange) Reset() { *m = EnumDescriptorProto_EnumReservedRange{} } +func (m *EnumDescriptorProto_EnumReservedRange) String() string { return proto.CompactTextString(m) } +func (*EnumDescriptorProto_EnumReservedRange) ProtoMessage() {} +func (*EnumDescriptorProto_EnumReservedRange) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{6, 0} +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetStart() int32 { + if m != nil && m.Start != nil { + return *m.Start + } + return 0 +} + +func (m *EnumDescriptorProto_EnumReservedRange) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + // Describes a value within an enum. 
type EnumValueDescriptorProto struct { Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` @@ -758,7 +903,7 @@ func (m *EnumValueDescriptorProto) Reset() { *m = EnumValueDescriptorPro func (m *EnumValueDescriptorProto) String() string { return proto.CompactTextString(m) } func (*EnumValueDescriptorProto) ProtoMessage() {} func (*EnumValueDescriptorProto) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{6} + return fileDescriptorDescriptor, []int{7} } func (m *EnumValueDescriptorProto) GetName() string { @@ -793,7 +938,7 @@ type ServiceDescriptorProto struct { func (m *ServiceDescriptorProto) Reset() { *m = ServiceDescriptorProto{} } func (m *ServiceDescriptorProto) String() string { return proto.CompactTextString(m) } func (*ServiceDescriptorProto) ProtoMessage() {} -func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{7} } +func (*ServiceDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{8} } func (m *ServiceDescriptorProto) GetName() string { if m != nil && m.Name != nil { @@ -834,7 +979,7 @@ type MethodDescriptorProto struct { func (m *MethodDescriptorProto) Reset() { *m = MethodDescriptorProto{} } func (m *MethodDescriptorProto) String() string { return proto.CompactTextString(m) } func (*MethodDescriptorProto) ProtoMessage() {} -func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{8} } +func (*MethodDescriptorProto) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{9} } const Default_MethodDescriptorProto_ClientStreaming bool = false const Default_MethodDescriptorProto_ServerStreaming bool = false @@ -900,19 +1045,8 @@ type FileOptions struct { // generated to contain the file's getDescriptor() method as well as any // top-level extensions defined in the file. 
JavaMultipleFiles *bool `protobuf:"varint,10,opt,name=java_multiple_files,json=javaMultipleFiles,def=0" json:"java_multiple_files,omitempty"` - // If set true, then the Java code generator will generate equals() and - // hashCode() methods for all messages defined in the .proto file. - // This increases generated code size, potentially substantially for large - // protos, which may harm a memory-constrained application. - // - In the full runtime this is a speed optimization, as the - // AbstractMessage base class includes reflection-based implementations of - // these methods. - // - In the lite runtime, setting this option changes the semantics of - // equals() and hashCode() to more closely match those of the full runtime; - // the generated methods compute their results based on field values rather - // than object identity. (Implementations should not assume that hashcodes - // will be consistent across runtimes or versions of the protocol compiler.) - JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash,def=0" json:"java_generate_equals_and_hash,omitempty"` + // This option does nothing. + JavaGenerateEqualsAndHash *bool `protobuf:"varint,20,opt,name=java_generate_equals_and_hash,json=javaGenerateEqualsAndHash" json:"java_generate_equals_and_hash,omitempty"` // If set true, then the Java2 code generator will generate code that // throws an exception whenever an attempt is made to assign a non-UTF-8 // byte sequence to a string field. 
@@ -940,6 +1074,7 @@ type FileOptions struct { CcGenericServices *bool `protobuf:"varint,16,opt,name=cc_generic_services,json=ccGenericServices,def=0" json:"cc_generic_services,omitempty"` JavaGenericServices *bool `protobuf:"varint,17,opt,name=java_generic_services,json=javaGenericServices,def=0" json:"java_generic_services,omitempty"` PyGenericServices *bool `protobuf:"varint,18,opt,name=py_generic_services,json=pyGenericServices,def=0" json:"py_generic_services,omitempty"` + PhpGenericServices *bool `protobuf:"varint,42,opt,name=php_generic_services,json=phpGenericServices,def=0" json:"php_generic_services,omitempty"` // Is this file deprecated? // Depending on the target platform, this can emit Deprecated annotations // for everything in the file, or it will be completely ignored; in the very @@ -953,10 +1088,20 @@ type FileOptions struct { ObjcClassPrefix *string `protobuf:"bytes,36,opt,name=objc_class_prefix,json=objcClassPrefix" json:"objc_class_prefix,omitempty"` // Namespace for generated classes; defaults to the package. CsharpNamespace *string `protobuf:"bytes,37,opt,name=csharp_namespace,json=csharpNamespace" json:"csharp_namespace,omitempty"` - // Whether the nano proto compiler should generate in the deprecated non-nano - // suffixed package. - JavananoUseDeprecatedPackage *bool `protobuf:"varint,38,opt,name=javanano_use_deprecated_package,json=javananoUseDeprecatedPackage" json:"javanano_use_deprecated_package,omitempty"` - // The parser stores options it doesn't recognize here. See above. + // By default Swift generators will take the proto package and CamelCase it + // replacing '.' with underscore and use that to prefix the types/symbols + // defined. When this options is provided, they will use this value instead + // to prefix the types/symbols defined. 
+ SwiftPrefix *string `protobuf:"bytes,39,opt,name=swift_prefix,json=swiftPrefix" json:"swift_prefix,omitempty"` + // Sets the php class prefix which is prepended to all php generated classes + // from this .proto. Default is empty. + PhpClassPrefix *string `protobuf:"bytes,40,opt,name=php_class_prefix,json=phpClassPrefix" json:"php_class_prefix,omitempty"` + // Use this option to change the namespace of php generated classes. Default + // is empty. When this option is empty, the package name will be used for + // determining the namespace. + PhpNamespace *string `protobuf:"bytes,41,opt,name=php_namespace,json=phpNamespace" json:"php_namespace,omitempty"` + // The parser stores options it doesn't recognize here. + // See the documentation for the "Options" section above. UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` proto.XXX_InternalExtensions `json:"-"` XXX_unrecognized []byte `json:"-"` @@ -965,10 +1110,10 @@ type FileOptions struct { func (m *FileOptions) Reset() { *m = FileOptions{} } func (m *FileOptions) String() string { return proto.CompactTextString(m) } func (*FileOptions) ProtoMessage() {} -func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{9} } +func (*FileOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{10} } var extRange_FileOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { @@ -976,12 +1121,12 @@ func (*FileOptions) ExtensionRangeArray() []proto.ExtensionRange { } const Default_FileOptions_JavaMultipleFiles bool = false -const Default_FileOptions_JavaGenerateEqualsAndHash bool = false const Default_FileOptions_JavaStringCheckUtf8 bool = false const Default_FileOptions_OptimizeFor FileOptions_OptimizeMode = FileOptions_SPEED const Default_FileOptions_CcGenericServices 
bool = false const Default_FileOptions_JavaGenericServices bool = false const Default_FileOptions_PyGenericServices bool = false +const Default_FileOptions_PhpGenericServices bool = false const Default_FileOptions_Deprecated bool = false const Default_FileOptions_CcEnableArenas bool = false @@ -1010,7 +1155,7 @@ func (m *FileOptions) GetJavaGenerateEqualsAndHash() bool { if m != nil && m.JavaGenerateEqualsAndHash != nil { return *m.JavaGenerateEqualsAndHash } - return Default_FileOptions_JavaGenerateEqualsAndHash + return false } func (m *FileOptions) GetJavaStringCheckUtf8() bool { @@ -1055,6 +1200,13 @@ func (m *FileOptions) GetPyGenericServices() bool { return Default_FileOptions_PyGenericServices } +func (m *FileOptions) GetPhpGenericServices() bool { + if m != nil && m.PhpGenericServices != nil { + return *m.PhpGenericServices + } + return Default_FileOptions_PhpGenericServices +} + func (m *FileOptions) GetDeprecated() bool { if m != nil && m.Deprecated != nil { return *m.Deprecated @@ -1083,11 +1235,25 @@ func (m *FileOptions) GetCsharpNamespace() string { return "" } -func (m *FileOptions) GetJavananoUseDeprecatedPackage() bool { - if m != nil && m.JavananoUseDeprecatedPackage != nil { - return *m.JavananoUseDeprecatedPackage +func (m *FileOptions) GetSwiftPrefix() string { + if m != nil && m.SwiftPrefix != nil { + return *m.SwiftPrefix } - return false + return "" +} + +func (m *FileOptions) GetPhpClassPrefix() string { + if m != nil && m.PhpClassPrefix != nil { + return *m.PhpClassPrefix + } + return "" +} + +func (m *FileOptions) GetPhpNamespace() string { + if m != nil && m.PhpNamespace != nil { + return *m.PhpNamespace + } + return "" } func (m *FileOptions) GetUninterpretedOption() []*UninterpretedOption { @@ -1157,10 +1323,10 @@ type MessageOptions struct { func (m *MessageOptions) Reset() { *m = MessageOptions{} } func (m *MessageOptions) String() string { return proto.CompactTextString(m) } func (*MessageOptions) ProtoMessage() {} -func 
(*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{10} } +func (*MessageOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{11} } var extRange_MessageOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*MessageOptions) ExtensionRangeArray() []proto.ExtensionRange { @@ -1220,13 +1386,15 @@ type FieldOptions struct { Packed *bool `protobuf:"varint,2,opt,name=packed" json:"packed,omitempty"` // The jstype option determines the JavaScript type used for values of the // field. The option is permitted only for 64 bit integral and fixed types - // (int64, uint64, sint64, fixed64, sfixed64). By default these types are - // represented as JavaScript strings. This avoids loss of precision that can - // happen when a large value is converted to a floating point JavaScript - // numbers. Specifying JS_NUMBER for the jstype causes the generated - // JavaScript code to use the JavaScript "number" type instead of strings. - // This option is an enum to permit additional types to be added, - // e.g. goog.math.Integer. + // (int64, uint64, sint64, fixed64, sfixed64). A field with jstype JS_STRING + // is represented as JavaScript string, which avoids loss of precision that + // can happen when a large value is converted to a floating point JavaScript. + // Specifying JS_NUMBER for the jstype causes the generated JavaScript code to + // use the JavaScript "number" type. The behavior of the default option + // JS_NORMAL is implementation dependent. + // + // This option is an enum to permit additional types to be added, e.g. + // goog.math.Integer. Jstype *FieldOptions_JSType `protobuf:"varint,6,opt,name=jstype,enum=google.protobuf.FieldOptions_JSType,def=0" json:"jstype,omitempty"` // Should this field be parsed lazily? Lazy applies only to message-type // fields. 
It means that when the outer message is initially parsed, the @@ -1247,7 +1415,7 @@ type FieldOptions struct { // // // Note that implementations may choose not to check required fields within - // a lazy sub-message. That is, calling IsInitialized() on the outher message + // a lazy sub-message. That is, calling IsInitialized() on the outer message // may return true even if the inner message has missing required fields. // This is necessary because otherwise the inner message would have to be // parsed in order to perform the check, defeating the purpose of lazy @@ -1273,10 +1441,10 @@ type FieldOptions struct { func (m *FieldOptions) Reset() { *m = FieldOptions{} } func (m *FieldOptions) String() string { return proto.CompactTextString(m) } func (*FieldOptions) ProtoMessage() {} -func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{11} } +func (*FieldOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{12} } var extRange_FieldOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*FieldOptions) ExtensionRangeArray() []proto.ExtensionRange { @@ -1338,6 +1506,33 @@ func (m *FieldOptions) GetUninterpretedOption() []*UninterpretedOption { return nil } +type OneofOptions struct { + // The parser stores options it doesn't recognize here. See above. 
+ UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` + proto.XXX_InternalExtensions `json:"-"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *OneofOptions) Reset() { *m = OneofOptions{} } +func (m *OneofOptions) String() string { return proto.CompactTextString(m) } +func (*OneofOptions) ProtoMessage() {} +func (*OneofOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{13} } + +var extRange_OneofOptions = []proto.ExtensionRange{ + {Start: 1000, End: 536870911}, +} + +func (*OneofOptions) ExtensionRangeArray() []proto.ExtensionRange { + return extRange_OneofOptions +} + +func (m *OneofOptions) GetUninterpretedOption() []*UninterpretedOption { + if m != nil { + return m.UninterpretedOption + } + return nil +} + type EnumOptions struct { // Set this option to true to allow mapping different tag names to the same // value. @@ -1356,10 +1551,10 @@ type EnumOptions struct { func (m *EnumOptions) Reset() { *m = EnumOptions{} } func (m *EnumOptions) String() string { return proto.CompactTextString(m) } func (*EnumOptions) ProtoMessage() {} -func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{12} } +func (*EnumOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{14} } var extRange_EnumOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*EnumOptions) ExtensionRangeArray() []proto.ExtensionRange { @@ -1404,10 +1599,10 @@ type EnumValueOptions struct { func (m *EnumValueOptions) Reset() { *m = EnumValueOptions{} } func (m *EnumValueOptions) String() string { return proto.CompactTextString(m) } func (*EnumValueOptions) ProtoMessage() {} -func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{13} } +func (*EnumValueOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{15} 
} var extRange_EnumValueOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*EnumValueOptions) ExtensionRangeArray() []proto.ExtensionRange { @@ -1445,10 +1640,10 @@ type ServiceOptions struct { func (m *ServiceOptions) Reset() { *m = ServiceOptions{} } func (m *ServiceOptions) String() string { return proto.CompactTextString(m) } func (*ServiceOptions) ProtoMessage() {} -func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{14} } +func (*ServiceOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{16} } var extRange_ServiceOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*ServiceOptions) ExtensionRangeArray() []proto.ExtensionRange { @@ -1476,7 +1671,8 @@ type MethodOptions struct { // Depending on the target platform, this can emit Deprecated annotations // for the method, or it will be completely ignored; in the very least, // this is a formalization for deprecating methods. - Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + Deprecated *bool `protobuf:"varint,33,opt,name=deprecated,def=0" json:"deprecated,omitempty"` + IdempotencyLevel *MethodOptions_IdempotencyLevel `protobuf:"varint,34,opt,name=idempotency_level,json=idempotencyLevel,enum=google.protobuf.MethodOptions_IdempotencyLevel,def=0" json:"idempotency_level,omitempty"` // The parser stores options it doesn't recognize here. See above. 
UninterpretedOption []*UninterpretedOption `protobuf:"bytes,999,rep,name=uninterpreted_option,json=uninterpretedOption" json:"uninterpreted_option,omitempty"` proto.XXX_InternalExtensions `json:"-"` @@ -1486,10 +1682,10 @@ type MethodOptions struct { func (m *MethodOptions) Reset() { *m = MethodOptions{} } func (m *MethodOptions) String() string { return proto.CompactTextString(m) } func (*MethodOptions) ProtoMessage() {} -func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{15} } +func (*MethodOptions) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{17} } var extRange_MethodOptions = []proto.ExtensionRange{ - {1000, 536870911}, + {Start: 1000, End: 536870911}, } func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { @@ -1497,6 +1693,7 @@ func (*MethodOptions) ExtensionRangeArray() []proto.ExtensionRange { } const Default_MethodOptions_Deprecated bool = false +const Default_MethodOptions_IdempotencyLevel MethodOptions_IdempotencyLevel = MethodOptions_IDEMPOTENCY_UNKNOWN func (m *MethodOptions) GetDeprecated() bool { if m != nil && m.Deprecated != nil { @@ -1505,6 +1702,13 @@ func (m *MethodOptions) GetDeprecated() bool { return Default_MethodOptions_Deprecated } +func (m *MethodOptions) GetIdempotencyLevel() MethodOptions_IdempotencyLevel { + if m != nil && m.IdempotencyLevel != nil { + return *m.IdempotencyLevel + } + return Default_MethodOptions_IdempotencyLevel +} + func (m *MethodOptions) GetUninterpretedOption() []*UninterpretedOption { if m != nil { return m.UninterpretedOption @@ -1534,7 +1738,7 @@ type UninterpretedOption struct { func (m *UninterpretedOption) Reset() { *m = UninterpretedOption{} } func (m *UninterpretedOption) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption) ProtoMessage() {} -func (*UninterpretedOption) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{16} } +func (*UninterpretedOption) Descriptor() ([]byte, 
[]int) { return fileDescriptorDescriptor, []int{18} } func (m *UninterpretedOption) GetName() []*UninterpretedOption_NamePart { if m != nil { @@ -1600,7 +1804,7 @@ func (m *UninterpretedOption_NamePart) Reset() { *m = UninterpretedOptio func (m *UninterpretedOption_NamePart) String() string { return proto.CompactTextString(m) } func (*UninterpretedOption_NamePart) ProtoMessage() {} func (*UninterpretedOption_NamePart) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{16, 0} + return fileDescriptorDescriptor, []int{18, 0} } func (m *UninterpretedOption_NamePart) GetNamePart() string { @@ -1670,7 +1874,7 @@ type SourceCodeInfo struct { func (m *SourceCodeInfo) Reset() { *m = SourceCodeInfo{} } func (m *SourceCodeInfo) String() string { return proto.CompactTextString(m) } func (*SourceCodeInfo) ProtoMessage() {} -func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{17} } +func (*SourceCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{19} } func (m *SourceCodeInfo) GetLocation() []*SourceCodeInfo_Location { if m != nil { @@ -1767,7 +1971,7 @@ func (m *SourceCodeInfo_Location) Reset() { *m = SourceCodeInfo_Location func (m *SourceCodeInfo_Location) String() string { return proto.CompactTextString(m) } func (*SourceCodeInfo_Location) ProtoMessage() {} func (*SourceCodeInfo_Location) Descriptor() ([]byte, []int) { - return fileDescriptorDescriptor, []int{17, 0} + return fileDescriptorDescriptor, []int{19, 0} } func (m *SourceCodeInfo_Location) GetPath() []int32 { @@ -1805,21 +2009,97 @@ func (m *SourceCodeInfo_Location) GetLeadingDetachedComments() []string { return nil } +// Describes the relationship between generated code and its original source +// file. A GeneratedCodeInfo message is associated with only one generated +// source file, but may contain references to different source .proto files. 
+type GeneratedCodeInfo struct { + // An Annotation connects some span of text in generated code to an element + // of its generating .proto file. + Annotation []*GeneratedCodeInfo_Annotation `protobuf:"bytes,1,rep,name=annotation" json:"annotation,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo) Reset() { *m = GeneratedCodeInfo{} } +func (m *GeneratedCodeInfo) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo) ProtoMessage() {} +func (*GeneratedCodeInfo) Descriptor() ([]byte, []int) { return fileDescriptorDescriptor, []int{20} } + +func (m *GeneratedCodeInfo) GetAnnotation() []*GeneratedCodeInfo_Annotation { + if m != nil { + return m.Annotation + } + return nil +} + +type GeneratedCodeInfo_Annotation struct { + // Identifies the element in the original source .proto file. This field + // is formatted the same as SourceCodeInfo.Location.path. + Path []int32 `protobuf:"varint,1,rep,packed,name=path" json:"path,omitempty"` + // Identifies the filesystem path to the original source .proto. + SourceFile *string `protobuf:"bytes,2,opt,name=source_file,json=sourceFile" json:"source_file,omitempty"` + // Identifies the starting offset in bytes in the generated code + // that relates to the identified object. + Begin *int32 `protobuf:"varint,3,opt,name=begin" json:"begin,omitempty"` + // Identifies the ending offset in bytes in the generated code that + // relates to the identified offset. The end offset should be one past + // the last relevant byte (so the length of the text = end - begin). 
+ End *int32 `protobuf:"varint,4,opt,name=end" json:"end,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *GeneratedCodeInfo_Annotation) Reset() { *m = GeneratedCodeInfo_Annotation{} } +func (m *GeneratedCodeInfo_Annotation) String() string { return proto.CompactTextString(m) } +func (*GeneratedCodeInfo_Annotation) ProtoMessage() {} +func (*GeneratedCodeInfo_Annotation) Descriptor() ([]byte, []int) { + return fileDescriptorDescriptor, []int{20, 0} +} + +func (m *GeneratedCodeInfo_Annotation) GetPath() []int32 { + if m != nil { + return m.Path + } + return nil +} + +func (m *GeneratedCodeInfo_Annotation) GetSourceFile() string { + if m != nil && m.SourceFile != nil { + return *m.SourceFile + } + return "" +} + +func (m *GeneratedCodeInfo_Annotation) GetBegin() int32 { + if m != nil && m.Begin != nil { + return *m.Begin + } + return 0 +} + +func (m *GeneratedCodeInfo_Annotation) GetEnd() int32 { + if m != nil && m.End != nil { + return *m.End + } + return 0 +} + func init() { proto.RegisterType((*FileDescriptorSet)(nil), "google.protobuf.FileDescriptorSet") proto.RegisterType((*FileDescriptorProto)(nil), "google.protobuf.FileDescriptorProto") proto.RegisterType((*DescriptorProto)(nil), "google.protobuf.DescriptorProto") proto.RegisterType((*DescriptorProto_ExtensionRange)(nil), "google.protobuf.DescriptorProto.ExtensionRange") proto.RegisterType((*DescriptorProto_ReservedRange)(nil), "google.protobuf.DescriptorProto.ReservedRange") + proto.RegisterType((*ExtensionRangeOptions)(nil), "google.protobuf.ExtensionRangeOptions") proto.RegisterType((*FieldDescriptorProto)(nil), "google.protobuf.FieldDescriptorProto") proto.RegisterType((*OneofDescriptorProto)(nil), "google.protobuf.OneofDescriptorProto") proto.RegisterType((*EnumDescriptorProto)(nil), "google.protobuf.EnumDescriptorProto") + proto.RegisterType((*EnumDescriptorProto_EnumReservedRange)(nil), "google.protobuf.EnumDescriptorProto.EnumReservedRange") 
proto.RegisterType((*EnumValueDescriptorProto)(nil), "google.protobuf.EnumValueDescriptorProto") proto.RegisterType((*ServiceDescriptorProto)(nil), "google.protobuf.ServiceDescriptorProto") proto.RegisterType((*MethodDescriptorProto)(nil), "google.protobuf.MethodDescriptorProto") proto.RegisterType((*FileOptions)(nil), "google.protobuf.FileOptions") proto.RegisterType((*MessageOptions)(nil), "google.protobuf.MessageOptions") proto.RegisterType((*FieldOptions)(nil), "google.protobuf.FieldOptions") + proto.RegisterType((*OneofOptions)(nil), "google.protobuf.OneofOptions") proto.RegisterType((*EnumOptions)(nil), "google.protobuf.EnumOptions") proto.RegisterType((*EnumValueOptions)(nil), "google.protobuf.EnumValueOptions") proto.RegisterType((*ServiceOptions)(nil), "google.protobuf.ServiceOptions") @@ -1828,152 +2108,174 @@ func init() { proto.RegisterType((*UninterpretedOption_NamePart)(nil), "google.protobuf.UninterpretedOption.NamePart") proto.RegisterType((*SourceCodeInfo)(nil), "google.protobuf.SourceCodeInfo") proto.RegisterType((*SourceCodeInfo_Location)(nil), "google.protobuf.SourceCodeInfo.Location") + proto.RegisterType((*GeneratedCodeInfo)(nil), "google.protobuf.GeneratedCodeInfo") + proto.RegisterType((*GeneratedCodeInfo_Annotation)(nil), "google.protobuf.GeneratedCodeInfo.Annotation") proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Type", FieldDescriptorProto_Type_name, FieldDescriptorProto_Type_value) proto.RegisterEnum("google.protobuf.FieldDescriptorProto_Label", FieldDescriptorProto_Label_name, FieldDescriptorProto_Label_value) proto.RegisterEnum("google.protobuf.FileOptions_OptimizeMode", FileOptions_OptimizeMode_name, FileOptions_OptimizeMode_value) proto.RegisterEnum("google.protobuf.FieldOptions_CType", FieldOptions_CType_name, FieldOptions_CType_value) proto.RegisterEnum("google.protobuf.FieldOptions_JSType", FieldOptions_JSType_name, FieldOptions_JSType_value) + proto.RegisterEnum("google.protobuf.MethodOptions_IdempotencyLevel", 
MethodOptions_IdempotencyLevel_name, MethodOptions_IdempotencyLevel_value) } func init() { proto.RegisterFile("descriptor.proto", fileDescriptorDescriptor) } var fileDescriptorDescriptor = []byte{ - // 2192 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0xcc, 0x58, 0x4f, 0x73, 0xdb, 0xd6, - 0x11, 0x2f, 0xff, 0x8a, 0x5c, 0x52, 0x24, 0xf4, 0xac, 0xd8, 0xb4, 0x62, 0xc7, 0x31, 0x63, 0xc7, - 0x8e, 0xd3, 0xd2, 0x19, 0xb7, 0x49, 0x5c, 0xa5, 0x93, 0x0e, 0x45, 0xc2, 0x0a, 0x3d, 0x94, 0xc8, - 0x3e, 0x92, 0xad, 0x93, 0x0b, 0x06, 0x02, 0x1f, 0x29, 0xd8, 0x20, 0xc0, 0x02, 0xa0, 0x6d, 0xe5, - 0xd4, 0x99, 0x9e, 0xfa, 0x0d, 0x3a, 0x6d, 0xa7, 0x87, 0x5c, 0x32, 0xd3, 0x0f, 0xd0, 0x43, 0xef, - 0xbd, 0xf6, 0xd0, 0x73, 0x8f, 0x9d, 0x69, 0xbf, 0x41, 0xaf, 0xdd, 0xf7, 0x1e, 0x00, 0x02, 0x24, - 0x15, 0xab, 0x99, 0x49, 0x13, 0x5d, 0xc4, 0xb7, 0xfb, 0xdb, 0xc5, 0xbe, 0x7d, 0xbf, 0xb7, 0xbb, - 0x00, 0x28, 0x63, 0xe6, 0x19, 0xae, 0x39, 0xf7, 0x1d, 0xb7, 0x31, 0x77, 0x1d, 0xdf, 0x21, 0xd5, - 0xa9, 0xe3, 0x4c, 0x2d, 0x26, 0x57, 0x27, 0x8b, 0x49, 0xfd, 0x08, 0x76, 0x1e, 0x99, 0x16, 0x6b, - 0x47, 0xc0, 0x01, 0xf3, 0xc9, 0x43, 0xc8, 0x4e, 0x50, 0x58, 0x4b, 0xbd, 0x99, 0xb9, 0x5b, 0x7a, - 0x70, 0xab, 0xb1, 0x62, 0xd4, 0x48, 0x5a, 0xf4, 0xb9, 0x98, 0x0a, 0x8b, 0xfa, 0x3f, 0xb3, 0x70, - 0x69, 0x83, 0x96, 0x10, 0xc8, 0xda, 0xfa, 0x8c, 0x7b, 0x4c, 0xdd, 0x2d, 0x52, 0xf1, 0x9b, 0xd4, - 0x60, 0x6b, 0xae, 0x1b, 0xcf, 0xf4, 0x29, 0xab, 0xa5, 0x85, 0x38, 0x5c, 0x92, 0x37, 0x00, 0xc6, - 0x6c, 0xce, 0xec, 0x31, 0xb3, 0x8d, 0xb3, 0x5a, 0x06, 0xa3, 0x28, 0xd2, 0x98, 0x84, 0xbc, 0x0b, - 0x3b, 0xf3, 0xc5, 0x89, 0x65, 0x1a, 0x5a, 0x0c, 0x06, 0x08, 0xcb, 0x51, 0x45, 0x2a, 0xda, 0x4b, - 0xf0, 0x1d, 0xa8, 0xbe, 0x60, 0xfa, 0xb3, 0x38, 0xb4, 0x24, 0xa0, 0x15, 0x2e, 0x8e, 0x01, 0x5b, - 0x50, 0x9e, 0x31, 0xcf, 0xc3, 0x00, 0x34, 0xff, 0x6c, 0xce, 0x6a, 0x59, 0xb1, 0xfb, 0x37, 0xd7, - 0x76, 0xbf, 0xba, 0xf3, 0x52, 0x60, 0x35, 0x44, 0x23, 0xd2, 0x84, 0x22, 0xb3, 0x17, 
0x33, 0xe9, - 0x21, 0x77, 0x4e, 0xfe, 0x54, 0x44, 0xac, 0x7a, 0x29, 0x70, 0xb3, 0xc0, 0xc5, 0x96, 0xc7, 0xdc, - 0xe7, 0xa6, 0xc1, 0x6a, 0x79, 0xe1, 0xe0, 0xce, 0x9a, 0x83, 0x81, 0xd4, 0xaf, 0xfa, 0x08, 0xed, - 0x70, 0x2b, 0x45, 0xf6, 0xd2, 0x67, 0xb6, 0x67, 0x3a, 0x76, 0x6d, 0x4b, 0x38, 0xb9, 0xbd, 0xe1, - 0x14, 0x99, 0x35, 0x5e, 0x75, 0xb1, 0xb4, 0x23, 0x1f, 0xc0, 0x96, 0x33, 0xf7, 0xf1, 0x97, 0x57, - 0x2b, 0xe0, 0xf9, 0x94, 0x1e, 0x5c, 0xdb, 0x48, 0x84, 0x9e, 0xc4, 0xd0, 0x10, 0x4c, 0x3a, 0xa0, - 0x78, 0xce, 0xc2, 0x35, 0x98, 0x66, 0x38, 0x63, 0xa6, 0x99, 0xf6, 0xc4, 0xa9, 0x15, 0x85, 0x83, - 0x1b, 0xeb, 0x1b, 0x11, 0xc0, 0x16, 0xe2, 0x3a, 0x08, 0xa3, 0x15, 0x2f, 0xb1, 0x26, 0x97, 0x21, - 0xef, 0x9d, 0xd9, 0xbe, 0xfe, 0xb2, 0x56, 0x16, 0x0c, 0x09, 0x56, 0xf5, 0xff, 0xe4, 0xa0, 0x7a, - 0x11, 0x8a, 0x7d, 0x04, 0xb9, 0x09, 0xdf, 0x25, 0x12, 0xec, 0x7f, 0xc8, 0x81, 0xb4, 0x49, 0x26, - 0x31, 0xff, 0x35, 0x93, 0xd8, 0x84, 0x92, 0xcd, 0x3c, 0x9f, 0x8d, 0x25, 0x23, 0x32, 0x17, 0xe4, - 0x14, 0x48, 0xa3, 0x75, 0x4a, 0x65, 0xbf, 0x16, 0xa5, 0x9e, 0x40, 0x35, 0x0a, 0x49, 0x73, 0x75, - 0x7b, 0x1a, 0x72, 0xf3, 0xfe, 0xab, 0x22, 0x69, 0xa8, 0xa1, 0x1d, 0xe5, 0x66, 0xb4, 0xc2, 0x12, - 0x6b, 0xd2, 0x06, 0x70, 0x6c, 0xe6, 0x4c, 0xf0, 0x7a, 0x19, 0x16, 0xf2, 0x64, 0x73, 0x96, 0x7a, - 0x1c, 0xb2, 0x96, 0x25, 0x47, 0x4a, 0x0d, 0x8b, 0xfc, 0x78, 0x49, 0xb5, 0xad, 0x73, 0x98, 0x72, - 0x24, 0x2f, 0xd9, 0x1a, 0xdb, 0x46, 0x50, 0x71, 0x19, 0xe7, 0x3d, 0xa6, 0x58, 0xee, 0xac, 0x28, - 0x82, 0x68, 0xbc, 0x72, 0x67, 0x34, 0x30, 0x93, 0x1b, 0xdb, 0x76, 0xe3, 0x4b, 0xf2, 0x16, 0x44, - 0x02, 0x4d, 0xd0, 0x0a, 0x44, 0x15, 0x2a, 0x87, 0xc2, 0x63, 0x94, 0xed, 0x3d, 0x84, 0x4a, 0x32, - 0x3d, 0x64, 0x17, 0x72, 0x9e, 0xaf, 0xbb, 0xbe, 0x60, 0x61, 0x8e, 0xca, 0x05, 0x51, 0x20, 0x83, - 0x45, 0x46, 0x54, 0xb9, 0x1c, 0xe5, 0x3f, 0xf7, 0x3e, 0x84, 0xed, 0xc4, 0xe3, 0x2f, 0x6a, 0x58, - 0xff, 0x6d, 0x1e, 0x76, 0x37, 0x71, 0x6e, 0x23, 0xfd, 0xf1, 0xfa, 0x20, 0x03, 0x4e, 0x98, 0x8b, - 0xbc, 0xe3, 0x1e, 0x82, 
0x15, 0x32, 0x2a, 0x67, 0xe9, 0x27, 0xcc, 0x42, 0x36, 0xa5, 0xee, 0x56, - 0x1e, 0xbc, 0x7b, 0x21, 0x56, 0x37, 0xba, 0xdc, 0x84, 0x4a, 0x4b, 0xf2, 0x31, 0x64, 0x83, 0x12, - 0xc7, 0x3d, 0xdc, 0xbb, 0x98, 0x07, 0xce, 0x45, 0x2a, 0xec, 0xc8, 0xeb, 0x50, 0xe4, 0xff, 0x65, - 0x6e, 0xf3, 0x22, 0xe6, 0x02, 0x17, 0xf0, 0xbc, 0x92, 0x3d, 0x28, 0x08, 0x9a, 0x8d, 0x59, 0xd8, - 0x1a, 0xa2, 0x35, 0x3f, 0x98, 0x31, 0x9b, 0xe8, 0x0b, 0xcb, 0xd7, 0x9e, 0xeb, 0xd6, 0x82, 0x09, - 0xc2, 0xe0, 0xc1, 0x04, 0xc2, 0x9f, 0x73, 0x19, 0xb9, 0x01, 0x25, 0xc9, 0x4a, 0x13, 0x6d, 0x5e, - 0x8a, 0xea, 0x93, 0xa3, 0x92, 0xa8, 0x1d, 0x2e, 0xe1, 0x8f, 0x7f, 0xea, 0xe1, 0x5d, 0x08, 0x8e, - 0x56, 0x3c, 0x82, 0x0b, 0xc4, 0xe3, 0x3f, 0x5c, 0x2d, 0x7c, 0xd7, 0x37, 0x6f, 0x6f, 0x95, 0x8b, - 0xf5, 0x3f, 0xa7, 0x21, 0x2b, 0xee, 0x5b, 0x15, 0x4a, 0xc3, 0x4f, 0xfb, 0xaa, 0xd6, 0xee, 0x8d, - 0x0e, 0xba, 0xaa, 0x92, 0x22, 0x15, 0x00, 0x21, 0x78, 0xd4, 0xed, 0x35, 0x87, 0x4a, 0x3a, 0x5a, - 0x77, 0x8e, 0x87, 0x1f, 0xfc, 0x48, 0xc9, 0x44, 0x06, 0x23, 0x29, 0xc8, 0xc6, 0x01, 0x3f, 0x7c, - 0xa0, 0xe4, 0x90, 0x09, 0x65, 0xe9, 0xa0, 0xf3, 0x44, 0x6d, 0x23, 0x22, 0x9f, 0x94, 0x20, 0x66, - 0x8b, 0x6c, 0x43, 0x51, 0x48, 0x0e, 0x7a, 0xbd, 0xae, 0x52, 0x88, 0x7c, 0x0e, 0x86, 0xb4, 0x73, - 0x7c, 0xa8, 0x14, 0x23, 0x9f, 0x87, 0xb4, 0x37, 0xea, 0x2b, 0x10, 0x79, 0x38, 0x52, 0x07, 0x83, - 0xe6, 0xa1, 0xaa, 0x94, 0x22, 0xc4, 0xc1, 0xa7, 0x43, 0x75, 0xa0, 0x94, 0x13, 0x61, 0xe1, 0x23, - 0xb6, 0xa3, 0x47, 0xa8, 0xc7, 0xa3, 0x23, 0xa5, 0x42, 0x76, 0x60, 0x5b, 0x3e, 0x22, 0x0c, 0xa2, - 0xba, 0x22, 0xc2, 0x48, 0x95, 0x65, 0x20, 0xd2, 0xcb, 0x4e, 0x42, 0x80, 0x08, 0x52, 0x6f, 0x41, - 0x4e, 0xb0, 0x0b, 0x59, 0x5c, 0xe9, 0x36, 0x0f, 0xd4, 0xae, 0xd6, 0xeb, 0x0f, 0x3b, 0xbd, 0xe3, - 0x66, 0x17, 0x73, 0x17, 0xc9, 0xa8, 0xfa, 0xb3, 0x51, 0x87, 0xaa, 0x6d, 0xcc, 0x5f, 0x4c, 0xd6, - 0x57, 0x9b, 0x43, 0x94, 0x65, 0xea, 0xf7, 0x60, 0x77, 0x53, 0x9d, 0xd9, 0x74, 0x33, 0xea, 0x5f, - 0xa4, 0xe0, 0xd2, 0x86, 0x92, 0xb9, 0xf1, 0x16, 0xfd, 0x14, 
0x72, 0x92, 0x69, 0xb2, 0x89, 0xbc, - 0xb3, 0xb1, 0xf6, 0x0a, 0xde, 0xad, 0x35, 0x12, 0x61, 0x17, 0x6f, 0xa4, 0x99, 0x73, 0x1a, 0x29, - 0x77, 0xb1, 0x46, 0xa7, 0x5f, 0xa7, 0xa0, 0x76, 0x9e, 0xef, 0x57, 0xdc, 0xf7, 0x74, 0xe2, 0xbe, - 0x7f, 0xb4, 0x1a, 0xc0, 0xcd, 0xf3, 0xf7, 0xb0, 0x16, 0xc5, 0x97, 0x29, 0xb8, 0xbc, 0x79, 0xde, - 0xd8, 0x18, 0xc3, 0xc7, 0x90, 0x9f, 0x31, 0xff, 0xd4, 0x09, 0x7b, 0xee, 0xdb, 0x1b, 0x2a, 0x39, - 0x57, 0xaf, 0xe6, 0x2a, 0xb0, 0x8a, 0xb7, 0x82, 0xcc, 0x79, 0x43, 0x83, 0x8c, 0x66, 0x2d, 0xd2, - 0xdf, 0xa4, 0xe1, 0xb5, 0x8d, 0xce, 0x37, 0x06, 0x7a, 0x1d, 0xc0, 0xb4, 0xe7, 0x0b, 0x5f, 0xf6, - 0x55, 0x59, 0x66, 0x8a, 0x42, 0x22, 0xae, 0x30, 0x2f, 0x21, 0x0b, 0x3f, 0xd2, 0x67, 0x84, 0x1e, - 0xa4, 0x48, 0x00, 0x1e, 0x2e, 0x03, 0xcd, 0x8a, 0x40, 0xdf, 0x38, 0x67, 0xa7, 0x6b, 0x2d, 0xeb, - 0x3d, 0x50, 0x0c, 0xcb, 0x64, 0xb6, 0xaf, 0x79, 0xbe, 0xcb, 0xf4, 0x99, 0x69, 0x4f, 0x45, 0x1d, - 0x2d, 0xec, 0xe7, 0x26, 0xba, 0xe5, 0x31, 0x5a, 0x95, 0xea, 0x41, 0xa8, 0xe5, 0x16, 0xa2, 0x59, - 0xb8, 0x31, 0x8b, 0x7c, 0xc2, 0x42, 0xaa, 0x23, 0x8b, 0xfa, 0xdf, 0xb7, 0xa0, 0x14, 0x9b, 0xce, - 0xc8, 0x4d, 0x28, 0x3f, 0xd5, 0x9f, 0xeb, 0x5a, 0x38, 0x71, 0xcb, 0x4c, 0x94, 0xb8, 0xac, 0x1f, - 0x4c, 0xdd, 0xef, 0xc1, 0xae, 0x80, 0xe0, 0x1e, 0xf1, 0x41, 0x86, 0xa5, 0x7b, 0x9e, 0x48, 0x5a, - 0x41, 0x40, 0x09, 0xd7, 0xf5, 0xb8, 0xaa, 0x15, 0x6a, 0xc8, 0xfb, 0x70, 0x49, 0x58, 0xcc, 0xb0, - 0xf0, 0x9a, 0x73, 0x8b, 0x69, 0xfc, 0x1d, 0xc0, 0x13, 0xf5, 0x34, 0x8a, 0x6c, 0x87, 0x23, 0x8e, - 0x02, 0x00, 0x8f, 0xc8, 0x23, 0x87, 0x70, 0x5d, 0x98, 0x4d, 0x99, 0xcd, 0x5c, 0xdd, 0x67, 0x1a, - 0xfb, 0xe5, 0x02, 0xb1, 0x9a, 0x6e, 0x8f, 0xb5, 0x53, 0xdd, 0x3b, 0xad, 0xed, 0xc6, 0x1d, 0x5c, - 0xe5, 0xd8, 0xc3, 0x00, 0xaa, 0x0a, 0x64, 0xd3, 0x1e, 0x7f, 0x82, 0x38, 0xb2, 0x0f, 0x97, 0x85, - 0x23, 0x4c, 0x0a, 0xee, 0x59, 0x33, 0x4e, 0x99, 0xf1, 0x4c, 0x5b, 0xf8, 0x93, 0x87, 0xb5, 0xd7, - 0xe3, 0x1e, 0x44, 0x90, 0x03, 0x81, 0x69, 0x71, 0xc8, 0x08, 0x11, 0x64, 0x00, 0x65, 0x7e, 0x1e, - 
0x33, 0xf3, 0x73, 0x0c, 0xdb, 0x71, 0x45, 0x8f, 0xa8, 0x6c, 0xb8, 0xdc, 0xb1, 0x24, 0x36, 0x7a, - 0x81, 0xc1, 0x11, 0xce, 0xa7, 0xfb, 0xb9, 0x41, 0x5f, 0x55, 0xdb, 0xb4, 0x14, 0x7a, 0x79, 0xe4, - 0xb8, 0x9c, 0x53, 0x53, 0x27, 0xca, 0x71, 0x49, 0x72, 0x6a, 0xea, 0x84, 0x19, 0xc6, 0x7c, 0x19, - 0x86, 0xdc, 0x36, 0xbe, 0xbb, 0x04, 0xc3, 0xba, 0x57, 0x53, 0x12, 0xf9, 0x32, 0x8c, 0x43, 0x09, - 0x08, 0x68, 0xee, 0xe1, 0x95, 0x78, 0x6d, 0x99, 0xaf, 0xb8, 0xe1, 0xce, 0xda, 0x2e, 0x57, 0x4d, - 0xf1, 0x89, 0xf3, 0xb3, 0x75, 0x43, 0x92, 0x78, 0xe2, 0xfc, 0x6c, 0xd5, 0xec, 0xb6, 0x78, 0x01, - 0x73, 0x99, 0x81, 0x29, 0x1f, 0xd7, 0xae, 0xc4, 0xd1, 0x31, 0x05, 0xb9, 0x8f, 0x44, 0x36, 0x34, - 0x66, 0xeb, 0x27, 0x78, 0xf6, 0xba, 0x8b, 0x3f, 0xbc, 0xda, 0x8d, 0x38, 0xb8, 0x62, 0x18, 0xaa, - 0xd0, 0x36, 0x85, 0x92, 0xdc, 0x83, 0x1d, 0xe7, 0xe4, 0xa9, 0x21, 0xc9, 0xa5, 0xa1, 0x9f, 0x89, - 0xf9, 0xb2, 0x76, 0x4b, 0xa4, 0xa9, 0xca, 0x15, 0x82, 0x5a, 0x7d, 0x21, 0x26, 0xef, 0xa0, 0x73, - 0xef, 0x54, 0x77, 0xe7, 0xa2, 0x49, 0x7b, 0x98, 0x54, 0x56, 0xbb, 0x2d, 0xa1, 0x52, 0x7e, 0x1c, - 0x8a, 0x89, 0x0a, 0x37, 0xf8, 0xe6, 0x6d, 0xdd, 0x76, 0xb4, 0x85, 0xc7, 0xb4, 0x65, 0x88, 0xd1, - 0x59, 0xbc, 0xcd, 0xc3, 0xa2, 0xd7, 0x42, 0xd8, 0xc8, 0xc3, 0x62, 0x16, 0x82, 0xc2, 0xe3, 0x79, - 0x02, 0xbb, 0x0b, 0xdb, 0xb4, 0x91, 0xe2, 0xa8, 0xe1, 0xc6, 0xf2, 0xc2, 0xd6, 0xfe, 0xb5, 0x75, - 0xce, 0xd0, 0x3d, 0x8a, 0xa3, 0x25, 0x49, 0xe8, 0xa5, 0xc5, 0xba, 0xb0, 0xbe, 0x0f, 0xe5, 0x38, - 0x77, 0x48, 0x11, 0x24, 0x7b, 0xb0, 0xbb, 0x61, 0x47, 0x6d, 0xf5, 0xda, 0xbc, 0x17, 0x7e, 0xa6, - 0x62, 0x63, 0xc3, 0x9e, 0xdc, 0xed, 0x0c, 0x55, 0x8d, 0x8e, 0x8e, 0x87, 0x9d, 0x23, 0x55, 0xc9, - 0xdc, 0x2b, 0x16, 0xfe, 0xbd, 0xa5, 0xfc, 0x0a, 0xff, 0xd2, 0xf5, 0xbf, 0xa6, 0xa1, 0x92, 0x9c, - 0x83, 0xc9, 0x4f, 0xe0, 0x4a, 0xf8, 0xd2, 0xea, 0x31, 0x5f, 0x7b, 0x61, 0xba, 0x82, 0xce, 0x33, - 0x5d, 0x4e, 0x92, 0xd1, 0x49, 0xec, 0x06, 0x28, 0x7c, 0xbd, 0xff, 0x05, 0x62, 0x1e, 0x09, 0x08, - 0xe9, 0xc2, 0x0d, 0x4c, 0x19, 0xce, 
0x9a, 0xf6, 0x58, 0x77, 0xc7, 0xda, 0xf2, 0x73, 0x81, 0xa6, - 0x1b, 0xc8, 0x03, 0xcf, 0x91, 0x9d, 0x24, 0xf2, 0x72, 0xcd, 0x76, 0x06, 0x01, 0x78, 0x59, 0x62, - 0x9b, 0x01, 0x74, 0x85, 0x35, 0x99, 0xf3, 0x58, 0x83, 0xb3, 0xd7, 0x4c, 0x9f, 0x23, 0x6d, 0x7c, - 0xf7, 0x4c, 0x4c, 0x6f, 0x05, 0x5a, 0x40, 0x81, 0xca, 0xd7, 0xdf, 0xdc, 0x19, 0xc4, 0xf3, 0xf8, - 0x8f, 0x0c, 0x94, 0xe3, 0x13, 0x1c, 0x1f, 0x88, 0x0d, 0x51, 0xe6, 0x53, 0xa2, 0x0a, 0xbc, 0xf5, - 0x95, 0xf3, 0x5e, 0xa3, 0xc5, 0xeb, 0xff, 0x7e, 0x5e, 0xce, 0x55, 0x54, 0x5a, 0xf2, 0xde, 0xcb, - 0xb9, 0xc6, 0xe4, 0xb4, 0x5e, 0xa0, 0xc1, 0x0a, 0x8b, 0x5d, 0xfe, 0xa9, 0x27, 0x7c, 0xe7, 0x85, - 0xef, 0x5b, 0x5f, 0xed, 0xfb, 0xf1, 0x40, 0x38, 0x2f, 0x3e, 0x1e, 0x68, 0xc7, 0x3d, 0x7a, 0xd4, - 0xec, 0xd2, 0xc0, 0x9c, 0x5c, 0x85, 0xac, 0xa5, 0x7f, 0x7e, 0x96, 0xec, 0x14, 0x42, 0x74, 0xd1, - 0xc4, 0xa3, 0x07, 0xfe, 0xc9, 0x23, 0x59, 0x9f, 0x85, 0xe8, 0x1b, 0xa4, 0xfe, 0x7d, 0xc8, 0x89, - 0x7c, 0x11, 0x80, 0x20, 0x63, 0xca, 0xf7, 0x48, 0x01, 0xb2, 0xad, 0x1e, 0xe5, 0xf4, 0x47, 0xbe, - 0x4b, 0xa9, 0xd6, 0xef, 0xa8, 0x2d, 0xbc, 0x01, 0xf5, 0xf7, 0x21, 0x2f, 0x93, 0xc0, 0xaf, 0x46, - 0x94, 0x06, 0x34, 0x92, 0xcb, 0xc0, 0x47, 0x2a, 0xd4, 0x8e, 0x8e, 0x0e, 0x54, 0xaa, 0xa4, 0xe3, - 0xc7, 0xfb, 0x97, 0x14, 0x94, 0x62, 0x03, 0x15, 0x6f, 0xe5, 0xba, 0x65, 0x39, 0x2f, 0x34, 0xdd, - 0x32, 0xb1, 0x42, 0xc9, 0xf3, 0x01, 0x21, 0x6a, 0x72, 0xc9, 0x45, 0xf3, 0xf7, 0x7f, 0xe1, 0xe6, - 0x1f, 0x53, 0xa0, 0xac, 0x0e, 0x63, 0x2b, 0x01, 0xa6, 0xbe, 0xd5, 0x00, 0xff, 0x90, 0x82, 0x4a, - 0x72, 0x02, 0x5b, 0x09, 0xef, 0xe6, 0xb7, 0x1a, 0xde, 0xef, 0x53, 0xb0, 0x9d, 0x98, 0xbb, 0xbe, - 0x53, 0xd1, 0xfd, 0x2e, 0x03, 0x97, 0x36, 0xd8, 0x61, 0x01, 0x92, 0x03, 0xaa, 0x9c, 0x99, 0x7f, - 0x70, 0x91, 0x67, 0x35, 0x78, 0xff, 0xeb, 0xeb, 0xae, 0x1f, 0xcc, 0xb3, 0xd8, 0x2f, 0xcd, 0x31, - 0x16, 0x55, 0x73, 0x62, 0xe2, 0xf8, 0x26, 0xdf, 0x58, 0xe4, 0xd4, 0x5a, 0x5d, 0xca, 0xe5, 0xeb, - 0xf1, 0xf7, 0x81, 0xcc, 0x1d, 0xcf, 0xf4, 0xcd, 0xe7, 0xfc, 0xf3, 0x5c, 
0xf8, 0x22, 0xcd, 0xa7, - 0xd8, 0x2c, 0x55, 0x42, 0x4d, 0xc7, 0xf6, 0x23, 0xb4, 0xcd, 0xa6, 0xfa, 0x0a, 0x9a, 0x97, 0xa1, - 0x0c, 0x55, 0x42, 0x4d, 0x84, 0xc6, 0x41, 0x73, 0xec, 0x2c, 0xf8, 0x40, 0x20, 0x71, 0xbc, 0xea, - 0xa5, 0x68, 0x49, 0xca, 0x22, 0x48, 0x30, 0xb1, 0x2d, 0xdf, 0xe0, 0xcb, 0xb4, 0x24, 0x65, 0x12, - 0x72, 0x07, 0xaa, 0xfa, 0x74, 0xea, 0x72, 0xe7, 0xa1, 0x23, 0x39, 0x86, 0x56, 0x22, 0xb1, 0x00, - 0xee, 0x3d, 0x86, 0x42, 0x98, 0x07, 0xde, 0x58, 0x78, 0x26, 0xb0, 0xe7, 0x8b, 0xef, 0x28, 0x69, - 0xfe, 0x52, 0x6f, 0x87, 0x4a, 0x7c, 0xa8, 0xe9, 0x69, 0xcb, 0x0f, 0x7a, 0x69, 0xd4, 0x17, 0x68, - 0xc9, 0xf4, 0xa2, 0x2f, 0x38, 0xf5, 0x2f, 0xb1, 0xbd, 0x26, 0x3f, 0x48, 0x92, 0x36, 0x14, 0x2c, - 0x07, 0xf9, 0xc1, 0x2d, 0xe4, 0xd7, 0xf0, 0xbb, 0xaf, 0xf8, 0x86, 0xd9, 0xe8, 0x06, 0x78, 0x1a, - 0x59, 0xee, 0xfd, 0x2d, 0x05, 0x85, 0x50, 0x8c, 0x8d, 0x22, 0x3b, 0xd7, 0xfd, 0x53, 0xe1, 0x2e, - 0x77, 0x90, 0x56, 0x52, 0x54, 0xac, 0xb9, 0x1c, 0xa7, 0x19, 0x5b, 0x50, 0x20, 0x90, 0xf3, 0x35, - 0x3f, 0x57, 0x8b, 0xe9, 0x63, 0x31, 0xe0, 0x3a, 0xb3, 0x19, 0x9e, 0xa4, 0x17, 0x9e, 0x6b, 0x20, - 0x6f, 0x05, 0x62, 0xfe, 0x5d, 0xdc, 0x77, 0x75, 0xd3, 0x4a, 0x60, 0xb3, 0x02, 0xab, 0x84, 0x8a, - 0x08, 0xbc, 0x0f, 0x57, 0x43, 0xbf, 0x63, 0xe6, 0xeb, 0x38, 0x3c, 0x8f, 0x97, 0x46, 0x79, 0xf1, - 0xb5, 0xeb, 0x4a, 0x00, 0x68, 0x07, 0xfa, 0xd0, 0xf6, 0xe0, 0x09, 0x0e, 0xb2, 0xce, 0x6c, 0x35, - 0x13, 0x07, 0xca, 0xca, 0x7b, 0x97, 0xf7, 0x49, 0xea, 0x33, 0x58, 0x0e, 0x15, 0x5f, 0xa4, 0x33, - 0x87, 0xfd, 0x83, 0x3f, 0xa5, 0xf7, 0x0e, 0xa5, 0x5d, 0x3f, 0xcc, 0x20, 0x65, 0x13, 0x8b, 0x19, - 0x3c, 0x3b, 0xff, 0x0d, 0x00, 0x00, 0xff, 0xff, 0x78, 0x42, 0x69, 0x71, 0xb3, 0x18, 0x00, 0x00, + // 2487 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xc4, 0x59, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0x5f, 0x7d, 0x5a, 0x7a, 0x92, 0xe5, 0xf1, 0xd8, 0x9b, 0x30, 0xde, 0x8f, 0x38, 0xda, 0x8f, + 0x38, 0x49, 0xab, 0x2c, 0x9c, 0xc4, 0xc9, 0x3a, 0xc5, 0xb6, 
0xb2, 0xc4, 0x78, 0x95, 0xca, 0x92, + 0x4a, 0xc9, 0xdd, 0x64, 0x8b, 0x82, 0x18, 0x93, 0x23, 0x89, 0x09, 0x45, 0x72, 0x49, 0x2a, 0x89, + 0x83, 0x1e, 0x02, 0xf4, 0xd4, 0xff, 0xa0, 0x28, 0x8a, 0x1e, 0x7a, 0x59, 0xa0, 0xd7, 0x02, 0x05, + 0xda, 0x7b, 0xaf, 0x05, 0x7a, 0xef, 0xa1, 0x40, 0x0b, 0xb4, 0x7f, 0x42, 0x8f, 0xc5, 0xcc, 0x90, + 0x14, 0xf5, 0x95, 0x78, 0x17, 0x48, 0xf6, 0x64, 0xcf, 0xef, 0xfd, 0xde, 0xe3, 0x9b, 0x37, 0x6f, + 0xde, 0xbc, 0x19, 0x01, 0xd2, 0xa9, 0xa7, 0xb9, 0x86, 0xe3, 0xdb, 0x6e, 0xc5, 0x71, 0x6d, 0xdf, + 0xc6, 0x6b, 0x03, 0xdb, 0x1e, 0x98, 0x54, 0x8c, 0x4e, 0xc6, 0xfd, 0xf2, 0x11, 0xac, 0xdf, 0x33, + 0x4c, 0x5a, 0x8f, 0x88, 0x5d, 0xea, 0xe3, 0x3b, 0x90, 0xee, 0x1b, 0x26, 0x95, 0x12, 0xdb, 0xa9, + 0x9d, 0xc2, 0xee, 0x87, 0x95, 0x19, 0xa5, 0xca, 0xb4, 0x46, 0x87, 0xc1, 0x0a, 0xd7, 0x28, 0xff, + 0x3b, 0x0d, 0x1b, 0x0b, 0xa4, 0x18, 0x43, 0xda, 0x22, 0x23, 0x66, 0x31, 0xb1, 0x93, 0x57, 0xf8, + 0xff, 0x58, 0x82, 0x15, 0x87, 0x68, 0x8f, 0xc9, 0x80, 0x4a, 0x49, 0x0e, 0x87, 0x43, 0xfc, 0x3e, + 0x80, 0x4e, 0x1d, 0x6a, 0xe9, 0xd4, 0xd2, 0x4e, 0xa5, 0xd4, 0x76, 0x6a, 0x27, 0xaf, 0xc4, 0x10, + 0x7c, 0x0d, 0xd6, 0x9d, 0xf1, 0x89, 0x69, 0x68, 0x6a, 0x8c, 0x06, 0xdb, 0xa9, 0x9d, 0x8c, 0x82, + 0x84, 0xa0, 0x3e, 0x21, 0x5f, 0x86, 0xb5, 0xa7, 0x94, 0x3c, 0x8e, 0x53, 0x0b, 0x9c, 0x5a, 0x62, + 0x70, 0x8c, 0x58, 0x83, 0xe2, 0x88, 0x7a, 0x1e, 0x19, 0x50, 0xd5, 0x3f, 0x75, 0xa8, 0x94, 0xe6, + 0xb3, 0xdf, 0x9e, 0x9b, 0xfd, 0xec, 0xcc, 0x0b, 0x81, 0x56, 0xef, 0xd4, 0xa1, 0xb8, 0x0a, 0x79, + 0x6a, 0x8d, 0x47, 0xc2, 0x42, 0x66, 0x49, 0xfc, 0x64, 0x6b, 0x3c, 0x9a, 0xb5, 0x92, 0x63, 0x6a, + 0x81, 0x89, 0x15, 0x8f, 0xba, 0x4f, 0x0c, 0x8d, 0x4a, 0x59, 0x6e, 0xe0, 0xf2, 0x9c, 0x81, 0xae, + 0x90, 0xcf, 0xda, 0x08, 0xf5, 0x70, 0x0d, 0xf2, 0xf4, 0x99, 0x4f, 0x2d, 0xcf, 0xb0, 0x2d, 0x69, + 0x85, 0x1b, 0xf9, 0x68, 0xc1, 0x2a, 0x52, 0x53, 0x9f, 0x35, 0x31, 0xd1, 0xc3, 0x7b, 0xb0, 0x62, + 0x3b, 0xbe, 0x61, 0x5b, 0x9e, 0x94, 0xdb, 0x4e, 0xec, 0x14, 0x76, 0xdf, 0x5d, 0x98, 0x08, 0x6d, + 
0xc1, 0x51, 0x42, 0x32, 0x6e, 0x00, 0xf2, 0xec, 0xb1, 0xab, 0x51, 0x55, 0xb3, 0x75, 0xaa, 0x1a, + 0x56, 0xdf, 0x96, 0xf2, 0xdc, 0xc0, 0xc5, 0xf9, 0x89, 0x70, 0x62, 0xcd, 0xd6, 0x69, 0xc3, 0xea, + 0xdb, 0x4a, 0xc9, 0x9b, 0x1a, 0xe3, 0x73, 0x90, 0xf5, 0x4e, 0x2d, 0x9f, 0x3c, 0x93, 0x8a, 0x3c, + 0x43, 0x82, 0x51, 0xf9, 0xcf, 0x59, 0x58, 0x3b, 0x4b, 0x8a, 0xdd, 0x85, 0x4c, 0x9f, 0xcd, 0x52, + 0x4a, 0x7e, 0x93, 0x18, 0x08, 0x9d, 0xe9, 0x20, 0x66, 0xbf, 0x65, 0x10, 0xab, 0x50, 0xb0, 0xa8, + 0xe7, 0x53, 0x5d, 0x64, 0x44, 0xea, 0x8c, 0x39, 0x05, 0x42, 0x69, 0x3e, 0xa5, 0xd2, 0xdf, 0x2a, + 0xa5, 0x1e, 0xc0, 0x5a, 0xe4, 0x92, 0xea, 0x12, 0x6b, 0x10, 0xe6, 0xe6, 0xf5, 0x57, 0x79, 0x52, + 0x91, 0x43, 0x3d, 0x85, 0xa9, 0x29, 0x25, 0x3a, 0x35, 0xc6, 0x75, 0x00, 0xdb, 0xa2, 0x76, 0x5f, + 0xd5, 0xa9, 0x66, 0x4a, 0xb9, 0x25, 0x51, 0x6a, 0x33, 0xca, 0x5c, 0x94, 0x6c, 0x81, 0x6a, 0x26, + 0xfe, 0x74, 0x92, 0x6a, 0x2b, 0x4b, 0x32, 0xe5, 0x48, 0x6c, 0xb2, 0xb9, 0x6c, 0x3b, 0x86, 0x92, + 0x4b, 0x59, 0xde, 0x53, 0x3d, 0x98, 0x59, 0x9e, 0x3b, 0x51, 0x79, 0xe5, 0xcc, 0x94, 0x40, 0x4d, + 0x4c, 0x6c, 0xd5, 0x8d, 0x0f, 0xf1, 0x07, 0x10, 0x01, 0x2a, 0x4f, 0x2b, 0xe0, 0x55, 0xa8, 0x18, + 0x82, 0x2d, 0x32, 0xa2, 0x5b, 0xcf, 0xa1, 0x34, 0x1d, 0x1e, 0xbc, 0x09, 0x19, 0xcf, 0x27, 0xae, + 0xcf, 0xb3, 0x30, 0xa3, 0x88, 0x01, 0x46, 0x90, 0xa2, 0x96, 0xce, 0xab, 0x5c, 0x46, 0x61, 0xff, + 0xe2, 0x1f, 0x4d, 0x26, 0x9c, 0xe2, 0x13, 0xfe, 0x78, 0x7e, 0x45, 0xa7, 0x2c, 0xcf, 0xce, 0x7b, + 0xeb, 0x36, 0xac, 0x4e, 0x4d, 0xe0, 0xac, 0x9f, 0x2e, 0xff, 0x02, 0xde, 0x5e, 0x68, 0x1a, 0x3f, + 0x80, 0xcd, 0xb1, 0x65, 0x58, 0x3e, 0x75, 0x1d, 0x97, 0xb2, 0x8c, 0x15, 0x9f, 0x92, 0xfe, 0xb3, + 0xb2, 0x24, 0xe7, 0x8e, 0xe3, 0x6c, 0x61, 0x45, 0xd9, 0x18, 0xcf, 0x83, 0x57, 0xf3, 0xb9, 0xff, + 0xae, 0xa0, 0x17, 0x2f, 0x5e, 0xbc, 0x48, 0x96, 0x7f, 0x9d, 0x85, 0xcd, 0x45, 0x7b, 0x66, 0xe1, + 0xf6, 0x3d, 0x07, 0x59, 0x6b, 0x3c, 0x3a, 0xa1, 0x2e, 0x0f, 0x52, 0x46, 0x09, 0x46, 0xb8, 0x0a, + 0x19, 0x93, 0x9c, 0x50, 0x53, 0x4a, 
0x6f, 0x27, 0x76, 0x4a, 0xbb, 0xd7, 0xce, 0xb4, 0x2b, 0x2b, + 0x4d, 0xa6, 0xa2, 0x08, 0x4d, 0xfc, 0x19, 0xa4, 0x83, 0x12, 0xcd, 0x2c, 0x5c, 0x3d, 0x9b, 0x05, + 0xb6, 0x97, 0x14, 0xae, 0x87, 0xdf, 0x81, 0x3c, 0xfb, 0x2b, 0x72, 0x23, 0xcb, 0x7d, 0xce, 0x31, + 0x80, 0xe5, 0x05, 0xde, 0x82, 0x1c, 0xdf, 0x26, 0x3a, 0x0d, 0x8f, 0xb6, 0x68, 0xcc, 0x12, 0x4b, + 0xa7, 0x7d, 0x32, 0x36, 0x7d, 0xf5, 0x09, 0x31, 0xc7, 0x94, 0x27, 0x7c, 0x5e, 0x29, 0x06, 0xe0, + 0x4f, 0x19, 0x86, 0x2f, 0x42, 0x41, 0xec, 0x2a, 0xc3, 0xd2, 0xe9, 0x33, 0x5e, 0x3d, 0x33, 0x8a, + 0xd8, 0x68, 0x0d, 0x86, 0xb0, 0xcf, 0x3f, 0xf2, 0x6c, 0x2b, 0x4c, 0x4d, 0xfe, 0x09, 0x06, 0xf0, + 0xcf, 0xdf, 0x9e, 0x2d, 0xdc, 0xef, 0x2d, 0x9e, 0xde, 0x6c, 0x4e, 0x95, 0xff, 0x94, 0x84, 0x34, + 0xaf, 0x17, 0x6b, 0x50, 0xe8, 0x3d, 0xec, 0xc8, 0x6a, 0xbd, 0x7d, 0x7c, 0xd0, 0x94, 0x51, 0x02, + 0x97, 0x00, 0x38, 0x70, 0xaf, 0xd9, 0xae, 0xf6, 0x50, 0x32, 0x1a, 0x37, 0x5a, 0xbd, 0xbd, 0x9b, + 0x28, 0x15, 0x29, 0x1c, 0x0b, 0x20, 0x1d, 0x27, 0xdc, 0xd8, 0x45, 0x19, 0x8c, 0xa0, 0x28, 0x0c, + 0x34, 0x1e, 0xc8, 0xf5, 0xbd, 0x9b, 0x28, 0x3b, 0x8d, 0xdc, 0xd8, 0x45, 0x2b, 0x78, 0x15, 0xf2, + 0x1c, 0x39, 0x68, 0xb7, 0x9b, 0x28, 0x17, 0xd9, 0xec, 0xf6, 0x94, 0x46, 0xeb, 0x10, 0xe5, 0x23, + 0x9b, 0x87, 0x4a, 0xfb, 0xb8, 0x83, 0x20, 0xb2, 0x70, 0x24, 0x77, 0xbb, 0xd5, 0x43, 0x19, 0x15, + 0x22, 0xc6, 0xc1, 0xc3, 0x9e, 0xdc, 0x45, 0xc5, 0x29, 0xb7, 0x6e, 0xec, 0xa2, 0xd5, 0xe8, 0x13, + 0x72, 0xeb, 0xf8, 0x08, 0x95, 0xf0, 0x3a, 0xac, 0x8a, 0x4f, 0x84, 0x4e, 0xac, 0xcd, 0x40, 0x7b, + 0x37, 0x11, 0x9a, 0x38, 0x22, 0xac, 0xac, 0x4f, 0x01, 0x7b, 0x37, 0x11, 0x2e, 0xd7, 0x20, 0xc3, + 0xb3, 0x0b, 0x63, 0x28, 0x35, 0xab, 0x07, 0x72, 0x53, 0x6d, 0x77, 0x7a, 0x8d, 0x76, 0xab, 0xda, + 0x44, 0x89, 0x09, 0xa6, 0xc8, 0x3f, 0x39, 0x6e, 0x28, 0x72, 0x1d, 0x25, 0xe3, 0x58, 0x47, 0xae, + 0xf6, 0xe4, 0x3a, 0x4a, 0x95, 0x35, 0xd8, 0x5c, 0x54, 0x27, 0x17, 0xee, 0x8c, 0xd8, 0x12, 0x27, + 0x97, 0x2c, 0x31, 0xb7, 0x35, 0xb7, 0xc4, 0xff, 0x4a, 0xc2, 0xc6, 0x82, 
0xb3, 0x62, 0xe1, 0x47, + 0x7e, 0x08, 0x19, 0x91, 0xa2, 0xe2, 0xf4, 0xbc, 0xb2, 0xf0, 0xd0, 0xe1, 0x09, 0x3b, 0x77, 0x82, + 0x72, 0xbd, 0x78, 0x07, 0x91, 0x5a, 0xd2, 0x41, 0x30, 0x13, 0x73, 0x35, 0xfd, 0xe7, 0x73, 0x35, + 0x5d, 0x1c, 0x7b, 0x7b, 0x67, 0x39, 0xf6, 0x38, 0xf6, 0xcd, 0x6a, 0x7b, 0x66, 0x41, 0x6d, 0xbf, + 0x0b, 0xeb, 0x73, 0x86, 0xce, 0x5c, 0x63, 0x7f, 0x99, 0x00, 0x69, 0x59, 0x70, 0x5e, 0x51, 0xe9, + 0x92, 0x53, 0x95, 0xee, 0xee, 0x6c, 0x04, 0x2f, 0x2d, 0x5f, 0x84, 0xb9, 0xb5, 0xfe, 0x3a, 0x01, + 0xe7, 0x16, 0x77, 0x8a, 0x0b, 0x7d, 0xf8, 0x0c, 0xb2, 0x23, 0xea, 0x0f, 0xed, 0xb0, 0x5b, 0xfa, + 0x78, 0xc1, 0x19, 0xcc, 0xc4, 0xb3, 0x8b, 0x1d, 0x68, 0xc5, 0x0f, 0xf1, 0xd4, 0xb2, 0x76, 0x4f, + 0x78, 0x33, 0xe7, 0xe9, 0xaf, 0x92, 0xf0, 0xf6, 0x42, 0xe3, 0x0b, 0x1d, 0x7d, 0x0f, 0xc0, 0xb0, + 0x9c, 0xb1, 0x2f, 0x3a, 0x22, 0x51, 0x60, 0xf3, 0x1c, 0xe1, 0xc5, 0x8b, 0x15, 0xcf, 0xb1, 0x1f, + 0xc9, 0x53, 0x5c, 0x0e, 0x02, 0xe2, 0x84, 0x3b, 0x13, 0x47, 0xd3, 0xdc, 0xd1, 0xf7, 0x97, 0xcc, + 0x74, 0x2e, 0x31, 0x3f, 0x01, 0xa4, 0x99, 0x06, 0xb5, 0x7c, 0xd5, 0xf3, 0x5d, 0x4a, 0x46, 0x86, + 0x35, 0xe0, 0x27, 0x48, 0x6e, 0x3f, 0xd3, 0x27, 0xa6, 0x47, 0x95, 0x35, 0x21, 0xee, 0x86, 0x52, + 0xa6, 0xc1, 0x13, 0xc8, 0x8d, 0x69, 0x64, 0xa7, 0x34, 0x84, 0x38, 0xd2, 0x28, 0xff, 0x31, 0x07, + 0x85, 0x58, 0x5f, 0x8d, 0x2f, 0x41, 0xf1, 0x11, 0x79, 0x42, 0xd4, 0xf0, 0xae, 0x24, 0x22, 0x51, + 0x60, 0x58, 0x27, 0xb8, 0x2f, 0x7d, 0x02, 0x9b, 0x9c, 0x62, 0x8f, 0x7d, 0xea, 0xaa, 0x9a, 0x49, + 0x3c, 0x8f, 0x07, 0x2d, 0xc7, 0xa9, 0x98, 0xc9, 0xda, 0x4c, 0x54, 0x0b, 0x25, 0xf8, 0x16, 0x6c, + 0x70, 0x8d, 0xd1, 0xd8, 0xf4, 0x0d, 0xc7, 0xa4, 0x2a, 0xbb, 0xbd, 0x79, 0xfc, 0x24, 0x89, 0x3c, + 0x5b, 0x67, 0x8c, 0xa3, 0x80, 0xc0, 0x3c, 0xf2, 0x70, 0x1d, 0xde, 0xe3, 0x6a, 0x03, 0x6a, 0x51, + 0x97, 0xf8, 0x54, 0xa5, 0x5f, 0x8d, 0x89, 0xe9, 0xa9, 0xc4, 0xd2, 0xd5, 0x21, 0xf1, 0x86, 0xd2, + 0x26, 0x33, 0x70, 0x90, 0x94, 0x12, 0xca, 0x05, 0x46, 0x3c, 0x0c, 0x78, 0x32, 0xa7, 0x55, 0x2d, + 0xfd, 0x73, 
0xe2, 0x0d, 0xf1, 0x3e, 0x9c, 0xe3, 0x56, 0x3c, 0xdf, 0x35, 0xac, 0x81, 0xaa, 0x0d, + 0xa9, 0xf6, 0x58, 0x1d, 0xfb, 0xfd, 0x3b, 0xd2, 0x3b, 0xf1, 0xef, 0x73, 0x0f, 0xbb, 0x9c, 0x53, + 0x63, 0x94, 0x63, 0xbf, 0x7f, 0x07, 0x77, 0xa1, 0xc8, 0x16, 0x63, 0x64, 0x3c, 0xa7, 0x6a, 0xdf, + 0x76, 0xf9, 0xd1, 0x58, 0x5a, 0x50, 0x9a, 0x62, 0x11, 0xac, 0xb4, 0x03, 0x85, 0x23, 0x5b, 0xa7, + 0xfb, 0x99, 0x6e, 0x47, 0x96, 0xeb, 0x4a, 0x21, 0xb4, 0x72, 0xcf, 0x76, 0x59, 0x42, 0x0d, 0xec, + 0x28, 0xc0, 0x05, 0x91, 0x50, 0x03, 0x3b, 0x0c, 0xef, 0x2d, 0xd8, 0xd0, 0x34, 0x31, 0x67, 0x43, + 0x53, 0x83, 0x3b, 0x96, 0x27, 0xa1, 0xa9, 0x60, 0x69, 0xda, 0xa1, 0x20, 0x04, 0x39, 0xee, 0xe1, + 0x4f, 0xe1, 0xed, 0x49, 0xb0, 0xe2, 0x8a, 0xeb, 0x73, 0xb3, 0x9c, 0x55, 0xbd, 0x05, 0x1b, 0xce, + 0xe9, 0xbc, 0x22, 0x9e, 0xfa, 0xa2, 0x73, 0x3a, 0xab, 0x76, 0x1b, 0x36, 0x9d, 0xa1, 0x33, 0xaf, + 0x77, 0x35, 0xae, 0x87, 0x9d, 0xa1, 0x33, 0xab, 0xf8, 0x11, 0xbf, 0x70, 0xbb, 0x54, 0x23, 0x3e, + 0xd5, 0xa5, 0xf3, 0x71, 0x7a, 0x4c, 0x80, 0xaf, 0x03, 0xd2, 0x34, 0x95, 0x5a, 0xe4, 0xc4, 0xa4, + 0x2a, 0x71, 0xa9, 0x45, 0x3c, 0xe9, 0x62, 0x9c, 0x5c, 0xd2, 0x34, 0x99, 0x4b, 0xab, 0x5c, 0x88, + 0xaf, 0xc2, 0xba, 0x7d, 0xf2, 0x48, 0x13, 0x29, 0xa9, 0x3a, 0x2e, 0xed, 0x1b, 0xcf, 0xa4, 0x0f, + 0x79, 0x7c, 0xd7, 0x98, 0x80, 0x27, 0x64, 0x87, 0xc3, 0xf8, 0x0a, 0x20, 0xcd, 0x1b, 0x12, 0xd7, + 0xe1, 0x35, 0xd9, 0x73, 0x88, 0x46, 0xa5, 0x8f, 0x04, 0x55, 0xe0, 0xad, 0x10, 0x66, 0x5b, 0xc2, + 0x7b, 0x6a, 0xf4, 0xfd, 0xd0, 0xe2, 0x65, 0xb1, 0x25, 0x38, 0x16, 0x58, 0xdb, 0x01, 0xc4, 0x42, + 0x31, 0xf5, 0xe1, 0x1d, 0x4e, 0x2b, 0x39, 0x43, 0x27, 0xfe, 0xdd, 0x0f, 0x60, 0x95, 0x31, 0x27, + 0x1f, 0xbd, 0x22, 0x1a, 0x32, 0x67, 0x18, 0xfb, 0xe2, 0x6b, 0xeb, 0x8d, 0xcb, 0xfb, 0x50, 0x8c, + 0xe7, 0x27, 0xce, 0x83, 0xc8, 0x50, 0x94, 0x60, 0xcd, 0x4a, 0xad, 0x5d, 0x67, 0x6d, 0xc6, 0x97, + 0x32, 0x4a, 0xb2, 0x76, 0xa7, 0xd9, 0xe8, 0xc9, 0xaa, 0x72, 0xdc, 0xea, 0x35, 0x8e, 0x64, 0x94, + 0x8a, 0xf7, 0xd5, 0x7f, 0x4d, 0x42, 0x69, 0xfa, 
0x8a, 0x84, 0x7f, 0x00, 0xe7, 0xc3, 0xf7, 0x0c, + 0x8f, 0xfa, 0xea, 0x53, 0xc3, 0xe5, 0x5b, 0x66, 0x44, 0xc4, 0xf1, 0x15, 0x2d, 0xda, 0x66, 0xc0, + 0xea, 0x52, 0xff, 0x0b, 0xc3, 0x65, 0x1b, 0x62, 0x44, 0x7c, 0xdc, 0x84, 0x8b, 0x96, 0xad, 0x7a, + 0x3e, 0xb1, 0x74, 0xe2, 0xea, 0xea, 0xe4, 0x25, 0x49, 0x25, 0x9a, 0x46, 0x3d, 0xcf, 0x16, 0x47, + 0x55, 0x64, 0xe5, 0x5d, 0xcb, 0xee, 0x06, 0xe4, 0x49, 0x0d, 0xaf, 0x06, 0xd4, 0x99, 0x04, 0x4b, + 0x2d, 0x4b, 0xb0, 0x77, 0x20, 0x3f, 0x22, 0x8e, 0x4a, 0x2d, 0xdf, 0x3d, 0xe5, 0x8d, 0x71, 0x4e, + 0xc9, 0x8d, 0x88, 0x23, 0xb3, 0xf1, 0x9b, 0xb9, 0x9f, 0xfc, 0x23, 0x05, 0xc5, 0x78, 0x73, 0xcc, + 0xee, 0x1a, 0x1a, 0x3f, 0x47, 0x12, 0xbc, 0xd2, 0x7c, 0xf0, 0xd2, 0x56, 0xba, 0x52, 0x63, 0x07, + 0xcc, 0x7e, 0x56, 0xb4, 0xac, 0x8a, 0xd0, 0x64, 0x87, 0x3b, 0xab, 0x2d, 0x54, 0xb4, 0x08, 0x39, + 0x25, 0x18, 0xe1, 0x43, 0xc8, 0x3e, 0xf2, 0xb8, 0xed, 0x2c, 0xb7, 0xfd, 0xe1, 0xcb, 0x6d, 0xdf, + 0xef, 0x72, 0xe3, 0xf9, 0xfb, 0x5d, 0xb5, 0xd5, 0x56, 0x8e, 0xaa, 0x4d, 0x25, 0x50, 0xc7, 0x17, + 0x20, 0x6d, 0x92, 0xe7, 0xa7, 0xd3, 0x47, 0x11, 0x87, 0xce, 0x1a, 0xf8, 0x0b, 0x90, 0x7e, 0x4a, + 0xc9, 0xe3, 0xe9, 0x03, 0x80, 0x43, 0xaf, 0x31, 0xf5, 0xaf, 0x43, 0x86, 0xc7, 0x0b, 0x03, 0x04, + 0x11, 0x43, 0x6f, 0xe1, 0x1c, 0xa4, 0x6b, 0x6d, 0x85, 0xa5, 0x3f, 0x82, 0xa2, 0x40, 0xd5, 0x4e, + 0x43, 0xae, 0xc9, 0x28, 0x59, 0xbe, 0x05, 0x59, 0x11, 0x04, 0xb6, 0x35, 0xa2, 0x30, 0xa0, 0xb7, + 0x82, 0x61, 0x60, 0x23, 0x11, 0x4a, 0x8f, 0x8f, 0x0e, 0x64, 0x05, 0x25, 0xe3, 0xcb, 0xeb, 0x41, + 0x31, 0xde, 0x17, 0xbf, 0x99, 0x9c, 0xfa, 0x4b, 0x02, 0x0a, 0xb1, 0x3e, 0x97, 0x35, 0x28, 0xc4, + 0x34, 0xed, 0xa7, 0x2a, 0x31, 0x0d, 0xe2, 0x05, 0x49, 0x01, 0x1c, 0xaa, 0x32, 0xe4, 0xac, 0x8b, + 0xf6, 0x46, 0x9c, 0xff, 0x5d, 0x02, 0xd0, 0x6c, 0x8b, 0x39, 0xe3, 0x60, 0xe2, 0x3b, 0x75, 0xf0, + 0xb7, 0x09, 0x28, 0x4d, 0xf7, 0x95, 0x33, 0xee, 0x5d, 0xfa, 0x4e, 0xdd, 0xfb, 0x67, 0x12, 0x56, + 0xa7, 0xba, 0xc9, 0xb3, 0x7a, 0xf7, 0x15, 0xac, 0x1b, 0x3a, 0x1d, 0x39, 0xb6, 0x4f, 
0x2d, 0xed, + 0x54, 0x35, 0xe9, 0x13, 0x6a, 0x4a, 0x65, 0x5e, 0x28, 0xae, 0xbf, 0xbc, 0x5f, 0xad, 0x34, 0x26, + 0x7a, 0x4d, 0xa6, 0xb6, 0xbf, 0xd1, 0xa8, 0xcb, 0x47, 0x9d, 0x76, 0x4f, 0x6e, 0xd5, 0x1e, 0xaa, + 0xc7, 0xad, 0x1f, 0xb7, 0xda, 0x5f, 0xb4, 0x14, 0x64, 0xcc, 0xd0, 0x5e, 0xe3, 0x56, 0xef, 0x00, + 0x9a, 0x75, 0x0a, 0x9f, 0x87, 0x45, 0x6e, 0xa1, 0xb7, 0xf0, 0x06, 0xac, 0xb5, 0xda, 0x6a, 0xb7, + 0x51, 0x97, 0x55, 0xf9, 0xde, 0x3d, 0xb9, 0xd6, 0xeb, 0x8a, 0x17, 0x88, 0x88, 0xdd, 0x9b, 0xde, + 0xd4, 0xbf, 0x49, 0xc1, 0xc6, 0x02, 0x4f, 0x70, 0x35, 0xb8, 0x3b, 0x88, 0xeb, 0xcc, 0xf7, 0xcf, + 0xe2, 0x7d, 0x85, 0x1d, 0xf9, 0x1d, 0xe2, 0xfa, 0xc1, 0x55, 0xe3, 0x0a, 0xb0, 0x28, 0x59, 0xbe, + 0xd1, 0x37, 0xa8, 0x1b, 0x3c, 0xd8, 0x88, 0x0b, 0xc5, 0xda, 0x04, 0x17, 0x6f, 0x36, 0xdf, 0x03, + 0xec, 0xd8, 0x9e, 0xe1, 0x1b, 0x4f, 0xa8, 0x6a, 0x58, 0xe1, 0xeb, 0x0e, 0xbb, 0x60, 0xa4, 0x15, + 0x14, 0x4a, 0x1a, 0x96, 0x1f, 0xb1, 0x2d, 0x3a, 0x20, 0x33, 0x6c, 0x56, 0xc0, 0x53, 0x0a, 0x0a, + 0x25, 0x11, 0xfb, 0x12, 0x14, 0x75, 0x7b, 0xcc, 0xba, 0x2e, 0xc1, 0x63, 0xe7, 0x45, 0x42, 0x29, + 0x08, 0x2c, 0xa2, 0x04, 0xfd, 0xf4, 0xe4, 0x59, 0xa9, 0xa8, 0x14, 0x04, 0x26, 0x28, 0x97, 0x61, + 0x8d, 0x0c, 0x06, 0x2e, 0x33, 0x1e, 0x1a, 0x12, 0x37, 0x84, 0x52, 0x04, 0x73, 0xe2, 0xd6, 0x7d, + 0xc8, 0x85, 0x71, 0x60, 0x47, 0x32, 0x8b, 0x84, 0xea, 0x88, 0x6b, 0x6f, 0x72, 0x27, 0xaf, 0xe4, + 0xac, 0x50, 0x78, 0x09, 0x8a, 0x86, 0xa7, 0x4e, 0x5e, 0xc9, 0x93, 0xdb, 0xc9, 0x9d, 0x9c, 0x52, + 0x30, 0xbc, 0xe8, 0x85, 0xb1, 0xfc, 0x75, 0x12, 0x4a, 0xd3, 0xaf, 0xfc, 0xb8, 0x0e, 0x39, 0xd3, + 0xd6, 0x08, 0x4f, 0x2d, 0xf1, 0x13, 0xd3, 0xce, 0x2b, 0x7e, 0x18, 0xa8, 0x34, 0x03, 0xbe, 0x12, + 0x69, 0x6e, 0xfd, 0x2d, 0x01, 0xb9, 0x10, 0xc6, 0xe7, 0x20, 0xed, 0x10, 0x7f, 0xc8, 0xcd, 0x65, + 0x0e, 0x92, 0x28, 0xa1, 0xf0, 0x31, 0xc3, 0x3d, 0x87, 0x58, 0x3c, 0x05, 0x02, 0x9c, 0x8d, 0xd9, + 0xba, 0x9a, 0x94, 0xe8, 0xfc, 0xfa, 0x61, 0x8f, 0x46, 0xd4, 0xf2, 0xbd, 0x70, 0x5d, 0x03, 0xbc, + 0x16, 0xc0, 0xf8, 0x1a, 
0xac, 0xfb, 0x2e, 0x31, 0xcc, 0x29, 0x6e, 0x9a, 0x73, 0x51, 0x28, 0x88, + 0xc8, 0xfb, 0x70, 0x21, 0xb4, 0xab, 0x53, 0x9f, 0x68, 0x43, 0xaa, 0x4f, 0x94, 0xb2, 0xfc, 0x99, + 0xe1, 0x7c, 0x40, 0xa8, 0x07, 0xf2, 0x50, 0xb7, 0xfc, 0xf7, 0x04, 0xac, 0x87, 0x17, 0x26, 0x3d, + 0x0a, 0xd6, 0x11, 0x00, 0xb1, 0x2c, 0xdb, 0x8f, 0x87, 0x6b, 0x3e, 0x95, 0xe7, 0xf4, 0x2a, 0xd5, + 0x48, 0x49, 0x89, 0x19, 0xd8, 0x1a, 0x01, 0x4c, 0x24, 0x4b, 0xc3, 0x76, 0x11, 0x0a, 0xc1, 0x4f, + 0x38, 0xfc, 0x77, 0x40, 0x71, 0xc5, 0x06, 0x01, 0xb1, 0x9b, 0x15, 0xde, 0x84, 0xcc, 0x09, 0x1d, + 0x18, 0x56, 0xf0, 0x30, 0x2b, 0x06, 0xe1, 0x43, 0x48, 0x3a, 0x7a, 0x08, 0x39, 0xf8, 0x19, 0x6c, + 0x68, 0xf6, 0x68, 0xd6, 0xdd, 0x03, 0x34, 0x73, 0xcd, 0xf7, 0x3e, 0x4f, 0x7c, 0x09, 0x93, 0x16, + 0xf3, 0x7f, 0x89, 0xc4, 0xef, 0x93, 0xa9, 0xc3, 0xce, 0xc1, 0x1f, 0x92, 0x5b, 0x87, 0x42, 0xb5, + 0x13, 0xce, 0x54, 0xa1, 0x7d, 0x93, 0x6a, 0xcc, 0xfb, 0xff, 0x07, 0x00, 0x00, 0xff, 0xff, 0xa3, + 0x58, 0x22, 0x30, 0xdf, 0x1c, 0x00, 0x00, } diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/gostring.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go similarity index 79% rename from vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/gostring.go rename to vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go index b08b81c10..3b95a7757 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/gostring.go +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_gostring.gen.go @@ -1,12 +1,49 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: descriptor.proto + +/* +Package descriptor is a generated protocol buffer package. 
+ +It is generated from these files: + descriptor.proto + +It has these top-level messages: + FileDescriptorSet + FileDescriptorProto + DescriptorProto + ExtensionRangeOptions + FieldDescriptorProto + OneofDescriptorProto + EnumDescriptorProto + EnumValueDescriptorProto + ServiceDescriptorProto + MethodDescriptorProto + FileOptions + MessageOptions + FieldOptions + OneofOptions + EnumOptions + EnumValueOptions + ServiceOptions + MethodOptions + UninterpretedOption + SourceCodeInfo + GeneratedCodeInfo +*/ package descriptor import fmt "fmt" - import strings "strings" -import github_com_gogo_protobuf_proto "github.com/gogo/protobuf/proto" +import proto "github.com/gogo/protobuf/proto" import sort "sort" import strconv "strconv" import reflect "reflect" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf func (this *FileDescriptorSet) GoString() string { if this == nil { @@ -117,7 +154,7 @@ func (this *DescriptorProto_ExtensionRange) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 7) s = append(s, "&descriptor.DescriptorProto_ExtensionRange{") if this.Start != nil { s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") @@ -125,6 +162,9 @@ func (this *DescriptorProto_ExtensionRange) GoString() string { if this.End != nil { s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } if this.XXX_unrecognized != nil { s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") } @@ -149,6 +189,22 @@ func (this *DescriptorProto_ReservedRange) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *ExtensionRangeOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, 
"&descriptor.ExtensionRangeOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} func (this *FieldDescriptorProto) GoString() string { if this == nil { return "nil" @@ -162,10 +218,10 @@ func (this *FieldDescriptorProto) GoString() string { s = append(s, "Number: "+valueToGoStringDescriptor(this.Number, "int32")+",\n") } if this.Label != nil { - s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "descriptor.FieldDescriptorProto_Label")+",\n") + s = append(s, "Label: "+valueToGoStringDescriptor(this.Label, "FieldDescriptorProto_Label")+",\n") } if this.Type != nil { - s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "descriptor.FieldDescriptorProto_Type")+",\n") + s = append(s, "Type: "+valueToGoStringDescriptor(this.Type, "FieldDescriptorProto_Type")+",\n") } if this.TypeName != nil { s = append(s, "TypeName: "+valueToGoStringDescriptor(this.TypeName, "string")+",\n") @@ -195,11 +251,14 @@ func (this *OneofDescriptorProto) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 5) + s := make([]string, 0, 6) s = append(s, "&descriptor.OneofDescriptorProto{") if this.Name != nil { s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } if this.XXX_unrecognized != nil { s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") } @@ -210,7 +269,7 @@ func (this *EnumDescriptorProto) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 7) + s := make([]string, 0, 9) s = append(s, 
"&descriptor.EnumDescriptorProto{") if this.Name != nil { s = append(s, "Name: "+valueToGoStringDescriptor(this.Name, "string")+",\n") @@ -221,6 +280,30 @@ func (this *EnumDescriptorProto) GoString() string { if this.Options != nil { s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") } + if this.ReservedRange != nil { + s = append(s, "ReservedRange: "+fmt.Sprintf("%#v", this.ReservedRange)+",\n") + } + if this.ReservedName != nil { + s = append(s, "ReservedName: "+fmt.Sprintf("%#v", this.ReservedName)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumDescriptorProto_EnumReservedRange) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&descriptor.EnumDescriptorProto_EnumReservedRange{") + if this.Start != nil { + s = append(s, "Start: "+valueToGoStringDescriptor(this.Start, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } if this.XXX_unrecognized != nil { s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") } @@ -303,7 +386,7 @@ func (this *FileOptions) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 20) + s := make([]string, 0, 23) s = append(s, "&descriptor.FileOptions{") if this.JavaPackage != nil { s = append(s, "JavaPackage: "+valueToGoStringDescriptor(this.JavaPackage, "string")+",\n") @@ -321,7 +404,7 @@ func (this *FileOptions) GoString() string { s = append(s, "JavaStringCheckUtf8: "+valueToGoStringDescriptor(this.JavaStringCheckUtf8, "bool")+",\n") } if this.OptimizeFor != nil { - s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, "descriptor.FileOptions_OptimizeMode")+",\n") + s = append(s, "OptimizeFor: "+valueToGoStringDescriptor(this.OptimizeFor, 
"FileOptions_OptimizeMode")+",\n") } if this.GoPackage != nil { s = append(s, "GoPackage: "+valueToGoStringDescriptor(this.GoPackage, "string")+",\n") @@ -335,6 +418,9 @@ func (this *FileOptions) GoString() string { if this.PyGenericServices != nil { s = append(s, "PyGenericServices: "+valueToGoStringDescriptor(this.PyGenericServices, "bool")+",\n") } + if this.PhpGenericServices != nil { + s = append(s, "PhpGenericServices: "+valueToGoStringDescriptor(this.PhpGenericServices, "bool")+",\n") + } if this.Deprecated != nil { s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") } @@ -347,8 +433,14 @@ func (this *FileOptions) GoString() string { if this.CsharpNamespace != nil { s = append(s, "CsharpNamespace: "+valueToGoStringDescriptor(this.CsharpNamespace, "string")+",\n") } - if this.JavananoUseDeprecatedPackage != nil { - s = append(s, "JavananoUseDeprecatedPackage: "+valueToGoStringDescriptor(this.JavananoUseDeprecatedPackage, "bool")+",\n") + if this.SwiftPrefix != nil { + s = append(s, "SwiftPrefix: "+valueToGoStringDescriptor(this.SwiftPrefix, "string")+",\n") + } + if this.PhpClassPrefix != nil { + s = append(s, "PhpClassPrefix: "+valueToGoStringDescriptor(this.PhpClassPrefix, "string")+",\n") + } + if this.PhpNamespace != nil { + s = append(s, "PhpNamespace: "+valueToGoStringDescriptor(this.PhpNamespace, "string")+",\n") } if this.UninterpretedOption != nil { s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") @@ -395,13 +487,13 @@ func (this *FieldOptions) GoString() string { s := make([]string, 0, 11) s = append(s, "&descriptor.FieldOptions{") if this.Ctype != nil { - s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "descriptor.FieldOptions_CType")+",\n") + s = append(s, "Ctype: "+valueToGoStringDescriptor(this.Ctype, "FieldOptions_CType")+",\n") } if this.Packed != nil { s = append(s, "Packed: "+valueToGoStringDescriptor(this.Packed, "bool")+",\n") } if this.Jstype != nil 
{ - s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "descriptor.FieldOptions_JSType")+",\n") + s = append(s, "Jstype: "+valueToGoStringDescriptor(this.Jstype, "FieldOptions_JSType")+",\n") } if this.Lazy != nil { s = append(s, "Lazy: "+valueToGoStringDescriptor(this.Lazy, "bool")+",\n") @@ -422,6 +514,22 @@ func (this *FieldOptions) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *OneofOptions) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.OneofOptions{") + if this.UninterpretedOption != nil { + s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") + } + s = append(s, "XXX_InternalExtensions: "+extensionToGoStringDescriptor(this)+",\n") + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} func (this *EnumOptions) GoString() string { if this == nil { return "nil" @@ -486,11 +594,14 @@ func (this *MethodOptions) GoString() string { if this == nil { return "nil" } - s := make([]string, 0, 6) + s := make([]string, 0, 7) s = append(s, "&descriptor.MethodOptions{") if this.Deprecated != nil { s = append(s, "Deprecated: "+valueToGoStringDescriptor(this.Deprecated, "bool")+",\n") } + if this.IdempotencyLevel != nil { + s = append(s, "IdempotencyLevel: "+valueToGoStringDescriptor(this.IdempotencyLevel, "MethodOptions_IdempotencyLevel")+",\n") + } if this.UninterpretedOption != nil { s = append(s, "UninterpretedOption: "+fmt.Sprintf("%#v", this.UninterpretedOption)+",\n") } @@ -594,6 +705,45 @@ func (this *SourceCodeInfo_Location) GoString() string { s = append(s, "}") return strings.Join(s, "") } +func (this *GeneratedCodeInfo) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&descriptor.GeneratedCodeInfo{") + if this.Annotation != nil { + s = 
append(s, "Annotation: "+fmt.Sprintf("%#v", this.Annotation)+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *GeneratedCodeInfo_Annotation) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 8) + s = append(s, "&descriptor.GeneratedCodeInfo_Annotation{") + if this.Path != nil { + s = append(s, "Path: "+fmt.Sprintf("%#v", this.Path)+",\n") + } + if this.SourceFile != nil { + s = append(s, "SourceFile: "+valueToGoStringDescriptor(this.SourceFile, "string")+",\n") + } + if this.Begin != nil { + s = append(s, "Begin: "+valueToGoStringDescriptor(this.Begin, "int32")+",\n") + } + if this.End != nil { + s = append(s, "End: "+valueToGoStringDescriptor(this.End, "int32")+",\n") + } + if this.XXX_unrecognized != nil { + s = append(s, "XXX_unrecognized:"+fmt.Sprintf("%#v", this.XXX_unrecognized)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} func valueToGoStringDescriptor(v interface{}, typ string) string { rv := reflect.ValueOf(v) if rv.IsNil() { @@ -602,8 +752,8 @@ func valueToGoStringDescriptor(v interface{}, typ string) string { pv := reflect.Indirect(rv).Interface() return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) } -func extensionToGoStringDescriptor(m github_com_gogo_protobuf_proto.Message) string { - e := github_com_gogo_protobuf_proto.GetUnsafeExtensionsMap(m) +func extensionToGoStringDescriptor(m proto.Message) string { + e := proto.GetUnsafeExtensionsMap(m) if e == nil { return "nil" } diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_test.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_test.go new file mode 100644 index 000000000..8a4e5884f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/descriptor_test.go @@ -0,0 +1,31 @@ +package descriptor_test + 
+import ( + "fmt" + "testing" + + tpb "github.com/gogo/protobuf/proto/testdata" + "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +) + +func TestMessage(t *testing.T) { + var msg *descriptor.DescriptorProto + fd, md := descriptor.ForMessage(msg) + if pkg, want := fd.GetPackage(), "google.protobuf"; pkg != want { + t.Errorf("descriptor.ForMessage(%T).GetPackage() = %q; want %q", msg, pkg, want) + } + if name, want := md.GetName(), "DescriptorProto"; name != want { + t.Fatalf("descriptor.ForMessage(%T).GetName() = %q; want %q", msg, name, want) + } +} + +func Example_options() { + var msg *tpb.MyMessageSet + _, md := descriptor.ForMessage(msg) + if md.GetOptions().GetMessageSetWireFormat() { + fmt.Printf("%v uses option message_set_wire_format.\n", md.GetName()) + } + + // Output: + // MyMessageSet uses option message_set_wire_format. +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go index ab170f913..e0846a357 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/descriptor/helper.go @@ -1,4 +1,6 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without @@ -97,6 +99,17 @@ func (field *FieldDescriptorProto) GetKeyUint64() (x uint64) { return x } +func (field *FieldDescriptorProto) GetKey3Uint64() (x uint64) { + packed := field.IsPacked3() + wireType := field.WireType() + fieldNumber := field.GetNumber() + if packed { + wireType = 2 + } + x = uint64(uint32(fieldNumber)<<3 | uint32(wireType)) + return x +} + func (field *FieldDescriptorProto) GetKey() []byte { x := field.GetKeyUint64() i := 0 @@ -109,6 +122,18 @@ func (field *FieldDescriptorProto) GetKey() []byte { return keybuf } +func (field *FieldDescriptorProto) GetKey3() []byte { + x := field.GetKey3Uint64() + i := 0 + keybuf := make([]byte, 0) + for i = 0; x > 127; i++ { + keybuf = append(keybuf, 0x80|uint8(x&0x7F)) + x >>= 7 + } + keybuf = append(keybuf, uint8(x)) + return keybuf +} + func (desc *FileDescriptorSet) GetField(packageName, messageName, fieldName string) *FieldDescriptorProto { msg := desc.GetMessage(packageName, messageName) if msg == nil { @@ -350,6 +375,16 @@ func (f *FieldDescriptorProto) IsPacked() bool { return f.Options != nil && f.GetOptions().GetPacked() } +func (f *FieldDescriptorProto) IsPacked3() bool { + if f.IsRepeated() && f.IsScalar() { + if f.Options == nil || f.GetOptions().Packed == nil { + return true + } + return f.Options != nil && f.GetOptions().GetPacked() + } + return false +} + func (m *DescriptorProto) HasExtension() bool { return len(m.ExtensionRange) > 0 } diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/generator.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/generator.go index adf795138..7572c6e8c 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/generator.go +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/generator.go @@ -1,7 +1,7 @@ -// Extensions for Protocol Buffers to create more go like structures. 
+// Protocol Buffers for Go with Gadgets // -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf // // Go support for Protocol Buffers - Google's data interchange format // @@ -668,7 +668,6 @@ func (g *Generator) CommandLineParameters(parameter string) { } } } - if pluginList == "" { return } @@ -716,6 +715,12 @@ var pkgNamesInUse = make(map[string][]*FileDescriptor) // Pkg is the candidate name. If f is nil, it's a builtin package like "proto" and // has no file descriptor. func RegisterUniquePackageName(pkg string, f *FileDescriptor) string { + if f == nil { + // For builtin and standard lib packages, try to use only + // the last component of the package path. + pkg = pkg[strings.LastIndex(pkg, "/")+1:] + } + // Convert dots to underscores before finding a unique alias. pkg = strings.Map(badToUnderscore, pkg) @@ -841,9 +846,10 @@ func (g *Generator) SetPackageNames() { // Register the support package names. They might collide with the // name of a package we import. 
g.Pkg = map[string]string{ - "fmt": RegisterUniquePackageName("fmt", nil), - "math": RegisterUniquePackageName("math", nil), - "proto": RegisterUniquePackageName("proto", nil), + "fmt": RegisterUniquePackageName("fmt", nil), + "math": RegisterUniquePackageName("math", nil), + "proto": RegisterUniquePackageName("proto", nil), + "golang_proto": RegisterUniquePackageName("golang_proto", nil), } AllFiles: @@ -1258,10 +1264,8 @@ func (g *Generator) generate(file *FileDescriptor) { } g.P() } - // Reset on each file g.writtenImports = make(map[string]bool) - for _, td := range g.file.imp { g.generateImported(td) } @@ -1323,9 +1327,8 @@ func (g *Generator) generate(file *FileDescriptor) { // Generate the header, including package definition func (g *Generator) generateHeader() { - g.P("// Code generated by protoc-gen-gogo.") + g.P("// Code generated by protoc-gen-gogo. DO NOT EDIT.") g.P("// source: ", *g.file.Name) - g.P("// DO NOT EDIT!") g.P() name := g.file.PackageName() @@ -1420,6 +1423,9 @@ func (g *Generator) generateImports() { // reference it later. The same argument applies to the fmt and math packages. 
if gogoproto.ImportsGoGoProto(g.file.FileDescriptorProto) { g.PrintImport(g.Pkg["proto"], g.ImportPrefix+"github.com/gogo/protobuf/proto") + if gogoproto.RegistersGolangProto(g.file.FileDescriptorProto) { + g.PrintImport(g.Pkg["golang_proto"], g.ImportPrefix+"github.com/golang/protobuf/proto") + } } else { g.PrintImport(g.Pkg["proto"], g.ImportPrefix+"github.com/golang/protobuf/proto") } @@ -1466,8 +1472,17 @@ func (g *Generator) generateImports() { } g.P("// Reference imports to suppress errors if they are not otherwise used.") g.P("var _ = ", g.Pkg["proto"], ".Marshal") + if gogoproto.ImportsGoGoProto(g.file.FileDescriptorProto) && gogoproto.RegistersGolangProto(g.file.FileDescriptorProto) { + g.P("var _ = ", g.Pkg["golang_proto"], ".Marshal") + } g.P("var _ = ", g.Pkg["fmt"], ".Errorf") g.P("var _ = ", g.Pkg["math"], ".Inf") + for _, cimport := range g.customImports { + if cimport == "time" { + g.P("var _ = time.Kitchen") + break + } + } g.P() } @@ -1509,23 +1524,27 @@ func (g *Generator) generateEnum(enum *EnumDescriptor) { if !gogoproto.EnabledGoEnumPrefix(enum.file, enum.EnumDescriptorProto) { ccPrefix = "" } - g.P("type ", ccTypeName, " int32") - g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()}) - g.P("const (") - g.In() - for i, e := range enum.Value { - g.PrintComments(fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i)) - name := *e.Name - if gogoproto.IsEnumValueCustomName(e) { - name = gogoproto.GetEnumValueCustomName(e) - } - name = ccPrefix + name - g.P(name, " ", ccTypeName, " = ", e.Number) - g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName}) + if gogoproto.HasEnumDecl(enum.file, enum.EnumDescriptorProto) { + g.P("type ", ccTypeName, " int32") + g.file.addExport(enum, enumSymbol{ccTypeName, enum.proto3()}) + g.P("const (") + g.In() + for i, e := range enum.Value { + g.PrintComments(fmt.Sprintf("%s,%d,%d", enum.path, enumValuePath, i)) + name := *e.Name + if gogoproto.IsEnumValueCustomName(e) { + name = 
gogoproto.GetEnumValueCustomName(e) + } + name = ccPrefix + name + + g.P(name, " ", ccTypeName, " = ", e.Number) + g.file.addExport(enum, constOrVarSymbol{name, "const", ccTypeName}) + } + g.Out() + g.P(")") } - g.Out() - g.P(")") + g.P("var ", ccTypeName, "_name = map[int32]string{") g.In() generated := make(map[int32]bool) // avoid duplicate values @@ -1594,7 +1613,9 @@ func (g *Generator) generateEnum(enum *EnumDescriptor) { } indexes = append(indexes, strconv.Itoa(enum.index)) g.P("func (", ccTypeName, ") EnumDescriptor() ([]byte, []int) { return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "} }") - + if enum.file.GetPackage() == "google.protobuf" && enum.GetName() == "NullValue" { + g.P("func (", ccTypeName, `) XXX_WellKnownType() string { return "`, enum.GetName(), `" }`) + } g.P() } @@ -1670,7 +1691,11 @@ func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptor enum += CamelCaseSlice(obj.TypeName()) } packed := "" - if field.Options != nil && field.Options.GetPacked() { + if (field.Options != nil && field.Options.GetPacked()) || + // Per https://developers.google.com/protocol-buffers/docs/proto3#simple: + // "In proto3, repeated fields of scalar numeric types use packed encoding by default." 
+ (message.proto3() && (field.Options == nil || field.Options.Packed == nil) && + isRepeated(field) && IsScalar(field)) { packed = ",packed" } fieldName := field.GetName() @@ -1738,7 +1763,15 @@ func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptor if field.OneofIndex != nil { oneof = ",oneof" } - return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s%s%s%s%s%s", + stdtime := "" + if gogoproto.IsStdTime(field) { + stdtime = ",stdtime" + } + stdduration := "" + if gogoproto.IsStdDuration(field) { + stdduration = ",stdduration" + } + return strconv.Quote(fmt.Sprintf("%s,%d,%s%s%s%s%s%s%s%s%s%s%s%s%s", wiretype, field.GetNumber(), optrepreq, @@ -1751,12 +1784,14 @@ func (g *Generator) goTag(message *Descriptor, field *descriptor.FieldDescriptor ctype, casttype, castkey, - castvalue)) + castvalue, + stdtime, + stdduration)) } func needsStar(field *descriptor.FieldDescriptorProto, proto3 bool, allowOneOf bool) bool { if isRepeated(field) && - (*field.Type != descriptor.FieldDescriptorProto_TYPE_MESSAGE) && + (*field.Type != descriptor.FieldDescriptorProto_TYPE_MESSAGE || gogoproto.IsCustomType(field)) && (*field.Type != descriptor.FieldDescriptorProto_TYPE_GROUP) { return false } @@ -1864,8 +1899,14 @@ func (g *Generator) GoType(message *Descriptor, field *descriptor.FieldDescripto if len(packageName) > 0 { g.customImports = append(g.customImports, packageName) } + case gogoproto.IsStdTime(field): + g.customImports = append(g.customImports, "time") + typ = "time.Time" + case gogoproto.IsStdDuration(field): + g.customImports = append(g.customImports, "time") + typ = "time.Duration" } - if needsStar(field, g.file.proto3, message != nil && message.allowOneof()) { + if needsStar(field, g.file.proto3 && field.Extendee == nil, message != nil && message.allowOneof()) { typ = "*" + typ } if isRepeated(field) { @@ -1936,9 +1977,18 @@ func (g *Generator) GoMapType(d *Descriptor, field *descriptor.FieldDescriptorPr if 
!gogoproto.IsNullable(m.ValueAliasField) { valType = strings.TrimPrefix(valType, "*") } - g.RecordTypeUse(m.ValueAliasField.GetTypeName()) + if !gogoproto.IsStdTime(field) && !gogoproto.IsStdDuration(field) && !gogoproto.IsCustomType(field) && !gogoproto.IsCastType(field) { + g.RecordTypeUse(m.ValueAliasField.GetTypeName()) + } default: - valType = strings.TrimPrefix(valType, "*") + if gogoproto.IsCustomType(m.ValueAliasField) { + if !gogoproto.IsNullable(m.ValueAliasField) { + valType = strings.TrimPrefix(valType, "*") + } + g.RecordTypeUse(m.ValueAliasField.GetTypeName()) + } else { + valType = strings.TrimPrefix(valType, "*") + } } m.GoType = fmt.Sprintf("map[%s]%s", keyType, valType) @@ -1972,6 +2022,28 @@ var methodNames = [...]string{ "ProtoSize", } +// Names of messages in the `google.protobuf` package for which +// we will generate XXX_WellKnownType methods. +var wellKnownTypes = map[string]bool{ + "Any": true, + "Duration": true, + "Empty": true, + "Struct": true, + "Timestamp": true, + + "Value": true, + "ListValue": true, + "DoubleValue": true, + "FloatValue": true, + "Int64Value": true, + "UInt64Value": true, + "Int32Value": true, + "UInt32Value": true, + "BoolValue": true, + "StringValue": true, + "BytesValue": true, +} + // Generate the type and default constant definitions for this Descriptor. func (g *Generator) generateMessage(message *Descriptor) { // The full type name @@ -1996,10 +2068,6 @@ func (g *Generator) generateMessage(message *Descriptor) { oneofTypeName := make(map[*descriptor.FieldDescriptorProto]string) // without star oneofInsertPoints := make(map[int32]int) // oneof_index => offset of g.Buffer - g.PrintComments(message.path) - g.P("type ", ccTypeName, " struct {") - g.In() - // allocNames finds a conflict-free variation of the given strings, // consistently mutating their suffixes. // It returns the same number of strings. 
@@ -2021,7 +2089,7 @@ func (g *Generator) generateMessage(message *Descriptor) { } } - for i, field := range message.Field { + for _, field := range message.Field { // Allocate the getter and the field at the same time so name // collisions create field/method consistent names. // TODO: This allocation occurs based on the order of the fields @@ -2033,110 +2101,131 @@ func (g *Generator) generateMessage(message *Descriptor) { } ns := allocNames(base, "Get"+base) fieldName, fieldGetterName := ns[0], ns[1] - typename, wiretype := g.GoType(message, field) - jsonName := *field.Name - jsonTag := jsonName + ",omitempty" - repeatedNativeType := (!field.IsMessage() && !gogoproto.IsCustomType(field) && field.IsRepeated()) - if !gogoproto.IsNullable(field) && !repeatedNativeType { - jsonTag = jsonName - } - gogoJsonTag := gogoproto.GetJsonTag(field) - if gogoJsonTag != nil { - jsonTag = *gogoJsonTag - } - gogoMoreTags := gogoproto.GetMoreTags(field) - moreTags := "" - if gogoMoreTags != nil { - moreTags = " " + *gogoMoreTags - } - tag := fmt.Sprintf("protobuf:%s json:%q%s", g.goTag(message, field, wiretype), jsonTag, moreTags) fieldNames[field] = fieldName fieldGetterNames[field] = fieldGetterName - if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE && gogoproto.IsEmbed(field) { - fieldName = "" - } + } - oneof := field.OneofIndex != nil && message.allowOneof() - if oneof && oneofFieldName[*field.OneofIndex] == "" { - odp := message.OneofDecl[int(*field.OneofIndex)] - fname := allocNames(CamelCase(odp.GetName()))[0] + if gogoproto.HasTypeDecl(message.file, message.DescriptorProto) { + g.PrintComments(message.path) + g.P("type ", ccTypeName, " struct {") + g.In() - // This is the first field of a oneof we haven't seen before. - // Generate the union field. 
- com := g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex)) - if com { - g.P("//") + for i, field := range message.Field { + fieldName := fieldNames[field] + typename, wiretype := g.GoType(message, field) + jsonName := *field.Name + jsonTag := jsonName + ",omitempty" + repeatedNativeType := (!field.IsMessage() && !gogoproto.IsCustomType(field) && field.IsRepeated()) + if !gogoproto.IsNullable(field) && !repeatedNativeType { + jsonTag = jsonName } - g.P("// Types that are valid to be assigned to ", fname, ":") - // Generate the rest of this comment later, - // when we've computed any disambiguation. - oneofInsertPoints[*field.OneofIndex] = g.Buffer.Len() - - dname := "is" + ccTypeName + "_" + fname - oneofFieldName[*field.OneofIndex] = fname - oneofDisc[*field.OneofIndex] = dname - otag := `protobuf_oneof:"` + odp.GetName() + `"` - g.P(fname, " ", dname, " `", otag, "`") - } - - if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE { - desc := g.ObjectNamed(field.GetTypeName()) - if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() { - m := g.GoMapType(d, field) - typename = m.GoType - mapFieldTypes[field] = typename // record for the getter generation - - tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", m.KeyTag, m.ValueTag) + gogoJsonTag := gogoproto.GetJsonTag(field) + if gogoJsonTag != nil { + jsonTag = *gogoJsonTag + } + gogoMoreTags := gogoproto.GetMoreTags(field) + moreTags := "" + if gogoMoreTags != nil { + moreTags = " " + *gogoMoreTags + } + tag := fmt.Sprintf("protobuf:%s json:%q%s", g.goTag(message, field, wiretype), jsonTag, moreTags) + if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE && gogoproto.IsEmbed(field) { + fieldName = "" } - } - fieldTypes[field] = typename + oneof := field.OneofIndex != nil && message.allowOneof() + if oneof && oneofFieldName[*field.OneofIndex] == "" { + odp := message.OneofDecl[int(*field.OneofIndex)] + fname := 
allocNames(CamelCase(odp.GetName()))[0] - if oneof { - tname := ccTypeName + "_" + fieldName - // It is possible for this to collide with a message or enum - // nested in this message. Check for collisions. - for { - ok := true - for _, desc := range message.nested { - if CamelCaseSlice(desc.TypeName()) == tname { - ok = false - break + // This is the first field of a oneof we haven't seen before. + // Generate the union field. + com := g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageOneofPath, *field.OneofIndex)) + if com { + g.P("//") + } + g.P("// Types that are valid to be assigned to ", fname, ":") + // Generate the rest of this comment later, + // when we've computed any disambiguation. + oneofInsertPoints[*field.OneofIndex] = g.Buffer.Len() + + dname := "is" + ccTypeName + "_" + fname + oneofFieldName[*field.OneofIndex] = fname + oneofDisc[*field.OneofIndex] = dname + otag := `protobuf_oneof:"` + odp.GetName() + `"` + g.P(fname, " ", dname, " `", otag, "`") + } + + if *field.Type == descriptor.FieldDescriptorProto_TYPE_MESSAGE { + desc := g.ObjectNamed(field.GetTypeName()) + if d, ok := desc.(*Descriptor); ok && d.GetOptions().GetMapEntry() { + m := g.GoMapType(d, field) + typename = m.GoType + mapFieldTypes[field] = typename // record for the getter generation + + tag += fmt.Sprintf(" protobuf_key:%s protobuf_val:%s", m.KeyTag, m.ValueTag) + } + } + + fieldTypes[field] = typename + + if oneof { + tname := ccTypeName + "_" + fieldName + // It is possible for this to collide with a message or enum + // nested in this message. Check for collisions. 
+ for { + ok := true + for _, desc := range message.nested { + if CamelCaseSlice(desc.TypeName()) == tname { + ok = false + break + } } - } - for _, enum := range message.enums { - if CamelCaseSlice(enum.TypeName()) == tname { - ok = false - break + for _, enum := range message.enums { + if CamelCaseSlice(enum.TypeName()) == tname { + ok = false + break + } } + if !ok { + tname += "_" + continue + } + break } - if !ok { - tname += "_" - continue - } - break + + oneofTypeName[field] = tname + continue } - oneofTypeName[field] = tname - continue + g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i)) + g.P(fieldName, "\t", typename, "\t`", tag, "`") + if !gogoproto.IsStdTime(field) && !gogoproto.IsStdDuration(field) && !gogoproto.IsCustomType(field) && !gogoproto.IsCastType(field) { + g.RecordTypeUse(field.GetTypeName()) + } } - - g.PrintComments(fmt.Sprintf("%s,%d,%d", message.path, messageFieldPath, i)) - g.P(fieldName, "\t", typename, "\t`", tag, "`") - g.RecordTypeUse(field.GetTypeName()) - } - if len(message.ExtensionRange) > 0 { - if gogoproto.HasExtensionsMap(g.file.FileDescriptorProto, message.DescriptorProto) { - g.P(g.Pkg["proto"], ".XXX_InternalExtensions `json:\"-\"`") - } else { - g.P("XXX_extensions\t\t[]byte `protobuf:\"bytes,0,opt\" json:\"-\"`") + if len(message.ExtensionRange) > 0 { + if gogoproto.HasExtensionsMap(g.file.FileDescriptorProto, message.DescriptorProto) { + g.P(g.Pkg["proto"], ".XXX_InternalExtensions `json:\"-\"`") + } else { + g.P("XXX_extensions\t\t[]byte `protobuf:\"bytes,0,opt\" json:\"-\"`") + } + } + if gogoproto.HasUnrecognized(g.file.FileDescriptorProto, message.DescriptorProto) && !message.proto3() { + g.P("XXX_unrecognized\t[]byte `json:\"-\"`") + } + g.Out() + g.P("}") + } else { + // Even if the type does not need to be generated, we need to iterate + // over all its fields to be able to mark as used any imported types + // used by those fields. 
+ for _, field := range message.Field { + if !gogoproto.IsStdTime(field) && !gogoproto.IsStdDuration(field) && !gogoproto.IsCustomType(field) && !gogoproto.IsCastType(field) { + g.RecordTypeUse(field.GetTypeName()) + } } } - if gogoproto.HasUnrecognized(g.file.FileDescriptorProto, message.DescriptorProto) && !message.proto3() { - g.P("XXX_unrecognized\t[]byte `json:\"-\"`") - } - g.Out() - g.P("}") // Update g.Buffer to list valid oneof types. // We do this down here, after we've disambiguated the oneof type names. @@ -2166,7 +2255,11 @@ func (g *Generator) generateMessage(message *Descriptor) { indexes = append([]string{strconv.Itoa(m.index)}, indexes...) } g.P("func (*", ccTypeName, ") Descriptor() ([]byte, []int) { return ", g.file.VarName(), ", []int{", strings.Join(indexes, ", "), "} }") - + // TODO: Revisit the decision to use a XXX_WellKnownType method + // if we change proto.MessageName to work with multiple equivalents. + if message.file.GetPackage() == "google.protobuf" && wellKnownTypes[message.GetName()] { + g.P("func (*", ccTypeName, `) XXX_WellKnownType() string { return "`, message.GetName(), `" }`) + } // Extension support methods var hasExtensions, isMessageSet bool if len(message.ExtensionRange) > 0 { @@ -2205,7 +2298,7 @@ func (g *Generator) generateMessage(message *Descriptor) { g.In() for _, r := range message.ExtensionRange { end := fmt.Sprint(*r.End - 1) // make range inclusive on both ends - g.P("{", r.Start, ", ", end, "},") + g.P("{Start: ", r.Start, ", End: ", end, "},") } g.Out() g.P("}") @@ -2250,7 +2343,7 @@ func (g *Generator) generateMessage(message *Descriptor) { case typename == "string": def = strconv.Quote(def) case typename == "[]byte": - def = "[]byte(" + strconv.Quote(def) + ")" + def = "[]byte(" + strconv.Quote(unescape(def)) + ")" kind = "var " case def == "inf", def == "-inf", def == "nan": // These names are known to, and defined by, the protocol language. 
@@ -2348,7 +2441,9 @@ func (g *Generator) generateMessage(message *Descriptor) { _, wiretype := g.GoType(message, field) tag := "protobuf:" + g.goTag(message, field, wiretype) g.P("type ", oneofTypeName[field], " struct{ ", fieldNames[field], " ", fieldTypes[field], " `", tag, "` }") - g.RecordTypeUse(field.GetTypeName()) + if !gogoproto.IsStdTime(field) && !gogoproto.IsStdDuration(field) && !gogoproto.IsCustomType(field) && !gogoproto.IsCastType(field) { + g.RecordTypeUse(field.GetTypeName()) + } } g.P() for _, field := range message.Field { @@ -2392,11 +2487,6 @@ func (g *Generator) generateMessage(message *Descriptor) { star = "*" } - // In proto3, only generate getters for message fields and oneof fields. - if message.proto3() && *field.Type != descriptor.FieldDescriptorProto_TYPE_MESSAGE && !oneof { - continue - } - // Only export getter symbols for basic types, // and for messages and enums in the same package. // Groups are not exported. @@ -2461,7 +2551,11 @@ func (g *Generator) generateMessage(message *Descriptor) { g.Out() g.P("}") } else if !oneof { - g.P("if m != nil && m." + fname + " != nil {") + if message.proto3() { + g.P("if m != nil {") + } else { + g.P("if m != nil && m." + fname + " != nil {") + } g.In() g.P("return " + star + "m." 
+ fname) g.Out() @@ -2490,7 +2584,11 @@ func (g *Generator) generateMessage(message *Descriptor) { } else { goTyp, _ := g.GoType(message, field) goTypName := GoTypeToName(goTyp) - g.P("return ", goTypName, "{}") + if !gogoproto.IsNullable(field) && gogoproto.IsStdDuration(field) { + g.P("return 0") + } else { + g.P("return ", goTypName, "{}") + } } case descriptor.FieldDescriptorProto_TYPE_BOOL: g.P("return false") @@ -2647,13 +2745,41 @@ func (g *Generator) generateMessage(message *Descriptor) { fieldWire[field] = wire g.P("_ = b.EncodeVarint(", field.Number, "<<3|", g.Pkg["proto"], ".", wire, ")") if *field.Type == descriptor.FieldDescriptorProto_TYPE_BYTES && gogoproto.IsCustomType(field) { - g.P(`data, err := `, val, `.Marshal()`) + g.P(`dAtA, err := `, val, `.Marshal()`) g.P(`if err != nil {`) g.In() g.P(`return err`) g.Out() g.P(`}`) - val = "data" + val = "dAtA" + } else if gogoproto.IsStdTime(field) { + pkg := g.useTypes() + if gogoproto.IsNullable(field) { + g.P(`dAtA, err := `, pkg, `.StdTimeMarshal(*`, val, `)`) + } else { + g.P(`dAtA, err := `, pkg, `.StdTimeMarshal(`, val, `)`) + } + g.P(`if err != nil {`) + g.In() + g.P(`return err`) + g.Out() + g.P(`}`) + val = "dAtA" + pre, post = "b.EncodeRawBytes(", ")" + } else if gogoproto.IsStdDuration(field) { + pkg := g.useTypes() + if gogoproto.IsNullable(field) { + g.P(`dAtA, err := `, pkg, `.StdDurationMarshal(*`, val, `)`) + } else { + g.P(`dAtA, err := `, pkg, `.StdDurationMarshal(`, val, `)`) + } + g.P(`if err != nil {`) + g.In() + g.P(`return err`) + g.Out() + g.P(`}`) + val = "dAtA" + pre, post = "b.EncodeRawBytes(", ")" } if !canFail { g.P("_ = ", pre, val, post) @@ -2717,9 +2843,13 @@ func (g *Generator) generateMessage(message *Descriptor) { dec = "b.DecodeGroup(msg)" // handled specially below case descriptor.FieldDescriptorProto_TYPE_MESSAGE: - g.P("msg := new(", fieldTypes[field][1:], ")") // drop star - lhs = "err" - dec = "b.DecodeMessage(msg)" + if gogoproto.IsStdTime(field) || 
gogoproto.IsStdDuration(field) { + dec = "b.DecodeRawBytes(true)" + } else { + g.P("msg := new(", fieldTypes[field][1:], ")") // drop star + lhs = "err" + dec = "b.DecodeMessage(msg)" + } // handled specially below case descriptor.FieldDescriptorProto_TYPE_BYTES: dec = "b.DecodeRawBytes(true)" @@ -2754,6 +2884,34 @@ func (g *Generator) generateMessage(message *Descriptor) { g.P(`c := &cc`) g.P(`err = c.Unmarshal(`, val, `)`) val = "*c" + } else if gogoproto.IsStdTime(field) { + pkg := g.useTypes() + g.P(`if err != nil {`) + g.In() + g.P(`return true, err`) + g.Out() + g.P(`}`) + g.P(`c := new(time.Time)`) + g.P(`if err2 := `, pkg, `.StdTimeUnmarshal(c, `, val, `); err2 != nil {`) + g.In() + g.P(`return true, err`) + g.Out() + g.P(`}`) + val = "c" + } else if gogoproto.IsStdDuration(field) { + pkg := g.useTypes() + g.P(`if err != nil {`) + g.In() + g.P(`return true, err`) + g.Out() + g.P(`}`) + g.P(`c := new(time.Duration)`) + g.P(`if err2 := `, pkg, `.StdDurationUnmarshal(c, `, val, `); err2 != nil {`) + g.In() + g.P(`return true, err`) + g.Out() + g.P(`}`) + val = "c" } if cast != "" { val = cast + "(" + val + ")" @@ -2766,7 +2924,9 @@ func (g *Generator) generateMessage(message *Descriptor) { val += " != 0" case descriptor.FieldDescriptorProto_TYPE_GROUP, descriptor.FieldDescriptorProto_TYPE_MESSAGE: - val = "msg" + if !gogoproto.IsStdTime(field) && !gogoproto.IsStdDuration(field) { + val = "msg" + } } if gogoproto.IsCastType(field) { _, typ, err := getCastType(field) @@ -2831,7 +2991,21 @@ func (g *Generator) generateMessage(message *Descriptor) { fixed = g.Pkg["proto"] + ".Size(" + val + ")" case descriptor.FieldDescriptorProto_TYPE_MESSAGE: wire = "WireBytes" - g.P("s := ", g.Pkg["proto"], ".Size(", val, ")") + if gogoproto.IsStdTime(field) { + if gogoproto.IsNullable(field) { + val = "*" + val + } + pkg := g.useTypes() + g.P("s := ", pkg, ".SizeOfStdTime(", val, ")") + } else if gogoproto.IsStdDuration(field) { + if gogoproto.IsNullable(field) { + val = "*" + 
val + } + pkg := g.useTypes() + g.P("s := ", pkg, ".SizeOfStdDuration(", val, ")") + } else { + g.P("s := ", g.Pkg["proto"], ".Size(", val, ")") + } fixed = "s" varint = fixed case descriptor.FieldDescriptorProto_TYPE_BYTES: @@ -2882,6 +3056,70 @@ func (g *Generator) generateMessage(message *Descriptor) { } g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["proto"], ccTypeName, fullName) + if gogoproto.ImportsGoGoProto(g.file.FileDescriptorProto) && gogoproto.RegistersGolangProto(g.file.FileDescriptorProto) { + g.addInitf("%s.RegisterType((*%s)(nil), %q)", g.Pkg["golang_proto"], ccTypeName, fullName) + } +} + +var escapeChars = [256]byte{ + 'a': '\a', 'b': '\b', 'f': '\f', 'n': '\n', 'r': '\r', 't': '\t', 'v': '\v', '\\': '\\', '"': '"', '\'': '\'', '?': '?', +} + +// unescape reverses the "C" escaping that protoc does for default values of bytes fields. +// It is best effort in that it effectively ignores malformed input. Seemingly invalid escape +// sequences are conveyed, unmodified, into the decoded result. +func unescape(s string) string { + // NB: Sadly, we can't use strconv.Unquote because protoc will escape both + // single and double quotes, but strconv.Unquote only allows one or the + // other (based on actual surrounding quotes of its input argument). + + var out []byte + for len(s) > 0 { + // regular character, or too short to be valid escape + if s[0] != '\\' || len(s) < 2 { + out = append(out, s[0]) + s = s[1:] + } else if c := escapeChars[s[1]]; c != 0 { + // escape sequence + out = append(out, c) + s = s[2:] + } else if s[1] == 'x' || s[1] == 'X' { + // hex escape, e.g. "\x80 + if len(s) < 4 { + // too short to be valid + out = append(out, s[:2]...) + s = s[2:] + continue + } + v, err := strconv.ParseUint(s[2:4], 16, 8) + if err != nil { + out = append(out, s[:4]...) 
+ } else { + out = append(out, byte(v)) + } + s = s[4:] + } else if '0' <= s[1] && s[1] <= '7' { + // octal escape, can vary from 1 to 3 octal digits; e.g., "\0" "\40" or "\164" + // so consume up to 2 more bytes or up to end-of-string + n := len(s[1:]) - len(strings.TrimLeft(s[1:], "01234567")) + if n > 3 { + n = 3 + } + v, err := strconv.ParseUint(s[1:1+n], 8, 8) + if err != nil { + out = append(out, s[:1+n]...) + } else { + out = append(out, byte(v)) + } + s = s[1+n:] + } else { + // bad escape, just propagate the slash as-is + out = append(out, s[0]) + s = s[1:] + } + } + + return string(out) } func (g *Generator) generateExtension(ext *ExtensionDescriptor) { @@ -2931,6 +3169,7 @@ func (g *Generator) generateExtension(ext *ExtensionDescriptor) { g.P("Field: ", field.Number, ",") g.P(`Name: "`, extName, `",`) g.P("Tag: ", tag, ",") + g.P(`Filename: "`, g.file.GetName(), `",`) g.Out() g.P("}") @@ -2939,6 +3178,9 @@ func (g *Generator) generateExtension(ext *ExtensionDescriptor) { if mset { // Generate a bit more code to register with message_set.go. 
g.addInitf("%s.RegisterMessageSetType((%s)(nil), %d, %q)", g.Pkg["proto"], fieldType, *field.Number, extName) + if gogoproto.ImportsGoGoProto(g.file.FileDescriptorProto) && gogoproto.RegistersGolangProto(g.file.FileDescriptorProto) { + g.addInitf("%s.RegisterMessageSetType((%s)(nil), %d, %q)", g.Pkg["golang_proto"], fieldType, *field.Number, extName) + } } g.file.addExport(ext, constOrVarSymbol{ccTypeName, "var", ""}) @@ -2989,6 +3231,9 @@ func (g *Generator) generateFileDescriptor(file *FileDescriptor) { v := file.VarName() g.P() g.P("func init() { ", g.Pkg["proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ") }") + if gogoproto.ImportsGoGoProto(g.file.FileDescriptorProto) && gogoproto.RegistersGolangProto(g.file.FileDescriptorProto) { + g.P("func init() { ", g.Pkg["golang_proto"], ".RegisterFile(", strconv.Quote(*file.Name), ", ", v, ") }") + } g.P("var ", v, " = []byte{") g.In() g.P("// ", len(b), " bytes of a gzipped FileDescriptorProto") @@ -3021,10 +3266,16 @@ func (g *Generator) generateEnumRegistration(enum *EnumDescriptor) { // The full type name, CamelCased. ccTypeName := CamelCaseSlice(typeName) g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["proto"], pkg+ccTypeName, ccTypeName) + if gogoproto.ImportsGoGoProto(g.file.FileDescriptorProto) && gogoproto.RegistersGolangProto(g.file.FileDescriptorProto) { + g.addInitf("%s.RegisterEnum(%q, %[3]s_name, %[3]s_value)", g.Pkg["golang_proto"], pkg+ccTypeName, ccTypeName) + } } func (g *Generator) generateExtensionRegistration(ext *ExtensionDescriptor) { g.addInitf("%s.RegisterExtension(%s)", g.Pkg["proto"], ext.DescName()) + if gogoproto.ImportsGoGoProto(g.file.FileDescriptorProto) && gogoproto.RegistersGolangProto(g.file.FileDescriptorProto) { + g.addInitf("%s.RegisterExtension(%s)", g.Pkg["golang_proto"], ext.DescName()) + } } // And now lots of helper functions. 
@@ -3108,6 +3359,32 @@ func isRepeated(field *descriptor.FieldDescriptorProto) bool { return field.Label != nil && *field.Label == descriptor.FieldDescriptorProto_LABEL_REPEATED } +// Is this field a scalar numeric type? +func IsScalar(field *descriptor.FieldDescriptorProto) bool { + if field.Type == nil { + return false + } + switch *field.Type { + case descriptor.FieldDescriptorProto_TYPE_DOUBLE, + descriptor.FieldDescriptorProto_TYPE_FLOAT, + descriptor.FieldDescriptorProto_TYPE_INT64, + descriptor.FieldDescriptorProto_TYPE_UINT64, + descriptor.FieldDescriptorProto_TYPE_INT32, + descriptor.FieldDescriptorProto_TYPE_FIXED64, + descriptor.FieldDescriptorProto_TYPE_FIXED32, + descriptor.FieldDescriptorProto_TYPE_BOOL, + descriptor.FieldDescriptorProto_TYPE_UINT32, + descriptor.FieldDescriptorProto_TYPE_ENUM, + descriptor.FieldDescriptorProto_TYPE_SFIXED32, + descriptor.FieldDescriptorProto_TYPE_SFIXED64, + descriptor.FieldDescriptorProto_TYPE_SINT32, + descriptor.FieldDescriptorProto_TYPE_SINT64: + return true + default: + return false + } +} + // badToUnderscore is the mapping function used to generate Go names from package names, // which can be dotted in the input .proto file. It replaces non-identifier characters such as // dot or dash with underscore. diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/helper.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/helper.go index 07f955f26..d7a406e7c 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/helper.go +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/helper.go @@ -1,5 +1,7 @@ -// Copyright (c) 2013, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -238,18 +240,38 @@ func (g *Generator) GetMapKeyField(field, keyField *descriptor.FieldDescriptorPr } func (g *Generator) GetMapValueField(field, valField *descriptor.FieldDescriptorProto) *descriptor.FieldDescriptorProto { - if !gogoproto.IsCastValue(field) && gogoproto.IsNullable(field) { - return valField + if gogoproto.IsCustomType(field) && gogoproto.IsCastValue(field) { + g.Fail("cannot have a customtype and casttype: ", field.String()) } valField = proto.Clone(valField).(*descriptor.FieldDescriptorProto) if valField.Options == nil { valField.Options = &descriptor.FieldOptions{} } + + stdtime := gogoproto.IsStdTime(field) + if stdtime { + if err := proto.SetExtension(valField.Options, gogoproto.E_Stdtime, &stdtime); err != nil { + g.Fail(err.Error()) + } + } + + stddur := gogoproto.IsStdDuration(field) + if stddur { + if err := proto.SetExtension(valField.Options, gogoproto.E_Stdduration, &stddur); err != nil { + g.Fail(err.Error()) + } + } + if valType := gogoproto.GetCastValue(field); len(valType) > 0 { if err := proto.SetExtension(valField.Options, gogoproto.E_Casttype, &valType); err != nil { g.Fail(err.Error()) } } + if valType := gogoproto.GetCustomType(field); len(valType) > 0 { + if err := proto.SetExtension(valField.Options, gogoproto.E_Customtype, &valType); err != nil { + g.Fail(err.Error()) + } + } nullable := gogoproto.IsNullable(field) if err := proto.SetExtension(valField.Options, gogoproto.E_Nullable, &nullable); err != nil { @@ -261,7 +283,7 @@ func (g *Generator) GetMapValueField(field, valField *descriptor.FieldDescriptor // GoMapValueTypes returns the map value Go type and the alias map value Go type (for casting), taking into // account whether the map is nullable or the value is a message. 
func GoMapValueTypes(mapField, valueField *descriptor.FieldDescriptorProto, goValueType, goValueAliasType string) (nullable bool, outGoType string, outGoAliasType string) { - nullable = gogoproto.IsNullable(mapField) && valueField.IsMessage() + nullable = gogoproto.IsNullable(mapField) && (valueField.IsMessage() || gogoproto.IsCustomType(mapField)) if nullable { // ensure the non-aliased Go value type is a pointer for consistency if strings.HasPrefix(goValueType, "*") { @@ -294,6 +316,7 @@ func EmbedFieldName(goTyp string) string { } func (g *Generator) GeneratePlugin(p Plugin) { + plugins = []Plugin{p} p.Init(g) // Generate the output. The generator runs for every file, even the files // that we don't generate output for, so that we can collate the full list @@ -396,32 +419,6 @@ func getCastType(field *descriptor.FieldDescriptorProto) (packageName string, ty return "", "", err } -func getCastKey(field *descriptor.FieldDescriptorProto) (packageName string, typ string, err error) { - if field.Options != nil { - var v interface{} - v, err = proto.GetExtension(field.Options, gogoproto.E_Castkey) - if err == nil && v.(*string) != nil { - ctype := *(v.(*string)) - packageName, typ = splitCPackageType(ctype) - return packageName, typ, nil - } - } - return "", "", err -} - -func getCastValue(field *descriptor.FieldDescriptorProto) (packageName string, typ string, err error) { - if field.Options != nil { - var v interface{} - v, err = proto.GetExtension(field.Options, gogoproto.E_Castvalue) - if err == nil && v.(*string) != nil { - ctype := *(v.(*string)) - packageName, typ = splitCPackageType(ctype) - return packageName, typ, nil - } - } - return "", "", err -} - func FileName(file *FileDescriptor) string { fname := path.Base(file.FileDescriptorProto.GetName()) fname = strings.Replace(fname, ".proto", "", -1) @@ -442,3 +439,9 @@ func (g *Generator) AllFiles() *descriptor.FileDescriptorSet { func (d *Descriptor) Path() string { return d.path } + +func (g *Generator) 
useTypes() string { + pkg := strings.Map(badToUnderscore, "github.com/gogo/protobuf/types") + g.customImports = append(g.customImports, "github.com/gogo/protobuf/types") + return pkg +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/name_test.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/name_test.go index fde3b046c..b3b60a3c6 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/name_test.go +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/generator/name_test.go @@ -83,3 +83,32 @@ func TestGoPackageOption(t *testing.T) { } } } + +func TestUnescape(t *testing.T) { + tests := []struct { + in string + out string + }{ + // successful cases, including all kinds of escapes + {"", ""}, + {"foo bar baz frob nitz", "foo bar baz frob nitz"}, + {`\000\001\002\003\004\005\006\007`, string([]byte{0, 1, 2, 3, 4, 5, 6, 7})}, + {`\a\b\f\n\r\t\v\\\?\'\"`, string([]byte{'\a', '\b', '\f', '\n', '\r', '\t', '\v', '\\', '?', '\'', '"'})}, + {`\x10\x20\x30\x40\x50\x60\x70\x80`, string([]byte{16, 32, 48, 64, 80, 96, 112, 128})}, + // variable length octal escapes + {`\0\018\222\377\3\04\005\6\07`, string([]byte{0, 1, '8', 0222, 255, 3, 4, 5, 6, 7})}, + // malformed escape sequences left as is + {"foo \\g bar", "foo \\g bar"}, + {"foo \\xg0 bar", "foo \\xg0 bar"}, + {"\\", "\\"}, + {"\\x", "\\x"}, + {"\\xf", "\\xf"}, + {"\\777", "\\777"}, // overflows byte + } + for _, tc := range tests { + s := unescape(tc.in) + if s != tc.out { + t.Errorf("doUnescape(%q) = %q; should have been %q", tc.in, s, tc.out) + } + } +} diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/grpc/grpc.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/grpc/grpc.go index 4b8c9dd2c..06abe9b6a 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/grpc/grpc.go +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/grpc/grpc.go @@ -36,7 +36,6 @@ package grpc import ( "fmt" - "path" "strconv" "strings" @@ -48,7 +47,7 @@ import ( // It is 
incremented whenever an incompatibility between the generated code and // the grpc package is introduced; the generated code references // a constant, grpc.SupportPackageIsVersionN (where N is generatedCodeVersion). -const generatedCodeVersion = 3 +const generatedCodeVersion = 4 // Paths for packages used by code generated in this file, // relative to the import_prefix of the generator.Generator. @@ -129,11 +128,11 @@ func (g *grpc) GenerateImports(file *generator.FileDescriptor) { if len(file.FileDescriptorProto.Service) == 0 { return } - g.P("import (") - g.P(contextPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, contextPkgPath))) - g.P(grpcPkg, " ", strconv.Quote(path.Join(g.gen.ImportPrefix, grpcPkgPath))) - g.P(")") - g.P() + imports := generator.NewPluginImports(g.gen) + for _, i := range []string{contextPkgPath, grpcPkgPath} { + imports.NewImport(i).Use() + } + imports.GenerateImports(file) } // reservedClientName records whether a client name is reserved on the client side. @@ -254,7 +253,7 @@ func (g *grpc) generateService(file *generator.FileDescriptor, service *pb.Servi g.P("},") } g.P("},") - g.P("Metadata: ", file.VarName(), ",") + g.P("Metadata: \"", file.GetName(), "\",") g.P("}") g.P() } diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/plugin/Makefile b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/plugin/Makefile index 546287cef..95234a755 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/plugin/Makefile +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/plugin/Makefile @@ -33,5 +33,5 @@ # at src/google/protobuf/compiler/plugin.proto # Also we need to fix an import. regenerate: + go install github.com/gogo/protobuf/protoc-gen-gogo protoc --gogo_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:. 
-I=../../protobuf/google/protobuf/compiler/:../../protobuf/ ../../protobuf/google/protobuf/compiler/plugin.proto - diff --git a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/plugin/plugin.pb.go b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/plugin/plugin.pb.go index 460359392..c673d5035 100644 --- a/vendor/github.com/gogo/protobuf/protoc-gen-gogo/plugin/plugin.pb.go +++ b/vendor/github.com/gogo/protobuf/protoc-gen-gogo/plugin/plugin.pb.go @@ -1,6 +1,5 @@ -// Code generated by protoc-gen-gogo. +// Code generated by protoc-gen-gogo. DO NOT EDIT. // source: plugin.proto -// DO NOT EDIT! /* Package plugin_go is a generated protocol buffer package. @@ -9,6 +8,7 @@ It is generated from these files: plugin.proto It has these top-level messages: + Version CodeGeneratorRequest CodeGeneratorResponse */ @@ -30,6 +30,50 @@ var _ = math.Inf // proto package needs to be updated. const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package +// The version number of protocol compiler. +type Version struct { + Major *int32 `protobuf:"varint,1,opt,name=major" json:"major,omitempty"` + Minor *int32 `protobuf:"varint,2,opt,name=minor" json:"minor,omitempty"` + Patch *int32 `protobuf:"varint,3,opt,name=patch" json:"patch,omitempty"` + // A suffix for alpha, beta or rc release, e.g., "alpha-1", "rc2". It should + // be empty for mainline stable releases. 
+ Suffix *string `protobuf:"bytes,4,opt,name=suffix" json:"suffix,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Version) Reset() { *m = Version{} } +func (m *Version) String() string { return proto.CompactTextString(m) } +func (*Version) ProtoMessage() {} +func (*Version) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } + +func (m *Version) GetMajor() int32 { + if m != nil && m.Major != nil { + return *m.Major + } + return 0 +} + +func (m *Version) GetMinor() int32 { + if m != nil && m.Minor != nil { + return *m.Minor + } + return 0 +} + +func (m *Version) GetPatch() int32 { + if m != nil && m.Patch != nil { + return *m.Patch + } + return 0 +} + +func (m *Version) GetSuffix() string { + if m != nil && m.Suffix != nil { + return *m.Suffix + } + return "" +} + // An encoded CodeGeneratorRequest is written to the plugin's stdin. type CodeGeneratorRequest struct { // The .proto files that were explicitly listed on the command-line. The @@ -49,14 +93,19 @@ type CodeGeneratorRequest struct { // the entire set into memory at once. However, as of this writing, this // is not similarly optimized on protoc's end -- it will store all fields in // memory at once before sending them to the plugin. - ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` - XXX_unrecognized []byte `json:"-"` + // + // Type names of fields and extensions in the FileDescriptorProto are always + // fully qualified. + ProtoFile []*google_protobuf.FileDescriptorProto `protobuf:"bytes,15,rep,name=proto_file,json=protoFile" json:"proto_file,omitempty"` + // The version number of protocol compiler. 
+ CompilerVersion *Version `protobuf:"bytes,3,opt,name=compiler_version,json=compilerVersion" json:"compiler_version,omitempty"` + XXX_unrecognized []byte `json:"-"` } func (m *CodeGeneratorRequest) Reset() { *m = CodeGeneratorRequest{} } func (m *CodeGeneratorRequest) String() string { return proto.CompactTextString(m) } func (*CodeGeneratorRequest) ProtoMessage() {} -func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{0} } +func (*CodeGeneratorRequest) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } func (m *CodeGeneratorRequest) GetFileToGenerate() []string { if m != nil { @@ -79,6 +128,13 @@ func (m *CodeGeneratorRequest) GetProtoFile() []*google_protobuf.FileDescriptorP return nil } +func (m *CodeGeneratorRequest) GetCompilerVersion() *Version { + if m != nil { + return m.CompilerVersion + } + return nil +} + // The plugin writes an encoded CodeGeneratorResponse to stdout. type CodeGeneratorResponse struct { // Error message. If non-empty, code generation failed. 
The plugin process @@ -97,7 +153,7 @@ type CodeGeneratorResponse struct { func (m *CodeGeneratorResponse) Reset() { *m = CodeGeneratorResponse{} } func (m *CodeGeneratorResponse) String() string { return proto.CompactTextString(m) } func (*CodeGeneratorResponse) ProtoMessage() {} -func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{1} } +func (*CodeGeneratorResponse) Descriptor() ([]byte, []int) { return fileDescriptorPlugin, []int{2} } func (m *CodeGeneratorResponse) GetError() string { if m != nil && m.Error != nil { @@ -174,7 +230,7 @@ func (m *CodeGeneratorResponse_File) Reset() { *m = CodeGeneratorRespons func (m *CodeGeneratorResponse_File) String() string { return proto.CompactTextString(m) } func (*CodeGeneratorResponse_File) ProtoMessage() {} func (*CodeGeneratorResponse_File) Descriptor() ([]byte, []int) { - return fileDescriptorPlugin, []int{1, 0} + return fileDescriptorPlugin, []int{2, 0} } func (m *CodeGeneratorResponse_File) GetName() string { @@ -199,6 +255,7 @@ func (m *CodeGeneratorResponse_File) GetContent() string { } func init() { + proto.RegisterType((*Version)(nil), "google.protobuf.compiler.Version") proto.RegisterType((*CodeGeneratorRequest)(nil), "google.protobuf.compiler.CodeGeneratorRequest") proto.RegisterType((*CodeGeneratorResponse)(nil), "google.protobuf.compiler.CodeGeneratorResponse") proto.RegisterType((*CodeGeneratorResponse_File)(nil), "google.protobuf.compiler.CodeGeneratorResponse.File") @@ -207,24 +264,29 @@ func init() { func init() { proto.RegisterFile("plugin.proto", fileDescriptorPlugin) } var fileDescriptorPlugin = []byte{ - // 304 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x74, 0x51, 0x4f, 0x4b, 0xfb, 0x40, - 0x14, 0x24, 0xbf, 0x5f, 0x44, 0xf2, 0x2c, 0x8d, 0x2c, 0x15, 0x42, 0xe9, 0x21, 0x14, 0xc1, 0x9e, - 0x52, 0x10, 0xc1, 0x7b, 0x2b, 0xea, 0x31, 0x04, 0x4f, 0x82, 0x84, 0x98, 0xbe, 0x86, 0x85, 0x74, - 0xdf, 0xba, 0xd9, 
0x7c, 0x22, 0xbf, 0x93, 0x9f, 0xc7, 0xfd, 0x93, 0x56, 0x29, 0xf6, 0x94, 0xbc, - 0x99, 0xd9, 0x99, 0xd9, 0x7d, 0x30, 0x92, 0x6d, 0xdf, 0x70, 0x91, 0x49, 0x45, 0x9a, 0x58, 0xd2, - 0x10, 0x35, 0x2d, 0xfa, 0xe9, 0xbd, 0xdf, 0x66, 0x35, 0xed, 0x24, 0x6f, 0x51, 0x4d, 0x53, 0xcf, - 0x2c, 0xf7, 0xcc, 0x72, 0x83, 0x5d, 0xad, 0xb8, 0xd4, 0xa4, 0xbc, 0x7a, 0xfe, 0x19, 0xc0, 0x64, - 0x4d, 0x1b, 0x7c, 0x42, 0x81, 0xaa, 0x32, 0x78, 0x81, 0x1f, 0x3d, 0x76, 0x9a, 0x2d, 0xe0, 0x72, - 0x6b, 0x3c, 0x4a, 0x4d, 0x65, 0xe3, 0x39, 0x4c, 0x82, 0xf4, 0xff, 0x22, 0x2a, 0xc6, 0x16, 0x7f, - 0xa1, 0xe1, 0x04, 0xb2, 0x19, 0x44, 0xb2, 0x52, 0xd5, 0x0e, 0x35, 0xaa, 0xe4, 0x5f, 0x1a, 0x18, - 0xc9, 0x0f, 0xc0, 0xd6, 0x00, 0x2e, 0xa9, 0xb4, 0xa7, 0x92, 0xd8, 0x38, 0x5c, 0xdc, 0x5e, 0x67, - 0xc7, 0x8d, 0x1f, 0x0d, 0xf9, 0x70, 0xe8, 0x96, 0x5b, 0xd8, 0x98, 0xd8, 0x8f, 0x65, 0xe6, 0x5f, - 0x01, 0x5c, 0x1d, 0xb5, 0xec, 0x24, 0x89, 0x0e, 0xd9, 0x04, 0xce, 0x50, 0x29, 0x52, 0xa6, 0x9b, - 0x0d, 0xf6, 0x03, 0x7b, 0x86, 0xf0, 0x57, 0xdc, 0x5d, 0x76, 0xea, 0x81, 0xb2, 0x3f, 0x4d, 0x5d, - 0x9b, 0xc2, 0x39, 0x4c, 0xdf, 0x20, 0xb4, 0x13, 0x63, 0x10, 0x0a, 0x73, 0xa3, 0x21, 0xc6, 0xfd, - 0xb3, 0x1b, 0x88, 0xb9, 0x91, 0x2b, 0xcd, 0x49, 0x94, 0x92, 0xb8, 0xd0, 0xc3, 0xf5, 0xc7, 0x07, - 0x38, 0xb7, 0x28, 0x4b, 0xe0, 0xbc, 0x26, 0xa1, 0xd1, 0x08, 0x62, 0x27, 0xd8, 0x8f, 0xab, 0x7b, - 0x98, 0x99, 0x2e, 0x27, 0xfb, 0xad, 0x46, 0xb9, 0x5b, 0xb4, 0x7b, 0x90, 0xee, 0x35, 0xf2, 0x6b, - 0x2f, 0x1b, 0xfa, 0x0e, 0x00, 0x00, 0xff, 0xff, 0x70, 0xa2, 0xbd, 0x30, 0x02, 0x02, 0x00, 0x00, + // 383 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x74, 0x92, 0xcd, 0x6a, 0xd5, 0x40, + 0x14, 0xc7, 0x89, 0x37, 0xb5, 0xe4, 0xb4, 0x34, 0x65, 0xa8, 0x32, 0x94, 0x2e, 0xe2, 0x45, 0x30, + 0xab, 0x14, 0x8a, 0xe0, 0xbe, 0x15, 0x75, 0xe1, 0xe2, 0x32, 0x88, 0x0b, 0x41, 0x42, 0x4c, 0x4f, + 0xe2, 0x48, 0x32, 0x67, 0x9c, 0x99, 0x88, 0x4f, 0xea, 0x7b, 0xf8, 0x06, 0x32, 0x1f, 0xa9, 0x72, + 0xf1, 
0xee, 0xe6, 0xff, 0x3b, 0xf3, 0x71, 0xce, 0x8f, 0x81, 0x53, 0x3d, 0x2d, 0xa3, 0x54, 0x8d, + 0x36, 0xe4, 0x88, 0xf1, 0x91, 0x68, 0x9c, 0x30, 0xa6, 0x2f, 0xcb, 0xd0, 0xf4, 0x34, 0x6b, 0x39, + 0xa1, 0xb9, 0xac, 0x62, 0xe5, 0x7a, 0xad, 0x5c, 0xdf, 0xa3, 0xed, 0x8d, 0xd4, 0x8e, 0x4c, 0xdc, + 0xbd, 0xed, 0xe1, 0xf8, 0x23, 0x1a, 0x2b, 0x49, 0xb1, 0x0b, 0x38, 0x9a, 0xbb, 0x6f, 0x64, 0x78, + 0x56, 0x65, 0xf5, 0x91, 0x88, 0x21, 0x50, 0xa9, 0xc8, 0xf0, 0x47, 0x89, 0xfa, 0xe0, 0xa9, 0xee, + 0x5c, 0xff, 0x95, 0x6f, 0x22, 0x0d, 0x81, 0x3d, 0x85, 0xc7, 0x76, 0x19, 0x06, 0xf9, 0x93, 0xe7, + 0x55, 0x56, 0x17, 0x22, 0xa5, 0xed, 0xef, 0x0c, 0x2e, 0xee, 0xe8, 0x1e, 0xdf, 0xa2, 0x42, 0xd3, + 0x39, 0x32, 0x02, 0xbf, 0x2f, 0x68, 0x1d, 0xab, 0xe1, 0x7c, 0x90, 0x13, 0xb6, 0x8e, 0xda, 0x31, + 0xd6, 0x90, 0x67, 0xd5, 0xa6, 0x2e, 0xc4, 0x99, 0xe7, 0x1f, 0x28, 0x9d, 0x40, 0x76, 0x05, 0x85, + 0xee, 0x4c, 0x37, 0xa3, 0xc3, 0xd8, 0x4a, 0x21, 0xfe, 0x02, 0x76, 0x07, 0x10, 0xc6, 0x69, 0xfd, + 0x29, 0x5e, 0x56, 0x9b, 0xfa, 0xe4, 0xe6, 0x79, 0xb3, 0xaf, 0xe5, 0x8d, 0x9c, 0xf0, 0xf5, 0x83, + 0x80, 0x9d, 0xc7, 0xa2, 0x08, 0x55, 0x5f, 0x61, 0xef, 0xe1, 0x7c, 0x15, 0xd7, 0xfe, 0x88, 0x4e, + 0xc2, 0x78, 0x27, 0x37, 0xcf, 0x9a, 0x43, 0x86, 0x9b, 0x24, 0x4f, 0x94, 0x2b, 0x49, 0x60, 0xfb, + 0x2b, 0x83, 0x27, 0x7b, 0x33, 0x5b, 0x4d, 0xca, 0xa2, 0x77, 0x87, 0xc6, 0x24, 0xcf, 0x85, 0x88, + 0x81, 0xbd, 0x83, 0xfc, 0x9f, 0xe6, 0x5f, 0x1e, 0x7e, 0xf1, 0xbf, 0x97, 0x86, 0xd9, 0x44, 0xb8, + 0xe1, 0xf2, 0x33, 0xe4, 0x61, 0x1e, 0x06, 0xb9, 0xea, 0x66, 0x4c, 0xcf, 0x84, 0x35, 0x7b, 0x01, + 0xa5, 0x54, 0x16, 0x8d, 0x93, 0xa4, 0x5a, 0x4d, 0x52, 0xb9, 0x24, 0xf3, 0xec, 0x01, 0xef, 0x3c, + 0x65, 0x1c, 0x8e, 0x7b, 0x52, 0x0e, 0x95, 0xe3, 0x65, 0xd8, 0xb0, 0xc6, 0xdb, 0x57, 0x70, 0xd5, + 0xd3, 0x7c, 0xb0, 0xbf, 0xdb, 0xd3, 0x5d, 0xf8, 0x9b, 0x41, 0xaf, 0xfd, 0x54, 0xc4, 0x9f, 0xda, + 0x8e, 0xf4, 0x27, 0x00, 0x00, 0xff, 0xff, 0x7a, 0x72, 0x3d, 0x18, 0xb5, 0x02, 0x00, 0x00, } diff --git 
a/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go new file mode 100644 index 000000000..ceadde6a5 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/sortkeys/sortkeys.go @@ -0,0 +1,101 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2013, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package sortkeys + +import ( + "sort" +) + +func Strings(l []string) { + sort.Strings(l) +} + +func Float64s(l []float64) { + sort.Float64s(l) +} + +func Float32s(l []float32) { + sort.Sort(Float32Slice(l)) +} + +func Int64s(l []int64) { + sort.Sort(Int64Slice(l)) +} + +func Int32s(l []int32) { + sort.Sort(Int32Slice(l)) +} + +func Uint64s(l []uint64) { + sort.Sort(Uint64Slice(l)) +} + +func Uint32s(l []uint32) { + sort.Sort(Uint32Slice(l)) +} + +func Bools(l []bool) { + sort.Sort(BoolSlice(l)) +} + +type BoolSlice []bool + +func (p BoolSlice) Len() int { return len(p) } +func (p BoolSlice) Less(i, j int) bool { return p[j] } +func (p BoolSlice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Int64Slice []int64 + +func (p Int64Slice) Len() int { return len(p) } +func (p Int64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Int64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Int32Slice []int32 + +func (p Int32Slice) Len() int { return len(p) } +func (p Int32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Int32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Uint64Slice []uint64 + +func (p Uint64Slice) Len() int { return len(p) } +func (p Uint64Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint64Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Uint32Slice []uint32 + +func (p Uint32Slice) Len() int { return len(p) } +func (p Uint32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Uint32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } + +type Float32Slice []float32 + +func (p Float32Slice) Len() int { return len(p) } +func (p Float32Slice) Less(i, j int) bool { return p[i] < p[j] } +func (p Float32Slice) Swap(i, j int) { p[i], p[j] = p[j], p[i] } diff --git a/vendor/github.com/gogo/protobuf/types/any.go b/vendor/github.com/gogo/protobuf/types/any.go new file mode 100644 index 000000000..d83c3ad00 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/any.go @@ -0,0 +1,138 @@ +// Go 
support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements functions to marshal proto.Message to/from +// google.protobuf.Any message. 
+ +import ( + "fmt" + "reflect" + "strings" + + "github.com/gogo/protobuf/proto" +) + +const googleApis = "type.googleapis.com/" + +// AnyMessageName returns the name of the message contained in a google.protobuf.Any message. +// +// Note that regular type assertions should be done using the Is +// function. AnyMessageName is provided for less common use cases like filtering a +// sequence of Any messages based on a set of allowed message type names. +func AnyMessageName(any *Any) (string, error) { + if any == nil { + return "", fmt.Errorf("message is nil") + } + slash := strings.LastIndex(any.TypeUrl, "/") + if slash < 0 { + return "", fmt.Errorf("message type url %q is invalid", any.TypeUrl) + } + return any.TypeUrl[slash+1:], nil +} + +// MarshalAny takes the protocol buffer and encodes it into google.protobuf.Any. +func MarshalAny(pb proto.Message) (*Any, error) { + value, err := proto.Marshal(pb) + if err != nil { + return nil, err + } + return &Any{TypeUrl: googleApis + proto.MessageName(pb), Value: value}, nil +} + +// DynamicAny is a value that can be passed to UnmarshalAny to automatically +// allocate a proto.Message for the type specified in a google.protobuf.Any +// message. The allocated message is stored in the embedded proto.Message. +// +// Example: +// +// var x ptypes.DynamicAny +// if err := ptypes.UnmarshalAny(a, &x); err != nil { ... } +// fmt.Printf("unmarshaled message: %v", x.Message) +type DynamicAny struct { + proto.Message +} + +// Empty returns a new proto.Message of the type specified in a +// google.protobuf.Any message. It returns an error if corresponding message +// type isn't linked in. 
+func EmptyAny(any *Any) (proto.Message, error) { + aname, err := AnyMessageName(any) + if err != nil { + return nil, err + } + + t := proto.MessageType(aname) + if t == nil { + return nil, fmt.Errorf("any: message type %q isn't linked in", aname) + } + return reflect.New(t.Elem()).Interface().(proto.Message), nil +} + +// UnmarshalAny parses the protocol buffer representation in a google.protobuf.Any +// message and places the decoded result in pb. It returns an error if type of +// contents of Any message does not match type of pb message. +// +// pb can be a proto.Message, or a *DynamicAny. +func UnmarshalAny(any *Any, pb proto.Message) error { + if d, ok := pb.(*DynamicAny); ok { + if d.Message == nil { + var err error + d.Message, err = EmptyAny(any) + if err != nil { + return err + } + } + return UnmarshalAny(any, d.Message) + } + + aname, err := AnyMessageName(any) + if err != nil { + return err + } + + mname := proto.MessageName(pb) + if aname != mname { + return fmt.Errorf("mismatched message type: got %q want %q", aname, mname) + } + return proto.Unmarshal(any.Value, pb) +} + +// Is returns true if any value contains a given message type. +func Is(any *Any, pb proto.Message) bool { + aname, err := AnyMessageName(any) + if err != nil { + return false + } + + return aname == proto.MessageName(pb) +} diff --git a/vendor/github.com/gogo/protobuf/types/any.pb.go b/vendor/github.com/gogo/protobuf/types/any.pb.go new file mode 100644 index 000000000..1d280cdac --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/any.pb.go @@ -0,0 +1,691 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/any.proto + +/* + Package types is a generated protocol buffer package. 
+ + It is generated from these files: + google/protobuf/any.proto + google/protobuf/type.proto + google/protobuf/empty.proto + google/protobuf/api.proto + google/protobuf/timestamp.proto + google/protobuf/duration.proto + google/protobuf/struct.proto + google/protobuf/wrappers.proto + google/protobuf/field_mask.proto + google/protobuf/source_context.proto + + It has these top-level messages: + Any + Type + Field + Enum + EnumValue + Option + Empty + Api + Method + Mixin + Timestamp + Duration + Struct + Value + ListValue + DoubleValue + FloatValue + Int64Value + UInt64Value + Int32Value + UInt32Value + BoolValue + StringValue + BytesValue + FieldMask + SourceContext +*/ +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import bytes "bytes" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +// `Any` contains an arbitrary serialized protocol buffer message along with a +// URL that describes the type of the serialized message. +// +// Protobuf library provides support to pack/unpack Any values in the form +// of utility functions or additional generated methods of the Any type. +// +// Example 1: Pack and unpack a message in C++. +// +// Foo foo = ...; +// Any any; +// any.PackFrom(foo); +// ... +// if (any.UnpackTo(&foo)) { +// ... +// } +// +// Example 2: Pack and unpack a message in Java. +// +// Foo foo = ...; +// Any any = Any.pack(foo); +// ... 
+// if (any.is(Foo.class)) { +// foo = any.unpack(Foo.class); +// } +// +// Example 3: Pack and unpack a message in Python. +// +// foo = Foo(...) +// any = Any() +// any.Pack(foo) +// ... +// if any.Is(Foo.DESCRIPTOR): +// any.Unpack(foo) +// ... +// +// Example 4: Pack and unpack a message in Go +// +// foo := &pb.Foo{...} +// any, err := ptypes.MarshalAny(foo) +// ... +// foo := &pb.Foo{} +// if err := ptypes.UnmarshalAny(any, foo); err != nil { +// ... +// } +// +// The pack methods provided by protobuf library will by default use +// 'type.googleapis.com/full.type.name' as the type URL and the unpack +// methods only use the fully qualified type name after the last '/' +// in the type URL, for example "foo.bar.com/x/y.z" will yield type +// name "y.z". +// +// +// JSON +// ==== +// The JSON representation of an `Any` value uses the regular +// representation of the deserialized, embedded message, with an +// additional field `@type` which contains the type URL. Example: +// +// package google.profile; +// message Person { +// string first_name = 1; +// string last_name = 2; +// } +// +// { +// "@type": "type.googleapis.com/google.profile.Person", +// "firstName": , +// "lastName": +// } +// +// If the embedded message type is well-known and has a custom JSON +// representation, that representation will be embedded adding a field +// `value` which holds the custom JSON in addition to the `@type` +// field. Example (for message [google.protobuf.Duration][]): +// +// { +// "@type": "type.googleapis.com/google.protobuf.Duration", +// "value": "1.212s" +// } +// +type Any struct { + // A URL/resource name that uniquely identifies the type of the serialized + // protocol buffer message. The last segment of the URL's path must represent + // the fully qualified name of the type (as in + // `path/google.protobuf.Duration`). The name should be in a canonical form + // (e.g., leading "." is not accepted). 
+ // + // In practice, teams usually precompile into the binary all types that they + // expect it to use in the context of Any. However, for URLs which use the + // scheme `http`, `https`, or no scheme, one can optionally set up a type + // server that maps type URLs to message definitions as follows: + // + // * If no scheme is provided, `https` is assumed. + // * An HTTP GET on the URL must yield a [google.protobuf.Type][] + // value in binary format, or produce an error. + // * Applications are allowed to cache lookup results based on the + // URL, or have them precompiled into a binary to avoid any + // lookup. Therefore, binary compatibility needs to be preserved + // on changes to types. (Use versioned type names to manage + // breaking changes.) + // + // Note: this functionality is not currently available in the official + // protobuf release, and it is not used for type URLs beginning with + // type.googleapis.com. + // + // Schemes other than `http`, `https` (or the empty scheme) might be + // used with implementation specific semantics. + // + TypeUrl string `protobuf:"bytes,1,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // Must be a valid serialized protocol buffer of the above specified type. 
+ Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Any) Reset() { *m = Any{} } +func (*Any) ProtoMessage() {} +func (*Any) Descriptor() ([]byte, []int) { return fileDescriptorAny, []int{0} } +func (*Any) XXX_WellKnownType() string { return "Any" } + +func (m *Any) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Any) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*Any)(nil), "google.protobuf.Any") +} +func (this *Any) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Any) + if !ok { + that2, ok := that.(Any) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.TypeUrl != that1.TypeUrl { + if this.TypeUrl < that1.TypeUrl { + return -1 + } + return 1 + } + if c := bytes.Compare(this.Value, that1.Value); c != 0 { + return c + } + return 0 +} +func (this *Any) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Any) + if !ok { + that2, ok := that.(Any) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.TypeUrl != that1.TypeUrl { + return false + } + if !bytes.Equal(this.Value, that1.Value) { + return false + } + return true +} +func (this *Any) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Any{") + s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringAny(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := 
reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Any) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Any) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.TypeUrl) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintAny(dAtA, i, uint64(len(m.TypeUrl))) + i += copy(dAtA[i:], m.TypeUrl) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintAny(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func encodeVarintAny(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedAny(r randyAny, easy bool) *Any { + this := &Any{} + this.TypeUrl = string(randStringAny(r)) + v1 := r.Intn(100) + this.Value = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Value[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyAny interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneAny(r randyAny) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringAny(r randyAny) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneAny(r) + } + return string(tmps) +} +func randUnrecognizedAny(r randyAny, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldAny(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldAny(dAtA []byte, r randyAny, 
fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateAny(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateAny(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateAny(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateAny(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Any) Size() (n int) { + var l int + _ = l + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + sovAny(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovAny(uint64(l)) + } + return n +} + +func sovAny(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozAny(x uint64) (n int) { + return sovAny(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Any) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Any{`, + `TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func valueToStringAny(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Any) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 
+ for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Any: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Any: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthAny + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowAny + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthAny + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipAny(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthAny + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipAny(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthAny + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowAny + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipAny(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return 
iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthAny = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowAny = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("google/protobuf/any.proto", fileDescriptorAny) } + +var fileDescriptorAny = []byte{ + // 211 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4c, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcc, 0xab, 0xd4, + 0x03, 0x73, 0x84, 0xf8, 0x21, 0x52, 0x7a, 0x30, 0x29, 0x25, 0x33, 0x2e, 0x66, 0xc7, 0xbc, 0x4a, + 0x21, 0x49, 0x2e, 0x8e, 0x92, 0xca, 0x82, 0xd4, 0xf8, 0xd2, 0xa2, 0x1c, 0x09, 0x46, 0x05, 0x46, + 0x0d, 0xce, 0x20, 0x76, 0x10, 0x3f, 0xb4, 0x28, 0x47, 0x48, 0x84, 0x8b, 0xb5, 0x2c, 0x31, 0xa7, + 0x34, 0x55, 0x82, 0x49, 0x81, 0x51, 0x83, 0x27, 0x08, 0xc2, 0x71, 0xaa, 0xbf, 0xf0, 0x50, 0x8e, + 0xe1, 0xc6, 0x43, 0x39, 0x86, 0x0f, 0x0f, 0xe5, 0x18, 0x7f, 0x3c, 0x94, 0x63, 0x6c, 0x78, 0x24, + 0xc7, 0xb8, 0xe2, 0x91, 0x1c, 0xe3, 0x89, 0x47, 0x72, 0x8c, 0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, + 0x24, 0xc7, 0xf8, 0xe2, 0x91, 0x1c, 0xc3, 0x07, 0x90, 0xf8, 0x63, 0x39, 0x46, 0x2e, 0xe1, 0xe4, + 0xfc, 0x5c, 0x3d, 0x34, 0xeb, 0x9d, 0x38, 0x1c, 0xf3, 0x2a, 0x03, 0x40, 0x9c, 0x00, 0xc6, 0x28, + 0x56, 0x90, 0x8d, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, 0x94, + 0x06, 0x40, 0x95, 0xea, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, 0x94, + 0x25, 0xb1, 0x81, 0xcd, 0x30, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0xf9, 0xf1, 0x85, 0x16, 0xed, + 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/any_test.go b/vendor/github.com/gogo/protobuf/types/any_test.go new file mode 100644 index 000000000..14679a244 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/any_test.go @@ -0,0 +1,112 @@ +// 
Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package types + +import ( + "testing" + + "github.com/gogo/protobuf/proto" + pb "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +) + +func TestMarshalUnmarshal(t *testing.T) { + orig := &Any{Value: []byte("test")} + + packed, err := MarshalAny(orig) + if err != nil { + t.Errorf("MarshalAny(%+v): got: _, %v exp: _, nil", orig, err) + } + + unpacked := &Any{} + err = UnmarshalAny(packed, unpacked) + if err != nil || !proto.Equal(unpacked, orig) { + t.Errorf("got: %v, %+v; want nil, %+v", err, unpacked, orig) + } +} + +func TestIs(t *testing.T) { + a, err := MarshalAny(&pb.FileDescriptorProto{}) + if err != nil { + t.Fatal(err) + } + if Is(a, &pb.DescriptorProto{}) { + t.Error("FileDescriptorProto is not a DescriptorProto, but Is says it is") + } + if !Is(a, &pb.FileDescriptorProto{}) { + t.Error("FileDescriptorProto is indeed a FileDescriptorProto, but Is says it is not") + } +} + +func TestIsDifferentUrlPrefixes(t *testing.T) { + m := &pb.FileDescriptorProto{} + a := &Any{TypeUrl: "foo/bar/" + proto.MessageName(m)} + if !Is(a, m) { + t.Errorf("message with type url %q didn't satisfy Is for type %q", a.TypeUrl, proto.MessageName(m)) + } +} + +func TestUnmarshalDynamic(t *testing.T) { + want := &pb.FileDescriptorProto{Name: proto.String("foo")} + a, err := MarshalAny(want) + if err != nil { + t.Fatal(err) + } + var got DynamicAny + if err := UnmarshalAny(a, &got); err != nil { + t.Fatal(err) + } + if !proto.Equal(got.Message, want) { + t.Errorf("invalid result from UnmarshalAny, got %q want %q", got.Message, want) + } +} + +func TestEmpty(t *testing.T) { + want := &pb.FileDescriptorProto{} + a, err := MarshalAny(want) + if err != nil { + t.Fatal(err) + } + got, err := EmptyAny(a) + if err != nil { + t.Fatal(err) + } + if !proto.Equal(got, want) { + t.Errorf("unequal empty message, got %q, want %q", got, want) + } + + // that's a valid type_url for a message which shouldn't be linked into this + // test binary. We want an error. 
+ a.TypeUrl = "type.googleapis.com/google.protobuf.TestAny" + if _, err := EmptyAny(a); err == nil { + t.Errorf("got no error for an attempt to create a message of type %q, which shouldn't be linked in", a.TypeUrl) + } +} diff --git a/vendor/github.com/gogo/protobuf/types/api.pb.go b/vendor/github.com/gogo/protobuf/types/api.pb.go new file mode 100644 index 000000000..f9c8fbd7e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/api.pb.go @@ -0,0 +1,1892 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/api.proto + +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Api is a light-weight descriptor for an API Interface. +// +// Interfaces are also described as "protocol buffer services" in some contexts, +// such as by the "service" keyword in a .proto file, but they are different +// from API Services, which represent a concrete implementation of an interface +// as opposed to simply a description of methods and bindings. They are also +// sometimes simply referred to as "APIs" in other contexts, such as the name of +// this message itself. See https://cloud.google.com/apis/design/glossary for +// detailed terminology. +type Api struct { + // The fully qualified name of this interface, including package name + // followed by the interface's simple name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The methods of this interface, in unspecified order. + Methods []*Method `protobuf:"bytes,2,rep,name=methods" json:"methods,omitempty"` + // Any metadata attached to the interface. + Options []*Option `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"` + // A version string for this interface. 
If specified, must have the form + // `major-version.minor-version`, as in `1.10`. If the minor version is + // omitted, it defaults to zero. If the entire version field is empty, the + // major version is derived from the package name, as outlined below. If the + // field is not empty, the version in the package name will be verified to be + // consistent with what is provided here. + // + // The versioning schema uses [semantic + // versioning](http://semver.org) where the major version number + // indicates a breaking change and the minor version an additive, + // non-breaking change. Both version numbers are signals to users + // what to expect from different versions, and should be carefully + // chosen based on the product plan. + // + // The major version is also reflected in the package name of the + // interface, which must end in `v`, as in + // `google.feature.v1`. For major versions 0 and 1, the suffix can + // be omitted. Zero major versions must only be used for + // experimental, non-GA interfaces. + // + // + Version string `protobuf:"bytes,4,opt,name=version,proto3" json:"version,omitempty"` + // Source context for the protocol buffer service represented by this + // message. + SourceContext *SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext" json:"source_context,omitempty"` + // Included interfaces. See [Mixin][]. + Mixins []*Mixin `protobuf:"bytes,6,rep,name=mixins" json:"mixins,omitempty"` + // The source syntax of the service. 
+ Syntax Syntax `protobuf:"varint,7,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` +} + +func (m *Api) Reset() { *m = Api{} } +func (*Api) ProtoMessage() {} +func (*Api) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{0} } + +func (m *Api) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Api) GetMethods() []*Method { + if m != nil { + return m.Methods + } + return nil +} + +func (m *Api) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Api) GetVersion() string { + if m != nil { + return m.Version + } + return "" +} + +func (m *Api) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Api) GetMixins() []*Mixin { + if m != nil { + return m.Mixins + } + return nil +} + +func (m *Api) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return SYNTAX_PROTO2 +} + +// Method represents a method of an API interface. +type Method struct { + // The simple name of this method. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // A URL of the input message type. + RequestTypeUrl string `protobuf:"bytes,2,opt,name=request_type_url,json=requestTypeUrl,proto3" json:"request_type_url,omitempty"` + // If true, the request is streamed. + RequestStreaming bool `protobuf:"varint,3,opt,name=request_streaming,json=requestStreaming,proto3" json:"request_streaming,omitempty"` + // The URL of the output message type. + ResponseTypeUrl string `protobuf:"bytes,4,opt,name=response_type_url,json=responseTypeUrl,proto3" json:"response_type_url,omitempty"` + // If true, the response is streamed. + ResponseStreaming bool `protobuf:"varint,5,opt,name=response_streaming,json=responseStreaming,proto3" json:"response_streaming,omitempty"` + // Any metadata attached to the method. 
+ Options []*Option `protobuf:"bytes,6,rep,name=options" json:"options,omitempty"` + // The source syntax of this method. + Syntax Syntax `protobuf:"varint,7,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` +} + +func (m *Method) Reset() { *m = Method{} } +func (*Method) ProtoMessage() {} +func (*Method) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{1} } + +func (m *Method) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Method) GetRequestTypeUrl() string { + if m != nil { + return m.RequestTypeUrl + } + return "" +} + +func (m *Method) GetRequestStreaming() bool { + if m != nil { + return m.RequestStreaming + } + return false +} + +func (m *Method) GetResponseTypeUrl() string { + if m != nil { + return m.ResponseTypeUrl + } + return "" +} + +func (m *Method) GetResponseStreaming() bool { + if m != nil { + return m.ResponseStreaming + } + return false +} + +func (m *Method) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Method) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return SYNTAX_PROTO2 +} + +// Declares an API Interface to be included in this interface. The including +// interface must redeclare all the methods from the included interface, but +// documentation and options are inherited as follows: +// +// - If after comment and whitespace stripping, the documentation +// string of the redeclared method is empty, it will be inherited +// from the original method. +// +// - Each annotation belonging to the service config (http, +// visibility) which is not set in the redeclared method will be +// inherited. +// +// - If an http annotation is inherited, the path pattern will be +// modified as follows. Any version prefix will be replaced by the +// version of the including interface plus the [root][] path if +// specified. 
+// +// Example of a simple mixin: +// +// package google.acl.v1; +// service AccessControl { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v1/{resource=**}:getAcl"; +// } +// } +// +// package google.storage.v2; +// service Storage { +// rpc GetAcl(GetAclRequest) returns (Acl); +// +// // Get a data record. +// rpc GetData(GetDataRequest) returns (Data) { +// option (google.api.http).get = "/v2/{resource=**}"; +// } +// } +// +// Example of a mixin configuration: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// +// The mixin construct implies that all methods in `AccessControl` are +// also declared with same name and request/response types in +// `Storage`. A documentation generator or annotation processor will +// see the effective `Storage.GetAcl` method after inherting +// documentation and annotations as follows: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/{resource=**}:getAcl"; +// } +// ... +// } +// +// Note how the version in the path pattern changed from `v1` to `v2`. +// +// If the `root` field in the mixin is specified, it should be a +// relative path under which inherited HTTP paths are placed. Example: +// +// apis: +// - name: google.storage.v2.Storage +// mixins: +// - name: google.acl.v1.AccessControl +// root: acls +// +// This implies the following inherited HTTP annotation: +// +// service Storage { +// // Get the underlying ACL object. +// rpc GetAcl(GetAclRequest) returns (Acl) { +// option (google.api.http).get = "/v2/acls/{resource=**}:getAcl"; +// } +// ... +// } +type Mixin struct { + // The fully qualified name of the interface which is included. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // If non-empty specifies a path under which inherited HTTP paths + // are rooted. + Root string `protobuf:"bytes,2,opt,name=root,proto3" json:"root,omitempty"` +} + +func (m *Mixin) Reset() { *m = Mixin{} } +func (*Mixin) ProtoMessage() {} +func (*Mixin) Descriptor() ([]byte, []int) { return fileDescriptorApi, []int{2} } + +func (m *Mixin) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Mixin) GetRoot() string { + if m != nil { + return m.Root + } + return "" +} + +func init() { + proto.RegisterType((*Api)(nil), "google.protobuf.Api") + proto.RegisterType((*Method)(nil), "google.protobuf.Method") + proto.RegisterType((*Mixin)(nil), "google.protobuf.Mixin") +} +func (this *Api) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Api) + if !ok { + that2, ok := that.(Api) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Methods) != len(that1.Methods) { + if len(this.Methods) < len(that1.Methods) { + return -1 + } + return 1 + } + for i := range this.Methods { + if c := this.Methods[i].Compare(that1.Methods[i]); c != 0 { + return c + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.Version != that1.Version { + if this.Version < that1.Version { + return -1 + } + return 1 + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if len(this.Mixins) != len(that1.Mixins) { + if len(this.Mixins) < len(that1.Mixins) { + return -1 + } + return 1 
+ } + for i := range this.Mixins { + if c := this.Mixins[i].Compare(that1.Mixins[i]); c != 0 { + return c + } + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + return 0 +} +func (this *Method) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Method) + if !ok { + that2, ok := that.(Method) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.RequestTypeUrl != that1.RequestTypeUrl { + if this.RequestTypeUrl < that1.RequestTypeUrl { + return -1 + } + return 1 + } + if this.RequestStreaming != that1.RequestStreaming { + if !this.RequestStreaming { + return -1 + } + return 1 + } + if this.ResponseTypeUrl != that1.ResponseTypeUrl { + if this.ResponseTypeUrl < that1.ResponseTypeUrl { + return -1 + } + return 1 + } + if this.ResponseStreaming != that1.ResponseStreaming { + if !this.ResponseStreaming { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + return 0 +} +func (this *Mixin) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Mixin) + if !ok { + that2, ok := that.(Mixin) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.Root != 
that1.Root { + if this.Root < that1.Root { + return -1 + } + return 1 + } + return 0 +} +func (this *Api) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Api) + if !ok { + that2, ok := that.(Api) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Methods) != len(that1.Methods) { + return false + } + for i := range this.Methods { + if !this.Methods[i].Equal(that1.Methods[i]) { + return false + } + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.Version != that1.Version { + return false + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if len(this.Mixins) != len(that1.Mixins) { + return false + } + for i := range this.Mixins { + if !this.Mixins[i].Equal(that1.Mixins[i]) { + return false + } + } + if this.Syntax != that1.Syntax { + return false + } + return true +} +func (this *Method) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Method) + if !ok { + that2, ok := that.(Method) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.RequestTypeUrl != that1.RequestTypeUrl { + return false + } + if this.RequestStreaming != that1.RequestStreaming { + return false + } + if this.ResponseTypeUrl != that1.ResponseTypeUrl { + return false + } + if this.ResponseStreaming != that1.ResponseStreaming { + return false + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.Syntax != 
that1.Syntax { + return false + } + return true +} +func (this *Mixin) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Mixin) + if !ok { + that2, ok := that.(Mixin) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Root != that1.Root { + return false + } + return true +} +func (this *Api) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&types.Api{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Methods != nil { + s = append(s, "Methods: "+fmt.Sprintf("%#v", this.Methods)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "Version: "+fmt.Sprintf("%#v", this.Version)+",\n") + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + if this.Mixins != nil { + s = append(s, "Mixins: "+fmt.Sprintf("%#v", this.Mixins)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Method) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 11) + s = append(s, "&types.Method{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "RequestTypeUrl: "+fmt.Sprintf("%#v", this.RequestTypeUrl)+",\n") + s = append(s, "RequestStreaming: "+fmt.Sprintf("%#v", this.RequestStreaming)+",\n") + s = append(s, "ResponseTypeUrl: "+fmt.Sprintf("%#v", this.ResponseTypeUrl)+",\n") + s = append(s, "ResponseStreaming: "+fmt.Sprintf("%#v", this.ResponseStreaming)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + s = 
append(s, "}") + return strings.Join(s, "") +} +func (this *Mixin) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Mixin{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Root: "+fmt.Sprintf("%#v", this.Root)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringApi(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Api) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Api) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Methods) > 0 { + for _, msg := range m.Methods { + dAtA[i] = 0x12 + i++ + i = encodeVarintApi(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Options) > 0 { + for _, msg := range m.Options { + dAtA[i] = 0x1a + i++ + i = encodeVarintApi(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Version) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Version))) + i += copy(dAtA[i:], m.Version) + } + if m.SourceContext != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintApi(dAtA, i, uint64(m.SourceContext.Size())) + n1, err := m.SourceContext.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if len(m.Mixins) > 0 { + for _, msg := range m.Mixins { + dAtA[i] = 0x32 + i++ + i = encodeVarintApi(dAtA, i, uint64(msg.Size())) + n, err := 
msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Syntax != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintApi(dAtA, i, uint64(m.Syntax)) + } + return i, nil +} + +func (m *Method) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Method) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.RequestTypeUrl) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.RequestTypeUrl))) + i += copy(dAtA[i:], m.RequestTypeUrl) + } + if m.RequestStreaming { + dAtA[i] = 0x18 + i++ + if m.RequestStreaming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.ResponseTypeUrl) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.ResponseTypeUrl))) + i += copy(dAtA[i:], m.ResponseTypeUrl) + } + if m.ResponseStreaming { + dAtA[i] = 0x28 + i++ + if m.ResponseStreaming { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Options) > 0 { + for _, msg := range m.Options { + dAtA[i] = 0x32 + i++ + i = encodeVarintApi(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Syntax != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintApi(dAtA, i, uint64(m.Syntax)) + } + return i, nil +} + +func (m *Mixin) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Mixin) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Root) > 0 { 
+ dAtA[i] = 0x12 + i++ + i = encodeVarintApi(dAtA, i, uint64(len(m.Root))) + i += copy(dAtA[i:], m.Root) + } + return i, nil +} + +func encodeVarintApi(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedApi(r randyApi, easy bool) *Api { + this := &Api{} + this.Name = string(randStringApi(r)) + if r.Intn(10) != 0 { + v1 := r.Intn(5) + this.Methods = make([]*Method, v1) + for i := 0; i < v1; i++ { + this.Methods[i] = NewPopulatedMethod(r, easy) + } + } + if r.Intn(10) != 0 { + v2 := r.Intn(5) + this.Options = make([]*Option, v2) + for i := 0; i < v2; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.Version = string(randStringApi(r)) + if r.Intn(10) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + if r.Intn(10) != 0 { + v3 := r.Intn(5) + this.Mixins = make([]*Mixin, v3) + for i := 0; i < v3; i++ { + this.Mixins[i] = NewPopulatedMixin(r, easy) + } + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedMethod(r randyApi, easy bool) *Method { + this := &Method{} + this.Name = string(randStringApi(r)) + this.RequestTypeUrl = string(randStringApi(r)) + this.RequestStreaming = bool(bool(r.Intn(2) == 0)) + this.ResponseTypeUrl = string(randStringApi(r)) + this.ResponseStreaming = bool(bool(r.Intn(2) == 0)) + if r.Intn(10) != 0 { + v4 := r.Intn(5) + this.Options = make([]*Option, v4) + for i := 0; i < v4; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedMixin(r randyApi, easy bool) *Mixin { + this := &Mixin{} + this.Name = string(randStringApi(r)) + this.Root = string(randStringApi(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyApi interface { + Float32() float32 + Float64() 
float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneApi(r randyApi) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringApi(r randyApi) string { + v5 := r.Intn(100) + tmps := make([]rune, v5) + for i := 0; i < v5; i++ { + tmps[i] = randUTF8RuneApi(r) + } + return string(tmps) +} +func randUnrecognizedApi(r randyApi, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldApi(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldApi(dAtA []byte, r randyApi, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + v6 := r.Int63() + if r.Intn(2) == 0 { + v6 *= -1 + } + dAtA = encodeVarintPopulateApi(dAtA, uint64(v6)) + case 1: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateApi(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateApi(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateApi(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Api) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Methods) > 0 { + for _, 
e := range m.Methods { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + l = len(m.Version) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovApi(uint64(l)) + } + if len(m.Mixins) > 0 { + for _, e := range m.Mixins { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if m.Syntax != 0 { + n += 1 + sovApi(uint64(m.Syntax)) + } + return n +} + +func (m *Method) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.RequestTypeUrl) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.RequestStreaming { + n += 2 + } + l = len(m.ResponseTypeUrl) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + if m.ResponseStreaming { + n += 2 + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovApi(uint64(l)) + } + } + if m.Syntax != 0 { + n += 1 + sovApi(uint64(m.Syntax)) + } + return n +} + +func (m *Mixin) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + l = len(m.Root) + if l > 0 { + n += 1 + l + sovApi(uint64(l)) + } + return n +} + +func sovApi(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozApi(x uint64) (n int) { + return sovApi(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Api) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Api{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Methods:` + strings.Replace(fmt.Sprintf("%v", this.Methods), "Method", "Method", 1) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Option", "Option", 1) + `,`, + `Version:` + fmt.Sprintf("%v", this.Version) + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", 
"SourceContext", 1) + `,`, + `Mixins:` + strings.Replace(fmt.Sprintf("%v", this.Mixins), "Mixin", "Mixin", 1) + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `}`, + }, "") + return s +} +func (this *Method) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Method{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `RequestTypeUrl:` + fmt.Sprintf("%v", this.RequestTypeUrl) + `,`, + `RequestStreaming:` + fmt.Sprintf("%v", this.RequestStreaming) + `,`, + `ResponseTypeUrl:` + fmt.Sprintf("%v", this.ResponseTypeUrl) + `,`, + `ResponseStreaming:` + fmt.Sprintf("%v", this.ResponseStreaming) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Option", "Option", 1) + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `}`, + }, "") + return s +} +func (this *Mixin) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Mixin{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Root:` + fmt.Sprintf("%v", this.Root) + `,`, + `}`, + }, "") + return s +} +func valueToStringApi(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Api) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Api: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Api: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Methods", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Methods = append(m.Methods, &Method{}) + if err := m.Methods[len(m.Methods)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Version", wireType) + } + var stringLen 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Version = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Mixins", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Mixins = append(m.Mixins, &Mixin{}) + if err := m.Mixins[len(m.Mixins)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= (Syntax(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Method) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Method: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Method: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestTypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.RequestTypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field RequestStreaming", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.RequestStreaming = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseTypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.ResponseTypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field ResponseStreaming", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.ResponseStreaming = bool(v != 0) + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= (Syntax(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Mixin) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Mixin: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Mixin: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d 
for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowApi + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthApi + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Root = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipApi(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthApi + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipApi(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + 
return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthApi + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowApi + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipApi(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthApi = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowApi = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("google/protobuf/api.proto", fileDescriptorApi) } + +var fileDescriptorApi = []byte{ + // 467 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x91, 0x31, 0x6f, 0x13, 0x31, + 0x14, 0xc7, 0xe3, 0xbb, 0xe4, 0x52, 0x5c, 0x91, 0x82, 0x91, 0xc0, 0x64, 0xb0, 0x4e, 0x15, 0xc3, + 0x09, 0xc4, 0x45, 0x94, 0x4f, 0xd0, 0x20, 0xd4, 0x01, 0x21, 0xa2, 0x0b, 0x08, 0x89, 0x25, 0x4a, + 0x83, 0x09, 0x96, 0xee, 0x6c, 0x63, 0x3b, 0x90, 0x4c, 0xf0, 0x59, 0x98, 0x10, 0x23, 0xdf, 0x80, + 0x8d, 0xb1, 0x23, 0x23, 0x39, 0x16, 0xc6, 0x8e, 0x8c, 0xc8, 0xbe, 
0x73, 0x53, 0x92, 0x20, 0xd1, + 0xcd, 0xef, 0xfd, 0x7f, 0xfe, 0xfb, 0xbd, 0xbf, 0xe1, 0xcd, 0xa9, 0x10, 0xd3, 0x9c, 0xf6, 0xa4, + 0x12, 0x46, 0x1c, 0xcf, 0x5e, 0xf5, 0xc6, 0x92, 0xa5, 0xae, 0x40, 0x7b, 0x95, 0x94, 0x7a, 0xa9, + 0x7b, 0x6b, 0x9d, 0xd5, 0x62, 0xa6, 0x26, 0x74, 0x34, 0x11, 0xdc, 0xd0, 0xb9, 0xa9, 0xc0, 0x6e, + 0x77, 0x9d, 0x32, 0x0b, 0x59, 0x9b, 0xec, 0x7f, 0x0d, 0x60, 0x78, 0x28, 0x19, 0x42, 0xb0, 0xc9, + 0xc7, 0x05, 0xc5, 0x20, 0x06, 0xc9, 0xa5, 0xcc, 0x9d, 0xd1, 0x3d, 0xd8, 0x2e, 0xa8, 0x79, 0x2d, + 0x5e, 0x6a, 0x1c, 0xc4, 0x61, 0xb2, 0x7b, 0x70, 0x23, 0x5d, 0x1b, 0x20, 0x7d, 0xec, 0xf4, 0xcc, + 0x73, 0xf6, 0x8a, 0x90, 0x86, 0x09, 0xae, 0x71, 0xf8, 0x8f, 0x2b, 0x4f, 0x9c, 0x9e, 0x79, 0x0e, + 0x61, 0xd8, 0x7e, 0x4b, 0x95, 0x66, 0x82, 0xe3, 0xa6, 0x7b, 0xdc, 0x97, 0xe8, 0x21, 0xec, 0xfc, + 0xbd, 0x0f, 0x6e, 0xc5, 0x20, 0xd9, 0x3d, 0x20, 0x1b, 0x9e, 0x43, 0x87, 0x3d, 0xa8, 0xa8, 0xec, + 0xb2, 0x3e, 0x5f, 0xa2, 0x14, 0x46, 0x05, 0x9b, 0x33, 0xae, 0x71, 0xe4, 0x46, 0xba, 0xbe, 0xb9, + 0x85, 0x95, 0xb3, 0x9a, 0x42, 0x3d, 0x18, 0xe9, 0x05, 0x37, 0xe3, 0x39, 0x6e, 0xc7, 0x20, 0xe9, + 0x6c, 0x59, 0x61, 0xe8, 0xe4, 0xac, 0xc6, 0xf6, 0xbf, 0x04, 0x30, 0xaa, 0x82, 0xd8, 0x1a, 0x63, + 0x02, 0xaf, 0x28, 0xfa, 0x66, 0x46, 0xb5, 0x19, 0xd9, 0xe0, 0x47, 0x33, 0x95, 0xe3, 0xc0, 0xe9, + 0x9d, 0xba, 0xff, 0x74, 0x21, 0xe9, 0x33, 0x95, 0xa3, 0x3b, 0xf0, 0xaa, 0x27, 0xb5, 0x51, 0x74, + 0x5c, 0x30, 0x3e, 0xc5, 0x61, 0x0c, 0x92, 0x9d, 0xcc, 0x5b, 0x0c, 0x7d, 0x1f, 0xdd, 0xb6, 0xb0, + 0x96, 0x82, 0x6b, 0xba, 0xf2, 0xad, 0x12, 0xdc, 0xf3, 0x82, 0x37, 0xbe, 0x0b, 0xd1, 0x19, 0xbb, + 0x72, 0x6e, 0x39, 0xe7, 0x33, 0x97, 0x95, 0xf5, 0xb9, 0x5f, 0x8c, 0xfe, 0xf3, 0x17, 0x2f, 0x1c, + 0x5a, 0x0f, 0xb6, 0x5c, 0xec, 0x5b, 0x23, 0x43, 0xb0, 0xa9, 0x84, 0x30, 0x75, 0x4c, 0xee, 0xdc, + 0x7f, 0x7f, 0xb2, 0x24, 0x8d, 0xef, 0x4b, 0xd2, 0x38, 0x5d, 0x12, 0xf0, 0x7b, 0x49, 0xc0, 0x87, + 0x92, 0x80, 0x4f, 0x25, 0x01, 0xdf, 0x4a, 0x02, 0x4e, 0x4a, 0x02, 0x7e, 0x94, 0x04, 0xfc, 0x2a, + 0x49, 
0xe3, 0xd4, 0xf6, 0x7f, 0x12, 0x00, 0xaf, 0x4d, 0x44, 0xb1, 0x3e, 0x46, 0x7f, 0xe7, 0x50, + 0xb2, 0x81, 0x2d, 0x06, 0xe0, 0x45, 0xcb, 0xe6, 0xa6, 0x3f, 0x06, 0xe1, 0xd1, 0xa0, 0xff, 0x39, + 0x20, 0x47, 0x15, 0x3a, 0xf0, 0x13, 0x3f, 0xa7, 0x79, 0xfe, 0x88, 0x8b, 0x77, 0xdc, 0xc6, 0xa8, + 0x8f, 0x23, 0xe7, 0x71, 0xff, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x65, 0x14, 0x47, 0x85, 0xa1, + 0x03, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/doc.go b/vendor/github.com/gogo/protobuf/types/doc.go new file mode 100644 index 000000000..ff2810af1 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/doc.go @@ -0,0 +1,35 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +/* +Package types contains code for interacting with well-known types. +*/ +package types diff --git a/vendor/github.com/gogo/protobuf/types/duration.go b/vendor/github.com/gogo/protobuf/types/duration.go new file mode 100644 index 000000000..475d61f1d --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/duration.go @@ -0,0 +1,100 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements conversions between google.protobuf.Duration +// and time.Duration. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Range of a Duration in seconds, as specified in + // google/protobuf/duration.proto. This is about 10,000 years in seconds. + maxSeconds = int64(10000 * 365.25 * 24 * 60 * 60) + minSeconds = -maxSeconds +) + +// validateDuration determines whether the Duration is valid according to the +// definition in google/protobuf/duration.proto. A valid Duration +// may still be too large to fit into a time.Duration (the range of Duration +// is about 10,000 years, and the range of time.Duration is about 290). +func validateDuration(d *Duration) error { + if d == nil { + return errors.New("duration: nil Duration") + } + if d.Seconds < minSeconds || d.Seconds > maxSeconds { + return fmt.Errorf("duration: %#v: seconds out of range", d) + } + if d.Nanos <= -1e9 || d.Nanos >= 1e9 { + return fmt.Errorf("duration: %#v: nanos out of range", d) + } + // Seconds and Nanos must have the same sign, unless d.Nanos is zero. 
+ if (d.Seconds < 0 && d.Nanos > 0) || (d.Seconds > 0 && d.Nanos < 0) { + return fmt.Errorf("duration: %#v: seconds and nanos have different signs", d) + } + return nil +} + +// DurationFromProto converts a Duration to a time.Duration. DurationFromProto +// returns an error if the Duration is invalid or is too large to be +// represented in a time.Duration. +func DurationFromProto(p *Duration) (time.Duration, error) { + if err := validateDuration(p); err != nil { + return 0, err + } + d := time.Duration(p.Seconds) * time.Second + if int64(d/time.Second) != p.Seconds { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + if p.Nanos != 0 { + d += time.Duration(p.Nanos) + if (d < 0) != (p.Nanos < 0) { + return 0, fmt.Errorf("duration: %#v is out of range for time.Duration", p) + } + } + return d, nil +} + +// DurationProto converts a time.Duration to a Duration. +func DurationProto(d time.Duration) *Duration { + nanos := d.Nanoseconds() + secs := nanos / 1e9 + nanos -= secs * 1e9 + return &Duration{ + Seconds: secs, + Nanos: int32(nanos), + } +} diff --git a/vendor/github.com/gogo/protobuf/types/duration.pb.go b/vendor/github.com/gogo/protobuf/types/duration.pb.go new file mode 100644 index 000000000..7a81308b8 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/duration.pb.go @@ -0,0 +1,474 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/duration.proto + +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A Duration represents a signed, fixed-length span of time represented +// as a count of seconds and fractions of seconds at nanosecond +// resolution. It is independent of any calendar and concepts like "day" +// or "month". 
It is related to Timestamp in that the difference between +// two Timestamp values is a Duration and it can be added or subtracted +// from a Timestamp. Range is approximately +-10,000 years. +// +// # Examples +// +// Example 1: Compute Duration from two Timestamps in pseudo code. +// +// Timestamp start = ...; +// Timestamp end = ...; +// Duration duration = ...; +// +// duration.seconds = end.seconds - start.seconds; +// duration.nanos = end.nanos - start.nanos; +// +// if (duration.seconds < 0 && duration.nanos > 0) { +// duration.seconds += 1; +// duration.nanos -= 1000000000; +// } else if (durations.seconds > 0 && duration.nanos < 0) { +// duration.seconds -= 1; +// duration.nanos += 1000000000; +// } +// +// Example 2: Compute Timestamp from Timestamp + Duration in pseudo code. +// +// Timestamp start = ...; +// Duration duration = ...; +// Timestamp end = ...; +// +// end.seconds = start.seconds + duration.seconds; +// end.nanos = start.nanos + duration.nanos; +// +// if (end.nanos < 0) { +// end.seconds -= 1; +// end.nanos += 1000000000; +// } else if (end.nanos >= 1000000000) { +// end.seconds += 1; +// end.nanos -= 1000000000; +// } +// +// Example 3: Compute Duration from datetime.timedelta in Python. +// +// td = datetime.timedelta(days=3, minutes=10) +// duration = Duration() +// duration.FromTimedelta(td) +// +// # JSON Mapping +// +// In JSON format, the Duration type is encoded as a string rather than an +// object, where the string ends in the suffix "s" (indicating seconds) and +// is preceded by the number of seconds, with nanoseconds expressed as +// fractional seconds. For example, 3 seconds with 0 nanoseconds should be +// encoded in JSON format as "3s", while 3 seconds and 1 nanosecond should +// be expressed in JSON format as "3.000000001s", and 3 seconds and 1 +// microsecond should be expressed in JSON format as "3.000001s". +// +// +type Duration struct { + // Signed seconds of the span of time. 
Must be from -315,576,000,000 + // to +315,576,000,000 inclusive. Note: these bounds are computed from: + // 60 sec/min * 60 min/hr * 24 hr/day * 365.25 days/year * 10000 years + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Signed fractions of a second at nanosecond resolution of the span + // of time. Durations less than one second are represented with a 0 + // `seconds` field and a positive or negative `nanos` field. For durations + // of one second or more, a non-zero value for the `nanos` field must be + // of the same sign as the `seconds` field. Must be from -999,999,999 + // to +999,999,999 inclusive. + Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *Duration) Reset() { *m = Duration{} } +func (*Duration) ProtoMessage() {} +func (*Duration) Descriptor() ([]byte, []int) { return fileDescriptorDuration, []int{0} } +func (*Duration) XXX_WellKnownType() string { return "Duration" } + +func (m *Duration) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Duration) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Duration)(nil), "google.protobuf.Duration") +} +func (this *Duration) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Duration) + if !ok { + that2, ok := that.(Duration) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Seconds != that1.Seconds { + if this.Seconds < that1.Seconds { + return -1 + } + return 1 + } + if this.Nanos != that1.Nanos { + if this.Nanos < that1.Nanos { + return -1 + } + return 1 + } + return 0 +} +func (this *Duration) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Duration) + if !ok { + that2, ok := 
that.(Duration) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Seconds != that1.Seconds { + return false + } + if this.Nanos != that1.Nanos { + return false + } + return true +} +func (this *Duration) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Duration{") + s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n") + s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringDuration(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Duration) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Duration) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Seconds != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintDuration(dAtA, i, uint64(m.Seconds)) + } + if m.Nanos != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintDuration(dAtA, i, uint64(m.Nanos)) + } + return i, nil +} + +func encodeVarintDuration(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Duration) Size() (n int) { + var l int + _ = l + if m.Seconds != 0 { + n += 1 + sovDuration(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + sovDuration(uint64(m.Nanos)) + } + return n +} + +func sovDuration(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozDuration(x uint64) (n int) { + return sovDuration(uint64((x << 1) ^ uint64((int64(x) >> 
63)))) +} +func (m *Duration) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Duration: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Duration: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowDuration + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipDuration(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthDuration + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipDuration(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthDuration + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowDuration + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipDuration(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthDuration = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowDuration = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("google/protobuf/duration.proto", fileDescriptorDuration) } + +var fileDescriptorDuration = []byte{ + // 209 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 
0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0x29, 0x2d, 0x4a, + 0x2c, 0xc9, 0xcc, 0xcf, 0xd3, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0x56, + 0x5c, 0x1c, 0x2e, 0x50, 0x25, 0x42, 0x12, 0x5c, 0xec, 0xc5, 0xa9, 0xc9, 0xf9, 0x79, 0x29, 0xc5, + 0x12, 0x8c, 0x0a, 0x8c, 0x1a, 0xcc, 0x41, 0x30, 0xae, 0x90, 0x08, 0x17, 0x6b, 0x5e, 0x62, 0x5e, + 0x7e, 0xb1, 0x04, 0x93, 0x02, 0xa3, 0x06, 0x6b, 0x10, 0x84, 0xe3, 0x54, 0x7f, 0xe1, 0xa1, 0x1c, + 0xc3, 0x8d, 0x87, 0x72, 0x0c, 0x1f, 0x1e, 0xca, 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, + 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, 0xf0, 0xe1, + 0x91, 0x1c, 0xe3, 0x8a, 0xc7, 0x72, 0x8c, 0x5c, 0xc2, 0xc9, 0xf9, 0xb9, 0x7a, 0x68, 0x56, 0x3b, + 0xf1, 0xc2, 0x2c, 0x0e, 0x00, 0x89, 0x04, 0x30, 0x46, 0xb1, 0x96, 0x54, 0x16, 0xa4, 0x16, 0xff, + 0x60, 0x64, 0x5c, 0xc4, 0xc4, 0xec, 0x1e, 0xe0, 0xb4, 0x8a, 0x49, 0xce, 0x1d, 0xa2, 0x25, 0x00, + 0xaa, 0x45, 0x2f, 0x3c, 0x35, 0x27, 0xc7, 0x3b, 0x2f, 0xbf, 0x3c, 0x2f, 0x04, 0xa4, 0x32, 0x89, + 0x0d, 0x6c, 0x96, 0x31, 0x20, 0x00, 0x00, 0xff, 0xff, 0x89, 0xa6, 0x3c, 0xd3, 0xf6, 0x00, 0x00, + 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/duration_gogo.go b/vendor/github.com/gogo/protobuf/types/duration_gogo.go new file mode 100644 index 000000000..90e7670e2 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/duration_gogo.go @@ -0,0 +1,100 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. 
+// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ +package types + +import ( + "fmt" + "time" +) + +func NewPopulatedDuration(r interface { + Int63() int64 +}, easy bool) *Duration { + this := &Duration{} + maxSecs := time.Hour.Nanoseconds() / 1e9 + max := 2 * maxSecs + s := int64(r.Int63()) % max + s -= maxSecs + neg := int64(1) + if s < 0 { + neg = -1 + } + this.Seconds = s + this.Nanos = int32(neg * (r.Int63() % 1e9)) + return this +} + +func (d *Duration) String() string { + td, err := DurationFromProto(d) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return td.String() +} + +func NewPopulatedStdDuration(r interface { + Int63() int64 +}, easy bool) *time.Duration { + dur := NewPopulatedDuration(r, easy) + d, err := DurationFromProto(dur) + if err != nil { + return nil + } + return &d +} + +func SizeOfStdDuration(d time.Duration) int { + dur := DurationProto(d) + return dur.Size() +} + +func StdDurationMarshal(d time.Duration) ([]byte, error) { + size := SizeOfStdDuration(d) + buf := make([]byte, size) + _, err := StdDurationMarshalTo(d, buf) + return buf, err +} + +func StdDurationMarshalTo(d time.Duration, data []byte) (int, error) { + dur := DurationProto(d) + return dur.MarshalTo(data) +} + +func StdDurationUnmarshal(d *time.Duration, data []byte) error { + dur := &Duration{} + if err := dur.Unmarshal(data); err != nil { + return err + } + dd, err := DurationFromProto(dur) + if err != nil { + return err + } + *d = dd + return nil +} diff --git a/vendor/github.com/gogo/protobuf/types/duration_test.go b/vendor/github.com/gogo/protobuf/types/duration_test.go new file mode 100644 index 000000000..7f2bcb429 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/duration_test.go @@ -0,0 +1,120 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. 
+// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +import ( + "math" + "testing" + "time" + + "github.com/gogo/protobuf/proto" +) + +const ( + minGoSeconds = math.MinInt64 / int64(1e9) + maxGoSeconds = math.MaxInt64 / int64(1e9) +) + +var durationTests = []struct { + proto *Duration + isValid bool + inRange bool + dur time.Duration +}{ + // The zero duration. + {&Duration{Seconds: 0, Nanos: 0}, true, true, 0}, + // Some ordinary non-zero durations. 
+ {&Duration{Seconds: 100, Nanos: 0}, true, true, 100 * time.Second}, + {&Duration{Seconds: -100, Nanos: 0}, true, true, -100 * time.Second}, + {&Duration{Seconds: 100, Nanos: 987}, true, true, 100*time.Second + 987}, + {&Duration{Seconds: -100, Nanos: -987}, true, true, -(100*time.Second + 987)}, + // The largest duration representable in Go. + {&Duration{Seconds: maxGoSeconds, Nanos: int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, true, math.MaxInt64}, + // The smallest duration representable in Go. + {&Duration{Seconds: minGoSeconds, Nanos: int32(math.MinInt64 - 1e9*minGoSeconds)}, true, true, math.MinInt64}, + {nil, false, false, 0}, + {&Duration{Seconds: -100, Nanos: 987}, false, false, 0}, + {&Duration{Seconds: 100, Nanos: -987}, false, false, 0}, + {&Duration{Seconds: math.MinInt64, Nanos: 0}, false, false, 0}, + {&Duration{Seconds: math.MaxInt64, Nanos: 0}, false, false, 0}, + // The largest valid duration. + {&Duration{Seconds: maxSeconds, Nanos: 1e9 - 1}, true, false, 0}, + // The smallest valid duration. + {&Duration{Seconds: minSeconds, Nanos: -(1e9 - 1)}, true, false, 0}, + // The smallest invalid duration above the valid range. + {&Duration{Seconds: maxSeconds + 1, Nanos: 0}, false, false, 0}, + // The largest invalid duration below the valid range. + {&Duration{Seconds: minSeconds - 1, Nanos: -(1e9 - 1)}, false, false, 0}, + // One nanosecond past the largest duration representable in Go. + {&Duration{Seconds: maxGoSeconds, Nanos: int32(math.MaxInt64-1e9*maxGoSeconds) + 1}, true, false, 0}, + // One nanosecond past the smallest duration representable in Go. + {&Duration{Seconds: minGoSeconds, Nanos: int32(math.MinInt64-1e9*minGoSeconds) - 1}, true, false, 0}, + // One second past the largest duration representable in Go. + {&Duration{Seconds: maxGoSeconds + 1, Nanos: int32(math.MaxInt64 - 1e9*maxGoSeconds)}, true, false, 0}, + // One second past the smallest duration representable in Go. 
+ {&Duration{Seconds: minGoSeconds - 1, Nanos: int32(math.MinInt64 - 1e9*minGoSeconds)}, true, false, 0}, +} + +func TestValidateDuration(t *testing.T) { + for _, test := range durationTests { + err := validateDuration(test.proto) + gotValid := (err == nil) + if gotValid != test.isValid { + t.Errorf("validateDuration(%v) = %t, want %t", test.proto, gotValid, test.isValid) + } + } +} + +func TestDurationFromProto(t *testing.T) { + for _, test := range durationTests { + got, err := DurationFromProto(test.proto) + gotOK := (err == nil) + wantOK := test.isValid && test.inRange + if gotOK != wantOK { + t.Errorf("DurationFromProto(%v) ok = %t, want %t", test.proto, gotOK, wantOK) + } + if err == nil && got != test.dur { + t.Errorf("DurationFromProto(%v) = %v, want %v", test.proto, got, test.dur) + } + } +} + +func TestDurationProto(t *testing.T) { + for _, test := range durationTests { + if test.isValid && test.inRange { + got := DurationProto(test.dur) + if !proto.Equal(got, test.proto) { + t.Errorf("DurationProto(%v) = %v, want %v", test.dur, got, test.proto) + } + } + } +} diff --git a/vendor/github.com/gogo/protobuf/types/empty.pb.go b/vendor/github.com/gogo/protobuf/types/empty.pb.go new file mode 100644 index 000000000..dbf1fc7c3 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/empty.pb.go @@ -0,0 +1,417 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/empty.proto + +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A generic empty message that you can re-use to avoid defining duplicated +// empty messages in your APIs. A typical example is to use it as the request +// or the response type of an API method. 
For instance: +// +// service Foo { +// rpc Bar(google.protobuf.Empty) returns (google.protobuf.Empty); +// } +// +// The JSON representation for `Empty` is empty JSON object `{}`. +type Empty struct { +} + +func (m *Empty) Reset() { *m = Empty{} } +func (*Empty) ProtoMessage() {} +func (*Empty) Descriptor() ([]byte, []int) { return fileDescriptorEmpty, []int{0} } +func (*Empty) XXX_WellKnownType() string { return "Empty" } + +func init() { + proto.RegisterType((*Empty)(nil), "google.protobuf.Empty") +} +func (this *Empty) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Empty) + if !ok { + that2, ok := that.(Empty) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + return 0 +} +func (this *Empty) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Empty) + if !ok { + that2, ok := that.(Empty) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + return true +} +func (this *Empty) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 4) + s = append(s, "&types.Empty{") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringEmpty(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Empty) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Empty) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func encodeVarintEmpty(dAtA []byte, 
offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedEmpty(r randyEmpty, easy bool) *Empty { + this := &Empty{} + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyEmpty interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneEmpty(r randyEmpty) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringEmpty(r randyEmpty) string { + v1 := r.Intn(100) + tmps := make([]rune, v1) + for i := 0; i < v1; i++ { + tmps[i] = randUTF8RuneEmpty(r) + } + return string(tmps) +} +func randUnrecognizedEmpty(r randyEmpty, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldEmpty(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldEmpty(dAtA []byte, r randyEmpty, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + v2 := r.Int63() + if r.Intn(2) == 0 { + v2 *= -1 + } + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(v2)) + case 1: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateEmpty(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), 
byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateEmpty(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Empty) Size() (n int) { + var l int + _ = l + return n +} + +func sovEmpty(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozEmpty(x uint64) (n int) { + return sovEmpty(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Empty) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Empty{`, + `}`, + }, "") + return s +} +func valueToStringEmpty(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Empty) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowEmpty + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Empty: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Empty: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipEmpty(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthEmpty + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipEmpty(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthEmpty + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowEmpty + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipEmpty(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthEmpty = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowEmpty = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("google/protobuf/empty.proto", fileDescriptorEmpty) } + +var fileDescriptorEmpty = []byte{ + // 176 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4e, 0xcf, 
0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcd, 0x2d, 0x28, + 0xa9, 0xd4, 0x03, 0x73, 0x85, 0xf8, 0x21, 0x92, 0x7a, 0x30, 0x49, 0x25, 0x76, 0x2e, 0x56, 0x57, + 0x90, 0xbc, 0x53, 0x0b, 0xe3, 0x85, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, + 0xc7, 0xf8, 0xe3, 0xa1, 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, + 0x92, 0x63, 0xbc, 0xf0, 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, + 0x80, 0xc4, 0x1f, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x19, 0xe8, 0xc4, 0x05, + 0x36, 0x2e, 0x00, 0xc4, 0x0d, 0x60, 0x8c, 0x62, 0x2d, 0xa9, 0x2c, 0x48, 0x2d, 0xfe, 0xc1, 0xc8, + 0xb8, 0x88, 0x89, 0xd9, 0x3d, 0xc0, 0x69, 0x15, 0x93, 0x9c, 0x3b, 0x44, 0x7d, 0x00, 0x54, 0xbd, + 0x5e, 0x78, 0x6a, 0x4e, 0x8e, 0x77, 0x5e, 0x7e, 0x79, 0x5e, 0x08, 0x48, 0x65, 0x12, 0x1b, 0xd8, + 0x20, 0x63, 0x40, 0x00, 0x00, 0x00, 0xff, 0xff, 0x1b, 0xcd, 0x88, 0x0e, 0xc6, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/field_mask.pb.go b/vendor/github.com/gogo/protobuf/types/field_mask.pb.go new file mode 100644 index 000000000..f86d354d6 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/field_mask.pb.go @@ -0,0 +1,704 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/field_mask.proto + +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// `FieldMask` represents a set of symbolic field paths, for example: +// +// paths: "f.a" +// paths: "f.b.d" +// +// Here `f` represents a field in some root message, `a` and `b` +// fields in the message found in `f`, and `d` a field found in the +// message in `f.b`. 
+// +// Field masks are used to specify a subset of fields that should be +// returned by a get operation or modified by an update operation. +// Field masks also have a custom JSON encoding (see below). +// +// # Field Masks in Projections +// +// When used in the context of a projection, a response message or +// sub-message is filtered by the API to only contain those fields as +// specified in the mask. For example, if the mask in the previous +// example is applied to a response message as follows: +// +// f { +// a : 22 +// b { +// d : 1 +// x : 2 +// } +// y : 13 +// } +// z: 8 +// +// The result will not contain specific values for fields x,y and z +// (their value will be set to the default, and omitted in proto text +// output): +// +// +// f { +// a : 22 +// b { +// d : 1 +// } +// } +// +// A repeated field is not allowed except at the last position of a +// paths string. +// +// If a FieldMask object is not present in a get operation, the +// operation applies to all fields (as if a FieldMask of all fields +// had been specified). +// +// Note that a field mask does not necessarily apply to the +// top-level response message. In case of a REST get operation, the +// field mask applies directly to the response, but in case of a REST +// list operation, the mask instead applies to each individual message +// in the returned resource list. In case of a REST custom method, +// other definitions may be used. Where the mask applies will be +// clearly documented together with its declaration in the API. In +// any case, the effect on the returned resource/resources is required +// behavior for APIs. +// +// # Field Masks in Update Operations +// +// A field mask in update operations specifies which fields of the +// targeted resource are going to be updated. The API is required +// to only change the values of the fields as specified in the mask +// and leave the others untouched. 
If a resource is passed in to +// describe the updated values, the API ignores the values of all +// fields not covered by the mask. +// +// If a repeated field is specified for an update operation, the existing +// repeated values in the target resource will be overwritten by the new values. +// Note that a repeated field is only allowed in the last position of a `paths` +// string. +// +// If a sub-message is specified in the last position of the field mask for an +// update operation, then the existing sub-message in the target resource is +// overwritten. Given the target message: +// +// f { +// b { +// d : 1 +// x : 2 +// } +// c : 1 +// } +// +// And an update message: +// +// f { +// b { +// d : 10 +// } +// } +// +// then if the field mask is: +// +// paths: "f.b" +// +// then the result will be: +// +// f { +// b { +// d : 10 +// } +// c : 1 +// } +// +// However, if the update mask was: +// +// paths: "f.b.d" +// +// then the result would be: +// +// f { +// b { +// d : 10 +// x : 2 +// } +// c : 1 +// } +// +// In order to reset a field's value to the default, the field must +// be in the mask and set to the default value in the provided resource. +// Hence, in order to reset all fields of a resource, provide a default +// instance of the resource and set all fields in the mask, or do +// not provide a mask as described below. +// +// If a field mask is not present on update, the operation applies to +// all fields (as if a field mask of all fields has been specified). +// Note that in the presence of schema evolution, this may mean that +// fields the client does not know and has therefore not filled into +// the request will be reset to their default. If this is unwanted +// behavior, a specific service may require a client to always specify +// a field mask, producing an error if not. +// +// As with get operations, the location of the resource which +// describes the updated values in the request message depends on the +// operation kind. 
In any case, the effect of the field mask is +// required to be honored by the API. +// +// ## Considerations for HTTP REST +// +// The HTTP kind of an update operation which uses a field mask must +// be set to PATCH instead of PUT in order to satisfy HTTP semantics +// (PUT must only be used for full updates). +// +// # JSON Encoding of Field Masks +// +// In JSON, a field mask is encoded as a single string where paths are +// separated by a comma. Fields name in each path are converted +// to/from lower-camel naming conventions. +// +// As an example, consider the following message declarations: +// +// message Profile { +// User user = 1; +// Photo photo = 2; +// } +// message User { +// string display_name = 1; +// string address = 2; +// } +// +// In proto a field mask for `Profile` may look as such: +// +// mask { +// paths: "user.display_name" +// paths: "photo" +// } +// +// In JSON, the same mask is represented as below: +// +// { +// mask: "user.displayName,photo" +// } +// +// # Field Masks and Oneof Fields +// +// Field masks treat fields in oneofs just as regular fields. Consider the +// following message: +// +// message SampleMessage { +// oneof test_oneof { +// string name = 4; +// SubMessage sub_message = 9; +// } +// } +// +// The field mask can be: +// +// mask { +// paths: "name" +// } +// +// Or: +// +// mask { +// paths: "sub_message" +// } +// +// Note that oneof type names ("test_oneof" in this case) cannot be used in +// paths. +// +// ## Field Mask Verification +// +// The implementation of the all the API methods, which have any FieldMask type +// field in the request, should verify the included field paths, and return +// `INVALID_ARGUMENT` error if any path is duplicated or unmappable. +type FieldMask struct { + // The set of field mask paths. 
+ Paths []string `protobuf:"bytes,1,rep,name=paths" json:"paths,omitempty"` +} + +func (m *FieldMask) Reset() { *m = FieldMask{} } +func (*FieldMask) ProtoMessage() {} +func (*FieldMask) Descriptor() ([]byte, []int) { return fileDescriptorFieldMask, []int{0} } + +func (m *FieldMask) GetPaths() []string { + if m != nil { + return m.Paths + } + return nil +} + +func init() { + proto.RegisterType((*FieldMask)(nil), "google.protobuf.FieldMask") +} +func (this *FieldMask) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*FieldMask) + if !ok { + that2, ok := that.(FieldMask) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if len(this.Paths) != len(that1.Paths) { + if len(this.Paths) < len(that1.Paths) { + return -1 + } + return 1 + } + for i := range this.Paths { + if this.Paths[i] != that1.Paths[i] { + if this.Paths[i] < that1.Paths[i] { + return -1 + } + return 1 + } + } + return 0 +} +func (this *FieldMask) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FieldMask) + if !ok { + that2, ok := that.(FieldMask) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Paths) != len(that1.Paths) { + return false + } + for i := range this.Paths { + if this.Paths[i] != that1.Paths[i] { + return false + } + } + return true +} +func (this *FieldMask) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.FieldMask{") + s = append(s, "Paths: "+fmt.Sprintf("%#v", this.Paths)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringFieldMask(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := 
reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *FieldMask) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FieldMask) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + dAtA[i] = 0xa + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func encodeVarintFieldMask(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedFieldMask(r randyFieldMask, easy bool) *FieldMask { + this := &FieldMask{} + v1 := r.Intn(10) + this.Paths = make([]string, v1) + for i := 0; i < v1; i++ { + this.Paths[i] = string(randStringFieldMask(r)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyFieldMask interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneFieldMask(r randyFieldMask) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringFieldMask(r randyFieldMask) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneFieldMask(r) + } + return string(tmps) +} +func randUnrecognizedFieldMask(r randyFieldMask, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldFieldMask(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func 
randFieldFieldMask(dAtA []byte, r randyFieldMask, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateFieldMask(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateFieldMask(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *FieldMask) Size() (n int) { + var l int + _ = l + if len(m.Paths) > 0 { + for _, s := range m.Paths { + l = len(s) + n += 1 + l + sovFieldMask(uint64(l)) + } + } + return n +} + +func sovFieldMask(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozFieldMask(x uint64) (n int) { + return sovFieldMask(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *FieldMask) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FieldMask{`, + `Paths:` + fmt.Sprintf("%v", this.Paths) + `,`, + `}`, + }, "") + return s +} +func valueToStringFieldMask(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *FieldMask) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFieldMask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FieldMask: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FieldMask: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Paths", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowFieldMask + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthFieldMask + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Paths = append(m.Paths, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipFieldMask(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthFieldMask + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipFieldMask(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthFieldMask + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowFieldMask + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipFieldMask(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthFieldMask = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowFieldMask = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("google/protobuf/field_mask.proto", fileDescriptorFieldMask) } + +var fileDescriptorFieldMask = []byte{ + // 200 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x48, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x4f, 0xcb, 0x4c, 0xcd, + 0x49, 0x89, 0xcf, 0x4d, 0x2c, 
0xce, 0xd6, 0x03, 0x8b, 0x09, 0xf1, 0x43, 0x54, 0xe8, 0xc1, 0x54, + 0x28, 0x29, 0x72, 0x71, 0xba, 0x81, 0x14, 0xf9, 0x26, 0x16, 0x67, 0x0b, 0x89, 0x70, 0xb1, 0x16, + 0x24, 0x96, 0x64, 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6b, 0x70, 0x06, 0x41, 0x38, 0x4e, 0xad, 0x8c, + 0x17, 0x1e, 0xca, 0x31, 0xdc, 0x78, 0x28, 0xc7, 0xf0, 0xe1, 0xa1, 0x1c, 0xe3, 0x8f, 0x87, 0x72, + 0x8c, 0x0d, 0x8f, 0xe4, 0x18, 0x57, 0x3c, 0x92, 0x63, 0x3c, 0xf1, 0x48, 0x8e, 0xf1, 0xc2, 0x23, + 0x39, 0xc6, 0x07, 0x8f, 0xe4, 0x18, 0x5f, 0x3c, 0x92, 0x63, 0xf8, 0x00, 0x12, 0x7f, 0x2c, 0xc7, + 0xc8, 0x25, 0x9c, 0x9c, 0x9f, 0xab, 0x87, 0x66, 0x95, 0x13, 0x1f, 0xdc, 0xa2, 0x00, 0x90, 0x50, + 0x00, 0x63, 0x14, 0x6b, 0x49, 0x65, 0x41, 0x6a, 0xf1, 0x22, 0x26, 0x66, 0xf7, 0x00, 0xa7, 0x55, + 0x4c, 0x72, 0xee, 0x10, 0x0d, 0x01, 0x50, 0x0d, 0x7a, 0xe1, 0xa9, 0x39, 0x39, 0xde, 0x79, 0xf9, + 0xe5, 0x79, 0x21, 0x20, 0x65, 0x49, 0x6c, 0x60, 0x93, 0x8c, 0x01, 0x01, 0x00, 0x00, 0xff, 0xff, + 0x0e, 0x49, 0x68, 0x2a, 0xe6, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/source_context.pb.go b/vendor/github.com/gogo/protobuf/types/source_context.pb.go new file mode 100644 index 000000000..ed12c14e4 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/source_context.pb.go @@ -0,0 +1,473 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/source_context.proto + +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// `SourceContext` represents information about the source of a +// protobuf element, like the file in which it is defined. +type SourceContext struct { + // The path-qualified name of the .proto file that contained the associated + // protobuf element. 
For example: `"google/protobuf/source_context.proto"`. + FileName string `protobuf:"bytes,1,opt,name=file_name,json=fileName,proto3" json:"file_name,omitempty"` +} + +func (m *SourceContext) Reset() { *m = SourceContext{} } +func (*SourceContext) ProtoMessage() {} +func (*SourceContext) Descriptor() ([]byte, []int) { return fileDescriptorSourceContext, []int{0} } + +func (m *SourceContext) GetFileName() string { + if m != nil { + return m.FileName + } + return "" +} + +func init() { + proto.RegisterType((*SourceContext)(nil), "google.protobuf.SourceContext") +} +func (this *SourceContext) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*SourceContext) + if !ok { + that2, ok := that.(SourceContext) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.FileName != that1.FileName { + if this.FileName < that1.FileName { + return -1 + } + return 1 + } + return 0 +} +func (this *SourceContext) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*SourceContext) + if !ok { + that2, ok := that.(SourceContext) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.FileName != that1.FileName { + return false + } + return true +} +func (this *SourceContext) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.SourceContext{") + s = append(s, "FileName: "+fmt.Sprintf("%#v", this.FileName)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringSourceContext(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, 
typ, pv) +} +func (m *SourceContext) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *SourceContext) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.FileName) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintSourceContext(dAtA, i, uint64(len(m.FileName))) + i += copy(dAtA[i:], m.FileName) + } + return i, nil +} + +func encodeVarintSourceContext(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedSourceContext(r randySourceContext, easy bool) *SourceContext { + this := &SourceContext{} + this.FileName = string(randStringSourceContext(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randySourceContext interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneSourceContext(r randySourceContext) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringSourceContext(r randySourceContext) string { + v1 := r.Intn(100) + tmps := make([]rune, v1) + for i := 0; i < v1; i++ { + tmps[i] = randUTF8RuneSourceContext(r) + } + return string(tmps) +} +func randUnrecognizedSourceContext(r randySourceContext, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldSourceContext(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldSourceContext(dAtA []byte, r randySourceContext, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = 
encodeVarintPopulateSourceContext(dAtA, uint64(key)) + v2 := r.Int63() + if r.Intn(2) == 0 { + v2 *= -1 + } + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(v2)) + case 1: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateSourceContext(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateSourceContext(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *SourceContext) Size() (n int) { + var l int + _ = l + l = len(m.FileName) + if l > 0 { + n += 1 + l + sovSourceContext(uint64(l)) + } + return n +} + +func sovSourceContext(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozSourceContext(x uint64) (n int) { + return sovSourceContext(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *SourceContext) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&SourceContext{`, + `FileName:` + fmt.Sprintf("%v", this.FileName) + `,`, + `}`, + }, "") + return s +} +func valueToStringSourceContext(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *SourceContext) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := 
uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSourceContext + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SourceContext: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SourceContext: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FileName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowSourceContext + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthSourceContext + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.FileName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipSourceContext(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthSourceContext + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipSourceContext(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift 
>= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthSourceContext + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowSourceContext + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipSourceContext(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthSourceContext = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowSourceContext = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("google/protobuf/source_context.proto", fileDescriptorSourceContext) } + +var fileDescriptorSourceContext = []byte{ + // 212 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x52, 0x49, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xce, 0x2f, 0x2d, + 0x4a, 0x4e, 0x8d, 0x4f, 0xce, 0xcf, 0x2b, 0x49, 0xad, 0x28, 0xd1, 0x03, 0x8b, 0x0b, 0xf1, 0x43, + 
0x54, 0xe9, 0xc1, 0x54, 0x29, 0xe9, 0x70, 0xf1, 0x06, 0x83, 0x15, 0x3a, 0x43, 0xd4, 0x09, 0x49, + 0x73, 0x71, 0xa6, 0x65, 0xe6, 0xa4, 0xc6, 0xe7, 0x25, 0xe6, 0xa6, 0x4a, 0x30, 0x2a, 0x30, 0x6a, + 0x70, 0x06, 0x71, 0x80, 0x04, 0xfc, 0x12, 0x73, 0x53, 0x9d, 0x3a, 0x19, 0x2f, 0x3c, 0x94, 0x63, + 0xb8, 0xf1, 0x50, 0x8e, 0xe1, 0xc3, 0x43, 0x39, 0xc6, 0x1f, 0x0f, 0xe5, 0x18, 0x1b, 0x1e, 0xc9, + 0x31, 0xae, 0x78, 0x24, 0xc7, 0x78, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, 0x0f, 0x1e, + 0xc9, 0x31, 0xbe, 0x78, 0x24, 0xc7, 0xf0, 0x01, 0x24, 0xfe, 0x58, 0x8e, 0x91, 0x4b, 0x38, 0x39, + 0x3f, 0x57, 0x0f, 0xcd, 0x56, 0x27, 0x21, 0x14, 0x3b, 0x03, 0x40, 0xc2, 0x01, 0x8c, 0x51, 0xac, + 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x8b, 0x98, 0x98, 0xdd, 0x03, 0x9c, 0x56, 0x31, 0xc9, 0xb9, 0x43, + 0x34, 0x05, 0x40, 0x35, 0xe9, 0x85, 0xa7, 0xe6, 0xe4, 0x78, 0xe7, 0xe5, 0x97, 0xe7, 0x85, 0x80, + 0x94, 0x25, 0xb1, 0x81, 0x4d, 0x33, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x4b, 0x7a, 0x1a, 0x45, + 0xf9, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/struct.pb.go b/vendor/github.com/gogo/protobuf/types/struct.pb.go new file mode 100644 index 000000000..1987b80bd --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/struct.pb.go @@ -0,0 +1,1797 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/struct.proto + +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strconv "strconv" + +import strings "strings" +import reflect "reflect" +import sortkeys "github.com/gogo/protobuf/sortkeys" + +import binary "encoding/binary" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// `NullValue` is a singleton enumeration to represent the null value for the +// `Value` type union. +// +// The JSON representation for `NullValue` is JSON `null`. 
+type NullValue int32 + +const ( + // Null value. + NULL_VALUE NullValue = 0 +) + +var NullValue_name = map[int32]string{ + 0: "NULL_VALUE", +} +var NullValue_value = map[string]int32{ + "NULL_VALUE": 0, +} + +func (NullValue) EnumDescriptor() ([]byte, []int) { return fileDescriptorStruct, []int{0} } +func (NullValue) XXX_WellKnownType() string { return "NullValue" } + +// `Struct` represents a structured data value, consisting of fields +// which map to dynamically typed values. In some languages, `Struct` +// might be supported by a native representation. For example, in +// scripting languages like JS a struct is represented as an +// object. The details of that representation are described together +// with the proto support for the language. +// +// The JSON representation for `Struct` is JSON object. +type Struct struct { + // Unordered map of dynamically typed values. + Fields map[string]*Value `protobuf:"bytes,1,rep,name=fields" json:"fields,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Struct) Reset() { *m = Struct{} } +func (*Struct) ProtoMessage() {} +func (*Struct) Descriptor() ([]byte, []int) { return fileDescriptorStruct, []int{0} } +func (*Struct) XXX_WellKnownType() string { return "Struct" } + +func (m *Struct) GetFields() map[string]*Value { + if m != nil { + return m.Fields + } + return nil +} + +// `Value` represents a dynamically typed value which can be either +// null, a number, a string, a boolean, a recursive struct value, or a +// list of values. A producer of value is expected to set one of that +// variants, absence of any variant indicates an error. +// +// The JSON representation for `Value` is JSON value. +type Value struct { + // The kind of value. 
+ // + // Types that are valid to be assigned to Kind: + // *Value_NullValue + // *Value_NumberValue + // *Value_StringValue + // *Value_BoolValue + // *Value_StructValue + // *Value_ListValue + Kind isValue_Kind `protobuf_oneof:"kind"` +} + +func (m *Value) Reset() { *m = Value{} } +func (*Value) ProtoMessage() {} +func (*Value) Descriptor() ([]byte, []int) { return fileDescriptorStruct, []int{1} } +func (*Value) XXX_WellKnownType() string { return "Value" } + +type isValue_Kind interface { + isValue_Kind() + Equal(interface{}) bool + MarshalTo([]byte) (int, error) + Size() int +} + +type Value_NullValue struct { + NullValue NullValue `protobuf:"varint,1,opt,name=null_value,json=nullValue,proto3,enum=google.protobuf.NullValue,oneof"` +} +type Value_NumberValue struct { + NumberValue float64 `protobuf:"fixed64,2,opt,name=number_value,json=numberValue,proto3,oneof"` +} +type Value_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} +type Value_BoolValue struct { + BoolValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` +} +type Value_StructValue struct { + StructValue *Struct `protobuf:"bytes,5,opt,name=struct_value,json=structValue,oneof"` +} +type Value_ListValue struct { + ListValue *ListValue `protobuf:"bytes,6,opt,name=list_value,json=listValue,oneof"` +} + +func (*Value_NullValue) isValue_Kind() {} +func (*Value_NumberValue) isValue_Kind() {} +func (*Value_StringValue) isValue_Kind() {} +func (*Value_BoolValue) isValue_Kind() {} +func (*Value_StructValue) isValue_Kind() {} +func (*Value_ListValue) isValue_Kind() {} + +func (m *Value) GetKind() isValue_Kind { + if m != nil { + return m.Kind + } + return nil +} + +func (m *Value) GetNullValue() NullValue { + if x, ok := m.GetKind().(*Value_NullValue); ok { + return x.NullValue + } + return NULL_VALUE +} + +func (m *Value) GetNumberValue() float64 { + if x, ok := m.GetKind().(*Value_NumberValue); ok { + return 
x.NumberValue + } + return 0 +} + +func (m *Value) GetStringValue() string { + if x, ok := m.GetKind().(*Value_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Value) GetBoolValue() bool { + if x, ok := m.GetKind().(*Value_BoolValue); ok { + return x.BoolValue + } + return false +} + +func (m *Value) GetStructValue() *Struct { + if x, ok := m.GetKind().(*Value_StructValue); ok { + return x.StructValue + } + return nil +} + +func (m *Value) GetListValue() *ListValue { + if x, ok := m.GetKind().(*Value_ListValue); ok { + return x.ListValue + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Value) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Value_OneofMarshaler, _Value_OneofUnmarshaler, _Value_OneofSizer, []interface{}{ + (*Value_NullValue)(nil), + (*Value_NumberValue)(nil), + (*Value_StringValue)(nil), + (*Value_BoolValue)(nil), + (*Value_StructValue)(nil), + (*Value_ListValue)(nil), + } +} + +func _Value_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + _ = b.EncodeVarint(1<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + _ = b.EncodeVarint(2<<3 | proto.WireFixed64) + _ = b.EncodeFixed64(math.Float64bits(x.NumberValue)) + case *Value_StringValue: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.StringValue) + case *Value_BoolValue: + t := uint64(0) + if x.BoolValue { + t = 1 + } + _ = b.EncodeVarint(4<<3 | proto.WireVarint) + _ = b.EncodeVarint(t) + case *Value_StructValue: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StructValue); err != nil { + return err + } + case *Value_ListValue: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := 
b.EncodeMessage(x.ListValue); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Value.Kind has unexpected type %T", x) + } + return nil +} + +func _Value_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Value) + switch tag { + case 1: // kind.null_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_NullValue{NullValue(x)} + return true, err + case 2: // kind.number_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Kind = &Value_NumberValue{math.Float64frombits(x)} + return true, err + case 3: // kind.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Kind = &Value_StringValue{x} + return true, err + case 4: // kind.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Kind = &Value_BoolValue{x != 0} + return true, err + case 5: // kind.struct_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(Struct) + err := b.DecodeMessage(msg) + m.Kind = &Value_StructValue{msg} + return true, err + case 6: // kind.list_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ListValue) + err := b.DecodeMessage(msg) + m.Kind = &Value_ListValue{msg} + return true, err + default: + return false, nil + } +} + +func _Value_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Value) + // kind + switch x := m.Kind.(type) { + case *Value_NullValue: + n += proto.SizeVarint(1<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.NullValue)) + case *Value_NumberValue: + n += proto.SizeVarint(2<<3 | proto.WireFixed64) + n += 8 + case *Value_StringValue: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *Value_BoolValue: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += 1 + case *Value_StructValue: + s := proto.Size(x.StructValue) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Value_ListValue: + s := proto.Size(x.ListValue) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// `ListValue` is a wrapper around a repeated field of values. +// +// The JSON representation for `ListValue` is JSON array. +type ListValue struct { + // Repeated field of dynamically typed values. + Values []*Value `protobuf:"bytes,1,rep,name=values" json:"values,omitempty"` +} + +func (m *ListValue) Reset() { *m = ListValue{} } +func (*ListValue) ProtoMessage() {} +func (*ListValue) Descriptor() ([]byte, []int) { return fileDescriptorStruct, []int{2} } +func (*ListValue) XXX_WellKnownType() string { return "ListValue" } + +func (m *ListValue) GetValues() []*Value { + if m != nil { + return m.Values + } + return nil +} + +func init() { + proto.RegisterType((*Struct)(nil), "google.protobuf.Struct") + proto.RegisterType((*Value)(nil), "google.protobuf.Value") + proto.RegisterType((*ListValue)(nil), "google.protobuf.ListValue") + proto.RegisterEnum("google.protobuf.NullValue", NullValue_name, NullValue_value) +} +func (x NullValue) String() string { + s, ok := NullValue_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Struct) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Struct) + if !ok { + that2, ok := that.(Struct) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Fields) != len(that1.Fields) { + return false + } + 
for i := range this.Fields { + if !this.Fields[i].Equal(that1.Fields[i]) { + return false + } + } + return true +} +func (this *Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value) + if !ok { + that2, ok := that.(Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if that1.Kind == nil { + if this.Kind != nil { + return false + } + } else if this.Kind == nil { + return false + } else if !this.Kind.Equal(that1.Kind) { + return false + } + return true +} +func (this *Value_NullValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_NullValue) + if !ok { + that2, ok := that.(Value_NullValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.NullValue != that1.NullValue { + return false + } + return true +} +func (this *Value_NumberValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_NumberValue) + if !ok { + that2, ok := that.(Value_NumberValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.NumberValue != that1.NumberValue { + return false + } + return true +} +func (this *Value_StringValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_StringValue) + if !ok { + that2, ok := that.(Value_StringValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.StringValue != that1.StringValue { + return false + } + return true +} +func (this *Value_BoolValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + 
} + + that1, ok := that.(*Value_BoolValue) + if !ok { + that2, ok := that.(Value_BoolValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.BoolValue != that1.BoolValue { + return false + } + return true +} +func (this *Value_StructValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_StructValue) + if !ok { + that2, ok := that.(Value_StructValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.StructValue.Equal(that1.StructValue) { + return false + } + return true +} +func (this *Value_ListValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Value_ListValue) + if !ok { + that2, ok := that.(Value_ListValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !this.ListValue.Equal(that1.ListValue) { + return false + } + return true +} +func (this *ListValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*ListValue) + if !ok { + that2, ok := that.(ListValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if len(this.Values) != len(that1.Values) { + return false + } + for i := range this.Values { + if !this.Values[i].Equal(that1.Values[i]) { + return false + } + } + return true +} +func (this *Struct) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Struct{") + keysForFields := make([]string, 0, len(this.Fields)) + for k := range this.Fields { + keysForFields = append(keysForFields, k) + } + sortkeys.Strings(keysForFields) + 
mapStringForFields := "map[string]*Value{" + for _, k := range keysForFields { + mapStringForFields += fmt.Sprintf("%#v: %#v,", k, this.Fields[k]) + } + mapStringForFields += "}" + if this.Fields != nil { + s = append(s, "Fields: "+mapStringForFields+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&types.Value{") + if this.Kind != nil { + s = append(s, "Kind: "+fmt.Sprintf("%#v", this.Kind)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Value_NullValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_NullValue{` + + `NullValue:` + fmt.Sprintf("%#v", this.NullValue) + `}`}, ", ") + return s +} +func (this *Value_NumberValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_NumberValue{` + + `NumberValue:` + fmt.Sprintf("%#v", this.NumberValue) + `}`}, ", ") + return s +} +func (this *Value_StringValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_StringValue{` + + `StringValue:` + fmt.Sprintf("%#v", this.StringValue) + `}`}, ", ") + return s +} +func (this *Value_BoolValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_BoolValue{` + + `BoolValue:` + fmt.Sprintf("%#v", this.BoolValue) + `}`}, ", ") + return s +} +func (this *Value_StructValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_StructValue{` + + `StructValue:` + fmt.Sprintf("%#v", this.StructValue) + `}`}, ", ") + return s +} +func (this *Value_ListValue) GoString() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&types.Value_ListValue{` + + `ListValue:` + fmt.Sprintf("%#v", this.ListValue) + `}`}, ", ") + return s +} +func (this *ListValue) 
GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.ListValue{") + if this.Values != nil { + s = append(s, "Values: "+fmt.Sprintf("%#v", this.Values)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringStruct(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Struct) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Struct) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Fields) > 0 { + for k := range m.Fields { + dAtA[i] = 0xa + i++ + v := m.Fields[k] + msgSize := 0 + if v != nil { + msgSize = v.Size() + msgSize += 1 + sovStruct(uint64(msgSize)) + } + mapSize := 1 + len(k) + sovStruct(uint64(len(k))) + msgSize + i = encodeVarintStruct(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintStruct(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + if v != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintStruct(dAtA, i, uint64(v.Size())) + n1, err := v.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + } + } + return i, nil +} + +func (m *Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Value) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Kind != nil { + nn2, err := m.Kind.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn2 + } + return i, nil +} + +func (m *Value_NullValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x8 + i++ + i = encodeVarintStruct(dAtA, i, 
uint64(m.NullValue)) + return i, nil +} +func (m *Value_NumberValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x11 + i++ + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.NumberValue)))) + i += 8 + return i, nil +} +func (m *Value_StringValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x1a + i++ + i = encodeVarintStruct(dAtA, i, uint64(len(m.StringValue))) + i += copy(dAtA[i:], m.StringValue) + return i, nil +} +func (m *Value_BoolValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x20 + i++ + if m.BoolValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} +func (m *Value_StructValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.StructValue != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintStruct(dAtA, i, uint64(m.StructValue.Size())) + n3, err := m.StructValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} +func (m *Value_ListValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.ListValue != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintStruct(dAtA, i, uint64(m.ListValue.Size())) + n4, err := m.ListValue.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n4 + } + return i, nil +} +func (m *ListValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ListValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Values) > 0 { + for _, msg := range m.Values { + dAtA[i] = 0xa + i++ + i = encodeVarintStruct(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func encodeVarintStruct(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return 
offset + 1 +} +func NewPopulatedStruct(r randyStruct, easy bool) *Struct { + this := &Struct{} + if r.Intn(10) == 0 { + v1 := r.Intn(10) + this.Fields = make(map[string]*Value) + for i := 0; i < v1; i++ { + this.Fields[randStringStruct(r)] = NewPopulatedValue(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedValue(r randyStruct, easy bool) *Value { + this := &Value{} + oneofNumber_Kind := []int32{1, 2, 3, 4, 5, 6}[r.Intn(6)] + switch oneofNumber_Kind { + case 1: + this.Kind = NewPopulatedValue_NullValue(r, easy) + case 2: + this.Kind = NewPopulatedValue_NumberValue(r, easy) + case 3: + this.Kind = NewPopulatedValue_StringValue(r, easy) + case 4: + this.Kind = NewPopulatedValue_BoolValue(r, easy) + case 5: + this.Kind = NewPopulatedValue_StructValue(r, easy) + case 6: + this.Kind = NewPopulatedValue_ListValue(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedValue_NullValue(r randyStruct, easy bool) *Value_NullValue { + this := &Value_NullValue{} + this.NullValue = NullValue([]int32{0}[r.Intn(1)]) + return this +} +func NewPopulatedValue_NumberValue(r randyStruct, easy bool) *Value_NumberValue { + this := &Value_NumberValue{} + this.NumberValue = float64(r.Float64()) + if r.Intn(2) == 0 { + this.NumberValue *= -1 + } + return this +} +func NewPopulatedValue_StringValue(r randyStruct, easy bool) *Value_StringValue { + this := &Value_StringValue{} + this.StringValue = string(randStringStruct(r)) + return this +} +func NewPopulatedValue_BoolValue(r randyStruct, easy bool) *Value_BoolValue { + this := &Value_BoolValue{} + this.BoolValue = bool(bool(r.Intn(2) == 0)) + return this +} +func NewPopulatedValue_StructValue(r randyStruct, easy bool) *Value_StructValue { + this := &Value_StructValue{} + this.StructValue = NewPopulatedStruct(r, easy) + return this +} +func NewPopulatedValue_ListValue(r randyStruct, easy bool) *Value_ListValue { + this := &Value_ListValue{} + this.ListValue = 
NewPopulatedListValue(r, easy) + return this +} +func NewPopulatedListValue(r randyStruct, easy bool) *ListValue { + this := &ListValue{} + if r.Intn(10) == 0 { + v2 := r.Intn(5) + this.Values = make([]*Value, v2) + for i := 0; i < v2; i++ { + this.Values[i] = NewPopulatedValue(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyStruct interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneStruct(r randyStruct) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringStruct(r randyStruct) string { + v3 := r.Intn(100) + tmps := make([]rune, v3) + for i := 0; i < v3; i++ { + tmps[i] = randUTF8RuneStruct(r) + } + return string(tmps) +} +func randUnrecognizedStruct(r randyStruct, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldStruct(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldStruct(dAtA []byte, r randyStruct, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + v4 := r.Int63() + if r.Intn(2) == 0 { + v4 *= -1 + } + dAtA = encodeVarintPopulateStruct(dAtA, uint64(v4)) + case 1: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateStruct(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateStruct(dAtA, uint64(key)) + 
dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateStruct(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Struct) Size() (n int) { + var l int + _ = l + if len(m.Fields) > 0 { + for k, v := range m.Fields { + _ = k + _ = v + l = 0 + if v != nil { + l = v.Size() + l += 1 + sovStruct(uint64(l)) + } + mapEntrySize := 1 + len(k) + sovStruct(uint64(len(k))) + l + n += mapEntrySize + 1 + sovStruct(uint64(mapEntrySize)) + } + } + return n +} + +func (m *Value) Size() (n int) { + var l int + _ = l + if m.Kind != nil { + n += m.Kind.Size() + } + return n +} + +func (m *Value_NullValue) Size() (n int) { + var l int + _ = l + n += 1 + sovStruct(uint64(m.NullValue)) + return n +} +func (m *Value_NumberValue) Size() (n int) { + var l int + _ = l + n += 9 + return n +} +func (m *Value_StringValue) Size() (n int) { + var l int + _ = l + l = len(m.StringValue) + n += 1 + l + sovStruct(uint64(l)) + return n +} +func (m *Value_BoolValue) Size() (n int) { + var l int + _ = l + n += 2 + return n +} +func (m *Value_StructValue) Size() (n int) { + var l int + _ = l + if m.StructValue != nil { + l = m.StructValue.Size() + n += 1 + l + sovStruct(uint64(l)) + } + return n +} +func (m *Value_ListValue) Size() (n int) { + var l int + _ = l + if m.ListValue != nil { + l = m.ListValue.Size() + n += 1 + l + sovStruct(uint64(l)) + } + return n +} +func (m *ListValue) Size() (n int) { + var l int + _ = l + if len(m.Values) > 0 { + for _, e := range m.Values { + l = e.Size() + n += 1 + l + sovStruct(uint64(l)) + } + } + return n +} + +func sovStruct(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozStruct(x uint64) (n int) { + return sovStruct(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Struct) String() string { + if this 
== nil { + return "nil" + } + keysForFields := make([]string, 0, len(this.Fields)) + for k := range this.Fields { + keysForFields = append(keysForFields, k) + } + sortkeys.Strings(keysForFields) + mapStringForFields := "map[string]*Value{" + for _, k := range keysForFields { + mapStringForFields += fmt.Sprintf("%v: %v,", k, this.Fields[k]) + } + mapStringForFields += "}" + s := strings.Join([]string{`&Struct{`, + `Fields:` + mapStringForFields + `,`, + `}`, + }, "") + return s +} +func (this *Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `}`, + }, "") + return s +} +func (this *Value_NullValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_NullValue{`, + `NullValue:` + fmt.Sprintf("%v", this.NullValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_NumberValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_NumberValue{`, + `NumberValue:` + fmt.Sprintf("%v", this.NumberValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_StringValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_StringValue{`, + `StringValue:` + fmt.Sprintf("%v", this.StringValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_BoolValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_BoolValue{`, + `BoolValue:` + fmt.Sprintf("%v", this.BoolValue) + `,`, + `}`, + }, "") + return s +} +func (this *Value_StructValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_StructValue{`, + `StructValue:` + strings.Replace(fmt.Sprintf("%v", this.StructValue), "Struct", "Struct", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Value_ListValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Value_ListValue{`, 
+ `ListValue:` + strings.Replace(fmt.Sprintf("%v", this.ListValue), "ListValue", "ListValue", 1) + `,`, + `}`, + }, "") + return s +} +func (this *ListValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&ListValue{`, + `Values:` + strings.Replace(fmt.Sprintf("%v", this.Values), "Value", "Value", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringStruct(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Struct) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Struct: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Struct: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Fields == nil { + m.Fields = make(map[string]*Value) + } + var mapkey string + var mapvalue *Value + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthStruct + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var mapmsglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + mapmsglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if mapmsglen < 0 { + return ErrInvalidLengthStruct + } + postmsgIndex := iNdEx + mapmsglen + if mapmsglen < 0 { + return ErrInvalidLengthStruct + } + if postmsgIndex > l { + return io.ErrUnexpectedEOF + } + mapvalue = &Value{} + if err := mapvalue.Unmarshal(dAtA[iNdEx:postmsgIndex]); err != nil { + return err + } + iNdEx = postmsgIndex + } else { + iNdEx = entryPreIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Fields[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx 
+= skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NullValue", wireType) + } + var v NullValue + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (NullValue(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Kind = &Value_NullValue{v} + case 2: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field NumberValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Kind = &Value_NumberValue{float64(math.Float64frombits(v))} + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + intStringLen + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + m.Kind = &Value_StringValue{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BoolValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Kind = &Value_BoolValue{b} + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StructValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &Struct{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &Value_StructValue{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field ListValue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ListValue{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Kind = &Value_ListValue{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return 
ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ListValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ListValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ListValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStruct + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStruct + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, &Value{}) + if err := m.Values[len(m.Values)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStruct(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStruct + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipStruct(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthStruct + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStruct + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipStruct(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthStruct = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowStruct = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("google/protobuf/struct.proto", fileDescriptorStruct) } + +var fileDescriptorStruct = []byte{ + // 440 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 
0x02, 0xff, 0x74, 0x91, 0xc1, 0x6b, 0xd4, 0x40, + 0x14, 0xc6, 0xf3, 0xb2, 0xdd, 0xe0, 0xbe, 0x48, 0x2d, 0x23, 0xe8, 0x52, 0x65, 0x5c, 0xb6, 0x97, + 0x45, 0x24, 0x85, 0xf5, 0x22, 0xae, 0x17, 0x17, 0x6a, 0x0b, 0x2e, 0x25, 0x46, 0x5b, 0xc1, 0xcb, + 0x62, 0xd2, 0x74, 0x09, 0x9d, 0xce, 0x94, 0x64, 0x46, 0xd9, 0x9b, 0xfe, 0x17, 0x9e, 0x3d, 0x89, + 0x47, 0xff, 0x0a, 0x8f, 0x3d, 0x7a, 0x34, 0x39, 0x79, 0xec, 0xb1, 0x47, 0x99, 0x99, 0x24, 0x4a, + 0x97, 0xde, 0xf2, 0xbe, 0xf9, 0xbd, 0xef, 0xbd, 0xef, 0x05, 0xef, 0x2f, 0x84, 0x58, 0xb0, 0x74, + 0xfb, 0x2c, 0x17, 0x52, 0xc4, 0xea, 0x78, 0xbb, 0x90, 0xb9, 0x4a, 0x64, 0x60, 0x6a, 0x72, 0xcb, + 0xbe, 0x06, 0xcd, 0xeb, 0xf0, 0x0b, 0xa0, 0xf7, 0xda, 0x10, 0x64, 0x82, 0xde, 0x71, 0x96, 0xb2, + 0xa3, 0xa2, 0x0f, 0x83, 0xce, 0xc8, 0x1f, 0x6f, 0x05, 0x57, 0xe0, 0xc0, 0x82, 0xc1, 0x0b, 0x43, + 0xed, 0x70, 0x99, 0x2f, 0xa3, 0xba, 0x65, 0xf3, 0x15, 0xfa, 0xff, 0xc9, 0x64, 0x03, 0x3b, 0x27, + 0xe9, 0xb2, 0x0f, 0x03, 0x18, 0xf5, 0x22, 0xfd, 0x49, 0x1e, 0x61, 0xf7, 0xc3, 0x7b, 0xa6, 0xd2, + 0xbe, 0x3b, 0x80, 0x91, 0x3f, 0xbe, 0xb3, 0x62, 0x7e, 0xa8, 0x5f, 0x23, 0x0b, 0x3d, 0x75, 0x9f, + 0xc0, 0xf0, 0x87, 0x8b, 0x5d, 0x23, 0x92, 0x09, 0x22, 0x57, 0x8c, 0xcd, 0xad, 0x81, 0x36, 0x5d, + 0x1f, 0x6f, 0xae, 0x18, 0xec, 0x2b, 0xc6, 0x0c, 0xbf, 0xe7, 0x44, 0x3d, 0xde, 0x14, 0x64, 0x0b, + 0x6f, 0x72, 0x75, 0x1a, 0xa7, 0xf9, 0xfc, 0xdf, 0x7c, 0xd8, 0x73, 0x22, 0xdf, 0xaa, 0x2d, 0x54, + 0xc8, 0x3c, 0xe3, 0x8b, 0x1a, 0xea, 0xe8, 0xc5, 0x35, 0x64, 0x55, 0x0b, 0x3d, 0x40, 0x8c, 0x85, + 0x68, 0xd6, 0x58, 0x1b, 0xc0, 0xe8, 0x86, 0x1e, 0xa5, 0x35, 0x0b, 0x3c, 0x33, 0x2e, 0x2a, 0x91, + 0x35, 0xd2, 0x35, 0x51, 0xef, 0x5e, 0x73, 0xc7, 0xda, 0x5e, 0x25, 0xb2, 0x4d, 0xc9, 0xb2, 0xa2, + 0xe9, 0xf5, 0x4c, 0xef, 0x6a, 0xca, 0x59, 0x56, 0xc8, 0x36, 0x25, 0x6b, 0x8a, 0xa9, 0x87, 0x6b, + 0x27, 0x19, 0x3f, 0x1a, 0x4e, 0xb0, 0xd7, 0x12, 0x24, 0x40, 0xcf, 0x98, 0x35, 0x7f, 0xf4, 0xba, + 0xa3, 0xd7, 0xd4, 0xc3, 0x7b, 0xd8, 0x6b, 0x8f, 0x48, 0xd6, 0x11, 0xf7, 0x0f, 0x66, 
0xb3, 0xf9, + 0xe1, 0xf3, 0xd9, 0xc1, 0xce, 0x86, 0x33, 0xfd, 0x0c, 0xe7, 0x25, 0x75, 0x7e, 0x95, 0xd4, 0xb9, + 0x28, 0x29, 0x5c, 0x96, 0x14, 0x3e, 0x55, 0x14, 0xbe, 0x55, 0x14, 0x7e, 0x56, 0x14, 0xce, 0x2b, + 0x0a, 0xbf, 0x2b, 0x0a, 0x7f, 0x2a, 0xea, 0x5c, 0x54, 0x14, 0xf0, 0x76, 0x22, 0x4e, 0xaf, 0x8e, + 0x9b, 0xfa, 0x36, 0x79, 0xa8, 0xeb, 0x10, 0xde, 0x75, 0xe5, 0xf2, 0x2c, 0x2d, 0x2e, 0x01, 0xbe, + 0xba, 0x9d, 0xdd, 0x70, 0xfa, 0xdd, 0xa5, 0xbb, 0xb6, 0x21, 0x6c, 0xf6, 0x7b, 0x9b, 0x32, 0xf6, + 0x92, 0x8b, 0x8f, 0xfc, 0x8d, 0x26, 0x63, 0xcf, 0x38, 0x3d, 0xfe, 0x1b, 0x00, 0x00, 0xff, 0xff, + 0x97, 0x4f, 0x05, 0x6c, 0xe5, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/timestamp.go b/vendor/github.com/gogo/protobuf/types/timestamp.go new file mode 100644 index 000000000..7ae54d8b3 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/timestamp.go @@ -0,0 +1,132 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. 
+// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +// This file implements operations on google.protobuf.Timestamp. + +import ( + "errors" + "fmt" + "time" +) + +const ( + // Seconds field of the earliest valid Timestamp. + // This is time.Date(1, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + minValidSeconds = -62135596800 + // Seconds field just after the latest valid Timestamp. + // This is time.Date(10000, 1, 1, 0, 0, 0, 0, time.UTC).Unix(). + maxValidSeconds = 253402300800 +) + +// validateTimestamp determines whether a Timestamp is valid. +// A valid timestamp represents a time in the range +// [0001-01-01, 10000-01-01) and has a Nanos field +// in the range [0, 1e9). +// +// If the Timestamp is valid, validateTimestamp returns nil. +// Otherwise, it returns an error that describes +// the problem. +// +// Every valid Timestamp can be represented by a time.Time, but the converse is not true. 
+func validateTimestamp(ts *Timestamp) error { + if ts == nil { + return errors.New("timestamp: nil Timestamp") + } + if ts.Seconds < minValidSeconds { + return fmt.Errorf("timestamp: %#v before 0001-01-01", ts) + } + if ts.Seconds >= maxValidSeconds { + return fmt.Errorf("timestamp: %#v after 10000-01-01", ts) + } + if ts.Nanos < 0 || ts.Nanos >= 1e9 { + return fmt.Errorf("timestamp: %#v: nanos not in range [0, 1e9)", ts) + } + return nil +} + +// TimestampFromProto converts a google.protobuf.Timestamp proto to a time.Time. +// It returns an error if the argument is invalid. +// +// Unlike most Go functions, if Timestamp returns an error, the first return value +// is not the zero time.Time. Instead, it is the value obtained from the +// time.Unix function when passed the contents of the Timestamp, in the UTC +// locale. This may or may not be a meaningful time; many invalid Timestamps +// do map to valid time.Times. +// +// A nil Timestamp returns an error. The first return value in that case is +// undefined. +func TimestampFromProto(ts *Timestamp) (time.Time, error) { + // Don't return the zero value on error, because corresponds to a valid + // timestamp. Instead return whatever time.Unix gives us. + var t time.Time + if ts == nil { + t = time.Unix(0, 0).UTC() // treat nil like the empty Timestamp + } else { + t = time.Unix(ts.Seconds, int64(ts.Nanos)).UTC() + } + return t, validateTimestamp(ts) +} + +// TimestampNow returns a google.protobuf.Timestamp for the current time. +func TimestampNow() *Timestamp { + ts, err := TimestampProto(time.Now()) + if err != nil { + panic("ptypes: time.Now() out of Timestamp range") + } + return ts +} + +// TimestampProto converts the time.Time to a google.protobuf.Timestamp proto. +// It returns an error if the resulting Timestamp is invalid. 
+func TimestampProto(t time.Time) (*Timestamp, error) { + seconds := t.Unix() + nanos := int32(t.Sub(time.Unix(seconds, 0))) + ts := &Timestamp{ + Seconds: seconds, + Nanos: nanos, + } + if err := validateTimestamp(ts); err != nil { + return nil, err + } + return ts, nil +} + +// TimestampString returns the RFC 3339 string for valid Timestamps. For invalid +// Timestamps, it returns an error message in parentheses. +func TimestampString(ts *Timestamp) string { + t, err := TimestampFromProto(ts) + if err != nil { + return fmt.Sprintf("(%v)", err) + } + return t.Format(time.RFC3339Nano) +} diff --git a/vendor/github.com/gogo/protobuf/types/timestamp.pb.go b/vendor/github.com/gogo/protobuf/types/timestamp.pb.go new file mode 100644 index 000000000..2d8686e0e --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/timestamp.pb.go @@ -0,0 +1,492 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/timestamp.proto + +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// A Timestamp represents a point in time independent of any time zone +// or calendar, represented as seconds and fractions of seconds at +// nanosecond resolution in UTC Epoch time. It is encoded using the +// Proleptic Gregorian Calendar which extends the Gregorian calendar +// backwards to year one. It is encoded assuming all minutes are 60 +// seconds long, i.e. leap seconds are "smeared" so that no leap second +// table is needed for interpretation. Range is from +// 0001-01-01T00:00:00Z to 9999-12-31T23:59:59.999999999Z. +// By restricting to that range, we ensure that we can convert to +// and from RFC 3339 date strings. +// See [https://www.ietf.org/rfc/rfc3339.txt](https://www.ietf.org/rfc/rfc3339.txt). 
+// +// # Examples +// +// Example 1: Compute Timestamp from POSIX `time()`. +// +// Timestamp timestamp; +// timestamp.set_seconds(time(NULL)); +// timestamp.set_nanos(0); +// +// Example 2: Compute Timestamp from POSIX `gettimeofday()`. +// +// struct timeval tv; +// gettimeofday(&tv, NULL); +// +// Timestamp timestamp; +// timestamp.set_seconds(tv.tv_sec); +// timestamp.set_nanos(tv.tv_usec * 1000); +// +// Example 3: Compute Timestamp from Win32 `GetSystemTimeAsFileTime()`. +// +// FILETIME ft; +// GetSystemTimeAsFileTime(&ft); +// UINT64 ticks = (((UINT64)ft.dwHighDateTime) << 32) | ft.dwLowDateTime; +// +// // A Windows tick is 100 nanoseconds. Windows epoch 1601-01-01T00:00:00Z +// // is 11644473600 seconds before Unix epoch 1970-01-01T00:00:00Z. +// Timestamp timestamp; +// timestamp.set_seconds((INT64) ((ticks / 10000000) - 11644473600LL)); +// timestamp.set_nanos((INT32) ((ticks % 10000000) * 100)); +// +// Example 4: Compute Timestamp from Java `System.currentTimeMillis()`. +// +// long millis = System.currentTimeMillis(); +// +// Timestamp timestamp = Timestamp.newBuilder().setSeconds(millis / 1000) +// .setNanos((int) ((millis % 1000) * 1000000)).build(); +// +// +// Example 5: Compute Timestamp from current time in Python. +// +// timestamp = Timestamp() +// timestamp.GetCurrentTime() +// +// # JSON Mapping +// +// In JSON format, the Timestamp type is encoded as a string in the +// [RFC 3339](https://www.ietf.org/rfc/rfc3339.txt) format. That is, the +// format is "{year}-{month}-{day}T{hour}:{min}:{sec}[.{frac_sec}]Z" +// where {year} is always expressed using four digits while {month}, {day}, +// {hour}, {min}, and {sec} are zero-padded to two digits each. The fractional +// seconds, which can go up to 9 digits (i.e. up to 1 nanosecond resolution), +// are optional. The "Z" suffix indicates the timezone ("UTC"); the timezone +// is required. 
A proto3 JSON serializer should always use UTC (as indicated by +// "Z") when printing the Timestamp type and a proto3 JSON parser should be +// able to accept both UTC and other timezones (as indicated by an offset). +// +// For example, "2017-01-15T01:30:15.01Z" encodes 15.01 seconds past +// 01:30 UTC on January 15, 2017. +// +// In JavaScript, one can convert a Date object to this format using the +// standard [toISOString()](https://developer.mozilla.org/en-US/docs/Web/JavaScript/Reference/Global_Objects/Date/toISOString] +// method. In Python, a standard `datetime.datetime` object can be converted +// to this format using [`strftime`](https://docs.python.org/2/library/time.html#time.strftime) +// with the time format spec '%Y-%m-%dT%H:%M:%S.%fZ'. Likewise, in Java, one +// can use the Joda Time's [`ISODateTimeFormat.dateTime()`]( +// http://www.joda.org/joda-time/apidocs/org/joda/time/format/ISODateTimeFormat.html#dateTime-- +// ) to obtain a formatter capable of generating timestamps in this format. +// +// +type Timestamp struct { + // Represents seconds of UTC time since Unix epoch + // 1970-01-01T00:00:00Z. Must be from 0001-01-01T00:00:00Z to + // 9999-12-31T23:59:59Z inclusive. + Seconds int64 `protobuf:"varint,1,opt,name=seconds,proto3" json:"seconds,omitempty"` + // Non-negative fractions of a second at nanosecond resolution. Negative + // second values with fractions must still have non-negative nanos values + // that count forward in time. Must be from 0 to 999,999,999 + // inclusive. 
+ Nanos int32 `protobuf:"varint,2,opt,name=nanos,proto3" json:"nanos,omitempty"` +} + +func (m *Timestamp) Reset() { *m = Timestamp{} } +func (*Timestamp) ProtoMessage() {} +func (*Timestamp) Descriptor() ([]byte, []int) { return fileDescriptorTimestamp, []int{0} } +func (*Timestamp) XXX_WellKnownType() string { return "Timestamp" } + +func (m *Timestamp) GetSeconds() int64 { + if m != nil { + return m.Seconds + } + return 0 +} + +func (m *Timestamp) GetNanos() int32 { + if m != nil { + return m.Nanos + } + return 0 +} + +func init() { + proto.RegisterType((*Timestamp)(nil), "google.protobuf.Timestamp") +} +func (this *Timestamp) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Timestamp) + if !ok { + that2, ok := that.(Timestamp) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Seconds != that1.Seconds { + if this.Seconds < that1.Seconds { + return -1 + } + return 1 + } + if this.Nanos != that1.Nanos { + if this.Nanos < that1.Nanos { + return -1 + } + return 1 + } + return 0 +} +func (this *Timestamp) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Timestamp) + if !ok { + that2, ok := that.(Timestamp) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Seconds != that1.Seconds { + return false + } + if this.Nanos != that1.Nanos { + return false + } + return true +} +func (this *Timestamp) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Timestamp{") + s = append(s, "Seconds: "+fmt.Sprintf("%#v", this.Seconds)+",\n") + s = append(s, "Nanos: "+fmt.Sprintf("%#v", this.Nanos)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func 
valueToGoStringTimestamp(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Timestamp) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Timestamp) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Seconds != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintTimestamp(dAtA, i, uint64(m.Seconds)) + } + if m.Nanos != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintTimestamp(dAtA, i, uint64(m.Nanos)) + } + return i, nil +} + +func encodeVarintTimestamp(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Timestamp) Size() (n int) { + var l int + _ = l + if m.Seconds != 0 { + n += 1 + sovTimestamp(uint64(m.Seconds)) + } + if m.Nanos != 0 { + n += 1 + sovTimestamp(uint64(m.Nanos)) + } + return n +} + +func sovTimestamp(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozTimestamp(x uint64) (n int) { + return sovTimestamp(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Timestamp) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Timestamp: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Timestamp: 
illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Seconds", wireType) + } + m.Seconds = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Seconds |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Nanos", wireType) + } + m.Nanos = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowTimestamp + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Nanos |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipTimestamp(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthTimestamp + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipTimestamp(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, 
io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthTimestamp + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowTimestamp + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipTimestamp(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthTimestamp = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowTimestamp = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("google/protobuf/timestamp.proto", fileDescriptorTimestamp) } + +var fileDescriptorTimestamp = []byte{ + // 212 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4f, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0xc9, 0xcc, 0x4d, + 0x2d, 0x2e, 0x49, 0xcc, 0x2d, 0xd0, 0x03, 0x0b, 0x09, 0xf1, 0x43, 0x14, 0xe8, 0xc1, 0x14, 0x28, + 0x59, 0x73, 0x71, 0x86, 0xc0, 0xd4, 0x08, 0x49, 0x70, 0xb1, 0x17, 0xa7, 0x26, 0xe7, 0xe7, 0xa5, + 0x14, 0x4b, 0x30, 0x2a, 0x30, 0x6a, 0x30, 0x07, 0xc1, 0xb8, 0x42, 0x22, 0x5c, 0xac, 0x79, 0x89, + 0x79, 0xf9, 0xc5, 0x12, 0x4c, 0x0a, 0x8c, 0x1a, 0xac, 0x41, 0x10, 0x8e, 0x53, 0x03, 0xe3, 0x85, + 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, 0xc7, 0xb8, 0xe2, 0x91, 0x1c, 0xe3, + 0x89, 0x47, 0x72, 0x8c, 
0x17, 0x1e, 0xc9, 0x31, 0x3e, 0x78, 0x24, 0xc7, 0xf8, 0xe2, 0x91, 0x1c, + 0xc3, 0x87, 0x47, 0x72, 0x8c, 0x2b, 0x1e, 0xcb, 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, + 0x59, 0xee, 0xc4, 0x07, 0xb7, 0x3a, 0x00, 0x24, 0x14, 0xc0, 0x18, 0xc5, 0x5a, 0x52, 0x59, 0x90, + 0x5a, 0xfc, 0x83, 0x91, 0x71, 0x11, 0x13, 0xb3, 0x7b, 0x80, 0xd3, 0x2a, 0x26, 0x39, 0x77, 0x88, + 0x9e, 0x00, 0xa8, 0x1e, 0xbd, 0xf0, 0xd4, 0x9c, 0x1c, 0xef, 0xbc, 0xfc, 0xf2, 0xbc, 0x10, 0x90, + 0xca, 0x24, 0x36, 0xb0, 0x61, 0xc6, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0x0b, 0x2a, 0x17, 0x44, + 0xfa, 0x00, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go b/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go new file mode 100644 index 000000000..e03fa1315 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/timestamp_gogo.go @@ -0,0 +1,94 @@ +// Protocol Buffers for Go with Gadgets +// +// Copyright (c) 2016, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +import ( + "time" +) + +func NewPopulatedTimestamp(r interface { + Int63() int64 +}, easy bool) *Timestamp { + this := &Timestamp{} + ns := int64(r.Int63()) + this.Seconds = ns / 1e9 + this.Nanos = int32(ns % 1e9) + return this +} + +func (ts *Timestamp) String() string { + return TimestampString(ts) +} + +func NewPopulatedStdTime(r interface { + Int63() int64 +}, easy bool) *time.Time { + timestamp := NewPopulatedTimestamp(r, easy) + t, err := TimestampFromProto(timestamp) + if err != nil { + return nil + } + return &t +} + +func SizeOfStdTime(t time.Time) int { + ts, err := TimestampProto(t) + if err != nil { + return 0 + } + return ts.Size() +} + +func StdTimeMarshal(t time.Time) ([]byte, error) { + size := SizeOfStdTime(t) + buf := make([]byte, size) + _, err := StdTimeMarshalTo(t, buf) + return buf, err +} + +func StdTimeMarshalTo(t time.Time, data []byte) (int, error) { + ts, err := TimestampProto(t) + if err != nil { + return 0, err + } + return ts.MarshalTo(data) +} + +func StdTimeUnmarshal(t *time.Time, data []byte) error { + ts := &Timestamp{} + if err := ts.Unmarshal(data); err != nil { + return err + } + tt, err := TimestampFromProto(ts) + if err != nil { + return err + } + *t = tt + return nil +} diff --git a/vendor/github.com/gogo/protobuf/types/timestamp_test.go b/vendor/github.com/gogo/protobuf/types/timestamp_test.go new file mode 100644 index 000000000..6af8631e5 --- /dev/null +++ 
b/vendor/github.com/gogo/protobuf/types/timestamp_test.go @@ -0,0 +1,152 @@ +// Go support for Protocol Buffers - Google's data interchange format +// +// Copyright 2016 The Go Authors. All rights reserved. +// https://github.com/golang/protobuf +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package types + +import ( + "math" + "testing" + "time" + + "github.com/gogo/protobuf/proto" +) + +var tests = []struct { + ts *Timestamp + valid bool + t time.Time +}{ + // The timestamp representing the Unix epoch date. 
+ {&Timestamp{Seconds: 0, Nanos: 0}, true, utcDate(1970, 1, 1)}, + // The smallest representable timestamp. + {&Timestamp{Seconds: math.MinInt64, Nanos: math.MinInt32}, false, + time.Unix(math.MinInt64, math.MinInt32).UTC()}, + // The smallest representable timestamp with non-negative nanos. + {&Timestamp{Seconds: math.MinInt64, Nanos: 0}, false, time.Unix(math.MinInt64, 0).UTC()}, + // The earliest valid timestamp. + {&Timestamp{Seconds: minValidSeconds, Nanos: 0}, true, utcDate(1, 1, 1)}, + //"0001-01-01T00:00:00Z"}, + // The largest representable timestamp. + {&Timestamp{Seconds: math.MaxInt64, Nanos: math.MaxInt32}, false, + time.Unix(math.MaxInt64, math.MaxInt32).UTC()}, + // The largest representable timestamp with nanos in range. + {&Timestamp{Seconds: math.MaxInt64, Nanos: 1e9 - 1}, false, + time.Unix(math.MaxInt64, 1e9-1).UTC()}, + // The largest valid timestamp. + {&Timestamp{Seconds: maxValidSeconds - 1, Nanos: 1e9 - 1}, true, + time.Date(9999, 12, 31, 23, 59, 59, 1e9-1, time.UTC)}, + // The smallest invalid timestamp that is larger than the valid range. + {&Timestamp{Seconds: maxValidSeconds, Nanos: 0}, false, time.Unix(maxValidSeconds, 0).UTC()}, + // A date before the epoch. + {&Timestamp{Seconds: -281836800, Nanos: 0}, true, utcDate(1961, 1, 26)}, + // A date after the epoch. + {&Timestamp{Seconds: 1296000000, Nanos: 0}, true, utcDate(2011, 1, 26)}, + // A date after the epoch, in the middle of the day. 
+ {&Timestamp{Seconds: 1296012345, Nanos: 940483}, true, + time.Date(2011, 1, 26, 3, 25, 45, 940483, time.UTC)}, +} + +func TestValidateTimestamp(t *testing.T) { + for _, s := range tests { + got := validateTimestamp(s.ts) + if (got == nil) != s.valid { + t.Errorf("validateTimestamp(%v) = %v, want %v", s.ts, got, s.valid) + } + } +} + +func TestTimestampFromProto(t *testing.T) { + for _, s := range tests { + got, err := TimestampFromProto(s.ts) + if (err == nil) != s.valid { + t.Errorf("TimestampFromProto(%v) error = %v, but valid = %t", s.ts, err, s.valid) + } else if s.valid && got != s.t { + t.Errorf("TimestampFromProto(%v) = %v, want %v", s.ts, got, s.t) + } + } + // Special case: a nil TimestampFromProto is an error, but returns the 0 Unix time. + got, err := TimestampFromProto(nil) + want := time.Unix(0, 0).UTC() + if got != want { + t.Errorf("TimestampFromProto(nil) = %v, want %v", got, want) + } + if err == nil { + t.Errorf("TimestampFromProto(nil) error = nil, expected error") + } +} + +func TestTimestampProto(t *testing.T) { + for _, s := range tests { + got, err := TimestampProto(s.t) + if (err == nil) != s.valid { + t.Errorf("TimestampProto(%v) error = %v, but valid = %t", s.t, err, s.valid) + } else if s.valid && !proto.Equal(got, s.ts) { + t.Errorf("TimestampProto(%v) = %v, want %v", s.t, got, s.ts) + } + } + // No corresponding special case here: no time.Time results in a nil Timestamp. +} + +func TestTimestampString(t *testing.T) { + for _, test := range []struct { + ts *Timestamp + want string + }{ + // Not much testing needed because presumably time.Format is + // well-tested. 
+ {&Timestamp{Seconds: 0, Nanos: 0}, "1970-01-01T00:00:00Z"}, + {&Timestamp{Seconds: minValidSeconds - 1, Nanos: 0}, "(timestamp: &types.Timestamp{Seconds: -62135596801,\nNanos: 0,\n} before 0001-01-01)"}, + } { + got := TimestampString(test.ts) + if got != test.want { + t.Errorf("TimestampString(%v) = %q, want %q", test.ts, got, test.want) + } + } +} + +func utcDate(year, month, day int) time.Time { + return time.Date(year, time.Month(month), day, 0, 0, 0, 0, time.UTC) +} + +func TestTimestampNow(t *testing.T) { + // Bracket the expected time. + before := time.Now() + ts := TimestampNow() + after := time.Now() + + tm, err := TimestampFromProto(ts) + if err != nil { + t.Errorf("between %v and %v\nTimestampNow() = %v\nwhich is invalid (%v)", before, after, ts, err) + } + if tm.Before(before) || tm.After(after) { + t.Errorf("between %v and %v\nTimestamp(TimestampNow()) = %v", before, after, tm) + } +} diff --git a/vendor/github.com/gogo/protobuf/types/type.pb.go b/vendor/github.com/gogo/protobuf/types/type.pb.go new file mode 100644 index 000000000..22885a2e8 --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/type.pb.go @@ -0,0 +1,2950 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: google/protobuf/type.proto + +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import strconv "strconv" + +import strings "strings" +import reflect "reflect" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The syntax in which a protocol buffer element is defined. +type Syntax int32 + +const ( + // Syntax `proto2`. + SYNTAX_PROTO2 Syntax = 0 + // Syntax `proto3`. 
+ SYNTAX_PROTO3 Syntax = 1 +) + +var Syntax_name = map[int32]string{ + 0: "SYNTAX_PROTO2", + 1: "SYNTAX_PROTO3", +} +var Syntax_value = map[string]int32{ + "SYNTAX_PROTO2": 0, + "SYNTAX_PROTO3": 1, +} + +func (Syntax) EnumDescriptor() ([]byte, []int) { return fileDescriptorType, []int{0} } + +// Basic field types. +type Field_Kind int32 + +const ( + // Field type unknown. + TYPE_UNKNOWN Field_Kind = 0 + // Field type double. + TYPE_DOUBLE Field_Kind = 1 + // Field type float. + TYPE_FLOAT Field_Kind = 2 + // Field type int64. + TYPE_INT64 Field_Kind = 3 + // Field type uint64. + TYPE_UINT64 Field_Kind = 4 + // Field type int32. + TYPE_INT32 Field_Kind = 5 + // Field type fixed64. + TYPE_FIXED64 Field_Kind = 6 + // Field type fixed32. + TYPE_FIXED32 Field_Kind = 7 + // Field type bool. + TYPE_BOOL Field_Kind = 8 + // Field type string. + TYPE_STRING Field_Kind = 9 + // Field type group. Proto2 syntax only, and deprecated. + TYPE_GROUP Field_Kind = 10 + // Field type message. + TYPE_MESSAGE Field_Kind = 11 + // Field type bytes. + TYPE_BYTES Field_Kind = 12 + // Field type uint32. + TYPE_UINT32 Field_Kind = 13 + // Field type enum. + TYPE_ENUM Field_Kind = 14 + // Field type sfixed32. + TYPE_SFIXED32 Field_Kind = 15 + // Field type sfixed64. + TYPE_SFIXED64 Field_Kind = 16 + // Field type sint32. + TYPE_SINT32 Field_Kind = 17 + // Field type sint64. 
+ TYPE_SINT64 Field_Kind = 18 +) + +var Field_Kind_name = map[int32]string{ + 0: "TYPE_UNKNOWN", + 1: "TYPE_DOUBLE", + 2: "TYPE_FLOAT", + 3: "TYPE_INT64", + 4: "TYPE_UINT64", + 5: "TYPE_INT32", + 6: "TYPE_FIXED64", + 7: "TYPE_FIXED32", + 8: "TYPE_BOOL", + 9: "TYPE_STRING", + 10: "TYPE_GROUP", + 11: "TYPE_MESSAGE", + 12: "TYPE_BYTES", + 13: "TYPE_UINT32", + 14: "TYPE_ENUM", + 15: "TYPE_SFIXED32", + 16: "TYPE_SFIXED64", + 17: "TYPE_SINT32", + 18: "TYPE_SINT64", +} +var Field_Kind_value = map[string]int32{ + "TYPE_UNKNOWN": 0, + "TYPE_DOUBLE": 1, + "TYPE_FLOAT": 2, + "TYPE_INT64": 3, + "TYPE_UINT64": 4, + "TYPE_INT32": 5, + "TYPE_FIXED64": 6, + "TYPE_FIXED32": 7, + "TYPE_BOOL": 8, + "TYPE_STRING": 9, + "TYPE_GROUP": 10, + "TYPE_MESSAGE": 11, + "TYPE_BYTES": 12, + "TYPE_UINT32": 13, + "TYPE_ENUM": 14, + "TYPE_SFIXED32": 15, + "TYPE_SFIXED64": 16, + "TYPE_SINT32": 17, + "TYPE_SINT64": 18, +} + +func (Field_Kind) EnumDescriptor() ([]byte, []int) { return fileDescriptorType, []int{1, 0} } + +// Whether a field is optional, required, or repeated. +type Field_Cardinality int32 + +const ( + // For fields with unknown cardinality. + CARDINALITY_UNKNOWN Field_Cardinality = 0 + // For optional fields. + CARDINALITY_OPTIONAL Field_Cardinality = 1 + // For required fields. Proto2 syntax only. + CARDINALITY_REQUIRED Field_Cardinality = 2 + // For repeated fields. + CARDINALITY_REPEATED Field_Cardinality = 3 +) + +var Field_Cardinality_name = map[int32]string{ + 0: "CARDINALITY_UNKNOWN", + 1: "CARDINALITY_OPTIONAL", + 2: "CARDINALITY_REQUIRED", + 3: "CARDINALITY_REPEATED", +} +var Field_Cardinality_value = map[string]int32{ + "CARDINALITY_UNKNOWN": 0, + "CARDINALITY_OPTIONAL": 1, + "CARDINALITY_REQUIRED": 2, + "CARDINALITY_REPEATED": 3, +} + +func (Field_Cardinality) EnumDescriptor() ([]byte, []int) { return fileDescriptorType, []int{1, 1} } + +// A protocol buffer message type. +type Type struct { + // The fully qualified message name. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The list of fields. + Fields []*Field `protobuf:"bytes,2,rep,name=fields" json:"fields,omitempty"` + // The list of types appearing in `oneof` definitions in this type. + Oneofs []string `protobuf:"bytes,3,rep,name=oneofs" json:"oneofs,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,4,rep,name=options" json:"options,omitempty"` + // The source context. + SourceContext *SourceContext `protobuf:"bytes,5,opt,name=source_context,json=sourceContext" json:"source_context,omitempty"` + // The source syntax. + Syntax Syntax `protobuf:"varint,6,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` +} + +func (m *Type) Reset() { *m = Type{} } +func (*Type) ProtoMessage() {} +func (*Type) Descriptor() ([]byte, []int) { return fileDescriptorType, []int{0} } + +func (m *Type) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Type) GetFields() []*Field { + if m != nil { + return m.Fields + } + return nil +} + +func (m *Type) GetOneofs() []string { + if m != nil { + return m.Oneofs + } + return nil +} + +func (m *Type) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Type) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Type) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return SYNTAX_PROTO2 +} + +// A single field of a message type. +type Field struct { + // The field type. + Kind Field_Kind `protobuf:"varint,1,opt,name=kind,proto3,enum=google.protobuf.Field_Kind" json:"kind,omitempty"` + // The field cardinality. + Cardinality Field_Cardinality `protobuf:"varint,2,opt,name=cardinality,proto3,enum=google.protobuf.Field_Cardinality" json:"cardinality,omitempty"` + // The field number. + Number int32 `protobuf:"varint,3,opt,name=number,proto3" json:"number,omitempty"` + // The field name. 
+ Name string `protobuf:"bytes,4,opt,name=name,proto3" json:"name,omitempty"` + // The field type URL, without the scheme, for message or enumeration + // types. Example: `"type.googleapis.com/google.protobuf.Timestamp"`. + TypeUrl string `protobuf:"bytes,6,opt,name=type_url,json=typeUrl,proto3" json:"type_url,omitempty"` + // The index of the field type in `Type.oneofs`, for message or enumeration + // types. The first type has index 1; zero means the type is not in the list. + OneofIndex int32 `protobuf:"varint,7,opt,name=oneof_index,json=oneofIndex,proto3" json:"oneof_index,omitempty"` + // Whether to use alternative packed wire representation. + Packed bool `protobuf:"varint,8,opt,name=packed,proto3" json:"packed,omitempty"` + // The protocol buffer options. + Options []*Option `protobuf:"bytes,9,rep,name=options" json:"options,omitempty"` + // The field JSON name. + JsonName string `protobuf:"bytes,10,opt,name=json_name,json=jsonName,proto3" json:"json_name,omitempty"` + // The string value of the default value of this field. Proto2 syntax only. 
+ DefaultValue string `protobuf:"bytes,11,opt,name=default_value,json=defaultValue,proto3" json:"default_value,omitempty"` +} + +func (m *Field) Reset() { *m = Field{} } +func (*Field) ProtoMessage() {} +func (*Field) Descriptor() ([]byte, []int) { return fileDescriptorType, []int{1} } + +func (m *Field) GetKind() Field_Kind { + if m != nil { + return m.Kind + } + return TYPE_UNKNOWN +} + +func (m *Field) GetCardinality() Field_Cardinality { + if m != nil { + return m.Cardinality + } + return CARDINALITY_UNKNOWN +} + +func (m *Field) GetNumber() int32 { + if m != nil { + return m.Number + } + return 0 +} + +func (m *Field) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Field) GetTypeUrl() string { + if m != nil { + return m.TypeUrl + } + return "" +} + +func (m *Field) GetOneofIndex() int32 { + if m != nil { + return m.OneofIndex + } + return 0 +} + +func (m *Field) GetPacked() bool { + if m != nil { + return m.Packed + } + return false +} + +func (m *Field) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Field) GetJsonName() string { + if m != nil { + return m.JsonName + } + return "" +} + +func (m *Field) GetDefaultValue() string { + if m != nil { + return m.DefaultValue + } + return "" +} + +// Enum type definition. +type Enum struct { + // Enum type name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Enum value definitions. + Enumvalue []*EnumValue `protobuf:"bytes,2,rep,name=enumvalue" json:"enumvalue,omitempty"` + // Protocol buffer options. + Options []*Option `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"` + // The source context. + SourceContext *SourceContext `protobuf:"bytes,4,opt,name=source_context,json=sourceContext" json:"source_context,omitempty"` + // The source syntax. 
+ Syntax Syntax `protobuf:"varint,5,opt,name=syntax,proto3,enum=google.protobuf.Syntax" json:"syntax,omitempty"` +} + +func (m *Enum) Reset() { *m = Enum{} } +func (*Enum) ProtoMessage() {} +func (*Enum) Descriptor() ([]byte, []int) { return fileDescriptorType, []int{2} } + +func (m *Enum) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Enum) GetEnumvalue() []*EnumValue { + if m != nil { + return m.Enumvalue + } + return nil +} + +func (m *Enum) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +func (m *Enum) GetSourceContext() *SourceContext { + if m != nil { + return m.SourceContext + } + return nil +} + +func (m *Enum) GetSyntax() Syntax { + if m != nil { + return m.Syntax + } + return SYNTAX_PROTO2 +} + +// Enum value definition. +type EnumValue struct { + // Enum value name. + Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // Enum value number. + Number int32 `protobuf:"varint,2,opt,name=number,proto3" json:"number,omitempty"` + // Protocol buffer options. + Options []*Option `protobuf:"bytes,3,rep,name=options" json:"options,omitempty"` +} + +func (m *EnumValue) Reset() { *m = EnumValue{} } +func (*EnumValue) ProtoMessage() {} +func (*EnumValue) Descriptor() ([]byte, []int) { return fileDescriptorType, []int{3} } + +func (m *EnumValue) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *EnumValue) GetNumber() int32 { + if m != nil { + return m.Number + } + return 0 +} + +func (m *EnumValue) GetOptions() []*Option { + if m != nil { + return m.Options + } + return nil +} + +// A protocol buffer option, which can be attached to a message, field, +// enumeration, etc. +type Option struct { + // The option's name. For protobuf built-in options (options defined in + // descriptor.proto), this is the short name. For example, `"map_entry"`. + // For custom options, it should be the fully-qualified name. For example, + // `"google.api.http"`. 
+ Name string `protobuf:"bytes,1,opt,name=name,proto3" json:"name,omitempty"` + // The option's value packed in an Any message. If the value is a primitive, + // the corresponding wrapper type defined in google/protobuf/wrappers.proto + // should be used. If the value is an enum, it should be stored as an int32 + // value using the google.protobuf.Int32Value type. + Value *Any `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` +} + +func (m *Option) Reset() { *m = Option{} } +func (*Option) ProtoMessage() {} +func (*Option) Descriptor() ([]byte, []int) { return fileDescriptorType, []int{4} } + +func (m *Option) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Option) GetValue() *Any { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*Type)(nil), "google.protobuf.Type") + proto.RegisterType((*Field)(nil), "google.protobuf.Field") + proto.RegisterType((*Enum)(nil), "google.protobuf.Enum") + proto.RegisterType((*EnumValue)(nil), "google.protobuf.EnumValue") + proto.RegisterType((*Option)(nil), "google.protobuf.Option") + proto.RegisterEnum("google.protobuf.Syntax", Syntax_name, Syntax_value) + proto.RegisterEnum("google.protobuf.Field_Kind", Field_Kind_name, Field_Kind_value) + proto.RegisterEnum("google.protobuf.Field_Cardinality", Field_Cardinality_name, Field_Cardinality_value) +} +func (this *Type) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Type) + if !ok { + that2, ok := that.(Type) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Fields) != len(that1.Fields) { + if len(this.Fields) < len(that1.Fields) { + return -1 + } + return 1 + } + for i := range this.Fields { + if c := 
this.Fields[i].Compare(that1.Fields[i]); c != 0 { + return c + } + } + if len(this.Oneofs) != len(that1.Oneofs) { + if len(this.Oneofs) < len(that1.Oneofs) { + return -1 + } + return 1 + } + for i := range this.Oneofs { + if this.Oneofs[i] != that1.Oneofs[i] { + if this.Oneofs[i] < that1.Oneofs[i] { + return -1 + } + return 1 + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + return 0 +} +func (this *Field) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Field) + if !ok { + that2, ok := that.(Field) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Kind != that1.Kind { + if this.Kind < that1.Kind { + return -1 + } + return 1 + } + if this.Cardinality != that1.Cardinality { + if this.Cardinality < that1.Cardinality { + return -1 + } + return 1 + } + if this.Number != that1.Number { + if this.Number < that1.Number { + return -1 + } + return 1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.TypeUrl != that1.TypeUrl { + if this.TypeUrl < that1.TypeUrl { + return -1 + } + return 1 + } + if this.OneofIndex != that1.OneofIndex { + if this.OneofIndex < that1.OneofIndex { + return -1 + } + return 1 + } + if this.Packed != that1.Packed { + if !this.Packed { + return -1 + } + return 1 + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := 
this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if this.JsonName != that1.JsonName { + if this.JsonName < that1.JsonName { + return -1 + } + return 1 + } + if this.DefaultValue != that1.DefaultValue { + if this.DefaultValue < that1.DefaultValue { + return -1 + } + return 1 + } + return 0 +} +func (this *Enum) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Enum) + if !ok { + that2, ok := that.(Enum) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if len(this.Enumvalue) != len(that1.Enumvalue) { + if len(this.Enumvalue) < len(that1.Enumvalue) { + return -1 + } + return 1 + } + for i := range this.Enumvalue { + if c := this.Enumvalue[i].Compare(that1.Enumvalue[i]); c != 0 { + return c + } + } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + if c := this.SourceContext.Compare(that1.SourceContext); c != 0 { + return c + } + if this.Syntax != that1.Syntax { + if this.Syntax < that1.Syntax { + return -1 + } + return 1 + } + return 0 +} +func (this *EnumValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*EnumValue) + if !ok { + that2, ok := that.(EnumValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if this.Number != that1.Number { + if this.Number < that1.Number { + return -1 + } + return 1 
+ } + if len(this.Options) != len(that1.Options) { + if len(this.Options) < len(that1.Options) { + return -1 + } + return 1 + } + for i := range this.Options { + if c := this.Options[i].Compare(that1.Options[i]); c != 0 { + return c + } + } + return 0 +} +func (this *Option) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Option) + if !ok { + that2, ok := that.(Option) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Name != that1.Name { + if this.Name < that1.Name { + return -1 + } + return 1 + } + if c := this.Value.Compare(that1.Value); c != 0 { + return c + } + return 0 +} +func (x Syntax) String() string { + s, ok := Syntax_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Field_Kind) String() string { + s, ok := Field_Kind_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (x Field_Cardinality) String() string { + s, ok := Field_Cardinality_name[int32(x)] + if ok { + return s + } + return strconv.Itoa(int(x)) +} +func (this *Type) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Type) + if !ok { + that2, ok := that.(Type) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Fields) != len(that1.Fields) { + return false + } + for i := range this.Fields { + if !this.Fields[i].Equal(that1.Fields[i]) { + return false + } + } + if len(this.Oneofs) != len(that1.Oneofs) { + return false + } + for i := range this.Oneofs { + if this.Oneofs[i] != that1.Oneofs[i] { + return false + } + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if 
!this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + } + if this.Syntax != that1.Syntax { + return false + } + return true +} +func (this *Field) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Field) + if !ok { + that2, ok := that.(Field) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Kind != that1.Kind { + return false + } + if this.Cardinality != that1.Cardinality { + return false + } + if this.Number != that1.Number { + return false + } + if this.Name != that1.Name { + return false + } + if this.TypeUrl != that1.TypeUrl { + return false + } + if this.OneofIndex != that1.OneofIndex { + return false + } + if this.Packed != that1.Packed { + return false + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if this.JsonName != that1.JsonName { + return false + } + if this.DefaultValue != that1.DefaultValue { + return false + } + return true +} +func (this *Enum) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Enum) + if !ok { + that2, ok := that.(Enum) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if len(this.Enumvalue) != len(that1.Enumvalue) { + return false + } + for i := range this.Enumvalue { + if !this.Enumvalue[i].Equal(that1.Enumvalue[i]) { + return false + } + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + if !this.SourceContext.Equal(that1.SourceContext) { + return false + 
} + if this.Syntax != that1.Syntax { + return false + } + return true +} +func (this *EnumValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*EnumValue) + if !ok { + that2, ok := that.(EnumValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if this.Number != that1.Number { + return false + } + if len(this.Options) != len(that1.Options) { + return false + } + for i := range this.Options { + if !this.Options[i].Equal(that1.Options[i]) { + return false + } + } + return true +} +func (this *Option) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Option) + if !ok { + that2, ok := that.(Option) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Name != that1.Name { + return false + } + if !this.Value.Equal(that1.Value) { + return false + } + return true +} +func (this *Type) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 10) + s = append(s, "&types.Type{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Fields != nil { + s = append(s, "Fields: "+fmt.Sprintf("%#v", this.Fields)+",\n") + } + s = append(s, "Oneofs: "+fmt.Sprintf("%#v", this.Oneofs)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Field) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 14) + s = append(s, "&types.Field{") + s = append(s, "Kind: 
"+fmt.Sprintf("%#v", this.Kind)+",\n") + s = append(s, "Cardinality: "+fmt.Sprintf("%#v", this.Cardinality)+",\n") + s = append(s, "Number: "+fmt.Sprintf("%#v", this.Number)+",\n") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "TypeUrl: "+fmt.Sprintf("%#v", this.TypeUrl)+",\n") + s = append(s, "OneofIndex: "+fmt.Sprintf("%#v", this.OneofIndex)+",\n") + s = append(s, "Packed: "+fmt.Sprintf("%#v", this.Packed)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "JsonName: "+fmt.Sprintf("%#v", this.JsonName)+",\n") + s = append(s, "DefaultValue: "+fmt.Sprintf("%#v", this.DefaultValue)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Enum) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 9) + s = append(s, "&types.Enum{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Enumvalue != nil { + s = append(s, "Enumvalue: "+fmt.Sprintf("%#v", this.Enumvalue)+",\n") + } + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + if this.SourceContext != nil { + s = append(s, "SourceContext: "+fmt.Sprintf("%#v", this.SourceContext)+",\n") + } + s = append(s, "Syntax: "+fmt.Sprintf("%#v", this.Syntax)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *EnumValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 7) + s = append(s, "&types.EnumValue{") + s = append(s, "Name: "+fmt.Sprintf("%#v", this.Name)+",\n") + s = append(s, "Number: "+fmt.Sprintf("%#v", this.Number)+",\n") + if this.Options != nil { + s = append(s, "Options: "+fmt.Sprintf("%#v", this.Options)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Option) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 6) + s = append(s, "&types.Option{") + s = append(s, "Name: 
"+fmt.Sprintf("%#v", this.Name)+",\n") + if this.Value != nil { + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + } + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringType(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *Type) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Type) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Fields) > 0 { + for _, msg := range m.Fields { + dAtA[i] = 0x12 + i++ + i = encodeVarintType(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Oneofs) > 0 { + for _, s := range m.Oneofs { + dAtA[i] = 0x1a + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if len(m.Options) > 0 { + for _, msg := range m.Options { + dAtA[i] = 0x22 + i++ + i = encodeVarintType(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.SourceContext != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintType(dAtA, i, uint64(m.SourceContext.Size())) + n1, err := m.SourceContext.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + } + if m.Syntax != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintType(dAtA, i, uint64(m.Syntax)) + } + return i, nil +} + +func (m *Field) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Field) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Kind != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintType(dAtA, i, uint64(m.Kind)) + } + if m.Cardinality != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintType(dAtA, i, uint64(m.Cardinality)) + } + if m.Number != 0 { + dAtA[i] = 0x18 + i++ + i = encodeVarintType(dAtA, i, uint64(m.Number)) + } + if len(m.Name) > 0 { + dAtA[i] = 0x22 + i++ + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.TypeUrl) > 0 { + dAtA[i] = 0x32 + i++ + i = encodeVarintType(dAtA, i, uint64(len(m.TypeUrl))) + i += copy(dAtA[i:], m.TypeUrl) + } + if m.OneofIndex != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintType(dAtA, i, uint64(m.OneofIndex)) + } + if m.Packed { + dAtA[i] = 0x40 + i++ + if m.Packed { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Options) > 0 { + for _, msg := range m.Options { + dAtA[i] = 0x4a + i++ + i = encodeVarintType(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.JsonName) > 0 { + dAtA[i] = 0x52 + i++ + i = encodeVarintType(dAtA, i, uint64(len(m.JsonName))) + i += copy(dAtA[i:], m.JsonName) + } + if len(m.DefaultValue) > 0 { + dAtA[i] = 0x5a + i++ + i = encodeVarintType(dAtA, i, uint64(len(m.DefaultValue))) + i += copy(dAtA[i:], m.DefaultValue) + } + return i, nil +} + +func (m *Enum) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Enum) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if len(m.Enumvalue) > 0 { + for _, msg := range 
m.Enumvalue { + dAtA[i] = 0x12 + i++ + i = encodeVarintType(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if len(m.Options) > 0 { + for _, msg := range m.Options { + dAtA[i] = 0x1a + i++ + i = encodeVarintType(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.SourceContext != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintType(dAtA, i, uint64(m.SourceContext.Size())) + n2, err := m.SourceContext.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.Syntax != 0 { + dAtA[i] = 0x28 + i++ + i = encodeVarintType(dAtA, i, uint64(m.Syntax)) + } + return i, nil +} + +func (m *EnumValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *EnumValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Number != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintType(dAtA, i, uint64(m.Number)) + } + if len(m.Options) > 0 { + for _, msg := range m.Options { + dAtA[i] = 0x1a + i++ + i = encodeVarintType(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *Option) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Option) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Name) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintType(dAtA, i, uint64(len(m.Name))) + i += copy(dAtA[i:], m.Name) + } + if m.Value != nil { + dAtA[i] = 0x12 + i++ + i 
= encodeVarintType(dAtA, i, uint64(m.Value.Size())) + n3, err := m.Value.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + return i, nil +} + +func encodeVarintType(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedType(r randyType, easy bool) *Type { + this := &Type{} + this.Name = string(randStringType(r)) + if r.Intn(10) != 0 { + v1 := r.Intn(5) + this.Fields = make([]*Field, v1) + for i := 0; i < v1; i++ { + this.Fields[i] = NewPopulatedField(r, easy) + } + } + v2 := r.Intn(10) + this.Oneofs = make([]string, v2) + for i := 0; i < v2; i++ { + this.Oneofs[i] = string(randStringType(r)) + } + if r.Intn(10) != 0 { + v3 := r.Intn(5) + this.Options = make([]*Option, v3) + for i := 0; i < v3; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if r.Intn(10) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedField(r randyType, easy bool) *Field { + this := &Field{} + this.Kind = Field_Kind([]int32{0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18}[r.Intn(19)]) + this.Cardinality = Field_Cardinality([]int32{0, 1, 2, 3}[r.Intn(4)]) + this.Number = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Number *= -1 + } + this.Name = string(randStringType(r)) + this.TypeUrl = string(randStringType(r)) + this.OneofIndex = int32(r.Int31()) + if r.Intn(2) == 0 { + this.OneofIndex *= -1 + } + this.Packed = bool(bool(r.Intn(2) == 0)) + if r.Intn(10) != 0 { + v4 := r.Intn(5) + this.Options = make([]*Option, v4) + for i := 0; i < v4; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + this.JsonName = string(randStringType(r)) + this.DefaultValue = string(randStringType(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedEnum(r 
randyType, easy bool) *Enum { + this := &Enum{} + this.Name = string(randStringType(r)) + if r.Intn(10) != 0 { + v5 := r.Intn(5) + this.Enumvalue = make([]*EnumValue, v5) + for i := 0; i < v5; i++ { + this.Enumvalue[i] = NewPopulatedEnumValue(r, easy) + } + } + if r.Intn(10) != 0 { + v6 := r.Intn(5) + this.Options = make([]*Option, v6) + for i := 0; i < v6; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if r.Intn(10) != 0 { + this.SourceContext = NewPopulatedSourceContext(r, easy) + } + this.Syntax = Syntax([]int32{0, 1}[r.Intn(2)]) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedEnumValue(r randyType, easy bool) *EnumValue { + this := &EnumValue{} + this.Name = string(randStringType(r)) + this.Number = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Number *= -1 + } + if r.Intn(10) != 0 { + v7 := r.Intn(5) + this.Options = make([]*Option, v7) + for i := 0; i < v7; i++ { + this.Options[i] = NewPopulatedOption(r, easy) + } + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedOption(r randyType, easy bool) *Option { + this := &Option{} + this.Name = string(randStringType(r)) + if r.Intn(10) != 0 { + this.Value = NewPopulatedAny(r, easy) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyType interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneType(r randyType) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringType(r randyType) string { + v8 := r.Intn(100) + tmps := make([]rune, v8) + for i := 0; i < v8; i++ { + tmps[i] = randUTF8RuneType(r) + } + return string(tmps) +} +func randUnrecognizedType(r randyType, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = 
randFieldType(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldType(dAtA []byte, r randyType, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + v9 := r.Int63() + if r.Intn(2) == 0 { + v9 *= -1 + } + dAtA = encodeVarintPopulateType(dAtA, uint64(v9)) + case 1: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateType(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateType(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateType(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *Type) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if len(m.Fields) > 0 { + for _, e := range m.Fields { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Oneofs) > 0 { + for _, s := range m.Oneofs { + l = len(s) + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.Syntax != 0 { + n += 1 + sovType(uint64(m.Syntax)) + } + return n +} + +func (m *Field) Size() (n int) { + var l int + _ = l + if m.Kind != 0 { + n += 1 + sovType(uint64(m.Kind)) + } + if m.Cardinality != 0 { + n += 1 + 
sovType(uint64(m.Cardinality)) + } + if m.Number != 0 { + n += 1 + sovType(uint64(m.Number)) + } + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + l = len(m.TypeUrl) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.OneofIndex != 0 { + n += 1 + sovType(uint64(m.OneofIndex)) + } + if m.Packed { + n += 2 + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + l = len(m.JsonName) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + l = len(m.DefaultValue) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + return n +} + +func (m *Enum) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if len(m.Enumvalue) > 0 { + for _, e := range m.Enumvalue { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + if m.SourceContext != nil { + l = m.SourceContext.Size() + n += 1 + l + sovType(uint64(l)) + } + if m.Syntax != 0 { + n += 1 + sovType(uint64(m.Syntax)) + } + return n +} + +func (m *EnumValue) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.Number != 0 { + n += 1 + sovType(uint64(m.Number)) + } + if len(m.Options) > 0 { + for _, e := range m.Options { + l = e.Size() + n += 1 + l + sovType(uint64(l)) + } + } + return n +} + +func (m *Option) Size() (n int) { + var l int + _ = l + l = len(m.Name) + if l > 0 { + n += 1 + l + sovType(uint64(l)) + } + if m.Value != nil { + l = m.Value.Size() + n += 1 + l + sovType(uint64(l)) + } + return n +} + +func sovType(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozType(x uint64) (n int) { + return sovType(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *Type) String() string { + if this == nil { + return "nil" + } + s := 
strings.Join([]string{`&Type{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Fields:` + strings.Replace(fmt.Sprintf("%v", this.Fields), "Field", "Field", 1) + `,`, + `Oneofs:` + fmt.Sprintf("%v", this.Oneofs) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Option", "Option", 1) + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `}`, + }, "") + return s +} +func (this *Field) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Field{`, + `Kind:` + fmt.Sprintf("%v", this.Kind) + `,`, + `Cardinality:` + fmt.Sprintf("%v", this.Cardinality) + `,`, + `Number:` + fmt.Sprintf("%v", this.Number) + `,`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `TypeUrl:` + fmt.Sprintf("%v", this.TypeUrl) + `,`, + `OneofIndex:` + fmt.Sprintf("%v", this.OneofIndex) + `,`, + `Packed:` + fmt.Sprintf("%v", this.Packed) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Option", "Option", 1) + `,`, + `JsonName:` + fmt.Sprintf("%v", this.JsonName) + `,`, + `DefaultValue:` + fmt.Sprintf("%v", this.DefaultValue) + `,`, + `}`, + }, "") + return s +} +func (this *Enum) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Enum{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Enumvalue:` + strings.Replace(fmt.Sprintf("%v", this.Enumvalue), "EnumValue", "EnumValue", 1) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Option", "Option", 1) + `,`, + `SourceContext:` + strings.Replace(fmt.Sprintf("%v", this.SourceContext), "SourceContext", "SourceContext", 1) + `,`, + `Syntax:` + fmt.Sprintf("%v", this.Syntax) + `,`, + `}`, + }, "") + return s +} +func (this *EnumValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&EnumValue{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Number:` 
+ fmt.Sprintf("%v", this.Number) + `,`, + `Options:` + strings.Replace(fmt.Sprintf("%v", this.Options), "Option", "Option", 1) + `,`, + `}`, + }, "") + return s +} +func (this *Option) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Option{`, + `Name:` + fmt.Sprintf("%v", this.Name) + `,`, + `Value:` + strings.Replace(fmt.Sprintf("%v", this.Value), "Any", "Any", 1) + `,`, + `}`, + }, "") + return s +} +func valueToStringType(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *Type) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Type: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Type: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field 
Fields", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Fields = append(m.Fields, &Field{}) + if err := m.Fields[len(m.Fields)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Oneofs", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Oneofs = append(m.Oneofs, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", 
wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= (Syntax(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Field) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Field: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Field: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong 
wireType = %d for field Kind", wireType) + } + m.Kind = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Kind |= (Field_Kind(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Cardinality", wireType) + } + m.Cardinality = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Cardinality |= (Field_Cardinality(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType) + } + m.Number = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Number |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TypeUrl", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + 
stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.TypeUrl = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field OneofIndex", wireType) + } + m.OneofIndex = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.OneofIndex |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Packed", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Packed = bool(v != 0) + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field JsonName", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l 
{ + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.JsonName = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 11: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field DefaultValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.DefaultValue = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Enum) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Enum: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Enum: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: 
+ if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Enumvalue", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Enumvalue = append(m.Enumvalue, &EnumValue{}) + if err := m.Enumvalue[len(m.Enumvalue)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 4: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field SourceContext", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.SourceContext == nil { + m.SourceContext = &SourceContext{} + } + if err := m.SourceContext.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Syntax", wireType) + } + m.Syntax = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Syntax |= (Syntax(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *EnumValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: EnumValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: EnumValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + 
switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Number", wireType) + } + m.Number = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Number |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Options", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Options = append(m.Options, &Option{}) + if err := m.Options[len(m.Options)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func 
(m *Option) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Option: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Option: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Name", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Name = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowType + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthType + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Value == nil { + m.Value = &Any{} + } + if err := m.Value.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipType(dAtA[iNdEx:]) + 
if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthType + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipType(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthType + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowType + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipType(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + 
ErrInvalidLengthType = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowType = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("google/protobuf/type.proto", fileDescriptorType) } + +var fileDescriptorType = []byte{ + // 840 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x55, 0xcf, 0x8f, 0xda, 0x46, + 0x14, 0x66, 0x8c, 0xf1, 0xe2, 0x47, 0x60, 0x27, 0x93, 0x28, 0x71, 0x36, 0x92, 0x8b, 0x68, 0x0f, + 0x28, 0x07, 0x56, 0x85, 0xd5, 0xaa, 0x57, 0x58, 0xbc, 0x14, 0x2d, 0xb1, 0xdd, 0xc1, 0x34, 0xd9, + 0x5e, 0x10, 0x0b, 0xde, 0x88, 0xc4, 0x8c, 0x11, 0x36, 0xed, 0x72, 0xab, 0xd4, 0x73, 0xff, 0x89, + 0x9e, 0xaa, 0x9e, 0xfb, 0x47, 0xe4, 0x98, 0x63, 0x8f, 0x5d, 0x7a, 0xe9, 0x31, 0xc7, 0xdc, 0x5a, + 0xcd, 0x18, 0xbc, 0xe6, 0x47, 0xa5, 0xb4, 0xbd, 0xf1, 0xbe, 0xef, 0x7b, 0x3f, 0xe7, 0xf9, 0x01, + 0x47, 0xaf, 0x7c, 0xff, 0x95, 0xe7, 0x1e, 0x4f, 0x67, 0x7e, 0xe8, 0x5f, 0xcd, 0xaf, 0x8f, 0xc3, + 0xc5, 0xd4, 0xad, 0x08, 0x8b, 0x1c, 0x46, 0x5c, 0x65, 0xcd, 0x1d, 0x3d, 0xd9, 0x16, 0x0f, 0xd8, + 0x22, 0x62, 0x8f, 0x3e, 0xdb, 0xa6, 0x02, 0x7f, 0x3e, 0x1b, 0xba, 0xfd, 0xa1, 0xcf, 0x42, 0xf7, + 0x26, 0x8c, 0x54, 0xa5, 0x1f, 0x25, 0x90, 0x9d, 0xc5, 0xd4, 0x25, 0x04, 0x64, 0x36, 0x98, 0xb8, + 0x1a, 0x2a, 0xa2, 0xb2, 0x4a, 0xc5, 0x6f, 0x52, 0x01, 0xe5, 0x7a, 0xec, 0x7a, 0xa3, 0x40, 0x93, + 0x8a, 0xe9, 0x72, 0xae, 0xfa, 0xa8, 0xb2, 0x95, 0xbf, 0x72, 0xce, 0x69, 0xba, 0x52, 0x91, 0x47, + 0xa0, 0xf8, 0xcc, 0xf5, 0xaf, 0x03, 0x2d, 0x5d, 0x4c, 0x97, 0x55, 0xba, 0xb2, 0xc8, 0xe7, 0x70, + 0xe0, 0x4f, 0xc3, 0xb1, 0xcf, 0x02, 0x4d, 0x16, 0x81, 0x1e, 0xef, 0x04, 0xb2, 0x04, 0x4f, 0xd7, + 0x3a, 0x62, 0x40, 0x61, 0xb3, 0x5e, 0x2d, 0x53, 0x44, 0xe5, 0x5c, 0x55, 0xdf, 0xf1, 0xec, 0x0a, + 0xd9, 0x59, 0xa4, 0xa2, 0xf9, 0x20, 0x69, 0x92, 0x63, 0x50, 0x82, 0x05, 0x0b, 0x07, 0x37, 0x9a, + 0x52, 0x44, 0xe5, 0xc2, 0x9e, 0xc4, 0x5d, 0x41, 0xd3, 0x95, 0xac, 0xf4, 0xab, 0x02, 0x19, 0xd1, + 0x14, 
0x39, 0x06, 0xf9, 0xcd, 0x98, 0x8d, 0xc4, 0x40, 0x0a, 0xd5, 0xa7, 0xfb, 0x5b, 0xaf, 0x5c, + 0x8c, 0xd9, 0x88, 0x0a, 0x21, 0x69, 0x42, 0x6e, 0x38, 0x98, 0x8d, 0xc6, 0x6c, 0xe0, 0x8d, 0xc3, + 0x85, 0x26, 0x09, 0xbf, 0xd2, 0x3f, 0xf8, 0x9d, 0xdd, 0x29, 0x69, 0xd2, 0x8d, 0xcf, 0x90, 0xcd, + 0x27, 0x57, 0xee, 0x4c, 0x4b, 0x17, 0x51, 0x39, 0x43, 0x57, 0x56, 0xfc, 0x3e, 0x72, 0xe2, 0x7d, + 0x9e, 0x40, 0x96, 0x2f, 0x47, 0x7f, 0x3e, 0xf3, 0x44, 0x7f, 0x2a, 0x3d, 0xe0, 0x76, 0x6f, 0xe6, + 0x91, 0x4f, 0x20, 0x27, 0x86, 0xdf, 0x1f, 0xb3, 0x91, 0x7b, 0xa3, 0x1d, 0x88, 0x58, 0x20, 0xa0, + 0x36, 0x47, 0x78, 0x9e, 0xe9, 0x60, 0xf8, 0xc6, 0x1d, 0x69, 0xd9, 0x22, 0x2a, 0x67, 0xe9, 0xca, + 0x4a, 0xbe, 0x95, 0xfa, 0x91, 0x6f, 0xf5, 0x14, 0xd4, 0xd7, 0x81, 0xcf, 0xfa, 0xa2, 0x3e, 0x10, + 0x75, 0x64, 0x39, 0x60, 0xf2, 0x1a, 0x3f, 0x85, 0xfc, 0xc8, 0xbd, 0x1e, 0xcc, 0xbd, 0xb0, 0xff, + 0xed, 0xc0, 0x9b, 0xbb, 0x5a, 0x4e, 0x08, 0xee, 0xad, 0xc0, 0xaf, 0x39, 0x56, 0x7a, 0x2b, 0x81, + 0xcc, 0x27, 0x49, 0x30, 0xdc, 0x73, 0x2e, 0x6d, 0xa3, 0xdf, 0x33, 0x2f, 0x4c, 0xeb, 0x85, 0x89, + 0x53, 0xe4, 0x10, 0x72, 0x02, 0x69, 0x5a, 0xbd, 0x46, 0xc7, 0xc0, 0x88, 0x14, 0x00, 0x04, 0x70, + 0xde, 0xb1, 0xea, 0x0e, 0x96, 0x62, 0xbb, 0x6d, 0x3a, 0xa7, 0x27, 0x38, 0x1d, 0x3b, 0xf4, 0x22, + 0x40, 0x4e, 0x0a, 0x6a, 0x55, 0x9c, 0x89, 0x73, 0x9c, 0xb7, 0x5f, 0x1a, 0xcd, 0xd3, 0x13, 0xac, + 0x6c, 0x22, 0xb5, 0x2a, 0x3e, 0x20, 0x79, 0x50, 0x05, 0xd2, 0xb0, 0xac, 0x0e, 0xce, 0xc6, 0x31, + 0xbb, 0x0e, 0x6d, 0x9b, 0x2d, 0xac, 0xc6, 0x31, 0x5b, 0xd4, 0xea, 0xd9, 0x18, 0xe2, 0x08, 0xcf, + 0x8d, 0x6e, 0xb7, 0xde, 0x32, 0x70, 0x2e, 0x56, 0x34, 0x2e, 0x1d, 0xa3, 0x8b, 0xef, 0x6d, 0x94, + 0x55, 0xab, 0xe2, 0x7c, 0x9c, 0xc2, 0x30, 0x7b, 0xcf, 0x71, 0x81, 0xdc, 0x87, 0x7c, 0x94, 0x62, + 0x5d, 0xc4, 0xe1, 0x16, 0x74, 0x7a, 0x82, 0xf1, 0x5d, 0x21, 0x51, 0x94, 0xfb, 0x1b, 0xc0, 0xe9, + 0x09, 0x26, 0xa5, 0x10, 0x72, 0x89, 0xdd, 0x22, 0x8f, 0xe1, 0xc1, 0x59, 0x9d, 0x36, 0xdb, 0x66, + 0xbd, 0xd3, 0x76, 0x2e, 0x13, 0x73, 0xd5, 
0xe0, 0x61, 0x92, 0xb0, 0x6c, 0xa7, 0x6d, 0x99, 0xf5, + 0x0e, 0x46, 0xdb, 0x0c, 0x35, 0xbe, 0xea, 0xb5, 0xa9, 0xd1, 0xc4, 0xd2, 0x2e, 0x63, 0x1b, 0x75, + 0xc7, 0x68, 0xe2, 0x74, 0xe9, 0x2f, 0x04, 0xb2, 0xc1, 0xe6, 0x93, 0xbd, 0x67, 0xe4, 0x0b, 0x50, + 0x5d, 0x36, 0x9f, 0x44, 0xcf, 0x1f, 0x5d, 0x92, 0xa3, 0x9d, 0xa5, 0xe2, 0xde, 0x62, 0x19, 0xe8, + 0x9d, 0x38, 0xb9, 0x8c, 0xe9, 0xff, 0x7c, 0x38, 0xe4, 0xff, 0x77, 0x38, 0x32, 0x1f, 0x77, 0x38, + 0x5e, 0x83, 0x1a, 0xb7, 0xb0, 0x77, 0x0a, 0x77, 0x1f, 0xb6, 0xb4, 0xf1, 0x61, 0xff, 0xfb, 0x1e, + 0x4b, 0x5f, 0x82, 0x12, 0x41, 0x7b, 0x13, 0x3d, 0x83, 0xcc, 0x7a, 0xd4, 0xbc, 0xf1, 0x87, 0x3b, + 0xe1, 0xea, 0x6c, 0x41, 0x23, 0xc9, 0xb3, 0x0a, 0x28, 0x51, 0x1f, 0x7c, 0xd9, 0xba, 0x97, 0xa6, + 0x53, 0x7f, 0xd9, 0xb7, 0xa9, 0xe5, 0x58, 0x55, 0x9c, 0xda, 0x86, 0x6a, 0x18, 0x35, 0x7e, 0x40, + 0xef, 0x6e, 0xf5, 0xd4, 0x6f, 0xb7, 0x7a, 0xea, 0xfd, 0xad, 0x8e, 0x3e, 0xdc, 0xea, 0xe8, 0xfb, + 0xa5, 0x8e, 0x7e, 0x5e, 0xea, 0xe8, 0xed, 0x52, 0x47, 0xef, 0x96, 0x3a, 0xfa, 0x7d, 0xa9, 0xa3, + 0x3f, 0x97, 0x7a, 0xea, 0x3d, 0xc7, 0xff, 0xd0, 0x11, 0x3c, 0x18, 0xfa, 0x93, 0xed, 0x12, 0x1a, + 0x2a, 0xff, 0xcf, 0xb1, 0xb9, 0x65, 0xa3, 0x6f, 0x32, 0xfc, 0x68, 0x05, 0x1f, 0x10, 0xfa, 0x49, + 0x4a, 0xb7, 0xec, 0xc6, 0x2f, 0x92, 0xde, 0x8a, 0xe4, 0xf6, 0xba, 0xe2, 0x17, 0xae, 0xe7, 0x5d, + 0x30, 0xff, 0x3b, 0xc6, 0xdd, 0x82, 0x2b, 0x45, 0xc4, 0xa9, 0xfd, 0x1d, 0x00, 0x00, 0xff, 0xff, + 0x1a, 0xcd, 0x2b, 0x09, 0x2b, 0x07, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/types/wrappers.pb.go b/vendor/github.com/gogo/protobuf/types/wrappers.pb.go new file mode 100644 index 000000000..7916fcb8f --- /dev/null +++ b/vendor/github.com/gogo/protobuf/types/wrappers.pb.go @@ -0,0 +1,2157 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. 
+// source: google/protobuf/wrappers.proto + +package types + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" + +import bytes "bytes" + +import strings "strings" +import reflect "reflect" + +import binary "encoding/binary" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Wrapper message for `double`. +// +// The JSON representation for `DoubleValue` is JSON number. +type DoubleValue struct { + // The double value. + Value float64 `protobuf:"fixed64,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *DoubleValue) Reset() { *m = DoubleValue{} } +func (*DoubleValue) ProtoMessage() {} +func (*DoubleValue) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{0} } +func (*DoubleValue) XXX_WellKnownType() string { return "DoubleValue" } + +func (m *DoubleValue) GetValue() float64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `float`. +// +// The JSON representation for `FloatValue` is JSON number. +type FloatValue struct { + // The float value. + Value float32 `protobuf:"fixed32,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *FloatValue) Reset() { *m = FloatValue{} } +func (*FloatValue) ProtoMessage() {} +func (*FloatValue) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{1} } +func (*FloatValue) XXX_WellKnownType() string { return "FloatValue" } + +func (m *FloatValue) GetValue() float32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int64`. +// +// The JSON representation for `Int64Value` is JSON string. +type Int64Value struct { + // The int64 value. 
+ Value int64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Int64Value) Reset() { *m = Int64Value{} } +func (*Int64Value) ProtoMessage() {} +func (*Int64Value) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{2} } +func (*Int64Value) XXX_WellKnownType() string { return "Int64Value" } + +func (m *Int64Value) GetValue() int64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint64`. +// +// The JSON representation for `UInt64Value` is JSON string. +type UInt64Value struct { + // The uint64 value. + Value uint64 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *UInt64Value) Reset() { *m = UInt64Value{} } +func (*UInt64Value) ProtoMessage() {} +func (*UInt64Value) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{3} } +func (*UInt64Value) XXX_WellKnownType() string { return "UInt64Value" } + +func (m *UInt64Value) GetValue() uint64 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `int32`. +// +// The JSON representation for `Int32Value` is JSON number. +type Int32Value struct { + // The int32 value. + Value int32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Int32Value) Reset() { *m = Int32Value{} } +func (*Int32Value) ProtoMessage() {} +func (*Int32Value) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{4} } +func (*Int32Value) XXX_WellKnownType() string { return "Int32Value" } + +func (m *Int32Value) GetValue() int32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `uint32`. +// +// The JSON representation for `UInt32Value` is JSON number. +type UInt32Value struct { + // The uint32 value. 
+ Value uint32 `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *UInt32Value) Reset() { *m = UInt32Value{} } +func (*UInt32Value) ProtoMessage() {} +func (*UInt32Value) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{5} } +func (*UInt32Value) XXX_WellKnownType() string { return "UInt32Value" } + +func (m *UInt32Value) GetValue() uint32 { + if m != nil { + return m.Value + } + return 0 +} + +// Wrapper message for `bool`. +// +// The JSON representation for `BoolValue` is JSON `true` and `false`. +type BoolValue struct { + // The bool value. + Value bool `protobuf:"varint,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *BoolValue) Reset() { *m = BoolValue{} } +func (*BoolValue) ProtoMessage() {} +func (*BoolValue) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{6} } +func (*BoolValue) XXX_WellKnownType() string { return "BoolValue" } + +func (m *BoolValue) GetValue() bool { + if m != nil { + return m.Value + } + return false +} + +// Wrapper message for `string`. +// +// The JSON representation for `StringValue` is JSON string. +type StringValue struct { + // The string value. + Value string `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *StringValue) Reset() { *m = StringValue{} } +func (*StringValue) ProtoMessage() {} +func (*StringValue) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{7} } +func (*StringValue) XXX_WellKnownType() string { return "StringValue" } + +func (m *StringValue) GetValue() string { + if m != nil { + return m.Value + } + return "" +} + +// Wrapper message for `bytes`. +// +// The JSON representation for `BytesValue` is JSON string. +type BytesValue struct { + // The bytes value. 
+ Value []byte `protobuf:"bytes,1,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *BytesValue) Reset() { *m = BytesValue{} } +func (*BytesValue) ProtoMessage() {} +func (*BytesValue) Descriptor() ([]byte, []int) { return fileDescriptorWrappers, []int{8} } +func (*BytesValue) XXX_WellKnownType() string { return "BytesValue" } + +func (m *BytesValue) GetValue() []byte { + if m != nil { + return m.Value + } + return nil +} + +func init() { + proto.RegisterType((*DoubleValue)(nil), "google.protobuf.DoubleValue") + proto.RegisterType((*FloatValue)(nil), "google.protobuf.FloatValue") + proto.RegisterType((*Int64Value)(nil), "google.protobuf.Int64Value") + proto.RegisterType((*UInt64Value)(nil), "google.protobuf.UInt64Value") + proto.RegisterType((*Int32Value)(nil), "google.protobuf.Int32Value") + proto.RegisterType((*UInt32Value)(nil), "google.protobuf.UInt32Value") + proto.RegisterType((*BoolValue)(nil), "google.protobuf.BoolValue") + proto.RegisterType((*StringValue)(nil), "google.protobuf.StringValue") + proto.RegisterType((*BytesValue)(nil), "google.protobuf.BytesValue") +} +func (this *DoubleValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*DoubleValue) + if !ok { + that2, ok := that.(DoubleValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *FloatValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*FloatValue) + if !ok { + that2, ok := that.(FloatValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if 
this.Value < that1.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *Int64Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Int64Value) + if !ok { + that2, ok := that.(Int64Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *UInt64Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*UInt64Value) + if !ok { + that2, ok := that.(UInt64Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *Int32Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*Int32Value) + if !ok { + that2, ok := that.(Int32Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *UInt32Value) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*UInt32Value) + if !ok { + that2, ok := that.(UInt32Value) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + return 0 +} +func 
(this *BoolValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*BoolValue) + if !ok { + that2, ok := that.(BoolValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if !this.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *StringValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*StringValue) + if !ok { + that2, ok := that.(StringValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if this.Value != that1.Value { + if this.Value < that1.Value { + return -1 + } + return 1 + } + return 0 +} +func (this *BytesValue) Compare(that interface{}) int { + if that == nil { + if this == nil { + return 0 + } + return 1 + } + + that1, ok := that.(*BytesValue) + if !ok { + that2, ok := that.(BytesValue) + if ok { + that1 = &that2 + } else { + return 1 + } + } + if that1 == nil { + if this == nil { + return 0 + } + return 1 + } else if this == nil { + return -1 + } + if c := bytes.Compare(this.Value, that1.Value); c != 0 { + return c + } + return 0 +} +func (this *DoubleValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*DoubleValue) + if !ok { + that2, ok := that.(DoubleValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *FloatValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*FloatValue) + if !ok { + that2, ok := that.(FloatValue) + if ok { + that1 = 
&that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *Int64Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Int64Value) + if !ok { + that2, ok := that.(Int64Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *UInt64Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UInt64Value) + if !ok { + that2, ok := that.(UInt64Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *Int32Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*Int32Value) + if !ok { + that2, ok := that.(Int32Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *UInt32Value) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*UInt32Value) + if !ok { + that2, ok := that.(UInt32Value) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *BoolValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BoolValue) + if !ok { + that2, ok := that.(BoolValue) + if ok { + that1 = &that2 + } else 
{ + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *StringValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*StringValue) + if !ok { + that2, ok := that.(StringValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if this.Value != that1.Value { + return false + } + return true +} +func (this *BytesValue) Equal(that interface{}) bool { + if that == nil { + return this == nil + } + + that1, ok := that.(*BytesValue) + if !ok { + that2, ok := that.(BytesValue) + if ok { + that1 = &that2 + } else { + return false + } + } + if that1 == nil { + return this == nil + } else if this == nil { + return false + } + if !bytes.Equal(this.Value, that1.Value) { + return false + } + return true +} +func (this *DoubleValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.DoubleValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *FloatValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.FloatValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Int64Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Int64Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UInt64Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.UInt64Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", 
this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *Int32Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.Int32Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *UInt32Value) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.UInt32Value{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *BoolValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.BoolValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *StringValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.StringValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func (this *BytesValue) GoString() string { + if this == nil { + return "nil" + } + s := make([]string, 0, 5) + s = append(s, "&types.BytesValue{") + s = append(s, "Value: "+fmt.Sprintf("%#v", this.Value)+",\n") + s = append(s, "}") + return strings.Join(s, "") +} +func valueToGoStringWrappers(v interface{}, typ string) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("func(v %v) *%v { return &v } ( %#v )", typ, typ, pv) +} +func (m *DoubleValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *DoubleValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value != 0 { + dAtA[i] = 0x9 + 
i++ + binary.LittleEndian.PutUint64(dAtA[i:], uint64(math.Float64bits(float64(m.Value)))) + i += 8 + } + return i, nil +} + +func (m *FloatValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *FloatValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value != 0 { + dAtA[i] = 0xd + i++ + binary.LittleEndian.PutUint32(dAtA[i:], uint32(math.Float32bits(float32(m.Value)))) + i += 4 + } + return i, nil +} + +func (m *Int64Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int64Value) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + } + return i, nil +} + +func (m *UInt64Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt64Value) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + } + return i, nil +} + +func (m *Int32Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Int32Value) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + } + return i, nil +} + +func (m *UInt32Value) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *UInt32Value) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintWrappers(dAtA, i, uint64(m.Value)) + } + return i, nil +} + +func (m *BoolValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BoolValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Value { + dAtA[i] = 0x8 + i++ + if m.Value { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + return i, nil +} + +func (m *StringValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *StringValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Value) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintWrappers(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *BytesValue) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *BytesValue) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Value) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintWrappers(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func encodeVarintWrappers(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func NewPopulatedDoubleValue(r randyWrappers, easy bool) *DoubleValue { + this := &DoubleValue{} + this.Value = float64(r.Float64()) + 
if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedFloatValue(r randyWrappers, easy bool) *FloatValue { + this := &FloatValue{} + this.Value = float32(r.Float32()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedInt64Value(r randyWrappers, easy bool) *Int64Value { + this := &Int64Value{} + this.Value = int64(r.Int63()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedUInt64Value(r randyWrappers, easy bool) *UInt64Value { + this := &UInt64Value{} + this.Value = uint64(uint64(r.Uint32())) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedInt32Value(r randyWrappers, easy bool) *Int32Value { + this := &Int32Value{} + this.Value = int32(r.Int31()) + if r.Intn(2) == 0 { + this.Value *= -1 + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedUInt32Value(r randyWrappers, easy bool) *UInt32Value { + this := &UInt32Value{} + this.Value = uint32(r.Uint32()) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedBoolValue(r randyWrappers, easy bool) *BoolValue { + this := &BoolValue{} + this.Value = bool(bool(r.Intn(2) == 0)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedStringValue(r randyWrappers, easy bool) *StringValue { + this := &StringValue{} + this.Value = string(randStringWrappers(r)) + if !easy && r.Intn(10) != 0 { + } + return this +} + +func NewPopulatedBytesValue(r randyWrappers, easy bool) *BytesValue { + this := &BytesValue{} + v1 := r.Intn(100) + this.Value = make([]byte, v1) + for i := 0; i < v1; i++ { + this.Value[i] = byte(r.Intn(256)) + } + if !easy && r.Intn(10) != 0 { + } + return this +} + +type randyWrappers interface { + Float32() float32 + Float64() float64 + Int63() int64 + Int31() int32 + Uint32() uint32 + Intn(n int) int +} + +func randUTF8RuneWrappers(r 
randyWrappers) rune { + ru := r.Intn(62) + if ru < 10 { + return rune(ru + 48) + } else if ru < 36 { + return rune(ru + 55) + } + return rune(ru + 61) +} +func randStringWrappers(r randyWrappers) string { + v2 := r.Intn(100) + tmps := make([]rune, v2) + for i := 0; i < v2; i++ { + tmps[i] = randUTF8RuneWrappers(r) + } + return string(tmps) +} +func randUnrecognizedWrappers(r randyWrappers, maxFieldNumber int) (dAtA []byte) { + l := r.Intn(5) + for i := 0; i < l; i++ { + wire := r.Intn(4) + if wire == 3 { + wire = 5 + } + fieldNumber := maxFieldNumber + r.Intn(100) + dAtA = randFieldWrappers(dAtA, r, fieldNumber, wire) + } + return dAtA +} +func randFieldWrappers(dAtA []byte, r randyWrappers, fieldNumber int, wire int) []byte { + key := uint32(fieldNumber)<<3 | uint32(wire) + switch wire { + case 0: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + v3 := r.Int63() + if r.Intn(2) == 0 { + v3 *= -1 + } + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(v3)) + case 1: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + case 2: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + ll := r.Intn(100) + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(ll)) + for j := 0; j < ll; j++ { + dAtA = append(dAtA, byte(r.Intn(256))) + } + default: + dAtA = encodeVarintPopulateWrappers(dAtA, uint64(key)) + dAtA = append(dAtA, byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256)), byte(r.Intn(256))) + } + return dAtA +} +func encodeVarintPopulateWrappers(dAtA []byte, v uint64) []byte { + for v >= 1<<7 { + dAtA = append(dAtA, uint8(uint64(v)&0x7f|0x80)) + v >>= 7 + } + dAtA = append(dAtA, uint8(v)) + return dAtA +} +func (m *DoubleValue) Size() (n int) { + var l int + _ = l + if m.Value != 0 { + n += 9 + } + return n +} + +func (m *FloatValue) Size() (n int) { + var l int + _ = l + if 
m.Value != 0 { + n += 5 + } + return n +} + +func (m *Int64Value) Size() (n int) { + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + return n +} + +func (m *UInt64Value) Size() (n int) { + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + return n +} + +func (m *Int32Value) Size() (n int) { + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + return n +} + +func (m *UInt32Value) Size() (n int) { + var l int + _ = l + if m.Value != 0 { + n += 1 + sovWrappers(uint64(m.Value)) + } + return n +} + +func (m *BoolValue) Size() (n int) { + var l int + _ = l + if m.Value { + n += 2 + } + return n +} + +func (m *StringValue) Size() (n int) { + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + sovWrappers(uint64(l)) + } + return n +} + +func (m *BytesValue) Size() (n int) { + var l int + _ = l + l = len(m.Value) + if l > 0 { + n += 1 + l + sovWrappers(uint64(l)) + } + return n +} + +func sovWrappers(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozWrappers(x uint64) (n int) { + return sovWrappers(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (this *DoubleValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&DoubleValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *FloatValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&FloatValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Int64Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Int64Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *UInt64Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UInt64Value{`, + `Value:` + fmt.Sprintf("%v", 
this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *Int32Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&Int32Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *UInt32Value) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&UInt32Value{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *BoolValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BoolValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *StringValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&StringValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func (this *BytesValue) String() string { + if this == nil { + return "nil" + } + s := strings.Join([]string{`&BytesValue{`, + `Value:` + fmt.Sprintf("%v", this.Value) + `,`, + `}`, + }, "") + return s +} +func valueToStringWrappers(v interface{}) string { + rv := reflect.ValueOf(v) + if rv.IsNil() { + return "nil" + } + pv := reflect.Indirect(rv).Interface() + return fmt.Sprintf("*%v", pv) +} +func (m *DoubleValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: DoubleValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: DoubleValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 1 { + return 
fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + v = uint64(binary.LittleEndian.Uint64(dAtA[iNdEx:])) + iNdEx += 8 + m.Value = float64(math.Float64frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *FloatValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FloatValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FloatValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 5 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v uint32 + if (iNdEx + 4) > l { + return io.ErrUnexpectedEOF + } + v = uint32(binary.LittleEndian.Uint32(dAtA[iNdEx:])) + iNdEx += 4 + m.Value = float32(math.Float32frombits(v)) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int64Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire 
uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt64Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt64Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt64Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for 
field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Int32Value) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Int32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Int32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (int32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *UInt32Value) 
Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UInt32Value: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UInt32Value: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + m.Value = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Value |= (uint32(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BoolValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BoolValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BoolValue: illegal tag %d (wire type %d)", fieldNum, wire) 
+ } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = bool(v != 0) + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *StringValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StringValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StringValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthWrappers + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = 
string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *BytesValue) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BytesValue: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BytesValue: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowWrappers + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthWrappers + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipWrappers(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthWrappers + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipWrappers(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthWrappers + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowWrappers + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipWrappers(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + 
return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthWrappers = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowWrappers = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("google/protobuf/wrappers.proto", fileDescriptorWrappers) } + +var fileDescriptorWrappers = []byte{ + // 285 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x92, 0x4b, 0xcf, 0xcf, 0x4f, + 0xcf, 0x49, 0xd5, 0x2f, 0x28, 0xca, 0x2f, 0xc9, 0x4f, 0x2a, 0x4d, 0xd3, 0x2f, 0x2f, 0x4a, 0x2c, + 0x28, 0x48, 0x2d, 0x2a, 0xd6, 0x03, 0x8b, 0x08, 0xf1, 0x43, 0xe4, 0xf5, 0x60, 0xf2, 0x4a, 0xca, + 0x5c, 0xdc, 0x2e, 0xf9, 0xa5, 0x49, 0x39, 0xa9, 0x61, 0x89, 0x39, 0xa5, 0xa9, 0x42, 0x22, 0x5c, + 0xac, 0x65, 0x20, 0x86, 0x04, 0xa3, 0x02, 0xa3, 0x06, 0x63, 0x10, 0x84, 0xa3, 0xa4, 0xc4, 0xc5, + 0xe5, 0x96, 0x93, 0x9f, 0x58, 0x82, 0x45, 0x0d, 0x13, 0x92, 0x1a, 0xcf, 0xbc, 0x12, 0x33, 0x13, + 0x2c, 0x6a, 0x98, 0x61, 0x6a, 0x94, 0xb9, 0xb8, 0x43, 0x71, 0x29, 0x62, 0x41, 0x35, 0xc8, 0xd8, + 0x08, 0x8b, 0x1a, 0x56, 0x34, 0x83, 0xb0, 0x2a, 0xe2, 0x85, 0x29, 0x52, 0xe4, 0xe2, 0x74, 0xca, + 0xcf, 0xcf, 0xc1, 0xa2, 0x84, 0x03, 0xc9, 0x9c, 0xe0, 0x92, 0xa2, 0xcc, 0xbc, 0x74, 0x2c, 0x8a, + 0x38, 0x91, 0x1c, 0xe4, 0x54, 0x59, 0x92, 0x5a, 0x8c, 0x45, 0x0d, 0x0f, 0x54, 0x8d, 0x53, 0x3b, + 0xe3, 0x85, 0x87, 0x72, 0x0c, 0x37, 0x1e, 0xca, 0x31, 0x7c, 0x78, 0x28, 0xc7, 0xf8, 0xe3, 0xa1, + 0x1c, 0x63, 0xc3, 0x23, 0x39, 0xc6, 0x15, 0x8f, 0xe4, 0x18, 0x4f, 0x3c, 0x92, 0x63, 0xbc, 0xf0, + 0x48, 0x8e, 0xf1, 0xc1, 0x23, 0x39, 0xc6, 0x17, 0x8f, 0xe4, 0x18, 0x3e, 0x80, 0xc4, 0x1f, 0xcb, + 0x31, 0x72, 0x09, 0x27, 0xe7, 0xe7, 0xea, 0xa1, 0x45, 0x87, 0x13, 0x6f, 0x38, 0x34, 0xbe, 0x02, + 0x40, 0x22, 0x01, 0x8c, 0x51, 0xac, 0x25, 0x95, 0x05, 0xa9, 0xc5, 0x3f, 0x18, 0x19, 0x17, 0x31, + 0x31, 
0xbb, 0x07, 0x38, 0xad, 0x62, 0x92, 0x73, 0x87, 0x68, 0x09, 0x80, 0x6a, 0xd1, 0x0b, 0x4f, + 0xcd, 0xc9, 0xf1, 0xce, 0xcb, 0x2f, 0xcf, 0x0b, 0x01, 0xa9, 0x4c, 0x62, 0x03, 0x9b, 0x65, 0x0c, + 0x08, 0x00, 0x00, 0xff, 0xff, 0xd6, 0x02, 0xeb, 0x7c, 0x0a, 0x02, 0x00, 0x00, +} diff --git a/vendor/github.com/gogo/protobuf/vanity/command/command.go b/vendor/github.com/gogo/protobuf/vanity/command/command.go index 7e8368aa3..eeca42ba0 100644 --- a/vendor/github.com/gogo/protobuf/vanity/command/command.go +++ b/vendor/github.com/gogo/protobuf/vanity/command/command.go @@ -1,7 +1,7 @@ -// Extensions for Protocol Buffers to create more go like structures. +// Protocol Buffers for Go with Gadgets // -// Copyright (c) 2015, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto +// Copyright (c) 2015, The GoGo Authors. All rights reserved. +// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -29,14 +29,11 @@ package command import ( + "fmt" + "go/format" "io/ioutil" "os" - - "github.com/gogo/protobuf/proto" - "github.com/gogo/protobuf/protoc-gen-gogo/generator" - plugin "github.com/gogo/protobuf/protoc-gen-gogo/plugin" - - _ "github.com/gogo/protobuf/protoc-gen-gogo/grpc" + "strings" _ "github.com/gogo/protobuf/plugin/compare" _ "github.com/gogo/protobuf/plugin/defaultcheck" @@ -51,13 +48,13 @@ import ( _ "github.com/gogo/protobuf/plugin/populate" _ "github.com/gogo/protobuf/plugin/size" _ "github.com/gogo/protobuf/plugin/stringer" + "github.com/gogo/protobuf/plugin/testgen" _ "github.com/gogo/protobuf/plugin/union" _ "github.com/gogo/protobuf/plugin/unmarshal" - - "github.com/gogo/protobuf/plugin/testgen" - - "go/format" - "strings" + "github.com/gogo/protobuf/proto" + "github.com/gogo/protobuf/protoc-gen-gogo/generator" + _ "github.com/gogo/protobuf/protoc-gen-gogo/grpc" + plugin 
"github.com/gogo/protobuf/protoc-gen-gogo/plugin" ) func Read() *plugin.CodeGeneratorRequest { @@ -77,6 +74,44 @@ func Read() *plugin.CodeGeneratorRequest { return g.Request } +// filenameSuffix replaces the .pb.go at the end of each filename. +func GeneratePlugin(req *plugin.CodeGeneratorRequest, p generator.Plugin, filenameSuffix string) *plugin.CodeGeneratorResponse { + g := generator.New() + g.Request = req + if len(g.Request.FileToGenerate) == 0 { + g.Fail("no files to generate") + } + + g.CommandLineParameters(g.Request.GetParameter()) + + g.WrapTypes() + g.SetPackageNames() + g.BuildTypeNameMap() + g.GeneratePlugin(p) + + for i := 0; i < len(g.Response.File); i++ { + g.Response.File[i].Name = proto.String( + strings.Replace(*g.Response.File[i].Name, ".pb.go", filenameSuffix, -1), + ) + } + if err := goformat(g.Response); err != nil { + g.Error(err) + } + return g.Response +} + +func goformat(resp *plugin.CodeGeneratorResponse) error { + for i := 0; i < len(resp.File); i++ { + formatted, err := format.Source([]byte(resp.File[i].GetContent())) + if err != nil { + return fmt.Errorf("go format error: %v", err) + } + fmts := string(formatted) + resp.File[i].Content = &fmts + } + return nil +} + func Generate(req *plugin.CodeGeneratorRequest) *plugin.CodeGeneratorResponse { // Begin by allocating a generator. 
The request and response structures are stored there // so we can do error handling easily - the response structure contains the field to @@ -95,46 +130,20 @@ func Generate(req *plugin.CodeGeneratorRequest) *plugin.CodeGeneratorResponse { g.GenerateAllFiles() - gtest := generator.New() - - data, err := proto.Marshal(req) - if err != nil { - g.Error(err, "failed to marshal modified proto") - } - if err := proto.Unmarshal(data, gtest.Request); err != nil { - g.Error(err, "parsing modified proto") + if err := goformat(g.Response); err != nil { + g.Error(err) } - if len(gtest.Request.FileToGenerate) == 0 { - gtest.Fail("no files to generate") - } + testReq := proto.Clone(req).(*plugin.CodeGeneratorRequest) - gtest.CommandLineParameters(gtest.Request.GetParameter()) + testResp := GeneratePlugin(testReq, testgen.NewPlugin(), "pb_test.go") - // Create a wrapped version of the Descriptors and EnumDescriptors that - // point to the file that defines them. - gtest.WrapTypes() - - gtest.SetPackageNames() - gtest.BuildTypeNameMap() - - gtest.GeneratePlugin(testgen.NewPlugin()) - - for i := 0; i < len(gtest.Response.File); i++ { - if strings.Contains(*gtest.Response.File[i].Content, `//These tests are generated by github.com/gogo/protobuf/plugin/testgen`) { - gtest.Response.File[i].Name = proto.String(strings.Replace(*gtest.Response.File[i].Name, ".pb.go", "pb_test.go", -1)) - g.Response.File = append(g.Response.File, gtest.Response.File[i]) + for i := 0; i < len(testResp.File); i++ { + if strings.Contains(*testResp.File[i].Content, `//These tests are generated by github.com/gogo/protobuf/plugin/testgen`) { + g.Response.File = append(g.Response.File, testResp.File[i]) } } - for i := 0; i < len(g.Response.File); i++ { - formatted, err := format.Source([]byte(g.Response.File[i].GetContent())) - if err != nil { - g.Error(err, "go format error") - } - fmts := string(formatted) - g.Response.File[i].Content = &fmts - } return g.Response } diff --git 
a/vendor/github.com/gogo/protobuf/vanity/enum.go b/vendor/github.com/gogo/protobuf/vanity/enum.go index 13d089744..466d07b54 100644 --- a/vendor/github.com/gogo/protobuf/vanity/enum.go +++ b/vendor/github.com/gogo/protobuf/vanity/enum.go @@ -1,7 +1,7 @@ -// Extensions for Protocol Buffers to create more go like structures. +// Protocol Buffers for Go with Gadgets // -// Copyright (c) 2015, Vastech SA (PTY) LTD. rights reserved. -// http://github.com/gogo/protobuf/gogoproto +// Copyright (c) 2015, The GoGo Authors. rights reserved. +// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/gogo/protobuf/vanity/field.go b/vendor/github.com/gogo/protobuf/vanity/field.go index a484d1e1c..62cdddfab 100644 --- a/vendor/github.com/gogo/protobuf/vanity/field.go +++ b/vendor/github.com/gogo/protobuf/vanity/field.go @@ -1,7 +1,7 @@ -// Extensions for Protocol Buffers to create more go like structures. +// Protocol Buffers for Go with Gadgets // -// Copyright (c) 2015, Vastech SA (PTY) LTD. rights reserved. -// http://github.com/gogo/protobuf/gogoproto +// Copyright (c) 2015, The GoGo Authors. rights reserved. 
+// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -72,6 +72,13 @@ func TurnOffNullable(field *descriptor.FieldDescriptorProto) { SetBoolFieldOption(gogoproto.E_Nullable, false)(field) } +func TurnOffNullableForNativeTypes(field *descriptor.FieldDescriptorProto) { + if field.IsRepeated() || field.IsMessage() { + return + } + SetBoolFieldOption(gogoproto.E_Nullable, false)(field) +} + func TurnOffNullableForNativeTypesWithoutDefaultsOnly(field *descriptor.FieldDescriptorProto) { if field.IsRepeated() || field.IsMessage() { return diff --git a/vendor/github.com/gogo/protobuf/vanity/file.go b/vendor/github.com/gogo/protobuf/vanity/file.go index d82fcdabb..e7b56de1f 100644 --- a/vendor/github.com/gogo/protobuf/vanity/file.go +++ b/vendor/github.com/gogo/protobuf/vanity/file.go @@ -1,7 +1,7 @@ -// Extensions for Protocol Buffers to create more go like structures. +// Protocol Buffers for Go with Gadgets // -// Copyright (c) 2015, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto +// Copyright (c) 2015, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -29,15 +29,17 @@ package vanity import ( - "strings" + "path/filepath" "github.com/gogo/protobuf/gogoproto" "github.com/gogo/protobuf/proto" descriptor "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" ) -func NotInPackageGoogleProtobuf(file *descriptor.FileDescriptorProto) bool { - return !strings.HasPrefix(file.GetPackage(), "google.protobuf") +func NotGoogleProtobufDescriptorProto(file *descriptor.FileDescriptorProto) bool { + // can not just check if file.GetName() == "google/protobuf/descriptor.proto" because we do not want to assume compile path + _, fileName := filepath.Split(file.GetName()) + return !(file.GetPackage() == "google.protobuf" && fileName == "descriptor.proto") } func FilterFiles(files []*descriptor.FileDescriptorProto, f func(file *descriptor.FileDescriptorProto) bool) []*descriptor.FileDescriptorProto { @@ -173,3 +175,7 @@ func TurnOffGoUnrecognizedAll(file *descriptor.FileDescriptorProto) { func TurnOffGogoImport(file *descriptor.FileDescriptorProto) { SetBoolFileOption(gogoproto.E_GogoprotoImport, false)(file) } + +func TurnOnCompareAll(file *descriptor.FileDescriptorProto) { + SetBoolFileOption(gogoproto.E_CompareAll, true)(file) +} diff --git a/vendor/github.com/gogo/protobuf/vanity/foreach.go b/vendor/github.com/gogo/protobuf/vanity/foreach.go index 0133c9d2b..888b6d04d 100644 --- a/vendor/github.com/gogo/protobuf/vanity/foreach.go +++ b/vendor/github.com/gogo/protobuf/vanity/foreach.go @@ -1,7 +1,7 @@ -// Extensions for Protocol Buffers to create more go like structures. +// Protocol Buffers for Go with Gadgets // -// Copyright (c) 2015, Vastech SA (PTY) LTD. All rights reserved. -// http://github.com/gogo/protobuf/gogoproto +// Copyright (c) 2015, The GoGo Authors. All rights reserved. 
+// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are diff --git a/vendor/github.com/gogo/protobuf/vanity/msg.go b/vendor/github.com/gogo/protobuf/vanity/msg.go index 3954a1869..7ff2b9879 100644 --- a/vendor/github.com/gogo/protobuf/vanity/msg.go +++ b/vendor/github.com/gogo/protobuf/vanity/msg.go @@ -1,7 +1,7 @@ -// Extensions for Protocol Buffers to create more go like structures. +// Protocol Buffers for Go with Gadgets // -// Copyright (c) 2015, Vastech SA (PTY) LTD. rights reserved. -// http://github.com/gogo/protobuf/gogoproto +// Copyright (c) 2015, The GoGo Authors. rights reserved. +// http://github.com/gogo/protobuf // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are @@ -136,3 +136,7 @@ func TurnOffGoExtensionsMap(msg *descriptor.DescriptorProto) { func TurnOffGoUnrecognized(msg *descriptor.DescriptorProto) { SetBoolMessageOption(gogoproto.E_GoprotoUnrecognized, false)(msg) } + +func TurnOnCompare(msg *descriptor.DescriptorProto) { + SetBoolMessageOption(gogoproto.E_Compare, true)(msg) +} diff --git a/vendor/github.com/influxdata/ifql/.gitignore b/vendor/github.com/influxdata/ifql/.gitignore new file mode 100644 index 000000000..c8cce53ea --- /dev/null +++ b/vendor/github.com/influxdata/ifql/.gitignore @@ -0,0 +1,8 @@ +/vendor/ +/bin/ +.vscode +/dist/ +ifqld.id +.*.swp +ifql_pkgs +/*.ifql diff --git a/vendor/github.com/influxdata/ifql/.goreleaser.yml b/vendor/github.com/influxdata/ifql/.goreleaser.yml new file mode 100644 index 000000000..68eb0da2b --- /dev/null +++ b/vendor/github.com/influxdata/ifql/.goreleaser.yml @@ -0,0 +1,54 @@ +project_name: ifql +builds: +- goos: + - linux + - darwin + - windows + goarch: + - amd64 + - 386 + - arm + - arm64 + goarm: + - 6 + - 7 + ignore: + - goos: darwin + goarch: 386 + - goos: windows + 
goarch: 386 + main: ./cmd/ifqld/main.go + ldflags: -s -w -X main.version={{.Version}} -X main.commit={{.Commit}} -X main.date={{.Date}} + binary: ifqld +archive: + format: tar.gz + wrap_in_directory: true + format_overrides: + - goos: windows + format: zip + replacements: + darwin: macOS + name_template: '{{ .Binary }}_{{ .Version }}_{{ .Os }}_{{ .Arch }}{{ if .Arm }}v{{ + .Arm }}{{ end }}' + files: + - LICENSE + - README.md +fpm: + vendor: InfluxData + homepage: https://influxdata.com + maintainer: contact@influxdata.com + description: IFQLD is an HTTP server for processing IFQL queries to 1 or more InfluxDB servers. + license: AGPL + formats: + - deb + - rpm + bindir: /usr/bin + dependencies: + - coreutils + files: + "LICENSE": "/usr/share/ifqld/" + "README.md": "/usr/share/ifqld/" +snapshot: + name_template: SNAPSHOT-{{ .Commit }} +checksum: + name_template: '{{ .ProjectName }}_{{ .Version }}_checksums.txt' diff --git a/vendor/github.com/influxdata/ifql/CHANGELOG.md b/vendor/github.com/influxdata/ifql/CHANGELOG.md new file mode 100644 index 000000000..e38c27c41 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/CHANGELOG.md @@ -0,0 +1,52 @@ +## v0.0.5 [2018-02-09] + +### Features + +- [#143](https://github.com/influxdata/ifql/issues/143) Add yield function +- [#193](https://github.com/influxdata/ifql/issues/193) Add pipe forward expressions +- [#229](https://github.com/influxdata/ifql/pull/229) Add top/bottom functions +- [#230](https://github.com/influxdata/ifql/pull/230) Add state count and state duration +- [#231](https://github.com/influxdata/ifql/pull/231) Add distinct function +- [#234](https://github.com/influxdata/ifql/pull/234) Add percentile function with exact and approx implementations +- [#243](https://github.com/influxdata/ifql/pull/243) Add support for pushing down group by with aggregates + +## v0.0.4 [2018-01-12] + +### Features + +- [#167](https://github.com/influxdata/ifql/pull/167) Add support for functions in IFQL +- 
[#171](https://github.com/influxdata/ifql/pull/171) Add initial benchmarks +- [#177](https://github.com/influxdata/ifql/pull/177) Make join function accept map of tables +- [#178](https://github.com/influxdata/ifql/pull/178) Update tracing for parsing/compile steps +- [#179](https://github.com/influxdata/ifql/pull/179) Add "map" function +- [#180](https://github.com/influxdata/ifql/pull/180) Remove "var" keyword +- [#181](https://github.com/influxdata/ifql/pull/181) Add "shift" function, for shifting values in time +- [#182](https://github.com/influxdata/ifql/pull/182) Add suppport for multiple values on table records +- [#183](https://github.com/influxdata/ifql/pull/183) Add derivative function +- [#185](https://github.com/influxdata/ifql/pull/185) Add integral function +- [#186](https://github.com/influxdata/ifql/pull/186) Add difference function +- [#188](https://github.com/influxdata/ifql/pull/188) Add support for default arguments in IFQL functions +- [#189](https://github.com/influxdata/ifql/pull/189) Update filter to be able to push down into multiple filter operations +- [#190](https://github.com/influxdata/ifql/pull/190) Add support for "//" style comments in IFQL + +## v0.0.3 [2017-12-08] + +### Features + +- [#166](https://github.com/influxdata/ifql/issues/166) Initial Resource Management API is in place. Now queries can be submitted with varying priorities and limits on concurrency and memory usage. +- [#164](https://github.com/influxdata/ifql/issues/164) Opentracing support for queries. +- [#139](https://github.com/influxdata/ifql/issues/139) Join is now a global function. +- [#130](https://github.com/influxdata/ifql/issues/130) Add error handling of duplicate arguments to functions. +- [#100](https://github.com/influxdata/ifql/issues/100) Add error handling of unknown arguments to functions. + +### Bugfixes + +- [#153](https://github.com/influxdata/ifql/issues/153) Fix issues with line protocol output if the _measurement and _field tags were missing. 
+ +## v0.0.2 [2017-11-21] + +Release after some initial community feedback. + +## v0.0.1 [2017-11-13] +Initial release of ifqld + diff --git a/vendor/github.com/influxdata/ifql/Dockerfile b/vendor/github.com/influxdata/ifql/Dockerfile new file mode 100644 index 000000000..5322e38a5 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/Dockerfile @@ -0,0 +1,14 @@ +FROM golang:1.10 as builder +ENV GOPATH / +RUN go get github.com/golang/dep/... +ADD . /src/github.com/influxdata/ifql +WORKDIR /src/github.com/influxdata/ifql +RUN go get github.com/golang/dep/... && \ + CGO_ENABLED=0 go build -ldflags "-s" -a -installsuffix cgo -i -o bin/ifqld ./cmd/ifqld + +FROM alpine +RUN apk add --no-cache ca-certificates tzdata +EXPOSE 8093/tcp +COPY --from=builder /src/github.com/influxdata/ifql/bin/ifqld / +COPY --from=builder /src/github.com/influxdata/ifql/LICENSE / +ENTRYPOINT ["/ifqld"] diff --git a/vendor/github.com/influxdata/ifql/Dockerfile_build b/vendor/github.com/influxdata/ifql/Dockerfile_build new file mode 100644 index 000000000..0d77aa2c6 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/Dockerfile_build @@ -0,0 +1,42 @@ +FROM ruby:2.4.1-stretch + +# This dockerfile is capabable of performing all +# build/test/package/deploy actions needed for IFQL. + +LABEL maintainer="contact@influxdb.com" + +RUN apt-get update && apt-get -y install \ + apt-transport-https \ + curl \ + gnupg2 \ + rubygems-integration \ + ruby-dev \ + ruby \ + build-essential \ + rsync \ + rpm \ + tar \ + zip \ + && rm -rf /var/lib/apt/lists/* + +RUN gem install fpm -v 1.9.3 + +# Install go +ENV GOPATH /root/go +ENV GO_VERSION 1.10 +ENV GO_ARCH amd64 +RUN wget -q https://storage.googleapis.com/golang/go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ + tar -C /usr/local/ -xf /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz && \ + rm /go${GO_VERSION}.linux-${GO_ARCH}.tar.gz +ENV PATH /usr/local/go/bin:$PATH + +# Install go dep +RUN go get github.com/golang/dep/... 
+ +ENV PROJECT_DIR $GOPATH/src/github.com/influxdata/ifql +ENV PATH $GOPATH/bin:$PATH +RUN mkdir -p $PROJECT_DIR +WORKDIR $PROJECT_DIR + +VOLUME $PROJECT_DIR +VOLUME /root/go/src diff --git a/vendor/github.com/influxdata/ifql/Gopkg.lock b/vendor/github.com/influxdata/ifql/Gopkg.lock new file mode 100644 index 000000000..c5a6126e2 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/Gopkg.lock @@ -0,0 +1,433 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. + + +[[projects]] + name = "github.com/apache/thrift" + packages = ["lib/go/thrift"] + revision = "b2a4d4ae21c789b689dd162deb819665567f481c" + version = "0.10.0" + +[[projects]] + branch = "master" + name = "github.com/apex/log" + packages = [ + ".", + "handlers/cli" + ] + revision = "ff0f66940b829dc66c81dad34746d4349b83eb9e" + +[[projects]] + branch = "master" + name = "github.com/beorn7/perks" + packages = ["quantile"] + revision = "4c0e84591b9aa9e6dcfdf3e020114cd81f89d5f9" + +[[projects]] + name = "github.com/c-bata/go-prompt" + packages = ["."] + revision = "c292a2f4b4fe8563883871456e19028ff9df0f26" + version = "v0.1.1" + +[[projects]] + branch = "master" + name = "github.com/codahale/hdrhistogram" + packages = ["."] + revision = "3a0bb77429bd3a61596f5e8a3172445844342120" + +[[projects]] + name = "github.com/fatih/color" + packages = ["."] + revision = "570b54cabe6b8eb0bc2dfce68d964677d63b5260" + version = "v1.5.0" + +[[projects]] + name = "github.com/gogo/protobuf" + packages = [ + "codec", + "gogoproto", + "proto", + "protoc-gen-gogo/descriptor", + "sortkeys", + "types" + ] + revision = "100ba4e885062801d56799d78530b73b178a78f3" + version = "v0.4" + +[[projects]] + branch = "master" + name = "github.com/golang/protobuf" + packages = ["proto"] + revision = "1643683e1b54a9e88ad26d98f81400c8c9d9f4f9" + +[[projects]] + branch = "master" + name = "github.com/gonum/blas" + packages = [ + ".", + "blas64", + "native", + "native/internal/math32" + ] + revision = 
"37e82626499e1df7c54aeaba0959fd6e7e8dc1e4" + +[[projects]] + branch = "master" + name = "github.com/gonum/floats" + packages = ["."] + revision = "f74b330d45c56584a6ea7a27f5c64ea2900631e9" + +[[projects]] + branch = "master" + name = "github.com/gonum/internal" + packages = [ + "asm/f32", + "asm/f64" + ] + revision = "e57e4534cf9b3b00ef6c0175f59d8d2d34f60914" + +[[projects]] + branch = "master" + name = "github.com/gonum/lapack" + packages = [ + ".", + "lapack64", + "native" + ] + revision = "5ed4b826becd1807e09377508f51756586d1a98c" + +[[projects]] + branch = "master" + name = "github.com/gonum/mathext" + packages = [ + ".", + "internal/amos", + "internal/cephes", + "internal/gonum" + ] + revision = "0b9a913d27c42216782103ce3773136730e1535f" + +[[projects]] + branch = "master" + name = "github.com/gonum/matrix" + packages = [ + ".", + "mat64" + ] + revision = "dd6034299e4242c9f0ea36735e6d4264dfcb3f9f" + +[[projects]] + branch = "master" + name = "github.com/gonum/stat" + packages = [ + ".", + "distuv" + ] + revision = "40602ac931a7ab7f72cb1eff5ce1753a2d28d345" + +[[projects]] + branch = "master" + name = "github.com/google/go-cmp" + packages = [ + "cmp", + "cmp/cmpopts", + "cmp/internal/diff", + "cmp/internal/function", + "cmp/internal/value" + ] + revision = "d5735f74713c51f7450a43d0a98d41ce2c1db3cb" + +[[projects]] + branch = "master" + name = "github.com/google/go-github" + packages = ["github"] + revision = "79fc6c156e5a36bacaa65c9c08722d8ef843c686" + +[[projects]] + branch = "master" + name = "github.com/google/go-querystring" + packages = ["query"] + revision = "53e6ce116135b80d037921a7fdd5138cf32d7a8a" + +[[projects]] + name = "github.com/goreleaser/archive" + packages = [ + ".", + "tar", + "zip" + ] + revision = "caa5f3f5742eb0535631e94fa5e171c74c0144b7" + version = "v1.0.0" + +[[projects]] + name = "github.com/goreleaser/goreleaser" + packages = [ + ".", + "checksum", + "config", + "context", + "goreleaserlib", + "internal/archiveformat", + 
"internal/buildtarget", + "internal/client", + "internal/ext", + "internal/git", + "internal/linux", + "internal/name", + "pipeline", + "pipeline/archive", + "pipeline/brew", + "pipeline/build", + "pipeline/changelog", + "pipeline/checksums", + "pipeline/cleandist", + "pipeline/defaults", + "pipeline/docker", + "pipeline/env", + "pipeline/fpm", + "pipeline/git", + "pipeline/release", + "pipeline/snapcraft" + ] + revision = "adc2d7d4c572af20cc20290db48b664f12c158bb" + version = "v0.35.7" + +[[projects]] + name = "github.com/influxdata/influxdb" + packages = [ + "models", + "pkg/escape" + ] + revision = "2d474a3089bcfce6b472779be9470a1f0ef3d5e4" + version = "v1.3.7" + +[[projects]] + branch = "master" + name = "github.com/influxdata/tdigest" + packages = ["."] + revision = "617b83f940fd9acd207f712561a8a0590277fb38" + +[[projects]] + branch = "master" + name = "github.com/influxdata/usage-client" + packages = ["v1"] + revision = "6d3895376368aa52a3a81d2a16e90f0f52371967" + +[[projects]] + branch = "master" + name = "github.com/influxdata/yamux" + packages = ["."] + revision = "e7f91523e648eeb91537e420aebbd96aa64ab6ae" + +[[projects]] + branch = "master" + name = "github.com/influxdata/yarpc" + packages = [ + ".", + "codes", + "status", + "yarpcproto" + ] + revision = "f0da2db138cad2fb425541938fc28dd5a5bc6918" + +[[projects]] + name = "github.com/jessevdk/go-flags" + packages = ["."] + revision = "96dc06278ce32a0e9d957d590bb987c81ee66407" + version = "v1.3.0" + +[[projects]] + name = "github.com/mattn/go-colorable" + packages = ["."] + revision = "167de6bfdfba052fa6b2d3664c8f5272e23c9072" + version = "v0.0.9" + +[[projects]] + name = "github.com/mattn/go-isatty" + packages = ["."] + revision = "0360b2af4f38e8d38c7fce2a9f4e702702d73a39" + version = "v0.0.3" + +[[projects]] + branch = "master" + name = "github.com/mattn/go-zglob" + packages = [ + ".", + "fastwalk" + ] + revision = "4b74c24375b3b1ee226867156e01996f4e19a8d6" + +[[projects]] + name = 
"github.com/matttproud/golang_protobuf_extensions" + packages = ["pbutil"] + revision = "3247c84500bff8d9fb6d579d800f20b3e091582c" + version = "v1.0.0" + +[[projects]] + branch = "master" + name = "github.com/mna/pigeon" + packages = [ + ".", + "ast", + "builder" + ] + revision = "904fede4321f9aae0e347d296f56035410375fed" + +[[projects]] + name = "github.com/opentracing/opentracing-go" + packages = [ + ".", + "ext", + "log" + ] + revision = "1949ddbfd147afd4d964a9f00b24eb291e0e7c38" + version = "v1.0.2" + +[[projects]] + name = "github.com/pkg/errors" + packages = ["."] + revision = "645ef00459ed84a119197bfb8d8205042c6df63d" + version = "v0.8.0" + +[[projects]] + branch = "master" + name = "github.com/pkg/term" + packages = ["termios"] + revision = "b1f72af2d63057363398bec5873d16a98b453312" + +[[projects]] + name = "github.com/prometheus/client_golang" + packages = [ + "prometheus", + "prometheus/promhttp" + ] + revision = "c5b7fccd204277076155f10851dad72b76a49317" + version = "v0.8.0" + +[[projects]] + branch = "master" + name = "github.com/prometheus/client_model" + packages = ["go"] + revision = "6f3806018612930941127f2a7c6c453ba2c527d2" + +[[projects]] + branch = "master" + name = "github.com/prometheus/common" + packages = [ + "expfmt", + "internal/bitbucket.org/ww/goautoneg", + "model" + ] + revision = "1bab55dd05dbff384524a6a1c99006d9eb5f139b" + +[[projects]] + branch = "master" + name = "github.com/prometheus/procfs" + packages = [ + ".", + "xfs" + ] + revision = "a6e9df898b1336106c743392c48ee0b71f5c4efa" + +[[projects]] + name = "github.com/satori/go.uuid" + packages = ["."] + revision = "879c5887cd475cd7864858769793b2ceb0d44feb" + version = "v1.1.0" + +[[projects]] + name = "github.com/uber/jaeger-client-go" + packages = [ + ".", + "config", + "internal/baggage", + "internal/baggage/remote", + "internal/spanlog", + "log", + "rpcmetrics", + "thrift-gen/agent", + "thrift-gen/baggage", + "thrift-gen/jaeger", + "thrift-gen/sampling", + 
"thrift-gen/zipkincore", + "utils" + ] + revision = "0ce42f3f87dae4f5ba84bdc60c99a908db419cb8" + version = "v2.10.0" + +[[projects]] + name = "github.com/uber/jaeger-lib" + packages = ["metrics"] + revision = "c48167d9cae5887393dd5e61efd06a4a48b7fbb3" + version = "v1.2.1" + +[[projects]] + name = "github.com/urfave/cli" + packages = ["."] + revision = "cfb38830724cc34fedffe9a2a29fb54fa9169cd1" + version = "v1.20.0" + +[[projects]] + branch = "master" + name = "golang.org/x/net" + packages = [ + "context", + "context/ctxhttp" + ] + revision = "01c190206fbdffa42f334f4b2bf2220f50e64920" + +[[projects]] + branch = "master" + name = "golang.org/x/oauth2" + packages = [ + ".", + "internal" + ] + revision = "9ff8ebcc8e241d46f52ecc5bff0e5a2f2dbef402" + +[[projects]] + branch = "master" + name = "golang.org/x/sync" + packages = ["errgroup"] + revision = "fd80eb99c8f653c847d294a001bdf2a3a6f768f5" + +[[projects]] + branch = "master" + name = "golang.org/x/sys" + packages = ["unix"] + revision = "2c42eef0765b9837fbdab12011af7830f55f88f0" + +[[projects]] + branch = "master" + name = "golang.org/x/tools" + packages = [ + "go/ast/astutil", + "imports" + ] + revision = "e531a2a1c15f94033f6fa87666caeb19a688175f" + +[[projects]] + name = "google.golang.org/appengine" + packages = [ + "internal", + "internal/base", + "internal/datastore", + "internal/log", + "internal/remote_api", + "internal/urlfetch", + "urlfetch" + ] + revision = "150dc57a1b433e64154302bdc40b6bb8aefa313a" + version = "v1.0.0" + +[[projects]] + branch = "v2" + name = "gopkg.in/yaml.v2" + packages = ["."] + revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + inputs-digest = "b020a89be355de84a5ca3ba9a63e4ec9c0fd9a57d7b4b4159d110980b4189e24" + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/vendor/github.com/influxdata/ifql/Gopkg.toml b/vendor/github.com/influxdata/ifql/Gopkg.toml new file mode 100644 index 000000000..3fc7ab5f4 --- /dev/null 
+++ b/vendor/github.com/influxdata/ifql/Gopkg.toml @@ -0,0 +1,17 @@ +required = ["github.com/mna/pigeon", "github.com/goreleaser/goreleaser"] + +[[constraint]] + branch = "master" + name = "github.com/google/go-cmp" + +[[override]] + branch = "master" + name = "github.com/mna/pigeon" + +[[constraint]] + name = "github.com/opentracing/opentracing-go" + version = "1.0.2" + +[[constraint]] + name = "github.com/uber/jaeger-client-go" + version = "2.10.0" diff --git a/vendor/github.com/influxdata/ifql/LICENSE b/vendor/github.com/influxdata/ifql/LICENSE new file mode 100644 index 000000000..776d0adcf --- /dev/null +++ b/vendor/github.com/influxdata/ifql/LICENSE @@ -0,0 +1,684 @@ +IFQLD is an HTTP server for processing IFQL queries to 1 or more InfluxDB servers. + +Copyright (C) 2017, InfluxData Inc. + +This program is free software: you can redistribute it and/or modify +it under the terms of the GNU Affero General Public License as +published by the Free Software Foundation, either version 3 of the +License, or (at your option) any later version. + +This program is distributed in the hope that it will be useful, +but WITHOUT ANY WARRANTY; without even the implied warranty of +MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +GNU Affero General Public License for more details. + +You should have received a copy of the GNU Affero General Public License +along with this program. If not, see . + +InfluxData Inc. +799 Market Street, Suite 400 +San Francisco, CA 94103 +contact@influxdata.com + + + GNU AFFERO GENERAL PUBLIC LICENSE + Version 3, 19 November 2007 + + Copyright (C) 2007 Free Software Foundation, Inc. + Everyone is permitted to copy and distribute verbatim copies + of this license document, but changing it is not allowed. + + Preamble + + The GNU Affero General Public License is a free, copyleft license for +software and other kinds of works, specifically designed to ensure +cooperation with the community in the case of network server software. 
+ + The licenses for most software and other practical works are designed +to take away your freedom to share and change the works. By contrast, +our General Public Licenses are intended to guarantee your freedom to +share and change all versions of a program--to make sure it remains free +software for all its users. + + When we speak of free software, we are referring to freedom, not +price. Our General Public Licenses are designed to make sure that you +have the freedom to distribute copies of free software (and charge for +them if you wish), that you receive source code or can get it if you +want it, that you can change the software or use pieces of it in new +free programs, and that you know you can do these things. + + Developers that use our General Public Licenses protect your rights +with two steps: (1) assert copyright on the software, and (2) offer +you this License which gives you legal permission to copy, distribute +and/or modify the software. + + A secondary benefit of defending all users' freedom is that +improvements made in alternate versions of the program, if they +receive widespread use, become available for other developers to +incorporate. Many developers of free software are heartened and +encouraged by the resulting cooperation. However, in the case of +software used on network servers, this result may fail to come about. +The GNU General Public License permits making a modified version and +letting the public access it on a server without ever releasing its +source code to the public. + + The GNU Affero General Public License is designed specifically to +ensure that, in such cases, the modified source code becomes available +to the community. It requires the operator of a network server to +provide the source code of the modified version running there to the +users of that server. Therefore, public use of a modified version, on +a publicly accessible server, gives the public access to the source +code of the modified version. 
+ + An older license, called the Affero General Public License and +published by Affero, was designed to accomplish similar goals. This is +a different license, not a version of the Affero GPL, but Affero has +released a new version of the Affero GPL which permits relicensing under +this license. + + The precise terms and conditions for copying, distribution and +modification follow. + + TERMS AND CONDITIONS + + 0. Definitions. + + "This License" refers to version 3 of the GNU Affero General Public License. + + "Copyright" also means copyright-like laws that apply to other kinds of +works, such as semiconductor masks. + + "The Program" refers to any copyrightable work licensed under this +License. Each licensee is addressed as "you". "Licensees" and +"recipients" may be individuals or organizations. + + To "modify" a work means to copy from or adapt all or part of the work +in a fashion requiring copyright permission, other than the making of an +exact copy. The resulting work is called a "modified version" of the +earlier work or a work "based on" the earlier work. + + A "covered work" means either the unmodified Program or a work based +on the Program. + + To "propagate" a work means to do anything with it that, without +permission, would make you directly or secondarily liable for +infringement under applicable copyright law, except executing it on a +computer or modifying a private copy. Propagation includes copying, +distribution (with or without modification), making available to the +public, and in some countries other activities as well. + + To "convey" a work means any kind of propagation that enables other +parties to make or receive copies. Mere interaction with a user through +a computer network, with no transfer of a copy, is not conveying. 
+ + An interactive user interface displays "Appropriate Legal Notices" +to the extent that it includes a convenient and prominently visible +feature that (1) displays an appropriate copyright notice, and (2) +tells the user that there is no warranty for the work (except to the +extent that warranties are provided), that licensees may convey the +work under this License, and how to view a copy of this License. If +the interface presents a list of user commands or options, such as a +menu, a prominent item in the list meets this criterion. + + 1. Source Code. + + The "source code" for a work means the preferred form of the work +for making modifications to it. "Object code" means any non-source +form of a work. + + A "Standard Interface" means an interface that either is an official +standard defined by a recognized standards body, or, in the case of +interfaces specified for a particular programming language, one that +is widely used among developers working in that language. + + The "System Libraries" of an executable work include anything, other +than the work as a whole, that (a) is included in the normal form of +packaging a Major Component, but which is not part of that Major +Component, and (b) serves only to enable use of the work with that +Major Component, or to implement a Standard Interface for which an +implementation is available to the public in source code form. A +"Major Component", in this context, means a major essential component +(kernel, window system, and so on) of the specific operating system +(if any) on which the executable work runs, or a compiler used to +produce the work, or an object code interpreter used to run it. + + The "Corresponding Source" for a work in object code form means all +the source code needed to generate, install, and (for an executable +work) run the object code and to modify the work, including scripts to +control those activities. 
However, it does not include the work's +System Libraries, or general-purpose tools or generally available free +programs which are used unmodified in performing those activities but +which are not part of the work. For example, Corresponding Source +includes interface definition files associated with source files for +the work, and the source code for shared libraries and dynamically +linked subprograms that the work is specifically designed to require, +such as by intimate data communication or control flow between those +subprograms and other parts of the work. + + The Corresponding Source need not include anything that users +can regenerate automatically from other parts of the Corresponding +Source. + + The Corresponding Source for a work in source code form is that +same work. + + 2. Basic Permissions. + + All rights granted under this License are granted for the term of +copyright on the Program, and are irrevocable provided the stated +conditions are met. This License explicitly affirms your unlimited +permission to run the unmodified Program. The output from running a +covered work is covered by this License only if the output, given its +content, constitutes a covered work. This License acknowledges your +rights of fair use or other equivalent, as provided by copyright law. + + You may make, run and propagate covered works that you do not +convey, without conditions so long as your license otherwise remains +in force. You may convey covered works to others for the sole purpose +of having them make modifications exclusively for you, or provide you +with facilities for running those works, provided that you comply with +the terms of this License in conveying all material for which you do +not control copyright. Those thus making or running the covered works +for you must do so exclusively on your behalf, under your direction +and control, on terms that prohibit them from making any copies of +your copyrighted material outside their relationship with you. 
+ + Conveying under any other circumstances is permitted solely under +the conditions stated below. Sublicensing is not allowed; section 10 +makes it unnecessary. + + 3. Protecting Users' Legal Rights From Anti-Circumvention Law. + + No covered work shall be deemed part of an effective technological +measure under any applicable law fulfilling obligations under article +11 of the WIPO copyright treaty adopted on 20 December 1996, or +similar laws prohibiting or restricting circumvention of such +measures. + + When you convey a covered work, you waive any legal power to forbid +circumvention of technological measures to the extent such circumvention +is effected by exercising rights under this License with respect to +the covered work, and you disclaim any intention to limit operation or +modification of the work as a means of enforcing, against the work's +users, your or third parties' legal rights to forbid circumvention of +technological measures. + + 4. Conveying Verbatim Copies. + + You may convey verbatim copies of the Program's source code as you +receive it, in any medium, provided that you conspicuously and +appropriately publish on each copy an appropriate copyright notice; +keep intact all notices stating that this License and any +non-permissive terms added in accord with section 7 apply to the code; +keep intact all notices of the absence of any warranty; and give all +recipients a copy of this License along with the Program. + + You may charge any price or no price for each copy that you convey, +and you may offer support or warranty protection for a fee. + + 5. Conveying Modified Source Versions. + + You may convey a work based on the Program, or the modifications to +produce it from the Program, in the form of source code under the +terms of section 4, provided that you also meet all of these conditions: + + a) The work must carry prominent notices stating that you modified + it, and giving a relevant date. 
+ + b) The work must carry prominent notices stating that it is + released under this License and any conditions added under section + 7. This requirement modifies the requirement in section 4 to + "keep intact all notices". + + c) You must license the entire work, as a whole, under this + License to anyone who comes into possession of a copy. This + License will therefore apply, along with any applicable section 7 + additional terms, to the whole of the work, and all its parts, + regardless of how they are packaged. This License gives no + permission to license the work in any other way, but it does not + invalidate such permission if you have separately received it. + + d) If the work has interactive user interfaces, each must display + Appropriate Legal Notices; however, if the Program has interactive + interfaces that do not display Appropriate Legal Notices, your + work need not make them do so. + + A compilation of a covered work with other separate and independent +works, which are not by their nature extensions of the covered work, +and which are not combined with it such as to form a larger program, +in or on a volume of a storage or distribution medium, is called an +"aggregate" if the compilation and its resulting copyright are not +used to limit the access or legal rights of the compilation's users +beyond what the individual works permit. Inclusion of a covered work +in an aggregate does not cause this License to apply to the other +parts of the aggregate. + + 6. Conveying Non-Source Forms. + + You may convey a covered work in object code form under the terms +of sections 4 and 5, provided that you also convey the +machine-readable Corresponding Source under the terms of this License, +in one of these ways: + + a) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by the + Corresponding Source fixed on a durable physical medium + customarily used for software interchange. 
+ + b) Convey the object code in, or embodied in, a physical product + (including a physical distribution medium), accompanied by a + written offer, valid for at least three years and valid for as + long as you offer spare parts or customer support for that product + model, to give anyone who possesses the object code either (1) a + copy of the Corresponding Source for all the software in the + product that is covered by this License, on a durable physical + medium customarily used for software interchange, for a price no + more than your reasonable cost of physically performing this + conveying of source, or (2) access to copy the + Corresponding Source from a network server at no charge. + + c) Convey individual copies of the object code with a copy of the + written offer to provide the Corresponding Source. This + alternative is allowed only occasionally and noncommercially, and + only if you received the object code with such an offer, in accord + with subsection 6b. + + d) Convey the object code by offering access from a designated + place (gratis or for a charge), and offer equivalent access to the + Corresponding Source in the same way through the same place at no + further charge. You need not require recipients to copy the + Corresponding Source along with the object code. If the place to + copy the object code is a network server, the Corresponding Source + may be on a different server (operated by you or a third party) + that supports equivalent copying facilities, provided you maintain + clear directions next to the object code saying where to find the + Corresponding Source. Regardless of what server hosts the + Corresponding Source, you remain obligated to ensure that it is + available for as long as needed to satisfy these requirements. 
+ + e) Convey the object code using peer-to-peer transmission, provided + you inform other peers where the object code and Corresponding + Source of the work are being offered to the general public at no + charge under subsection 6d. + + A separable portion of the object code, whose source code is excluded +from the Corresponding Source as a System Library, need not be +included in conveying the object code work. + + A "User Product" is either (1) a "consumer product", which means any +tangible personal property which is normally used for personal, family, +or household purposes, or (2) anything designed or sold for incorporation +into a dwelling. In determining whether a product is a consumer product, +doubtful cases shall be resolved in favor of coverage. For a particular +product received by a particular user, "normally used" refers to a +typical or common use of that class of product, regardless of the status +of the particular user or of the way in which the particular user +actually uses, or expects or is expected to use, the product. A product +is a consumer product regardless of whether the product has substantial +commercial, industrial or non-consumer uses, unless such uses represent +the only significant mode of use of the product. + + "Installation Information" for a User Product means any methods, +procedures, authorization keys, or other information required to install +and execute modified versions of a covered work in that User Product from +a modified version of its Corresponding Source. The information must +suffice to ensure that the continued functioning of the modified object +code is in no case prevented or interfered with solely because +modification has been made. 
+ + If you convey an object code work under this section in, or with, or +specifically for use in, a User Product, and the conveying occurs as +part of a transaction in which the right of possession and use of the +User Product is transferred to the recipient in perpetuity or for a +fixed term (regardless of how the transaction is characterized), the +Corresponding Source conveyed under this section must be accompanied +by the Installation Information. But this requirement does not apply +if neither you nor any third party retains the ability to install +modified object code on the User Product (for example, the work has +been installed in ROM). + + The requirement to provide Installation Information does not include a +requirement to continue to provide support service, warranty, or updates +for a work that has been modified or installed by the recipient, or for +the User Product in which it has been modified or installed. Access to a +network may be denied when the modification itself materially and +adversely affects the operation of the network or violates the rules and +protocols for communication across the network. + + Corresponding Source conveyed, and Installation Information provided, +in accord with this section must be in a format that is publicly +documented (and with an implementation available to the public in +source code form), and must require no special password or key for +unpacking, reading or copying. + + 7. Additional Terms. + + "Additional permissions" are terms that supplement the terms of this +License by making exceptions from one or more of its conditions. +Additional permissions that are applicable to the entire Program shall +be treated as though they were included in this License, to the extent +that they are valid under applicable law. 
If additional permissions +apply only to part of the Program, that part may be used separately +under those permissions, but the entire Program remains governed by +this License without regard to the additional permissions. + + When you convey a copy of a covered work, you may at your option +remove any additional permissions from that copy, or from any part of +it. (Additional permissions may be written to require their own +removal in certain cases when you modify the work.) You may place +additional permissions on material, added by you to a covered work, +for which you have or can give appropriate copyright permission. + + Notwithstanding any other provision of this License, for material you +add to a covered work, you may (if authorized by the copyright holders of +that material) supplement the terms of this License with terms: + + a) Disclaiming warranty or limiting liability differently from the + terms of sections 15 and 16 of this License; or + + b) Requiring preservation of specified reasonable legal notices or + author attributions in that material or in the Appropriate Legal + Notices displayed by works containing it; or + + c) Prohibiting misrepresentation of the origin of that material, or + requiring that modified versions of such material be marked in + reasonable ways as different from the original version; or + + d) Limiting the use for publicity purposes of names of licensors or + authors of the material; or + + e) Declining to grant rights under trademark law for use of some + trade names, trademarks, or service marks; or + + f) Requiring indemnification of licensors and authors of that + material by anyone who conveys the material (or modified versions of + it) with contractual assumptions of liability to the recipient, for + any liability that these contractual assumptions directly impose on + those licensors and authors. + + All other non-permissive additional terms are considered "further +restrictions" within the meaning of section 10. 
If the Program as you +received it, or any part of it, contains a notice stating that it is +governed by this License along with a term that is a further +restriction, you may remove that term. If a license document contains +a further restriction but permits relicensing or conveying under this +License, you may add to a covered work material governed by the terms +of that license document, provided that the further restriction does +not survive such relicensing or conveying. + + If you add terms to a covered work in accord with this section, you +must place, in the relevant source files, a statement of the +additional terms that apply to those files, or a notice indicating +where to find the applicable terms. + + Additional terms, permissive or non-permissive, may be stated in the +form of a separately written license, or stated as exceptions; +the above requirements apply either way. + + 8. Termination. + + You may not propagate or modify a covered work except as expressly +provided under this License. Any attempt otherwise to propagate or +modify it is void, and will automatically terminate your rights under +this License (including any patent licenses granted under the third +paragraph of section 11). + + However, if you cease all violation of this License, then your +license from a particular copyright holder is reinstated (a) +provisionally, unless and until the copyright holder explicitly and +finally terminates your license, and (b) permanently, if the copyright +holder fails to notify you of the violation by some reasonable means +prior to 60 days after the cessation. + + Moreover, your license from a particular copyright holder is +reinstated permanently if the copyright holder notifies you of the +violation by some reasonable means, this is the first time you have +received notice of violation of this License (for any work) from that +copyright holder, and you cure the violation prior to 30 days after +your receipt of the notice. 
+ + Termination of your rights under this section does not terminate the +licenses of parties who have received copies or rights from you under +this License. If your rights have been terminated and not permanently +reinstated, you do not qualify to receive new licenses for the same +material under section 10. + + 9. Acceptance Not Required for Having Copies. + + You are not required to accept this License in order to receive or +run a copy of the Program. Ancillary propagation of a covered work +occurring solely as a consequence of using peer-to-peer transmission +to receive a copy likewise does not require acceptance. However, +nothing other than this License grants you permission to propagate or +modify any covered work. These actions infringe copyright if you do +not accept this License. Therefore, by modifying or propagating a +covered work, you indicate your acceptance of this License to do so. + + 10. Automatic Licensing of Downstream Recipients. + + Each time you convey a covered work, the recipient automatically +receives a license from the original licensors, to run, modify and +propagate that work, subject to this License. You are not responsible +for enforcing compliance by third parties with this License. + + An "entity transaction" is a transaction transferring control of an +organization, or substantially all assets of one, or subdividing an +organization, or merging organizations. If propagation of a covered +work results from an entity transaction, each party to that +transaction who receives a copy of the work also receives whatever +licenses to the work the party's predecessor in interest had or could +give under the previous paragraph, plus a right to possession of the +Corresponding Source of the work from the predecessor in interest, if +the predecessor has it or can get it with reasonable efforts. + + You may not impose any further restrictions on the exercise of the +rights granted or affirmed under this License. 
For example, you may +not impose a license fee, royalty, or other charge for exercise of +rights granted under this License, and you may not initiate litigation +(including a cross-claim or counterclaim in a lawsuit) alleging that +any patent claim is infringed by making, using, selling, offering for +sale, or importing the Program or any portion of it. + + 11. Patents. + + A "contributor" is a copyright holder who authorizes use under this +License of the Program or a work on which the Program is based. The +work thus licensed is called the contributor's "contributor version". + + A contributor's "essential patent claims" are all patent claims +owned or controlled by the contributor, whether already acquired or +hereafter acquired, that would be infringed by some manner, permitted +by this License, of making, using, or selling its contributor version, +but do not include claims that would be infringed only as a +consequence of further modification of the contributor version. For +purposes of this definition, "control" includes the right to grant +patent sublicenses in a manner consistent with the requirements of +this License. + + Each contributor grants you a non-exclusive, worldwide, royalty-free +patent license under the contributor's essential patent claims, to +make, use, sell, offer for sale, import and otherwise run, modify and +propagate the contents of its contributor version. + + In the following three paragraphs, a "patent license" is any express +agreement or commitment, however denominated, not to enforce a patent +(such as an express permission to practice a patent or covenant not to +sue for patent infringement). To "grant" such a patent license to a +party means to make such an agreement or commitment not to enforce a +patent against the party. 
+ + If you convey a covered work, knowingly relying on a patent license, +and the Corresponding Source of the work is not available for anyone +to copy, free of charge and under the terms of this License, through a +publicly available network server or other readily accessible means, +then you must either (1) cause the Corresponding Source to be so +available, or (2) arrange to deprive yourself of the benefit of the +patent license for this particular work, or (3) arrange, in a manner +consistent with the requirements of this License, to extend the patent +license to downstream recipients. "Knowingly relying" means you have +actual knowledge that, but for the patent license, your conveying the +covered work in a country, or your recipient's use of the covered work +in a country, would infringe one or more identifiable patents in that +country that you have reason to believe are valid. + + If, pursuant to or in connection with a single transaction or +arrangement, you convey, or propagate by procuring conveyance of, a +covered work, and grant a patent license to some of the parties +receiving the covered work authorizing them to use, propagate, modify +or convey a specific copy of the covered work, then the patent license +you grant is automatically extended to all recipients of the covered +work and works based on it. + + A patent license is "discriminatory" if it does not include within +the scope of its coverage, prohibits the exercise of, or is +conditioned on the non-exercise of one or more of the rights that are +specifically granted under this License. 
You may not convey a covered +work if you are a party to an arrangement with a third party that is +in the business of distributing software, under which you make payment +to the third party based on the extent of your activity of conveying +the work, and under which the third party grants, to any of the +parties who would receive the covered work from you, a discriminatory +patent license (a) in connection with copies of the covered work +conveyed by you (or copies made from those copies), or (b) primarily +for and in connection with specific products or compilations that +contain the covered work, unless you entered into that arrangement, +or that patent license was granted, prior to 28 March 2007. + + Nothing in this License shall be construed as excluding or limiting +any implied license or other defenses to infringement that may +otherwise be available to you under applicable patent law. + + 12. No Surrender of Others' Freedom. + + If conditions are imposed on you (whether by court order, agreement or +otherwise) that contradict the conditions of this License, they do not +excuse you from the conditions of this License. If you cannot convey a +covered work so as to satisfy simultaneously your obligations under this +License and any other pertinent obligations, then as a consequence you may +not convey it at all. For example, if you agree to terms that obligate you +to collect a royalty for further conveying from those to whom you convey +the Program, the only way you could satisfy both those terms and this +License would be to refrain entirely from conveying the Program. + + 13. Remote Network Interaction; Use with the GNU General Public License. 
+ + Notwithstanding any other provision of this License, if you modify the +Program, your modified version must prominently offer all users +interacting with it remotely through a computer network (if your version +supports such interaction) an opportunity to receive the Corresponding +Source of your version by providing access to the Corresponding Source +from a network server at no charge, through some standard or customary +means of facilitating copying of software. This Corresponding Source +shall include the Corresponding Source for any work covered by version 3 +of the GNU General Public License that is incorporated pursuant to the +following paragraph. + + Notwithstanding any other provision of this License, you have +permission to link or combine any covered work with a work licensed +under version 3 of the GNU General Public License into a single +combined work, and to convey the resulting work. The terms of this +License will continue to apply to the part which is the covered work, +but the work with which it is combined will remain governed by version +3 of the GNU General Public License. + + 14. Revised Versions of this License. + + The Free Software Foundation may publish revised and/or new versions of +the GNU Affero General Public License from time to time. Such new versions +will be similar in spirit to the present version, but may differ in detail to +address new problems or concerns. + + Each version is given a distinguishing version number. If the +Program specifies that a certain numbered version of the GNU Affero General +Public License "or any later version" applies to it, you have the +option of following the terms and conditions either of that numbered +version or of any later version published by the Free Software +Foundation. If the Program does not specify a version number of the +GNU Affero General Public License, you may choose any version ever published +by the Free Software Foundation. 
+ + If the Program specifies that a proxy can decide which future +versions of the GNU Affero General Public License can be used, that proxy's +public statement of acceptance of a version permanently authorizes you +to choose that version for the Program. + + Later license versions may give you additional or different +permissions. However, no additional obligations are imposed on any +author or copyright holder as a result of your choosing to follow a +later version. + + 15. Disclaimer of Warranty. + + THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY +APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT +HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY +OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, +THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR +PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM +IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF +ALL NECESSARY SERVICING, REPAIR OR CORRECTION. + + 16. Limitation of Liability. + + IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING +WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS +THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY +GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE +USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF +DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD +PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS), +EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF +SUCH DAMAGES. + + 17. Interpretation of Sections 15 and 16. 
+ + If the disclaimer of warranty and limitation of liability provided +above cannot be given local legal effect according to their terms, +reviewing courts shall apply local law that most closely approximates +an absolute waiver of all civil liability in connection with the +Program, unless a warranty or assumption of liability accompanies a +copy of the Program in return for a fee. + + END OF TERMS AND CONDITIONS + + How to Apply These Terms to Your New Programs + + If you develop a new program, and you want it to be of the greatest +possible use to the public, the best way to achieve this is to make it +free software which everyone can redistribute and change under these terms. + + To do so, attach the following notices to the program. It is safest +to attach them to the start of each source file to most effectively +state the exclusion of warranty; and each file should have at least +the "copyright" line and a pointer to where the full notice is found. + + + Copyright (C) + + This program is free software: you can redistribute it and/or modify + it under the terms of the GNU Affero General Public License as published + by the Free Software Foundation, either version 3 of the License, or + (at your option) any later version. + + This program is distributed in the hope that it will be useful, + but WITHOUT ANY WARRANTY; without even the implied warranty of + MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the + GNU Affero General Public License for more details. + + You should have received a copy of the GNU Affero General Public License + along with this program. If not, see . + +Also add information on how to contact you by electronic and paper mail. + + If your software can interact with users remotely through a computer +network, you should also make sure that it provides a way for users to +get its source. For example, if your program is a web application, its +interface could display a "Source" link that leads users to an archive +of the code. 
There are many ways you could offer source, and different +solutions will be better for different programs; see section 13 for the +specific requirements. + + You should also get your employer (if you work as a programmer) or school, +if any, to sign a "copyright disclaimer" for the program, if necessary. +For more information on this, and how to apply and follow the GNU AGPL, see +. diff --git a/vendor/github.com/influxdata/ifql/Makefile b/vendor/github.com/influxdata/ifql/Makefile new file mode 100644 index 000000000..50f513844 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/Makefile @@ -0,0 +1,66 @@ +VERSION ?= $(shell git describe --always --tags) +SUBDIRS := ast parser promql +GO_ARGS=-tags '$(GO_TAGS)' +export GO_BUILD=go build $(GO_ARGS) +export GO_TEST=go test $(GO_ARGS) +export GO_GENERATE=go generate $(GO_ARGS) + +SOURCES := $(shell find . -name '*.go' -not -name '*_test.go') +SOURCES_NO_VENDOR := $(shell find . -path ./vendor -prune -o -name "*.go" -not -name '*_test.go' -print) + +all: Gopkg.lock $(SUBDIRS) bin/ifql bin/ifqld + +$(SUBDIRS): bin/pigeon bin/cmpgen + $(MAKE) -C $@ $(MAKECMDGOALS) + +bin/ifql: $(SOURCES) bin/pigeon bin/cmpgen + $(GO_BUILD) -i -o bin/ifql ./cmd/ifql + +bin/ifqld: $(SOURCES) bin/pigeon bin/cmpgen + $(GO_BUILD) -i -o bin/ifqld ./cmd/ifqld + +bin/pigeon: ./vendor/github.com/mna/pigeon/main.go + go build -i -o bin/pigeon ./vendor/github.com/mna/pigeon + +bin/cmpgen: ./ast/asttest/cmpgen/main.go + go build -i -o bin/cmpgen ./ast/asttest/cmpgen + +Gopkg.lock: Gopkg.toml + dep ensure -v + +vendor/github.com/mna/pigeon/main.go: Gopkg.lock + dep ensure -v + +fmt: $(SOURCES_NO_VENDOR) + goimports -w $^ + +update: + dep ensure -v -update + +test: Gopkg.lock bin/ifql + $(GO_TEST) ./... + +test-race: Gopkg.lock bin/ifql + $(GO_TEST) -race ./... + +bench: Gopkg.lock bin/ifql + $(GO_TEST) -bench=. -run=^$$ ./... 
+ +bin/goreleaser: + go build -i -o bin/goreleaser ./vendor/github.com/goreleaser/goreleaser + +dist: bin/goreleaser + PATH=./bin:${PATH} goreleaser --rm-dist --release-notes CHANGELOG.md + +release: dist release-docker + +release-docker: + docker build -t quay.io/influxdb/ifqld:latest . + docker tag quay.io/influxdb/ifqld:latest quay.io/influxdb/ifqld:${VERSION} + docker push quay.io/influxdb/ifqld:latest + docker push quay.io/influxdb/ifqld:${VERSION} + +clean: $(SUBDIRS) + rm -rf bin dist + +.PHONY: all clean $(SUBDIRS) update test test-race bench release docker dist fmt diff --git a/vendor/github.com/influxdata/ifql/README.md b/vendor/github.com/influxdata/ifql/README.md new file mode 100644 index 000000000..bb829ee53 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/README.md @@ -0,0 +1,498 @@ +# IFQL (Influx Query Language) + +`ifqld` is an HTTP server for running **IFQL** queries to one or more InfluxDB +servers. + +`ifqld` runs on port `8093` by default + +### Specification +Here is the rough design specification for details until we get documentation up: http://bit.ly/ifql-spec + +### INSTALLATION +1. Upgrade to InfluxDB >= 1.4.1 +https://portal.influxdata.com/downloads + + +2. Update the InfluxDB configuration file to enable **IFQL** processing; restart +the InfluxDB server. InfluxDB will open port `8082` to accept **IFQL** queries. + +> **This port has no authentication.** + +``` +[ifql] + enabled = true + log-enabled = true + bind-address = ":8082" +``` + +3. Download `ifqld` and install from https://github.com/influxdata/ifql/releases + +4. Start `ifqld` with the InfluxDB host and port of `8082`. To run in federated +mode (see below), add the `--host` option for each InfluxDB host. + +```sh +ifqld --verbose --host localhost:8082 +``` + +5. 
To run a query POST an **IFQL** query string to `/query` as the `q` parameter: +```sh +curl -XPOST --data-urlencode \ +'q=from(db:"telegraf") + |> filter(fn: (r) => r["_measurement"] == "cpu" AND r["_field"] == "usage_user") + |> range(start:-170h) + |> sum()' \ +http://localhost:8093/query +``` + +#### docker compose + +To spin up a testing environment you can run: + +``` +docker-compose up +``` + +Inside the `root` directory. It will spin up an `influxdb` and `ifqld` daemon +ready to be used. `influxd` is exposed on port `8086` and port `8082`. + + +### Prometheus metrics +Metrics are exposed on `/metrics`. +`ifqld` records the number of queries and the number of different functions within **IFQL** queries + +### Federated Mode +By passing the `--host` option multiple times `ifqld` will query multiple +InfluxDB servers. + +For example: + +```sh +ifqld --host influxdb1:8082 --host influxdb2:8082 +``` + +The results from multiple InfluxDB are merged together as if there was +one server. + +### Basic Syntax + +IFQL constructs a query by starting with a table of data and passing the table through transformations steps to describe the desired query operations. +Transformations are represented as functions which take a table of data as an input argument and return a new table that has been transformed. +There is a special function `from` which is a source function, meaning it does not accept a table as input, but rather produces a table. +All other transformation functions accept at least one table and return a table as a result. + +For example to get the last point for each series in a database you start by creating a table using `from` and then pass that table into the `limit` function. + +``` +// Select the last point per series in the telegraf database. +limit(table:from(db:"telegraf"), n:1) +``` + +Since it is common to chain long lists of transformations together the pipe forward operator `|>` can be used to make reading the code easier. 
+These two expressions are equivalent: + +``` +// Select the last point per series in the telegraf database. +limit(table:from(db:"telegraf"), n:1) + + +// Same as above, but uses the pipe forward operator to indicate the flow of data. +from(db:"telegraf") |> limit(n:1) +``` + + +Long list of functions can thus be chained together: + +``` +// Get the first point per host from the last minute of data. +from(db:"telegraf") |> range(start:-1m) |> group(by:["host"]) |> first() +``` + + + +### Supported Functions + +Below is a list of supported functions. + +#### from + +Starting point for all queires. Get data from the specified database. + +Example: `from(db:"telegraf")` + +##### options +* `db` string + `from(db:"telegraf")` + +* `hosts` array of strings + `from(db:"telegraf", hosts:["host1", "host2"])` + +#### count + +Counts the number of results + +Example: `from(db:"telegraf") |> count()` + +#### first + +Returns the first result of the query + +Example: `from(db:"telegraf") |> first()` + +#### group +Groups results by a user-specified set of tags + +##### options + +* `by` array of strings +Group by these specific tag names +Cannot be used with `except` option + +Example: `from(db: "telegraf") |> range(start: -30m) |> group(by: ["tag_a", "tag_b"])` + +* `keep` array of strings +Keep specific tag keys that were not in `by` in the results + +Example: `from(db: "telegraf") |> range(start: -30m) |> group(by: ["tag_a", "tag_b"], keep:["tag_c"])` +* `except` array of strings +Group by all but these tag keys +Cannot be used with `by` option + +Example: `from(db: "telegraf") |> range(start: -30m) |> group(except: ["tag_a"], keep:["tag_b", "tag_c"])` + +#### join + +Join two time series together on time and the list of `on` keys. 
+ +Example: + +``` +cpu = from(db: "telegraf") |> filter(fn: (r) => r["_measurement"] == "cpu" and r["_field"] == "usage_user") |> range(start: -30m) +mem = from(db: "telegraf") |> filter(fn: (r) => r["_measurement"] == "mem" and r["_field"] == "used_percent") |> range(start: -30m) +join(tables:{cpu:cpu, mem:mem}, on:["host"], fn: (tables) => tables.cpu["_value"] + tables.mem["_value"]) +``` + +##### options + +* `tables` map of tables +Map of tables to join. Currently only two tables are allowed. + +* `on` array of strings +List of tag keys that when equal produces a result set. + +* `fn` + +Defines the function that merges the values of the tables. +The function must defined to accept a single parameter. +The parameter is a map, which uses the same keys found in the `tables` map. +The function is called for each joined set of records from the tables. + +#### last +Returns the last result of the query + +Example: `from(db: "telegraf") |> last()` + +#### limit +Restricts the number of rows returned in the results. + +Example: `from(db: "telegraf") |> limit(n: 10)` + +#### map + +Applies a function to each row of the table. + +##### options + +* `fn` function + +Function to apply to each row. The return value of the function may be a single value or an object. 
+ +Example: +``` +from(db:"foo") + |> filter(fn: (r) => r["_measurement"]=="cpu" AND + r["_field"] == "usage_system" AND + r["service"] == "app-server") + |> range(start:-12h) + // Square the value + |> map(fn: (r) => r._value * r._value) +``` + +Example: +``` +from(db:"foo") + |> filter(fn: (r) => r["_measurement"]=="cpu" AND + r["_field"] == "usage_system" AND + r["service"] == "app-server") + |> range(start:-12h) + // Square the value and keep the original value + |> map(fn: (r) => ({value: r._value, value2:r._value * r._value})) +``` + +#### max + +Returns the max value within the results + +Example: +``` +from(db:"foo") + |> filter(fn: (r) => r["_measurement"]=="cpu" AND + r["_field"] == "usage_system" AND + r["service"] == "app-server") + |> range(start:-12h) + |> window(every:10m) + |> max() +``` + +#### mean +Returns the mean of the values within the results + +Example: +``` +from(db:"foo") + |> filter(fn: (r) => r["_measurement"] == "mem" AND + r["_field"] == "used_percent") + |> range(start:-12h) + |> window(every:10m) + |> mean() +``` + +#### min +Returns the min value within the results + +Example: +``` +from(db:"foo") + |> filter(fn: (r) => r[ "_measurement"] == "cpu" AND + r["_field" ]== "usage_system") + |> range(start:-12h) + |> window(every:10m, period: 5m) + |> min() +``` + + +#### range +Filters the results by time boundaries + +Example: +``` +from(db:"foo") + |> filter(fn: (r) => r["_measurement"] == "cpu" AND + r["_field"] == "usage_system") + |> range(start:-12h, stop: -15m) +``` + +##### options +* start duration +Specifies the oldest time to be included in the results + +* stop duration or timestamp +Specifies exclusive upper time bound +Defaults to "now" + +#### sample + +Example to sample every fifth point starting from the second element: +``` +from(db:"foo") + |> filter(fn: (r) => r["_measurement"] == "cpu" AND + r["_field"] == "usage_system") + |> range(start:-1d) + |> sample(n: 5, pos: 1) +``` + +##### options +* `n` +Sample every Nth 
element +* `pos` +Position offset from start of results to begin sampling +`pos` must be less than `n` +If `pos` less than 0, a random offset is used. +Default is -1 (random offset) + +#### set +Add tag of key and value to set +Example: `from(db: "telegraf") |> set(key: "mykey", value: "myvalue")` +##### options +* `key` string +* `value` string + +#### skew +Skew of the results + +Example: `from(db: "telegraf") |> range(start: -30m, stop: -15m) |> skew()` + +#### sort +Sorts the results by the specified columns +Default sort is ascending + +Example: +``` +from(db:"telegraf") + |> filter(fn: (r) => r["_measurement"] == "system" AND + r["_field"] == "uptime") + |> range(start:-12h) + |> sort(cols:["region", "host", "value"]) +``` + +##### options +* `cols` array of strings +List of columns used to sort; precedence from left to right. +Default is `["value"]` + +For example, this sorts by uptime descending to find the longest +running instances. + +``` +from(db:"telegraf") + |> filter(fn: (r) => r["_measurement"] == "system" AND + r["_field"] == "uptime") + |> range(start:-12h) + |> sort(desc: true) +``` + +* `desc` bool +Sort results descending + +#### spread +Difference between min and max values + +Example: `from(db: "telegraf") |> range(start: -30m) |> spread()` + +#### stddev +Standard Deviation of the results + +Example: `from(db: "telegraf") |> range(start: -30m, stop: -15m) |> stddev()` + +#### sum +Sum of the results + +Example: `from(db: "telegraf") |> range(start: -30m, stop: -15m) |> sum()` + +#### filter +Filters the results using an expression + +Example: +``` +from(db:"foo") + |> filter(fn: (r) => r["_measurement"]=="cpu" AND + r["_field"] == "usage_system" AND + r["service"] == "app-server") + |> range(start:-12h) + |> max() +``` + +##### options + +* `fn` function(record) bool + +Function to when filtering the records. +The function must accept a single parameter which will be the records and return a boolean value. 
+Records which evaluate to true, will be included in the results. + +#### window +Partitions the results by a given time range + +##### options +* `every` duration +Duration of time between windows + +Defaults to `period`'s value +``` +from(db:"foo") + |> range(start:-12h) + |> window(every:10m) + |> max() +``` + +* `period` duration +Duration of the windowed parition +``` +from(db:"foo") + |> range(start:-12h) + |> window(every:10m) + |> max() +``` + +Default to `every`'s value +* `start` time +The time of the initial window parition. + +* `round` duration +Rounds a window's bounds to the nearest duration + +Example: +``` +from(db:"foo") + |> range(start:-12h) + |> window(every:10m) + |> max() +``` + +### Custom Functions + +IFQL also allows the user to define their own functions. +The function syntax is: + +``` +(parameter list) => +``` + +The list of parameters is simply a list of identifiers with optional default values. +The function body is either a single expression which is returned or a block of statements. +Functions may be assigned to identifiers to given them a name. + +Examples: + +``` +// Define a simple addition function +add = (a,b) => a + b + +// Define a helper function to get data from a telegraf measurement. +// By default the database is expected to be named "telegraf". +telegrafM = (measurement, db="telegraf") => + from(db:db) + |> filter(fn: (r) => r._measurement == measurement) + +// Define a helper function for a common join operation +// Use block syntax since we have more than a single expression +abJoin = (measurementA, measurementB, on) => { + a = telegrafM(measurement:measurementA) + b = telegrafM(measurement:measurementB) + return join( + tables:{a:a, b:b}, + on:on, + // Return a map from the join fn, + // this creates a table with a column for each key in the map. + // Note the () around the map to indicate a single map expression instead of function block. 
+ fn: (t) => ({ + a: t.a._value, + b: t.b._value, + }), + ) +} +``` + +#### Pipe Arguments + +Functions may also declare that an argument can be piped into from an pipe forward operator by specifing a special default value: + +``` +// Define add function which accepts `a` as the piped argument. +add = (a=<-, b) => a + b + +// Call add using the pipe forward syntax. +1 |> add(b:3) // 4 + +// Define measurement function which accepts table as the piped argument. +measurement = (m, table=<-) => table |> filter(fn: (r) => r._measurement == m) + +// Define field function which accepts table as the piped argument +field = (field, table=<-) => table |> filter(fn: (r) => r._field == field) + +// Query usage_idle from the cpu measurement and the telegraf database. +// Using the measurement and field functions. +from(db:"telegraf") + |> measurement(m:"cpu") + |> field(field:"usage_idle") +``` + diff --git a/vendor/github.com/influxdata/ifql/ast/Makefile b/vendor/github.com/influxdata/ifql/ast/Makefile new file mode 100644 index 000000000..4ca2b9c34 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/ast/Makefile @@ -0,0 +1,9 @@ +SUBDIRS := asttest + +$(SUBDIRS): + $(MAKE) -C $@ $(MAKECMDGOALS) + +all: $(SUBDIRS) + + +.PHONY: $(SUBDIRS) clean diff --git a/vendor/github.com/influxdata/ifql/ast/ast.go b/vendor/github.com/influxdata/ifql/ast/ast.go new file mode 100644 index 000000000..cb5446242 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/ast/ast.go @@ -0,0 +1,850 @@ +package ast + +import ( + "encoding/json" + "fmt" + "regexp" + "time" +) + +// Position represents a specific location in the source +type Position struct { + Line int `json:"line"` // Line is the line in the source marked by this position + Column int `json:"column"` // Column is the column in the source marked by this position +} + +// SourceLocation represents the location of a node in the AST +type SourceLocation struct { + Start Position `json:"start"` // Start is the location in the source the 
node starts + End Position `json:"end"` // End is the location in the source the node ends + Source *string `json:"source,omitempty"` // Source is optional raw source +} + +// Node represents a node in the InfluxDB abstract syntax tree. +type Node interface { + node() + Type() string // Type property is a string that contains the variant type of the node + Location() *SourceLocation + Copy() Node + + // All node must support json marshalling + json.Marshaler +} + +func (*Program) node() {} + +func (*BlockStatement) node() {} +func (*ExpressionStatement) node() {} +func (*ReturnStatement) node() {} +func (*VariableDeclaration) node() {} +func (*VariableDeclarator) node() {} + +func (*ArrayExpression) node() {} +func (*ArrowFunctionExpression) node() {} +func (*BinaryExpression) node() {} +func (*CallExpression) node() {} +func (*ConditionalExpression) node() {} +func (*LogicalExpression) node() {} +func (*MemberExpression) node() {} +func (*PipeExpression) node() {} +func (*ObjectExpression) node() {} +func (*UnaryExpression) node() {} + +func (*Property) node() {} +func (*Identifier) node() {} + +func (*BooleanLiteral) node() {} +func (*DateTimeLiteral) node() {} +func (*DurationLiteral) node() {} +func (*FloatLiteral) node() {} +func (*IntegerLiteral) node() {} +func (*PipeLiteral) node() {} +func (*RegexpLiteral) node() {} +func (*StringLiteral) node() {} +func (*UnsignedIntegerLiteral) node() {} + +// BaseNode holds the attributes every expression or statement should have +type BaseNode struct { + Loc *SourceLocation `json:"location,omitempty"` +} + +// Location is the source location of the Node +func (b *BaseNode) Location() *SourceLocation { return b.Loc } + +// Program represents a complete program source tree +type Program struct { + *BaseNode + Body []Statement `json:"body"` +} + +// Type is the abstract type +func (*Program) Type() string { return "Program" } + +func (p *Program) Copy() Node { + np := new(Program) + *np = *p + if len(p.Body) > 0 { + 
np.Body = make([]Statement, len(p.Body)) + for i, s := range p.Body { + np.Body[i] = s.Copy().(Statement) + } + } + return np +} + +// Statement Perhaps we don't even want statements nor expression statements +type Statement interface { + Node + stmt() +} + +func (*BlockStatement) stmt() {} +func (*ExpressionStatement) stmt() {} +func (*ReturnStatement) stmt() {} +func (*VariableDeclaration) stmt() {} + +// BlockStatement is a set of statements +type BlockStatement struct { + *BaseNode + Body []Statement `json:"body"` +} + +// Type is the abstract type +func (*BlockStatement) Type() string { return "BlockStatement" } + +func (s *BlockStatement) Copy() Node { + ns := new(BlockStatement) + *ns = *s + + if len(s.Body) > 0 { + ns.Body = make([]Statement, len(s.Body)) + for i, stmt := range s.Body { + ns.Body[i] = stmt.Copy().(Statement) + } + } + return ns +} + +// ExpressionStatement may consist of an expression that does not return a value and is executed solely for its side-effects. +type ExpressionStatement struct { + *BaseNode + Expression Expression `json:"expression"` +} + +// Type is the abstract type +func (*ExpressionStatement) Type() string { return "ExpressionStatement" } + +func (s *ExpressionStatement) Copy() Node { + if s == nil { + return s + } + ns := new(ExpressionStatement) + *ns = *s + + ns.Expression = s.Expression.Copy().(Expression) + + return ns +} + +// ReturnStatement defines an Expression to return +type ReturnStatement struct { + *BaseNode + Argument Expression `json:"argument"` +} + +// Type is the abstract type +func (*ReturnStatement) Type() string { return "ReturnStatement" } +func (s *ReturnStatement) Copy() Node { + if s == nil { + return s + } + ns := new(ReturnStatement) + *ns = *s + + ns.Argument = s.Argument.Copy().(Expression) + + return ns +} + +// VariableDeclaration declares one or more variables using assignment +type VariableDeclaration struct { + *BaseNode + Declarations []*VariableDeclarator `json:"declarations"` +} + +// 
Type is the abstract type +func (*VariableDeclaration) Type() string { return "VariableDeclaration" } + +func (d *VariableDeclaration) Copy() Node { + if d == nil { + return d + } + nd := new(VariableDeclaration) + *nd = *d + + if len(d.Declarations) > 0 { + nd.Declarations = make([]*VariableDeclarator, len(d.Declarations)) + for i, decl := range d.Declarations { + nd.Declarations[i] = decl.Copy().(*VariableDeclarator) + } + } + + return nd +} + +// VariableDeclarator represents the declaration of a variable +type VariableDeclarator struct { + *BaseNode + ID *Identifier `json:"id"` + Init Expression `json:"init"` +} + +// Type is the abstract type +func (*VariableDeclarator) Type() string { return "VariableDeclarator" } + +func (d *VariableDeclarator) Copy() Node { + if d == nil { + return d + } + nd := new(VariableDeclarator) + *nd = *d + + nd.Init = d.Init.Copy().(Expression) + + return nd +} + +// Expression represents an action that can be performed by InfluxDB that can be evaluated to a value. 
+type Expression interface { + Node + expression() +} + +func (*ArrayExpression) expression() {} +func (*ArrowFunctionExpression) expression() {} +func (*BinaryExpression) expression() {} +func (*BooleanLiteral) expression() {} +func (*CallExpression) expression() {} +func (*ConditionalExpression) expression() {} +func (*DateTimeLiteral) expression() {} +func (*DurationLiteral) expression() {} +func (*FloatLiteral) expression() {} +func (*Identifier) expression() {} +func (*IntegerLiteral) expression() {} +func (*LogicalExpression) expression() {} +func (*MemberExpression) expression() {} +func (*ObjectExpression) expression() {} +func (*PipeExpression) expression() {} +func (*PipeLiteral) expression() {} +func (*RegexpLiteral) expression() {} +func (*StringLiteral) expression() {} +func (*UnaryExpression) expression() {} +func (*UnsignedIntegerLiteral) expression() {} + +// CallExpression represents a function all whose callee may be an Identifier or MemberExpression +type CallExpression struct { + *BaseNode + Callee Expression `json:"callee"` + Arguments []Expression `json:"arguments,omitempty"` +} + +// Type is the abstract type +func (*CallExpression) Type() string { return "CallExpression" } + +func (e *CallExpression) Copy() Node { + if e == nil { + return e + } + ne := new(CallExpression) + *ne = *e + + ne.Callee = e.Callee.Copy().(Expression) + + if len(e.Arguments) > 0 { + ne.Arguments = make([]Expression, len(e.Arguments)) + for i, arg := range e.Arguments { + ne.Arguments[i] = arg.Copy().(Expression) + } + } + + return ne +} + +type PipeExpression struct { + *BaseNode + Argument Expression `json:"argument"` + Call *CallExpression `json:"call"` +} + +// Type is the abstract type +func (*PipeExpression) Type() string { return "PipeExpression" } + +func (e *PipeExpression) Copy() Node { + if e == nil { + return e + } + ne := new(PipeExpression) + *ne = *e + + ne.Argument = e.Argument.Copy().(Expression) + ne.Call = e.Call.Copy().(*CallExpression) + + return 
ne +} + +// MemberExpression represents calling a property of a CallExpression +type MemberExpression struct { + *BaseNode + Object Expression `json:"object"` + Property Expression `json:"property"` +} + +// Type is the abstract type +func (*MemberExpression) Type() string { return "MemberExpression" } + +func (e *MemberExpression) Copy() Node { + if e == nil { + return e + } + ne := new(MemberExpression) + *ne = *e + + ne.Object = e.Object.Copy().(Expression) + ne.Property = e.Property.Copy().(Expression) + + return ne +} + +type ArrowFunctionExpression struct { + *BaseNode + Params []*Property `json:"params"` + Body Node `json:"body"` +} + +// Type is the abstract type +func (*ArrowFunctionExpression) Type() string { return "ArrowFunctionExpression" } + +func (e *ArrowFunctionExpression) Copy() Node { + if e == nil { + return e + } + ne := new(ArrowFunctionExpression) + *ne = *e + + if len(e.Params) > 0 { + ne.Params = make([]*Property, len(e.Params)) + for i, param := range e.Params { + ne.Params[i] = param.Copy().(*Property) + } + } + + ne.Body = e.Body.Copy() + + return ne +} + +// OperatorKind are Equality and Arithmatic operators. +// Result of evaluating an equality operator is always of type Boolean based on whether the +// comparison is true +// Arithmetic operators take numerical values (either literals or variables) as their operands +// and return a single numerical value. 
+type OperatorKind int + +const ( + opBegin OperatorKind = iota + MultiplicationOperator + DivisionOperator + AdditionOperator + SubtractionOperator + LessThanEqualOperator + LessThanOperator + GreaterThanEqualOperator + GreaterThanOperator + StartsWithOperator + InOperator + NotOperator + NotEmptyOperator + EmptyOperator + EqualOperator + NotEqualOperator + RegexpMatchOperator + NotRegexpMatchOperator + opEnd +) + +func (o OperatorKind) String() string { + return OperatorTokens[o] +} + +// OperatorLookup converts the operators to OperatorKind +func OperatorLookup(op string) OperatorKind { + return operators[op] +} + +func (o OperatorKind) MarshalText() ([]byte, error) { + text, ok := OperatorTokens[o] + if !ok { + return nil, fmt.Errorf("unknown operator %d", int(o)) + } + return []byte(text), nil +} +func (o *OperatorKind) UnmarshalText(data []byte) error { + var ok bool + *o, ok = operators[string(data)] + if !ok { + return fmt.Errorf("unknown operator %q", string(data)) + } + return nil +} + +// BinaryExpression use binary operators act on two operands in an expression. +// BinaryExpression includes relational and arithmatic operators +type BinaryExpression struct { + *BaseNode + Operator OperatorKind `json:"operator"` + Left Expression `json:"left"` + Right Expression `json:"right"` +} + +// Type is the abstract type +func (*BinaryExpression) Type() string { return "BinaryExpression" } + +func (e *BinaryExpression) Copy() Node { + if e == nil { + return e + } + ne := new(BinaryExpression) + *ne = *e + + ne.Left = e.Left.Copy().(Expression) + ne.Right = e.Right.Copy().(Expression) + + return ne +} + +// UnaryExpression use operators act on a single operand in an expression. 
+type UnaryExpression struct { + *BaseNode + Operator OperatorKind `json:"operator"` + Argument Expression `json:"argument"` +} + +// Type is the abstract type +func (*UnaryExpression) Type() string { return "UnaryExpression" } + +func (e *UnaryExpression) Copy() Node { + if e == nil { + return e + } + ne := new(UnaryExpression) + *ne = *e + + ne.Argument = e.Argument.Copy().(Expression) + + return ne +} + +// LogicalOperatorKind are used with boolean (logical) values +type LogicalOperatorKind int + +const ( + logOpBegin LogicalOperatorKind = iota + AndOperator + OrOperator + logOpEnd +) + +func (o LogicalOperatorKind) String() string { + return LogicalOperatorTokens[o] +} + +// LogicalOperatorLookup converts the operators to LogicalOperatorKind +func LogicalOperatorLookup(op string) LogicalOperatorKind { + return logOperators[op] +} + +func (o LogicalOperatorKind) MarshalText() ([]byte, error) { + text, ok := LogicalOperatorTokens[o] + if !ok { + return nil, fmt.Errorf("unknown logical operator %d", int(o)) + } + return []byte(text), nil +} +func (o *LogicalOperatorKind) UnmarshalText(data []byte) error { + var ok bool + *o, ok = logOperators[string(data)] + if !ok { + return fmt.Errorf("unknown logical operator %q", string(data)) + } + return nil +} + +// LogicalExpression represent the rule conditions that collectively evaluate to either true or false. +// `or` expressions compute the disjunction of two boolean expressions and return boolean values. +// `and`` expressions compute the conjunction of two boolean expressions and return boolean values. 
+type LogicalExpression struct { + *BaseNode + Operator LogicalOperatorKind `json:"operator"` + Left Expression `json:"left"` + Right Expression `json:"right"` +} + +// Type is the abstract type +func (*LogicalExpression) Type() string { return "LogicalExpression" } + +func (e *LogicalExpression) Copy() Node { + if e == nil { + return e + } + ne := new(LogicalExpression) + *ne = *e + + ne.Left = e.Left.Copy().(Expression) + ne.Right = e.Right.Copy().(Expression) + + return ne +} + +// ArrayExpression is used to create and directly specify the elements of an array object +type ArrayExpression struct { + *BaseNode + Elements []Expression `json:"elements"` +} + +// Type is the abstract type +func (*ArrayExpression) Type() string { return "ArrayExpression" } + +func (e *ArrayExpression) Copy() Node { + if e == nil { + return e + } + ne := new(ArrayExpression) + *ne = *e + + if len(e.Elements) > 0 { + ne.Elements = make([]Expression, len(e.Elements)) + for i, el := range e.Elements { + ne.Elements[i] = el.Copy().(Expression) + } + } + + return ne +} + +// ObjectExpression allows the declaration of an anonymous object within a declaration. +type ObjectExpression struct { + *BaseNode + Properties []*Property `json:"properties"` +} + +// Type is the abstract type +func (*ObjectExpression) Type() string { return "ObjectExpression" } + +func (e *ObjectExpression) Copy() Node { + if e == nil { + return e + } + ne := new(ObjectExpression) + *ne = *e + + if len(e.Properties) > 0 { + ne.Properties = make([]*Property, len(e.Properties)) + for i, p := range e.Properties { + ne.Properties[i] = p.Copy().(*Property) + } + } + + return ne +} + +// ConditionalExpression selects one of two expressions, `Alternate` or `Consequent` +// depending on a third, boolean, expression, `Test`. 
+type ConditionalExpression struct { + *BaseNode + Test Expression `json:"test"` + Alternate Expression `json:"alternate"` + Consequent Expression `json:"consequent"` +} + +// Type is the abstract type +func (*ConditionalExpression) Type() string { return "ConditionalExpression" } + +func (e *ConditionalExpression) Copy() Node { + if e == nil { + return e + } + ne := new(ConditionalExpression) + *ne = *e + + ne.Test = e.Test.Copy().(Expression) + ne.Alternate = e.Alternate.Copy().(Expression) + ne.Consequent = e.Consequent.Copy().(Expression) + + return ne +} + +// Property is the value associated with a key +type Property struct { + *BaseNode + Key *Identifier `json:"key"` + Value Expression `json:"value"` +} + +func (p *Property) Copy() Node { + if p == nil { + return p + } + np := new(Property) + *np = *p + + if p.Value != nil { + np.Value = p.Value.Copy().(Expression) + } + + return np +} + +// Type is the abstract type +func (*Property) Type() string { return "Property" } + +// Identifier represents a name that identifies a unique Node +type Identifier struct { + *BaseNode + Name string `json:"name"` +} + +// Type is the abstract type +func (*Identifier) Type() string { return "Identifier" } + +func (i *Identifier) Copy() Node { + if i == nil { + return i + } + ni := new(Identifier) + *ni = *i + return ni +} + +// Literal are thelexical forms for literal expressions which define +// boolean, string, integer, number, duration, datetime and field values. +// Literals must be coerced explicitly. 
+type Literal interface { + Expression + literal() +} + +func (*BooleanLiteral) literal() {} +func (*DateTimeLiteral) literal() {} +func (*DurationLiteral) literal() {} +func (*FloatLiteral) literal() {} +func (*IntegerLiteral) literal() {} +func (*PipeLiteral) literal() {} +func (*RegexpLiteral) literal() {} +func (*StringLiteral) literal() {} +func (*UnsignedIntegerLiteral) literal() {} + +// PipeLiteral represents an specialized literal value, indicating the left hand value of a pipe expression. +type PipeLiteral struct { + *BaseNode +} + +// Type is the abstract type +func (*PipeLiteral) Type() string { return "PipeLiteral" } + +func (i *PipeLiteral) Copy() Node { + if i == nil { + return i + } + ni := new(PipeLiteral) + *ni = *i + return ni +} + +// StringLiteral expressions begin and end with double quote marks. +type StringLiteral struct { + *BaseNode + Value string `json:"value"` +} + +func (*StringLiteral) Type() string { return "StringLiteral" } + +func (l *StringLiteral) Copy() Node { + if l == nil { + return l + } + nl := new(StringLiteral) + *nl = *l + return nl +} + +// BooleanLiteral represent boolean values +type BooleanLiteral struct { + *BaseNode + Value bool `json:"value"` +} + +// Type is the abstract type +func (*BooleanLiteral) Type() string { return "BooleanLiteral" } + +func (l *BooleanLiteral) Copy() Node { + if l == nil { + return l + } + nl := new(BooleanLiteral) + *nl = *l + return nl +} + +// FloatLiteral represent floating point numbers according to the double representations defined by the IEEE-754-1985 +type FloatLiteral struct { + *BaseNode + Value float64 `json:"value"` +} + +// Type is the abstract type +func (*FloatLiteral) Type() string { return "FloatLiteral" } + +func (l *FloatLiteral) Copy() Node { + if l == nil { + return l + } + nl := new(FloatLiteral) + *nl = *l + return nl +} + +// IntegerLiteral represent integer numbers. 
+type IntegerLiteral struct { + *BaseNode + Value int64 `json:"value"` +} + +// Type is the abstract type +func (*IntegerLiteral) Type() string { return "IntegerLiteral" } + +func (l *IntegerLiteral) Copy() Node { + if l == nil { + return l + } + nl := new(IntegerLiteral) + *nl = *l + return nl +} + +// UnsignedIntegerLiteral represent integer numbers. +type UnsignedIntegerLiteral struct { + *BaseNode + Value uint64 `json:"value"` +} + +// Type is the abstract type +func (*UnsignedIntegerLiteral) Type() string { return "UnsignedIntegerLiteral" } + +func (l *UnsignedIntegerLiteral) Copy() Node { + if l == nil { + return l + } + nl := new(UnsignedIntegerLiteral) + *nl = *l + return nl +} + +// RegexpLiteral expressions begin and end with `/` and are regular expressions with syntax accepted by RE2 +type RegexpLiteral struct { + *BaseNode + Value *regexp.Regexp `json:"value"` +} + +// Type is the abstract type +func (*RegexpLiteral) Type() string { return "RegexpLiteral" } + +func (l *RegexpLiteral) Copy() Node { + if l == nil { + return l + } + nl := new(RegexpLiteral) + *nl = *l + return nl +} + +// DurationLiteral represents the elapsed time between two instants as an +// int64 nanosecond count with syntax of golang's time.Duration +// TODO: this may be better as a class initialization +type DurationLiteral struct { + *BaseNode + Value time.Duration `json:"value"` +} + +// Type is the abstract type +func (*DurationLiteral) Type() string { return "DurationLiteral" } + +func (l *DurationLiteral) Copy() Node { + if l == nil { + return l + } + nl := new(DurationLiteral) + *nl = *l + return nl +} + +// DateTimeLiteral represents an instant in time with nanosecond precision using +// the syntax of golang's RFC3339 Nanosecond variant +// TODO: this may be better as a class initialization +type DateTimeLiteral struct { + *BaseNode + Value time.Time `json:"value"` +} + +// Type is the abstract type +func (*DateTimeLiteral) Type() string { return "DateTimeLiteral" } + +func 
(l *DateTimeLiteral) Copy() Node { + if l == nil { + return l + } + nl := new(DateTimeLiteral) + *nl = *l + return nl +} + +// OperatorTokens converts OperatorKind to string +var OperatorTokens = map[OperatorKind]string{ + MultiplicationOperator: "*", + DivisionOperator: "/", + AdditionOperator: "+", + SubtractionOperator: "-", + LessThanEqualOperator: "<=", + LessThanOperator: "<", + GreaterThanOperator: ">", + GreaterThanEqualOperator: ">=", + InOperator: "in", + NotOperator: "not", + NotEmptyOperator: "not empty", + EmptyOperator: "empty", + StartsWithOperator: "startswith", + EqualOperator: "==", + NotEqualOperator: "!=", + RegexpMatchOperator: "=~", + NotRegexpMatchOperator: "!~", +} + +// LogicalOperatorTokens converts LogicalOperatorKind to string +var LogicalOperatorTokens = map[LogicalOperatorKind]string{ + AndOperator: "and", + OrOperator: "or", +} + +var operators map[string]OperatorKind +var logOperators map[string]LogicalOperatorKind + +func init() { + operators = make(map[string]OperatorKind) + for op := opBegin + 1; op < opEnd; op++ { + operators[OperatorTokens[op]] = op + } + + logOperators = make(map[string]LogicalOperatorKind) + for op := logOpBegin + 1; op < logOpEnd; op++ { + logOperators[LogicalOperatorTokens[op]] = op + } +} diff --git a/vendor/github.com/influxdata/ifql/ast/json.go b/vendor/github.com/influxdata/ifql/ast/json.go new file mode 100644 index 000000000..c7008d569 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/ast/json.go @@ -0,0 +1,894 @@ +package ast + +import ( + "encoding/json" + "fmt" + "regexp" + "strconv" + "time" +) + +func (p *Program) MarshalJSON() ([]byte, error) { + type Alias Program + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: p.Type(), + Alias: (*Alias)(p), + } + return json.Marshal(raw) +} +func (p *Program) UnmarshalJSON(data []byte) error { + type Alias Program + raw := struct { + *Alias + Body []json.RawMessage `json:"body"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { 
+ return err + } + if raw.Alias != nil { + *p = *(*Program)(raw.Alias) + } + + p.Body = make([]Statement, len(raw.Body)) + for i, r := range raw.Body { + s, err := unmarshalStatement(r) + if err != nil { + return err + } + p.Body[i] = s + } + return nil +} +func (s *BlockStatement) MarshalJSON() ([]byte, error) { + type Alias BlockStatement + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: s.Type(), + Alias: (*Alias)(s), + } + return json.Marshal(raw) +} +func (s *BlockStatement) UnmarshalJSON(data []byte) error { + type Alias BlockStatement + raw := struct { + *Alias + Body []json.RawMessage `json:"body"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *s = *(*BlockStatement)(raw.Alias) + } + + s.Body = make([]Statement, len(raw.Body)) + for i, r := range raw.Body { + stmt, err := unmarshalStatement(r) + if err != nil { + return err + } + s.Body[i] = stmt + } + return nil +} +func (s *ExpressionStatement) MarshalJSON() ([]byte, error) { + type Alias ExpressionStatement + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: s.Type(), + Alias: (*Alias)(s), + } + return json.Marshal(raw) +} +func (s *ExpressionStatement) UnmarshalJSON(data []byte) error { + type Alias ExpressionStatement + raw := struct { + *Alias + Expression json.RawMessage `json:"expression"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *s = *(*ExpressionStatement)(raw.Alias) + } + + e, err := unmarshalExpression(raw.Expression) + if err != nil { + return err + } + s.Expression = e + return nil +} +func (s *ReturnStatement) MarshalJSON() ([]byte, error) { + type Alias ReturnStatement + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: s.Type(), + Alias: (*Alias)(s), + } + return json.Marshal(raw) +} +func (s *ReturnStatement) UnmarshalJSON(data []byte) error { + type Alias ReturnStatement + raw := struct { + *Alias + Argument json.RawMessage 
`json:"argument"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *s = *(*ReturnStatement)(raw.Alias) + } + + e, err := unmarshalExpression(raw.Argument) + if err != nil { + return err + } + s.Argument = e + return nil +} +func (d *VariableDeclaration) MarshalJSON() ([]byte, error) { + type Alias VariableDeclaration + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: d.Type(), + Alias: (*Alias)(d), + } + return json.Marshal(raw) +} +func (d *VariableDeclarator) MarshalJSON() ([]byte, error) { + type Alias VariableDeclarator + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: d.Type(), + Alias: (*Alias)(d), + } + return json.Marshal(raw) +} +func (d *VariableDeclarator) UnmarshalJSON(data []byte) error { + type Alias VariableDeclarator + raw := struct { + *Alias + Init json.RawMessage `json:"init"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *d = *(*VariableDeclarator)(raw.Alias) + } + + e, err := unmarshalExpression(raw.Init) + if err != nil { + return err + } + d.Init = e + return nil +} +func (e *CallExpression) MarshalJSON() ([]byte, error) { + type Alias CallExpression + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: e.Type(), + Alias: (*Alias)(e), + } + return json.Marshal(raw) +} +func (e *CallExpression) UnmarshalJSON(data []byte) error { + type Alias CallExpression + raw := struct { + *Alias + Callee json.RawMessage `json:"callee"` + Arguments []json.RawMessage `json:"arguments"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *e = *(*CallExpression)(raw.Alias) + } + + callee, err := unmarshalExpression(raw.Callee) + if err != nil { + return err + } + e.Callee = callee + + e.Arguments = make([]Expression, len(raw.Arguments)) + for i, r := range raw.Arguments { + expr, err := unmarshalExpression(r) + if err != nil { + return err + } + 
e.Arguments[i] = expr + } + return nil +} +func (e *PipeExpression) MarshalJSON() ([]byte, error) { + type Alias PipeExpression + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: e.Type(), + Alias: (*Alias)(e), + } + return json.Marshal(raw) +} +func (e *PipeExpression) UnmarshalJSON(data []byte) error { + type Alias PipeExpression + raw := struct { + *Alias + Argument json.RawMessage `json:"argument"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *e = *(*PipeExpression)(raw.Alias) + } + + arg, err := unmarshalExpression(raw.Argument) + if err != nil { + return err + } + e.Argument = arg + + return nil +} +func (e *MemberExpression) MarshalJSON() ([]byte, error) { + type Alias MemberExpression + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: e.Type(), + Alias: (*Alias)(e), + } + return json.Marshal(raw) +} +func (e *MemberExpression) UnmarshalJSON(data []byte) error { + type Alias MemberExpression + raw := struct { + *Alias + Object json.RawMessage `json:"object"` + Property json.RawMessage `json:"property"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *e = *(*MemberExpression)(raw.Alias) + } + + object, err := unmarshalExpression(raw.Object) + if err != nil { + return err + } + e.Object = object + + property, err := unmarshalExpression(raw.Property) + if err != nil { + return err + } + e.Property = property + + return nil +} +func (e *ArrowFunctionExpression) MarshalJSON() ([]byte, error) { + type Alias ArrowFunctionExpression + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: e.Type(), + Alias: (*Alias)(e), + } + return json.Marshal(raw) +} +func (e *ArrowFunctionExpression) UnmarshalJSON(data []byte) error { + type Alias ArrowFunctionExpression + raw := struct { + *Alias + Body json.RawMessage `json:"body"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != 
nil { + *e = *(*ArrowFunctionExpression)(raw.Alias) + } + + body, err := unmarshalNode(raw.Body) + if err != nil { + return err + } + e.Body = body + return nil +} +func (e *BinaryExpression) MarshalJSON() ([]byte, error) { + type Alias BinaryExpression + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: e.Type(), + Alias: (*Alias)(e), + } + return json.Marshal(raw) +} +func (e *BinaryExpression) UnmarshalJSON(data []byte) error { + type Alias BinaryExpression + raw := struct { + *Alias + Left json.RawMessage `json:"left"` + Right json.RawMessage `json:"right"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *e = *(*BinaryExpression)(raw.Alias) + } + + l, err := unmarshalExpression(raw.Left) + if err != nil { + return err + } + e.Left = l + + r, err := unmarshalExpression(raw.Right) + if err != nil { + return err + } + e.Right = r + return nil +} +func (e *UnaryExpression) MarshalJSON() ([]byte, error) { + type Alias UnaryExpression + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: e.Type(), + Alias: (*Alias)(e), + } + return json.Marshal(raw) +} +func (e *UnaryExpression) UnmarshalJSON(data []byte) error { + type Alias UnaryExpression + raw := struct { + *Alias + Argument json.RawMessage `json:"argument"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *e = *(*UnaryExpression)(raw.Alias) + } + + argument, err := unmarshalExpression(raw.Argument) + if err != nil { + return err + } + e.Argument = argument + + return nil +} +func (e *LogicalExpression) MarshalJSON() ([]byte, error) { + type Alias LogicalExpression + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: e.Type(), + Alias: (*Alias)(e), + } + return json.Marshal(raw) +} +func (e *LogicalExpression) UnmarshalJSON(data []byte) error { + type Alias LogicalExpression + raw := struct { + *Alias + Left json.RawMessage `json:"left"` + Right json.RawMessage 
`json:"right"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *e = *(*LogicalExpression)(raw.Alias) + } + + l, err := unmarshalExpression(raw.Left) + if err != nil { + return err + } + e.Left = l + + r, err := unmarshalExpression(raw.Right) + if err != nil { + return err + } + e.Right = r + return nil +} +func (e *ArrayExpression) MarshalJSON() ([]byte, error) { + type Alias ArrayExpression + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: e.Type(), + Alias: (*Alias)(e), + } + return json.Marshal(raw) +} +func (e *ArrayExpression) UnmarshalJSON(data []byte) error { + type Alias ArrayExpression + raw := struct { + *Alias + Elements []json.RawMessage `json:"elements"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *e = *(*ArrayExpression)(raw.Alias) + } + + e.Elements = make([]Expression, len(raw.Elements)) + for i, r := range raw.Elements { + expr, err := unmarshalExpression(r) + if err != nil { + return err + } + e.Elements[i] = expr + } + return nil +} +func (e *ObjectExpression) MarshalJSON() ([]byte, error) { + type Alias ObjectExpression + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: e.Type(), + Alias: (*Alias)(e), + } + return json.Marshal(raw) +} +func (e *ConditionalExpression) MarshalJSON() ([]byte, error) { + type Alias ConditionalExpression + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: e.Type(), + Alias: (*Alias)(e), + } + return json.Marshal(raw) +} +func (e *ConditionalExpression) UnmarshalJSON(data []byte) error { + type Alias ConditionalExpression + raw := struct { + *Alias + Test json.RawMessage `json:"test"` + Alternate json.RawMessage `json:"alternate"` + Consequent json.RawMessage `json:"consequent"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *e = *(*ConditionalExpression)(raw.Alias) + } + + test, err := 
unmarshalExpression(raw.Test) + if err != nil { + return err + } + e.Test = test + + alternate, err := unmarshalExpression(raw.Alternate) + if err != nil { + return err + } + e.Alternate = alternate + + consequent, err := unmarshalExpression(raw.Consequent) + if err != nil { + return err + } + e.Consequent = consequent + return nil +} +func (p *Property) MarshalJSON() ([]byte, error) { + type Alias Property + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: p.Type(), + Alias: (*Alias)(p), + } + return json.Marshal(raw) +} +func (p *Property) UnmarshalJSON(data []byte) error { + type Alias Property + raw := struct { + *Alias + Value json.RawMessage `json:"value"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *p = *(*Property)(raw.Alias) + } + + if raw.Value != nil { + value, err := unmarshalExpression(raw.Value) + if err != nil { + return err + } + p.Value = value + } + return nil +} +func (i *Identifier) MarshalJSON() ([]byte, error) { + type Alias Identifier + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: i.Type(), + Alias: (*Alias)(i), + } + return json.Marshal(raw) +} +func (l *PipeLiteral) MarshalJSON() ([]byte, error) { + type Alias PipeLiteral + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: l.Type(), + Alias: (*Alias)(l), + } + return json.Marshal(raw) +} +func (l *StringLiteral) MarshalJSON() ([]byte, error) { + type Alias StringLiteral + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: l.Type(), + Alias: (*Alias)(l), + } + return json.Marshal(raw) +} +func (l *BooleanLiteral) MarshalJSON() ([]byte, error) { + type Alias BooleanLiteral + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: l.Type(), + Alias: (*Alias)(l), + } + return json.Marshal(raw) +} +func (l *FloatLiteral) MarshalJSON() ([]byte, error) { + type Alias FloatLiteral + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: l.Type(), + 
Alias: (*Alias)(l), + } + return json.Marshal(raw) +} +func (l *IntegerLiteral) MarshalJSON() ([]byte, error) { + type Alias IntegerLiteral + raw := struct { + Type string `json:"type"` + *Alias + Value string `json:"value"` + }{ + Type: l.Type(), + Alias: (*Alias)(l), + Value: strconv.FormatInt(l.Value, 10), + } + return json.Marshal(raw) +} +func (l *IntegerLiteral) UnmarshalJSON(data []byte) error { + type Alias IntegerLiteral + raw := struct { + *Alias + Value string `json:"value"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + + if raw.Alias != nil { + *l = *(*IntegerLiteral)(raw.Alias) + } + + value, err := strconv.ParseInt(raw.Value, 10, 64) + if err != nil { + return err + } + l.Value = value + return nil +} +func (l *UnsignedIntegerLiteral) MarshalJSON() ([]byte, error) { + type Alias UnsignedIntegerLiteral + raw := struct { + Type string `json:"type"` + *Alias + Value string `json:"value"` + }{ + Type: l.Type(), + Alias: (*Alias)(l), + Value: strconv.FormatUint(l.Value, 10), + } + return json.Marshal(raw) +} +func (l *UnsignedIntegerLiteral) UnmarshalJSON(data []byte) error { + type Alias UnsignedIntegerLiteral + raw := struct { + *Alias + Value string `json:"value"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + + if raw.Alias != nil { + *l = *(*UnsignedIntegerLiteral)(raw.Alias) + } + + value, err := strconv.ParseUint(raw.Value, 10, 64) + if err != nil { + return err + } + l.Value = value + return nil +} +func (l *RegexpLiteral) MarshalJSON() ([]byte, error) { + type Alias RegexpLiteral + raw := struct { + Type string `json:"type"` + *Alias + Value string `json:"value"` + }{ + Type: l.Type(), + Alias: (*Alias)(l), + Value: l.Value.String(), + } + return json.Marshal(raw) +} +func (l *RegexpLiteral) UnmarshalJSON(data []byte) error { + type Alias RegexpLiteral + raw := struct { + *Alias + Value string `json:"value"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + 
+ if raw.Alias != nil { + *l = *(*RegexpLiteral)(raw.Alias) + } + + value, err := regexp.Compile(raw.Value) + if err != nil { + return err + } + l.Value = value + return nil +} +func (l *DurationLiteral) MarshalJSON() ([]byte, error) { + type Alias DurationLiteral + raw := struct { + Type string `json:"type"` + *Alias + Value string `json:"value"` + }{ + Type: l.Type(), + Alias: (*Alias)(l), + Value: l.Value.String(), + } + return json.Marshal(raw) +} +func (l *DurationLiteral) UnmarshalJSON(data []byte) error { + type Alias DurationLiteral + raw := struct { + *Alias + Value string `json:"value"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + + if raw.Alias != nil { + *l = *(*DurationLiteral)(raw.Alias) + } + + value, err := time.ParseDuration(raw.Value) + if err != nil { + return err + } + l.Value = value + return nil +} + +func (l *DateTimeLiteral) MarshalJSON() ([]byte, error) { + type Alias DateTimeLiteral + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: l.Type(), + Alias: (*Alias)(l), + } + return json.Marshal(raw) +} + +func checkNullMsg(msg json.RawMessage) bool { + switch len(msg) { + case 0: + return true + case 4: + return string(msg) == "null" + default: + return false + } +} +func unmarshalStatement(msg json.RawMessage) (Statement, error) { + if checkNullMsg(msg) { + return nil, nil + } + n, err := unmarshalNode(msg) + if err != nil { + return nil, err + } + s, ok := n.(Statement) + if !ok { + return nil, fmt.Errorf("node %q is not a statement", n.Type()) + } + return s, nil +} +func unmarshalExpression(msg json.RawMessage) (Expression, error) { + if checkNullMsg(msg) { + return nil, nil + } + n, err := unmarshalNode(msg) + if err != nil { + return nil, err + } + e, ok := n.(Expression) + if !ok { + return nil, fmt.Errorf("node %q is not an expression", n.Type()) + } + return e, nil +} +func unmarshalLiteral(msg json.RawMessage) (Literal, error) { + if checkNullMsg(msg) { + return nil, nil + } + n, err 
:= unmarshalNode(msg) + if err != nil { + return nil, err + } + e, ok := n.(Literal) + if !ok { + return nil, fmt.Errorf("node %q is not a literal", n.Type()) + } + return e, nil +} +func unmarshalNode(msg json.RawMessage) (Node, error) { + if checkNullMsg(msg) { + return nil, nil + } + + type typeRawMessage struct { + Type string `json:"type"` + } + + typ := typeRawMessage{} + if err := json.Unmarshal(msg, &typ); err != nil { + return nil, err + } + + var node Node + switch typ.Type { + case "Program": + node = new(Program) + case "BlockStatement": + node = new(BlockStatement) + case "ExpressionStatement": + node = new(ExpressionStatement) + case "ReturnStatement": + node = new(ReturnStatement) + case "VariableDeclaration": + node = new(VariableDeclaration) + case "VariableDeclarator": + node = new(VariableDeclarator) + case "CallExpression": + node = new(CallExpression) + case "PipeExpression": + node = new(PipeExpression) + case "MemberExpression": + node = new(MemberExpression) + case "BinaryExpression": + node = new(BinaryExpression) + case "UnaryExpression": + node = new(UnaryExpression) + case "LogicalExpression": + node = new(LogicalExpression) + case "ObjectExpression": + node = new(ObjectExpression) + case "ConditionalExpression": + node = new(ConditionalExpression) + case "ArrayExpression": + node = new(ArrayExpression) + case "Identifier": + node = new(Identifier) + case "PipeLiteral": + node = new(PipeLiteral) + case "StringLiteral": + node = new(StringLiteral) + case "BooleanLiteral": + node = new(BooleanLiteral) + case "FloatLiteral": + node = new(FloatLiteral) + case "IntegerLiteral": + node = new(IntegerLiteral) + case "UnsignedIntegerLiteral": + node = new(UnsignedIntegerLiteral) + case "RegexpLiteral": + node = new(RegexpLiteral) + case "DurationLiteral": + node = new(DurationLiteral) + case "DateTimeLiteral": + node = new(DateTimeLiteral) + case "ArrowFunctionExpression": + node = new(ArrowFunctionExpression) + case "Property": + node = 
new(Property) + default: + return nil, fmt.Errorf("unknown type %q", typ.Type) + } + + if err := json.Unmarshal(msg, node); err != nil { + return nil, err + } + return node, nil +} +func UnmarshalNode(data []byte) (Node, error) { + return unmarshalNode((json.RawMessage)(data)) +} diff --git a/vendor/github.com/influxdata/ifql/ast/json_test.go b/vendor/github.com/influxdata/ifql/ast/json_test.go new file mode 100644 index 000000000..6ba936b24 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/ast/json_test.go @@ -0,0 +1,255 @@ +package ast_test + +import ( + "encoding/json" + "math" + "regexp" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/influxdata/ifql/ast" + "github.com/influxdata/ifql/ast/asttest" +) + +func TestJSONMarshal(t *testing.T) { + testCases := []struct { + name string + node ast.Node + want string + }{ + { + name: "simple program", + node: &ast.Program{ + Body: []ast.Statement{ + &ast.ExpressionStatement{ + Expression: &ast.StringLiteral{Value: "hello"}, + }, + }, + }, + want: `{"type":"Program","body":[{"type":"ExpressionStatement","expression":{"type":"StringLiteral","value":"hello"}}]}`, + }, + { + name: "block statement", + node: &ast.BlockStatement{ + Body: []ast.Statement{ + &ast.ExpressionStatement{ + Expression: &ast.StringLiteral{Value: "hello"}, + }, + }, + }, + want: `{"type":"BlockStatement","body":[{"type":"ExpressionStatement","expression":{"type":"StringLiteral","value":"hello"}}]}`, + }, + { + name: "expression statement", + node: &ast.ExpressionStatement{ + Expression: &ast.StringLiteral{Value: "hello"}, + }, + want: `{"type":"ExpressionStatement","expression":{"type":"StringLiteral","value":"hello"}}`, + }, + { + name: "return statement", + node: &ast.ReturnStatement{ + Argument: &ast.StringLiteral{Value: "hello"}, + }, + want: `{"type":"ReturnStatement","argument":{"type":"StringLiteral","value":"hello"}}`, + }, + { + name: "variable declaration", + node: &ast.VariableDeclaration{ + Declarations: 
[]*ast.VariableDeclarator{ + { + ID: &ast.Identifier{Name: "a"}, + Init: &ast.StringLiteral{Value: "hello"}, + }, + }, + }, + want: `{"type":"VariableDeclaration","declarations":[{"type":"VariableDeclarator","id":{"type":"Identifier","name":"a"},"init":{"type":"StringLiteral","value":"hello"}}]}`, + }, + { + name: "variable declarator", + node: &ast.VariableDeclarator{ + ID: &ast.Identifier{Name: "a"}, + Init: &ast.StringLiteral{Value: "hello"}, + }, + want: `{"type":"VariableDeclarator","id":{"type":"Identifier","name":"a"},"init":{"type":"StringLiteral","value":"hello"}}`, + }, + { + name: "call expression", + node: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "a"}, + Arguments: []ast.Expression{&ast.StringLiteral{Value: "hello"}}, + }, + want: `{"type":"CallExpression","callee":{"type":"Identifier","name":"a"},"arguments":[{"type":"StringLiteral","value":"hello"}]}`, + }, + { + name: "pipe expression", + node: &ast.PipeExpression{ + Argument: &ast.Identifier{Name: "a"}, + Call: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "a"}, + Arguments: []ast.Expression{&ast.StringLiteral{Value: "hello"}}, + }, + }, + want: `{"type":"PipeExpression","argument":{"type":"Identifier","name":"a"},"call":{"type":"CallExpression","callee":{"type":"Identifier","name":"a"},"arguments":[{"type":"StringLiteral","value":"hello"}]}}`, + }, + { + name: "member expression", + node: &ast.MemberExpression{ + Object: &ast.Identifier{Name: "a"}, + Property: &ast.StringLiteral{Value: "hello"}, + }, + want: `{"type":"MemberExpression","object":{"type":"Identifier","name":"a"},"property":{"type":"StringLiteral","value":"hello"}}`, + }, + { + name: "arrow function expression", + node: &ast.ArrowFunctionExpression{ + Params: []*ast.Property{{Key: &ast.Identifier{Name: "a"}}}, + Body: &ast.StringLiteral{Value: "hello"}, + }, + want: 
`{"type":"ArrowFunctionExpression","params":[{"type":"Property","key":{"type":"Identifier","name":"a"},"value":null}],"body":{"type":"StringLiteral","value":"hello"}}`, + }, + { + name: "binary expression", + node: &ast.BinaryExpression{ + Operator: ast.AdditionOperator, + Left: &ast.StringLiteral{Value: "hello"}, + Right: &ast.StringLiteral{Value: "world"}, + }, + want: `{"type":"BinaryExpression","operator":"+","left":{"type":"StringLiteral","value":"hello"},"right":{"type":"StringLiteral","value":"world"}}`, + }, + { + name: "unary expression", + node: &ast.UnaryExpression{ + Operator: ast.NotOperator, + Argument: &ast.BooleanLiteral{Value: true}, + }, + want: `{"type":"UnaryExpression","operator":"not","argument":{"type":"BooleanLiteral","value":true}}`, + }, + { + name: "logical expression", + node: &ast.LogicalExpression{ + Operator: ast.OrOperator, + Left: &ast.BooleanLiteral{Value: false}, + Right: &ast.BooleanLiteral{Value: true}, + }, + want: `{"type":"LogicalExpression","operator":"or","left":{"type":"BooleanLiteral","value":false},"right":{"type":"BooleanLiteral","value":true}}`, + }, + { + name: "array expression", + node: &ast.ArrayExpression{ + Elements: []ast.Expression{&ast.StringLiteral{Value: "hello"}}, + }, + want: `{"type":"ArrayExpression","elements":[{"type":"StringLiteral","value":"hello"}]}`, + }, + { + name: "object expression", + node: &ast.ObjectExpression{ + Properties: []*ast.Property{{ + Key: &ast.Identifier{Name: "a"}, + Value: &ast.StringLiteral{Value: "hello"}, + }}, + }, + want: `{"type":"ObjectExpression","properties":[{"type":"Property","key":{"type":"Identifier","name":"a"},"value":{"type":"StringLiteral","value":"hello"}}]}`, + }, + { + name: "conditional expression", + node: &ast.ConditionalExpression{ + Test: &ast.BooleanLiteral{Value: true}, + Alternate: &ast.StringLiteral{Value: "false"}, + Consequent: &ast.StringLiteral{Value: "true"}, + }, + want: 
`{"type":"ConditionalExpression","test":{"type":"BooleanLiteral","value":true},"alternate":{"type":"StringLiteral","value":"false"},"consequent":{"type":"StringLiteral","value":"true"}}`, + }, + { + name: "property", + node: &ast.Property{ + Key: &ast.Identifier{Name: "a"}, + Value: &ast.StringLiteral{Value: "hello"}, + }, + want: `{"type":"Property","key":{"type":"Identifier","name":"a"},"value":{"type":"StringLiteral","value":"hello"}}`, + }, + { + name: "identifier", + node: &ast.Identifier{ + Name: "a", + }, + want: `{"type":"Identifier","name":"a"}`, + }, + { + name: "string literal", + node: &ast.StringLiteral{ + Value: "hello", + }, + want: `{"type":"StringLiteral","value":"hello"}`, + }, + { + name: "boolean literal", + node: &ast.BooleanLiteral{ + Value: true, + }, + want: `{"type":"BooleanLiteral","value":true}`, + }, + { + name: "float literal", + node: &ast.FloatLiteral{ + Value: 42.1, + }, + want: `{"type":"FloatLiteral","value":42.1}`, + }, + { + name: "integer literal", + node: &ast.IntegerLiteral{ + Value: math.MaxInt64, + }, + want: `{"type":"IntegerLiteral","value":"9223372036854775807"}`, + }, + { + name: "unsigned integer literal", + node: &ast.UnsignedIntegerLiteral{ + Value: math.MaxUint64, + }, + want: `{"type":"UnsignedIntegerLiteral","value":"18446744073709551615"}`, + }, + { + name: "regexp literal", + node: &ast.RegexpLiteral{ + Value: regexp.MustCompile(`.*`), + }, + want: `{"type":"RegexpLiteral","value":".*"}`, + }, + { + name: "duration literal", + node: &ast.DurationLiteral{ + Value: time.Hour + time.Minute, + }, + want: `{"type":"DurationLiteral","value":"1h1m0s"}`, + }, + { + name: "datetime literal", + node: &ast.DateTimeLiteral{ + Value: time.Date(2017, 8, 8, 8, 8, 8, 8, time.UTC), + }, + want: `{"type":"DateTimeLiteral","value":"2017-08-08T08:08:08.000000008Z"}`, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + data, err := json.Marshal(tc.node) + if err != nil { + t.Fatal(err) + } + 
if got := string(data); got != tc.want { + t.Errorf("unexpected json data:\nwant:%s\ngot: %s\n", tc.want, got) + } + node, err := ast.UnmarshalNode(data) + if err != nil { + t.Fatal(err) + } + if !cmp.Equal(tc.node, node, asttest.CompareOptions...) { + t.Errorf("unexpected node after unmarshalling: -want/+got:\n%s", cmp.Diff(tc.node, node, asttest.CompareOptions...)) + } + }) + } +} diff --git a/vendor/github.com/influxdata/ifql/circle-test.sh b/vendor/github.com/influxdata/ifql/circle-test.sh new file mode 100755 index 000000000..2066cc4ff --- /dev/null +++ b/vendor/github.com/influxdata/ifql/circle-test.sh @@ -0,0 +1,26 @@ +#!/bin/bash + +# Get dir of script and make it is our working directory. +DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) +cd $DIR + +# Build image +imagename="ifql-img-${CIRCLE_BUILD_NUM}" +dataname="ifql-data-${CIRCLE_BUILD_NUM}" + +docker build -f Dockerfile_build -t $imagename . + +# Create docker volume of repo + +docker create \ + --name $dataname \ + -v "/root/go/src/github.com/influxdata/ifql" \ + $imagename /bin/true +docker cp "$DIR/" "$dataname:/root/go/src/github.com/influxdata/" + +# Run tests in docker +docker run \ + --rm \ + --volumes-from $dataname \ + "$imagename" \ + make test diff --git a/vendor/github.com/influxdata/ifql/compiler/compiler.go b/vendor/github.com/influxdata/ifql/compiler/compiler.go new file mode 100644 index 000000000..d3d638098 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/compiler/compiler.go @@ -0,0 +1,228 @@ +package compiler + +import ( + "errors" + "fmt" + + "github.com/influxdata/ifql/semantic" +) + +func Compile(f *semantic.FunctionExpression, inTypes map[string]semantic.Type) (Func, error) { + declarations := make(map[string]semantic.VariableDeclaration, len(inTypes)) + for k, t := range inTypes { + declarations[k] = semantic.NewExternalVariableDeclaration(k, t) + } + f = f.Copy().(*semantic.FunctionExpression) + semantic.ApplyNewDeclarations(f, declarations) + + root, err := 
compile(f.Body) + if err != nil { + return nil, err + } + cpy := make(map[string]semantic.Type) + for k, v := range inTypes { + cpy[k] = v + } + return compiledFn{ + root: root, + inTypes: cpy, + }, nil +} + +func compile(n semantic.Node) (Evaluator, error) { + switch n := n.(type) { + case *semantic.BlockStatement: + body := make([]Evaluator, len(n.Body)) + for i, s := range n.Body { + node, err := compile(s) + if err != nil { + return nil, err + } + body[i] = node + } + return &blockEvaluator{ + t: n.ReturnStatement().Argument.Type(), + body: body, + }, nil + case *semantic.ExpressionStatement: + return nil, errors.New("statement does nothing, sideffects are not supported by the compiler") + case *semantic.ReturnStatement: + node, err := compile(n.Argument) + if err != nil { + return nil, err + } + return returnEvaluator{ + Evaluator: node, + }, nil + case *semantic.NativeVariableDeclaration: + node, err := compile(n.Init) + if err != nil { + return nil, err + } + return &declarationEvaluator{ + t: n.Init.Type(), + id: n.Identifier.Name, + init: node, + }, nil + case *semantic.ObjectExpression: + properties := make(map[string]Evaluator, len(n.Properties)) + for _, p := range n.Properties { + node, err := compile(p.Value) + if err != nil { + return nil, err + } + properties[p.Key.Name] = node + } + return &mapEvaluator{ + t: n.Type(), + properties: properties, + }, nil + case *semantic.IdentifierExpression: + return &identifierEvaluator{ + t: n.Type(), + name: n.Name, + }, nil + case *semantic.MemberExpression: + object, err := compile(n.Object) + if err != nil { + return nil, err + } + return &memberEvaluator{ + t: n.Type(), + object: object, + property: n.Property, + }, nil + case *semantic.BooleanLiteral: + return &booleanEvaluator{ + t: n.Type(), + b: n.Value, + }, nil + case *semantic.IntegerLiteral: + return &integerEvaluator{ + t: n.Type(), + i: n.Value, + }, nil + case *semantic.FloatLiteral: + return &floatEvaluator{ + t: n.Type(), + f: n.Value, + }, nil 
+ case *semantic.StringLiteral: + return &stringEvaluator{ + t: n.Type(), + s: n.Value, + }, nil + case *semantic.RegexpLiteral: + return ®expEvaluator{ + t: n.Type(), + r: n.Value, + }, nil + case *semantic.DateTimeLiteral: + return &timeEvaluator{ + t: n.Type(), + time: Time(n.Value.UnixNano()), + }, nil + case *semantic.UnaryExpression: + node, err := compile(n.Argument) + if err != nil { + return nil, err + } + return &unaryEvaluator{ + t: n.Type(), + node: node, + }, nil + case *semantic.LogicalExpression: + l, err := compile(n.Left) + if err != nil { + return nil, err + } + r, err := compile(n.Right) + if err != nil { + return nil, err + } + return &logicalEvaluator{ + t: n.Type(), + operator: n.Operator, + left: l, + right: r, + }, nil + case *semantic.BinaryExpression: + l, err := compile(n.Left) + if err != nil { + return nil, err + } + lt := l.Type() + r, err := compile(n.Right) + if err != nil { + return nil, err + } + rt := r.Type() + sig := binarySignature{ + Operator: n.Operator, + Left: lt, + Right: rt, + } + f, ok := binaryFuncs[sig] + if !ok { + return nil, fmt.Errorf("unsupported binary expression %v %v %v", sig.Left, sig.Operator, sig.Right) + } + return &binaryEvaluator{ + t: n.Type(), + left: l, + right: r, + f: f.Func, + }, nil + default: + return nil, fmt.Errorf("unknown semantic node of type %T", n) + } +} + +// CompilationCache caches compilation results based on the types of the input parameters. +type CompilationCache struct { + fn *semantic.FunctionExpression + root *compilationCacheNode +} + +func NewCompilationCache(fn *semantic.FunctionExpression) *CompilationCache { + return &CompilationCache{ + fn: fn, + root: new(compilationCacheNode), + } +} + +// Compile returnes a compiled function bsaed on the provided types. +// The result will be cached for subsequent calls. 
+func (c *CompilationCache) Compile(types map[string]semantic.Type) (Func, error) { + return c.root.compile(c.fn, 0, types) +} + +type compilationCacheNode struct { + children map[semantic.Type]*compilationCacheNode + + fn Func + err error +} + +// compile recursively searches for a matching child node that has compiled the function. +// If the compilation has not been performed previously its result is cached and returned. +func (c *compilationCacheNode) compile(fn *semantic.FunctionExpression, idx int, types map[string]semantic.Type) (Func, error) { + if idx == len(fn.Params) { + // We are the matching child, return the cached result or do the compilation. + if c.fn == nil && c.err == nil { + c.fn, c.err = Compile(fn, types) + } + return c.fn, c.err + } + // Find the matching child based on the order. + next := fn.Params[idx].Key.Name + t := types[next] + child := c.children[t] + if child == nil { + child = new(compilationCacheNode) + if c.children == nil { + c.children = make(map[semantic.Type]*compilationCacheNode) + } + c.children[t] = child + } + return child.compile(fn, idx+1, types) +} diff --git a/vendor/github.com/influxdata/ifql/compiler/compiler_test.go b/vendor/github.com/influxdata/ifql/compiler/compiler_test.go new file mode 100644 index 000000000..4be0a98c3 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/compiler/compiler_test.go @@ -0,0 +1,183 @@ +package compiler_test + +import ( + "reflect" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/influxdata/ifql/ast" + "github.com/influxdata/ifql/compiler" + "github.com/influxdata/ifql/semantic" + "github.com/influxdata/ifql/semantic/semantictest" +) + +var CmpOptions []cmp.Option + +func init() { + CmpOptions = append(semantictest.CmpOptions, cmp.Comparer(ValueEqual)) +} + +func ValueEqual(x, y compiler.Value) bool { + if x.Type() != y.Type() { + return false + } + switch k := x.Type().Kind(); k { + case semantic.Bool: + return x.Bool() == y.Bool() + case semantic.UInt: + return 
x.UInt() == y.UInt() + case semantic.Int: + return x.Int() == y.Int() + case semantic.Float: + return x.Float() == y.Float() + case semantic.String: + return x.Str() == y.Str() + case semantic.Time: + return x.Time() == y.Time() + case semantic.Object: + return cmp.Equal(x.Object(), y.Object(), CmpOptions...) + default: + return false + } +} + +func TestCompilationCache(t *testing.T) { + add := &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{ + {Key: &semantic.Identifier{Name: "a"}}, + {Key: &semantic.Identifier{Name: "b"}}, + }, + Body: &semantic.BinaryExpression{ + Operator: ast.AdditionOperator, + Left: &semantic.IdentifierExpression{Name: "a"}, + Right: &semantic.IdentifierExpression{Name: "b"}, + }, + } + testCases := []struct { + name string + types map[string]semantic.Type + scope map[string]compiler.Value + want compiler.Value + }{ + { + name: "floats", + types: map[string]semantic.Type{ + "a": semantic.Float, + "b": semantic.Float, + }, + scope: map[string]compiler.Value{ + "a": compiler.NewFloat(5), + "b": compiler.NewFloat(4), + }, + want: compiler.NewFloat(9), + }, + { + name: "ints", + types: map[string]semantic.Type{ + "a": semantic.Int, + "b": semantic.Int, + }, + scope: map[string]compiler.Value{ + "a": compiler.NewInt(5), + "b": compiler.NewInt(4), + }, + want: compiler.NewInt(9), + }, + { + name: "uints", + types: map[string]semantic.Type{ + "a": semantic.UInt, + "b": semantic.UInt, + }, + scope: map[string]compiler.Value{ + "a": compiler.NewUInt(5), + "b": compiler.NewUInt(4), + }, + want: compiler.NewUInt(9), + }, + } + + //Reuse the same cache for all test cases + cache := compiler.NewCompilationCache(add) + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + f0, err := cache.Compile(tc.types) + if err != nil { + t.Fatal(err) + } + f1, err := cache.Compile(tc.types) + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(f0, f1) { + t.Errorf("unexpected new compilation result") + } + + got0, 
err := f0.Eval(tc.scope) + if err != nil { + t.Fatal(err) + } + got1, err := f1.Eval(tc.scope) + if err != nil { + t.Fatal(err) + } + + if !cmp.Equal(got0, tc.want, CmpOptions...) { + t.Errorf("unexpected eval result -want/+got\n%s", cmp.Diff(tc.want, got0, CmpOptions...)) + } + if !cmp.Equal(got0, got1, CmpOptions...) { + t.Errorf("unexpected differing results -got0/+got1\n%s", cmp.Diff(got0, got1, CmpOptions...)) + } + + }) + } +} + +func TestCompile(t *testing.T) { + testCases := []struct { + name string + fn *semantic.FunctionExpression + types map[string]semantic.Type + scope map[string]compiler.Value + want compiler.Value + wantErr bool + }{ + { + name: "simple ident return", + fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{ + {Key: &semantic.Identifier{Name: "r"}}, + }, + Body: &semantic.IdentifierExpression{Name: "r"}, + }, + types: map[string]semantic.Type{ + "r": semantic.Int, + }, + scope: map[string]compiler.Value{ + "r": compiler.NewInt(4), + }, + want: compiler.NewInt(4), + wantErr: false, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + f, err := compiler.Compile(tc.fn, tc.types) + if tc.wantErr != (err != nil) { + t.Errorf("unexpected error %s", err) + } + + got, err := f.Eval(tc.scope) + if tc.wantErr != (err != nil) { + t.Errorf("unexpected error %s", err) + } + + if !cmp.Equal(tc.want, got, CmpOptions...) { + t.Errorf("unexpected value -want/+got\n%s", cmp.Diff(tc.want, got, CmpOptions...)) + } + }) + } +} diff --git a/vendor/github.com/influxdata/ifql/compiler/doc.go b/vendor/github.com/influxdata/ifql/compiler/doc.go new file mode 100644 index 000000000..455101a46 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/compiler/doc.go @@ -0,0 +1,9 @@ +// The compiler package provides a compiler and Go runtime for a subset of the IFQL language. +// Only pure functions are supported by the compiler. 
+// A function is compiled and then may be called repeatedly with different arguments. +// The function must be pure meaning it has no side effects. Other language features are not supported. +// +// This runtime is not portable by design. The runtime consists of Go types that have been constructed based on the IFQL function being compiled. +// Those types are not serializable and cannot be transported to other systems or environments. +// This design is intended to limit the scope under which compilation must be supported. +package compiler diff --git a/vendor/github.com/influxdata/ifql/compiler/runtime.go b/vendor/github.com/influxdata/ifql/compiler/runtime.go new file mode 100644 index 000000000..0d3ffb07e --- /dev/null +++ b/vendor/github.com/influxdata/ifql/compiler/runtime.go @@ -0,0 +1,1866 @@ +package compiler + +import ( + "fmt" + "regexp" + + "github.com/influxdata/ifql/ast" + "github.com/influxdata/ifql/semantic" +) + +type Evaluator interface { + Type() semantic.Type + EvalBool(scope Scope) bool + EvalInt(scope Scope) int64 + EvalUInt(scope Scope) uint64 + EvalFloat(scope Scope) float64 + EvalString(scope Scope) string + EvalRegexp(scope Scope) *regexp.Regexp + EvalTime(scope Scope) Time + EvalObject(scope Scope) *Object +} + +type Func interface { + Type() semantic.Type + Eval(scope Scope) (Value, error) + EvalBool(scope Scope) (bool, error) + EvalInt(scope Scope) (int64, error) + EvalUInt(scope Scope) (uint64, error) + EvalFloat(scope Scope) (float64, error) + EvalString(scope Scope) (string, error) + EvalRegexp(scope Scope) (*regexp.Regexp, error) + EvalTime(scope Scope) (Time, error) + EvalObject(scope Scope) (*Object, error) +} + +type Time int64 + +type compiledFn struct { + root Evaluator + inTypes map[string]semantic.Type +} + +func (c compiledFn) validate(scope Scope) error { + // Validate scope + for k, t := range c.inTypes { + if scope.Type(k) != t { + return fmt.Errorf("missing or incorrectly typed value found in scope for name %q", k) + } + 
} + return nil +} + +func (c compiledFn) Type() semantic.Type { + return c.root.Type() +} + +func (c compiledFn) Eval(scope Scope) (Value, error) { + if err := c.validate(scope); err != nil { + return nil, err + } + var val interface{} + switch c.Type().Kind() { + case semantic.Bool: + val = c.root.EvalBool(scope) + case semantic.Int: + val = c.root.EvalInt(scope) + case semantic.UInt: + val = c.root.EvalUInt(scope) + case semantic.Float: + val = c.root.EvalFloat(scope) + case semantic.String: + val = c.root.EvalString(scope) + case semantic.Regexp: + val = c.root.EvalRegexp(scope) + case semantic.Time: + val = c.root.EvalTime(scope) + case semantic.Object: + val = c.root.EvalObject(scope) + default: + return nil, fmt.Errorf("unsupported kind %s", c.Type().Kind()) + } + return value{ + typ: c.Type(), + Value: val, + }, nil +} + +func (c compiledFn) EvalBool(scope Scope) (bool, error) { + if err := c.validate(scope); err != nil { + return false, err + } + return c.root.EvalBool(scope), nil +} +func (c compiledFn) EvalInt(scope Scope) (int64, error) { + if err := c.validate(scope); err != nil { + return 0, err + } + return c.root.EvalInt(scope), nil +} +func (c compiledFn) EvalUInt(scope Scope) (uint64, error) { + if err := c.validate(scope); err != nil { + return 0, err + } + return c.root.EvalUInt(scope), nil +} +func (c compiledFn) EvalFloat(scope Scope) (float64, error) { + if err := c.validate(scope); err != nil { + return 0, err + } + return c.root.EvalFloat(scope), nil +} +func (c compiledFn) EvalString(scope Scope) (string, error) { + if err := c.validate(scope); err != nil { + return "", err + } + return c.root.EvalString(scope), nil +} +func (c compiledFn) EvalRegexp(scope Scope) (*regexp.Regexp, error) { + if err := c.validate(scope); err != nil { + return nil, err + } + return c.root.EvalRegexp(scope), nil +} +func (c compiledFn) EvalTime(scope Scope) (Time, error) { + if err := c.validate(scope); err != nil { + return 0, err + } + return 
c.root.EvalTime(scope), nil +} +func (c compiledFn) EvalObject(scope Scope) (*Object, error) { + if err := c.validate(scope); err != nil { + return nil, err + } + return c.root.EvalObject(scope), nil +} + +type Value interface { + Type() semantic.Type + Bool() bool + Int() int64 + UInt() uint64 + Float() float64 + Str() string + Regexp() *regexp.Regexp + Time() Time + Object() *Object +} + +type value struct { + typ semantic.Type + Value interface{} +} + +func (v value) Type() semantic.Type { + return v.typ +} +func (v value) Bool() bool { + return v.Value.(bool) +} +func (v value) Int() int64 { + return v.Value.(int64) +} +func (v value) UInt() uint64 { + return v.Value.(uint64) +} +func (v value) Float() float64 { + return v.Value.(float64) +} +func (v value) Str() string { + return v.Value.(string) +} +func (v value) Regexp() *regexp.Regexp { + return v.Value.(*regexp.Regexp) +} +func (v value) Time() Time { + return v.Value.(Time) +} +func (v value) Object() *Object { + return v.Value.(*Object) +} + +func NewBool(v bool) Value { + return value{ + typ: semantic.Bool, + Value: v, + } +} +func NewUInt(v uint64) Value { + return value{ + typ: semantic.UInt, + Value: v, + } +} +func NewInt(v int64) Value { + return value{ + typ: semantic.Int, + Value: v, + } +} +func NewFloat(v float64) Value { + return value{ + typ: semantic.Float, + Value: v, + } +} +func NewString(v string) Value { + return value{ + typ: semantic.String, + Value: v, + } +} +func NewRegexp(v *regexp.Regexp) Value { + return value{ + typ: semantic.Regexp, + Value: v, + } +} +func NewTime(v Time) Value { + return value{ + typ: semantic.Time, + Value: v, + } +} + +type Scope map[string]Value + +func (s Scope) Type(name string) semantic.Type { + return s[name].Type() +} +func (s Scope) Set(name string, v Value) { + s[name] = v +} +func (s Scope) GetBool(name string) bool { + return s[name].Bool() +} +func (s Scope) GetInt(name string) int64 { + return s[name].Int() +} +func (s Scope) GetUInt(name 
string) uint64 {
	return s[name].UInt()
}
func (s Scope) GetFloat(name string) float64 {
	return s[name].Float()
}
func (s Scope) GetString(name string) string {
	return s[name].Str()
}
func (s Scope) GetRegexp(name string) *regexp.Regexp {
	return s[name].Regexp()
}
func (s Scope) GetTime(name string) Time {
	return s[name].Time()
}
func (s Scope) GetObject(name string) *Object {
	return s[name].Object()
}

// eval evaluates e in scope and boxes the result into a Value according to
// e's kind. Kinds with no boxing rule yield nil.
func eval(e Evaluator, scope Scope) Value {
	switch e.Type().Kind() {
	case semantic.Bool:
		return NewBool(e.EvalBool(scope))
	case semantic.Int:
		return NewInt(e.EvalInt(scope))
	case semantic.UInt:
		return NewUInt(e.EvalUInt(scope))
	case semantic.Float:
		return NewFloat(e.EvalFloat(scope))
	case semantic.String:
		return NewString(e.EvalString(scope))
	case semantic.Regexp:
		return NewRegexp(e.EvalRegexp(scope))
	case semantic.Time:
		return NewTime(e.EvalTime(scope))
	case semantic.Object:
		// Bug fix: the Object kind was previously unhandled here, so a
		// block or declaration whose result is an object produced a nil
		// Value, and blockEvaluator/declarationEvaluator later panicked on
		// a nil-interface method call. *Object implements Value directly,
		// so it can be returned as-is.
		return e.EvalObject(scope)
	default:
		return nil
	}
}

// checkKind panics when the actual kind does not match the expected kind.
func checkKind(act, exp semantic.Kind) {
	if act != exp {
		panic(unexpectedKind(act, exp))
	}
}

// unexpectedKind builds the error used by the kind-mismatch panics.
func unexpectedKind(act, exp semantic.Kind) error {
	return fmt.Errorf("unexpected kind: got %q want %q", act, exp)
}

// blockEvaluator evaluates a statement block; the value produced by the
// final statement becomes the block's value.
type blockEvaluator struct {
	t     semantic.Type
	body  []Evaluator
	value Value
}

func (e *blockEvaluator) Type() semantic.Type {
	return e.t
}

// eval runs every statement in order, retaining the last produced value.
func (e *blockEvaluator) eval(scope Scope) {
	for _, b := range e.body {
		e.value = eval(b, scope)
	}
}

func (e *blockEvaluator) EvalBool(scope Scope) bool {
	checkKind(e.t.Kind(), semantic.Bool)
	e.eval(scope)
	return e.value.Bool()
}

func (e *blockEvaluator) EvalInt(scope Scope) int64 {
	checkKind(e.t.Kind(), semantic.Int)
	e.eval(scope)
	return e.value.Int()
}

func (e *blockEvaluator) EvalUInt(scope Scope) uint64 {
	checkKind(e.t.Kind(), semantic.UInt)
	e.eval(scope)
	return e.value.UInt()
}

func (e *blockEvaluator) EvalFloat(scope Scope) float64 {
	checkKind(e.t.Kind(), semantic.Float)
	e.eval(scope)
	return e.value.Float()
}

func (e *blockEvaluator) EvalString(scope Scope) string {
	checkKind(e.t.Kind(), semantic.String)
	e.eval(scope)
	return e.value.Str()
}
func (e *blockEvaluator) EvalRegexp(scope Scope) *regexp.Regexp {
	checkKind(e.t.Kind(), semantic.Regexp)
	e.eval(scope)
	return e.value.Regexp()
}

func (e *blockEvaluator) EvalTime(scope Scope) Time {
	checkKind(e.t.Kind(), semantic.Time)
	e.eval(scope)
	return e.value.Time()
}
func (e *blockEvaluator) EvalObject(scope Scope) *Object {
	checkKind(e.t.Kind(), semantic.Object)
	e.eval(scope)
	return e.value.Object()
}

// returnEvaluator delegates everything to its wrapped evaluator.
type returnEvaluator struct {
	Evaluator
}

// declarationEvaluator binds the result of init to id in the scope and then
// reads the bound value back out through the typed scope getters.
type declarationEvaluator struct {
	t    semantic.Type
	id   string
	init Evaluator
}

func (e *declarationEvaluator) Type() semantic.Type {
	return e.t
}

func (e *declarationEvaluator) eval(scope Scope) {
	scope.Set(e.id, eval(e.init, scope))
}

func (e *declarationEvaluator) EvalBool(scope Scope) bool {
	e.eval(scope)
	return scope.GetBool(e.id)
}

func (e *declarationEvaluator) EvalInt(scope Scope) int64 {
	e.eval(scope)
	return scope.GetInt(e.id)
}

func (e *declarationEvaluator) EvalUInt(scope Scope) uint64 {
	e.eval(scope)
	return scope.GetUInt(e.id)
}

func (e *declarationEvaluator) EvalFloat(scope Scope) float64 {
	e.eval(scope)
	return scope.GetFloat(e.id)
}

func (e *declarationEvaluator) EvalString(scope Scope) string {
	e.eval(scope)
	return scope.GetString(e.id)
}
func (e *declarationEvaluator) EvalRegexp(scope Scope) *regexp.Regexp {
	e.eval(scope)
	return scope.GetRegexp(e.id)
}

func (e *declarationEvaluator) EvalTime(scope Scope) Time {
	e.eval(scope)
	return scope.GetTime(e.id)
}

func (e *declarationEvaluator) EvalObject(scope Scope) *Object {
	e.eval(scope)
	return scope.GetObject(e.id)
}

// mapEvaluator builds an object by evaluating each property expression;
// only EvalObject is valid on it.
type mapEvaluator struct {
	t          semantic.Type
	properties map[string]Evaluator
}

func (e *mapEvaluator) Type() semantic.Type {
	return e.t
}

func (e *mapEvaluator) EvalBool(scope
Scope) bool {
	panic(unexpectedKind(e.t.Kind(), semantic.Bool))
}

func (e *mapEvaluator) EvalInt(scope Scope) int64 {
	panic(unexpectedKind(e.t.Kind(), semantic.Int))
}

func (e *mapEvaluator) EvalUInt(scope Scope) uint64 {
	panic(unexpectedKind(e.t.Kind(), semantic.UInt))
}

func (e *mapEvaluator) EvalFloat(scope Scope) float64 {
	panic(unexpectedKind(e.t.Kind(), semantic.Float))
}

func (e *mapEvaluator) EvalString(scope Scope) string {
	panic(unexpectedKind(e.t.Kind(), semantic.String))
}
func (e *mapEvaluator) EvalRegexp(scope Scope) *regexp.Regexp {
	panic(unexpectedKind(e.t.Kind(), semantic.Regexp))
}

func (e *mapEvaluator) EvalTime(scope Scope) Time {
	panic(unexpectedKind(e.t.Kind(), semantic.Time))
}
func (e *mapEvaluator) EvalObject(scope Scope) *Object {
	// Evaluate each property expression and collect the results.
	obj := NewObject()
	for k, node := range e.properties {
		v := eval(node, scope)
		obj.Set(k, v)
	}
	return obj
}

// Object is a mutable property bag. It lazily derives its semantic type
// from the property types and also implements Value (non-object accessors
// panic).
type Object struct {
	values        map[string]Value
	propertyTypes map[string]semantic.Type
	typ           semantic.Type
}

// NewObject returns an empty object ready for Set calls.
func NewObject() *Object {
	return &Object{
		values:        make(map[string]Value),
		propertyTypes: make(map[string]semantic.Type),
	}
}

// Set stores v under name and updates the property type (invalidating the
// cached object type) when the type changed.
func (o *Object) Set(name string, v Value) {
	o.values[name] = v
	if o.propertyTypes[name] != v.Type() {
		o.SetPropertyType(name, v.Type())
	}
}
func (o *Object) Get(name string) Value {
	return o.values[name]
}
func (o *Object) SetPropertyType(name string, t semantic.Type) {
	o.propertyTypes[name] = t
	o.typ = nil // drop the cached type; recomputed on next Type()
}
func (o *Object) Type() semantic.Type {
	if o.typ == nil {
		o.typ = semantic.NewObjectType(o.propertyTypes)
	}
	return o.typ
}
func (o *Object) Bool() bool {
	panic("map is not a boolean")
}

func (o *Object) Int() int64 {
	panic("map is not a int")
}

func (o *Object) UInt() uint64 {
	panic("map is not a uint")
}

func (o *Object) Float() float64 {
	panic("map is not a float")
}

func (o *Object) Str() string {
	panic("map is not a string")
}
func (o *Object) Regexp() *regexp.Regexp {
	panic("map is not a regular expression")
}

func (o *Object) Time() Time {
	panic("map is not a time")
}

func (o *Object) Object() *Object {
	return o
}

// logicalEvaluator implements short-circuiting and/or over boolean
// operands; all non-bool accessors panic.
type logicalEvaluator struct {
	t           semantic.Type
	operator    ast.LogicalOperatorKind
	left, right Evaluator
}

func (e *logicalEvaluator) Type() semantic.Type {
	return e.t
}

func (e *logicalEvaluator) EvalBool(scope Scope) bool {
	switch e.operator {
	case ast.AndOperator:
		return e.left.EvalBool(scope) && e.right.EvalBool(scope)
	case ast.OrOperator:
		return e.left.EvalBool(scope) || e.right.EvalBool(scope)
	default:
		panic(fmt.Errorf("unknown logical operator %v", e.operator))
	}
}

func (e *logicalEvaluator) EvalInt(scope Scope) int64 {
	panic(unexpectedKind(e.t.Kind(), semantic.Int))
}

func (e *logicalEvaluator) EvalUInt(scope Scope) uint64 {
	panic(unexpectedKind(e.t.Kind(), semantic.UInt))
}

func (e *logicalEvaluator) EvalFloat(scope Scope) float64 {
	panic(unexpectedKind(e.t.Kind(), semantic.Float))
}

func (e *logicalEvaluator) EvalString(scope Scope) string {
	panic(unexpectedKind(e.t.Kind(), semantic.String))
}
func (e *logicalEvaluator) EvalRegexp(scope Scope) *regexp.Regexp {
	panic(unexpectedKind(e.t.Kind(), semantic.Regexp))
}

func (e *logicalEvaluator) EvalTime(scope Scope) Time {
	panic(unexpectedKind(e.t.Kind(), semantic.Time))
}
func (e *logicalEvaluator) EvalObject(scope Scope) *Object {
	panic(unexpectedKind(e.t.Kind(), semantic.Object))
}

// binaryFunc computes a binary operation over the two operand evaluators.
type binaryFunc func(scope Scope, left, right Evaluator) Value

// binarySignature keys the binaryFuncs dispatch table by operator and
// operand types.
type binarySignature struct {
	Operator    ast.OperatorKind
	Left, Right semantic.Type
}

// binaryEvaluator runs a pre-resolved binaryFunc and unboxes its result.
type binaryEvaluator struct {
	t           semantic.Type
	left, right Evaluator
	f           binaryFunc
}

func (e *binaryEvaluator) Type() semantic.Type {
	return e.t
}

func (e *binaryEvaluator) EvalBool(scope Scope) bool {
	return e.f(scope, e.left, e.right).Bool()
}

func (e *binaryEvaluator) EvalInt(scope
Scope) int64 {
	return e.f(scope, e.left, e.right).Int()
}

func (e *binaryEvaluator) EvalUInt(scope Scope) uint64 {
	return e.f(scope, e.left, e.right).UInt()
}

func (e *binaryEvaluator) EvalFloat(scope Scope) float64 {
	return e.f(scope, e.left, e.right).Float()
}

func (e *binaryEvaluator) EvalString(scope Scope) string {
	return e.f(scope, e.left, e.right).Str()
}

func (e *binaryEvaluator) EvalRegexp(scope Scope) *regexp.Regexp {
	panic(unexpectedKind(e.t.Kind(), semantic.Regexp))
}

func (e *binaryEvaluator) EvalTime(scope Scope) Time {
	return e.f(scope, e.left, e.right).Time()
}
func (e *binaryEvaluator) EvalObject(scope Scope) *Object {
	panic(unexpectedKind(e.t.Kind(), semantic.Object))
}

// unaryEvaluator applies the single unary operator of each kind:
// ! for bool and - (negation) for int/float.
type unaryEvaluator struct {
	t    semantic.Type
	node Evaluator
}

func (e *unaryEvaluator) Type() semantic.Type {
	return e.t
}

func (e *unaryEvaluator) EvalBool(scope Scope) bool {
	// There is only one boolean unary operator
	return !e.node.EvalBool(scope)
}

func (e *unaryEvaluator) EvalInt(scope Scope) int64 {
	// There is only one integer unary operator
	return -e.node.EvalInt(scope)
}

func (e *unaryEvaluator) EvalUInt(scope Scope) uint64 {
	panic(unexpectedKind(e.t.Kind(), semantic.UInt))
}

func (e *unaryEvaluator) EvalFloat(scope Scope) float64 {
	// There is only one float unary operator
	return -e.node.EvalFloat(scope)
}

func (e *unaryEvaluator) EvalString(scope Scope) string {
	panic(unexpectedKind(e.t.Kind(), semantic.String))
}
func (e *unaryEvaluator) EvalRegexp(scope Scope) *regexp.Regexp {
	panic(unexpectedKind(e.t.Kind(), semantic.Regexp))
}

func (e *unaryEvaluator) EvalTime(scope Scope) Time {
	panic(unexpectedKind(e.t.Kind(), semantic.Time))
}
func (e *unaryEvaluator) EvalObject(scope Scope) *Object {
	panic(unexpectedKind(e.t.Kind(), semantic.Object))
}

// integerEvaluator is a constant integer literal; it may also be read as a
// uint via conversion.
type integerEvaluator struct {
	t semantic.Type
	i int64
}

func (e *integerEvaluator) Type() semantic.Type {
	return e.t
}

func (e *integerEvaluator) EvalBool(scope Scope) bool {
	panic(unexpectedKind(e.t.Kind(), semantic.Bool))
}

func (e *integerEvaluator) EvalInt(scope Scope) int64 {
	return e.i
}

func (e *integerEvaluator) EvalUInt(scope Scope) uint64 {
	return uint64(e.i)
}

func (e *integerEvaluator) EvalFloat(scope Scope) float64 {
	panic(unexpectedKind(e.t.Kind(), semantic.Float))
}

func (e *integerEvaluator) EvalString(scope Scope) string {
	panic(unexpectedKind(e.t.Kind(), semantic.String))
}

func (e *integerEvaluator) EvalRegexp(scope Scope) *regexp.Regexp {
	panic(unexpectedKind(e.t.Kind(), semantic.Regexp))
}

func (e *integerEvaluator) EvalTime(scope Scope) Time {
	panic(unexpectedKind(e.t.Kind(), semantic.Time))
}
func (e *integerEvaluator) EvalObject(scope Scope) *Object {
	panic(unexpectedKind(e.t.Kind(), semantic.Object))
}

// stringEvaluator is a constant string literal.
type stringEvaluator struct {
	t semantic.Type
	s string
}

func (e *stringEvaluator) Type() semantic.Type {
	return e.t
}

func (e *stringEvaluator) EvalBool(scope Scope) bool {
	panic(unexpectedKind(e.t.Kind(), semantic.Bool))
}

func (e *stringEvaluator) EvalInt(scope Scope) int64 {
	panic(unexpectedKind(e.t.Kind(), semantic.Int))
}

func (e *stringEvaluator) EvalUInt(scope Scope) uint64 {
	panic(unexpectedKind(e.t.Kind(), semantic.UInt))
}

func (e *stringEvaluator) EvalFloat(scope Scope) float64 {
	panic(unexpectedKind(e.t.Kind(), semantic.Float))
}

func (e *stringEvaluator) EvalString(scope Scope) string {
	return e.s
}
func (e *stringEvaluator) EvalRegexp(scope Scope) *regexp.Regexp {
	panic(unexpectedKind(e.t.Kind(), semantic.Regexp))
}

func (e *stringEvaluator) EvalTime(scope Scope) Time {
	panic(unexpectedKind(e.t.Kind(), semantic.Time))
}
func (e *stringEvaluator) EvalObject(scope Scope) *Object {
	panic(unexpectedKind(e.t.Kind(), semantic.Object))
}

// regexpEvaluator is a constant regular-expression literal.
type regexpEvaluator struct {
	t semantic.Type
	r *regexp.Regexp
}

func (e *regexpEvaluator)
Type() semantic.Type {
	return e.t
}

func (e *regexpEvaluator) EvalBool(scope Scope) bool {
	panic(unexpectedKind(e.t.Kind(), semantic.Bool))
}

func (e *regexpEvaluator) EvalInt(scope Scope) int64 {
	panic(unexpectedKind(e.t.Kind(), semantic.Int))
}

func (e *regexpEvaluator) EvalUInt(scope Scope) uint64 {
	panic(unexpectedKind(e.t.Kind(), semantic.UInt))
}

func (e *regexpEvaluator) EvalFloat(scope Scope) float64 {
	panic(unexpectedKind(e.t.Kind(), semantic.Float))
}

func (e *regexpEvaluator) EvalString(scope Scope) string {
	panic(unexpectedKind(e.t.Kind(), semantic.String))
}

func (e *regexpEvaluator) EvalRegexp(scope Scope) *regexp.Regexp {
	return e.r
}

func (e *regexpEvaluator) EvalTime(scope Scope) Time {
	panic(unexpectedKind(e.t.Kind(), semantic.Time))
}

func (e *regexpEvaluator) EvalObject(scope Scope) *Object {
	panic(unexpectedKind(e.t.Kind(), semantic.Object))
}

// booleanEvaluator is a constant bool literal.
type booleanEvaluator struct {
	t semantic.Type
	b bool
}

func (e *booleanEvaluator) Type() semantic.Type {
	return e.t
}

func (e *booleanEvaluator) EvalBool(scope Scope) bool {
	return e.b
}

func (e *booleanEvaluator) EvalInt(scope Scope) int64 {
	panic(unexpectedKind(e.t.Kind(), semantic.Int))
}

func (e *booleanEvaluator) EvalUInt(scope Scope) uint64 {
	panic(unexpectedKind(e.t.Kind(), semantic.UInt))
}

func (e *booleanEvaluator) EvalFloat(scope Scope) float64 {
	panic(unexpectedKind(e.t.Kind(), semantic.Float))
}

func (e *booleanEvaluator) EvalString(scope Scope) string {
	panic(unexpectedKind(e.t.Kind(), semantic.String))
}

func (e *booleanEvaluator) EvalRegexp(scope Scope) *regexp.Regexp {
	panic(unexpectedKind(e.t.Kind(), semantic.Regexp))
}

func (e *booleanEvaluator) EvalTime(scope Scope) Time {
	panic(unexpectedKind(e.t.Kind(), semantic.Time))
}
func (e *booleanEvaluator) EvalObject(scope Scope) *Object {
	panic(unexpectedKind(e.t.Kind(), semantic.Object))
}

// floatEvaluator is a constant float literal.
type floatEvaluator struct {
	t semantic.Type
	f float64
}

func (e *floatEvaluator) Type() semantic.Type {
	return e.t
}

func (e *floatEvaluator) EvalBool(scope Scope) bool {
	panic(unexpectedKind(e.t.Kind(), semantic.Bool))
}

func (e *floatEvaluator) EvalInt(scope Scope) int64 {
	panic(unexpectedKind(e.t.Kind(), semantic.Int))
}

func (e *floatEvaluator) EvalUInt(scope Scope) uint64 {
	panic(unexpectedKind(e.t.Kind(), semantic.UInt))
}

func (e *floatEvaluator) EvalFloat(scope Scope) float64 {
	return e.f
}

func (e *floatEvaluator) EvalString(scope Scope) string {
	panic(unexpectedKind(e.t.Kind(), semantic.String))
}

func (e *floatEvaluator) EvalRegexp(scope Scope) *regexp.Regexp {
	panic(unexpectedKind(e.t.Kind(), semantic.Regexp))
}

func (e *floatEvaluator) EvalTime(scope Scope) Time {
	panic(unexpectedKind(e.t.Kind(), semantic.Time))
}
func (e *floatEvaluator) EvalObject(scope Scope) *Object {
	panic(unexpectedKind(e.t.Kind(), semantic.Object))
}

// timeEvaluator is a constant time literal.
type timeEvaluator struct {
	t    semantic.Type
	time Time
}

func (e *timeEvaluator) Type() semantic.Type {
	return e.t
}

func (e *timeEvaluator) EvalBool(scope Scope) bool {
	panic(unexpectedKind(e.t.Kind(), semantic.Bool))
}

func (e *timeEvaluator) EvalInt(scope Scope) int64 {
	panic(unexpectedKind(e.t.Kind(), semantic.Int))
}

func (e *timeEvaluator) EvalUInt(scope Scope) uint64 {
	panic(unexpectedKind(e.t.Kind(), semantic.UInt))
}

func (e *timeEvaluator) EvalFloat(scope Scope) float64 {
	panic(unexpectedKind(e.t.Kind(), semantic.Float))
}

func (e *timeEvaluator) EvalString(scope Scope) string {
	panic(unexpectedKind(e.t.Kind(), semantic.String))
}

func (e *timeEvaluator) EvalRegexp(scope Scope) *regexp.Regexp {
	panic(unexpectedKind(e.t.Kind(), semantic.Regexp))
}

func (e *timeEvaluator) EvalTime(scope Scope) Time {
	return e.time
}
func (e *timeEvaluator) EvalObject(scope Scope) *Object {
	panic(unexpectedKind(e.t.Kind(), semantic.Object))
}

// identifierEvaluator resolves a name from the scope at evaluation time via
// the typed scope getters.
type identifierEvaluator struct {
	t    semantic.Type
	name string
}

func (e *identifierEvaluator) Type() semantic.Type {
	return e.t
}

func (e *identifierEvaluator) EvalBool(scope Scope) bool {
	return scope.GetBool(e.name)
}

func (e *identifierEvaluator) EvalInt(scope Scope) int64 {
	return scope.GetInt(e.name)
}

func (e *identifierEvaluator) EvalUInt(scope Scope) uint64 {
	return scope.GetUInt(e.name)
}

func (e *identifierEvaluator) EvalFloat(scope Scope) float64 {
	return scope.GetFloat(e.name)
}

func (e *identifierEvaluator) EvalString(scope Scope) string {
	return scope.GetString(e.name)
}

func (e *identifierEvaluator) EvalRegexp(scope Scope) *regexp.Regexp {
	return scope.GetRegexp(e.name)
}

func (e *identifierEvaluator) EvalTime(scope Scope) Time {
	return scope.GetTime(e.name)
}
func (e *identifierEvaluator) EvalObject(scope Scope) *Object {
	return scope.GetObject(e.name)
}

// memberEvaluator reads one property of the object produced by its child
// evaluator and unboxes it to the requested kind.
type memberEvaluator struct {
	t        semantic.Type
	object   Evaluator
	property string
}

func (e *memberEvaluator) Type() semantic.Type {
	return e.t
}

func (e *memberEvaluator) EvalBool(scope Scope) bool {
	return e.object.EvalObject(scope).Get(e.property).Bool()
}

func (e *memberEvaluator) EvalInt(scope Scope) int64 {
	return e.object.EvalObject(scope).Get(e.property).Int()
}

func (e *memberEvaluator) EvalUInt(scope Scope) uint64 {
	return e.object.EvalObject(scope).Get(e.property).UInt()
}

func (e *memberEvaluator) EvalFloat(scope Scope) float64 {
	return e.object.EvalObject(scope).Get(e.property).Float()
}

func (e *memberEvaluator) EvalString(scope Scope) string {
	return e.object.EvalObject(scope).Get(e.property).Str()
}
func (e *memberEvaluator) EvalRegexp(scope Scope) *regexp.Regexp {
	return e.object.EvalObject(scope).Get(e.property).Regexp()
}

func (e *memberEvaluator) EvalTime(scope Scope) Time {
	return e.object.EvalObject(scope).Get(e.property).Time()
}
func (e *memberEvaluator) EvalObject(scope
Scope) *Object { + return e.object.EvalObject(scope).Get(e.property).Object() +} + +// Map of binary functions +var binaryFuncs = map[binarySignature]struct { + Func binaryFunc + ResultKind semantic.Kind +}{ + //--------------- + // Math Operators + //--------------- + {Operator: ast.AdditionOperator, Left: semantic.Int, Right: semantic.Int}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalInt(scope) + r := right.EvalInt(scope) + return value{ + typ: semantic.Int, + Value: l + r, + } + }, + ResultKind: semantic.Int, + }, + {Operator: ast.AdditionOperator, Left: semantic.UInt, Right: semantic.UInt}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalUInt(scope) + r := right.EvalUInt(scope) + return value{ + typ: semantic.UInt, + Value: l + r, + } + }, + ResultKind: semantic.UInt, + }, + {Operator: ast.AdditionOperator, Left: semantic.Float, Right: semantic.Float}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalFloat(scope) + r := right.EvalFloat(scope) + return value{ + typ: semantic.Float, + Value: l + r, + } + }, + ResultKind: semantic.Float, + }, + {Operator: ast.SubtractionOperator, Left: semantic.Int, Right: semantic.Int}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalInt(scope) + r := right.EvalInt(scope) + return value{ + typ: semantic.Int, + Value: l - r, + } + }, + ResultKind: semantic.Int, + }, + {Operator: ast.SubtractionOperator, Left: semantic.UInt, Right: semantic.UInt}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalUInt(scope) + r := right.EvalUInt(scope) + return value{ + typ: semantic.UInt, + Value: l - r, + } + }, + ResultKind: semantic.UInt, + }, + {Operator: ast.SubtractionOperator, Left: semantic.Float, Right: semantic.Float}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalFloat(scope) + r := right.EvalFloat(scope) + return value{ + typ: semantic.Float, + Value: l - r, + } + }, + 
ResultKind: semantic.Float, + }, + {Operator: ast.MultiplicationOperator, Left: semantic.Int, Right: semantic.Int}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalInt(scope) + r := right.EvalInt(scope) + return value{ + typ: semantic.Int, + Value: l * r, + } + }, + ResultKind: semantic.Int, + }, + {Operator: ast.MultiplicationOperator, Left: semantic.UInt, Right: semantic.UInt}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalUInt(scope) + r := right.EvalUInt(scope) + return value{ + typ: semantic.UInt, + Value: l * r, + } + }, + ResultKind: semantic.UInt, + }, + {Operator: ast.MultiplicationOperator, Left: semantic.Float, Right: semantic.Float}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalFloat(scope) + r := right.EvalFloat(scope) + return value{ + typ: semantic.Float, + Value: l * r, + } + }, + ResultKind: semantic.Float, + }, + {Operator: ast.DivisionOperator, Left: semantic.Int, Right: semantic.Int}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalInt(scope) + r := right.EvalInt(scope) + return value{ + typ: semantic.Int, + Value: l / r, + } + }, + ResultKind: semantic.Int, + }, + {Operator: ast.DivisionOperator, Left: semantic.UInt, Right: semantic.UInt}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalUInt(scope) + r := right.EvalUInt(scope) + return value{ + typ: semantic.UInt, + Value: l / r, + } + }, + ResultKind: semantic.UInt, + }, + {Operator: ast.DivisionOperator, Left: semantic.Float, Right: semantic.Float}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalFloat(scope) + r := right.EvalFloat(scope) + return value{ + typ: semantic.Float, + Value: l / r, + } + }, + ResultKind: semantic.Float, + }, + + //--------------------- + // Comparison Operators + //--------------------- + + // LessThanEqualOperator + + {Operator: ast.LessThanEqualOperator, Left: semantic.Int, Right: semantic.Int}: { + 
Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalInt(scope) + r := right.EvalInt(scope) + return value{ + typ: semantic.Bool, + Value: l <= r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.LessThanEqualOperator, Left: semantic.Int, Right: semantic.UInt}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalInt(scope) + r := right.EvalUInt(scope) + if l < 0 { + return value{ + typ: semantic.Bool, + Value: true, + } + } + return value{ + typ: semantic.Bool, + Value: uint64(l) <= r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.LessThanEqualOperator, Left: semantic.Int, Right: semantic.Float}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalInt(scope) + r := right.EvalFloat(scope) + return value{ + typ: semantic.Bool, + Value: float64(l) <= r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.LessThanEqualOperator, Left: semantic.UInt, Right: semantic.Int}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalUInt(scope) + r := right.EvalInt(scope) + if r < 0 { + return value{ + typ: semantic.Bool, + Value: false, + } + } + return value{ + typ: semantic.Bool, + Value: l <= uint64(r), + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.LessThanEqualOperator, Left: semantic.UInt, Right: semantic.UInt}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalUInt(scope) + r := right.EvalUInt(scope) + return value{ + typ: semantic.Bool, + Value: l <= r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.LessThanEqualOperator, Left: semantic.UInt, Right: semantic.Float}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalUInt(scope) + r := right.EvalFloat(scope) + return value{ + typ: semantic.Bool, + Value: float64(l) <= r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.LessThanEqualOperator, Left: semantic.Float, Right: semantic.Int}: { + Func: func(scope Scope, left, 
right Evaluator) Value { + l := left.EvalFloat(scope) + r := right.EvalInt(scope) + return value{ + typ: semantic.Bool, + Value: l <= float64(r), + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.LessThanEqualOperator, Left: semantic.Float, Right: semantic.UInt}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalFloat(scope) + r := right.EvalUInt(scope) + return value{ + typ: semantic.Bool, + Value: l <= float64(r), + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.LessThanEqualOperator, Left: semantic.Float, Right: semantic.Float}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalFloat(scope) + r := right.EvalFloat(scope) + return value{ + typ: semantic.Bool, + Value: l <= r, + } + }, + ResultKind: semantic.Bool, + }, + + // LessThanOperator + + {Operator: ast.LessThanOperator, Left: semantic.Int, Right: semantic.Int}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalInt(scope) + r := right.EvalInt(scope) + return value{ + typ: semantic.Bool, + Value: l < r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.LessThanOperator, Left: semantic.Int, Right: semantic.UInt}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalInt(scope) + r := right.EvalUInt(scope) + if l < 0 { + return value{ + typ: semantic.Bool, + Value: true, + } + } + return value{ + typ: semantic.Bool, + Value: uint64(l) < r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.LessThanOperator, Left: semantic.Int, Right: semantic.Float}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalInt(scope) + r := right.EvalFloat(scope) + return value{ + typ: semantic.Bool, + Value: float64(l) < r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.LessThanOperator, Left: semantic.UInt, Right: semantic.Int}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalUInt(scope) + r := right.EvalInt(scope) + if r < 0 { + 
return value{ + typ: semantic.Bool, + Value: false, + } + } + return value{ + typ: semantic.Bool, + Value: l < uint64(r), + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.LessThanOperator, Left: semantic.UInt, Right: semantic.UInt}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalUInt(scope) + r := right.EvalUInt(scope) + return value{ + typ: semantic.Bool, + Value: l < r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.LessThanOperator, Left: semantic.UInt, Right: semantic.Float}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalUInt(scope) + r := right.EvalFloat(scope) + return value{ + typ: semantic.Bool, + Value: float64(l) < r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.LessThanOperator, Left: semantic.Float, Right: semantic.Int}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalFloat(scope) + r := right.EvalInt(scope) + return value{ + typ: semantic.Bool, + Value: l < float64(r), + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.LessThanOperator, Left: semantic.Float, Right: semantic.UInt}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalFloat(scope) + r := right.EvalUInt(scope) + return value{ + typ: semantic.Bool, + Value: l < float64(r), + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.LessThanOperator, Left: semantic.Float, Right: semantic.Float}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalFloat(scope) + r := right.EvalFloat(scope) + return value{ + typ: semantic.Bool, + Value: l < r, + } + }, + ResultKind: semantic.Bool, + }, + + // GreaterThanEqualOperator + + {Operator: ast.GreaterThanEqualOperator, Left: semantic.Int, Right: semantic.Int}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalInt(scope) + r := right.EvalInt(scope) + return value{ + typ: semantic.Bool, + Value: l >= r, + } + }, + ResultKind: semantic.Bool, + }, + 
{Operator: ast.GreaterThanEqualOperator, Left: semantic.Int, Right: semantic.UInt}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalInt(scope) + r := right.EvalUInt(scope) + if l < 0 { + return value{ + typ: semantic.Bool, + Value: true, + } + } + return value{ + typ: semantic.Bool, + Value: uint64(l) >= r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.GreaterThanEqualOperator, Left: semantic.Int, Right: semantic.Float}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalInt(scope) + r := right.EvalFloat(scope) + return value{ + typ: semantic.Bool, + Value: float64(l) >= r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.GreaterThanEqualOperator, Left: semantic.UInt, Right: semantic.Int}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalUInt(scope) + r := right.EvalInt(scope) + if r < 0 { + return value{ + typ: semantic.Bool, + Value: false, + } + } + return value{ + typ: semantic.Bool, + Value: l >= uint64(r), + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.GreaterThanEqualOperator, Left: semantic.UInt, Right: semantic.UInt}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalUInt(scope) + r := right.EvalUInt(scope) + return value{ + typ: semantic.Bool, + Value: l >= r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.GreaterThanEqualOperator, Left: semantic.UInt, Right: semantic.Float}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalUInt(scope) + r := right.EvalFloat(scope) + return value{ + typ: semantic.Bool, + Value: float64(l) >= r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.GreaterThanEqualOperator, Left: semantic.Float, Right: semantic.Int}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalFloat(scope) + r := right.EvalInt(scope) + return value{ + typ: semantic.Bool, + Value: l >= float64(r), + } + }, + ResultKind: semantic.Bool, + }, + 
{Operator: ast.GreaterThanEqualOperator, Left: semantic.Float, Right: semantic.UInt}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalFloat(scope) + r := right.EvalUInt(scope) + return value{ + typ: semantic.Bool, + Value: l >= float64(r), + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.GreaterThanEqualOperator, Left: semantic.Float, Right: semantic.Float}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalFloat(scope) + r := right.EvalFloat(scope) + return value{ + typ: semantic.Bool, + Value: l >= r, + } + }, + ResultKind: semantic.Bool, + }, + + // GreaterThanOperator + + {Operator: ast.GreaterThanOperator, Left: semantic.Int, Right: semantic.Int}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalInt(scope) + r := right.EvalInt(scope) + return value{ + typ: semantic.Bool, + Value: l > r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.GreaterThanOperator, Left: semantic.Int, Right: semantic.UInt}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalInt(scope) + r := right.EvalUInt(scope) + if l < 0 { + return value{ + typ: semantic.Bool, + Value: true, + } + } + return value{ + typ: semantic.Bool, + Value: uint64(l) > r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.GreaterThanOperator, Left: semantic.Int, Right: semantic.Float}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalInt(scope) + r := right.EvalFloat(scope) + return value{ + typ: semantic.Bool, + Value: float64(l) > r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.GreaterThanOperator, Left: semantic.UInt, Right: semantic.Int}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalUInt(scope) + r := right.EvalInt(scope) + if r < 0 { + return value{ + typ: semantic.Bool, + Value: false, + } + } + return value{ + typ: semantic.Bool, + Value: l > uint64(r), + } + }, + ResultKind: semantic.Bool, + }, + 
{Operator: ast.GreaterThanOperator, Left: semantic.UInt, Right: semantic.UInt}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalUInt(scope) + r := right.EvalUInt(scope) + return value{ + typ: semantic.Bool, + Value: l > r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.GreaterThanOperator, Left: semantic.UInt, Right: semantic.Float}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalUInt(scope) + r := right.EvalFloat(scope) + return value{ + typ: semantic.Bool, + Value: float64(l) > r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.GreaterThanOperator, Left: semantic.Float, Right: semantic.Int}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalFloat(scope) + r := right.EvalInt(scope) + return value{ + typ: semantic.Bool, + Value: l > float64(r), + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.GreaterThanOperator, Left: semantic.Float, Right: semantic.UInt}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalFloat(scope) + r := right.EvalUInt(scope) + return value{ + typ: semantic.Bool, + Value: l > float64(r), + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.GreaterThanOperator, Left: semantic.Float, Right: semantic.Float}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalFloat(scope) + r := right.EvalFloat(scope) + return value{ + typ: semantic.Bool, + Value: l > r, + } + }, + ResultKind: semantic.Bool, + }, + + // EqualOperator + + {Operator: ast.EqualOperator, Left: semantic.Int, Right: semantic.Int}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalInt(scope) + r := right.EvalInt(scope) + return value{ + typ: semantic.Bool, + Value: l == r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.EqualOperator, Left: semantic.Int, Right: semantic.UInt}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalInt(scope) + r := 
right.EvalUInt(scope) + if l < 0 { + return value{ + typ: semantic.Bool, + Value: false, + } + } + return value{ + typ: semantic.Bool, + Value: uint64(l) == r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.EqualOperator, Left: semantic.Int, Right: semantic.Float}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalInt(scope) + r := right.EvalFloat(scope) + return value{ + typ: semantic.Bool, + Value: float64(l) == r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.EqualOperator, Left: semantic.UInt, Right: semantic.Int}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalUInt(scope) + r := right.EvalInt(scope) + if r < 0 { + return value{ + typ: semantic.Bool, + Value: false, + } + } + return value{ + typ: semantic.Bool, + Value: l == uint64(r), + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.EqualOperator, Left: semantic.UInt, Right: semantic.UInt}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalUInt(scope) + r := right.EvalUInt(scope) + return value{ + typ: semantic.Bool, + Value: l == r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.EqualOperator, Left: semantic.UInt, Right: semantic.Float}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalUInt(scope) + r := right.EvalFloat(scope) + return value{ + typ: semantic.Bool, + Value: float64(l) == r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.EqualOperator, Left: semantic.Float, Right: semantic.Int}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalFloat(scope) + r := right.EvalInt(scope) + return value{ + typ: semantic.Bool, + Value: l == float64(r), + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.EqualOperator, Left: semantic.Float, Right: semantic.UInt}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalFloat(scope) + r := right.EvalUInt(scope) + return value{ + typ: semantic.Bool, + 
Value: l == float64(r), + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.EqualOperator, Left: semantic.Float, Right: semantic.Float}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalFloat(scope) + r := right.EvalFloat(scope) + return value{ + typ: semantic.Bool, + Value: l == r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.EqualOperator, Left: semantic.String, Right: semantic.String}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalString(scope) + r := right.EvalString(scope) + return value{ + typ: semantic.Bool, + Value: l == r, + } + }, + ResultKind: semantic.Bool, + }, + + // NotEqualOperator + + {Operator: ast.NotEqualOperator, Left: semantic.Int, Right: semantic.Int}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalInt(scope) + r := right.EvalInt(scope) + return value{ + typ: semantic.Bool, + Value: l != r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.NotEqualOperator, Left: semantic.Int, Right: semantic.UInt}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalInt(scope) + r := right.EvalUInt(scope) + if l < 0 { + return value{ + typ: semantic.Bool, + Value: true, + } + } + return value{ + typ: semantic.Bool, + Value: uint64(l) != r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.NotEqualOperator, Left: semantic.Int, Right: semantic.Float}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalInt(scope) + r := right.EvalFloat(scope) + return value{ + typ: semantic.Bool, + Value: float64(l) != r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.NotEqualOperator, Left: semantic.UInt, Right: semantic.Int}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalUInt(scope) + r := right.EvalInt(scope) + if r < 0 { + return value{ + typ: semantic.Bool, + Value: true, + } + } + return value{ + typ: semantic.Bool, + Value: l != uint64(r), + } + }, + 
ResultKind: semantic.Bool, + }, + {Operator: ast.NotEqualOperator, Left: semantic.UInt, Right: semantic.UInt}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalUInt(scope) + r := right.EvalUInt(scope) + return value{ + typ: semantic.Bool, + Value: l != r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.NotEqualOperator, Left: semantic.UInt, Right: semantic.Float}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalUInt(scope) + r := right.EvalFloat(scope) + return value{ + typ: semantic.Bool, + Value: float64(l) != r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.NotEqualOperator, Left: semantic.Float, Right: semantic.Int}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalFloat(scope) + r := right.EvalInt(scope) + return value{ + typ: semantic.Bool, + Value: l != float64(r), + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.NotEqualOperator, Left: semantic.Float, Right: semantic.UInt}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalFloat(scope) + r := right.EvalUInt(scope) + return value{ + typ: semantic.Bool, + Value: l != float64(r), + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.NotEqualOperator, Left: semantic.Float, Right: semantic.Float}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalFloat(scope) + r := right.EvalFloat(scope) + return value{ + typ: semantic.Bool, + Value: l != r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.NotEqualOperator, Left: semantic.String, Right: semantic.String}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalString(scope) + r := right.EvalString(scope) + return value{ + typ: semantic.Bool, + Value: l != r, + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.RegexpMatchOperator, Left: semantic.String, Right: semantic.Regexp}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := 
left.EvalString(scope) + r := right.EvalRegexp(scope) + return value{ + typ: semantic.Bool, + Value: r.MatchString(l), + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.RegexpMatchOperator, Left: semantic.Regexp, Right: semantic.String}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalRegexp(scope) + r := right.EvalString(scope) + return value{ + typ: semantic.Bool, + Value: l.MatchString(r), + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.NotRegexpMatchOperator, Left: semantic.String, Right: semantic.Regexp}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalString(scope) + r := right.EvalRegexp(scope) + return value{ + typ: semantic.Bool, + Value: !r.MatchString(l), + } + }, + ResultKind: semantic.Bool, + }, + {Operator: ast.NotRegexpMatchOperator, Left: semantic.Regexp, Right: semantic.String}: { + Func: func(scope Scope, left, right Evaluator) Value { + l := left.EvalRegexp(scope) + r := right.EvalString(scope) + return value{ + typ: semantic.Bool, + Value: !l.MatchString(r), + } + }, + ResultKind: semantic.Bool, + }, +} diff --git a/vendor/github.com/influxdata/ifql/complete/complete.go b/vendor/github.com/influxdata/ifql/complete/complete.go new file mode 100644 index 000000000..28e81264a --- /dev/null +++ b/vendor/github.com/influxdata/ifql/complete/complete.go @@ -0,0 +1,97 @@ +package complete + +import ( + "errors" + "fmt" + "sort" + + "github.com/influxdata/ifql/interpreter" + "github.com/influxdata/ifql/semantic" +) + +type functionType interface { + Params() map[string]semantic.Type +} + +// FunctionSuggestion provides information about a function +type FunctionSuggestion struct { + Params map[string]string +} + +// Completer provides methods for suggestions in IFQL queries +type Completer struct { + scope *interpreter.Scope + declarations semantic.DeclarationScope +} + +// NewCompleter creates a new completer from scope and declarations +func NewCompleter(scope 
*interpreter.Scope, declarations semantic.DeclarationScope) Completer { + return Completer{scope: scope, declarations: declarations} +} + +// Names returns the slice of names of declared expressions +func (c Completer) Names() []string { + names := c.scope.Names() + sort.Strings(names) + return names +} + +// Declaration returns a declaration based on the expression name, if one exists +func (c Completer) Declaration(name string) (semantic.VariableDeclaration, error) { + d, ok := c.declarations[name] + if !ok { + return d, errors.New("could not find declaration") + } + + return d, nil +} + +// FunctionNames returns all declaration names of the Function Kind +func (c Completer) FunctionNames() []string { + funcs := []string{} + + for name, d := range c.declarations { + if isFunction(d) { + funcs = append(funcs, name) + } + } + + sort.Strings(funcs) + + return funcs +} + +// FunctionSuggestion returns information needed for autocomplete suggestions for a function +func (c Completer) FunctionSuggestion(name string) (FunctionSuggestion, error) { + var s FunctionSuggestion + + d, err := c.Declaration(name) + if err != nil { + return s, err + } + + if !isFunction(d) { + return s, fmt.Errorf("name ( %s ) is not a function", name) + } + + funcType, ok := d.InitType().(functionType) + if !ok { + return s, errors.New("could not cast function type") + } + + params := map[string]string{} + + for k, v := range funcType.Params() { + params[k] = v.Kind().String() + } + + s = FunctionSuggestion{ + Params: params, + } + + return s, nil +} + +func isFunction(d semantic.VariableDeclaration) bool { + return d.InitType().Kind() == semantic.Function +} diff --git a/vendor/github.com/influxdata/ifql/complete/complete_test.go b/vendor/github.com/influxdata/ifql/complete/complete_test.go new file mode 100644 index 000000000..ba1c88c53 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/complete/complete_test.go @@ -0,0 +1,87 @@ +package complete + +import ( + "testing" + + 
"github.com/google/go-cmp/cmp" + _ "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/interpreter" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/semantic" +) + +var scope *interpreter.Scope +var declarations semantic.DeclarationScope + +func init() { + query.FinalizeRegistration() + scope, declarations = query.BuiltIns() +} + +func TestNames(t *testing.T) { + s := interpreter.NewScope() + var v interpreter.Value + s.Set("boom", v) + s.Set("tick", v) + + c := NewCompleter(s, semantic.DeclarationScope{}) + + results := c.Names() + expected := []string{ + "boom", + "tick", + } + + if !cmp.Equal(results, expected) { + t.Error(cmp.Diff(results, expected), "unexpected names from declarations") + } +} + +func TestDeclaration(t *testing.T) { + name := "range" + expected := declarations[name].ID() + + declaration, _ := NewCompleter(scope, declarations).Declaration(name) + result := declaration.ID() + + if !cmp.Equal(result, expected) { + t.Error(cmp.Diff(result, expected), "unexpected declaration for name") + } +} + +func TestFunctionNames(t *testing.T) { + d := make(semantic.DeclarationScope) + d["boom"] = semantic.NewExternalVariableDeclaration( + "boom", semantic.NewFunctionType(semantic.FunctionSignature{})) + + d["noBoom"] = semantic.NewExternalVariableDeclaration("noBoom", semantic.String) + + s := interpreter.NewScope() + c := NewCompleter(s, d) + results := c.FunctionNames() + + expected := []string{ + "boom", + } + + if !cmp.Equal(results, expected) { + t.Error(cmp.Diff(results, expected), "unexpected function names") + } +} + +func TestFunctionSuggestion(t *testing.T) { + name := "range" + result, _ := NewCompleter(scope, declarations).FunctionSuggestion(name) + + expected := FunctionSuggestion{ + Params: map[string]string{ + "start": semantic.Time.String(), + "stop": semantic.Time.String(), + "table": query.TableObjectType.Kind().String(), + }, + } + + if !cmp.Equal(result, expected) { + t.Error(cmp.Diff(result, expected), 
"does not match expected suggestion") + } +} diff --git a/vendor/github.com/influxdata/ifql/docker-compose.yml b/vendor/github.com/influxdata/ifql/docker-compose.yml new file mode 100644 index 000000000..53674ac35 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/docker-compose.yml @@ -0,0 +1,23 @@ +version: "3" +services: + ifqld: + image: quay.io/influxdb/ifqld + ports: + - 8093:8093 + networks: + - ifql_net + environment: + - "HOSTS=influxdb:8082" + depends_on: + - influxdb + influxdb: + image: influxdb + ports: + - 8082:8082 + - 8086:8086 + volumes: + - ./examples/influxdb.conf:/etc/influxdb/influxdb.conf:ro + networks: + - ifql_net +networks: + ifql_net: diff --git a/vendor/github.com/influxdata/ifql/functions/count.go b/vendor/github.com/influxdata/ifql/functions/count.go new file mode 100644 index 000000000..2b0ab3a1e --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/count.go @@ -0,0 +1,135 @@ +package functions + +import ( + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" +) + +const CountKind = "count" + +type CountOpSpec struct { +} + +var countSignature = query.DefaultFunctionSignature() + +func init() { + query.RegisterFunction(CountKind, createCountOpSpec, countSignature) + query.RegisterOpSpec(CountKind, newCountOp) + plan.RegisterProcedureSpec(CountKind, newCountProcedure, CountKind) + execute.RegisterTransformation(CountKind, createCountTransformation) +} + +func createCountOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + return new(CountOpSpec), nil +} + +func newCountOp() query.OperationSpec { + return new(CountOpSpec) +} + +func (s *CountOpSpec) Kind() query.OperationKind { + return CountKind +} + +type CountProcedureSpec struct { +} + +func newCountProcedure(query.OperationSpec, plan.Administration) (plan.ProcedureSpec, error) { + return 
new(CountProcedureSpec), nil +} + +func (s *CountProcedureSpec) Kind() plan.ProcedureKind { + return CountKind +} + +func (s *CountProcedureSpec) Copy() plan.ProcedureSpec { + return new(CountProcedureSpec) +} + +func (s *CountProcedureSpec) AggregateMethod() string { + return CountKind +} +func (s *CountProcedureSpec) ReAggregateSpec() plan.ProcedureSpec { + return new(SumProcedureSpec) +} + +func (s *CountProcedureSpec) PushDownRules() []plan.PushDownRule { + return []plan.PushDownRule{{ + Root: FromKind, + Through: nil, + Match: func(spec plan.ProcedureSpec) bool { + selectSpec := spec.(*FromProcedureSpec) + return !selectSpec.GroupingSet + }, + }} +} + +func (s *CountProcedureSpec) PushDown(root *plan.Procedure, dup func() *plan.Procedure) { + selectSpec := root.Spec.(*FromProcedureSpec) + if selectSpec.AggregateSet { + root = dup() + selectSpec = root.Spec.(*FromProcedureSpec) + selectSpec.AggregateSet = false + selectSpec.AggregateMethod = "" + return + } + selectSpec.AggregateSet = true + selectSpec.AggregateMethod = s.AggregateMethod() +} + +type CountAgg struct { + count int64 +} + +func createCountTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + t, d := execute.NewAggregateTransformationAndDataset(id, mode, a.Bounds(), new(CountAgg), a.Allocator()) + return t, d, nil +} + +func (a *CountAgg) NewBoolAgg() execute.DoBoolAgg { + a.count = 0 + return a +} +func (a *CountAgg) NewIntAgg() execute.DoIntAgg { + a.count = 0 + return a +} +func (a *CountAgg) NewUIntAgg() execute.DoUIntAgg { + a.count = 0 + return a +} +func (a *CountAgg) NewFloatAgg() execute.DoFloatAgg { + a.count = 0 + return a +} +func (a *CountAgg) NewStringAgg() execute.DoStringAgg { + a.count = 0 + return a +} + +func (a *CountAgg) DoBool(vs []bool) { + a.count += int64(len(vs)) +} +func (a *CountAgg) DoUInt(vs []uint64) { + a.count += int64(len(vs)) +} +func (a 
*CountAgg) DoInt(vs []int64) { + a.count += int64(len(vs)) +} +func (a *CountAgg) DoFloat(vs []float64) { + a.count += int64(len(vs)) +} +func (a *CountAgg) DoString(vs []string) { + a.count += int64(len(vs)) +} + +func (a *CountAgg) Type() execute.DataType { + return execute.TInt +} +func (a *CountAgg) ValueInt() int64 { + return a.count +} diff --git a/vendor/github.com/influxdata/ifql/functions/count_test.go b/vendor/github.com/influxdata/ifql/functions/count_test.go new file mode 100644 index 000000000..d0b806f45 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/count_test.go @@ -0,0 +1,131 @@ +package functions_test + +import ( + "testing" + "time" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/query/plan/plantest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestCount_NewQuery(t *testing.T) { + tests := []querytest.NewQueryTestCase{ + { + Name: "from with range and count", + Raw: `from(db:"mydb") |> range(start:-4h, stop:-2h) |> count()`, + Want: &query.Spec{ + Operations: []*query.Operation{ + { + ID: "from0", + Spec: &functions.FromOpSpec{ + Database: "mydb", + }, + }, + { + ID: "range1", + Spec: &functions.RangeOpSpec{ + Start: query.Time{ + Relative: -4 * time.Hour, + IsRelative: true, + }, + Stop: query.Time{ + Relative: -2 * time.Hour, + IsRelative: true, + }, + }, + }, + { + ID: "count2", + Spec: &functions.CountOpSpec{}, + }, + }, + Edges: []query.Edge{ + {Parent: "from0", Child: "range1"}, + {Parent: "range1", Child: "count2"}, + }, + }, + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.Name, func(t *testing.T) { + t.Parallel() + querytest.NewQueryTestHelper(t, tc) + }) + } +} + +func TestCountOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"count","kind":"count"}`) + op := &query.Operation{ + ID: "count", + Spec: &functions.CountOpSpec{}, + 
} + + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestCount_Process(t *testing.T) { + executetest.AggFuncTestHelper( + t, + new(functions.CountAgg), + []float64{1, 2, 3, 4, 5, 6, 7, 8, 9, 10}, + int64(10), + ) +} +func BenchmarkCount(b *testing.B) { + executetest.AggFuncBenchmarkHelper( + b, + new(functions.CountAgg), + NormalData, + int64(len(NormalData)), + ) +} + +func TestCount_PushDown_Match(t *testing.T) { + spec := new(functions.CountProcedureSpec) + from := new(functions.FromProcedureSpec) + + // Should not match when an aggregate is set + from.GroupingSet = true + plantest.PhysicalPlan_PushDown_Match_TestHelper(t, spec, from, []bool{false}) + + // Should match when no aggregate is set + from.GroupingSet = false + plantest.PhysicalPlan_PushDown_Match_TestHelper(t, spec, from, []bool{true}) +} + +func TestCount_PushDown(t *testing.T) { + spec := new(functions.CountProcedureSpec) + root := &plan.Procedure{ + Spec: new(functions.FromProcedureSpec), + } + want := &plan.Procedure{ + Spec: &functions.FromProcedureSpec{ + AggregateSet: true, + AggregateMethod: functions.CountKind, + }, + } + + plantest.PhysicalPlan_PushDown_TestHelper(t, spec, root, false, want) +} + +func TestCount_PushDown_Duplicate(t *testing.T) { + spec := new(functions.CountProcedureSpec) + root := &plan.Procedure{ + Spec: &functions.FromProcedureSpec{ + AggregateSet: true, + AggregateMethod: functions.CountKind, + }, + } + want := &plan.Procedure{ + // Expect the duplicate has been reset to zero values + Spec: new(functions.FromProcedureSpec), + } + + plantest.PhysicalPlan_PushDown_TestHelper(t, spec, root, true, want) +} diff --git a/vendor/github.com/influxdata/ifql/functions/covariance.go b/vendor/github.com/influxdata/ifql/functions/covariance.go new file mode 100644 index 000000000..201922250 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/covariance.go @@ -0,0 +1,239 @@ +package functions + +import ( + "fmt" + "math" + + 
"github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" + "github.com/pkg/errors" +) + +const CovarianceKind = "covariance" + +type CovarianceOpSpec struct { + PearsonCorrelation bool `json:"pearsonr"` +} + +var covarianceSignature = query.DefaultFunctionSignature() + +func init() { + covarianceSignature.Params["pearsonr"] = semantic.Bool + + query.RegisterBuiltIn("covariance", covarianceBuiltIn) + query.RegisterFunction(CovarianceKind, createCovarianceOpSpec, covarianceSignature) + query.RegisterOpSpec(CovarianceKind, newCovarianceOp) + plan.RegisterProcedureSpec(CovarianceKind, newCovarianceProcedure, CovarianceKind) + execute.RegisterTransformation(CovarianceKind, createCovarianceTransformation) +} + +// covarianceBuiltIn defines a `cov` function with an automatic join. +var covarianceBuiltIn = ` +cov = (x,y,on,pearsonr=false) => + join( + tables:{x:x, y:y}, + on:on, + fn: (t) => ({x:t.x._value, y:t.y._value}), + ) + |> covariance(pearsonr:pearsonr) + +pearsonr = (x,y,on) => cov(x:x, y:y, on:on, pearsonr:true) +` + +func createCovarianceOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + spec := new(CovarianceOpSpec) + pearsonr, ok, err := args.GetBool("pearsonr") + if err != nil { + return nil, err + } else if ok { + spec.PearsonCorrelation = pearsonr + } + return spec, nil +} + +func newCovarianceOp() query.OperationSpec { + return new(CovarianceOpSpec) +} + +func (s *CovarianceOpSpec) Kind() query.OperationKind { + return CovarianceKind +} + +type CovarianceProcedureSpec struct { + PearsonCorrelation bool +} + +func newCovarianceProcedure(qs query.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { + spec, ok := qs.(*CovarianceOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + + return 
&CovarianceProcedureSpec{ + PearsonCorrelation: spec.PearsonCorrelation, + }, nil +} + +func (s *CovarianceProcedureSpec) Kind() plan.ProcedureKind { + return CovarianceKind +} +func (s *CovarianceProcedureSpec) Copy() plan.ProcedureSpec { + return new(CovarianceProcedureSpec) +} + +type CovarianceTransformation struct { + d execute.Dataset + cache execute.BlockBuilderCache + bounds execute.Bounds + spec CovarianceProcedureSpec + + yIdx int + + n, + xm1, + ym1, + xm2, + ym2, + xym2 float64 +} + +func createCovarianceTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + s, ok := spec.(*CovarianceProcedureSpec) + if !ok { + return nil, nil, fmt.Errorf("invalid spec type %T", spec) + } + cache := execute.NewBlockBuilderCache(a.Allocator()) + d := execute.NewDataset(id, mode, cache) + t := NewCovarianceTransformation(d, cache, s, a.Bounds()) + return t, d, nil +} + +func NewCovarianceTransformation(d execute.Dataset, cache execute.BlockBuilderCache, spec *CovarianceProcedureSpec, bounds execute.Bounds) *CovarianceTransformation { + return &CovarianceTransformation{ + d: d, + cache: cache, + bounds: bounds, + spec: *spec, + } +} + +func (t *CovarianceTransformation) RetractBlock(id execute.DatasetID, meta execute.BlockMetadata) error { + key := execute.ToBlockKey(meta) + return t.d.RetractBlock(key) +} + +func (t *CovarianceTransformation) Process(id execute.DatasetID, b execute.Block) error { + cols := b.Cols() + builder, new := t.cache.BlockBuilder(blockMetadata{ + bounds: t.bounds, + tags: b.Tags(), + }) + if new { + builder.AddCol(execute.TimeCol) + execute.AddTags(b.Tags(), builder) + builder.AddCol(execute.ColMeta{ + Label: execute.DefaultValueColLabel, + Kind: execute.ValueColKind, + Type: execute.TFloat, + }) + } + var xIdx, yIdx = -1, -1 + for j, c := range cols { + if c.IsValue() { + if xIdx == -1 { + xIdx = j + } else if yIdx == -1 { + yIdx = 
j + } else { + return errors.New("covariance only supports two values") + } + } + } + if xIdx == -1 { + return errors.New("covariance must receive exactly two value columns, no value columns found") + } + if yIdx == -1 { + return errors.New("covariance must receive exactly two value columns, only one value column found") + } + if cols[xIdx].Type != cols[yIdx].Type { + return errors.New("cannot compute the covariance between different types") + } + t.yIdx = yIdx + t.reset() + values := b.Col(xIdx) + switch typ := cols[xIdx].Type; typ { + case execute.TFloat: + values.DoFloat(t.DoFloat) + default: + return fmt.Errorf("covariance does not support %v", typ) + } + + timeIdx := 0 + valueIdx := len(builder.Cols()) - 1 + + // Add row for aggregate values + builder.AppendTime(timeIdx, b.Bounds().Stop) + builder.AppendFloat(valueIdx, t.value()) + + return nil +} + +func (t *CovarianceTransformation) reset() { + t.n = 0 + t.xm1 = 0 + t.ym1 = 0 + t.xm2 = 0 + t.ym2 = 0 + t.xym2 = 0 +} +func (t *CovarianceTransformation) DoFloat(xs []float64, rr execute.RowReader) { + var xdelta, ydelta, xdelta2, ydelta2 float64 + for i, x := range xs { + y := rr.AtFloat(i, t.yIdx) + + t.n++ + + // Update means + xdelta = x - t.xm1 + ydelta = y - t.ym1 + t.xm1 += xdelta / t.n + t.ym1 += ydelta / t.n + + // Update variance sums + xdelta2 = x - t.xm1 + ydelta2 = y - t.ym1 + t.xm2 += xdelta * xdelta2 + t.ym2 += ydelta * ydelta2 + + // Update covariance sum + // Covariance is symetric so we do not need to compute the yxm2 value. 
+ t.xym2 += xdelta * ydelta2 + } +} +func (t *CovarianceTransformation) value() float64 { + if t.n < 2 { + return math.NaN() + } + if t.spec.PearsonCorrelation { + return (t.xym2) / math.Sqrt(t.xm2*t.ym2) + } + return t.xym2 / (t.n - 1) +} + +func (t *CovarianceTransformation) UpdateWatermark(id execute.DatasetID, mark execute.Time) error { + return t.d.UpdateWatermark(mark) +} + +func (t *CovarianceTransformation) UpdateProcessingTime(id execute.DatasetID, pt execute.Time) error { + return t.d.UpdateProcessingTime(pt) +} + +func (t *CovarianceTransformation) Finish(id execute.DatasetID, err error) { + t.d.Finish(err) +} diff --git a/vendor/github.com/influxdata/ifql/functions/covariance_test.go b/vendor/github.com/influxdata/ifql/functions/covariance_test.go new file mode 100644 index 000000000..9d75165ec --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/covariance_test.go @@ -0,0 +1,378 @@ +package functions_test + +import ( + "testing" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/querytest" + "github.com/influxdata/ifql/semantic" +) + +func TestCovariance_NewQuery(t *testing.T) { + tests := []querytest.NewQueryTestCase{ + { + Name: "simple covariance", + Raw: `from(db:"mydb") |> covariance()`, + Want: &query.Spec{ + Operations: []*query.Operation{ + { + ID: "from0", + Spec: &functions.FromOpSpec{ + Database: "mydb", + }, + }, + { + ID: "covariance1", + Spec: &functions.CovarianceOpSpec{}, + }, + }, + Edges: []query.Edge{ + {Parent: "from0", Child: "covariance1"}, + }, + }, + }, + { + Name: "pearsonr", + Raw: `from(db:"mydb")|>covariance(pearsonr:true)`, + Want: &query.Spec{ + Operations: []*query.Operation{ + { + ID: "from0", + Spec: &functions.FromOpSpec{ + Database: "mydb", + }, + }, + { + ID: "covariance1", + Spec: &functions.CovarianceOpSpec{ + PearsonCorrelation: true, + }, 
+ }, + }, + Edges: []query.Edge{ + {Parent: "from0", Child: "covariance1"}, + }, + }, + }, + { + Name: "global covariance", + Raw: `cov(x: from(db:"mydb"), y:from(db:"mydb"), on:["host"], pearsonr:true)`, + Want: &query.Spec{ + Operations: []*query.Operation{ + { + ID: "from0", + Spec: &functions.FromOpSpec{ + Database: "mydb", + }, + }, + { + ID: "from1", + Spec: &functions.FromOpSpec{ + Database: "mydb", + }, + }, + { + ID: "join2", + Spec: &functions.JoinOpSpec{ + On: []string{"host"}, + TableNames: map[query.OperationID]string{ + "from0": "x", + "from1": "y", + }, + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{ + {Key: &semantic.Identifier{Name: "t"}}, + }, + Body: &semantic.ObjectExpression{ + Properties: []*semantic.Property{ + { + Key: &semantic.Identifier{Name: "x"}, + Value: &semantic.MemberExpression{ + Object: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "t"}, + Property: "x", + }, + Property: "_value", + }, + }, + { + Key: &semantic.Identifier{Name: "y"}, + Value: &semantic.MemberExpression{ + Object: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "t"}, + Property: "y", + }, + Property: "_value", + }, + }, + }, + }, + }, + }, + }, + { + ID: "covariance3", + Spec: &functions.CovarianceOpSpec{ + PearsonCorrelation: true, + }, + }, + }, + Edges: []query.Edge{ + {Parent: "from0", Child: "join2"}, + {Parent: "from1", Child: "join2"}, + {Parent: "join2", Child: "covariance3"}, + }, + }, + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.Name, func(t *testing.T) { + t.Parallel() + querytest.NewQueryTestHelper(t, tc) + }) + } +} + +func TestCovarianceOperation_Marshaling(t *testing.T) { + data := []byte(`{ + "id":"covariance", + "kind":"covariance", + "spec":{ + "pearsonr":true + } + }`) + op := &query.Operation{ + ID: "covariance", + Spec: &functions.CovarianceOpSpec{ + PearsonCorrelation: true, + }, + } + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func 
TestCovariance_Process(t *testing.T) { + testCases := []struct { + name string + bounds execute.Bounds + spec *functions.CovarianceProcedureSpec + data []execute.Block + want []*executetest.Block + }{ + { + name: "variance", + bounds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + spec: &functions.CovarianceProcedureSpec{}, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "x", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "y", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(0), 1.0, 1.0}, + {execute.Time(1), 2.0, 2.0}, + {execute.Time(2), 3.0, 3.0}, + {execute.Time(3), 4.0, 4.0}, + {execute.Time(4), 5.0, 5.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(5), 2.5}, + }, + }}, + }, + { + name: "negative covariance", + bounds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + spec: &functions.CovarianceProcedureSpec{}, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "x", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "y", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(0), 1.0, 5.0}, + {execute.Time(1), 2.0, 4.0}, + {execute.Time(2), 3.0, 3.0}, + {execute.Time(3), 4.0, 2.0}, + {execute.Time(4), 5.0, 1.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: 
execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(5), -2.5}, + }, + }}, + }, + { + name: "small covariance", + bounds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + spec: &functions.CovarianceProcedureSpec{}, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "x", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "y", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(0), 1.0, 1.0}, + {execute.Time(1), 2.0, 1.0}, + {execute.Time(2), 3.0, 1.0}, + {execute.Time(3), 4.0, 1.0}, + {execute.Time(4), 5.0, 2.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(5), 0.5}, + }, + }}, + }, + { + name: "pearson correlation", + bounds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + spec: &functions.CovarianceProcedureSpec{ + PearsonCorrelation: true, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "x", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "y", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(0), 1.0, 1.0}, + {execute.Time(1), 2.0, 2.0}, + {execute.Time(2), 3.0, 3.0}, + {execute.Time(3), 4.0, 4.0}, + {execute.Time(4), 5.0, 5.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: 
execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(5), 1.0}, + }, + }}, + }, + { + name: "pearson correlation opposite", + bounds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + spec: &functions.CovarianceProcedureSpec{ + PearsonCorrelation: true, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "x", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "y", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(0), 1.0, 5.0}, + {execute.Time(1), 2.0, 4.0}, + {execute.Time(2), 3.0, 3.0}, + {execute.Time(3), 4.0, 2.0}, + {execute.Time(4), 5.0, 1.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(5), -1.0}, + }, + }}, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + executetest.ProcessTestHelper( + t, + tc.data, + tc.want, + func(d execute.Dataset, c execute.BlockBuilderCache) execute.Transformation { + return functions.NewCovarianceTransformation(d, c, tc.spec, tc.bounds) + }, + ) + }) + } +} diff --git a/vendor/github.com/influxdata/ifql/functions/data_test.go b/vendor/github.com/influxdata/ifql/functions/data_test.go new file mode 100644 index 000000000..ba37ff83a --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/data_test.go @@ -0,0 +1,78 @@ +package functions_test + +import ( + "math/rand" + "time" + + "github.com/gonum/stat/distuv" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + 
"github.com/influxdata/ifql/query/execute/executetest" +) + +const ( + N = 1e6 + Mu = 10 + Sigma = 3 + + seed = 42 +) + +func init() { + query.FinalizeRegistration() +} + +// NormalData is a slice of N random values that are normaly distributed with mean Mu and standard deviation Sigma. +var NormalData []float64 + +// NormalBlock is a block of data whose value col is NormalData. +var NormalBlock execute.Block + +func init() { + dist := distuv.Normal{ + Mu: Mu, + Sigma: Sigma, + Source: rand.New(rand.NewSource(seed)), + } + NormalData = make([]float64, N) + for i := range NormalData { + NormalData[i] = dist.Rand() + } + normalBlockBuilder := execute.NewColListBlockBuilder(executetest.UnlimitedAllocator) + normalBlockBuilder.SetBounds(execute.Bounds{ + Start: execute.Time(time.Date(2016, 10, 10, 0, 0, 0, 0, time.UTC).UnixNano()), + Stop: execute.Time(time.Date(2017, 10, 10, 0, 0, 0, 0, time.UTC).UnixNano()), + }) + + normalBlockBuilder.AddCol(execute.TimeCol) + normalBlockBuilder.AddCol(execute.ColMeta{Label: execute.DefaultValueColLabel, Type: execute.TFloat, Kind: execute.ValueColKind}) + normalBlockBuilder.AddCol(execute.ColMeta{Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}) + normalBlockBuilder.AddCol(execute.ColMeta{Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}) + + times := make([]execute.Time, N) + values := NormalData + t1 := "a" + t2 := make([]string, N) + + start := normalBlockBuilder.Bounds().Start + for i, v := range values { + // There are roughly 1 million, 31 second intervals in a year. 
+ times[i] = start + execute.Time(time.Duration(i*31)*time.Second) + // Pick t2 based off the value + switch int(v) % 3 { + case 0: + t2[i] = "x" + case 1: + t2[i] = "y" + case 2: + t2[i] = "z" + } + } + + normalBlockBuilder.AppendTimes(0, times) + normalBlockBuilder.AppendFloats(1, values) + normalBlockBuilder.SetCommonString(2, t1) + normalBlockBuilder.AppendStrings(3, t2) + + NormalBlock, _ = normalBlockBuilder.Block() +} diff --git a/vendor/github.com/influxdata/ifql/functions/derivative.go b/vendor/github.com/influxdata/ifql/functions/derivative.go new file mode 100644 index 000000000..3de03ed36 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/derivative.go @@ -0,0 +1,303 @@ +package functions + +import ( + "fmt" + "math" + "time" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" +) + +const DerivativeKind = "derivative" + +type DerivativeOpSpec struct { + Unit query.Duration `json:"unit"` + NonNegative bool `json:"non_negative"` +} + +var derivativeSignature = query.DefaultFunctionSignature() + +func init() { + derivativeSignature.Params["unit"] = semantic.Duration + derivativeSignature.Params["nonNegative"] = semantic.Bool + + query.RegisterFunction(DerivativeKind, createDerivativeOpSpec, derivativeSignature) + query.RegisterOpSpec(DerivativeKind, newDerivativeOp) + plan.RegisterProcedureSpec(DerivativeKind, newDerivativeProcedure, DerivativeKind) + execute.RegisterTransformation(DerivativeKind, createDerivativeTransformation) +} + +func createDerivativeOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + spec := new(DerivativeOpSpec) + + if unit, ok, err := args.GetDuration("unit"); err != nil { + return nil, err + } else if ok { + spec.Unit = unit + } else { + //Default is 1s + spec.Unit = query.Duration(time.Second) + 
} + + if nn, ok, err := args.GetBool("nonNegative"); err != nil { + return nil, err + } else if ok { + spec.NonNegative = nn + } + + return spec, nil +} + +func newDerivativeOp() query.OperationSpec { + return new(DerivativeOpSpec) +} + +func (s *DerivativeOpSpec) Kind() query.OperationKind { + return DerivativeKind +} + +type DerivativeProcedureSpec struct { + Unit query.Duration `json:"unit"` + NonNegative bool `json:"non_negative"` +} + +func newDerivativeProcedure(qs query.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { + spec, ok := qs.(*DerivativeOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + + return &DerivativeProcedureSpec{ + Unit: spec.Unit, + NonNegative: spec.NonNegative, + }, nil +} + +func (s *DerivativeProcedureSpec) Kind() plan.ProcedureKind { + return DerivativeKind +} +func (s *DerivativeProcedureSpec) Copy() plan.ProcedureSpec { + ns := new(DerivativeProcedureSpec) + *ns = *s + return ns +} + +func createDerivativeTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + s, ok := spec.(*DerivativeProcedureSpec) + if !ok { + return nil, nil, fmt.Errorf("invalid spec type %T", spec) + } + cache := execute.NewBlockBuilderCache(a.Allocator()) + d := execute.NewDataset(id, mode, cache) + t := NewDerivativeTransformation(d, cache, s) + return t, d, nil +} + +type derivativeTransformation struct { + d execute.Dataset + cache execute.BlockBuilderCache + + unit time.Duration + nonNegative bool +} + +func NewDerivativeTransformation(d execute.Dataset, cache execute.BlockBuilderCache, spec *DerivativeProcedureSpec) *derivativeTransformation { + return &derivativeTransformation{ + d: d, + cache: cache, + unit: time.Duration(spec.Unit), + nonNegative: spec.NonNegative, + } +} + +func (t *derivativeTransformation) RetractBlock(id execute.DatasetID, meta execute.BlockMetadata) error { + return 
t.d.RetractBlock(execute.ToBlockKey(meta)) +} + +func (t *derivativeTransformation) Process(id execute.DatasetID, b execute.Block) error { + builder, new := t.cache.BlockBuilder(b) + if new { + cols := b.Cols() + for j, c := range cols { + switch c.Kind { + case execute.TimeColKind: + builder.AddCol(c) + case execute.TagColKind: + builder.AddCol(c) + if c.Common { + builder.SetCommonString(j, b.Tags()[c.Label]) + } + case execute.ValueColKind: + dc := c + // Derivative always results in a float64 + dc.Type = execute.TFloat + builder.AddCol(dc) + } + } + } + cols := b.Cols() + derivatives := make([]*derivative, len(cols)) + for j, c := range cols { + if c.IsValue() { + d := newDerivative(j, t.unit, t.nonNegative) + derivatives[j] = d + } + } + + b.Times().DoTime(func(ts []execute.Time, rr execute.RowReader) { + for i, t := range ts { + include := false + for _, d := range derivatives { + if d == nil { + continue + } + var ok bool + j := d.col + switch cols[j].Type { + case execute.TInt: + ok = d.updateInt(t, rr.AtInt(i, j)) + case execute.TUInt: + ok = d.updateUInt(t, rr.AtUInt(i, j)) + case execute.TFloat: + ok = d.updateFloat(t, rr.AtFloat(i, j)) + } + include = include || ok + } + if include { + for j, c := range cols { + switch c.Kind { + case execute.TimeColKind: + builder.AppendTime(j, rr.AtTime(i, j)) + case execute.TagColKind: + builder.AppendString(j, rr.AtString(i, j)) + case execute.ValueColKind: + //TODO(nathanielc): Write null markers when we have support for null values. 
+ builder.AppendFloat(j, derivatives[j].value()) + } + } + } + } + }) + + return nil +} + +func (t *derivativeTransformation) UpdateWatermark(id execute.DatasetID, mark execute.Time) error { + return t.d.UpdateWatermark(mark) +} +func (t *derivativeTransformation) UpdateProcessingTime(id execute.DatasetID, pt execute.Time) error { + return t.d.UpdateProcessingTime(pt) +} +func (t *derivativeTransformation) Finish(id execute.DatasetID, err error) { + t.d.Finish(err) +} + +func newDerivative(col int, unit time.Duration, nonNegative bool) *derivative { + return &derivative{ + col: col, + first: true, + unit: float64(unit), + nonNegative: nonNegative, + } +} + +type derivative struct { + col int + first bool + unit float64 + nonNegative bool + + pIntValue int64 + pUIntValue uint64 + pFloatValue float64 + pTime execute.Time + + v float64 +} + +func (d *derivative) value() float64 { + return d.v +} + +func (d *derivative) updateInt(t execute.Time, v int64) bool { + if d.first { + d.pTime = t + d.pIntValue = v + d.first = false + d.v = math.NaN() + return false + } + + diff := float64(v - d.pIntValue) + elapsed := float64(time.Duration(t-d.pTime)) / d.unit + + d.pTime = t + d.pIntValue = v + + if d.nonNegative && diff < 0 { + d.v = math.NaN() + return false + } + + d.v = diff / elapsed + return true +} +func (d *derivative) updateUInt(t execute.Time, v uint64) bool { + if d.first { + d.pTime = t + d.pUIntValue = v + d.first = false + d.v = math.NaN() + return false + } + + var diff float64 + if d.pUIntValue > v { + // Prevent uint64 overflow by applying the negative sign after the conversion to a float64. 
+ diff = float64(d.pUIntValue-v) * -1 + } else { + diff = float64(v - d.pUIntValue) + } + elapsed := float64(time.Duration(t-d.pTime)) / d.unit + + d.pTime = t + d.pUIntValue = v + + if d.nonNegative && diff < 0 { + d.v = math.NaN() + return false + } + + d.v = diff / elapsed + return true +} +func (d *derivative) updateFloat(t execute.Time, v float64) bool { + if d.first { + d.pTime = t + d.pFloatValue = v + d.first = false + d.v = math.NaN() + return false + } + + diff := v - d.pFloatValue + elapsed := float64(time.Duration(t-d.pTime)) / d.unit + + d.pTime = t + d.pFloatValue = v + + if d.nonNegative && diff < 0 { + d.v = math.NaN() + return false + } + + d.v = diff / elapsed + return true +} diff --git a/vendor/github.com/influxdata/ifql/functions/derivative_test.go b/vendor/github.com/influxdata/ifql/functions/derivative_test.go new file mode 100644 index 000000000..1ffcc6d1e --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/derivative_test.go @@ -0,0 +1,533 @@ +package functions_test + +import ( + "math" + "testing" + "time" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestDerivativeOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"derivative","kind":"derivative","spec":{"unit":"1m","non_negative":true}}`) + op := &query.Operation{ + ID: "derivative", + Spec: &functions.DerivativeOpSpec{ + Unit: query.Duration(time.Minute), + NonNegative: true, + }, + } + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestDerivative_PassThrough(t *testing.T) { + executetest.TransformationPassThroughTestHelper(t, func(d execute.Dataset, c execute.BlockBuilderCache) execute.Transformation { + s := functions.NewDerivativeTransformation( + d, + c, + &functions.DerivativeProcedureSpec{}, + ) + return s + }) +} + +func TestDerivative_Process(t 
*testing.T) { + testCases := []struct { + name string + spec *functions.DerivativeProcedureSpec + data []execute.Block + want []*executetest.Block + }{ + { + name: "float", + spec: &functions.DerivativeProcedureSpec{ + Unit: 1, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0}, + {execute.Time(2), 1.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(2), -1.0}, + }, + }}, + }, + { + name: "float with units", + spec: &functions.DerivativeProcedureSpec{ + Unit: query.Duration(time.Second), + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: execute.Time(1 * time.Second), + Stop: execute.Time(4 * time.Second), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1 * time.Second), 2.0}, + {execute.Time(3 * time.Second), 1.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: execute.Time(1 * time.Second), + Stop: execute.Time(4 * time.Second), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(3 * time.Second), -0.5}, + }, + }}, + }, + { + name: "int", + spec: &functions.DerivativeProcedureSpec{ + Unit: 1, + }, + data: []execute.Block{&executetest.Block{ + 
Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), int64(20)}, + {execute.Time(2), int64(10)}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(2), -10.0}, + }, + }}, + }, + { + name: "int with units", + spec: &functions.DerivativeProcedureSpec{ + Unit: query.Duration(time.Second), + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: execute.Time(1 * time.Second), + Stop: execute.Time(4 * time.Second), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1 * time.Second), int64(20)}, + {execute.Time(3 * time.Second), int64(10)}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: execute.Time(1 * time.Second), + Stop: execute.Time(4 * time.Second), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(3 * time.Second), -5.0}, + }, + }}, + }, + { + name: "int non negative", + spec: &functions.DerivativeProcedureSpec{ + Unit: 1, + NonNegative: true, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TInt, Kind: execute.ValueColKind}, + }, + Data: 
[][]interface{}{ + {execute.Time(1), int64(20)}, + {execute.Time(2), int64(10)}, + {execute.Time(3), int64(20)}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(3), 10.0}, + }, + }}, + }, + { + name: "uint", + spec: &functions.DerivativeProcedureSpec{ + Unit: 1, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TUInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), uint64(10)}, + {execute.Time(2), uint64(20)}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(2), 10.0}, + }, + }}, + }, + { + name: "uint with negative result", + spec: &functions.DerivativeProcedureSpec{ + Unit: 1, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TUInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), uint64(20)}, + {execute.Time(2), uint64(10)}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(2), 
-10.0}, + }, + }}, + }, + { + name: "uint with non negative", + spec: &functions.DerivativeProcedureSpec{ + Unit: 1, + NonNegative: true, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TUInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), uint64(20)}, + {execute.Time(2), uint64(10)}, + {execute.Time(3), uint64(20)}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(3), 10.0}, + }, + }}, + }, + { + name: "uint with units", + spec: &functions.DerivativeProcedureSpec{ + Unit: query.Duration(time.Second), + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: execute.Time(1 * time.Second), + Stop: execute.Time(4 * time.Second), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TUInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1 * time.Second), uint64(20)}, + {execute.Time(3 * time.Second), uint64(10)}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: execute.Time(1 * time.Second), + Stop: execute.Time(4 * time.Second), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(3 * time.Second), -5.0}, + }, + }}, + }, + { + name: "non negative one block", + spec: &functions.DerivativeProcedureSpec{ + Unit: 1, + NonNegative: true, + }, + data: 
[]execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0}, + {execute.Time(2), 1.0}, + {execute.Time(3), 2.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(3), 1.0}, + }, + }}, + }, + { + name: "non negative one block with empty result", + spec: &functions.DerivativeProcedureSpec{ + Unit: 1, + NonNegative: true, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0}, + {execute.Time(2), 1.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + }}, + }, + { + name: "float with tags", + spec: &functions.DerivativeProcedureSpec{ + Unit: 1, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t", Type: execute.TString, Kind: execute.TagColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0, "a"}, + {execute.Time(2), 1.0, "b"}, + }, + }}, + want: 
[]*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t", Type: execute.TString, Kind: execute.TagColKind}, + }, + Data: [][]interface{}{ + {execute.Time(2), -1.0, "b"}, + }, + }}, + }, + { + name: "float with multiple values", + spec: &functions.DerivativeProcedureSpec{ + Unit: 1, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "x", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "y", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0, 20.0}, + {execute.Time(2), 1.0, 10.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "x", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "y", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(2), -1.0, -10.0}, + }, + }}, + }, + { + name: "float non negative with multiple values", + spec: &functions.DerivativeProcedureSpec{ + Unit: 1, + NonNegative: true, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "x", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "y", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0, 20.0}, + {execute.Time(2), 1.0, 10.0}, + {execute.Time(3), 2.0, 0.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + 
ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "x", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "y", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(3), 1.0, math.NaN()}, + }, + }}, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + executetest.ProcessTestHelper( + t, + tc.data, + tc.want, + func(d execute.Dataset, c execute.BlockBuilderCache) execute.Transformation { + return functions.NewDerivativeTransformation(d, c, tc.spec) + }, + ) + }) + } +} diff --git a/vendor/github.com/influxdata/ifql/functions/difference.go b/vendor/github.com/influxdata/ifql/functions/difference.go new file mode 100644 index 000000000..8cff6b954 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/difference.go @@ -0,0 +1,291 @@ +package functions + +import ( + "fmt" + "math" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" +) + +const DifferenceKind = "difference" + +type DifferenceOpSpec struct { + NonNegative bool `json:"non_negative"` +} + +var differenceSignature = query.DefaultFunctionSignature() + +func init() { + differenceSignature.Params["nonNegative"] = semantic.Bool + + query.RegisterFunction(DifferenceKind, createDifferenceOpSpec, differenceSignature) + query.RegisterOpSpec(DifferenceKind, newDifferenceOp) + plan.RegisterProcedureSpec(DifferenceKind, newDifferenceProcedure, DifferenceKind) + execute.RegisterTransformation(DifferenceKind, createDifferenceTransformation) +} + +func createDifferenceOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + err := a.AddParentFromArgs(args) + if err != nil { + return nil, err + } + + spec := new(DifferenceOpSpec) + + if nn, ok, err := 
args.GetBool("nonNegative"); err != nil { + return nil, err + } else if ok { + spec.NonNegative = nn + } + + return spec, nil +} + +func newDifferenceOp() query.OperationSpec { + return new(DifferenceOpSpec) +} + +func (s *DifferenceOpSpec) Kind() query.OperationKind { + return DifferenceKind +} + +type DifferenceProcedureSpec struct { + NonNegative bool `json:"non_negative"` +} + +func newDifferenceProcedure(qs query.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { + spec, ok := qs.(*DifferenceOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + + return &DifferenceProcedureSpec{ + NonNegative: spec.NonNegative, + }, nil +} + +func (s *DifferenceProcedureSpec) Kind() plan.ProcedureKind { + return DifferenceKind +} +func (s *DifferenceProcedureSpec) Copy() plan.ProcedureSpec { + ns := new(DifferenceProcedureSpec) + *ns = *s + return ns +} + +func createDifferenceTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + s, ok := spec.(*DifferenceProcedureSpec) + if !ok { + return nil, nil, fmt.Errorf("invalid spec type %T", spec) + } + cache := execute.NewBlockBuilderCache(a.Allocator()) + d := execute.NewDataset(id, mode, cache) + t := NewDifferenceTransformation(d, cache, s) + return t, d, nil +} + +type differenceTransformation struct { + d execute.Dataset + cache execute.BlockBuilderCache + + nonNegative bool +} + +func NewDifferenceTransformation(d execute.Dataset, cache execute.BlockBuilderCache, spec *DifferenceProcedureSpec) *differenceTransformation { + return &differenceTransformation{ + d: d, + cache: cache, + nonNegative: spec.NonNegative, + } +} + +func (t *differenceTransformation) RetractBlock(id execute.DatasetID, meta execute.BlockMetadata) error { + return t.d.RetractBlock(execute.ToBlockKey(meta)) +} + +func (t *differenceTransformation) Process(id execute.DatasetID, b execute.Block) error { 
+ builder, new := t.cache.BlockBuilder(b) + if new { + cols := b.Cols() + for j, c := range cols { + switch c.Kind { + case execute.TimeColKind: + builder.AddCol(c) + case execute.TagColKind: + builder.AddCol(c) + if c.Common { + builder.SetCommonString(j, b.Tags()[c.Label]) + } + case execute.ValueColKind: + var typ execute.DataType + switch c.Type { + case execute.TInt, execute.TUInt: + typ = execute.TInt + case execute.TFloat: + typ = execute.TFloat + } + builder.AddCol(execute.ColMeta{ + Label: c.Label, + Kind: execute.ValueColKind, + Type: typ, + }) + } + } + } + cols := b.Cols() + differences := make([]*difference, len(cols)) + for j, c := range cols { + if c.IsValue() { + d := newDifference(j, t.nonNegative) + differences[j] = d + } + } + + b.Times().DoTime(func(ts []execute.Time, rr execute.RowReader) { + for i := range ts { + include := false + for _, d := range differences { + if d == nil { + continue + } + var ok bool + j := d.col + switch cols[j].Type { + case execute.TInt: + ok = d.updateInt(rr.AtInt(i, j)) + case execute.TUInt: + ok = d.updateUInt(rr.AtUInt(i, j)) + case execute.TFloat: + ok = d.updateFloat(rr.AtFloat(i, j)) + } + include = include || ok + } + if include { + for j, c := range builder.Cols() { + switch c.Kind { + case execute.TimeColKind: + builder.AppendTime(j, rr.AtTime(i, j)) + case execute.TagColKind: + builder.AppendString(j, rr.AtString(i, j)) + case execute.ValueColKind: + //TODO(nathanielc): Write null markers when we have support for null values. 
+ switch c.Type { + case execute.TInt: + builder.AppendInt(j, differences[j].valueInt()) + case execute.TFloat: + builder.AppendFloat(j, differences[j].valueFloat()) + } + } + } + } + } + }) + + return nil +} + +func (t *differenceTransformation) UpdateWatermark(id execute.DatasetID, mark execute.Time) error { + return t.d.UpdateWatermark(mark) +} +func (t *differenceTransformation) UpdateProcessingTime(id execute.DatasetID, pt execute.Time) error { + return t.d.UpdateProcessingTime(pt) +} +func (t *differenceTransformation) Finish(id execute.DatasetID, err error) { + t.d.Finish(err) +} + +func newDifference(col int, nonNegative bool) *difference { + return &difference{ + col: col, + first: true, + nonNegative: nonNegative, + } +} + +type difference struct { + col int + first bool + nonNegative bool + + pIntValue int64 + pUIntValue uint64 + pFloatValue float64 + + diffInt int64 + diffFloat float64 +} + +func (d *difference) valueInt() int64 { + return d.diffInt +} +func (d *difference) valueFloat() float64 { + return d.diffFloat +} + +func (d *difference) updateInt(v int64) bool { + if d.first { + d.pIntValue = v + d.first = false + d.diffInt = 0 + return false + } + + d.diffInt = v - d.pIntValue + + d.pIntValue = v + + if d.nonNegative && d.diffInt < 0 { + d.diffInt = 0 + return false + } + + return true +} +func (d *difference) updateUInt(v uint64) bool { + if d.first { + d.pUIntValue = v + d.first = false + d.diffInt = 0 + return false + } + + if d.pUIntValue > v { + d.diffInt = int64(d.pUIntValue-v) * -1 + } else { + d.diffInt = int64(v - d.pUIntValue) + } + + d.pUIntValue = v + + if d.nonNegative && d.diffInt < 0 { + d.diffInt = 0 + return false + } + + return true +} +func (d *difference) updateFloat(v float64) bool { + if d.first { + d.pFloatValue = v + d.first = false + d.diffFloat = math.NaN() + return false + } + + d.diffFloat = v - d.pFloatValue + d.pFloatValue = v + + if d.nonNegative && d.diffFloat < 0 { + d.diffFloat = math.NaN() + return false + } + 
+ return true +} diff --git a/vendor/github.com/influxdata/ifql/functions/difference_test.go b/vendor/github.com/influxdata/ifql/functions/difference_test.go new file mode 100644 index 000000000..b35a2b696 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/difference_test.go @@ -0,0 +1,415 @@ +package functions_test + +import ( + "math" + "testing" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestDifferenceOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"difference","kind":"difference","spec":{"non_negative":true}}`) + op := &query.Operation{ + ID: "difference", + Spec: &functions.DifferenceOpSpec{ + NonNegative: true, + }, + } + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestDifference_PassThrough(t *testing.T) { + executetest.TransformationPassThroughTestHelper(t, func(d execute.Dataset, c execute.BlockBuilderCache) execute.Transformation { + s := functions.NewDifferenceTransformation( + d, + c, + &functions.DifferenceProcedureSpec{}, + ) + return s + }) +} + +func TestDifference_Process(t *testing.T) { + testCases := []struct { + name string + spec *functions.DifferenceProcedureSpec + data []execute.Block + want []*executetest.Block + }{ + { + name: "float", + spec: &functions.DifferenceProcedureSpec{}, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0}, + {execute.Time(2), 1.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: 
execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(2), -1.0}, + }, + }}, + }, + { + name: "int", + spec: &functions.DifferenceProcedureSpec{}, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), int64(20)}, + {execute.Time(2), int64(10)}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(2), int64(-10)}, + }, + }}, + }, + { + name: "int non negative", + spec: &functions.DifferenceProcedureSpec{ + NonNegative: true, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), int64(20)}, + {execute.Time(2), int64(10)}, + {execute.Time(3), int64(20)}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(3), int64(10)}, + }, + }}, + }, + { + name: "uint", + spec: &functions.DifferenceProcedureSpec{}, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: 
execute.TimeColKind}, + {Label: "_value", Type: execute.TUInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), uint64(10)}, + {execute.Time(2), uint64(20)}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(2), int64(10)}, + }, + }}, + }, + { + name: "uint with negative result", + spec: &functions.DifferenceProcedureSpec{}, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TUInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), uint64(20)}, + {execute.Time(2), uint64(10)}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(2), int64(-10)}, + }, + }}, + }, + { + name: "uint with non negative", + spec: &functions.DifferenceProcedureSpec{ + NonNegative: true, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TUInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), uint64(20)}, + {execute.Time(2), uint64(10)}, + {execute.Time(3), uint64(20)}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: 
execute.TimeColKind}, + {Label: "_value", Type: execute.TInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(3), int64(10)}, + }, + }}, + }, + { + name: "non negative one block", + spec: &functions.DifferenceProcedureSpec{ + NonNegative: true, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0}, + {execute.Time(2), 1.0}, + {execute.Time(3), 2.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(3), 1.0}, + }, + }}, + }, + { + name: "non negative one block with empty result", + spec: &functions.DifferenceProcedureSpec{ + NonNegative: true, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0}, + {execute.Time(2), 1.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + }}, + }, + { + name: "float with tags", + spec: &functions.DifferenceProcedureSpec{}, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: 
execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t", Type: execute.TString, Kind: execute.TagColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0, "a"}, + {execute.Time(2), 1.0, "b"}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t", Type: execute.TString, Kind: execute.TagColKind}, + }, + Data: [][]interface{}{ + {execute.Time(2), -1.0, "b"}, + }, + }}, + }, + { + name: "float with multiple values", + spec: &functions.DifferenceProcedureSpec{}, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "x", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "y", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0, 20.0}, + {execute.Time(2), 1.0, 10.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "x", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "y", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(2), -1.0, -10.0}, + }, + }}, + }, + { + name: "float non negative with multiple values", + spec: &functions.DifferenceProcedureSpec{ + NonNegative: true, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "x", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "y", Type: execute.TFloat, Kind: 
execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0, 20.0}, + {execute.Time(2), 1.0, 10.0}, + {execute.Time(3), 2.0, 0.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "x", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "y", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(3), 1.0, math.NaN()}, + }, + }}, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + executetest.ProcessTestHelper( + t, + tc.data, + tc.want, + func(d execute.Dataset, c execute.BlockBuilderCache) execute.Transformation { + return functions.NewDifferenceTransformation(d, c, tc.spec) + }, + ) + }) + } +} diff --git a/vendor/github.com/influxdata/ifql/functions/distinct.go b/vendor/github.com/influxdata/ifql/functions/distinct.go new file mode 100644 index 000000000..acb2b4998 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/distinct.go @@ -0,0 +1,221 @@ +package functions + +import ( + "fmt" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" +) + +const DistinctKind = "distinct" + +type DistinctOpSpec struct { + Column string `json:"column"` +} + +var distinctSignature = query.DefaultFunctionSignature() + +func init() { + distinctSignature.Params["column"] = semantic.String + + query.RegisterFunction(DistinctKind, createDistinctOpSpec, distinctSignature) + query.RegisterOpSpec(DistinctKind, newDistinctOp) + plan.RegisterProcedureSpec(DistinctKind, newDistinctProcedure, DistinctKind) + execute.RegisterTransformation(DistinctKind, createDistinctTransformation) +} + +func createDistinctOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := 
a.AddParentFromArgs(args); err != nil { + return nil, err + } + + spec := new(DistinctOpSpec) + + if col, ok, err := args.GetString("column"); err != nil { + return nil, err + } else if ok { + spec.Column = col + } else { + spec.Column = execute.DefaultValueColLabel + } + + return spec, nil +} + +func newDistinctOp() query.OperationSpec { + return new(DistinctOpSpec) +} + +func (s *DistinctOpSpec) Kind() query.OperationKind { + return DistinctKind +} + +type DistinctProcedureSpec struct { + Column string +} + +func newDistinctProcedure(qs query.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { + spec, ok := qs.(*DistinctOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + + return &DistinctProcedureSpec{ + Column: spec.Column, + }, nil +} + +func (s *DistinctProcedureSpec) Kind() plan.ProcedureKind { + return DistinctKind +} +func (s *DistinctProcedureSpec) Copy() plan.ProcedureSpec { + ns := new(DistinctProcedureSpec) + + *ns = *s + + return ns +} + +func createDistinctTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + s, ok := spec.(*DistinctProcedureSpec) + if !ok { + return nil, nil, fmt.Errorf("invalid spec type %T", spec) + } + cache := execute.NewBlockBuilderCache(a.Allocator()) + d := execute.NewDataset(id, mode, cache) + t := NewDistinctTransformation(d, cache, s) + return t, d, nil +} + +type distinctTransformation struct { + d execute.Dataset + cache execute.BlockBuilderCache + + column string +} + +func NewDistinctTransformation(d execute.Dataset, cache execute.BlockBuilderCache, spec *DistinctProcedureSpec) *distinctTransformation { + return &distinctTransformation{ + d: d, + cache: cache, + column: spec.Column, + } +} + +func (t *distinctTransformation) RetractBlock(id execute.DatasetID, meta execute.BlockMetadata) error { + return t.d.RetractBlock(execute.ToBlockKey(meta)) +} + +func (t 
*distinctTransformation) Process(id execute.DatasetID, b execute.Block) error { + builder, new := t.cache.BlockBuilder(b) + if new { + execute.AddBlockCols(b, builder) + } + + colIdx := execute.ColIdx(t.column, builder.Cols()) + col := builder.Cols()[colIdx] + + var ( + boolDistinct map[bool]bool + intDistinct map[int64]bool + uintDistinct map[uint64]bool + floatDistinct map[float64]bool + stringDistinct map[string]bool + timeDistinct map[execute.Time]bool + ) + switch col.Type { + case execute.TBool: + boolDistinct = make(map[bool]bool) + case execute.TInt: + intDistinct = make(map[int64]bool) + case execute.TUInt: + uintDistinct = make(map[uint64]bool) + case execute.TFloat: + floatDistinct = make(map[float64]bool) + case execute.TString: + stringDistinct = make(map[string]bool) + case execute.TTime: + timeDistinct = make(map[execute.Time]bool) + } + + cols := builder.Cols() + b.Times().DoTime(func(ts []execute.Time, rr execute.RowReader) { + for i := range ts { + // Check distinct + switch col.Type { + case execute.TBool: + v := rr.AtBool(i, colIdx) + if boolDistinct[v] { + continue + } + boolDistinct[v] = true + case execute.TInt: + v := rr.AtInt(i, colIdx) + if intDistinct[v] { + continue + } + intDistinct[v] = true + case execute.TUInt: + v := rr.AtUInt(i, colIdx) + if uintDistinct[v] { + continue + } + uintDistinct[v] = true + case execute.TFloat: + v := rr.AtFloat(i, colIdx) + if floatDistinct[v] { + continue + } + floatDistinct[v] = true + case execute.TString: + v := rr.AtString(i, colIdx) + if stringDistinct[v] { + continue + } + stringDistinct[v] = true + case execute.TTime: + v := rr.AtTime(i, colIdx) + if timeDistinct[v] { + continue + } + timeDistinct[v] = true + } + + for j, c := range cols { + if c.Common { + continue + } + switch c.Type { + case execute.TBool: + builder.AppendBool(j, rr.AtBool(i, j)) + case execute.TInt: + builder.AppendInt(j, rr.AtInt(i, j)) + case execute.TUInt: + builder.AppendUInt(j, rr.AtUInt(i, j)) + case execute.TFloat: + 
builder.AppendFloat(j, rr.AtFloat(i, j)) + case execute.TString: + builder.AppendString(j, rr.AtString(i, j)) + case execute.TTime: + builder.AppendTime(j, rr.AtTime(i, j)) + default: + execute.PanicUnknownType(c.Type) + } + } + } + }) + + return nil +} + +func (t *distinctTransformation) UpdateWatermark(id execute.DatasetID, mark execute.Time) error { + return t.d.UpdateWatermark(mark) +} +func (t *distinctTransformation) UpdateProcessingTime(id execute.DatasetID, pt execute.Time) error { + return t.d.UpdateProcessingTime(pt) +} +func (t *distinctTransformation) Finish(id execute.DatasetID, err error) { + t.d.Finish(err) +} diff --git a/vendor/github.com/influxdata/ifql/functions/distinct_test.go b/vendor/github.com/influxdata/ifql/functions/distinct_test.go new file mode 100644 index 000000000..171e93230 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/distinct_test.go @@ -0,0 +1,173 @@ +package functions_test + +import ( + "testing" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestDistinctOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"distinct","kind":"distinct","spec":{"column":"_value"}}`) + op := &query.Operation{ + ID: "distinct", + Spec: &functions.DistinctOpSpec{ + Column: "_value", + }, + } + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestDistinct_PassThrough(t *testing.T) { + executetest.TransformationPassThroughTestHelper(t, func(d execute.Dataset, c execute.BlockBuilderCache) execute.Transformation { + s := functions.NewDistinctTransformation( + d, + c, + &functions.DistinctProcedureSpec{ + Column: "_value", + }, + ) + return s + }) +} + +func TestDistinct_Process(t *testing.T) { + testCases := []struct { + name string + spec *functions.DistinctProcedureSpec + data []execute.Block + want []*executetest.Block + 
}{ + { + name: "one block", + spec: &functions.DistinctProcedureSpec{ + Column: "_value", + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0}, + {execute.Time(2), 1.0}, + {execute.Time(3), 3.0}, + {execute.Time(4), 1.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0}, + {execute.Time(2), 1.0}, + {execute.Time(3), 3.0}, + }, + }}, + }, + { + name: "distinct tag", + spec: &functions.DistinctProcedureSpec{ + Column: "t1", + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), "a", 2.0}, + {execute.Time(2), "a", 1.0}, + {execute.Time(3), "b", 3.0}, + {execute.Time(4), "c", 1.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), "a", 2.0}, + {execute.Time(3), "b", 3.0}, + {execute.Time(4), "c", 1.0}, + }, + }}, + }, + { + name: "distinct times", + spec: &functions.DistinctProcedureSpec{ + 
Column: "_time", + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), "a", 2.0}, + {execute.Time(2), "a", 1.0}, + {execute.Time(3), "b", 3.0}, + {execute.Time(3), "c", 1.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), "a", 2.0}, + {execute.Time(2), "a", 1.0}, + {execute.Time(3), "b", 3.0}, + }, + }}, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + executetest.ProcessTestHelper( + t, + tc.data, + tc.want, + func(d execute.Dataset, c execute.BlockBuilderCache) execute.Transformation { + return functions.NewDistinctTransformation(d, c, tc.spec) + }, + ) + }) + } +} diff --git a/vendor/github.com/influxdata/ifql/functions/filter.go b/vendor/github.com/influxdata/ifql/functions/filter.go new file mode 100644 index 000000000..e7de361c3 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/filter.go @@ -0,0 +1,280 @@ +package functions + +import ( + "fmt" + "log" + + "github.com/influxdata/ifql/ast" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" +) + +const FilterKind = "filter" + +type FilterOpSpec struct { + Fn *semantic.FunctionExpression `json:"fn"` +} + +var filterSignature = query.DefaultFunctionSignature() + +func init() { + 
//TODO(nathanielc): Use complete function signature here, or formalize soft kind validation instead of complete function validation. + filterSignature.Params["fn"] = semantic.Function + + query.RegisterFunction(FilterKind, createFilterOpSpec, filterSignature) + query.RegisterOpSpec(FilterKind, newFilterOp) + plan.RegisterProcedureSpec(FilterKind, newFilterProcedure, FilterKind) + execute.RegisterTransformation(FilterKind, createFilterTransformation) +} + +func createFilterOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + f, err := args.GetRequiredFunction("fn") + if err != nil { + return nil, err + } + + resolved, err := f.Resolve() + if err != nil { + return nil, err + } + + return &FilterOpSpec{ + Fn: resolved, + }, nil +} +func newFilterOp() query.OperationSpec { + return new(FilterOpSpec) +} + +func (s *FilterOpSpec) Kind() query.OperationKind { + return FilterKind +} + +type FilterProcedureSpec struct { + Fn *semantic.FunctionExpression +} + +func newFilterProcedure(qs query.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { + spec, ok := qs.(*FilterOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + + return &FilterProcedureSpec{ + Fn: spec.Fn, + }, nil +} + +func (s *FilterProcedureSpec) Kind() plan.ProcedureKind { + return FilterKind +} +func (s *FilterProcedureSpec) Copy() plan.ProcedureSpec { + ns := new(FilterProcedureSpec) + ns.Fn = s.Fn.Copy().(*semantic.FunctionExpression) + return ns +} + +func (s *FilterProcedureSpec) PushDownRules() []plan.PushDownRule { + return []plan.PushDownRule{ + { + Root: FromKind, + Through: []plan.ProcedureKind{GroupKind, LimitKind, RangeKind}, + Match: func(spec plan.ProcedureSpec) bool { + // TODO(nathanielc): Remove once row functions support calling functions + if _, ok := s.Fn.Body.(semantic.Expression); !ok { + return false + } + fs := spec.(*FromProcedureSpec) + 
if fs.Filter != nil { + if _, ok := fs.Filter.Body.(semantic.Expression); !ok { + return false + } + } + return true + }, + }, + { + Root: FilterKind, + Through: []plan.ProcedureKind{GroupKind, LimitKind, RangeKind}, + Match: func(spec plan.ProcedureSpec) bool { + // TODO(nathanielc): Remove once row functions support calling functions + if _, ok := s.Fn.Body.(semantic.Expression); !ok { + return false + } + fs := spec.(*FilterProcedureSpec) + if _, ok := fs.Fn.Body.(semantic.Expression); !ok { + return false + } + return true + }, + }, + } +} + +func (s *FilterProcedureSpec) PushDown(root *plan.Procedure, dup func() *plan.Procedure) { + switch spec := root.Spec.(type) { + case *FromProcedureSpec: + if spec.FilterSet { + spec.Filter = mergeArrowFunction(spec.Filter, s.Fn) + return + } + spec.FilterSet = true + spec.Filter = s.Fn + case *FilterProcedureSpec: + spec.Fn = mergeArrowFunction(spec.Fn, s.Fn) + } +} + +func mergeArrowFunction(a, b *semantic.FunctionExpression) *semantic.FunctionExpression { + fn := a.Copy().(*semantic.FunctionExpression) + + aExp, aOK := a.Body.(semantic.Expression) + bExp, bOK := b.Body.(semantic.Expression) + + if aOK && bOK { + fn.Body = &semantic.LogicalExpression{ + Operator: ast.AndOperator, + Left: aExp, + Right: bExp, + } + return fn + } + + // TODO(nathanielc): This code is unreachable while the current PushDownRule Match function is inplace. + + and := &semantic.LogicalExpression{ + Operator: ast.AndOperator, + Left: aExp, + Right: bExp, + } + + // Create pass through arguments expression + passThroughArgs := &semantic.ObjectExpression{ + Properties: make([]*semantic.Property, len(a.Params)), + } + for i, p := range a.Params { + passThroughArgs.Properties[i] = &semantic.Property{ + Key: p.Key, + //TODO(nathanielc): Construct valid IdentifierExpression with Declaration for the value. + //Value: p.Key, + } + } + + if !aOK { + // Rewrite left expression as a function call. 
+ and.Left = &semantic.CallExpression{ + Callee: a.Copy().(*semantic.FunctionExpression), + Arguments: passThroughArgs.Copy().(*semantic.ObjectExpression), + } + } + if !bOK { + // Rewrite right expression as a function call. + and.Right = &semantic.CallExpression{ + Callee: b.Copy().(*semantic.FunctionExpression), + Arguments: passThroughArgs.Copy().(*semantic.ObjectExpression), + } + } + return fn +} + +func createFilterTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + s, ok := spec.(*FilterProcedureSpec) + if !ok { + return nil, nil, fmt.Errorf("invalid spec type %T", spec) + } + cache := execute.NewBlockBuilderCache(a.Allocator()) + d := execute.NewDataset(id, mode, cache) + t, err := NewFilterTransformation(d, cache, s) + if err != nil { + return nil, nil, err + } + return t, d, nil +} + +type filterTransformation struct { + d execute.Dataset + cache execute.BlockBuilderCache + + fn *execute.RowPredicateFn +} + +func NewFilterTransformation(d execute.Dataset, cache execute.BlockBuilderCache, spec *FilterProcedureSpec) (*filterTransformation, error) { + fn, err := execute.NewRowPredicateFn(spec.Fn) + if err != nil { + return nil, err + } + + return &filterTransformation{ + d: d, + cache: cache, + fn: fn, + }, nil +} + +func (t *filterTransformation) RetractBlock(id execute.DatasetID, meta execute.BlockMetadata) error { + return t.d.RetractBlock(execute.ToBlockKey(meta)) +} + +func (t *filterTransformation) Process(id execute.DatasetID, b execute.Block) error { + builder, new := t.cache.BlockBuilder(b) + if new { + execute.AddBlockCols(b, builder) + } + + // Prepare the function for the column types. + cols := b.Cols() + if err := t.fn.Prepare(cols); err != nil { + // TODO(nathanielc): Should we not fail the query for failed compilation? 
+ return err + } + + // Append only matching rows to block + b.Times().DoTime(func(ts []execute.Time, rr execute.RowReader) { + for i := range ts { + if pass, err := t.fn.Eval(i, rr); err != nil { + log.Printf("failed to evaluate filter expression: %v", err) + continue + } else if !pass { + // No match, skipping + continue + } + for j, c := range cols { + if c.Common { + continue + } + switch c.Type { + case execute.TBool: + builder.AppendBool(j, rr.AtBool(i, j)) + case execute.TInt: + builder.AppendInt(j, rr.AtInt(i, j)) + case execute.TUInt: + builder.AppendUInt(j, rr.AtUInt(i, j)) + case execute.TFloat: + builder.AppendFloat(j, rr.AtFloat(i, j)) + case execute.TString: + builder.AppendString(j, rr.AtString(i, j)) + case execute.TTime: + builder.AppendTime(j, rr.AtTime(i, j)) + default: + execute.PanicUnknownType(c.Type) + } + } + } + }) + return nil +} + +func (t *filterTransformation) UpdateWatermark(id execute.DatasetID, mark execute.Time) error { + return t.d.UpdateWatermark(mark) +} +func (t *filterTransformation) UpdateProcessingTime(id execute.DatasetID, pt execute.Time) error { + return t.d.UpdateProcessingTime(pt) +} +func (t *filterTransformation) Finish(id execute.DatasetID, err error) { + t.d.Finish(err) +} diff --git a/vendor/github.com/influxdata/ifql/functions/filter_test.go b/vendor/github.com/influxdata/ifql/functions/filter_test.go new file mode 100644 index 000000000..b0879d469 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/filter_test.go @@ -0,0 +1,911 @@ +package functions_test + +import ( + "regexp" + "testing" + "time" + + "github.com/influxdata/ifql/ast" + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/query/plan/plantest" + "github.com/influxdata/ifql/query/querytest" + "github.com/influxdata/ifql/semantic" +) + +func 
TestFilter_NewQuery(t *testing.T) { + tests := []querytest.NewQueryTestCase{ + { + Name: "from with database filter and range", + Raw: `from(db:"mydb") |> filter(fn: (r) => r["t1"]=="val1" and r["t2"]=="val2") |> range(start:-4h, stop:-2h) |> count()`, + Want: &query.Spec{ + Operations: []*query.Operation{ + { + ID: "from0", + Spec: &functions.FromOpSpec{ + Database: "mydb", + }, + }, + { + ID: "filter1", + Spec: &functions.FilterOpSpec{ + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.LogicalExpression{ + Operator: ast.AndOperator, + Left: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "t1", + }, + Right: &semantic.StringLiteral{Value: "val1"}, + }, + Right: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "t2", + }, + Right: &semantic.StringLiteral{Value: "val2"}, + }, + }, + }, + }, + }, + { + ID: "range2", + Spec: &functions.RangeOpSpec{ + Start: query.Time{ + Relative: -4 * time.Hour, + IsRelative: true, + }, + Stop: query.Time{ + Relative: -2 * time.Hour, + IsRelative: true, + }, + }, + }, + { + ID: "count3", + Spec: &functions.CountOpSpec{}, + }, + }, + Edges: []query.Edge{ + {Parent: "from0", Child: "filter1"}, + {Parent: "filter1", Child: "range2"}, + {Parent: "range2", Child: "count3"}, + }, + }, + }, + { + Name: "from with database filter (and with or) and range", + Raw: `from(db:"mydb") + |> filter(fn: (r) => + ( + (r["t1"]=="val1") + and + (r["t2"]=="val2") + ) + or + (r["t3"]=="val3") + ) + |> range(start:-4h, stop:-2h) + |> count()`, + Want: &query.Spec{ + Operations: []*query.Operation{ + { + ID: "from0", + Spec: &functions.FromOpSpec{ + Database: "mydb", + }, + }, + { + ID: "filter1", + Spec: &functions.FilterOpSpec{ + Fn: 
&semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.LogicalExpression{ + Operator: ast.OrOperator, + Left: &semantic.LogicalExpression{ + Operator: ast.AndOperator, + Left: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "t1", + }, + Right: &semantic.StringLiteral{Value: "val1"}, + }, + Right: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "t2", + }, + Right: &semantic.StringLiteral{Value: "val2"}, + }, + }, + Right: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "t3", + }, + Right: &semantic.StringLiteral{Value: "val3"}, + }, + }, + }, + }, + }, + { + ID: "range2", + Spec: &functions.RangeOpSpec{ + Start: query.Time{ + Relative: -4 * time.Hour, + IsRelative: true, + }, + Stop: query.Time{ + Relative: -2 * time.Hour, + IsRelative: true, + }, + }, + }, + { + ID: "count3", + Spec: &functions.CountOpSpec{}, + }, + }, + Edges: []query.Edge{ + {Parent: "from0", Child: "filter1"}, + {Parent: "filter1", Child: "range2"}, + {Parent: "range2", Child: "count3"}, + }, + }, + }, + { + Name: "from with database filter including fields", + Raw: `from(db:"mydb") + |> filter(fn: (r) => + (r["t1"] =="val1") + and + (r["_field"] == 10) + ) + |> range(start:-4h, stop:-2h) + |> count()`, + Want: &query.Spec{ + Operations: []*query.Operation{ + { + ID: "from0", + Spec: &functions.FromOpSpec{ + Database: "mydb", + }, + }, + { + ID: "filter1", + Spec: &functions.FilterOpSpec{ + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.LogicalExpression{ + Operator: ast.AndOperator, + Left: 
&semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "t1", + }, + Right: &semantic.StringLiteral{Value: "val1"}, + }, + Right: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "_field", + }, + Right: &semantic.IntegerLiteral{Value: 10}, + }, + }, + }, + }, + }, + { + ID: "range2", + Spec: &functions.RangeOpSpec{ + Start: query.Time{ + Relative: -4 * time.Hour, + IsRelative: true, + }, + Stop: query.Time{ + Relative: -2 * time.Hour, + IsRelative: true, + }, + }, + }, + { + ID: "count3", + Spec: &functions.CountOpSpec{}, + }, + }, + Edges: []query.Edge{ + {Parent: "from0", Child: "filter1"}, + {Parent: "filter1", Child: "range2"}, + {Parent: "range2", Child: "count3"}, + }, + }, + }, + { + Name: "from with database filter with no parens including fields", + Raw: `from(db:"mydb") + |> filter(fn: (r) => + r["t1"]=="val1" + and + r["_field"] == 10 + ) + |> range(start:-4h, stop:-2h) + |> count()`, + Want: &query.Spec{ + Operations: []*query.Operation{ + { + ID: "from0", + Spec: &functions.FromOpSpec{ + Database: "mydb", + }, + }, + { + ID: "filter1", + Spec: &functions.FilterOpSpec{ + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.LogicalExpression{ + Operator: ast.AndOperator, + Left: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "t1", + }, + Right: &semantic.StringLiteral{Value: "val1"}, + }, + Right: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "_field", + }, + Right: &semantic.IntegerLiteral{Value: 10}, + }, + }, + }, + }, + }, + { + ID: "range2", 
+ Spec: &functions.RangeOpSpec{ + Start: query.Time{ + Relative: -4 * time.Hour, + IsRelative: true, + }, + Stop: query.Time{ + Relative: -2 * time.Hour, + IsRelative: true, + }, + }, + }, + { + ID: "count3", + Spec: &functions.CountOpSpec{}, + }, + }, + Edges: []query.Edge{ + {Parent: "from0", Child: "filter1"}, + {Parent: "filter1", Child: "range2"}, + {Parent: "range2", Child: "count3"}, + }, + }, + }, + { + Name: "from with database filter with no parens including regex and field", + Raw: `from(db:"mydb") + |> filter(fn: (r) => + r["t1"]==/val1/ + and + r["_field"] == 10.5 + ) + |> range(start:-4h, stop:-2h) + |> count()`, + Want: &query.Spec{ + Operations: []*query.Operation{ + { + ID: "from0", + Spec: &functions.FromOpSpec{ + Database: "mydb", + }, + }, + { + ID: "filter1", + Spec: &functions.FilterOpSpec{ + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.LogicalExpression{ + Operator: ast.AndOperator, + Left: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "t1", + }, + Right: &semantic.RegexpLiteral{Value: regexp.MustCompile("val1")}, + }, + Right: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "_field", + }, + Right: &semantic.FloatLiteral{Value: 10.5}, + }, + }, + }, + }, + }, + { + ID: "range2", + Spec: &functions.RangeOpSpec{ + Start: query.Time{ + Relative: -4 * time.Hour, + IsRelative: true, + }, + Stop: query.Time{ + Relative: -2 * time.Hour, + IsRelative: true, + }, + }, + }, + { + ID: "count3", + Spec: &functions.CountOpSpec{}, + }, + }, + Edges: []query.Edge{ + {Parent: "from0", Child: "filter1"}, + {Parent: "filter1", Child: "range2"}, + {Parent: "range2", Child: "count3"}, + }, + }, + }, + { + Name: "from with database regex with escape", + Raw: 
`from(db:"mydb") + |> filter(fn: (r) => + r["t1"]==/va\/l1/ + )`, + Want: &query.Spec{ + Operations: []*query.Operation{ + { + ID: "from0", + Spec: &functions.FromOpSpec{ + Database: "mydb", + }, + }, + { + ID: "filter1", + Spec: &functions.FilterOpSpec{ + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "t1", + }, + Right: &semantic.RegexpLiteral{Value: regexp.MustCompile(`va/l1`)}, + }, + }, + }, + }, + }, + Edges: []query.Edge{ + {Parent: "from0", Child: "filter1"}, + }, + }, + }, + { + Name: "from with database with two regex", + Raw: `from(db:"mydb") + |> filter(fn: (r) => + r["t1"]==/va\/l1/ + and + r["t2"] != /val2/ + )`, + Want: &query.Spec{ + Operations: []*query.Operation{ + { + ID: "from0", + Spec: &functions.FromOpSpec{ + Database: "mydb", + }, + }, + { + ID: "filter1", + Spec: &functions.FilterOpSpec{ + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.LogicalExpression{ + Operator: ast.AndOperator, + Left: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "t1", + }, + Right: &semantic.RegexpLiteral{Value: regexp.MustCompile(`va/l1`)}, + }, + Right: &semantic.BinaryExpression{ + Operator: ast.NotEqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "t2", + }, + Right: &semantic.RegexpLiteral{Value: regexp.MustCompile(`val2`)}, + }, + }, + }, + }, + }, + }, + Edges: []query.Edge{ + {Parent: "from0", Child: "filter1"}, + }, + }, + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.Name, func(t *testing.T) { + t.Parallel() + querytest.NewQueryTestHelper(t, tc) + }) + } +} 
+func TestFilterOperation_Marshaling(t *testing.T) { + data := []byte(`{ + "id":"filter", + "kind":"filter", + "spec":{ + "fn":{ + "type": "ArrowFunctionExpression", + "params": [{"type":"FunctionParam","key":{"type":"Identifier","name":"r"}}], + "body":{ + "type":"BinaryExpression", + "operator": "!=", + "left":{ + "type":"MemberExpression", + "object": { + "type": "IdentifierExpression", + "name":"r" + }, + "property": "_measurement" + }, + "right":{ + "type":"StringLiteral", + "value":"mem" + } + } + } + } + }`) + op := &query.Operation{ + ID: "filter", + Spec: &functions.FilterOpSpec{ + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.BinaryExpression{ + Operator: ast.NotEqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "_measurement", + }, + Right: &semantic.StringLiteral{Value: "mem"}, + }, + }, + }, + } + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestFilter_Process(t *testing.T) { + testCases := []struct { + name string + spec *functions.FilterProcedureSpec + data []execute.Block + want []*executetest.Block + }{ + { + name: `_value>5`, + spec: &functions.FilterProcedureSpec{ + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.BinaryExpression{ + Operator: ast.GreaterThanOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "_value", + }, + Right: &semantic.FloatLiteral{Value: 5}, + }, + }, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0}, + {execute.Time(2), 6.0}, + }, + }}, + want: 
[]*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(2), 6.0}, + }, + }}, + }, + { + name: "_value>5 multiple blocks", + spec: &functions.FilterProcedureSpec{ + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.BinaryExpression{ + Operator: ast.GreaterThanOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "_value", + }, + Right: &semantic.FloatLiteral{ + Value: 5, + }, + }, + }, + }, + data: []execute.Block{ + &executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 3.0}, + {execute.Time(2), 6.0}, + {execute.Time(2), 1.0}, + }, + }, + &executetest.Block{ + Bnds: execute.Bounds{ + Start: 3, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(3), 3.0}, + {execute.Time(3), 2.0}, + {execute.Time(4), 8.0}, + }, + }, + }, + want: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(2), 6.0}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 3, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: 
"_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(4), 8.0}, + }, + }, + }, + }, + { + name: "_value>5 and t1 = a and t2 = y", + spec: &functions.FilterProcedureSpec{ + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.LogicalExpression{ + Operator: ast.AndOperator, + Left: &semantic.BinaryExpression{ + Operator: ast.GreaterThanOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "_value", + }, + Right: &semantic.FloatLiteral{ + Value: 5, + }, + }, + Right: &semantic.LogicalExpression{ + Operator: ast.AndOperator, + Left: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "t1", + }, + Right: &semantic.StringLiteral{ + Value: "a", + }, + }, + Right: &semantic.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "t2", + }, + Right: &semantic.StringLiteral{ + Value: "y", + }, + }, + }, + }, + }, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0, "a", "x"}, + {execute.Time(2), 6.0, "a", "x"}, + {execute.Time(3), 8.0, "a", "y"}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: 
execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(3), 8.0, "a", "y"}, + }, + }}, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + executetest.ProcessTestHelper( + t, + tc.data, + tc.want, + func(d execute.Dataset, c execute.BlockBuilderCache) execute.Transformation { + f, err := functions.NewFilterTransformation(d, c, tc.spec) + if err != nil { + t.Fatal(err) + } + return f + }, + ) + }) + } +} + +func TestFilter_PushDown(t *testing.T) { + spec := &functions.FilterProcedureSpec{ + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.BinaryExpression{ + Operator: ast.NotEqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "_measurement", + }, + Right: &semantic.StringLiteral{Value: "mem"}, + }, + }, + } + root := &plan.Procedure{ + Spec: new(functions.FromProcedureSpec), + } + want := &plan.Procedure{ + Spec: &functions.FromProcedureSpec{ + FilterSet: true, + Filter: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.BinaryExpression{ + Operator: ast.NotEqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "_measurement", + }, + Right: &semantic.StringLiteral{Value: "mem"}, + }, + }, + }, + } + + plantest.PhysicalPlan_PushDown_TestHelper(t, spec, root, false, want) +} + +func TestFilter_PushDown_MergeExpressions(t *testing.T) { + testCases := []struct { + name string + spec *functions.FilterProcedureSpec + root *plan.Procedure + want *plan.Procedure + }{ + { + name: "merge with from", + spec: &functions.FilterProcedureSpec{ + Fn: 
&semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.BinaryExpression{ + Operator: ast.NotEqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "_measurement", + }, + Right: &semantic.StringLiteral{Value: "cpu"}, + }, + }, + }, + root: &plan.Procedure{ + Spec: &functions.FromProcedureSpec{ + FilterSet: true, + Filter: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.BinaryExpression{ + Operator: ast.NotEqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "_measurement", + }, + Right: &semantic.StringLiteral{Value: "mem"}, + }, + }, + }, + }, + want: &plan.Procedure{ + Spec: &functions.FromProcedureSpec{ + FilterSet: true, + Filter: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.LogicalExpression{ + Operator: ast.AndOperator, + Left: &semantic.BinaryExpression{ + Operator: ast.NotEqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "_measurement", + }, + Right: &semantic.StringLiteral{Value: "mem"}, + }, + Right: &semantic.BinaryExpression{ + Operator: ast.NotEqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "_measurement", + }, + Right: &semantic.StringLiteral{Value: "cpu"}, + }, + }, + }, + }, + }, + }, + { + name: "merge with filter", + spec: &functions.FilterProcedureSpec{ + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.BinaryExpression{ + Operator: ast.NotEqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "_measurement", + }, + Right: 
&semantic.StringLiteral{Value: "cpu"}, + }, + }, + }, + root: &plan.Procedure{ + Spec: &functions.FilterProcedureSpec{ + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.BinaryExpression{ + Operator: ast.NotEqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "_measurement", + }, + Right: &semantic.StringLiteral{Value: "mem"}, + }, + }, + }, + }, + want: &plan.Procedure{ + Spec: &functions.FilterProcedureSpec{ + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.LogicalExpression{ + Operator: ast.AndOperator, + Left: &semantic.BinaryExpression{ + Operator: ast.NotEqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "_measurement", + }, + Right: &semantic.StringLiteral{Value: "mem"}, + }, + Right: &semantic.BinaryExpression{ + Operator: ast.NotEqualOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "_measurement", + }, + Right: &semantic.StringLiteral{Value: "cpu"}, + }, + }, + }, + }, + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + plantest.PhysicalPlan_PushDown_TestHelper(t, tc.spec, tc.root, false, tc.want) + }) + } +} diff --git a/vendor/github.com/influxdata/ifql/functions/first.go b/vendor/github.com/influxdata/ifql/functions/first.go new file mode 100644 index 000000000..247b3fb52 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/first.go @@ -0,0 +1,181 @@ +package functions + +import ( + "fmt" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" +) + +const FirstKind = "first" + +type FirstOpSpec struct { + Column string `json:"column"` + 
UseRowTime bool `json:"useRowtime"` +} + +var firstSignature = query.DefaultFunctionSignature() + +func init() { + firstSignature.Params["column"] = semantic.String + firstSignature.Params["useRowTime"] = semantic.Bool + + query.RegisterFunction(FirstKind, createFirstOpSpec, firstSignature) + query.RegisterOpSpec(FirstKind, newFirstOp) + plan.RegisterProcedureSpec(FirstKind, newFirstProcedure, FirstKind) + execute.RegisterTransformation(FirstKind, createFirstTransformation) +} + +func createFirstOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + spec := new(FirstOpSpec) + if c, ok, err := args.GetString("column"); err != nil { + return nil, err + } else if ok { + spec.Column = c + } + if useRowTime, ok, err := args.GetBool("useRowTime"); err != nil { + return nil, err + } else if ok { + spec.UseRowTime = useRowTime + } + + return spec, nil +} + +func newFirstOp() query.OperationSpec { + return new(FirstOpSpec) +} + +func (s *FirstOpSpec) Kind() query.OperationKind { + return FirstKind +} + +type FirstProcedureSpec struct { + Column string + UseRowTime bool +} + +func newFirstProcedure(qs query.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { + spec, ok := qs.(*FirstOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + return &FirstProcedureSpec{ + Column: spec.Column, + UseRowTime: spec.UseRowTime, + }, nil +} + +func (s *FirstProcedureSpec) Kind() plan.ProcedureKind { + return FirstKind +} +func (s *FirstProcedureSpec) PushDownRules() []plan.PushDownRule { + return []plan.PushDownRule{{ + Root: FromKind, + Through: []plan.ProcedureKind{GroupKind, LimitKind, FilterKind}, + Match: func(spec plan.ProcedureSpec) bool { + selectSpec := spec.(*FromProcedureSpec) + return !selectSpec.AggregateSet + }, + }} +} + +func (s *FirstProcedureSpec) PushDown(root *plan.Procedure, dup func() *plan.Procedure) { + selectSpec := 
root.Spec.(*FromProcedureSpec) + if selectSpec.BoundsSet || selectSpec.LimitSet || selectSpec.DescendingSet { + root = dup() + selectSpec = root.Spec.(*FromProcedureSpec) + selectSpec.BoundsSet = false + selectSpec.Bounds = plan.BoundsSpec{} + selectSpec.LimitSet = false + selectSpec.PointsLimit = 0 + selectSpec.SeriesLimit = 0 + selectSpec.SeriesOffset = 0 + selectSpec.DescendingSet = false + selectSpec.Descending = false + return + } + selectSpec.BoundsSet = true + selectSpec.Bounds = plan.BoundsSpec{ + Start: query.MinTime, + Stop: query.Now, + } + selectSpec.LimitSet = true + selectSpec.PointsLimit = 1 + selectSpec.DescendingSet = true + selectSpec.Descending = false +} +func (s *FirstProcedureSpec) Copy() plan.ProcedureSpec { + ns := new(FirstProcedureSpec) + *ns = *s + ns.Column = s.Column + ns.UseRowTime = s.UseRowTime + return ns +} + +type FirstSelector struct { + selected bool +} + +func createFirstTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + ps, ok := spec.(*FirstProcedureSpec) + if !ok { + return nil, nil, fmt.Errorf("invalid spec type %T", ps) + } + t, d := execute.NewIndexSelectorTransformationAndDataset(id, mode, a.Bounds(), new(FirstSelector), ps.Column, ps.UseRowTime, a.Allocator()) + return t, d, nil +} + +func (s *FirstSelector) reset() { + s.selected = false +} + +func (s *FirstSelector) NewBoolSelector() execute.DoBoolIndexSelector { + s.reset() + return s +} +func (s *FirstSelector) NewIntSelector() execute.DoIntIndexSelector { + s.reset() + return s +} +func (s *FirstSelector) NewUIntSelector() execute.DoUIntIndexSelector { + s.reset() + return s +} +func (s *FirstSelector) NewFloatSelector() execute.DoFloatIndexSelector { + s.reset() + return s +} +func (s *FirstSelector) NewStringSelector() execute.DoStringIndexSelector { + s.reset() + return s +} + +func (s *FirstSelector) selectFirst(l int) []int { + if 
!s.selected && l > 0 { + s.selected = true + return []int{0} + } + return nil +} +func (s *FirstSelector) DoBool(vs []bool) []int { + return s.selectFirst(len(vs)) +} +func (s *FirstSelector) DoInt(vs []int64) []int { + return s.selectFirst(len(vs)) +} +func (s *FirstSelector) DoUInt(vs []uint64) []int { + return s.selectFirst(len(vs)) +} +func (s *FirstSelector) DoFloat(vs []float64) []int { + return s.selectFirst(len(vs)) +} +func (s *FirstSelector) DoString(vs []string) []int { + return s.selectFirst(len(vs)) +} diff --git a/vendor/github.com/influxdata/ifql/functions/first_test.go b/vendor/github.com/influxdata/ifql/functions/first_test.go new file mode 100644 index 000000000..9e0225f82 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/first_test.go @@ -0,0 +1,130 @@ +package functions_test + +import ( + "testing" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/query/plan/plantest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestFirstOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"first","kind":"first","spec":{"useRowTime":true}}`) + op := &query.Operation{ + ID: "first", + Spec: &functions.FirstOpSpec{ + UseRowTime: true, + }, + } + + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestFirst_Process(t *testing.T) { + testCases := []struct { + name string + data *executetest.Block + want [][]int + }{ + { + name: "first", + data: &executetest.Block{ + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ 
+ {execute.Time(0), 0.0, "a", "y"}, + {execute.Time(10), 5.0, "a", "x"}, + {execute.Time(20), 9.0, "a", "y"}, + {execute.Time(30), 4.0, "a", "x"}, + {execute.Time(40), 6.0, "a", "y"}, + {execute.Time(50), 8.0, "a", "x"}, + {execute.Time(60), 1.0, "a", "y"}, + {execute.Time(70), 2.0, "a", "x"}, + {execute.Time(80), 3.0, "a", "y"}, + {execute.Time(90), 7.0, "a", "x"}, + }, + }, + want: [][]int{{0}, nil, nil, nil, nil, nil, nil, nil, nil, nil}, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + executetest.IndexSelectorFuncTestHelper( + t, + new(functions.FirstSelector), + tc.data, + tc.want, + ) + }) + } +} + +func BenchmarkFirst(b *testing.B) { + executetest.IndexSelectorFuncBenchmarkHelper(b, new(functions.FirstSelector), NormalBlock) +} + +func TestFirst_PushDown_Match(t *testing.T) { + spec := new(functions.FirstProcedureSpec) + from := new(functions.FromProcedureSpec) + + // Should not match when an aggregate is set + from.AggregateSet = true + plantest.PhysicalPlan_PushDown_Match_TestHelper(t, spec, from, []bool{false}) + + // Should match when no aggregate is set + from.AggregateSet = false + plantest.PhysicalPlan_PushDown_Match_TestHelper(t, spec, from, []bool{true}) +} + +func TestFirst_PushDown(t *testing.T) { + spec := new(functions.FirstProcedureSpec) + root := &plan.Procedure{ + Spec: new(functions.FromProcedureSpec), + } + want := &plan.Procedure{ + Spec: &functions.FromProcedureSpec{ + BoundsSet: true, + Bounds: plan.BoundsSpec{ + Start: query.MinTime, + Stop: query.Now, + }, + LimitSet: true, + PointsLimit: 1, + DescendingSet: true, + Descending: false, + }, + } + + plantest.PhysicalPlan_PushDown_TestHelper(t, spec, root, false, want) +} +func TestFirst_PushDown_Duplicate(t *testing.T) { + spec := new(functions.FirstProcedureSpec) + root := &plan.Procedure{ + Spec: &functions.FromProcedureSpec{ + BoundsSet: true, + Bounds: plan.BoundsSpec{ + Start: query.MinTime, + Stop: query.Now, + }, + LimitSet: true, + 
PointsLimit: 1, + DescendingSet: true, + Descending: false, + }, + } + want := &plan.Procedure{ + // Expect the duplicate has been reset to zero values + Spec: new(functions.FromProcedureSpec), + } + + plantest.PhysicalPlan_PushDown_TestHelper(t, spec, root, true, want) +} diff --git a/vendor/github.com/influxdata/ifql/functions/from.go b/vendor/github.com/influxdata/ifql/functions/from.go new file mode 100644 index 000000000..cebff8617 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/from.go @@ -0,0 +1,187 @@ +package functions + +import ( + "fmt" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" +) + +const FromKind = "from" + +type FromOpSpec struct { + Database string `json:"database"` + Hosts []string `json:"hosts"` +} + +var fromSignature = semantic.FunctionSignature{ + Params: map[string]semantic.Type{ + "db": semantic.String, + }, + ReturnType: query.TableObjectType, +} + +func init() { + query.RegisterFunction(FromKind, createFromOpSpec, fromSignature) + query.RegisterOpSpec(FromKind, newFromOp) + plan.RegisterProcedureSpec(FromKind, newFromProcedure, FromKind) + execute.RegisterSource(FromKind, createFromSource) +} + +func createFromOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + db, err := args.GetRequiredString("db") + if err != nil { + return nil, err + } + spec := &FromOpSpec{ + Database: db, + } + + if array, ok, err := args.GetArray("hosts", semantic.String); err != nil { + return nil, err + } else if ok { + spec.Hosts = array.AsStrings() + } + return spec, nil +} + +func newFromOp() query.OperationSpec { + return new(FromOpSpec) +} + +func (s *FromOpSpec) Kind() query.OperationKind { + return FromKind +} + +type FromProcedureSpec struct { + Database string + Hosts []string + + BoundsSet bool + Bounds plan.BoundsSpec + + FilterSet bool + Filter *semantic.FunctionExpression + + 
DescendingSet bool + Descending bool + + LimitSet bool + PointsLimit int64 + SeriesLimit int64 + SeriesOffset int64 + + WindowSet bool + Window plan.WindowSpec + + GroupingSet bool + OrderByTime bool + MergeAll bool + GroupKeys []string + GroupExcept []string + GroupKeep []string + + AggregateSet bool + AggregateMethod string +} + +func newFromProcedure(qs query.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { + spec, ok := qs.(*FromOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + + return &FromProcedureSpec{ + Database: spec.Database, + Hosts: spec.Hosts, + }, nil +} + +func (s *FromProcedureSpec) Kind() plan.ProcedureKind { + return FromKind +} +func (s *FromProcedureSpec) TimeBounds() plan.BoundsSpec { + return s.Bounds +} +func (s *FromProcedureSpec) Copy() plan.ProcedureSpec { + ns := new(FromProcedureSpec) + + ns.Database = s.Database + + if len(s.Hosts) > 0 { + ns.Hosts = make([]string, len(s.Hosts)) + copy(ns.Hosts, s.Hosts) + } + + ns.BoundsSet = s.BoundsSet + ns.Bounds = s.Bounds + + ns.FilterSet = s.FilterSet + // TODO copy predicate + ns.Filter = s.Filter + + ns.DescendingSet = s.DescendingSet + ns.Descending = s.Descending + + ns.LimitSet = s.LimitSet + ns.PointsLimit = s.PointsLimit + ns.SeriesLimit = s.SeriesLimit + ns.SeriesOffset = s.SeriesOffset + + ns.WindowSet = s.WindowSet + ns.Window = s.Window + + ns.AggregateSet = s.AggregateSet + ns.AggregateMethod = s.AggregateMethod + + return ns +} + +func createFromSource(prSpec plan.ProcedureSpec, id execute.DatasetID, sr execute.StorageReader, a execute.Administration) execute.Source { + spec := prSpec.(*FromProcedureSpec) + var w execute.Window + if spec.WindowSet { + w = execute.Window{ + Every: execute.Duration(spec.Window.Every), + Period: execute.Duration(spec.Window.Period), + Round: execute.Duration(spec.Window.Round), + Start: a.ResolveTime(spec.Window.Start), + } + } else { + duration := execute.Duration(a.ResolveTime(spec.Bounds.Stop)) - 
execute.Duration(a.ResolveTime(spec.Bounds.Start)) + w = execute.Window{ + Every: duration, + Period: duration, + Start: a.ResolveTime(spec.Bounds.Start), + } + } + currentTime := w.Start + execute.Time(w.Period) + bounds := execute.Bounds{ + Start: a.ResolveTime(spec.Bounds.Start), + Stop: a.ResolveTime(spec.Bounds.Stop), + } + return execute.NewStorageSource( + id, + sr, + execute.ReadSpec{ + Database: spec.Database, + Hosts: spec.Hosts, + Predicate: spec.Filter, + PointsLimit: spec.PointsLimit, + SeriesLimit: spec.SeriesLimit, + SeriesOffset: spec.SeriesOffset, + Descending: spec.Descending, + OrderByTime: spec.OrderByTime, + MergeAll: spec.MergeAll, + GroupKeys: spec.GroupKeys, + GroupExcept: spec.GroupExcept, + GroupKeep: spec.GroupKeep, + AggregateMethod: spec.AggregateMethod, + }, + bounds, + w, + currentTime, + ) +} diff --git a/vendor/github.com/influxdata/ifql/functions/from_test.go b/vendor/github.com/influxdata/ifql/functions/from_test.go new file mode 100644 index 000000000..e3375ca30 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/from_test.go @@ -0,0 +1,83 @@ +package functions_test + +import ( + "testing" + "time" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/querytest" +) + +func TestFrom_NewQuery(t *testing.T) { + tests := []querytest.NewQueryTestCase{ + { + Name: "from", + Raw: `from()`, + WantErr: true, + }, + { + Name: "from", + Raw: `from(db:"telegraf", db:"oops")`, + WantErr: true, + }, + { + Name: "from", + Raw: `from(db:"telegraf", chicken:"what is this?")`, + WantErr: true, + }, + { + Name: "from with database", + Raw: `from(db:"mydb") |> range(start:-4h, stop:-2h) |> sum()`, + Want: &query.Spec{ + Operations: []*query.Operation{ + { + ID: "from0", + Spec: &functions.FromOpSpec{ + Database: "mydb", + }, + }, + { + ID: "range1", + Spec: &functions.RangeOpSpec{ + Start: query.Time{ + Relative: -4 * time.Hour, + IsRelative: true, + }, + Stop: query.Time{ + 
Relative: -2 * time.Hour, + IsRelative: true, + }, + }, + }, + { + ID: "sum2", + Spec: &functions.SumOpSpec{}, + }, + }, + Edges: []query.Edge{ + {Parent: "from0", Child: "range1"}, + {Parent: "range1", Child: "sum2"}, + }, + }, + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.Name, func(t *testing.T) { + t.Parallel() + querytest.NewQueryTestHelper(t, tc) + }) + } +} + +func TestFromOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"from","kind":"from","spec":{"database":"mydb"}}`) + op := &query.Operation{ + ID: "from", + Spec: &functions.FromOpSpec{ + Database: "mydb", + }, + } + querytest.OperationMarshalingTestHelper(t, data, op) +} diff --git a/vendor/github.com/influxdata/ifql/functions/group.go b/vendor/github.com/influxdata/ifql/functions/group.go new file mode 100644 index 000000000..17f7cac52 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/group.go @@ -0,0 +1,449 @@ +package functions + +import ( + "errors" + "fmt" + "sort" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" +) + +const GroupKind = "group" + +type GroupOpSpec struct { + By []string `json:"by"` + Keep []string `json:"keep"` + Except []string `json:"except"` +} + +var groupSignature = query.DefaultFunctionSignature() + +func init() { + groupSignature.Params["by"] = semantic.NewArrayType(semantic.String) + groupSignature.Params["keep"] = semantic.NewArrayType(semantic.String) + groupSignature.Params["except"] = semantic.NewArrayType(semantic.String) + + query.RegisterFunction(GroupKind, createGroupOpSpec, groupSignature) + query.RegisterOpSpec(GroupKind, newGroupOp) + plan.RegisterProcedureSpec(GroupKind, newGroupProcedure, GroupKind) + plan.RegisterRewriteRule(AggregateGroupRewriteRule{}) + execute.RegisterTransformation(GroupKind, createGroupTransformation) +} + +func createGroupOpSpec(args query.Arguments, a *query.Administration) 
(query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + spec := new(GroupOpSpec) + if array, ok, err := args.GetArray("by", semantic.String); err != nil { + return nil, err + } else if ok { + spec.By = array.AsStrings() + } + if array, ok, err := args.GetArray("keep", semantic.String); err != nil { + return nil, err + } else if ok { + spec.Keep = array.AsStrings() + } + if array, ok, err := args.GetArray("except", semantic.String); err != nil { + return nil, err + } else if ok { + spec.Except = array.AsStrings() + } + + if len(spec.By) > 0 && len(spec.Except) > 0 { + return nil, errors.New(`cannot specify both "by" and "except" keyword arguments`) + } + return spec, nil +} + +func newGroupOp() query.OperationSpec { + return new(GroupOpSpec) +} + +func (s *GroupOpSpec) Kind() query.OperationKind { + return GroupKind +} + +type GroupProcedureSpec struct { + By []string + Except []string + Keep []string +} + +func newGroupProcedure(qs query.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { + spec, ok := qs.(*GroupOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + + p := &GroupProcedureSpec{ + By: spec.By, + Except: spec.Except, + Keep: spec.Keep, + } + return p, nil +} + +func (s *GroupProcedureSpec) Kind() plan.ProcedureKind { + return GroupKind +} +func (s *GroupProcedureSpec) Copy() plan.ProcedureSpec { + ns := new(GroupProcedureSpec) + + ns.By = make([]string, len(s.By)) + copy(ns.By, s.By) + + ns.Except = make([]string, len(s.Except)) + copy(ns.Except, s.Except) + + ns.Keep = make([]string, len(s.Keep)) + copy(ns.Keep, s.Keep) + + return ns +} + +func (s *GroupProcedureSpec) PushDownRules() []plan.PushDownRule { + return []plan.PushDownRule{{ + Root: FromKind, + Through: []plan.ProcedureKind{LimitKind, RangeKind, FilterKind}, + Match: func(spec plan.ProcedureSpec) bool { + selectSpec := spec.(*FromProcedureSpec) + return !selectSpec.AggregateSet + }, + }} +} + +func 
(s *GroupProcedureSpec) PushDown(root *plan.Procedure, dup func() *plan.Procedure) { + selectSpec := root.Spec.(*FromProcedureSpec) + if selectSpec.GroupingSet { + root = dup() + selectSpec = root.Spec.(*FromProcedureSpec) + selectSpec.OrderByTime = false + selectSpec.GroupingSet = false + selectSpec.MergeAll = false + selectSpec.GroupKeys = nil + selectSpec.GroupExcept = nil + selectSpec.GroupKeep = nil + return + } + selectSpec.GroupingSet = true + // TODO implement OrderByTime + //selectSpec.OrderByTime = true + + // Merge all series into a single group if we have no specific grouping dimensions. + selectSpec.MergeAll = len(s.By) == 0 && len(s.Except) == 0 + selectSpec.GroupKeys = s.By + selectSpec.GroupExcept = s.Except + selectSpec.GroupKeep = s.Keep +} + +type AggregateGroupRewriteRule struct { +} + +func (r AggregateGroupRewriteRule) Root() plan.ProcedureKind { + return FromKind +} + +func (r AggregateGroupRewriteRule) Rewrite(pr *plan.Procedure, planner plan.PlanRewriter) error { + var agg *plan.Procedure + pr.DoChildren(func(child *plan.Procedure) { + if _, ok := child.Spec.(plan.AggregateProcedureSpec); ok { + agg = child + } + }) + if agg == nil { + return nil + } + fromSpec := pr.Spec.(*FromProcedureSpec) + if fromSpec.AggregateSet { + return nil + } + + // Rewrite + isoFrom, err := planner.IsolatePath(pr, agg) + if err != nil { + return err + } + return r.rewrite(isoFrom, planner) +} + +func (r AggregateGroupRewriteRule) rewrite(fromPr *plan.Procedure, planner plan.PlanRewriter) error { + fromSpec := fromPr.Spec.(*FromProcedureSpec) + aggPr := fromPr.Child(0) + aggSpec := aggPr.Spec.(plan.AggregateProcedureSpec) + + fromSpec.AggregateSet = true + fromSpec.AggregateMethod = aggSpec.AggregateMethod() + + if err := planner.RemoveBranch(aggPr); err != nil { + return err + } + + planner.AddChild(fromPr, aggSpec.ReAggregateSpec()) + return nil +} + +func createGroupTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a 
execute.Administration) (execute.Transformation, execute.Dataset, error) { + s, ok := spec.(*GroupProcedureSpec) + if !ok { + return nil, nil, fmt.Errorf("invalid spec type %T", spec) + } + cache := execute.NewBlockBuilderCache(a.Allocator()) + d := execute.NewDataset(id, mode, cache) + t := NewGroupTransformation(d, cache, s) + return t, d, nil +} + +type groupTransformation struct { + d execute.Dataset + cache execute.BlockBuilderCache + + keys []string + except []string + keep []string + + // Ignoring is true of len(keys) == 0 && len(except) > 0 + ignoring bool +} + +func NewGroupTransformation(d execute.Dataset, cache execute.BlockBuilderCache, spec *GroupProcedureSpec) *groupTransformation { + t := &groupTransformation{ + d: d, + cache: cache, + keys: spec.By, + except: spec.Except, + keep: spec.Keep, + ignoring: len(spec.By) == 0 && len(spec.Except) > 0, + } + sort.Strings(t.keys) + sort.Strings(t.except) + sort.Strings(t.keep) + return t +} + +func (t *groupTransformation) RetractBlock(id execute.DatasetID, meta execute.BlockMetadata) (err error) { + //TODO(nathanielc): Investigate if this can be smarter and not retract all blocks with the same time bounds. 
+ t.cache.ForEachBuilder(func(bk execute.BlockKey, builder execute.BlockBuilder) { + if err != nil { + return + } + if meta.Bounds().Equal(builder.Bounds()) { + err = t.d.RetractBlock(bk) + } + }) + return +} + +func (t *groupTransformation) Process(id execute.DatasetID, b execute.Block) error { + isFanIn := false + var tags execute.Tags + if t.ignoring { + // Assume we can fan in, we check for the false condition below + isFanIn = true + blockTags := b.Tags() + tags = make(execute.Tags, len(blockTags)) + cols := b.Cols() + for _, c := range cols { + if c.IsTag() { + found := false + for _, tag := range t.except { + if tag == c.Label { + found = true + break + } + } + if !found { + if !c.Common { + isFanIn = false + break + } + tags[c.Label] = blockTags[c.Label] + } + } + } + } else { + tags, isFanIn = b.Tags().Subset(t.keys) + } + if isFanIn { + return t.processFanIn(b, tags) + } else { + return t.processFanOut(b) + } +} + +// processFanIn assumes that all rows of b will be placed in the same builder. +func (t *groupTransformation) processFanIn(b execute.Block, tags execute.Tags) error { + builder, new := t.cache.BlockBuilder(blockMetadata{ + tags: tags, + bounds: b.Bounds(), + }) + if new { + // Determine columns of new block. + + // Add existing columns, skipping tags. + for _, c := range b.Cols() { + if !c.IsTag() { + builder.AddCol(c) + } + } + + // Add tags. + execute.AddTags(tags, builder) + + // Add columns for tags that are to be kept. + for _, k := range t.keep { + builder.AddCol(execute.ColMeta{ + Label: k, + Type: execute.TString, + Kind: execute.TagColKind, + }) + } + } + + // Construct map of builder column index to block column index. 
+ builderCols := builder.Cols() + blockCols := b.Cols() + colMap := make([]int, len(builderCols)) + for j, c := range builderCols { + found := false + for nj, nc := range blockCols { + if c.Label == nc.Label { + colMap[j] = nj + found = true + break + } + } + if !found { + return fmt.Errorf("block does not have the column %q", c.Label) + } + } + + execute.AppendBlock(b, builder, colMap) + return nil +} + +type tagMeta struct { + idx int + isCommon bool +} + +// processFanOut assumes each row of b could end up in a different builder. +func (t *groupTransformation) processFanOut(b execute.Block) error { + cols := b.Cols() + tagMap := make(map[string]tagMeta, len(cols)) + for j, c := range cols { + if c.IsTag() { + ignoreTag := false + for _, tag := range t.except { + if tag == c.Label { + ignoreTag = true + break + } + } + byTag := false + for _, tag := range t.keys { + if tag == c.Label { + byTag = true + break + } + } + keepTag := false + for _, tag := range t.keep { + if tag == c.Label { + keepTag = true + break + } + } + if (t.ignoring && !ignoreTag) || byTag || keepTag { + tagMap[c.Label] = tagMeta{ + idx: j, + isCommon: (t.ignoring && !keepTag) || (!t.ignoring && byTag), + } + } + } + } + + // Iterate over each row and append to specific builder + b.Times().DoTime(func(ts []execute.Time, rr execute.RowReader) { + for i := range ts { + tags := t.determineRowTags(tagMap, i, rr) + builder, new := t.cache.BlockBuilder(blockMetadata{ + tags: tags, + bounds: b.Bounds(), + }) + if new { + // Add existing columns, skipping tags. + for _, c := range cols { + if !c.IsTag() { + builder.AddCol(c) + continue + } + if meta, ok := tagMap[c.Label]; ok { + j := builder.AddCol(execute.ColMeta{ + Label: c.Label, + Type: execute.TString, + Kind: execute.TagColKind, + Common: meta.isCommon, + }) + if meta.isCommon { + builder.SetCommonString(j, tags[c.Label]) + } + } + } + } + // Construct map of builder column index to block column index. 
+ builderCols := builder.Cols() + colMap := make([]int, len(builderCols)) + for j, c := range builderCols { + for nj, nc := range cols { + if c.Label == nc.Label { + colMap[j] = nj + break + } + } + } + + // Add row to builder + execute.AppendRow(i, rr, builder, colMap) + } + }) + return nil +} + +func (t *groupTransformation) determineRowTags(tagMap map[string]tagMeta, i int, rr execute.RowReader) execute.Tags { + cols := rr.Cols() + tags := make(execute.Tags, len(cols)) + for t, meta := range tagMap { + if meta.isCommon { + tags[t] = rr.AtString(i, meta.idx) + } + } + return tags +} + +func (t *groupTransformation) UpdateWatermark(id execute.DatasetID, mark execute.Time) error { + return t.d.UpdateWatermark(mark) +} +func (t *groupTransformation) UpdateProcessingTime(id execute.DatasetID, pt execute.Time) error { + return t.d.UpdateProcessingTime(pt) +} +func (t *groupTransformation) Finish(id execute.DatasetID, err error) { + t.d.Finish(err) +} + +type blockMetadata struct { + tags execute.Tags + bounds execute.Bounds +} + +func (m blockMetadata) Tags() execute.Tags { + return m.tags +} +func (m blockMetadata) Bounds() execute.Bounds { + return m.bounds +} diff --git a/vendor/github.com/influxdata/ifql/functions/group_test.go b/vendor/github.com/influxdata/ifql/functions/group_test.go new file mode 100644 index 000000000..4cf522fe4 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/group_test.go @@ -0,0 +1,482 @@ +package functions_test + +import ( + "testing" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/query/plan/plantest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestGroupOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"group","kind":"group","spec":{"by":["t1","t2"],"keep":["t3","t4"]}}`) + op := 
&query.Operation{ + ID: "group", + Spec: &functions.GroupOpSpec{ + By: []string{"t1", "t2"}, + Keep: []string{"t3", "t4"}, + }, + } + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestGroup_Process(t *testing.T) { + testCases := []struct { + name string + spec *functions.GroupProcedureSpec + data []execute.Block + want []*executetest.Block + }{ + { + name: "fan in", + spec: &functions.GroupProcedureSpec{ + By: []string{"t1"}, + }, + data: []execute.Block{ + &executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0, "a", "x"}, + {execute.Time(2), 1.0, "a", "y"}, + }, + }, + &executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(1), 4.0, "b", "x"}, + {execute.Time(2), 7.0, "b", "y"}, + }, + }, + }, + want: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0, "a"}, + {execute.Time(2), 1.0, "a"}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 1, + Stop: 
3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(1), 4.0, "b"}, + {execute.Time(2), 7.0, "b"}, + }, + }, + }, + }, + { + name: "fan in ignoring", + spec: &functions.GroupProcedureSpec{ + Except: []string{"t2"}, + }, + data: []execute.Block{ + &executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + {Label: "t3", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0, "a", "m", "x"}, + {execute.Time(2), 1.0, "a", "n", "x"}, + }, + }, + &executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + {Label: "t3", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(1), 4.0, "b", "m", "x"}, + {execute.Time(2), 7.0, "b", "n", "x"}, + }, + }, + }, + want: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: 
execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t3", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0, "a", "x"}, + {execute.Time(2), 1.0, "a", "x"}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t3", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(1), 4.0, "b", "x"}, + {execute.Time(2), 7.0, "b", "x"}, + }, + }, + }, + }, + { + name: "fan in ignoring with keep", + spec: &functions.GroupProcedureSpec{ + Except: []string{"t2"}, + Keep: []string{"t2"}, + }, + data: []execute.Block{ + &executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + {Label: "t3", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0, "a", "m", "x"}, + {execute.Time(2), 1.0, "a", "n", "x"}, + }, + }, + &executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + {Label: "t3", Type: execute.TString, Kind: execute.TagColKind, Common: 
true}, + }, + Data: [][]interface{}{ + {execute.Time(1), 4.0, "b", "m", "x"}, + {execute.Time(2), 7.0, "b", "n", "x"}, + }, + }, + }, + want: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t3", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0, "a", "x", "m"}, + {execute.Time(2), 1.0, "a", "x", "n"}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t3", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(1), 4.0, "b", "x", "m"}, + {execute.Time(2), 7.0, "b", "x", "n"}, + }, + }, + }, + }, + { + name: "fan out", + spec: &functions.GroupProcedureSpec{ + By: []string{"t1"}, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0, "a"}, + {execute.Time(2), 1.0, "b"}, + }, + }}, + want: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: 
[]execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0, "a"}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(2), 1.0, "b"}, + }, + }, + }, + }, + { + name: "fan out ignoring", + spec: &functions.GroupProcedureSpec{ + Except: []string{"t2"}, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + {Label: "t3", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0, "a", "m", "x"}, + {execute.Time(2), 1.0, "a", "n", "y"}, + }, + }}, + want: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t3", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0, "a", "x"}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: 
[]execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t3", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(2), 1.0, "a", "y"}, + }, + }, + }, + }, + { + name: "fan out ignoring with keep", + spec: &functions.GroupProcedureSpec{ + Except: []string{"t2"}, + Keep: []string{"t2"}, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + {Label: "t3", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(1), 3.0, "a", "m", "x"}, + {execute.Time(2), 2.0, "a", "n", "x"}, + {execute.Time(3), 1.0, "a", "m", "y"}, + {execute.Time(4), 0.0, "a", "n", "y"}, + }, + }}, + want: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + {Label: "t3", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(1), 3.0, "a", "m", "x"}, + {execute.Time(2), 2.0, "a", "n", "x"}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, 
Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + {Label: "t3", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(3), 1.0, "a", "m", "y"}, + {execute.Time(4), 0.0, "a", "n", "y"}, + }, + }, + }, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + executetest.ProcessTestHelper( + t, + tc.data, + tc.want, + func(d execute.Dataset, c execute.BlockBuilderCache) execute.Transformation { + return functions.NewGroupTransformation(d, c, tc.spec) + }, + ) + }) + } +} + +func TestGroup_PushDown(t *testing.T) { + spec := &functions.GroupProcedureSpec{ + By: []string{"t1", "t2"}, + Keep: []string{"t3"}, + } + root := &plan.Procedure{ + Spec: new(functions.FromProcedureSpec), + } + want := &plan.Procedure{ + Spec: &functions.FromProcedureSpec{ + GroupingSet: true, + MergeAll: false, + GroupKeys: []string{"t1", "t2"}, + GroupKeep: []string{"t3"}, + }, + } + + plantest.PhysicalPlan_PushDown_TestHelper(t, spec, root, false, want) +} +func TestGroup_PushDown_Duplicate(t *testing.T) { + spec := &functions.GroupProcedureSpec{ + By: []string{"t1", "t2"}, + Keep: []string{"t3"}, + } + root := &plan.Procedure{ + Spec: &functions.FromProcedureSpec{ + GroupingSet: true, + MergeAll: true, + GroupKeep: []string{"t4"}, + }, + } + want := &plan.Procedure{ + // Expect the duplicate has been reset to zero values + Spec: new(functions.FromProcedureSpec), + } + + plantest.PhysicalPlan_PushDown_TestHelper(t, spec, root, true, want) +} diff --git a/vendor/github.com/influxdata/ifql/functions/integral.go b/vendor/github.com/influxdata/ifql/functions/integral.go new file mode 100644 index 000000000..ca16e554c --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/integral.go @@ -0,0 
+1,214 @@ +package functions + +import ( + "fmt" + "time" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" +) + +const IntegralKind = "integral" + +type IntegralOpSpec struct { + Unit query.Duration `json:"unit"` +} + +var integralSignature = query.DefaultFunctionSignature() + +func init() { + integralSignature.Params["unit"] = semantic.Duration + + query.RegisterFunction(IntegralKind, createIntegralOpSpec, integralSignature) + query.RegisterOpSpec(IntegralKind, newIntegralOp) + plan.RegisterProcedureSpec(IntegralKind, newIntegralProcedure, IntegralKind) + execute.RegisterTransformation(IntegralKind, createIntegralTransformation) +} + +func createIntegralOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + spec := new(IntegralOpSpec) + + if unit, ok, err := args.GetDuration("unit"); err != nil { + return nil, err + } else if ok { + spec.Unit = unit + } else { + //Default is 1s + spec.Unit = query.Duration(time.Second) + } + + return spec, nil +} + +func newIntegralOp() query.OperationSpec { + return new(IntegralOpSpec) +} + +func (s *IntegralOpSpec) Kind() query.OperationKind { + return IntegralKind +} + +type IntegralProcedureSpec struct { + Unit query.Duration `json:"unit"` +} + +func newIntegralProcedure(qs query.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { + spec, ok := qs.(*IntegralOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + + return &IntegralProcedureSpec{ + Unit: spec.Unit, + }, nil +} + +func (s *IntegralProcedureSpec) Kind() plan.ProcedureKind { + return IntegralKind +} +func (s *IntegralProcedureSpec) Copy() plan.ProcedureSpec { + ns := new(IntegralProcedureSpec) + *ns = *s + return ns +} + +func createIntegralTransformation(id execute.DatasetID, mode 
execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + s, ok := spec.(*IntegralProcedureSpec) + if !ok { + return nil, nil, fmt.Errorf("invalid spec type %T", spec) + } + cache := execute.NewBlockBuilderCache(a.Allocator()) + d := execute.NewDataset(id, mode, cache) + t := NewIntegralTransformation(d, cache, s, a.Bounds()) + return t, d, nil +} + +type integralTransformation struct { + d execute.Dataset + cache execute.BlockBuilderCache + bounds execute.Bounds + + unit time.Duration +} + +func NewIntegralTransformation(d execute.Dataset, cache execute.BlockBuilderCache, spec *IntegralProcedureSpec, bounds execute.Bounds) *integralTransformation { + return &integralTransformation{ + d: d, + cache: cache, + bounds: bounds, + unit: time.Duration(spec.Unit), + } +} + +func (t *integralTransformation) RetractBlock(id execute.DatasetID, meta execute.BlockMetadata) error { + return t.d.RetractBlock(execute.ToBlockKey(meta)) +} + +func (t *integralTransformation) Process(id execute.DatasetID, b execute.Block) error { + builder, new := t.cache.BlockBuilder(blockMetadata{ + bounds: t.bounds, + tags: b.Tags(), + }) + if new { + cols := b.Cols() + for j, c := range cols { + switch c.Kind { + case execute.TimeColKind: + builder.AddCol(c) + case execute.TagColKind: + if c.Common { + builder.AddCol(c) + builder.SetCommonString(j, b.Tags()[c.Label]) + } + case execute.ValueColKind: + dc := c + // Integral always results in a float64 + dc.Type = execute.TFloat + builder.AddCol(dc) + } + } + } + cols := b.Cols() + integrals := make([]*integral, len(cols)) + for j, c := range cols { + if c.IsValue() { + in := newIntegral(t.unit) + integrals[j] = in + } + } + + b.Times().DoTime(func(ts []execute.Time, rr execute.RowReader) { + for j, in := range integrals { + if in == nil { + continue + } + for i, t := range ts { + in.updateFloat(t, rr.AtFloat(i, j)) + } + } + }) + + timeIdx := execute.TimeIdx(cols) + 
builder.AppendTime(timeIdx, b.Bounds().Stop) + + for j, in := range integrals { + if in == nil { + continue + } + builder.AppendFloat(j, in.value()) + } + + return nil +} + +func (t *integralTransformation) UpdateWatermark(id execute.DatasetID, mark execute.Time) error { + return t.d.UpdateWatermark(mark) +} +func (t *integralTransformation) UpdateProcessingTime(id execute.DatasetID, pt execute.Time) error { + return t.d.UpdateProcessingTime(pt) +} +func (t *integralTransformation) Finish(id execute.DatasetID, err error) { + t.d.Finish(err) +} + +func newIntegral(unit time.Duration) *integral { + return &integral{ + first: true, + unit: float64(unit), + } +} + +type integral struct { + first bool + unit float64 + + pFloatValue float64 + pTime execute.Time + + sum float64 +} + +func (in *integral) value() float64 { + return in.sum +} + +func (in *integral) updateFloat(t execute.Time, v float64) { + if in.first { + in.pTime = t + in.pFloatValue = v + in.first = false + return + } + + elapsed := float64(t-in.pTime) / in.unit + in.sum += 0.5 * (v + in.pFloatValue) * elapsed + + in.pTime = t + in.pFloatValue = v +} diff --git a/vendor/github.com/influxdata/ifql/functions/integral_test.go b/vendor/github.com/influxdata/ifql/functions/integral_test.go new file mode 100644 index 000000000..20746ea37 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/integral_test.go @@ -0,0 +1,212 @@ +package functions_test + +import ( + "testing" + "time" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestIntegralOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"integral","kind":"integral","spec":{"unit":"1m"}}`) + op := &query.Operation{ + ID: "integral", + Spec: &functions.IntegralOpSpec{ + Unit: query.Duration(time.Minute), + }, + } + 
querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestIntegral_PassThrough(t *testing.T) { + executetest.TransformationPassThroughTestHelper(t, func(d execute.Dataset, c execute.BlockBuilderCache) execute.Transformation { + s := functions.NewIntegralTransformation( + d, + c, + &functions.IntegralProcedureSpec{}, + execute.Bounds{}, + ) + return s + }) +} + +func TestIntegral_Process(t *testing.T) { + testCases := []struct { + name string + spec *functions.IntegralProcedureSpec + bounds execute.Bounds + data []execute.Block + want []*executetest.Block + }{ + { + name: "float", + spec: &functions.IntegralProcedureSpec{ + Unit: 1, + }, + bounds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0}, + {execute.Time(2), 1.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(3), 1.5}, + }, + }}, + }, + { + name: "float with units", + spec: &functions.IntegralProcedureSpec{ + Unit: query.Duration(time.Second), + }, + bounds: execute.Bounds{ + Start: execute.Time(1 * time.Second), + Stop: execute.Time(4 * time.Second), + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: execute.Time(1 * time.Second), + Stop: execute.Time(4 * time.Second), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1 * 
time.Second), 2.0}, + {execute.Time(3 * time.Second), 1.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: execute.Time(1 * time.Second), + Stop: execute.Time(4 * time.Second), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(4 * time.Second), 3.0}, + }, + }}, + }, + { + name: "float with tags", + spec: &functions.IntegralProcedureSpec{ + Unit: 1, + }, + bounds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t", Type: execute.TString, Kind: execute.TagColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0, "a"}, + {execute.Time(2), 1.0, "b"}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(3), 1.5}, + }, + }}, + }, + { + name: "float with multiple values", + spec: &functions.IntegralProcedureSpec{ + Unit: 1, + }, + bounds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "x", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "y", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0, 20.0}, + {execute.Time(2), 1.0, 10.0}, + {execute.Time(3), 2.0, 20.0}, + {execute.Time(4), 1.0, 
10.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "x", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "y", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(5), 4.5, 45.0}, + }, + }}, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + executetest.ProcessTestHelper( + t, + tc.data, + tc.want, + func(d execute.Dataset, c execute.BlockBuilderCache) execute.Transformation { + return functions.NewIntegralTransformation(d, c, tc.spec, tc.bounds) + }, + ) + }) + } +} diff --git a/vendor/github.com/influxdata/ifql/functions/join.go b/vendor/github.com/influxdata/ifql/functions/join.go new file mode 100644 index 000000000..f6f9065e8 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/join.go @@ -0,0 +1,799 @@ +package functions + +import ( + "fmt" + "log" + "math" + "sort" + "sync" + + "github.com/influxdata/ifql/compiler" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" + "github.com/pkg/errors" +) + +const JoinKind = "join" +const MergeJoinKind = "merge-join" + +type JoinOpSpec struct { + // On is a list of tags on which to join. + On []string `json:"on"` + // Fn is a function accepting a single parameter. + // The parameter is map if records for each of the parent operations. + Fn *semantic.FunctionExpression `json:"fn"` + // TableNames are the names to give to each parent when populating the parameter for the function. + // The first parent is referenced by the first name and so forth. + // TODO(nathanielc): Change this to a map of parent operation IDs to names. + // Then make it possible for the transformation to map operation IDs to parent IDs. 
+ TableNames map[query.OperationID]string `json:"table_names"` +} + +var joinSignature = semantic.FunctionSignature{ + Params: map[string]semantic.Type{ + "tables": semantic.Object, + "fn": semantic.Function, + "on": semantic.NewArrayType(semantic.String), + }, + ReturnType: query.TableObjectType, + PipeArgument: "tables", +} + +func init() { + query.RegisterFunction(JoinKind, createJoinOpSpec, semantic.FunctionSignature{}) + query.RegisterOpSpec(JoinKind, newJoinOp) + //TODO(nathanielc): Allow for other types of join implementations + plan.RegisterProcedureSpec(MergeJoinKind, newMergeJoinProcedure, JoinKind) + execute.RegisterTransformation(MergeJoinKind, createMergeJoinTransformation) +} + +func createJoinOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + f, err := args.GetRequiredFunction("fn") + if err != nil { + return nil, err + } + resolved, err := f.Resolve() + if err != nil { + return nil, err + } + spec := &JoinOpSpec{ + Fn: resolved, + TableNames: make(map[query.OperationID]string), + } + + if array, ok, err := args.GetArray("on", semantic.String); err != nil { + return nil, err + } else if ok { + spec.On = array.AsStrings() + } + + if m, ok, err := args.GetObject("tables"); err != nil { + return nil, err + } else if ok { + for k, t := range m.Properties { + if t.Type().Kind() != semantic.Object { + return nil, fmt.Errorf("value for key %q in tables must be an object: got %v", k, t.Type().Kind()) + } + if t.Type() != query.TableObjectType { + return nil, fmt.Errorf("value for key %q in tables must be an table object: got %v", k, t.Type()) + } + p := t.(query.TableObject) + a.AddParent(p) + spec.TableNames[p.ID()] = k + } + } + + return spec, nil +} + +func newJoinOp() query.OperationSpec { + return new(JoinOpSpec) +} + +func (s *JoinOpSpec) Kind() query.OperationKind { + return JoinKind +} + +type MergeJoinProcedureSpec struct { + On []string `json:"keys"` + Fn *semantic.FunctionExpression `json:"f"` + TableNames 
map[plan.ProcedureID]string `json:"table_names"` +} + +func newMergeJoinProcedure(qs query.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { + spec, ok := qs.(*JoinOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + + tableNames := make(map[plan.ProcedureID]string, len(spec.TableNames)) + for qid, name := range spec.TableNames { + pid := pa.ConvertID(qid) + tableNames[pid] = name + } + + p := &MergeJoinProcedureSpec{ + On: spec.On, + Fn: spec.Fn, + TableNames: tableNames, + } + sort.Strings(p.On) + return p, nil +} + +func (s *MergeJoinProcedureSpec) Kind() plan.ProcedureKind { + return MergeJoinKind +} +func (s *MergeJoinProcedureSpec) Copy() plan.ProcedureSpec { + ns := new(MergeJoinProcedureSpec) + + ns.On = make([]string, len(s.On)) + copy(ns.On, s.On) + + ns.Fn = s.Fn.Copy().(*semantic.FunctionExpression) + + return ns +} + +func (s *MergeJoinProcedureSpec) ParentChanged(old, new plan.ProcedureID) { + if v, ok := s.TableNames[old]; ok { + delete(s.TableNames, old) + s.TableNames[new] = v + } +} + +func createMergeJoinTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + s, ok := spec.(*MergeJoinProcedureSpec) + if !ok { + return nil, nil, fmt.Errorf("invalid spec type %T", spec) + } + parents := a.Parents() + if len(parents) != 2 { + //TODO(nathanielc): Support n-way joins + return nil, nil, errors.New("joins currently must only have two parents") + } + + tableNames := make(map[execute.DatasetID]string, len(s.TableNames)) + for pid, name := range s.TableNames { + id := a.ConvertID(pid) + tableNames[id] = name + } + leftName := tableNames[parents[0]] + rightName := tableNames[parents[1]] + + joinFn, err := NewRowJoinFunction(s.Fn, parents, tableNames) + if err != nil { + return nil, nil, errors.Wrap(err, "invalid expression") + } + cache := NewMergeJoinCache(joinFn, a.Allocator(), leftName, rightName) 
+ d := execute.NewDataset(id, mode, cache) + t := NewMergeJoinTransformation(d, cache, s, parents, tableNames) + return t, d, nil +} + +type mergeJoinTransformation struct { + parents []execute.DatasetID + + mu sync.Mutex + + d execute.Dataset + cache MergeJoinCache + + leftID, rightID execute.DatasetID + leftName, rightName string + + parentState map[execute.DatasetID]*mergeJoinParentState + + keys []string +} + +func NewMergeJoinTransformation(d execute.Dataset, cache MergeJoinCache, spec *MergeJoinProcedureSpec, parents []execute.DatasetID, tableNames map[execute.DatasetID]string) *mergeJoinTransformation { + t := &mergeJoinTransformation{ + d: d, + cache: cache, + keys: spec.On, + leftID: parents[0], + rightID: parents[1], + leftName: tableNames[parents[0]], + rightName: tableNames[parents[1]], + } + t.parentState = make(map[execute.DatasetID]*mergeJoinParentState) + for _, id := range parents { + t.parentState[id] = new(mergeJoinParentState) + } + return t +} + +type mergeJoinParentState struct { + mark execute.Time + processing execute.Time + finished bool +} + +func (t *mergeJoinTransformation) RetractBlock(id execute.DatasetID, meta execute.BlockMetadata) error { + t.mu.Lock() + defer t.mu.Unlock() + + bm := blockMetadata{ + tags: meta.Tags().IntersectingSubset(t.keys), + bounds: meta.Bounds(), + } + return t.d.RetractBlock(execute.ToBlockKey(bm)) +} + +func (t *mergeJoinTransformation) Process(id execute.DatasetID, b execute.Block) error { + t.mu.Lock() + defer t.mu.Unlock() + + bm := blockMetadata{ + tags: b.Tags().IntersectingSubset(t.keys), + bounds: b.Bounds(), + } + tables := t.cache.Tables(bm) + + var table execute.BlockBuilder + switch id { + case t.leftID: + table = tables.left + case t.rightID: + table = tables.right + } + + colMap := t.addNewCols(b, table) + + times := b.Times() + times.DoTime(func(ts []execute.Time, rr execute.RowReader) { + for i := range ts { + execute.AppendRow(i, rr, table, colMap) + } + }) + return nil +} + +// addNewCols 
adds column to builder that exist on b and are part of the join keys. +// This method ensures that the left and right tables always have the same columns. +// A colMap is returned mapping cols of builder to cols of b. +func (t *mergeJoinTransformation) addNewCols(b execute.Block, builder execute.BlockBuilder) []int { + cols := b.Cols() + existing := builder.Cols() + colMap := make([]int, len(existing)) + for j, c := range cols { + // Skip common tags or tags that are not one of the join keys. + if c.IsTag() { + if c.Common { + continue + } + found := false + for _, k := range t.keys { + if c.Label == k { + found = true + break + } + } + // Column is not one of the join keys + if !found { + continue + } + } + // Check if column already exists + found := false + for ej, ec := range existing { + if c.Label == ec.Label { + colMap[ej] = j + found = true + break + } + } + // Add new column + if !found { + builder.AddCol(c) + colMap = append(colMap, j) + } + } + return colMap +} + +func (t *mergeJoinTransformation) UpdateWatermark(id execute.DatasetID, mark execute.Time) error { + t.mu.Lock() + defer t.mu.Unlock() + t.parentState[id].mark = mark + + min := execute.Time(math.MaxInt64) + for _, state := range t.parentState { + if state.mark < min { + min = state.mark + } + } + + return t.d.UpdateWatermark(min) +} + +func (t *mergeJoinTransformation) UpdateProcessingTime(id execute.DatasetID, pt execute.Time) error { + t.mu.Lock() + defer t.mu.Unlock() + t.parentState[id].processing = pt + + min := execute.Time(math.MaxInt64) + for _, state := range t.parentState { + if state.processing < min { + min = state.processing + } + } + + return t.d.UpdateProcessingTime(min) +} + +func (t *mergeJoinTransformation) Finish(id execute.DatasetID, err error) { + t.mu.Lock() + defer t.mu.Unlock() + if err != nil { + t.d.Finish(err) + } + + t.parentState[id].finished = true + finished := true + for _, state := range t.parentState { + finished = finished && state.finished + } + + if 
finished { + t.d.Finish(nil) + } +} + +type MergeJoinCache interface { + Tables(execute.BlockMetadata) *joinTables +} + +type mergeJoinCache struct { + data map[execute.BlockKey]*joinTables + alloc *execute.Allocator + + leftName, rightName string + + triggerSpec query.TriggerSpec + + joinFn *joinFunc +} + +func NewMergeJoinCache(joinFn *joinFunc, a *execute.Allocator, leftName, rightName string) *mergeJoinCache { + return &mergeJoinCache{ + data: make(map[execute.BlockKey]*joinTables), + joinFn: joinFn, + alloc: a, + leftName: leftName, + rightName: rightName, + } +} + +func (c *mergeJoinCache) BlockMetadata(key execute.BlockKey) execute.BlockMetadata { + return c.data[key] +} + +func (c *mergeJoinCache) Block(key execute.BlockKey) (execute.Block, error) { + return c.data[key].Join() +} + +func (c *mergeJoinCache) ForEach(f func(execute.BlockKey)) { + for bk := range c.data { + f(bk) + } +} + +func (c *mergeJoinCache) ForEachWithContext(f func(execute.BlockKey, execute.Trigger, execute.BlockContext)) { + for bk, tables := range c.data { + bc := execute.BlockContext{ + Bounds: tables.bounds, + Count: tables.Size(), + } + f(bk, tables.trigger, bc) + } +} + +func (c *mergeJoinCache) DiscardBlock(key execute.BlockKey) { + c.data[key].ClearData() +} + +func (c *mergeJoinCache) ExpireBlock(key execute.BlockKey) { + delete(c.data, key) +} + +func (c *mergeJoinCache) SetTriggerSpec(spec query.TriggerSpec) { + c.triggerSpec = spec +} + +func (c *mergeJoinCache) Tables(bm execute.BlockMetadata) *joinTables { + key := execute.ToBlockKey(bm) + tables := c.data[key] + if tables == nil { + tables = &joinTables{ + tags: bm.Tags(), + bounds: bm.Bounds(), + alloc: c.alloc, + left: execute.NewColListBlockBuilder(c.alloc), + right: execute.NewColListBlockBuilder(c.alloc), + leftName: c.leftName, + rightName: c.rightName, + trigger: execute.NewTriggerFromSpec(c.triggerSpec), + joinFn: c.joinFn, + } + tables.left.AddCol(execute.TimeCol) + tables.right.AddCol(execute.TimeCol) + 
c.data[key] = tables + } + return tables +} + +type joinTables struct { + tags execute.Tags + bounds execute.Bounds + + alloc *execute.Allocator + + left, right *execute.ColListBlockBuilder + leftName, rightName string + + trigger execute.Trigger + + joinFn *joinFunc +} + +func (t *joinTables) Bounds() execute.Bounds { + return t.bounds +} +func (t *joinTables) Tags() execute.Tags { + return t.tags +} +func (t *joinTables) Size() int { + return t.left.NRows() + t.right.NRows() +} + +func (t *joinTables) ClearData() { + t.left = execute.NewColListBlockBuilder(t.alloc) + t.right = execute.NewColListBlockBuilder(t.alloc) +} + +// Join performs a sort-merge join +func (t *joinTables) Join() (execute.Block, error) { + // First prepare the join function + left := t.left.RawBlock() + right := t.right.RawBlock() + err := t.joinFn.Prepare(map[string]*execute.ColListBlock{ + t.leftName: left, + t.rightName: right, + }) + if err != nil { + return nil, errors.Wrap(err, "failed to prepare join function") + } + // Create a builder to the result of the join + builder := execute.NewColListBlockBuilder(t.alloc) + builder.SetBounds(t.bounds) + builder.AddCol(execute.TimeCol) + + // Add new value columns in sorted order + properties := t.joinFn.Type().Properties() + keys := make([]string, 0, len(properties)) + for k := range properties { + keys = append(keys, k) + } + sort.Strings(keys) + for _, k := range keys { + builder.AddCol(execute.ColMeta{ + Label: k, + Type: execute.ConvertFromKind(properties[k].Kind()), + Kind: execute.ValueColKind, + }) + } + + // Add common tags + execute.AddTags(t.tags, builder) + + // Add non common tags + cols := t.left.Cols() + for _, c := range cols { + if c.IsTag() && !c.Common { + builder.AddCol(c) + } + } + + // Now that all columns have been added, keep a reference. 
+ bCols := builder.Cols() + + // Determine sort order for the joining tables + sortOrder := make([]string, len(cols)) + for i, c := range cols { + sortOrder[i] = c.Label + } + t.left.Sort(sortOrder, false) + t.right.Sort(sortOrder, false) + + var ( + leftSet, rightSet subset + leftKey, rightKey joinKey + ) + + rows := map[string]int{ + t.leftName: -1, + t.rightName: -1, + } + + leftSet, leftKey = t.advance(leftSet.Stop, left) + rightSet, rightKey = t.advance(rightSet.Stop, right) + for !leftSet.Empty() && !rightSet.Empty() { + if leftKey.Equal(rightKey) { + // Inner join + for l := leftSet.Start; l < leftSet.Stop; l++ { + for r := rightSet.Start; r < rightSet.Stop; r++ { + // Evaluate expression and add to block + rows[t.leftName] = l + rows[t.rightName] = r + m, err := t.joinFn.Eval(rows) + if err != nil { + return nil, errors.Wrap(err, "failed to evaluate join function") + } + for j, c := range bCols { + switch c.Kind { + case execute.TimeColKind: + builder.AppendTime(j, leftKey.Time) + case execute.TagColKind: + if c.Common { + continue + } + + builder.AppendString(j, leftKey.Tags[c.Label]) + case execute.ValueColKind: + v := m.Get(c.Label) + execute.AppendValue(builder, j, v) + default: + log.Printf("unexpected column %v", c) + } + } + } + } + leftSet, leftKey = t.advance(leftSet.Stop, left) + rightSet, rightKey = t.advance(rightSet.Stop, right) + } else if leftKey.Less(rightKey) { + leftSet, leftKey = t.advance(leftSet.Stop, left) + } else { + rightSet, rightKey = t.advance(rightSet.Stop, right) + } + } + return builder.Block() +} + +func (t *joinTables) advance(offset int, table *execute.ColListBlock) (subset, joinKey) { + if n := table.NRows(); n == offset { + return subset{Start: n, Stop: n}, joinKey{} + } + start := offset + key := rowKey(start, table) + s := subset{Start: start} + offset++ + for offset < table.NRows() && equalRowKeys(start, offset, table) { + offset++ + } + s.Stop = offset + return s, key +} + +type subset struct { + Start int + Stop int 
+} + +func (s subset) Empty() bool { + return s.Start == s.Stop +} + +func rowKey(i int, table *execute.ColListBlock) (k joinKey) { + k.Tags = make(map[string]string) + for j, c := range table.Cols() { + switch c.Kind { + case execute.TimeColKind: + k.Time = table.AtTime(i, j) + case execute.TagColKind: + k.Tags[c.Label] = table.AtString(i, j) + } + } + return +} + +func equalRowKeys(x, y int, table *execute.ColListBlock) bool { + for j, c := range table.Cols() { + if c.Label == execute.TimeColLabel { + if table.AtTime(x, j) != table.AtTime(y, j) { + return false + } + } else if c.IsTag() { + if table.AtString(x, j) != table.AtString(y, j) { + return false + } + } + } + return true +} + +type joinKey struct { + Time execute.Time + Tags map[string]string +} + +func (k joinKey) Equal(o joinKey) bool { + if k.Time == o.Time { + for t := range k.Tags { + if k.Tags[t] != o.Tags[t] { + return false + } + } + return true + } + return false +} +func (k joinKey) Less(o joinKey) bool { + if k.Time == o.Time { + for t := range k.Tags { + if k.Tags[t] != o.Tags[t] { + return k.Tags[t] < o.Tags[t] + } + } + } + return k.Time < o.Time +} + +type joinFunc struct { + fn *semantic.FunctionExpression + compilationCache *compiler.CompilationCache + scope compiler.Scope + + preparedFn compiler.Func + + recordName string + record *compiler.Object + + recordCols map[tableCol]int + references map[string][]string + + isWrap bool + wrapObj *compiler.Object + + tableData map[string]*execute.ColListBlock +} + +type tableCol struct { + table, col string +} + +func NewRowJoinFunction(fn *semantic.FunctionExpression, parentIDs []execute.DatasetID, tableNames map[execute.DatasetID]string) (*joinFunc, error) { + if len(fn.Params) != 1 { + return nil, errors.New("join function should only have one parameter for the map of tables") + } + return &joinFunc{ + compilationCache: compiler.NewCompilationCache(fn), + scope: make(compiler.Scope, 1), + references: findTableReferences(fn), + recordCols: 
make(map[tableCol]int), + record: compiler.NewObject(), + recordName: fn.Params[0].Key.Name, + wrapObj: compiler.NewObject(), + }, nil +} + +func (f *joinFunc) Prepare(tables map[string]*execute.ColListBlock) error { + f.tableData = tables + propertyTypes := make(map[string]semantic.Type, len(f.references)) + // Prepare types and recordcols + for tbl, b := range tables { + cols := b.Cols() + f.record.Set(tbl, compiler.NewObject()) + tblPropertyTypes := make(map[string]semantic.Type, len(f.references[tbl])) + for _, r := range f.references[tbl] { + found := false + for j, c := range cols { + if r == c.Label { + f.recordCols[tableCol{table: tbl, col: c.Label}] = j + tblPropertyTypes[r] = execute.ConvertToKind(c.Type) + found = true + break + } + } + if !found { + return fmt.Errorf("function references unknown column %q of table %q", r, tbl) + } + } + propertyTypes[tbl] = semantic.NewObjectType(tblPropertyTypes) + } + // Compile fn for given types + fn, err := f.compilationCache.Compile(map[string]semantic.Type{ + f.recordName: semantic.NewObjectType(propertyTypes), + }) + if err != nil { + return err + } + f.preparedFn = fn + + k := f.preparedFn.Type().Kind() + f.isWrap = k != semantic.Object + if f.isWrap { + f.wrapObj.SetPropertyType(execute.DefaultValueColLabel, f.preparedFn.Type()) + } + return nil +} + +func (f *joinFunc) Type() semantic.Type { + if f.isWrap { + return f.wrapObj.Type() + } + return f.preparedFn.Type() +} + +func (f *joinFunc) Eval(rows map[string]int) (*compiler.Object, error) { + for tbl, references := range f.references { + row := rows[tbl] + data := f.tableData[tbl] + obj := f.record.Get(tbl).(*compiler.Object) + for _, r := range references { + obj.Set(r, readValue(row, f.recordCols[tableCol{table: tbl, col: r}], data)) + } + f.record.Set(tbl, obj) + } + f.scope[f.recordName] = f.record + + v, err := f.preparedFn.Eval(f.scope) + if err != nil { + return nil, err + } + if f.isWrap { + f.wrapObj.Set(execute.DefaultValueColLabel, v) + return 
f.wrapObj, nil + } + return v.Object(), nil +} + +func readValue(i, j int, table *execute.ColListBlock) compiler.Value { + cols := table.Cols() + switch t := cols[j].Type; t { + case execute.TBool: + return compiler.NewBool(table.AtBool(i, j)) + case execute.TInt: + return compiler.NewInt(table.AtInt(i, j)) + case execute.TUInt: + return compiler.NewUInt(table.AtUInt(i, j)) + case execute.TFloat: + return compiler.NewFloat(table.AtFloat(i, j)) + case execute.TString: + return compiler.NewString(table.AtString(i, j)) + default: + execute.PanicUnknownType(t) + return nil + } +} + +func findTableReferences(fn *semantic.FunctionExpression) map[string][]string { + v := &tableReferenceVisitor{ + record: fn.Params[0].Key.Name, + refs: make(map[string][]string), + } + semantic.Walk(v, fn) + return v.refs +} + +type tableReferenceVisitor struct { + record string + refs map[string][]string +} + +func (c *tableReferenceVisitor) Visit(node semantic.Node) semantic.Visitor { + if col, ok := node.(*semantic.MemberExpression); ok { + if table, ok := col.Object.(*semantic.MemberExpression); ok { + if record, ok := table.Object.(*semantic.IdentifierExpression); ok && record.Name == c.record { + c.refs[table.Property] = append(c.refs[table.Property], col.Property) + return nil + } + } + } + return c +} + +func (c *tableReferenceVisitor) Done() {} diff --git a/vendor/github.com/influxdata/ifql/functions/join_test.go b/vendor/github.com/influxdata/ifql/functions/join_test.go new file mode 100644 index 000000000..db54df68b --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/join_test.go @@ -0,0 +1,968 @@ +package functions_test + +import ( + "sort" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/influxdata/ifql/ast" + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/plan" + 
"github.com/influxdata/ifql/query/plan/plantest" + "github.com/influxdata/ifql/query/querytest" + "github.com/influxdata/ifql/semantic" +) + +func TestJoin_NewQuery(t *testing.T) { + tests := []querytest.NewQueryTestCase{ + { + Name: "basic two-way join", + Raw: ` +a = from(db:"dbA") |> range(start:-1h) +b = from(db:"dbB") |> range(start:-1h) +join(tables:{a:a,b:b}, on:["host"], fn: (t) => t.a["_value"] + t.b["_value"])`, + Want: &query.Spec{ + Operations: []*query.Operation{ + { + ID: "from0", + Spec: &functions.FromOpSpec{ + Database: "dbA", + }, + }, + { + ID: "range1", + Spec: &functions.RangeOpSpec{ + Start: query.Time{ + Relative: -1 * time.Hour, + IsRelative: true, + }, + Stop: query.Time{ + IsRelative: true, + }, + }, + }, + { + ID: "from2", + Spec: &functions.FromOpSpec{ + Database: "dbB", + }, + }, + { + ID: "range3", + Spec: &functions.RangeOpSpec{ + Start: query.Time{ + Relative: -1 * time.Hour, + IsRelative: true, + }, + Stop: query.Time{ + IsRelative: true, + }, + }, + }, + { + ID: "join4", + Spec: &functions.JoinOpSpec{ + On: []string{"host"}, + TableNames: map[query.OperationID]string{"range1": "a", "range3": "b"}, + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "t"}}}, + Body: &semantic.BinaryExpression{ + Operator: ast.AdditionOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "t", + }, + Property: "a", + }, + Property: "_value", + }, + Right: &semantic.MemberExpression{ + Object: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "t", + }, + Property: "b", + }, + Property: "_value", + }, + }, + }, + }, + }, + }, + Edges: []query.Edge{ + {Parent: "from0", Child: "range1"}, + {Parent: "from2", Child: "range3"}, + {Parent: "range1", Child: "join4"}, + {Parent: "range3", Child: "join4"}, + }, + }, + }, + { + Name: "from with join with complex ast", + Raw: ` + a = from(db:"ifql") |> 
range(start:-1h) + b = from(db:"ifql") |> range(start:-1h) + join(tables:{a:a,b:b}, on:["t1"], fn: (t) => (t.a["_value"]-t.b["_value"])/t.b["_value"]) + `, + Want: &query.Spec{ + Operations: []*query.Operation{ + { + ID: "from0", + Spec: &functions.FromOpSpec{ + Database: "ifql", + }, + }, + { + ID: "range1", + Spec: &functions.RangeOpSpec{ + Start: query.Time{ + Relative: -1 * time.Hour, + IsRelative: true, + }, + Stop: query.Time{ + IsRelative: true, + }, + }, + }, + { + ID: "from2", + Spec: &functions.FromOpSpec{ + Database: "ifql", + }, + }, + { + ID: "range3", + Spec: &functions.RangeOpSpec{ + Start: query.Time{ + Relative: -1 * time.Hour, + IsRelative: true, + }, + Stop: query.Time{ + IsRelative: true, + }, + }, + }, + { + ID: "join4", + Spec: &functions.JoinOpSpec{ + On: []string{"t1"}, + TableNames: map[query.OperationID]string{"range1": "a", "range3": "b"}, + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "t"}}}, + Body: &semantic.BinaryExpression{ + Operator: ast.DivisionOperator, + Left: &semantic.BinaryExpression{ + Operator: ast.SubtractionOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "t", + }, + Property: "a", + }, + Property: "_value", + }, + Right: &semantic.MemberExpression{ + Object: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "t", + }, + Property: "b", + }, + Property: "_value", + }, + }, + Right: &semantic.MemberExpression{ + Object: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "t", + }, + Property: "b", + }, + Property: "_value", + }, + }, + }, + }, + }, + }, + Edges: []query.Edge{ + {Parent: "from0", Child: "range1"}, + {Parent: "from2", Child: "range3"}, + {Parent: "range1", Child: "join4"}, + {Parent: "range3", Child: "join4"}, + }, + }, + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.Name, func(t *testing.T) { + 
t.Parallel() + querytest.NewQueryTestHelper(t, tc) + }) + } +} + +func TestJoinOperation_Marshaling(t *testing.T) { + data := []byte(`{ + "id":"join", + "kind":"join", + "spec":{ + "on":["t1","t2"], + "table_names": {"sum1":"a","count3":"b"}, + "fn":{ + "params": [{"type":"FunctionParam","key":{"type":"Identifier","name":"t"}}], + "body":{ + "type":"BinaryExpression", + "operator": "+", + "left": { + "type": "MemberExpression", + "object": { + "type":"IdentifierExpression", + "name":"a" + }, + "property": "_value" + }, + "right":{ + "type": "MemberExpression", + "object": { + "type":"IdentifierExpression", + "name":"b" + }, + "property": "_value" + } + } + } + } + }`) + op := &query.Operation{ + ID: "join", + Spec: &functions.JoinOpSpec{ + On: []string{"t1", "t2"}, + TableNames: map[query.OperationID]string{"sum1": "a", "count3": "b"}, + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "t"}}}, + Body: &semantic.BinaryExpression{ + Operator: ast.AdditionOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "a", + }, + Property: "_value", + }, + Right: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "b", + }, + Property: "_value", + }, + }, + }, + }, + } + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestMergeJoin_Process(t *testing.T) { + addFunction := &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "t"}}}, + Body: &semantic.BinaryExpression{ + Operator: ast.AdditionOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "t", + }, + Property: "a", + }, + Property: "_value", + }, + Right: &semantic.MemberExpression{ + Object: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "t", + }, + Property: "b", + }, + Property: "_value", + }, + }, + } + passThroughFunc := 
&semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "t"}}}, + Body: &semantic.ObjectExpression{ + Properties: []*semantic.Property{ + { + Key: &semantic.Identifier{Name: "a"}, + Value: &semantic.MemberExpression{ + Object: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "t", + }, + Property: "a", + }, + Property: "_value", + }, + }, + { + Key: &semantic.Identifier{Name: "b"}, + Value: &semantic.MemberExpression{ + Object: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "t", + }, + Property: "b", + }, + Property: "_value", + }, + }, + }, + }, + } + parentID0 := plantest.RandomProcedureID() + parentID1 := plantest.RandomProcedureID() + tableNames := map[plan.ProcedureID]string{ + parentID0: "a", + parentID1: "b", + } + testCases := []struct { + skip bool + name string + spec *functions.MergeJoinProcedureSpec + data0 []*executetest.Block // data from parent 0 + data1 []*executetest.Block // data from parent 1 + want []*executetest.Block + }{ + { + name: "simple inner", + spec: &functions.MergeJoinProcedureSpec{ + Fn: addFunction, + TableNames: tableNames, + }, + data0: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0}, + {execute.Time(2), 2.0}, + {execute.Time(3), 3.0}, + }, + }, + }, + data1: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 10.0}, + {execute.Time(2), 20.0}, + {execute.Time(3), 30.0}, + }, + }, + }, + want: []*executetest.Block{ + { + Bnds: 
execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 11.0}, + {execute.Time(2), 22.0}, + {execute.Time(3), 33.0}, + }, + }, + }, + }, + { + name: "simple inner with ints", + spec: &functions.MergeJoinProcedureSpec{ + Fn: addFunction, + TableNames: tableNames, + }, + data0: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), int64(1)}, + {execute.Time(2), int64(2)}, + {execute.Time(3), int64(3)}, + }, + }, + }, + data1: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), int64(10)}, + {execute.Time(2), int64(20)}, + {execute.Time(3), int64(30)}, + }, + }, + }, + want: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), int64(11)}, + {execute.Time(2), int64(22)}, + {execute.Time(3), int64(33)}, + }, + }, + }, + }, + { + name: "inner with missing values", + spec: &functions.MergeJoinProcedureSpec{ + Fn: addFunction, + TableNames: tableNames, + }, + data0: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: 
"_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0}, + {execute.Time(2), 2.0}, + {execute.Time(3), 3.0}, + }, + }, + }, + data1: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 10.0}, + {execute.Time(3), 30.0}, + }, + }, + }, + want: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 11.0}, + {execute.Time(3), 33.0}, + }, + }, + }, + }, + { + name: "inner with multiple matches", + spec: &functions.MergeJoinProcedureSpec{ + Fn: addFunction, + TableNames: tableNames, + }, + data0: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0}, + {execute.Time(2), 2.0}, + {execute.Time(3), 3.0}, + }, + }, + }, + data1: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 10.0}, + {execute.Time(1), 10.1}, + {execute.Time(2), 20.0}, + {execute.Time(3), 30.0}, + {execute.Time(3), 30.1}, + }, + }, + }, + want: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: 
execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 11.0}, + {execute.Time(1), 11.1}, + {execute.Time(2), 22.0}, + {execute.Time(3), 33.0}, + {execute.Time(3), 33.1}, + }, + }, + }, + }, + { + name: "inner with common tags", + spec: &functions.MergeJoinProcedureSpec{ + On: []string{"t1"}, + Fn: addFunction, + TableNames: tableNames, + }, + data0: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0, "a"}, + {execute.Time(2), 2.0, "a"}, + {execute.Time(3), 3.0, "a"}, + }, + }, + }, + data1: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(1), 10.0, "a"}, + {execute.Time(2), 20.0, "a"}, + {execute.Time(3), 30.0, "a"}, + }, + }, + }, + want: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(1), 11.0, "a"}, + {execute.Time(2), 22.0, "a"}, + {execute.Time(3), 33.0, "a"}, + }, + }, + }, + }, + { + name: "inner with extra attributes", + spec: &functions.MergeJoinProcedureSpec{ + On: []string{"t1"}, + 
Fn: addFunction, + TableNames: tableNames, + }, + data0: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0, "a"}, + {execute.Time(1), 1.5, "b"}, + {execute.Time(2), 2.0, "a"}, + {execute.Time(2), 2.5, "b"}, + {execute.Time(3), 3.0, "a"}, + {execute.Time(3), 3.5, "b"}, + }, + }, + }, + data1: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 10.0, "a"}, + {execute.Time(1), 10.1, "b"}, + {execute.Time(2), 20.0, "a"}, + {execute.Time(2), 20.1, "b"}, + {execute.Time(3), 30.0, "a"}, + {execute.Time(3), 30.1, "b"}, + }, + }, + }, + want: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 11.0, "a"}, + {execute.Time(1), 11.6, "b"}, + {execute.Time(2), 22.0, "a"}, + {execute.Time(2), 22.6, "b"}, + {execute.Time(3), 33.0, "a"}, + {execute.Time(3), 33.6, "b"}, + }, + }, + }, + }, + { + name: "inner with tags and extra attributes", + spec: &functions.MergeJoinProcedureSpec{ + On: []string{"t1", "t2"}, + Fn: addFunction, + TableNames: tableNames, + }, + data0: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: 
[]execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0, "a", "x"}, + {execute.Time(1), 1.5, "a", "y"}, + {execute.Time(2), 2.0, "a", "x"}, + {execute.Time(2), 2.5, "a", "y"}, + {execute.Time(3), 3.0, "a", "x"}, + {execute.Time(3), 3.5, "a", "y"}, + }, + }, + }, + data1: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(1), 10.0, "a", "x"}, + {execute.Time(1), 10.1, "a", "y"}, + {execute.Time(2), 20.0, "a", "x"}, + {execute.Time(2), 20.1, "a", "y"}, + {execute.Time(3), 30.0, "a", "x"}, + {execute.Time(3), 30.1, "a", "y"}, + }, + }, + }, + want: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(1), 11.0, "a", "x"}, + {execute.Time(1), 11.6, "a", "y"}, + {execute.Time(2), 22.0, "a", "x"}, + {execute.Time(2), 22.6, "a", "y"}, + {execute.Time(3), 33.0, "a", "x"}, + {execute.Time(3), 33.6, "a", "y"}, + }, + }, + }, + }, + { + name: "simple inner with multiple 
values", + spec: &functions.MergeJoinProcedureSpec{ + Fn: passThroughFunc, + TableNames: tableNames, + }, + data0: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0}, + {execute.Time(2), 2.0}, + {execute.Time(3), 3.0}, + }, + }, + }, + data1: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 10.0}, + {execute.Time(2), 20.0}, + {execute.Time(3), 30.0}, + }, + }, + }, + want: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "a", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "b", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0, 10.0}, + {execute.Time(2), 2.0, 20.0}, + {execute.Time(3), 3.0, 30.0}, + }, + }, + }, + }, + { + name: "inner with multiple value, tags and extra attributes", + spec: &functions.MergeJoinProcedureSpec{ + On: []string{"t1", "t2"}, + Fn: passThroughFunc, + TableNames: tableNames, + }, + data0: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0, 
"a", "x"}, + {execute.Time(1), 1.5, "a", "y"}, + {execute.Time(2), 2.0, "a", "x"}, + {execute.Time(2), 2.5, "a", "y"}, + {execute.Time(3), 3.0, "a", "x"}, + {execute.Time(3), 3.5, "a", "y"}, + }, + }, + }, + data1: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(1), 10.0, "a", "x"}, + {execute.Time(1), 10.1, "a", "y"}, + {execute.Time(2), 20.0, "a", "x"}, + {execute.Time(2), 20.1, "a", "y"}, + {execute.Time(3), 30.0, "a", "x"}, + {execute.Time(3), 30.1, "a", "y"}, + }, + }, + }, + want: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 10, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "a", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "b", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0, 10.0, "a", "x"}, + {execute.Time(1), 1.5, 10.1, "a", "y"}, + {execute.Time(2), 2.0, 20.0, "a", "x"}, + {execute.Time(2), 2.5, 20.1, "a", "y"}, + {execute.Time(3), 3.0, 30.0, "a", "x"}, + {execute.Time(3), 3.5, 30.1, "a", "y"}, + }, + }, + }, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + if tc.skip { + t.Skip() + } + parents := []execute.DatasetID{execute.DatasetID(parentID0), execute.DatasetID(parentID1)} + + tableNames := make(map[execute.DatasetID]string, len(tc.spec.TableNames)) + for pid, name := range 
tc.spec.TableNames { + tableNames[execute.DatasetID(pid)] = name + } + + d := executetest.NewDataset(executetest.RandomDatasetID()) + joinExpr, err := functions.NewRowJoinFunction(tc.spec.Fn, parents, tableNames) + if err != nil { + t.Fatal(err) + } + c := functions.NewMergeJoinCache(joinExpr, executetest.UnlimitedAllocator, tableNames[parents[0]], tableNames[parents[1]]) + c.SetTriggerSpec(execute.DefaultTriggerSpec) + jt := functions.NewMergeJoinTransformation(d, c, tc.spec, parents, tableNames) + + l := len(tc.data0) + if len(tc.data1) > l { + l = len(tc.data1) + } + for i := 0; i < l; i++ { + if i < len(tc.data0) { + if err := jt.Process(parents[0], tc.data0[i]); err != nil { + t.Fatal(err) + } + } + if i < len(tc.data1) { + if err := jt.Process(parents[1], tc.data1[i]); err != nil { + t.Fatal(err) + } + } + } + + got := executetest.BlocksFromCache(c) + + sort.Sort(executetest.SortedBlocks(got)) + sort.Sort(executetest.SortedBlocks(tc.want)) + + if !cmp.Equal(tc.want, got) { + t.Errorf("unexpected blocks -want/+got\n%s", cmp.Diff(tc.want, got)) + } + }) + } +} diff --git a/vendor/github.com/influxdata/ifql/functions/last.go b/vendor/github.com/influxdata/ifql/functions/last.go new file mode 100644 index 000000000..ff8ab5b2e --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/last.go @@ -0,0 +1,187 @@ +package functions + +import ( + "fmt" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" +) + +const LastKind = "last" + +type LastOpSpec struct { + Column string `json:"column"` + UseRowTime bool `json:"useRowtime"` +} + +var lastSignature = query.DefaultFunctionSignature() + +func init() { + lastSignature.Params["column"] = semantic.String + lastSignature.Params["useRowTime"] = semantic.Bool + + query.RegisterFunction(LastKind, createLastOpSpec, lastSignature) + query.RegisterOpSpec(LastKind, newLastOp) + 
plan.RegisterProcedureSpec(LastKind, newLastProcedure, LastKind) + execute.RegisterTransformation(LastKind, createLastTransformation) +} + +func createLastOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + spec := new(LastOpSpec) + if c, ok, err := args.GetString("column"); err != nil { + return nil, err + } else if ok { + spec.Column = c + } + if useRowTime, ok, err := args.GetBool("useRowTime"); err != nil { + return nil, err + } else if ok { + spec.UseRowTime = useRowTime + } + return spec, nil +} + +func newLastOp() query.OperationSpec { + return new(LastOpSpec) +} + +func (s *LastOpSpec) Kind() query.OperationKind { + return LastKind +} + +type LastProcedureSpec struct { + Column string + UseRowTime bool +} + +func newLastProcedure(qs query.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { + spec, ok := qs.(*LastOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + return &LastProcedureSpec{ + Column: spec.Column, + UseRowTime: spec.UseRowTime, + }, nil +} + +func (s *LastProcedureSpec) Kind() plan.ProcedureKind { + return LastKind +} + +func (s *LastProcedureSpec) PushDownRules() []plan.PushDownRule { + return []plan.PushDownRule{{ + Root: FromKind, + Through: []plan.ProcedureKind{GroupKind, LimitKind, FilterKind}, + Match: func(spec plan.ProcedureSpec) bool { + selectSpec := spec.(*FromProcedureSpec) + return !selectSpec.AggregateSet + }, + }} +} + +func (s *LastProcedureSpec) PushDown(root *plan.Procedure, dup func() *plan.Procedure) { + selectSpec := root.Spec.(*FromProcedureSpec) + if selectSpec.BoundsSet || selectSpec.LimitSet || selectSpec.DescendingSet { + root = dup() + selectSpec = root.Spec.(*FromProcedureSpec) + selectSpec.BoundsSet = false + selectSpec.Bounds = plan.BoundsSpec{} + selectSpec.LimitSet = false + selectSpec.PointsLimit = 0 + selectSpec.SeriesLimit = 0 + selectSpec.SeriesOffset = 0 
+ selectSpec.DescendingSet = false + selectSpec.Descending = false + return + } + selectSpec.BoundsSet = true + selectSpec.Bounds = plan.BoundsSpec{ + Start: query.MinTime, + Stop: query.Now, + } + selectSpec.LimitSet = true + selectSpec.PointsLimit = 1 + selectSpec.DescendingSet = true + selectSpec.Descending = true +} + +func (s *LastProcedureSpec) Copy() plan.ProcedureSpec { + ns := new(LastProcedureSpec) + ns.Column = s.Column + ns.UseRowTime = s.UseRowTime + return ns +} + +type LastSelector struct { + rows []execute.Row +} + +func createLastTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + ps, ok := spec.(*LastProcedureSpec) + if !ok { + return nil, nil, fmt.Errorf("invalid spec type %T", ps) + } + t, d := execute.NewRowSelectorTransformationAndDataset(id, mode, a.Bounds(), new(LastSelector), ps.Column, ps.UseRowTime, a.Allocator()) + return t, d, nil +} + +func (s *LastSelector) reset() { + s.rows = nil +} +func (s *LastSelector) NewBoolSelector() execute.DoBoolRowSelector { + s.reset() + return s +} + +func (s *LastSelector) NewIntSelector() execute.DoIntRowSelector { + s.reset() + return s +} + +func (s *LastSelector) NewUIntSelector() execute.DoUIntRowSelector { + s.reset() + return s +} + +func (s *LastSelector) NewFloatSelector() execute.DoFloatRowSelector { + s.reset() + return s +} + +func (s *LastSelector) NewStringSelector() execute.DoStringRowSelector { + s.reset() + return s +} + +func (s *LastSelector) Rows() []execute.Row { + return s.rows +} + +func (s *LastSelector) selectLast(l int, rr execute.RowReader) { + if l > 0 { + s.rows = []execute.Row{execute.ReadRow(l-1, rr)} + } +} + +func (s *LastSelector) DoBool(vs []bool, rr execute.RowReader) { + s.selectLast(len(vs), rr) +} +func (s *LastSelector) DoInt(vs []int64, rr execute.RowReader) { + s.selectLast(len(vs), rr) +} +func (s *LastSelector) DoUInt(vs []uint64, rr 
execute.RowReader) { + s.selectLast(len(vs), rr) +} +func (s *LastSelector) DoFloat(vs []float64, rr execute.RowReader) { + s.selectLast(len(vs), rr) +} +func (s *LastSelector) DoString(vs []string, rr execute.RowReader) { + s.selectLast(len(vs), rr) +} diff --git a/vendor/github.com/influxdata/ifql/functions/last_test.go b/vendor/github.com/influxdata/ifql/functions/last_test.go new file mode 100644 index 000000000..e4b1396fe --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/last_test.go @@ -0,0 +1,132 @@ +package functions_test + +import ( + "testing" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/query/plan/plantest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestLastOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"last","kind":"last","spec":{"useRowTime":true}}`) + op := &query.Operation{ + ID: "last", + Spec: &functions.LastOpSpec{ + UseRowTime: true, + }, + } + + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestLast_Process(t *testing.T) { + testCases := []struct { + name string + data *executetest.Block + want []execute.Row + }{ + { + name: "last", + data: &executetest.Block{ + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime}, + {Label: "_value", Type: execute.TFloat}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(0), 0.0, "a", "y"}, + {execute.Time(10), 5.0, "a", "x"}, + {execute.Time(20), 9.0, "a", "y"}, + {execute.Time(30), 4.0, "a", "x"}, + {execute.Time(40), 6.0, "a", "y"}, + {execute.Time(50), 8.0, "a", "x"}, + {execute.Time(60), 1.0, "a", "y"}, + {execute.Time(70), 2.0, "a", "x"}, + {execute.Time(80), 3.0, "a", 
"y"}, + {execute.Time(90), 7.0, "a", "x"}, + }, + }, + want: []execute.Row{{ + Values: []interface{}{execute.Time(90), 7.0, "a", "x"}, + }}, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + executetest.RowSelectorFuncTestHelper( + t, + new(functions.LastSelector), + tc.data, + tc.want, + ) + }) + } +} + +func BenchmarkLast(b *testing.B) { + executetest.RowSelectorFuncBenchmarkHelper(b, new(functions.LastSelector), NormalBlock) +} + +func TestLast_PushDown_Match(t *testing.T) { + spec := new(functions.LastProcedureSpec) + from := new(functions.FromProcedureSpec) + + // Should not match when an aggregate is set + from.AggregateSet = true + plantest.PhysicalPlan_PushDown_Match_TestHelper(t, spec, from, []bool{false}) + + // Should match when no aggregate is set + from.AggregateSet = false + plantest.PhysicalPlan_PushDown_Match_TestHelper(t, spec, from, []bool{true}) +} + +func TestLast_PushDown(t *testing.T) { + spec := new(functions.LastProcedureSpec) + root := &plan.Procedure{ + Spec: new(functions.FromProcedureSpec), + } + want := &plan.Procedure{ + Spec: &functions.FromProcedureSpec{ + BoundsSet: true, + Bounds: plan.BoundsSpec{ + Start: query.MinTime, + Stop: query.Now, + }, + LimitSet: true, + PointsLimit: 1, + DescendingSet: true, + Descending: true, + }, + } + + plantest.PhysicalPlan_PushDown_TestHelper(t, spec, root, false, want) +} +func TestLast_PushDown_Duplicate(t *testing.T) { + spec := new(functions.LastProcedureSpec) + root := &plan.Procedure{ + Spec: &functions.FromProcedureSpec{ + BoundsSet: true, + Bounds: plan.BoundsSpec{ + Start: query.MinTime, + Stop: query.Now, + }, + LimitSet: true, + PointsLimit: 1, + DescendingSet: true, + Descending: true, + }, + } + want := &plan.Procedure{ + // Expect the duplicate has been reset to zero values + Spec: new(functions.FromProcedureSpec), + } + + plantest.PhysicalPlan_PushDown_TestHelper(t, spec, root, true, want) +} diff --git 
a/vendor/github.com/influxdata/ifql/functions/limit.go b/vendor/github.com/influxdata/ifql/functions/limit.go new file mode 100644 index 000000000..1ca56910a --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/limit.go @@ -0,0 +1,202 @@ +package functions + +import ( + "fmt" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" +) + +const LimitKind = "limit" + +// LimitOpSpec limits the number of rows returned per block. +// Currently offset is not supported. +type LimitOpSpec struct { + N int64 `json:"n"` + //Offset int64 `json:"offset"` +} + +var limitSignature = query.DefaultFunctionSignature() + +func init() { + integralSignature.Params["n"] = semantic.Int + + query.RegisterFunction(LimitKind, createLimitOpSpec, limitSignature) + query.RegisterOpSpec(LimitKind, newLimitOp) + plan.RegisterProcedureSpec(LimitKind, newLimitProcedure, LimitKind) + // TODO register a range transformation. Currently range is only supported if it is pushed down into a select procedure. 
+ execute.RegisterTransformation(LimitKind, createLimitTransformation) +} + +func createLimitOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + spec := new(LimitOpSpec) + + n, err := args.GetRequiredInt("n") + if err != nil { + return nil, err + } + spec.N = n + + return spec, nil +} + +func newLimitOp() query.OperationSpec { + return new(LimitOpSpec) +} + +func (s *LimitOpSpec) Kind() query.OperationKind { + return LimitKind +} + +type LimitProcedureSpec struct { + N int64 `json:"n"` + //Offset int64 `json:"offset"` +} + +func newLimitProcedure(qs query.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { + spec, ok := qs.(*LimitOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + return &LimitProcedureSpec{ + N: spec.N, + //Offset: spec.Offset, + }, nil +} + +func (s *LimitProcedureSpec) Kind() plan.ProcedureKind { + return LimitKind +} +func (s *LimitProcedureSpec) Copy() plan.ProcedureSpec { + ns := new(LimitProcedureSpec) + ns.N = s.N + //ns.Offset = s.Offset + return ns +} + +func (s *LimitProcedureSpec) PushDownRules() []plan.PushDownRule { + return []plan.PushDownRule{{ + Root: FromKind, + Through: []plan.ProcedureKind{GroupKind, RangeKind, FilterKind}, + }} +} +func (s *LimitProcedureSpec) PushDown(root *plan.Procedure, dup func() *plan.Procedure) { + selectSpec := root.Spec.(*FromProcedureSpec) + if selectSpec.LimitSet { + root = dup() + selectSpec = root.Spec.(*FromProcedureSpec) + selectSpec.LimitSet = false + selectSpec.PointsLimit = 0 + selectSpec.SeriesLimit = 0 + selectSpec.SeriesOffset = 0 + return + } + selectSpec.LimitSet = true + selectSpec.PointsLimit = s.N + selectSpec.SeriesLimit = 0 + selectSpec.SeriesOffset = 0 +} + +func createLimitTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, 
error) { + s, ok := spec.(*LimitProcedureSpec) + if !ok { + return nil, nil, fmt.Errorf("invalid spec type %T", spec) + } + cache := execute.NewBlockBuilderCache(a.Allocator()) + d := execute.NewDataset(id, mode, cache) + t := NewLimitTransformation(d, cache, s) + return t, d, nil +} + +type limitTransformation struct { + d execute.Dataset + cache execute.BlockBuilderCache + + n int + + colMap []int +} + +func NewLimitTransformation(d execute.Dataset, cache execute.BlockBuilderCache, spec *LimitProcedureSpec) *limitTransformation { + return &limitTransformation{ + d: d, + cache: cache, + n: int(spec.N), + } +} + +func (t *limitTransformation) RetractBlock(id execute.DatasetID, meta execute.BlockMetadata) error { + return t.d.RetractBlock(execute.ToBlockKey(meta)) +} + +func (t *limitTransformation) Process(id execute.DatasetID, b execute.Block) error { + builder, new := t.cache.BlockBuilder(b) + if new { + execute.AddBlockCols(b, builder) + } + + ncols := builder.NCols() + if cap(t.colMap) < ncols { + t.colMap = make([]int, ncols) + for j := range t.colMap { + t.colMap[j] = j + } + } else { + t.colMap = t.colMap[:ncols] + } + + // AppendBlock with limit + n := t.n + times := b.Times() + + cols := builder.Cols() + timeIdx := execute.TimeIdx(cols) + times.DoTime(func(ts []execute.Time, rr execute.RowReader) { + l := len(ts) + if l > n { + l = n + } + n -= l + builder.AppendTimes(timeIdx, ts[:l]) + for j, c := range cols { + if j == timeIdx || c.Common { + continue + } + for i := range ts[:l] { + switch c.Type { + case execute.TBool: + builder.AppendBool(j, rr.AtBool(i, t.colMap[j])) + case execute.TInt: + builder.AppendInt(j, rr.AtInt(i, t.colMap[j])) + case execute.TUInt: + builder.AppendUInt(j, rr.AtUInt(i, t.colMap[j])) + case execute.TFloat: + builder.AppendFloat(j, rr.AtFloat(i, t.colMap[j])) + case execute.TString: + builder.AppendString(j, rr.AtString(i, t.colMap[j])) + case execute.TTime: + builder.AppendTime(j, rr.AtTime(i, t.colMap[j])) + default: + 
execute.PanicUnknownType(c.Type) + } + } + } + }) + return nil +} + +func (t *limitTransformation) UpdateWatermark(id execute.DatasetID, mark execute.Time) error { + return t.d.UpdateWatermark(mark) +} +func (t *limitTransformation) UpdateProcessingTime(id execute.DatasetID, pt execute.Time) error { + return t.d.UpdateProcessingTime(pt) +} +func (t *limitTransformation) Finish(id execute.DatasetID, err error) { + t.d.Finish(err) +} diff --git a/vendor/github.com/influxdata/ifql/functions/limit_test.go b/vendor/github.com/influxdata/ifql/functions/limit_test.go new file mode 100644 index 000000000..baa28370c --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/limit_test.go @@ -0,0 +1,183 @@ +package functions_test + +import ( + "testing" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/query/plan/plantest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestLimitOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"limit","kind":"limit","spec":{"n":10}}`) + op := &query.Operation{ + ID: "limit", + Spec: &functions.LimitOpSpec{ + N: 10, + }, + } + + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestLimit_Process(t *testing.T) { + testCases := []struct { + name string + spec *functions.LimitProcedureSpec + data []execute.Block + want []*executetest.Block + }{ + { + name: "one block", + spec: &functions.LimitProcedureSpec{ + N: 1, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime}, + {Label: "_value", Type: execute.TFloat}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0}, + {execute.Time(2), 1.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + 
ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime}, + {Label: "_value", Type: execute.TFloat}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0}, + }, + }}, + }, + { + name: "multiple blocks", + spec: &functions.LimitProcedureSpec{ + N: 2, + }, + data: []execute.Block{ + &executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime}, + {Label: "_value", Type: execute.TFloat}, + }, + Data: [][]interface{}{ + {execute.Time(1), 3.0}, + {execute.Time(2), 2.0}, + {execute.Time(2), 1.0}, + }, + }, + &executetest.Block{ + Bnds: execute.Bounds{ + Start: 3, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime}, + {Label: "_value", Type: execute.TFloat}, + }, + Data: [][]interface{}{ + {execute.Time(3), 3.0}, + {execute.Time(3), 2.0}, + {execute.Time(4), 1.0}, + }, + }, + }, + want: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime}, + {Label: "_value", Type: execute.TFloat}, + }, + Data: [][]interface{}{ + {execute.Time(1), 3.0}, + {execute.Time(2), 2.0}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 3, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime}, + {Label: "_value", Type: execute.TFloat}, + }, + Data: [][]interface{}{ + {execute.Time(3), 3.0}, + {execute.Time(3), 2.0}, + }, + }, + }, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + executetest.ProcessTestHelper( + t, + tc.data, + tc.want, + func(d execute.Dataset, c execute.BlockBuilderCache) execute.Transformation { + return functions.NewLimitTransformation(d, c, tc.spec) + }, + ) + }) + } +} + +func TestLimit_PushDown(t *testing.T) { + spec := &functions.LimitProcedureSpec{ + N: 42, + } + root := &plan.Procedure{ + Spec: new(functions.FromProcedureSpec), + } + want := &plan.Procedure{ + Spec: 
&functions.FromProcedureSpec{ + LimitSet: true, + PointsLimit: 42, + }, + } + + plantest.PhysicalPlan_PushDown_TestHelper(t, spec, root, false, want) +} +func TestLimit_PushDown_Duplicate(t *testing.T) { + spec := &functions.LimitProcedureSpec{ + N: 9, + } + root := &plan.Procedure{ + Spec: &functions.FromProcedureSpec{ + LimitSet: true, + PointsLimit: 42, + }, + } + want := &plan.Procedure{ + // Expect the duplicate has been reset to zero values + Spec: new(functions.FromProcedureSpec), + } + + plantest.PhysicalPlan_PushDown_TestHelper(t, spec, root, true, want) +} diff --git a/vendor/github.com/influxdata/ifql/functions/map.go b/vendor/github.com/influxdata/ifql/functions/map.go new file mode 100644 index 000000000..dc65ef374 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/map.go @@ -0,0 +1,192 @@ +package functions + +import ( + "fmt" + "log" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" +) + +const MapKind = "map" + +type MapOpSpec struct { + Fn *semantic.FunctionExpression `json:"fn"` +} + +var mapSignature = query.DefaultFunctionSignature() + +func init() { + mapSignature.Params["fn"] = semantic.Function + + query.RegisterFunction(MapKind, createMapOpSpec, mapSignature) + query.RegisterOpSpec(MapKind, newMapOp) + plan.RegisterProcedureSpec(MapKind, newMapProcedure, MapKind) + execute.RegisterTransformation(MapKind, createMapTransformation) +} + +func createMapOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + f, err := args.GetRequiredFunction("fn") + if err != nil { + return nil, err + } + resolved, err := f.Resolve() + if err != nil { + return nil, err + } + return &MapOpSpec{ + Fn: resolved, + }, nil +} + +func newMapOp() query.OperationSpec { + return new(MapOpSpec) +} + +func (s *MapOpSpec) Kind() 
query.OperationKind { + return MapKind +} + +type MapProcedureSpec struct { + Fn *semantic.FunctionExpression +} + +func newMapProcedure(qs query.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { + spec, ok := qs.(*MapOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + + return &MapProcedureSpec{ + Fn: spec.Fn, + }, nil +} + +func (s *MapProcedureSpec) Kind() plan.ProcedureKind { + return MapKind +} +func (s *MapProcedureSpec) Copy() plan.ProcedureSpec { + ns := new(MapProcedureSpec) + ns.Fn = s.Fn.Copy().(*semantic.FunctionExpression) + return ns +} + +func createMapTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + s, ok := spec.(*MapProcedureSpec) + if !ok { + return nil, nil, fmt.Errorf("invalid spec type %T", spec) + } + cache := execute.NewBlockBuilderCache(a.Allocator()) + d := execute.NewDataset(id, mode, cache) + t, err := NewMapTransformation(d, cache, s) + if err != nil { + return nil, nil, err + } + return t, d, nil +} + +type mapTransformation struct { + d execute.Dataset + cache execute.BlockBuilderCache + + fn *execute.RowMapFn +} + +func NewMapTransformation(d execute.Dataset, cache execute.BlockBuilderCache, spec *MapProcedureSpec) (*mapTransformation, error) { + fn, err := execute.NewRowMapFn(spec.Fn) + if err != nil { + return nil, err + } + return &mapTransformation{ + d: d, + cache: cache, + fn: fn, + }, nil +} + +func (t *mapTransformation) RetractBlock(id execute.DatasetID, meta execute.BlockMetadata) error { + return t.d.RetractBlock(execute.ToBlockKey(meta)) +} + +func (t *mapTransformation) Process(id execute.DatasetID, b execute.Block) error { + // Prepare the functions for the column types. + cols := b.Cols() + err := t.fn.Prepare(cols) + if err != nil { + // TODO(nathanielc): Should we not fail the query for failed compilation? 
+ return err + } + + builder, new := t.cache.BlockBuilder(b) + if !new { + return fmt.Errorf("received duplicate block bounds: %v tags: %v", b.Bounds(), b.Tags()) + } + + // Add tag columns to builder + colMap := make([]int, 0, len(cols)) + for j, c := range cols { + if !c.IsValue() { + nj := builder.AddCol(c) + if c.Common { + builder.SetCommonString(nj, b.Tags()[c.Label]) + } + colMap = append(colMap, j) + } + } + + mapType := t.fn.Type() + // Add new value columns + for k, t := range mapType.Properties() { + builder.AddCol(execute.ColMeta{ + Label: k, + Type: execute.ConvertFromKind(t.Kind()), + Kind: execute.ValueColKind, + }) + } + + bCols := builder.Cols() + // Append modified rows + b.Times().DoTime(func(ts []execute.Time, rr execute.RowReader) { + for i := range ts { + m, err := t.fn.Eval(i, rr) + if err != nil { + log.Printf("failed to evaluate map expression: %v", err) + continue + } + for j, c := range bCols { + if c.Common { + // We already set the common tag values + continue + } + switch c.Kind { + case execute.TimeColKind: + builder.AppendTime(j, rr.AtTime(i, colMap[j])) + case execute.TagColKind: + builder.AppendString(j, rr.AtString(i, colMap[j])) + case execute.ValueColKind: + v := m.Get(c.Label) + execute.AppendValue(builder, j, v) + default: + log.Printf("unknown column kind %v", c.Kind) + } + } + } + }) + return nil +} + +func (t *mapTransformation) UpdateWatermark(id execute.DatasetID, mark execute.Time) error { + return t.d.UpdateWatermark(mark) +} +func (t *mapTransformation) UpdateProcessingTime(id execute.DatasetID, pt execute.Time) error { + return t.d.UpdateProcessingTime(pt) +} +func (t *mapTransformation) Finish(id execute.DatasetID, err error) { + t.d.Finish(err) +} diff --git a/vendor/github.com/influxdata/ifql/functions/map_test.go b/vendor/github.com/influxdata/ifql/functions/map_test.go new file mode 100644 index 000000000..516ef0ce2 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/map_test.go @@ -0,0 +1,268 @@ 
+package functions_test + +import ( + "testing" + + "github.com/influxdata/ifql/ast" + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/querytest" + "github.com/influxdata/ifql/semantic" +) + +func TestMap_NewQuery(t *testing.T) { + tests := []querytest.NewQueryTestCase{ + { + Name: "simple static map", + Raw: `from(db:"mydb") |> map(fn: (r) => r._value + 1)`, + Want: &query.Spec{ + Operations: []*query.Operation{ + { + ID: "from0", + Spec: &functions.FromOpSpec{ + Database: "mydb", + }, + }, + { + ID: "map1", + Spec: &functions.MapOpSpec{ + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.BinaryExpression{ + Operator: ast.AdditionOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "r", + }, + Property: "_value", + }, + Right: &semantic.IntegerLiteral{Value: 1}, + }, + }, + }, + }, + }, + Edges: []query.Edge{ + {Parent: "from0", Child: "map1"}, + }, + }, + }, + { + Name: "resolve map", + Raw: `x = 2 from(db:"mydb") |> map(fn: (r) => r._value + x)`, + Want: &query.Spec{ + Operations: []*query.Operation{ + { + ID: "from0", + Spec: &functions.FromOpSpec{ + Database: "mydb", + }, + }, + { + ID: "map1", + Spec: &functions.MapOpSpec{ + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.BinaryExpression{ + Operator: ast.AdditionOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "r", + }, + Property: "_value", + }, + Right: &semantic.IntegerLiteral{Value: 2}, + }, + }, + }, + }, + }, + Edges: []query.Edge{ + {Parent: "from0", Child: "map1"}, + }, + }, + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.Name, func(t *testing.T) { + t.Parallel() + 
querytest.NewQueryTestHelper(t, tc) + }) + } +} + +func TestMapOperation_Marshaling(t *testing.T) { + data := []byte(`{ + "id":"map", + "kind":"map", + "spec":{ + "fn":{ + "type": "ArrowFunctionExpression", + "params": [{"type":"FunctionParam","key":{"type":"Identifier","name":"r"}}], + "body":{ + "type":"BinaryExpression", + "operator": "-", + "left":{ + "type":"MemberExpression", + "object": { + "type": "IdentifierExpression", + "name":"r" + }, + "property": "_value" + }, + "right":{ + "type":"FloatLiteral", + "value": 5.6 + } + } + } + } + }`) + op := &query.Operation{ + ID: "map", + Spec: &functions.MapOpSpec{ + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.BinaryExpression{ + Operator: ast.SubtractionOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "r", + }, + Property: "_value", + }, + Right: &semantic.FloatLiteral{Value: 5.6}, + }, + }, + }, + } + querytest.OperationMarshalingTestHelper(t, data, op) +} +func TestMap_Process(t *testing.T) { + testCases := []struct { + name string + spec *functions.MapProcedureSpec + data []execute.Block + want []*executetest.Block + }{ + { + name: `_value+5`, + spec: &functions.MapProcedureSpec{ + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.BinaryExpression{ + Operator: ast.AdditionOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "r", + }, + Property: "_value", + }, + Right: &semantic.FloatLiteral{ + Value: 5, + }, + }, + }, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0}, + {execute.Time(2), 
6.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 6.0}, + {execute.Time(2), 11.0}, + }, + }}, + }, + { + name: `_value*_value`, + spec: &functions.MapProcedureSpec{ + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.BinaryExpression{ + Operator: ast.MultiplicationOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "r", + }, + Property: "_value", + }, + Right: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "r", + }, + Property: "_value", + }, + }, + }, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0}, + {execute.Time(2), 6.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0}, + {execute.Time(2), 36.0}, + }, + }}, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + executetest.ProcessTestHelper( + t, + tc.data, + tc.want, + func(d execute.Dataset, c execute.BlockBuilderCache) execute.Transformation { + f, err := functions.NewMapTransformation(d, c, tc.spec) + if err != nil { + t.Fatal(err) + } + return f + }, + ) + }) + } +} diff --git 
a/vendor/github.com/influxdata/ifql/functions/max.go b/vendor/github.com/influxdata/ifql/functions/max.go new file mode 100644 index 000000000..5c9ec75a3 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/max.go @@ -0,0 +1,178 @@ +package functions + +import ( + "fmt" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" +) + +const MaxKind = "max" + +type MaxOpSpec struct { + Column string `json:"column"` + UseRowTime bool `json:"useRowtime"` +} + +var maxSignature = query.DefaultFunctionSignature() + +func init() { + maxSignature.Params["column"] = semantic.String + maxSignature.Params["useRowTime"] = semantic.Bool + + query.RegisterFunction(MaxKind, createMaxOpSpec, maxSignature) + query.RegisterOpSpec(MaxKind, newMaxOp) + plan.RegisterProcedureSpec(MaxKind, newMaxProcedure, MaxKind) + execute.RegisterTransformation(MaxKind, createMaxTransformation) +} + +func createMaxOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + spec := new(MaxOpSpec) + if c, ok, err := args.GetString("column"); err != nil { + return nil, err + } else if ok { + spec.Column = c + } + if useRowTime, ok, err := args.GetBool("useRowTime"); err != nil { + return nil, err + } else if ok { + spec.UseRowTime = useRowTime + } + + return spec, nil +} + +func newMaxOp() query.OperationSpec { + return new(MaxOpSpec) +} + +func (s *MaxOpSpec) Kind() query.OperationKind { + return MaxKind +} + +type MaxProcedureSpec struct { + Column string + UseRowTime bool +} + +func newMaxProcedure(qs query.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { + spec, ok := qs.(*MaxOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + return &MaxProcedureSpec{ + Column: spec.Column, + UseRowTime: spec.UseRowTime, + }, nil +} + +func (s 
*MaxProcedureSpec) Kind() plan.ProcedureKind { + return MaxKind +} +func (s *MaxProcedureSpec) Copy() plan.ProcedureSpec { + ns := new(MaxProcedureSpec) + ns.Column = s.Column + ns.UseRowTime = s.UseRowTime + return ns +} + +type MaxSelector struct { + set bool + rows []execute.Row +} + +func createMaxTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + ps, ok := spec.(*MaxProcedureSpec) + if !ok { + return nil, nil, fmt.Errorf("invalid spec type %T", ps) + } + t, d := execute.NewRowSelectorTransformationAndDataset(id, mode, a.Bounds(), new(MaxSelector), ps.Column, ps.UseRowTime, a.Allocator()) + return t, d, nil +} + +type MaxIntSelector struct { + MaxSelector + max int64 +} +type MaxUIntSelector struct { + MaxSelector + max uint64 +} +type MaxFloatSelector struct { + MaxSelector + max float64 +} + +func (s *MaxSelector) NewBoolSelector() execute.DoBoolRowSelector { + return nil +} + +func (s *MaxSelector) NewIntSelector() execute.DoIntRowSelector { + return new(MaxIntSelector) +} + +func (s *MaxSelector) NewUIntSelector() execute.DoUIntRowSelector { + return new(MaxUIntSelector) +} + +func (s *MaxSelector) NewFloatSelector() execute.DoFloatRowSelector { + return new(MaxFloatSelector) +} + +func (s *MaxSelector) NewStringSelector() execute.DoStringRowSelector { + return nil +} + +func (s *MaxSelector) Rows() []execute.Row { + if !s.set { + return nil + } + return s.rows +} + +func (s *MaxSelector) selectRow(idx int, rr execute.RowReader) { + // Capture row + if idx >= 0 { + s.rows = []execute.Row{execute.ReadRow(idx, rr)} + } +} + +func (s *MaxIntSelector) DoInt(vs []int64, rr execute.RowReader) { + maxIdx := -1 + for i, v := range vs { + if !s.set || v > s.max { + s.set = true + s.max = v + maxIdx = i + } + } + s.selectRow(maxIdx, rr) +} +func (s *MaxUIntSelector) DoUInt(vs []uint64, rr execute.RowReader) { + maxIdx := -1 + for i, v := range vs { 
+ if !s.set || v > s.max { + s.set = true + s.max = v + maxIdx = i + } + } + s.selectRow(maxIdx, rr) +} +func (s *MaxFloatSelector) DoFloat(vs []float64, rr execute.RowReader) { + maxIdx := -1 + for i, v := range vs { + if !s.set || v > s.max { + s.set = true + s.max = v + maxIdx = i + } + } + s.selectRow(maxIdx, rr) +} diff --git a/vendor/github.com/influxdata/ifql/functions/max_test.go b/vendor/github.com/influxdata/ifql/functions/max_test.go new file mode 100644 index 000000000..fa7a80e61 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/max_test.go @@ -0,0 +1,125 @@ +package functions_test + +import ( + "testing" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestMaxOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"max","kind":"max","spec":{"useRowTime":true}}`) + op := &query.Operation{ + ID: "max", + Spec: &functions.MaxOpSpec{ + UseRowTime: true, + }, + } + + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestMax_Process(t *testing.T) { + testCases := []struct { + name string + data *executetest.Block + want []execute.Row + }{ + { + name: "first", + data: &executetest.Block{ + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(0), 10.0, "a", "y"}, + {execute.Time(10), 5.0, "a", "x"}, + {execute.Time(20), 9.0, "a", "y"}, + {execute.Time(30), 4.0, "a", "x"}, + {execute.Time(40), 6.0, "a", "y"}, + {execute.Time(50), 8.0, "a", "x"}, + {execute.Time(60), 1.0, "a", "y"}, + {execute.Time(70), 2.0, "a", "x"}, + 
{execute.Time(80), 3.0, "a", "y"}, + {execute.Time(90), 7.0, "a", "x"}, + }, + }, + want: []execute.Row{{ + Values: []interface{}{execute.Time(0), 10.0, "a", "y"}, + }}, + }, + { + name: "last", + data: &executetest.Block{ + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(0), 7.0, "a", "y"}, + {execute.Time(10), 5.0, "a", "x"}, + {execute.Time(20), 9.0, "a", "y"}, + {execute.Time(30), 4.0, "a", "x"}, + {execute.Time(40), 6.0, "a", "y"}, + {execute.Time(50), 8.0, "a", "x"}, + {execute.Time(60), 1.0, "a", "y"}, + {execute.Time(70), 2.0, "a", "x"}, + {execute.Time(80), 3.0, "a", "y"}, + {execute.Time(90), 10.0, "a", "x"}, + }, + }, + want: []execute.Row{{ + Values: []interface{}{execute.Time(90), 10.0, "a", "x"}, + }}, + }, + { + name: "middle", + data: &executetest.Block{ + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(0), 7.0, "a", "y"}, + {execute.Time(10), 5.0, "a", "x"}, + {execute.Time(20), 9.0, "a", "y"}, + {execute.Time(30), 4.0, "a", "x"}, + {execute.Time(40), 6.0, "a", "y"}, + {execute.Time(50), 10.0, "a", "x"}, + {execute.Time(60), 1.0, "a", "y"}, + {execute.Time(70), 2.0, "a", "x"}, + {execute.Time(80), 3.0, "a", "y"}, + {execute.Time(90), 8.0, "a", "x"}, + }, + }, + want: []execute.Row{{ + Values: []interface{}{execute.Time(50), 10.0, "a", "x"}, + }}, + }, + } + for _, tc := range testCases { + tc := tc + 
t.Run(tc.name, func(t *testing.T) { + executetest.RowSelectorFuncTestHelper( + t, + new(functions.MaxSelector), + tc.data, + tc.want, + ) + }) + } +} + +func BenchmarkMax(b *testing.B) { + executetest.RowSelectorFuncBenchmarkHelper(b, new(functions.MaxSelector), NormalBlock) +} diff --git a/vendor/github.com/influxdata/ifql/functions/mean.go b/vendor/github.com/influxdata/ifql/functions/mean.go new file mode 100644 index 000000000..a0bf1e771 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/mean.go @@ -0,0 +1,119 @@ +package functions + +import ( + "math" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" +) + +const MeanKind = "mean" + +type MeanOpSpec struct { +} + +var meanSignature = query.DefaultFunctionSignature() + +func init() { + query.RegisterFunction(MeanKind, createMeanOpSpec, meanSignature) + query.RegisterOpSpec(MeanKind, newMeanOp) + plan.RegisterProcedureSpec(MeanKind, newMeanProcedure, MeanKind) + execute.RegisterTransformation(MeanKind, createMeanTransformation) +} +func createMeanOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + return new(MeanOpSpec), nil +} + +func newMeanOp() query.OperationSpec { + return new(MeanOpSpec) +} + +func (s *MeanOpSpec) Kind() query.OperationKind { + return MeanKind +} + +type MeanProcedureSpec struct { +} + +func newMeanProcedure(query.OperationSpec, plan.Administration) (plan.ProcedureSpec, error) { + return new(MeanProcedureSpec), nil +} + +func (s *MeanProcedureSpec) Kind() plan.ProcedureKind { + return MeanKind +} +func (s *MeanProcedureSpec) Copy() plan.ProcedureSpec { + return new(MeanProcedureSpec) +} + +type MeanAgg struct { + count float64 + sum float64 +} + +func createMeanTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) 
(execute.Transformation, execute.Dataset, error) { + t, d := execute.NewAggregateTransformationAndDataset(id, mode, a.Bounds(), new(MeanAgg), a.Allocator()) + return t, d, nil +} + +func (a *MeanAgg) reset() { + a.count = 0 + a.sum = 0 +} +func (a *MeanAgg) NewBoolAgg() execute.DoBoolAgg { + return nil +} + +func (a *MeanAgg) NewIntAgg() execute.DoIntAgg { + a.reset() + return a +} + +func (a *MeanAgg) NewUIntAgg() execute.DoUIntAgg { + a.reset() + return a +} + +func (a *MeanAgg) NewFloatAgg() execute.DoFloatAgg { + a.reset() + return a +} + +func (a *MeanAgg) NewStringAgg() execute.DoStringAgg { + return nil +} + +func (a *MeanAgg) DoInt(vs []int64) { + a.count += float64(len(vs)) + for _, v := range vs { + //TODO handle overflow + a.sum += float64(v) + } +} +func (a *MeanAgg) DoUInt(vs []uint64) { + a.count += float64(len(vs)) + for _, v := range vs { + //TODO handle overflow + a.sum += float64(v) + } +} +func (a *MeanAgg) DoFloat(vs []float64) { + a.count += float64(len(vs)) + for _, v := range vs { + a.sum += v + } +} +func (a *MeanAgg) Type() execute.DataType { + return execute.TFloat +} +func (a *MeanAgg) ValueFloat() float64 { + if a.count < 1 { + return math.NaN() + } + return a.sum / a.count +} diff --git a/vendor/github.com/influxdata/ifql/functions/mean_test.go b/vendor/github.com/influxdata/ifql/functions/mean_test.go new file mode 100644 index 000000000..8cd2a41a1 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/mean_test.go @@ -0,0 +1,65 @@ +package functions_test + +import ( + "math" + "testing" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestMeanOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"mean","kind":"mean"}`) + op := &query.Operation{ + ID: "mean", + Spec: &functions.MeanOpSpec{}, + } + + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func 
TestMean_Process(t *testing.T) { + testCases := []struct { + name string + data []float64 + want float64 + }{ + { + name: "zero", + data: []float64{0, 0, 0}, + want: 0.0, + }, + { + name: "nonzero", + data: []float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + want: 4.5, + }, + { + name: "NaN", + data: []float64{}, + want: math.NaN(), + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + executetest.AggFuncTestHelper( + t, + new(functions.MeanAgg), + tc.data, + tc.want, + ) + }) + } +} + +func BenchmarkMean(b *testing.B) { + executetest.AggFuncBenchmarkHelper( + b, + new(functions.MeanAgg), + NormalData, + 10.00081696729983, + ) +} diff --git a/vendor/github.com/influxdata/ifql/functions/min.go b/vendor/github.com/influxdata/ifql/functions/min.go new file mode 100644 index 000000000..13498835d --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/min.go @@ -0,0 +1,178 @@ +package functions + +import ( + "fmt" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" +) + +const MinKind = "min" + +type MinOpSpec struct { + Column string `json:"column"` + UseRowTime bool `json:"useRowtime"` +} + +var minSignature = query.DefaultFunctionSignature() + +func init() { + minSignature.Params["column"] = semantic.String + minSignature.Params["useRowTime"] = semantic.Bool + + query.RegisterFunction(MinKind, createMinOpSpec, minSignature) + query.RegisterOpSpec(MinKind, newMinOp) + plan.RegisterProcedureSpec(MinKind, newMinProcedure, MinKind) + execute.RegisterTransformation(MinKind, createMinTransformation) +} + +func createMinOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + spec := new(MinOpSpec) + if c, ok, err := args.GetString("column"); err != nil { + return nil, err + } else if ok { + spec.Column = c + } + if 
useRowTime, ok, err := args.GetBool("useRowTime"); err != nil { + return nil, err + } else if ok { + spec.UseRowTime = useRowTime + } + + return spec, nil +} + +func newMinOp() query.OperationSpec { + return new(MinOpSpec) +} + +func (s *MinOpSpec) Kind() query.OperationKind { + return MinKind +} + +type MinProcedureSpec struct { + Column string + UseRowTime bool +} + +func newMinProcedure(qs query.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { + spec, ok := qs.(*MinOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + return &MinProcedureSpec{ + Column: spec.Column, + UseRowTime: spec.UseRowTime, + }, nil +} + +func (s *MinProcedureSpec) Kind() plan.ProcedureKind { + return MinKind +} +func (s *MinProcedureSpec) Copy() plan.ProcedureSpec { + ns := new(MinProcedureSpec) + ns.Column = s.Column + ns.UseRowTime = s.UseRowTime + return ns +} + +type MinSelector struct { + set bool + rows []execute.Row +} + +func createMinTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + ps, ok := spec.(*MinProcedureSpec) + if !ok { + return nil, nil, fmt.Errorf("invalid spec type %T", ps) + } + t, d := execute.NewRowSelectorTransformationAndDataset(id, mode, a.Bounds(), new(MinSelector), ps.Column, ps.UseRowTime, a.Allocator()) + return t, d, nil +} + +type MinIntSelector struct { + MinSelector + min int64 +} +type MinUIntSelector struct { + MinSelector + min uint64 +} +type MinFloatSelector struct { + MinSelector + min float64 +} + +func (s *MinSelector) NewBoolSelector() execute.DoBoolRowSelector { + return nil +} + +func (s *MinSelector) NewIntSelector() execute.DoIntRowSelector { + return new(MinIntSelector) +} + +func (s *MinSelector) NewUIntSelector() execute.DoUIntRowSelector { + return new(MinUIntSelector) +} + +func (s *MinSelector) NewFloatSelector() execute.DoFloatRowSelector { + return new(MinFloatSelector) 
+} + +func (s *MinSelector) NewStringSelector() execute.DoStringRowSelector { + return nil +} + +func (s *MinSelector) Rows() []execute.Row { + if !s.set { + return nil + } + return s.rows +} + +func (s *MinSelector) selectRow(idx int, rr execute.RowReader) { + // Capture row + if idx >= 0 { + s.rows = []execute.Row{execute.ReadRow(idx, rr)} + } +} + +func (s *MinIntSelector) DoInt(vs []int64, rr execute.RowReader) { + minIdx := -1 + for i, v := range vs { + if !s.set || v < s.min { + s.set = true + s.min = v + minIdx = i + } + } + s.selectRow(minIdx, rr) +} +func (s *MinUIntSelector) DoUInt(vs []uint64, rr execute.RowReader) { + minIdx := -1 + for i, v := range vs { + if !s.set || v < s.min { + s.set = true + s.min = v + minIdx = i + } + } + s.selectRow(minIdx, rr) +} +func (s *MinFloatSelector) DoFloat(vs []float64, rr execute.RowReader) { + minIdx := -1 + for i, v := range vs { + if !s.set || v < s.min { + s.set = true + s.min = v + minIdx = i + } + } + s.selectRow(minIdx, rr) +} diff --git a/vendor/github.com/influxdata/ifql/functions/min_test.go b/vendor/github.com/influxdata/ifql/functions/min_test.go new file mode 100644 index 000000000..8417243c4 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/min_test.go @@ -0,0 +1,125 @@ +package functions_test + +import ( + "testing" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestMinOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"min","kind":"min","spec":{"useRowTime":true}}`) + op := &query.Operation{ + ID: "min", + Spec: &functions.MinOpSpec{ + UseRowTime: true, + }, + } + + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestMin_Process(t *testing.T) { + testCases := []struct { + name string + data *executetest.Block + want []execute.Row + }{ + { + name: "first", + data: 
&executetest.Block{ + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(0), 0.0, "a", "y"}, + {execute.Time(10), 5.0, "a", "x"}, + {execute.Time(20), 9.0, "a", "y"}, + {execute.Time(30), 4.0, "a", "x"}, + {execute.Time(40), 6.0, "a", "y"}, + {execute.Time(50), 8.0, "a", "x"}, + {execute.Time(60), 1.0, "a", "y"}, + {execute.Time(70), 2.0, "a", "x"}, + {execute.Time(80), 3.0, "a", "y"}, + {execute.Time(90), 7.0, "a", "x"}, + }, + }, + want: []execute.Row{{ + Values: []interface{}{execute.Time(0), 0.0, "a", "y"}, + }}, + }, + { + name: "last", + data: &executetest.Block{ + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(0), 7.0, "a", "y"}, + {execute.Time(10), 5.0, "a", "x"}, + {execute.Time(20), 9.0, "a", "y"}, + {execute.Time(30), 4.0, "a", "x"}, + {execute.Time(40), 6.0, "a", "y"}, + {execute.Time(50), 8.0, "a", "x"}, + {execute.Time(60), 1.0, "a", "y"}, + {execute.Time(70), 2.0, "a", "x"}, + {execute.Time(80), 3.0, "a", "y"}, + {execute.Time(90), 0.0, "a", "x"}, + }, + }, + want: []execute.Row{{ + Values: []interface{}{execute.Time(90), 0.0, "a", "x"}, + }}, + }, + { + name: "middle", + data: &executetest.Block{ + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: 
execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(0), 7.0, "a", "y"}, + {execute.Time(10), 5.0, "a", "x"}, + {execute.Time(20), 9.0, "a", "y"}, + {execute.Time(30), 4.0, "a", "x"}, + {execute.Time(40), 6.0, "a", "y"}, + {execute.Time(50), 0.0, "a", "x"}, + {execute.Time(60), 1.0, "a", "y"}, + {execute.Time(70), 2.0, "a", "x"}, + {execute.Time(80), 3.0, "a", "y"}, + {execute.Time(90), 8.0, "a", "x"}, + }, + }, + want: []execute.Row{{ + Values: []interface{}{execute.Time(50), 0.0, "a", "x"}, + }}, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + executetest.RowSelectorFuncTestHelper( + t, + new(functions.MinSelector), + tc.data, + tc.want, + ) + }) + } +} + +func BenchmarkMin(b *testing.B) { + executetest.RowSelectorFuncBenchmarkHelper(b, new(functions.MinSelector), NormalBlock) +} diff --git a/vendor/github.com/influxdata/ifql/functions/percentile.go b/vendor/github.com/influxdata/ifql/functions/percentile.go new file mode 100644 index 000000000..05cca1f99 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/percentile.go @@ -0,0 +1,260 @@ +package functions + +import ( + "fmt" + "math" + "sort" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" + "github.com/influxdata/tdigest" + "github.com/pkg/errors" +) + +const PercentileKind = "percentile" +const ExactPercentileKind = "exact-percentile" + +type PercentileOpSpec struct { + Percentile float64 `json:"percentile"` + Compression float64 `json:"compression"` + Exact bool `json:"exact"` +} + +var percentileSignature = query.DefaultFunctionSignature() + +func init() { + percentileSignature.Params["p"] = semantic.Float + + query.RegisterFunction(PercentileKind, createPercentileOpSpec, percentileSignature) + query.RegisterBuiltIn("percentile", 
percentileBuiltin) + + query.RegisterOpSpec(PercentileKind, newPercentileOp) + plan.RegisterProcedureSpec(PercentileKind, newPercentileProcedure, PercentileKind) + execute.RegisterTransformation(PercentileKind, createPercentileTransformation) + execute.RegisterTransformation(ExactPercentileKind, createExactPercentileTransformation) +} + +var percentileBuiltin = ` +// median returns the 50th percentile. +// By default an approximate percentile is computed, this can be disabled by passing exact:true. +// Using the exact method requires that the entire data set can fit in memory. +median = (exact=false, compression=0.0, table=<-) => percentile(table:table, p:0.5, exact:exact, compression:compression) +` + +func createPercentileOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + spec := new(PercentileOpSpec) + p, err := args.GetRequiredFloat("p") + if err != nil { + return nil, err + } + spec.Percentile = p + + if spec.Percentile < 0 || spec.Percentile > 1 { + return nil, errors.New("percentile must be between 0 and 1.") + } + + if c, ok, err := args.GetFloat("compression"); err != nil { + return nil, err + } else if ok { + spec.Compression = c + } + + if exact, ok, err := args.GetBool("exact"); err != nil { + return nil, err + } else if ok { + spec.Exact = exact + } + + if spec.Compression > 0 && spec.Exact { + return nil, errors.New("cannot specify both compression and exact.") + } + + // Set default Compression if not exact + if !spec.Exact && spec.Compression == 0 { + spec.Compression = 1000 + } + + return spec, nil +} + +func newPercentileOp() query.OperationSpec { + return new(PercentileOpSpec) +} + +func (s *PercentileOpSpec) Kind() query.OperationKind { + return PercentileKind +} + +type PercentileProcedureSpec struct { + Percentile float64 `json:"percentile"` + Compression float64 `json:"compression"` +} + +func (s *PercentileProcedureSpec) Kind() 
plan.ProcedureKind { + return PercentileKind +} +func (s *PercentileProcedureSpec) Copy() plan.ProcedureSpec { + return &PercentileProcedureSpec{ + Percentile: s.Percentile, + Compression: s.Compression, + } +} + +type ExactPercentileProcedureSpec struct { + Percentile float64 `json:"percentile"` +} + +func (s *ExactPercentileProcedureSpec) Kind() plan.ProcedureKind { + return ExactPercentileKind +} +func (s *ExactPercentileProcedureSpec) Copy() plan.ProcedureSpec { + return &ExactPercentileProcedureSpec{Percentile: s.Percentile} +} + +func newPercentileProcedure(qs query.OperationSpec, a plan.Administration) (plan.ProcedureSpec, error) { + spec, ok := qs.(*PercentileOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + if spec.Exact { + return &ExactPercentileProcedureSpec{ + Percentile: spec.Percentile, + }, nil + } + return &PercentileProcedureSpec{ + Percentile: spec.Percentile, + Compression: spec.Compression, + }, nil +} + +type PercentileAgg struct { + Quantile, + Compression float64 + + digest *tdigest.TDigest +} + +func createPercentileTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + ps, ok := spec.(*PercentileProcedureSpec) + if !ok { + return nil, nil, fmt.Errorf("invalid spec type %T", ps) + } + agg := &PercentileAgg{ + Quantile: ps.Percentile, + Compression: ps.Compression, + } + t, d := execute.NewAggregateTransformationAndDataset(id, mode, a.Bounds(), agg, a.Allocator()) + return t, d, nil +} + +func (a *PercentileAgg) reset() { + a.digest = tdigest.NewWithCompression(a.Compression) +} +func (a *PercentileAgg) NewBoolAgg() execute.DoBoolAgg { + return nil +} + +func (a *PercentileAgg) NewIntAgg() execute.DoIntAgg { + return nil +} + +func (a *PercentileAgg) NewUIntAgg() execute.DoUIntAgg { + return nil +} + +func (a *PercentileAgg) NewFloatAgg() execute.DoFloatAgg { + a.reset() + return a +} + +func (a 
*PercentileAgg) NewStringAgg() execute.DoStringAgg { + return nil +} + +func (a *PercentileAgg) DoFloat(vs []float64) { + for _, v := range vs { + a.digest.Add(v, 1) + } +} + +func (a *PercentileAgg) Type() execute.DataType { + return execute.TFloat +} +func (a *PercentileAgg) ValueFloat() float64 { + return a.digest.Quantile(a.Quantile) +} + +type ExactPercentileAgg struct { + Quantile float64 + + data []float64 +} + +func createExactPercentileTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + ps, ok := spec.(*ExactPercentileProcedureSpec) + if !ok { + return nil, nil, fmt.Errorf("invalid spec type %T", ps) + } + agg := &ExactPercentileAgg{ + Quantile: ps.Percentile, + } + t, d := execute.NewAggregateTransformationAndDataset(id, mode, a.Bounds(), agg, a.Allocator()) + return t, d, nil +} + +func (a *ExactPercentileAgg) reset() { + a.data = a.data[0:0] +} +func (a *ExactPercentileAgg) NewBoolAgg() execute.DoBoolAgg { + return nil +} + +func (a *ExactPercentileAgg) NewIntAgg() execute.DoIntAgg { + return nil +} + +func (a *ExactPercentileAgg) NewUIntAgg() execute.DoUIntAgg { + return nil +} + +func (a *ExactPercentileAgg) NewFloatAgg() execute.DoFloatAgg { + a.reset() + return a +} + +func (a *ExactPercentileAgg) NewStringAgg() execute.DoStringAgg { + return nil +} + +func (a *ExactPercentileAgg) DoFloat(vs []float64) { + a.data = append(a.data, vs...) 
+} + +func (a *ExactPercentileAgg) Type() execute.DataType { + return execute.TFloat +} + +func (a *ExactPercentileAgg) ValueFloat() float64 { + sort.Float64s(a.data) + + x := a.Quantile * float64(len(a.data)-1) + x0 := math.Floor(x) + x1 := math.Ceil(x) + + if x0 == x1 { + return a.data[int(x0)] + } + + // Linear interpolate + y0 := a.data[int(x0)] + y1 := a.data[int(x1)] + y := y0*(x1-x) + y1*(x-x0) + + return y +} diff --git a/vendor/github.com/influxdata/ifql/functions/percentile_test.go b/vendor/github.com/influxdata/ifql/functions/percentile_test.go new file mode 100644 index 000000000..ad3a08d00 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/percentile_test.go @@ -0,0 +1,150 @@ +package functions_test + +import ( + "math" + "testing" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestPercentileOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"percentile","kind":"percentile","spec":{"percentile":0.9}}`) + op := &query.Operation{ + ID: "percentile", + Spec: &functions.PercentileOpSpec{ + Percentile: 0.9, + }, + } + + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestPercentile_Process(t *testing.T) { + testCases := []struct { + name string + data []float64 + percentile float64 + exact bool + want float64 + }{ + { + name: "zero", + data: []float64{0, 0, 0}, + percentile: 0.5, + want: 0.0, + }, + { + name: "50th", + data: []float64{1, 2, 3, 4, 5, 5, 4, 3, 2, 1}, + percentile: 0.5, + want: 3, + }, + { + name: "75th", + data: []float64{1, 2, 3, 4, 5, 5, 4, 3, 2, 1}, + percentile: 0.75, + want: 4, + }, + { + name: "90th", + data: []float64{1, 2, 3, 4, 5, 5, 4, 3, 2, 1}, + percentile: 0.9, + want: 5, + }, + { + name: "99th", + data: []float64{1, 2, 3, 4, 5, 5, 4, 3, 2, 1}, + percentile: 0.99, + want: 5, + }, + { + name: "exact 
50th", + data: []float64{1, 2, 3, 4, 5}, + percentile: 0.5, + exact: true, + want: 3, + }, + { + name: "exact 75th", + data: []float64{1, 2, 3, 4, 5}, + percentile: 0.75, + exact: true, + want: 4, + }, + { + name: "exact 90th", + data: []float64{1, 2, 3, 4, 5}, + percentile: 0.9, + exact: true, + want: 4.6, + }, + { + name: "exact 99th", + data: []float64{1, 2, 3, 4, 5}, + percentile: 0.99, + exact: true, + want: 4.96, + }, + { + name: "exact 100th", + data: []float64{1, 2, 3, 4, 5}, + percentile: 1, + exact: true, + want: 5, + }, + { + name: "exact 50th normal", + data: NormalData, + percentile: 0.5, + exact: true, + want: 9.997645059676595, + }, + { + name: "normal", + data: NormalData, + percentile: 0.9, + want: 13.843815760607427, + }, + { + name: "NaN", + data: []float64{}, + want: math.NaN(), + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + var agg execute.Aggregate + if tc.exact { + agg = &functions.ExactPercentileAgg{Quantile: tc.percentile} + } else { + agg = &functions.PercentileAgg{ + Quantile: tc.percentile, + Compression: 1000, + } + } + executetest.AggFuncTestHelper( + t, + agg, + tc.data, + tc.want, + ) + }) + } +} + +func BenchmarkPercentile(b *testing.B) { + executetest.AggFuncBenchmarkHelper( + b, + &functions.PercentileAgg{ + Quantile: 0.9, + Compression: 1000, + }, + NormalData, + 13.843815760607427, + ) +} diff --git a/vendor/github.com/influxdata/ifql/functions/range.go b/vendor/github.com/influxdata/ifql/functions/range.go new file mode 100644 index 000000000..ebce53ef9 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/range.go @@ -0,0 +1,114 @@ +package functions + +import ( + "fmt" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" +) + +const RangeKind = "range" + +type RangeOpSpec struct { + Start query.Time `json:"start"` + Stop query.Time `json:"stop"` +} + +var rangeSignature = query.DefaultFunctionSignature() 
+ +func init() { + rangeSignature.Params["start"] = semantic.Time + rangeSignature.Params["stop"] = semantic.Time + + query.RegisterFunction(RangeKind, createRangeOpSpec, rangeSignature) + query.RegisterOpSpec(RangeKind, newRangeOp) + plan.RegisterProcedureSpec(RangeKind, newRangeProcedure, RangeKind) + // TODO register a range transformation. Currently range is only supported if it is pushed down into a select procedure. + //execute.RegisterTransformation(RangeKind, createRangeTransformation) +} + +func createRangeOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + start, err := args.GetRequiredTime("start") + if err != nil { + return nil, err + } + spec := &RangeOpSpec{ + Start: start, + } + + if stop, ok, err := args.GetTime("stop"); err != nil { + return nil, err + } else if ok { + spec.Stop = stop + } else { + // Make stop time implicit "now" + spec.Stop.IsRelative = true + } + + return spec, nil +} + +func newRangeOp() query.OperationSpec { + return new(RangeOpSpec) +} + +func (s *RangeOpSpec) Kind() query.OperationKind { + return RangeKind +} + +type RangeProcedureSpec struct { + Bounds plan.BoundsSpec +} + +func newRangeProcedure(qs query.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { + spec, ok := qs.(*RangeOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + return &RangeProcedureSpec{ + Bounds: plan.BoundsSpec{ + Start: spec.Start, + Stop: spec.Stop, + }, + }, nil +} + +func (s *RangeProcedureSpec) Kind() plan.ProcedureKind { + return RangeKind +} +func (s *RangeProcedureSpec) Copy() plan.ProcedureSpec { + ns := new(RangeProcedureSpec) + ns.Bounds = s.Bounds + return ns +} + +func (s *RangeProcedureSpec) PushDownRules() []plan.PushDownRule { + return []plan.PushDownRule{{ + Root: FromKind, + Through: []plan.ProcedureKind{GroupKind, LimitKind, FilterKind}, + }} +} +func (s *RangeProcedureSpec) 
PushDown(root *plan.Procedure, dup func() *plan.Procedure) { + selectSpec := root.Spec.(*FromProcedureSpec) + if selectSpec.BoundsSet { + // Example case where this matters + // var data = select(database: "mydb") + // var past = data.range(start:-2d,stop:-1d) + // var current = data.range(start:-1d,stop:now) + root = dup() + selectSpec = root.Spec.(*FromProcedureSpec) + selectSpec.BoundsSet = false + selectSpec.Bounds = plan.BoundsSpec{} + return + } + selectSpec.BoundsSet = true + selectSpec.Bounds = s.Bounds +} + +func (s *RangeProcedureSpec) TimeBounds() plan.BoundsSpec { + return s.Bounds +} diff --git a/vendor/github.com/influxdata/ifql/functions/range_test.go b/vendor/github.com/influxdata/ifql/functions/range_test.go new file mode 100644 index 000000000..b5e10e964 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/range_test.go @@ -0,0 +1,120 @@ +package functions_test + +import ( + "testing" + "time" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/query/plan/plantest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestRange_NewQuery(t *testing.T) { + tests := []querytest.NewQueryTestCase{ + { + Name: "from with database with range", + Raw: `from(db:"mydb") |> range(start:-4h, stop:-2h) |> sum()`, + Want: &query.Spec{ + Operations: []*query.Operation{ + { + ID: "from0", + Spec: &functions.FromOpSpec{ + Database: "mydb", + }, + }, + { + ID: "range1", + Spec: &functions.RangeOpSpec{ + Start: query.Time{ + Relative: -4 * time.Hour, + IsRelative: true, + }, + Stop: query.Time{ + Relative: -2 * time.Hour, + IsRelative: true, + }, + }, + }, + { + ID: "sum2", + Spec: &functions.SumOpSpec{}, + }, + }, + Edges: []query.Edge{ + {Parent: "from0", Child: "range1"}, + {Parent: "range1", Child: "sum2"}, + }, + }, + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.Name, func(t *testing.T) { + t.Parallel() + 
querytest.NewQueryTestHelper(t, tc) + }) + } +} + +func TestRangeOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"range","kind":"range","spec":{"start":"-1h","stop":"2017-10-10T00:00:00Z"}}`) + op := &query.Operation{ + ID: "range", + Spec: &functions.RangeOpSpec{ + Start: query.Time{ + Relative: -1 * time.Hour, + IsRelative: true, + }, + Stop: query.Time{ + Absolute: time.Date(2017, 10, 10, 0, 0, 0, 0, time.UTC), + }, + }, + } + + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestRange_PushDown(t *testing.T) { + spec := &functions.RangeProcedureSpec{ + Bounds: plan.BoundsSpec{ + Stop: query.Now, + }, + } + root := &plan.Procedure{ + Spec: new(functions.FromProcedureSpec), + } + want := &plan.Procedure{ + Spec: &functions.FromProcedureSpec{ + BoundsSet: true, + Bounds: plan.BoundsSpec{ + Stop: query.Now, + }, + }, + } + + plantest.PhysicalPlan_PushDown_TestHelper(t, spec, root, false, want) +} +func TestRange_PushDown_Duplicate(t *testing.T) { + spec := &functions.RangeProcedureSpec{ + Bounds: plan.BoundsSpec{ + Stop: query.Now, + }, + } + root := &plan.Procedure{ + Spec: &functions.FromProcedureSpec{ + BoundsSet: true, + Bounds: plan.BoundsSpec{ + Start: query.MinTime, + Stop: query.Now, + }, + }, + } + want := &plan.Procedure{ + // Expect the duplicate has been reset to zero values + Spec: new(functions.FromProcedureSpec), + } + + plantest.PhysicalPlan_PushDown_TestHelper(t, spec, root, true, want) +} diff --git a/vendor/github.com/influxdata/ifql/functions/sample.go b/vendor/github.com/influxdata/ifql/functions/sample.go new file mode 100644 index 000000000..b03471a8d --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/sample.go @@ -0,0 +1,188 @@ +package functions + +import ( + "fmt" + + "math/rand" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" +) + +const SampleKind = "sample" + +type SampleOpSpec 
struct { + Column string `json:"column"` + UseRowTime bool `json:"useRowtime"` + N int64 `json:"n"` + Pos int64 `json:"pos"` +} + +var sampleSignature = query.DefaultFunctionSignature() + +func init() { + sampleSignature.Params["column"] = semantic.String + sampleSignature.Params["useRowTime"] = semantic.Bool + + query.RegisterFunction(SampleKind, createSampleOpSpec, sampleSignature) + query.RegisterOpSpec(SampleKind, newSampleOp) + plan.RegisterProcedureSpec(SampleKind, newSampleProcedure, SampleKind) + execute.RegisterTransformation(SampleKind, createSampleTransformation) +} + +func createSampleOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + spec := new(SampleOpSpec) + if c, ok, err := args.GetString("column"); err != nil { + return nil, err + } else if ok { + spec.Column = c + } + if useRowTime, ok, err := args.GetBool("useRowTime"); err != nil { + return nil, err + } else if ok { + spec.UseRowTime = useRowTime + } + + n, err := args.GetRequiredInt("n") + if err != nil { + return nil, err + } + spec.N = n + + if pos, ok, err := args.GetInt("pos"); err != nil { + return nil, err + } else if ok { + spec.Pos = pos + } else { + spec.Pos = -1 + } + + return spec, nil +} + +func newSampleOp() query.OperationSpec { + return new(SampleOpSpec) +} + +func (s *SampleOpSpec) Kind() query.OperationKind { + return SampleKind +} + +type SampleProcedureSpec struct { + Column string + UseRowTime bool + N int64 + Pos int64 +} + +func newSampleProcedure(qs query.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { + spec, ok := qs.(*SampleOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + return &SampleProcedureSpec{ + Column: spec.Column, + UseRowTime: spec.UseRowTime, + N: spec.N, + Pos: spec.Pos, + }, nil +} + +func (s *SampleProcedureSpec) Kind() plan.ProcedureKind { + return SampleKind +} +func (s *SampleProcedureSpec) 
Copy() plan.ProcedureSpec { + ns := new(SampleProcedureSpec) + ns.Column = s.Column + ns.UseRowTime = s.UseRowTime + ns.N = s.N + ns.Pos = s.Pos + return ns +} + +type SampleSelector struct { + N int + Pos int + + offset int + selected []int +} + +func createSampleTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + ps, ok := spec.(*SampleProcedureSpec) + if !ok { + return nil, nil, fmt.Errorf("invalid spec type %T", ps) + } + + ss := &SampleSelector{ + N: int(ps.N), + Pos: int(ps.Pos), + } + t, d := execute.NewIndexSelectorTransformationAndDataset(id, mode, a.Bounds(), ss, ps.Column, ps.UseRowTime, a.Allocator()) + return t, d, nil +} + +func (s *SampleSelector) reset() { + pos := s.Pos + if pos < 0 { + pos = rand.Intn(s.N) + } + s.offset = pos +} + +func (s *SampleSelector) NewBoolSelector() execute.DoBoolIndexSelector { + s.reset() + return s +} + +func (s *SampleSelector) NewIntSelector() execute.DoIntIndexSelector { + s.reset() + return s +} + +func (s *SampleSelector) NewUIntSelector() execute.DoUIntIndexSelector { + s.reset() + return s +} + +func (s *SampleSelector) NewFloatSelector() execute.DoFloatIndexSelector { + s.reset() + return s +} + +func (s *SampleSelector) NewStringSelector() execute.DoStringIndexSelector { + s.reset() + return s +} + +func (s *SampleSelector) selectSample(l int) []int { + var i int + s.selected = s.selected[0:0] + for i = s.offset; i < l; i += s.N { + s.selected = append(s.selected, i) + } + s.offset = i - l + return s.selected +} + +func (s *SampleSelector) DoBool(vs []bool) []int { + return s.selectSample(len(vs)) +} +func (s *SampleSelector) DoInt(vs []int64) []int { + return s.selectSample(len(vs)) +} +func (s *SampleSelector) DoUInt(vs []uint64) []int { + return s.selectSample(len(vs)) +} +func (s *SampleSelector) DoFloat(vs []float64) []int { + return s.selectSample(len(vs)) +} +func (s *SampleSelector) 
DoString(vs []string) []int { + return s.selectSample(len(vs)) +} diff --git a/vendor/github.com/influxdata/ifql/functions/sample_test.go b/vendor/github.com/influxdata/ifql/functions/sample_test.go new file mode 100644 index 000000000..85e5fad3e --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/sample_test.go @@ -0,0 +1,336 @@ +package functions_test + +import ( + "testing" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestSampleOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"sample","kind":"sample","spec":{"useRowTime":true, "n":5, "pos":0}}`) + op := &query.Operation{ + ID: "sample", + Spec: &functions.SampleOpSpec{ + UseRowTime: true, + N: 5, + Pos: 0, + }, + } + + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestSample_Process(t *testing.T) { + testCases := []struct { + name string + data execute.Block + want [][]int + fromor *functions.SampleSelector + }{ + { + fromor: &functions.SampleSelector{ + N: 1, + Pos: 0, + }, + name: "everything in separate Do calls", + data: &executetest.Block{ + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(0), 7.0, "a", "y"}, + {execute.Time(10), 5.0, "a", "x"}, + {execute.Time(20), 9.0, "a", "y"}, + {execute.Time(30), 4.0, "a", "x"}, + {execute.Time(40), 6.0, "a", "y"}, + {execute.Time(50), 8.0, "a", "x"}, + {execute.Time(60), 1.0, "a", "y"}, + {execute.Time(70), 2.0, "a", "x"}, + {execute.Time(80), 3.0, "a", "y"}, + {execute.Time(90), 10.0, "a", "x"}, + }, + }, + want: 
[][]int{ + {0}, + {0}, + {0}, + {0}, + {0}, + {0}, + {0}, + {0}, + {0}, + {0}, + }, + }, + { + fromor: &functions.SampleSelector{ + N: 1, + Pos: 0, + }, + name: "everything in single Do call", + data: execute.CopyBlock(&executetest.Block{ + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(0), 7.0, "a", "y"}, + {execute.Time(10), 5.0, "a", "x"}, + {execute.Time(20), 9.0, "a", "y"}, + {execute.Time(30), 4.0, "a", "x"}, + {execute.Time(40), 6.0, "a", "y"}, + {execute.Time(50), 8.0, "a", "x"}, + {execute.Time(60), 1.0, "a", "y"}, + {execute.Time(70), 2.0, "a", "x"}, + {execute.Time(80), 3.0, "a", "y"}, + {execute.Time(90), 10.0, "a", "x"}, + }, + }, executetest.UnlimitedAllocator), + want: [][]int{{ + 0, + 1, + 2, + 3, + 4, + 5, + 6, + 7, + 8, + 9, + }}, + }, + { + fromor: &functions.SampleSelector{ + N: 2, + Pos: 0, + }, + name: "every-other-even", + data: execute.CopyBlock(&executetest.Block{ + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(0), 7.0, "a", "y"}, + {execute.Time(10), 5.0, "a", "x"}, + {execute.Time(20), 9.0, "a", "y"}, + {execute.Time(30), 4.0, "a", "x"}, + {execute.Time(40), 6.0, "a", "y"}, + {execute.Time(50), 8.0, "a", "x"}, + {execute.Time(60), 1.0, "a", "y"}, + {execute.Time(70), 2.0, "a", "x"}, + {execute.Time(80), 3.0, "a", "y"}, + {execute.Time(90), 10.0, "a", "x"}, + }, + }, 
executetest.UnlimitedAllocator), + want: [][]int{{ + 0, + 2, + 4, + 6, + 8, + }}, + }, + { + fromor: &functions.SampleSelector{ + N: 2, + Pos: 1, + }, + name: "every-other-odd", + data: execute.CopyBlock(&executetest.Block{ + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(0), 7.0, "a", "y"}, + {execute.Time(10), 5.0, "a", "x"}, + {execute.Time(20), 9.0, "a", "y"}, + {execute.Time(30), 4.0, "a", "x"}, + {execute.Time(40), 6.0, "a", "y"}, + {execute.Time(50), 8.0, "a", "x"}, + {execute.Time(60), 1.0, "a", "y"}, + {execute.Time(70), 2.0, "a", "x"}, + {execute.Time(80), 3.0, "a", "y"}, + {execute.Time(90), 10.0, "a", "x"}, + }, + }, executetest.UnlimitedAllocator), + want: [][]int{{ + 1, + 3, + 5, + 7, + 9, + }}, + }, + { + fromor: &functions.SampleSelector{ + N: 3, + Pos: 0, + }, + name: "every-third-0", + data: execute.CopyBlock(&executetest.Block{ + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(0), 7.0, "a", "y"}, + {execute.Time(10), 5.0, "a", "x"}, + {execute.Time(20), 9.0, "a", "y"}, + {execute.Time(30), 4.0, "a", "x"}, + {execute.Time(40), 6.0, "a", "y"}, + {execute.Time(50), 8.0, "a", "x"}, + {execute.Time(60), 1.0, "a", "y"}, + {execute.Time(70), 2.0, "a", "x"}, + {execute.Time(80), 3.0, "a", "y"}, + {execute.Time(90), 10.0, "a", "x"}, + }, + }, executetest.UnlimitedAllocator), + want: [][]int{{ + 0, + 3, + 6, + 9, + 
}}, + }, + { + fromor: &functions.SampleSelector{ + N: 3, + Pos: 1, + }, + name: "every-third-1", + data: execute.CopyBlock(&executetest.Block{ + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(0), 7.0, "a", "y"}, + {execute.Time(10), 5.0, "a", "x"}, + {execute.Time(20), 9.0, "a", "y"}, + {execute.Time(30), 4.0, "a", "x"}, + {execute.Time(40), 6.0, "a", "y"}, + {execute.Time(50), 8.0, "a", "x"}, + {execute.Time(60), 1.0, "a", "y"}, + {execute.Time(70), 2.0, "a", "x"}, + {execute.Time(80), 3.0, "a", "y"}, + {execute.Time(90), 10.0, "a", "x"}, + }, + }, executetest.UnlimitedAllocator), + want: [][]int{{ + 1, + 4, + 7, + }}, + }, + { + fromor: &functions.SampleSelector{ + N: 3, + Pos: 2, + }, + name: "every-third-2", + data: execute.CopyBlock(&executetest.Block{ + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(0), 7.0, "a", "y"}, + {execute.Time(10), 5.0, "a", "x"}, + {execute.Time(20), 9.0, "a", "y"}, + {execute.Time(30), 4.0, "a", "x"}, + {execute.Time(40), 6.0, "a", "y"}, + {execute.Time(50), 8.0, "a", "x"}, + {execute.Time(60), 1.0, "a", "y"}, + {execute.Time(70), 2.0, "a", "x"}, + {execute.Time(80), 3.0, "a", "y"}, + {execute.Time(90), 10.0, "a", "x"}, + }, + }, executetest.UnlimitedAllocator), + want: [][]int{{ + 2, + 5, + 8, + }}, + }, + { + fromor: &functions.SampleSelector{ + N: 3, + Pos: 2, + }, + name: "every-third-2 
in separate Do calls", + data: &executetest.Block{ + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(0), 7.0, "a", "y"}, + {execute.Time(10), 5.0, "a", "x"}, + {execute.Time(20), 9.0, "a", "y"}, + {execute.Time(30), 4.0, "a", "x"}, + {execute.Time(40), 6.0, "a", "y"}, + {execute.Time(50), 8.0, "a", "x"}, + {execute.Time(60), 1.0, "a", "y"}, + {execute.Time(70), 2.0, "a", "x"}, + {execute.Time(80), 3.0, "a", "y"}, + {execute.Time(90), 10.0, "a", "x"}, + }, + }, + want: [][]int{ + nil, + nil, + {0}, + nil, + nil, + {0}, + nil, + nil, + {0}, + nil, + }, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + executetest.IndexSelectorFuncTestHelper( + t, + tc.fromor, + tc.data, + tc.want, + ) + }) + } +} + +func BenchmarkSample(b *testing.B) { + ss := &functions.SampleSelector{ + N: 10, + Pos: 0, + } + executetest.IndexSelectorFuncBenchmarkHelper(b, ss, NormalBlock) +} diff --git a/vendor/github.com/influxdata/ifql/functions/set.go b/vendor/github.com/influxdata/ifql/functions/set.go new file mode 100644 index 000000000..36a55d0b3 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/set.go @@ -0,0 +1,211 @@ +package functions + +import ( + "fmt" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" +) + +const SetKind = "set" + +type SetOpSpec struct { + Key string `json:"key"` + Value string `json:"value"` +} + +var setSignature = query.DefaultFunctionSignature() + +func init() { + setSignature.Params["key"] = semantic.String + setSignature.Params["value"] = semantic.String + + 
query.RegisterFunction(SetKind, createSetOpSpec, setSignature) + query.RegisterOpSpec(SetKind, newSetOp) + plan.RegisterProcedureSpec(SetKind, newSetProcedure, SetKind) + execute.RegisterTransformation(SetKind, createSetTransformation) +} + +func createSetOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + spec := new(SetOpSpec) + key, err := args.GetRequiredString("key") + if err != nil { + return nil, err + } + spec.Key = key + + value, err := args.GetRequiredString("value") + if err != nil { + return nil, err + } + spec.Value = value + + return spec, nil +} + +func newSetOp() query.OperationSpec { + return new(SetOpSpec) +} + +func (s *SetOpSpec) Kind() query.OperationKind { + return SetKind +} + +type SetProcedureSpec struct { + Key, Value string +} + +func newSetProcedure(qs query.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { + s, ok := qs.(*SetOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + p := &SetProcedureSpec{ + Key: s.Key, + Value: s.Value, + } + return p, nil +} + +func (s *SetProcedureSpec) Kind() plan.ProcedureKind { + return SetKind +} +func (s *SetProcedureSpec) Copy() plan.ProcedureSpec { + ns := new(SetProcedureSpec) + ns.Key = s.Key + ns.Value = s.Value + return ns +} + +func createSetTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + s, ok := spec.(*SetProcedureSpec) + if !ok { + return nil, nil, fmt.Errorf("invalid spec type %T", spec) + } + cache := execute.NewBlockBuilderCache(a.Allocator()) + d := execute.NewDataset(id, mode, cache) + t := NewSetTransformation(d, cache, s) + return t, d, nil +} + +type setTransformation struct { + d execute.Dataset + cache execute.BlockBuilderCache + + key, value string +} + +func NewSetTransformation( + d execute.Dataset, + 
cache execute.BlockBuilderCache, + spec *SetProcedureSpec, +) execute.Transformation { + return &setTransformation{ + d: d, + cache: cache, + key: spec.Key, + value: spec.Value, + } +} + +func (t *setTransformation) RetractBlock(id execute.DatasetID, meta execute.BlockMetadata) error { + // TODO + return nil +} + +func (t *setTransformation) Process(id execute.DatasetID, b execute.Block) error { + tags := b.Tags() + isCommon := false + if v, ok := tags[t.key]; ok { + isCommon = true + if v != t.value { + tags = tags.Copy() + tags[t.key] = t.value + } + } + builder, new := t.cache.BlockBuilder(blockMetadata{ + tags: tags, + bounds: b.Bounds(), + }) + if new { + // Add columns + found := false + cols := b.Cols() + for j, c := range cols { + if c.Label == t.key { + found = true + } + builder.AddCol(c) + if c.IsTag() && c.Common { + builder.SetCommonString(j, tags[c.Label]) + } + } + if !found { + builder.AddCol(execute.ColMeta{ + Label: t.key, + Type: execute.TString, + Kind: execute.TagColKind, + Common: isCommon, + }) + } + } + cols := builder.Cols() + setIdx := 0 + for j, c := range cols { + if c.Label == t.key { + setIdx = j + break + } + } + timeIdx := execute.TimeIdx(cols) + b.Col(timeIdx).DoTime(func(ts []execute.Time, rr execute.RowReader) { + builder.AppendTimes(timeIdx, ts) + for j, c := range cols { + if j == timeIdx || c.Common { + continue + } + for i := range ts { + switch c.Type { + case execute.TBool: + builder.AppendBool(j, rr.AtBool(i, j)) + case execute.TInt: + builder.AppendInt(j, rr.AtInt(i, j)) + case execute.TUInt: + builder.AppendUInt(j, rr.AtUInt(i, j)) + case execute.TFloat: + builder.AppendFloat(j, rr.AtFloat(i, j)) + case execute.TString: + // Set new value + var v string + if j == setIdx { + v = t.value + } else { + v = rr.AtString(i, j) + } + builder.AppendString(j, v) + case execute.TTime: + builder.AppendTime(j, rr.AtTime(i, j)) + default: + execute.PanicUnknownType(c.Type) + } + } + } + }) + return nil +} + +func (t *setTransformation) 
UpdateWatermark(id execute.DatasetID, mark execute.Time) error { + return t.d.UpdateWatermark(mark) +} +func (t *setTransformation) UpdateProcessingTime(id execute.DatasetID, pt execute.Time) error { + return t.d.UpdateProcessingTime(pt) +} +func (t *setTransformation) Finish(id execute.DatasetID, err error) { + t.d.Finish(err) +} diff --git a/vendor/github.com/influxdata/ifql/functions/set_test.go b/vendor/github.com/influxdata/ifql/functions/set_test.go new file mode 100644 index 000000000..b21b7ba6d --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/set_test.go @@ -0,0 +1,288 @@ +package functions_test + +import ( + "testing" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestSetOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"set","kind":"set","spec":{"key":"t1","value":"v1"}}`) + op := &query.Operation{ + ID: "set", + Spec: &functions.SetOpSpec{ + Key: "t1", + Value: "v1", + }, + } + + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestSet_Process(t *testing.T) { + testCases := []struct { + name string + spec *functions.SetProcedureSpec + data []execute.Block + want []*executetest.Block + }{ + { + name: "new col", + spec: &functions.SetProcedureSpec{ + Key: "t1", + Value: "bob", + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0}, + {execute.Time(2), 1.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: 
"_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0, "bob"}, + {execute.Time(2), 1.0, "bob"}, + }, + }}, + }, + { + name: "replace col", + spec: &functions.SetProcedureSpec{ + Key: "t1", + Value: "bob", + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0, "jim"}, + {execute.Time(2), 2.0, "sue"}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0, "bob"}, + {execute.Time(2), 2.0, "bob"}, + }, + }}, + }, + { + name: "replace common col", + spec: &functions.SetProcedureSpec{ + Key: "t1", + Value: "bob", + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0, "alice", "a"}, + {execute.Time(2), 1.0, "alice", "b"}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, 
Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0, "bob", "a"}, + {execute.Time(2), 1.0, "bob", "b"}, + }, + }}, + }, + { + name: "replace common col, merging blocks", + spec: &functions.SetProcedureSpec{ + Key: "t1", + Value: "bob", + }, + data: []execute.Block{ + &executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0, "alice"}, + {execute.Time(2), 1.0, "alice"}, + }, + }, + &executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(3), 3.0, "sue"}, + {execute.Time(4), 5.0, "sue"}, + }, + }, + }, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0, "bob"}, + {execute.Time(2), 1.0, "bob"}, + {execute.Time(3), 3.0, "bob"}, + {execute.Time(4), 5.0, "bob"}, + }, + }}, + }, + { + name: "new common col, multiple blocks", + spec: &functions.SetProcedureSpec{ + Key: "t2", + Value: 
"bob", + }, + data: []execute.Block{ + &executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0, "alice"}, + {execute.Time(2), 1.0, "alice"}, + }, + }, + &executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(3), 3.0, "sue"}, + {execute.Time(4), 5.0, "sue"}, + }, + }, + }, + want: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0, "alice", "bob"}, + {execute.Time(2), 1.0, "alice", "bob"}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind}, + }, + Data: [][]interface{}{ + {execute.Time(3), 3.0, "sue", "bob"}, + {execute.Time(4), 5.0, "sue", "bob"}, + }, + }, + }, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, 
func(t *testing.T) { + executetest.ProcessTestHelper( + t, + tc.data, + tc.want, + func(d execute.Dataset, c execute.BlockBuilderCache) execute.Transformation { + return functions.NewSetTransformation(d, c, tc.spec) + }, + ) + }) + } +} diff --git a/vendor/github.com/influxdata/ifql/functions/shift.go b/vendor/github.com/influxdata/ifql/functions/shift.go new file mode 100644 index 000000000..f071166e1 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/shift.go @@ -0,0 +1,166 @@ +package functions + +import ( + "fmt" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" +) + +const ShiftKind = "shift" + +type ShiftOpSpec struct { + Shift query.Duration `json:"shift"` +} + +var shiftSignature = query.DefaultFunctionSignature() + +func init() { + shiftSignature.Params["shift"] = semantic.Duration + + query.RegisterFunction(ShiftKind, createShiftOpSpec, shiftSignature) + query.RegisterOpSpec(ShiftKind, newShiftOp) + plan.RegisterProcedureSpec(ShiftKind, newShiftProcedure, ShiftKind) + execute.RegisterTransformation(ShiftKind, createShiftTransformation) +} + +func createShiftOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + spec := new(ShiftOpSpec) + + if shift, err := args.GetRequiredDuration("shift"); err != nil { + return nil, err + } else { + spec.Shift = shift + } + + return spec, nil +} + +func newShiftOp() query.OperationSpec { + return new(ShiftOpSpec) +} + +func (s *ShiftOpSpec) Kind() query.OperationKind { + return ShiftKind +} + +type ShiftProcedureSpec struct { + Shift query.Duration +} + +func newShiftProcedure(qs query.OperationSpec, _ plan.Administration) (plan.ProcedureSpec, error) { + if spec, ok := qs.(*ShiftOpSpec); ok { + return &ShiftProcedureSpec{Shift: spec.Shift}, nil + } + + return nil, fmt.Errorf("invalid spec 
type %T", qs) +} + +func (s *ShiftProcedureSpec) Kind() plan.ProcedureKind { + return ShiftKind +} + +func (s *ShiftProcedureSpec) Copy() plan.ProcedureSpec { + return &ShiftProcedureSpec{Shift: s.Shift} +} + +func createShiftTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + s, ok := spec.(*ShiftProcedureSpec) + if !ok { + return nil, nil, fmt.Errorf("invalid spec type %T", spec) + } + cache := execute.NewBlockBuilderCache(a.Allocator()) + d := execute.NewDataset(id, mode, cache) + t := NewShiftTransformation(d, cache, s) + return t, d, nil +} + +type shiftTransformation struct { + d execute.Dataset + cache execute.BlockBuilderCache + shift execute.Duration +} + +func NewShiftTransformation(d execute.Dataset, cache execute.BlockBuilderCache, spec *ShiftProcedureSpec) *shiftTransformation { + return &shiftTransformation{ + d: d, + cache: cache, + shift: execute.Duration(spec.Shift), + } +} + +func (t *shiftTransformation) RetractBlock(id execute.DatasetID, meta execute.BlockMetadata) error { + return t.d.RetractBlock(execute.ToBlockKey(meta)) +} + +func (t *shiftTransformation) Process(id execute.DatasetID, b execute.Block) error { + builder, nw := t.cache.BlockBuilder(blockMetadata{ + tags: b.Tags(), + bounds: b.Bounds().Shift(t.shift), + }) + + if nw { + execute.AddBlockCols(b, builder) + } + + var k []execute.Time + cols := builder.Cols() + timeIdx := execute.TimeIdx(cols) + b.Times().DoTime(func(ts []execute.Time, rr execute.RowReader) { + if cap(k) < len(ts) { + k = make([]execute.Time, len(ts)) + } + k = k[:len(ts)] + + for i := range ts { + k[i] = ts[i].Add(t.shift) + } + + builder.AppendTimes(timeIdx, k) + for j, c := range cols { + if j == timeIdx || c.Common { + continue + } + for i := range ts { + switch c.Type { + case execute.TBool: + builder.AppendBool(j, rr.AtBool(i, j)) + case execute.TInt: + builder.AppendInt(j, rr.AtInt(i, j)) + 
case execute.TUInt: + builder.AppendUInt(j, rr.AtUInt(i, j)) + case execute.TFloat: + builder.AppendFloat(j, rr.AtFloat(i, j)) + case execute.TString: + builder.AppendString(j, rr.AtString(i, j)) + case execute.TTime: + builder.AppendTime(j, rr.AtTime(i, j)) + default: + execute.PanicUnknownType(c.Type) + } + } + } + }) + + return nil +} + +func (t *shiftTransformation) UpdateWatermark(id execute.DatasetID, mark execute.Time) error { + return t.d.UpdateWatermark(mark) +} + +func (t *shiftTransformation) UpdateProcessingTime(id execute.DatasetID, pt execute.Time) error { + return t.d.UpdateProcessingTime(pt) +} + +func (t *shiftTransformation) Finish(id execute.DatasetID, err error) { + t.d.Finish(err) +} + +func (t *shiftTransformation) SetParents(ids []execute.DatasetID) {} diff --git a/vendor/github.com/influxdata/ifql/functions/shift_test.go b/vendor/github.com/influxdata/ifql/functions/shift_test.go new file mode 100644 index 000000000..57eef8e17 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/shift_test.go @@ -0,0 +1,119 @@ +package functions_test + +import ( + "testing" + "time" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestShiftOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"shift","kind":"shift","spec":{"shift":"1h"}}`) + op := &query.Operation{ + ID: "shift", + Spec: &functions.ShiftOpSpec{ + Shift: query.Duration(1 * time.Hour), + }, + } + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestShift_Process(t *testing.T) { + cols := []execute.ColMeta{ + {Label: execute.TimeColLabel, Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: execute.DefaultValueColLabel, Type: execute.TFloat, Kind: execute.ValueColKind}, + } + + testCases := []struct { + name string + spec *functions.ShiftProcedureSpec + data 
[]execute.Block + want []*executetest.Block + }{ + { + name: "one block", + spec: &functions.ShiftProcedureSpec{ + Shift: query.Duration(1), + }, + data: []execute.Block{ + &executetest.Block{ + Bnds: execute.Bounds{Start: 1, Stop: 3}, + ColMeta: cols, + Data: [][]interface{}{ + {execute.Time(1), 2.0}, + {execute.Time(2), 1.0}, + }, + }, + }, + want: []*executetest.Block{ + { + Bnds: execute.Bounds{Start: 2, Stop: 4}, + ColMeta: cols, + Data: [][]interface{}{ + {execute.Time(2), 2.0}, + {execute.Time(3), 1.0}, + }, + }, + }, + }, + { + name: "multiple blocks", + spec: &functions.ShiftProcedureSpec{ + Shift: query.Duration(2), + }, + data: []execute.Block{ + &executetest.Block{ + Bnds: execute.Bounds{Start: 1, Stop: 3}, + ColMeta: cols, + Data: [][]interface{}{ + {execute.Time(1), 2.0}, + {execute.Time(2), 1.0}, + }, + }, + &executetest.Block{ + Bnds: execute.Bounds{Start: 3, Stop: 5}, + ColMeta: cols, + Data: [][]interface{}{ + {execute.Time(3), 3.0}, + {execute.Time(4), 4.0}, + }, + }, + }, + want: []*executetest.Block{ + { + Bnds: execute.Bounds{Start: 3, Stop: 5}, + ColMeta: cols, + Data: [][]interface{}{ + {execute.Time(3), 2.0}, + {execute.Time(4), 1.0}, + }, + }, + { + Bnds: execute.Bounds{Start: 5, Stop: 7}, + ColMeta: cols, + Data: [][]interface{}{ + {execute.Time(5), 3.0}, + {execute.Time(6), 4.0}, + }, + }, + }, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + executetest.ProcessTestHelper( + t, + tc.data, + tc.want, + func(d execute.Dataset, c execute.BlockBuilderCache) execute.Transformation { + return functions.NewShiftTransformation(d, c, tc.spec) + }, + ) + }) + } +} diff --git a/vendor/github.com/influxdata/ifql/functions/skew.go b/vendor/github.com/influxdata/ifql/functions/skew.go new file mode 100644 index 000000000..35cdb657f --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/skew.go @@ -0,0 +1,138 @@ +package functions + +import ( + "math" + + "github.com/influxdata/ifql/query" + 
"github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" +) + +const SkewKind = "skew" + +type SkewOpSpec struct { +} + +var skewSignature = query.DefaultFunctionSignature() + +func init() { + query.RegisterFunction(SkewKind, createSkewOpSpec, skewSignature) + query.RegisterOpSpec(SkewKind, newSkewOp) + plan.RegisterProcedureSpec(SkewKind, newSkewProcedure, SkewKind) + execute.RegisterTransformation(SkewKind, createSkewTransformation) +} +func createSkewOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + return new(SkewOpSpec), nil +} + +func newSkewOp() query.OperationSpec { + return new(SkewOpSpec) +} + +func (s *SkewOpSpec) Kind() query.OperationKind { + return SkewKind +} + +type SkewProcedureSpec struct { +} + +func newSkewProcedure(query.OperationSpec, plan.Administration) (plan.ProcedureSpec, error) { + return new(SkewProcedureSpec), nil +} + +func (s *SkewProcedureSpec) Kind() plan.ProcedureKind { + return SkewKind +} +func (s *SkewProcedureSpec) Copy() plan.ProcedureSpec { + return new(SkewProcedureSpec) +} + +type SkewAgg struct { + n, m1, m2, m3 float64 +} + +func createSkewTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + t, d := execute.NewAggregateTransformationAndDataset(id, mode, a.Bounds(), new(SkewAgg), a.Allocator()) + return t, d, nil +} + +func (a *SkewAgg) reset() { + a.n = 0 + a.m1 = 0 + a.m2 = 0 + a.m3 = 0 +} +func (a *SkewAgg) NewBoolAgg() execute.DoBoolAgg { + return nil +} + +func (a *SkewAgg) NewIntAgg() execute.DoIntAgg { + a.reset() + return a +} + +func (a *SkewAgg) NewUIntAgg() execute.DoUIntAgg { + a.reset() + return a +} + +func (a *SkewAgg) NewFloatAgg() execute.DoFloatAgg { + a.reset() + return a +} + +func (a *SkewAgg) NewStringAgg() execute.DoStringAgg { + return nil +} + 
+func (a *SkewAgg) DoInt(vs []int64) { + for _, v := range vs { + n0 := a.n + a.n++ + // TODO handle overflow + delta := float64(v) - a.m1 + deltaN := delta / a.n + t := delta * deltaN * n0 + a.m3 += t*deltaN*(a.n-2) - 3*deltaN*a.m2 + a.m2 += t + a.m1 += deltaN + } +} +func (a *SkewAgg) DoUInt(vs []uint64) { + for _, v := range vs { + n0 := a.n + a.n++ + // TODO handle overflow + delta := float64(v) - a.m1 + deltaN := delta / a.n + t := delta * deltaN * n0 + a.m3 += t*deltaN*(a.n-2) - 3*deltaN*a.m2 + a.m2 += t + a.m1 += deltaN + } +} +func (a *SkewAgg) DoFloat(vs []float64) { + for _, v := range vs { + n0 := a.n + a.n++ + delta := v - a.m1 + deltaN := delta / a.n + t := delta * deltaN * n0 + a.m3 += t*deltaN*(a.n-2) - 3*deltaN*a.m2 + a.m2 += t + a.m1 += deltaN + } +} +func (a *SkewAgg) Type() execute.DataType { + return execute.TFloat +} +func (a *SkewAgg) ValueFloat() float64 { + if a.n < 2 { + return math.NaN() + } + return math.Sqrt(a.n) * a.m3 / math.Pow(a.m2, 1.5) +} diff --git a/vendor/github.com/influxdata/ifql/functions/skew_test.go b/vendor/github.com/influxdata/ifql/functions/skew_test.go new file mode 100644 index 000000000..a5757a15a --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/skew_test.go @@ -0,0 +1,75 @@ +package functions_test + +import ( + "math" + "testing" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestSkewOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"skew","kind":"skew"}`) + op := &query.Operation{ + ID: "skew", + Spec: &functions.SkewOpSpec{}, + } + + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestSkew_Process(t *testing.T) { + testCases := []struct { + name string + data []float64 + want float64 + }{ + { + name: "zero", + data: []float64{1, 2, 3}, + want: 0.0, + }, + { + name: "nonzero", + data: []float64{2, 2, 3}, + want: 
0.7071067811865475, + }, + { + name: "nonzero", + data: []float64{2, 2, 3, 4}, + want: 0.49338220021815854, + }, + { + name: "NaN short", + data: []float64{1}, + want: math.NaN(), + }, + { + name: "NaN divide by zero", + data: []float64{1, 1, 1}, + want: math.NaN(), + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + executetest.AggFuncTestHelper( + t, + new(functions.SkewAgg), + tc.data, + tc.want, + ) + }) + } +} + +func BenchmarkSkew(b *testing.B) { + executetest.AggFuncBenchmarkHelper( + b, + new(functions.SkewAgg), + NormalData, + 0.0032200673020400935, + ) +} diff --git a/vendor/github.com/influxdata/ifql/functions/sort.go b/vendor/github.com/influxdata/ifql/functions/sort.go new file mode 100644 index 000000000..9b670890c --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/sort.go @@ -0,0 +1,157 @@ +package functions + +import ( + "fmt" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" +) + +const SortKind = "sort" + +type SortOpSpec struct { + Cols []string `json:"cols"` + Desc bool `json:"desc"` +} + +var sortSignature = query.DefaultFunctionSignature() + +func init() { + sortSignature.Params["cols"] = semantic.NewArrayType(semantic.String) + + query.RegisterFunction(SortKind, createSortOpSpec, sortSignature) + query.RegisterOpSpec(SortKind, newSortOp) + plan.RegisterProcedureSpec(SortKind, newSortProcedure, SortKind) + execute.RegisterTransformation(SortKind, createSortTransformation) +} + +func createSortOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + spec := new(SortOpSpec) + + if array, ok, err := args.GetArray("cols", semantic.String); err != nil { + return nil, err + } else if ok { + spec.Cols = array.AsStrings() + } else { + //Default behavior to sort by value + 
spec.Cols = []string{execute.DefaultValueColLabel} + } + + if desc, ok, err := args.GetBool("desc"); err != nil { + return nil, err + } else if ok { + spec.Desc = desc + } + + return spec, nil +} + +func newSortOp() query.OperationSpec { + return new(SortOpSpec) +} + +func (s *SortOpSpec) Kind() query.OperationKind { + return SortKind +} + +type SortProcedureSpec struct { + Cols []string + Desc bool +} + +func newSortProcedure(qs query.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { + spec, ok := qs.(*SortOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + + return &SortProcedureSpec{ + Cols: spec.Cols, + Desc: spec.Desc, + }, nil +} + +func (s *SortProcedureSpec) Kind() plan.ProcedureKind { + return SortKind +} +func (s *SortProcedureSpec) Copy() plan.ProcedureSpec { + ns := new(SortProcedureSpec) + + ns.Cols = make([]string, len(s.Cols)) + copy(ns.Cols, s.Cols) + + ns.Desc = s.Desc + return ns +} + +func createSortTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + s, ok := spec.(*SortProcedureSpec) + if !ok { + return nil, nil, fmt.Errorf("invalid spec type %T", spec) + } + cache := execute.NewBlockBuilderCache(a.Allocator()) + d := execute.NewDataset(id, mode, cache) + t := NewSortTransformation(d, cache, s) + return t, d, nil +} + +type sortTransformation struct { + d execute.Dataset + cache execute.BlockBuilderCache + + cols []string + desc bool + + colMap []int +} + +func NewSortTransformation(d execute.Dataset, cache execute.BlockBuilderCache, spec *SortProcedureSpec) *sortTransformation { + return &sortTransformation{ + d: d, + cache: cache, + cols: spec.Cols, + desc: spec.Desc, + } +} + +func (t *sortTransformation) RetractBlock(id execute.DatasetID, meta execute.BlockMetadata) error { + return t.d.RetractBlock(execute.ToBlockKey(meta)) +} + +func (t *sortTransformation) Process(id 
execute.DatasetID, b execute.Block) error { + builder, new := t.cache.BlockBuilder(b) + if new { + execute.AddBlockCols(b, builder) + } + + ncols := builder.NCols() + if cap(t.colMap) < ncols { + t.colMap = make([]int, ncols) + for j := range t.colMap { + t.colMap[j] = j + } + } else { + t.colMap = t.colMap[:ncols] + } + + execute.AppendBlock(b, builder, t.colMap) + + builder.Sort(t.cols, t.desc) + return nil +} + +func (t *sortTransformation) UpdateWatermark(id execute.DatasetID, mark execute.Time) error { + return t.d.UpdateWatermark(mark) +} +func (t *sortTransformation) UpdateProcessingTime(id execute.DatasetID, pt execute.Time) error { + return t.d.UpdateProcessingTime(pt) +} +func (t *sortTransformation) Finish(id execute.DatasetID, err error) { + t.d.Finish(err) +} diff --git a/vendor/github.com/influxdata/ifql/functions/sort_test.go b/vendor/github.com/influxdata/ifql/functions/sort_test.go new file mode 100644 index 000000000..fac26bc35 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/sort_test.go @@ -0,0 +1,366 @@ +package functions_test + +import ( + "testing" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestSortOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"sort","kind":"sort","spec":{"cols":["t1","t2"],"desc":true}}`) + op := &query.Operation{ + ID: "sort", + Spec: &functions.SortOpSpec{ + Cols: []string{"t1", "t2"}, + Desc: true, + }, + } + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestSort_PassThrough(t *testing.T) { + executetest.TransformationPassThroughTestHelper(t, func(d execute.Dataset, c execute.BlockBuilderCache) execute.Transformation { + s := functions.NewSortTransformation( + d, + c, + &functions.SortProcedureSpec{ + Cols: []string{"_value"}, + Desc: true, + }, + ) + return s + }) +} + +func 
TestSort_Process(t *testing.T) { + testCases := []struct { + name string + spec *functions.SortProcedureSpec + data []execute.Block + want []*executetest.Block + }{ + { + name: "one block", + spec: &functions.SortProcedureSpec{ + Cols: []string{"_value"}, + Desc: false, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0}, + {execute.Time(2), 1.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(2), 1.0}, + {execute.Time(1), 2.0}, + }, + }}, + }, + { + name: "one block descending", + spec: &functions.SortProcedureSpec{ + Cols: []string{"_value"}, + Desc: true, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0}, + {execute.Time(2), 2.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(2), 2.0}, + {execute.Time(1), 1.0}, + }, + }}, + }, + { + name: "one block multiple columns", + spec: &functions.SortProcedureSpec{ + Cols: []string{"_value", "time"}, + Desc: false, + }, + data: []execute.Block{&executetest.Block{ + 
Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(2), 2.0}, + {execute.Time(1), 1.0}, + {execute.Time(2), 1.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0}, + {execute.Time(2), 1.0}, + {execute.Time(2), 2.0}, + }, + }}, + }, + { + name: "one block multiple columns descending", + spec: &functions.SortProcedureSpec{ + Cols: []string{"_value", "time"}, + Desc: true, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0}, + {execute.Time(2), 1.0}, + {execute.Time(2), 2.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(2), 2.0}, + {execute.Time(2), 1.0}, + {execute.Time(1), 1.0}, + }, + }}, + }, + { + name: "multiple blocks", + spec: &functions.SortProcedureSpec{ + Cols: []string{"_value"}, + Desc: false, + }, + data: []execute.Block{ + &executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: 
execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 3.0}, + {execute.Time(2), 2.0}, + {execute.Time(2), 1.0}, + }, + }, + &executetest.Block{ + Bnds: execute.Bounds{ + Start: 3, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(3), 3.0}, + {execute.Time(3), 2.0}, + {execute.Time(4), 1.0}, + }, + }, + }, + want: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(2), 1.0}, + {execute.Time(2), 2.0}, + {execute.Time(1), 3.0}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 3, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(4), 1.0}, + {execute.Time(3), 2.0}, + {execute.Time(3), 3.0}, + }, + }, + }, + }, + { + name: "one block multiple columns with tags", + spec: &functions.SortProcedureSpec{ + Cols: []string{"_field", "_value"}, + Desc: false, + }, + data: []execute.Block{ + &executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "host", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "_field", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0, "hostA", "F1"}, + {execute.Time(1), 2.0, "hostA", "F2"}, + {execute.Time(1), 3.0, "hostA", "F3"}, + {execute.Time(2), 4.0, 
"hostA", "F1"}, + {execute.Time(2), 5.0, "hostA", "F2"}, + {execute.Time(2), 6.0, "hostA", "F3"}, + }, + }, + &executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "host", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "_field", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0, "hostB", "F1"}, + {execute.Time(1), 2.0, "hostB", "F2"}, + {execute.Time(1), 3.0, "hostB", "F3"}, + {execute.Time(2), 4.0, "hostB", "F1"}, + {execute.Time(2), 5.0, "hostB", "F2"}, + {execute.Time(2), 6.0, "hostB", "F3"}, + }, + }, + }, + want: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "host", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "_field", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0, "hostA", "F1"}, + {execute.Time(2), 4.0, "hostA", "F1"}, + {execute.Time(1), 2.0, "hostA", "F2"}, + {execute.Time(2), 5.0, "hostA", "F2"}, + {execute.Time(1), 3.0, "hostA", "F3"}, + {execute.Time(2), 6.0, "hostA", "F3"}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "host", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "_field", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(1), 1.0, "hostB", "F1"}, + {execute.Time(2), 
4.0, "hostB", "F1"}, + {execute.Time(1), 2.0, "hostB", "F2"}, + {execute.Time(2), 5.0, "hostB", "F2"}, + {execute.Time(1), 3.0, "hostB", "F3"}, + {execute.Time(2), 6.0, "hostB", "F3"}, + }, + }, + }, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + executetest.ProcessTestHelper( + t, + tc.data, + tc.want, + func(d execute.Dataset, c execute.BlockBuilderCache) execute.Transformation { + return functions.NewSortTransformation(d, c, tc.spec) + }, + ) + }) + } +} diff --git a/vendor/github.com/influxdata/ifql/functions/spread.go b/vendor/github.com/influxdata/ifql/functions/spread.go new file mode 100644 index 000000000..b8b5b65fe --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/spread.go @@ -0,0 +1,176 @@ +package functions + +import ( + "fmt" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" +) + +// SpreadKind is the registration name for ifql, query, plan, and execution. +const SpreadKind = "spread" + +func init() { + query.RegisterFunction(SpreadKind, createSpreadOpSpec, semantic.FunctionSignature{}) + query.RegisterOpSpec(SpreadKind, newSpreadOp) + plan.RegisterProcedureSpec(SpreadKind, newSpreadProcedure, SpreadKind) + execute.RegisterTransformation(SpreadKind, createSpreadTransformation) +} + +func createSpreadOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + return new(SpreadOpSpec), nil +} + +func newSpreadOp() query.OperationSpec { + return new(SpreadOpSpec) +} + +// SpreadOpSpec defines the required arguments for IFQL. Currently, +// spread takes no arguments. 
+type SpreadOpSpec struct{} + +// Kind is used to lookup createSpreadOpSpec producing SpreadOpSpec +func (s *SpreadOpSpec) Kind() query.OperationKind { + return SpreadKind +} + +func newSpreadProcedure(qs query.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { + _, ok := qs.(*SpreadOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + return &SpreadProcedureSpec{}, nil +} + +// SpreadProcedureSpec is created when mapping from SpreadOpSpec.Kind +// to a CreateProcedureSpec. +type SpreadProcedureSpec struct{} + +// Kind is used to lookup CreateTransformation producing SpreadAgg +func (s *SpreadProcedureSpec) Kind() plan.ProcedureKind { + return SpreadKind +} +func (s *SpreadProcedureSpec) Copy() plan.ProcedureSpec { + return new(SpreadProcedureSpec) +} + +func createSpreadTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + t, d := execute.NewAggregateTransformationAndDataset(id, mode, a.Bounds(), new(SpreadAgg), a.Allocator()) + return t, d, nil +} + +// SpreadAgg finds the difference between the max and min values a block +type SpreadAgg struct { + minSet bool + maxSet bool +} +type SpreadIntAgg struct { + SpreadAgg + min int64 + max int64 +} +type SpreadUIntAgg struct { + SpreadAgg + min uint64 + max uint64 +} +type SpreadFloatAgg struct { + SpreadAgg + min float64 + max float64 +} + +func (a *SpreadAgg) NewBoolAgg() execute.DoBoolAgg { + return nil +} + +func (a *SpreadAgg) NewIntAgg() execute.DoIntAgg { + return new(SpreadIntAgg) +} + +func (a *SpreadAgg) NewUIntAgg() execute.DoUIntAgg { + return new(SpreadUIntAgg) +} + +func (a *SpreadAgg) NewFloatAgg() execute.DoFloatAgg { + return new(SpreadFloatAgg) +} + +func (a *SpreadAgg) NewStringAgg() execute.DoStringAgg { + return nil +} + +// DoInt searches for the min and max value of the array and caches them in the aggregate +func (a *SpreadIntAgg) 
DoInt(vs []int64) { + for _, v := range vs { + if !a.minSet || v < a.min { + a.minSet = true + a.min = v + } + if !a.maxSet || v > a.max { + a.maxSet = true + a.max = v + } + } +} + +func (a *SpreadIntAgg) Type() execute.DataType { + return execute.TInt +} + +// Value returns the difference between max and min +func (a *SpreadIntAgg) ValueInt() int64 { + return a.max - a.min +} + +// Do searches for the min and max value of the array and caches them in the aggregate +func (a *SpreadUIntAgg) DoUInt(vs []uint64) { + for _, v := range vs { + if !a.minSet || v < a.min { + a.minSet = true + a.min = v + } + if !a.maxSet || v > a.max { + a.maxSet = true + a.max = v + } + } +} + +func (a *SpreadUIntAgg) Type() execute.DataType { + return execute.TUInt +} + +// Value returns the difference between max and min +func (a *SpreadUIntAgg) ValueUInt() uint64 { + return a.max - a.min +} + +// Do searches for the min and max value of the array and caches them in the aggregate +func (a *SpreadFloatAgg) DoFloat(vs []float64) { + for _, v := range vs { + if !a.minSet || v < a.min { + a.minSet = true + a.min = v + } + if !a.maxSet || v > a.max { + a.maxSet = true + a.max = v + } + } +} + +func (a *SpreadFloatAgg) Type() execute.DataType { + return execute.TFloat +} + +// Value returns the difference between max and min +func (a *SpreadFloatAgg) ValueFloat() float64 { + return a.max - a.min +} diff --git a/vendor/github.com/influxdata/ifql/functions/spread_test.go b/vendor/github.com/influxdata/ifql/functions/spread_test.go new file mode 100644 index 000000000..8ab3ade96 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/spread_test.go @@ -0,0 +1,41 @@ +package functions_test + +import ( + "testing" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestSpreadOperation_Marshaling(t *testing.T) { + data := 
[]byte(`{"id":"spread","kind":"spread"}`) + op := &query.Operation{ + ID: "spread", + Spec: &functions.SpreadOpSpec{}, + } + + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestSpread_Process(t *testing.T) { + agg := new(functions.SpreadAgg) + executetest.AggFuncTestHelper(t, + agg, + []float64{ + 0, 1, 2, 3, 4, + 5, 6, 7, 8, 9, + }, + float64(9), + ) +} + +func BenchmarkSpread(b *testing.B) { + executetest.AggFuncBenchmarkHelper( + b, + new(functions.SpreadAgg), + NormalData, + 28.227196461851847, + ) +} diff --git a/vendor/github.com/influxdata/ifql/functions/state_tracking.go b/vendor/github.com/influxdata/ifql/functions/state_tracking.go new file mode 100644 index 000000000..90b6e4bcd --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/state_tracking.go @@ -0,0 +1,302 @@ +package functions + +import ( + "fmt" + "log" + "time" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" + "github.com/pkg/errors" +) + +const StateTrackingKind = "stateTracking" + +type StateTrackingOpSpec struct { + Fn *semantic.FunctionExpression `json:"fn"` + CountLabel string `json:"count_label"` + DurationLabel string `json:"duration_label"` + DurationUnit query.Duration `json:"duration_unit"` +} + +var stateTrackingSignature = query.DefaultFunctionSignature() + +func init() { + stateTrackingSignature.Params["fn"] = semantic.Function + stateTrackingSignature.Params["countLabel"] = semantic.String + stateTrackingSignature.Params["durationLabel"] = semantic.String + stateTrackingSignature.Params["durationUnit"] = semantic.Duration + + query.RegisterFunction(StateTrackingKind, createStateTrackingOpSpec, stateTrackingSignature) + query.RegisterBuiltIn("state-tracking", stateTrackingBuiltin) + query.RegisterOpSpec(StateTrackingKind, newStateTrackingOp) + plan.RegisterProcedureSpec(StateTrackingKind, newStateTrackingProcedure, StateTrackingKind) + 
execute.RegisterTransformation(StateTrackingKind, createStateTrackingTransformation) +} + +var stateTrackingBuiltin = ` +// stateCount computes the number of consecutive records in a given state. +// The state is defined via the function fn. For each consecutive point for +// which the expression evaluates as true, the state count will be incremented +// When a point evaluates as false, the state count is reset. +// +// The state count will be added as an additional column to each record. If the +// expression evaluates as false, the value will be -1. If the expression +// generates an error during evaluation, the point is discarded, and does not +// affect the state count. +stateCount = (fn, label="stateCount", table=<-) => + stateTracking(table:table, countLabel:label, fn:fn) + +// stateDuration computes the duration of a given state. +// The state is defined via the function fn. For each consecutive point for +// which the expression evaluates as true, the state duration will be +// incremented by the duration between points. When a point evaluates as false, +// the state duration is reset. +// +// The state duration will be added as an additional column to each record. If the +// expression evaluates as false, the value will be -1. If the expression +// generates an error during evaluation, the point is discarded, and does not +// affect the state duration. +// +// Note that as the first point in the given state has no previous point, its +// state duration will be 0. +// +// The duration is represented as an integer in the units specified. 
+stateDuration = (fn, label="stateDuration", unit=1s, table=<-) => + stateTracking(table:table, durationLabel:label, fn:fn, durationUnit:unit) +` + +func createStateTrackingOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + f, err := args.GetRequiredFunction("fn") + if err != nil { + return nil, err + } + + resolved, err := f.Resolve() + if err != nil { + return nil, err + } + + spec := &StateTrackingOpSpec{ + Fn: resolved, + DurationUnit: query.Duration(time.Second), + } + + if label, ok, err := args.GetString("countLabel"); err != nil { + return nil, err + } else if ok { + spec.CountLabel = label + } + if label, ok, err := args.GetString("durationLabel"); err != nil { + return nil, err + } else if ok { + spec.DurationLabel = label + } + if unit, ok, err := args.GetDuration("durationUnit"); err != nil { + return nil, err + } else if ok { + spec.DurationUnit = unit + } + + if spec.DurationLabel != "" && spec.DurationUnit <= 0 { + return nil, errors.New("state tracking duration unit must be greater than zero") + } + return spec, nil +} + +func newStateTrackingOp() query.OperationSpec { + return new(StateTrackingOpSpec) +} + +func (s *StateTrackingOpSpec) Kind() query.OperationKind { + return StateTrackingKind +} + +type StateTrackingProcedureSpec struct { + Fn *semantic.FunctionExpression + CountLabel, + DurationLabel string + DurationUnit query.Duration +} + +func newStateTrackingProcedure(qs query.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { + spec, ok := qs.(*StateTrackingOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + + return &StateTrackingProcedureSpec{ + Fn: spec.Fn, + CountLabel: spec.CountLabel, + DurationLabel: spec.DurationLabel, + DurationUnit: spec.DurationUnit, + }, nil +} + +func (s *StateTrackingProcedureSpec) Kind() plan.ProcedureKind { + return StateTrackingKind +} +func (s 
*StateTrackingProcedureSpec) Copy() plan.ProcedureSpec { + ns := new(StateTrackingProcedureSpec) + *ns = *s + + ns.Fn = s.Fn.Copy().(*semantic.FunctionExpression) + + return ns +} + +func createStateTrackingTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + s, ok := spec.(*StateTrackingProcedureSpec) + if !ok { + return nil, nil, fmt.Errorf("invalid spec type %T", spec) + } + cache := execute.NewBlockBuilderCache(a.Allocator()) + d := execute.NewDataset(id, mode, cache) + t, err := NewStateTrackingTransformation(d, cache, s) + if err != nil { + return nil, nil, err + } + return t, d, nil +} + +type stateTrackingTransformation struct { + d execute.Dataset + cache execute.BlockBuilderCache + + fn *execute.RowPredicateFn + + countLabel, + durationLabel string + + durationUnit int64 + + colMap []int +} + +func NewStateTrackingTransformation(d execute.Dataset, cache execute.BlockBuilderCache, spec *StateTrackingProcedureSpec) (*stateTrackingTransformation, error) { + fn, err := execute.NewRowPredicateFn(spec.Fn) + if err != nil { + return nil, err + } + return &stateTrackingTransformation{ + d: d, + cache: cache, + fn: fn, + countLabel: spec.CountLabel, + durationLabel: spec.DurationLabel, + durationUnit: int64(spec.DurationUnit), + }, nil +} + +func (t *stateTrackingTransformation) RetractBlock(id execute.DatasetID, meta execute.BlockMetadata) error { + return t.d.RetractBlock(execute.ToBlockKey(meta)) +} + +func (t *stateTrackingTransformation) Process(id execute.DatasetID, b execute.Block) error { + // Prepare the functions for the column types. + cols := b.Cols() + err := t.fn.Prepare(cols) + if err != nil { + // TODO(nathanielc): Should we not fail the query for failed compilation? 
+ return err + } + + builder, new := t.cache.BlockBuilder(b) + if !new { + return fmt.Errorf("received duplicate block bounds: %v tags: %v", b.Bounds(), b.Tags()) + } + + // Add tag columns to builder + for _, c := range cols { + nj := builder.AddCol(c) + if c.Common { + builder.SetCommonString(nj, b.Tags()[c.Label]) + } + } + + l := len(cols) + if cap(t.colMap) < l { + t.colMap = make([]int, l) + for j := range t.colMap { + t.colMap[j] = j + } + } else { + t.colMap = t.colMap[:l] + } + + var countCol, durationCol = -1, -1 + + // Add new value colums + if t.countLabel != "" { + countCol = builder.AddCol(execute.ColMeta{ + Label: t.countLabel, + Type: execute.TInt, + Kind: execute.ValueColKind, + }) + } + if t.durationLabel != "" { + durationCol = builder.AddCol(execute.ColMeta{ + Label: t.durationLabel, + Type: execute.TInt, + Kind: execute.ValueColKind, + }) + } + + var ( + startTime execute.Time + count, + duration int64 + inState bool + ) + + // Append modified rows + b.Times().DoTime(func(ts []execute.Time, rr execute.RowReader) { + for i, tm := range ts { + match, err := t.fn.Eval(i, rr) + if err != nil { + log.Printf("failed to evaluate state count expression: %v", err) + continue + } + if !match { + count = -1 + duration = -1 + inState = false + } else { + if !inState { + startTime = tm + duration = 0 + count = 0 + inState = true + } + if t.durationUnit > 0 { + duration = int64(tm-startTime) / t.durationUnit + } + count++ + } + execute.AppendRowForCols(i, rr, builder, cols, t.colMap) + if countCol > 0 { + builder.AppendInt(countCol, count) + } + if durationCol > 0 { + builder.AppendInt(durationCol, duration) + } + } + }) + return nil +} + +func (t *stateTrackingTransformation) UpdateWatermark(id execute.DatasetID, mark execute.Time) error { + return t.d.UpdateWatermark(mark) +} +func (t *stateTrackingTransformation) UpdateProcessingTime(id execute.DatasetID, pt execute.Time) error { + return t.d.UpdateProcessingTime(pt) +} +func (t 
*stateTrackingTransformation) Finish(id execute.DatasetID, err error) { + t.d.Finish(err) +} diff --git a/vendor/github.com/influxdata/ifql/functions/state_tracking_test.go b/vendor/github.com/influxdata/ifql/functions/state_tracking_test.go new file mode 100644 index 000000000..125ae24e2 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/state_tracking_test.go @@ -0,0 +1,201 @@ +package functions_test + +import ( + "testing" + "time" + + "github.com/influxdata/ifql/ast" + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/querytest" + "github.com/influxdata/ifql/semantic" +) + +func TestStateTrackingOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"id","kind":"stateTracking","spec":{"count_label":"c","duration_label":"d","duration_unit":"1m"}}`) + op := &query.Operation{ + ID: "id", + Spec: &functions.StateTrackingOpSpec{ + CountLabel: "c", + DurationLabel: "d", + DurationUnit: query.Duration(time.Minute), + }, + } + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestStateTracking_Process(t *testing.T) { + gt5 := &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.BinaryExpression{ + Operator: ast.GreaterThanOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "r"}, + Property: "_value", + }, + Right: &semantic.FloatLiteral{Value: 5.0}, + }, + } + testCases := []struct { + name string + spec *functions.StateTrackingProcedureSpec + data []execute.Block + want []*executetest.Block + }{ + { + name: "one block", + spec: &functions.StateTrackingProcedureSpec{ + CountLabel: "count", + DurationLabel: "duration", + DurationUnit: 1, + Fn: gt5, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: 
[]execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0}, + {execute.Time(2), 1.0}, + {execute.Time(3), 6.0}, + {execute.Time(4), 7.0}, + {execute.Time(5), 8.0}, + {execute.Time(6), 1.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "count", Type: execute.TInt, Kind: execute.ValueColKind}, + {Label: "duration", Type: execute.TInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0, int64(-1), int64(-1)}, + {execute.Time(2), 1.0, int64(-1), int64(-1)}, + {execute.Time(3), 6.0, int64(1), int64(0)}, + {execute.Time(4), 7.0, int64(2), int64(1)}, + {execute.Time(5), 8.0, int64(3), int64(2)}, + {execute.Time(6), 1.0, int64(-1), int64(-1)}, + }, + }}, + }, + { + name: "only duration", + spec: &functions.StateTrackingProcedureSpec{ + DurationLabel: "duration", + DurationUnit: 1, + Fn: gt5, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0}, + {execute.Time(2), 1.0}, + {execute.Time(3), 6.0}, + {execute.Time(4), 7.0}, + {execute.Time(5), 8.0}, + {execute.Time(6), 1.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "duration", Type: execute.TInt, Kind: execute.ValueColKind}, 
+ }, + Data: [][]interface{}{ + {execute.Time(1), 2.0, int64(-1)}, + {execute.Time(2), 1.0, int64(-1)}, + {execute.Time(3), 6.0, int64(0)}, + {execute.Time(4), 7.0, int64(1)}, + {execute.Time(5), 8.0, int64(2)}, + {execute.Time(6), 1.0, int64(-1)}, + }, + }}, + }, + { + name: "only count", + spec: &functions.StateTrackingProcedureSpec{ + CountLabel: "count", + Fn: gt5, + }, + data: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0}, + {execute.Time(2), 1.0}, + {execute.Time(3), 6.0}, + {execute.Time(4), 7.0}, + {execute.Time(5), 8.0}, + {execute.Time(6), 1.0}, + }, + }}, + want: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 3, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "count", Type: execute.TInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(1), 2.0, int64(-1)}, + {execute.Time(2), 1.0, int64(-1)}, + {execute.Time(3), 6.0, int64(1)}, + {execute.Time(4), 7.0, int64(2)}, + {execute.Time(5), 8.0, int64(3)}, + {execute.Time(6), 1.0, int64(-1)}, + }, + }}, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + executetest.ProcessTestHelper( + t, + tc.data, + tc.want, + func(d execute.Dataset, c execute.BlockBuilderCache) execute.Transformation { + tx, err := functions.NewStateTrackingTransformation(d, c, tc.spec) + if err != nil { + t.Fatal(err) + } + return tx + }, + ) + }) + } +} diff --git a/vendor/github.com/influxdata/ifql/functions/stddev.go b/vendor/github.com/influxdata/ifql/functions/stddev.go new file mode 100644 index 000000000..f95f97dae --- /dev/null +++ 
b/vendor/github.com/influxdata/ifql/functions/stddev.go @@ -0,0 +1,130 @@ +package functions + +import ( + "math" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" +) + +const StddevKind = "stddev" + +type StddevOpSpec struct { +} + +var stddevSignature = query.DefaultFunctionSignature() + +func init() { + query.RegisterFunction(StddevKind, createStddevOpSpec, stddevSignature) + query.RegisterOpSpec(StddevKind, newStddevOp) + plan.RegisterProcedureSpec(StddevKind, newStddevProcedure, StddevKind) + execute.RegisterTransformation(StddevKind, createStddevTransformation) +} +func createStddevOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + return new(StddevOpSpec), nil +} + +func newStddevOp() query.OperationSpec { + return new(StddevOpSpec) +} + +func (s *StddevOpSpec) Kind() query.OperationKind { + return StddevKind +} + +type StddevProcedureSpec struct { +} + +func newStddevProcedure(query.OperationSpec, plan.Administration) (plan.ProcedureSpec, error) { + return new(StddevProcedureSpec), nil +} + +func (s *StddevProcedureSpec) Kind() plan.ProcedureKind { + return StddevKind +} +func (s *StddevProcedureSpec) Copy() plan.ProcedureSpec { + return new(StddevProcedureSpec) +} + +type StddevAgg struct { + n, m2, mean float64 +} + +func createStddevTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + t, d := execute.NewAggregateTransformationAndDataset(id, mode, a.Bounds(), new(StddevAgg), a.Allocator()) + return t, d, nil +} + +func (a *StddevAgg) reset() { + a.n = 0 + a.mean = 0 + a.m2 = 0 +} +func (a *StddevAgg) NewBoolAgg() execute.DoBoolAgg { + return nil +} + +func (a *StddevAgg) NewIntAgg() execute.DoIntAgg { + a.reset() + return a +} + +func (a *StddevAgg) 
NewUIntAgg() execute.DoUIntAgg { + a.reset() + return a +} + +func (a *StddevAgg) NewFloatAgg() execute.DoFloatAgg { + a.reset() + return a +} + +func (a *StddevAgg) NewStringAgg() execute.DoStringAgg { + return nil +} +func (a *StddevAgg) DoInt(vs []int64) { + var delta, delta2 float64 + for _, v := range vs { + a.n++ + // TODO handle overflow + delta = float64(v) - a.mean + a.mean += delta / a.n + delta2 = float64(v) - a.mean + a.m2 += delta * delta2 + } +} +func (a *StddevAgg) DoUInt(vs []uint64) { + var delta, delta2 float64 + for _, v := range vs { + a.n++ + // TODO handle overflow + delta = float64(v) - a.mean + a.mean += delta / a.n + delta2 = float64(v) - a.mean + a.m2 += delta * delta2 + } +} +func (a *StddevAgg) DoFloat(vs []float64) { + var delta, delta2 float64 + for _, v := range vs { + a.n++ + delta = v - a.mean + a.mean += delta / a.n + delta2 = v - a.mean + a.m2 += delta * delta2 + } +} +func (a *StddevAgg) Type() execute.DataType { + return execute.TFloat +} +func (a *StddevAgg) ValueFloat() float64 { + if a.n < 2 { + return math.NaN() + } + return math.Sqrt(a.m2 / (a.n - 1)) +} diff --git a/vendor/github.com/influxdata/ifql/functions/stddev_test.go b/vendor/github.com/influxdata/ifql/functions/stddev_test.go new file mode 100644 index 000000000..e040149eb --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/stddev_test.go @@ -0,0 +1,65 @@ +package functions_test + +import ( + "math" + "testing" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestStddevOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"stddev","kind":"stddev"}`) + op := &query.Operation{ + ID: "stddev", + Spec: &functions.StddevOpSpec{}, + } + + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestStddev_Process(t *testing.T) { + testCases := []struct { + name string + data []float64 + want float64 + 
}{ + { + name: "zero", + data: []float64{1, 1, 1}, + want: 0.0, + }, + { + name: "nonzero", + data: []float64{1, 2, 3}, + want: 1.0, + }, + { + name: "NaN", + data: []float64{1}, + want: math.NaN(), + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + executetest.AggFuncTestHelper( + t, + new(functions.StddevAgg), + tc.data, + tc.want, + ) + }) + } +} + +func BenchmarkStddev(b *testing.B) { + executetest.AggFuncBenchmarkHelper( + b, + new(functions.StddevAgg), + NormalData, + 2.998926113076968, + ) +} diff --git a/vendor/github.com/influxdata/ifql/functions/sum.go b/vendor/github.com/influxdata/ifql/functions/sum.go new file mode 100644 index 000000000..d75c2ebab --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/sum.go @@ -0,0 +1,152 @@ +package functions + +import ( + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" +) + +const SumKind = "sum" + +type SumOpSpec struct { +} + +var sumSignature = query.DefaultFunctionSignature() + +func init() { + query.RegisterFunction(SumKind, createSumOpSpec, sumSignature) + query.RegisterOpSpec(SumKind, newSumOp) + plan.RegisterProcedureSpec(SumKind, newSumProcedure, SumKind) + execute.RegisterTransformation(SumKind, createSumTransformation) +} + +func createSumOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + return new(SumOpSpec), nil +} + +func newSumOp() query.OperationSpec { + return new(SumOpSpec) +} + +func (s *SumOpSpec) Kind() query.OperationKind { + return SumKind +} + +type SumProcedureSpec struct { +} + +func newSumProcedure(query.OperationSpec, plan.Administration) (plan.ProcedureSpec, error) { + return new(SumProcedureSpec), nil +} + +func (s *SumProcedureSpec) Kind() plan.ProcedureKind { + return SumKind +} + +func (s *SumProcedureSpec) Copy() plan.ProcedureSpec { + return 
new(SumProcedureSpec) +} + +func (s *SumProcedureSpec) AggregateMethod() string { + return SumKind +} +func (s *SumProcedureSpec) ReAggregateSpec() plan.ProcedureSpec { + return new(SumProcedureSpec) +} + +func (s *SumProcedureSpec) PushDownRules() []plan.PushDownRule { + return []plan.PushDownRule{{ + Root: FromKind, + Through: nil, + Match: func(spec plan.ProcedureSpec) bool { + selectSpec := spec.(*FromProcedureSpec) + return !selectSpec.GroupingSet + }, + }} +} +func (s *SumProcedureSpec) PushDown(root *plan.Procedure, dup func() *plan.Procedure) { + selectSpec := root.Spec.(*FromProcedureSpec) + if selectSpec.AggregateSet { + root = dup() + selectSpec = root.Spec.(*FromProcedureSpec) + selectSpec.AggregateSet = false + selectSpec.AggregateMethod = "" + return + } + selectSpec.AggregateSet = true + selectSpec.AggregateMethod = s.AggregateMethod() +} + +type SumAgg struct{} + +func createSumTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + t, d := execute.NewAggregateTransformationAndDataset(id, mode, a.Bounds(), new(SumAgg), a.Allocator()) + return t, d, nil +} +func (a *SumAgg) NewBoolAgg() execute.DoBoolAgg { + return nil +} +func (a *SumAgg) NewIntAgg() execute.DoIntAgg { + return new(SumIntAgg) +} +func (a *SumAgg) NewUIntAgg() execute.DoUIntAgg { + return new(SumUIntAgg) +} +func (a *SumAgg) NewFloatAgg() execute.DoFloatAgg { + return new(SumFloatAgg) +} +func (a *SumAgg) NewStringAgg() execute.DoStringAgg { + return nil +} + +type SumIntAgg struct { + sum int64 +} + +func (a *SumIntAgg) DoInt(vs []int64) { + for _, v := range vs { + a.sum += v + } +} +func (a *SumIntAgg) Type() execute.DataType { + return execute.TInt +} +func (a *SumIntAgg) ValueInt() int64 { + return a.sum +} + +type SumUIntAgg struct { + sum uint64 +} + +func (a *SumUIntAgg) DoUInt(vs []uint64) { + for _, v := range vs { + a.sum += v + } +} +func (a *SumUIntAgg) 
Type() execute.DataType { + return execute.TUInt +} +func (a *SumUIntAgg) ValueUInt() uint64 { + return a.sum +} + +type SumFloatAgg struct { + sum float64 +} + +func (a *SumFloatAgg) DoFloat(vs []float64) { + for _, v := range vs { + a.sum += v + } +} +func (a *SumFloatAgg) Type() execute.DataType { + return execute.TFloat +} +func (a *SumFloatAgg) ValueFloat() float64 { + return a.sum +} diff --git a/vendor/github.com/influxdata/ifql/functions/sum_test.go b/vendor/github.com/influxdata/ifql/functions/sum_test.go new file mode 100644 index 000000000..95dc6a151 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/sum_test.go @@ -0,0 +1,82 @@ +package functions_test + +import ( + "testing" + + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/query/plan/plantest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestSumOperation_Marshaling(t *testing.T) { + data := []byte(`{"id":"sum","kind":"sum"}`) + op := &query.Operation{ + ID: "sum", + Spec: &functions.SumOpSpec{}, + } + + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestSum_Process(t *testing.T) { + executetest.AggFuncTestHelper(t, + new(functions.SumAgg), + []float64{0, 1, 2, 3, 4, 5, 6, 7, 8, 9}, + float64(45), + ) +} + +func BenchmarkSum(b *testing.B) { + executetest.AggFuncBenchmarkHelper( + b, + new(functions.SumAgg), + NormalData, + 10000816.96729983, + ) +} + +func TestSum_PushDown_Match(t *testing.T) { + spec := new(functions.SumProcedureSpec) + from := new(functions.FromProcedureSpec) + + // Should not match when an aggregate is set + from.GroupingSet = true + plantest.PhysicalPlan_PushDown_Match_TestHelper(t, spec, from, []bool{false}) + + // Should match when no aggregate is set + from.GroupingSet = false + plantest.PhysicalPlan_PushDown_Match_TestHelper(t, spec, from, []bool{true}) +} + +func 
TestSum_PushDown(t *testing.T) { + spec := new(functions.SumProcedureSpec) + root := &plan.Procedure{ + Spec: new(functions.FromProcedureSpec), + } + want := &plan.Procedure{ + Spec: &functions.FromProcedureSpec{ + AggregateSet: true, + AggregateMethod: functions.SumKind, + }, + } + + plantest.PhysicalPlan_PushDown_TestHelper(t, spec, root, false, want) +} +func TestSum_PushDown_Duplicate(t *testing.T) { + spec := new(functions.SumProcedureSpec) + root := &plan.Procedure{ + Spec: &functions.FromProcedureSpec{ + AggregateSet: true, + AggregateMethod: functions.SumKind, + }, + } + want := &plan.Procedure{ + // Expect the duplicate has been reset to zero values + Spec: new(functions.FromProcedureSpec), + } + + plantest.PhysicalPlan_PushDown_TestHelper(t, spec, root, true, want) +} diff --git a/vendor/github.com/influxdata/ifql/functions/top_bottom.go b/vendor/github.com/influxdata/ifql/functions/top_bottom.go new file mode 100644 index 000000000..b7c62d708 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/top_bottom.go @@ -0,0 +1,103 @@ +package functions + +import ( + "github.com/influxdata/ifql/query" +) + +func init() { + query.RegisterBuiltIn("top-bottom", topBottomBuiltIn) + // TODO(nathanielc): Provide an implementation of top/bottom transformation that can use a more efficient sort based on the limit. + // This transformation should be used whenever the planner sees a sort |> limit pair of procedures. +} + +var topBottomBuiltIn = ` +// _sortLimit is a helper function, which sorts and limits a table. +_sortLimit = (n, desc, cols=["_value"], table=<-) => + table + |> sort(cols:cols, desc:desc) + |> limit(n:n) + +// top sorts a table by cols and keeps only the top n records. +top = (n, cols=["_value"], table=<-) => _sortLimit(table:table, n:n, cols:cols, desc:true) + +// bottom sorts a table by cols and keeps only the bottom n records.
+bottom = (n, cols=["_value"], table=<-) => _sortLimit(table:table, n:n, cols:cols, desc:false) + +// _highestOrLowest is a helper function, which reduces all groups into a single group by specific tags and a reducer function, +// then it selects the highest or lowest records based on the cols and the _sortLimit function. +// The default reducer assumes no reducing needs to be performed. +_highestOrLowest = (n, _sortLimit, reducer=(t=<-) => t, cols=["_value"], by=[], table=<-) => + table + |> group(by:by) + |> reducer() + |> group(keep:by) + |> _sortLimit(n:n, cols:cols) + +// highestMax returns the top N records from all groups using the maximum of each group. +highestMax = (n, cols=["_value"], by=[], table=<-) => + _highestOrLowest( + table:table, + n:n, + cols:cols, + by:by, + // TODO(nathanielc): Once max/min support selecting based on multiple columns change this to pass all columns. + reducer: (t=<-) => max(table:t, column:cols[0]), + _sortLimit: top, + ) + +// highestAverage returns the top N records from all groups using the average of each group. +highestAverage = (n, cols=["_value"], by=[], table=<-) => + _highestOrLowest( + table: table, + n:n, + cols:cols, + by:by, + reducer: (t=<-) => mean(table:t, ), + _sortLimit: top, + ) + +// highestCurrent returns the top N records from all groups using the last value of each group. +highestCurrent = (n, cols=["_value"], by=[], table=<-) => + _highestOrLowest( + table: table, + n:n, + cols:cols, + by:by, + reducer: (t=<-) => last(table:t, column:cols[0]), + _sortLimit: top, + ) + +// lowestMin returns the bottom N records from all groups using the minimum of each group. +lowestMin = (n, cols=["_value"], by=[], table=<-) => + _highestOrLowest( + table: table, + n:n, + cols:cols, + by:by, + // TODO(nathanielc): Once max/min support selecting based on multiple columns change this to pass all columns. 
+ reducer: (t=<-) => min(table:t, column:cols[0]), + _sortLimit: bottom, + ) + +// lowestAverage returns the bottom N records from all groups using the average of each group. +lowestAverage = (n, cols=["_value"], by=[], table=<-) => + _highestOrLowest( + table: table, + n:n, + cols:cols, + by:by, + reducer: (t=<-) => mean(table:t), + _sortLimit: bottom, + ) + +// lowestCurrent returns the bottom N records from all groups using the last value of each group. +lowestCurrent = (n, cols=["_value"], by=[], table=<-) => + _highestOrLowest( + table: table, + n:n, + cols:cols, + by:by, + reducer: (t=<-) => last(table:t, column:cols[0]), + _sortLimit: bottom, + ) +` diff --git a/vendor/github.com/influxdata/ifql/functions/window.go b/vendor/github.com/influxdata/ifql/functions/window.go new file mode 100644 index 000000000..ce4ad64b5 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/window.go @@ -0,0 +1,254 @@ +package functions + +import ( + "fmt" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" + "github.com/pkg/errors" +) + +const WindowKind = "window" + +type WindowOpSpec struct { + Every query.Duration `json:"every"` + Period query.Duration `json:"period"` + Start query.Time `json:"start"` + Round query.Duration `json:"round"` + Triggering query.TriggerSpec `json:"triggering"` +} + +var windowSignature = query.DefaultFunctionSignature() + +func init() { + windowSignature.Params["every"] = semantic.Duration + windowSignature.Params["period"] = semantic.Duration + windowSignature.Params["round"] = semantic.Duration + windowSignature.Params["start"] = semantic.Time + + query.RegisterFunction(WindowKind, createWindowOpSpec, windowSignature) + query.RegisterOpSpec(WindowKind, newWindowOp) + plan.RegisterProcedureSpec(WindowKind, newWindowProcedure, WindowKind) + execute.RegisterTransformation(WindowKind, createWindowTransformation) +} + +func 
createWindowOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + spec := new(WindowOpSpec) + every, everySet, err := args.GetDuration("every") + if err != nil { + return nil, err + } + if everySet { + spec.Every = query.Duration(every) + } + period, periodSet, err := args.GetDuration("period") + if err != nil { + return nil, err + } + if periodSet { + spec.Every = period + } + if round, ok, err := args.GetDuration("round"); err != nil { + return nil, err + } else if ok { + spec.Round = round + } + if start, ok, err := args.GetTime("start"); err != nil { + return nil, err + } else if ok { + spec.Start = start + } + + if !everySet && !periodSet { + return nil, errors.New(`window function requires at least one of "every" or "period" to be set`) + } + // Apply defaults + if !everySet { + spec.Every = spec.Period + } + if !periodSet { + spec.Period = spec.Every + } + return spec, nil +} + +func newWindowOp() query.OperationSpec { + return new(WindowOpSpec) +} + +func (s *WindowOpSpec) Kind() query.OperationKind { + return WindowKind +} + +type WindowProcedureSpec struct { + Window plan.WindowSpec + Triggering query.TriggerSpec +} + +func newWindowProcedure(qs query.OperationSpec, pa plan.Administration) (plan.ProcedureSpec, error) { + s, ok := qs.(*WindowOpSpec) + if !ok { + return nil, fmt.Errorf("invalid spec type %T", qs) + } + p := &WindowProcedureSpec{ + Window: plan.WindowSpec{ + Every: s.Every, + Period: s.Period, + Round: s.Round, + Start: s.Start, + }, + Triggering: s.Triggering, + } + if p.Triggering == nil { + p.Triggering = query.DefaultTrigger + } + return p, nil +} + +func (s *WindowProcedureSpec) Kind() plan.ProcedureKind { + return WindowKind +} +func (s *WindowProcedureSpec) Copy() plan.ProcedureSpec { + ns := new(WindowProcedureSpec) + ns.Window = s.Window + ns.Triggering = s.Triggering + return ns +} + +func (s *WindowProcedureSpec) 
TriggerSpec() query.TriggerSpec { + return s.Triggering +} + +func createWindowTransformation(id execute.DatasetID, mode execute.AccumulationMode, spec plan.ProcedureSpec, a execute.Administration) (execute.Transformation, execute.Dataset, error) { + s, ok := spec.(*WindowProcedureSpec) + if !ok { + return nil, nil, fmt.Errorf("invalid spec type %T", spec) + } + cache := execute.NewBlockBuilderCache(a.Allocator()) + d := execute.NewDataset(id, mode, cache) + t := NewFixedWindowTransformation(d, cache, a.Bounds(), execute.Window{ + Every: execute.Duration(s.Window.Every), + Period: execute.Duration(s.Window.Period), + Round: execute.Duration(s.Window.Round), + Start: a.ResolveTime(s.Window.Start), + }) + return t, d, nil +} + +type fixedWindowTransformation struct { + d execute.Dataset + cache execute.BlockBuilderCache + w execute.Window + bounds execute.Bounds + + offset execute.Duration +} + +func NewFixedWindowTransformation( + d execute.Dataset, + cache execute.BlockBuilderCache, + bounds execute.Bounds, + w execute.Window, +) execute.Transformation { + offset := execute.Duration(w.Start - w.Start.Truncate(w.Every)) + return &fixedWindowTransformation{ + d: d, + cache: cache, + w: w, + bounds: bounds, + offset: offset, + } +} + +func (t *fixedWindowTransformation) RetractBlock(id execute.DatasetID, meta execute.BlockMetadata) (err error) { + tagKey := meta.Tags().Key() + t.cache.ForEachBuilder(func(bk execute.BlockKey, bld execute.BlockBuilder) { + if err != nil { + return + } + if bld.Bounds().Overlaps(meta.Bounds()) && tagKey == bld.Tags().Key() { + err = t.d.RetractBlock(bk) + } + }) + return +} + +func (t *fixedWindowTransformation) Process(id execute.DatasetID, b execute.Block) error { + cols := b.Cols() + valueIdx := execute.ValueIdx(cols) + valueCol := cols[valueIdx] + times := b.Times() + times.DoTime(func(ts []execute.Time, rr execute.RowReader) { + for i, time := range ts { + bounds := t.getWindowBounds(time) + for _, bnds := range bounds { + builder, 
new := t.cache.BlockBuilder(blockMetadata{ + tags: b.Tags(), + bounds: bnds, + }) + if new { + builder.AddCol(execute.TimeCol) + builder.AddCol(valueCol) + execute.AddTags(b.Tags(), builder) + } + colMap := execute.AddNewCols(b, builder) + + execute.AppendRow(i, rr, builder, colMap) + } + } + }) + return nil +} + +func (t *fixedWindowTransformation) getWindowBounds(now execute.Time) []execute.Bounds { + stop := now.Truncate(t.w.Every) + execute.Time(t.offset) + if now >= stop { + stop += execute.Time(t.w.Every) + } + start := stop - execute.Time(t.w.Period) + + var bounds []execute.Bounds + + for now >= start { + bnds := execute.Bounds{ + Start: start, + Stop: stop, + } + + // Check global bounds + if bnds.Stop > t.bounds.Stop { + bnds.Stop = t.bounds.Stop + } + + if bnds.Start < t.bounds.Start { + bnds.Start = t.bounds.Start + } + + // Check bounds again since we just clamped them. + if bnds.Contains(now) { + bounds = append(bounds, bnds) + } + + // Shift up to next bounds + stop += execute.Time(t.w.Every) + start += execute.Time(t.w.Every) + } + + return bounds +} + +func (t *fixedWindowTransformation) UpdateWatermark(id execute.DatasetID, mark execute.Time) error { + return t.d.UpdateWatermark(mark) +} +func (t *fixedWindowTransformation) UpdateProcessingTime(id execute.DatasetID, pt execute.Time) error { + return t.d.UpdateProcessingTime(pt) +} +func (t *fixedWindowTransformation) Finish(id execute.DatasetID, err error) { + t.d.Finish(err) +} diff --git a/vendor/github.com/influxdata/ifql/functions/window_test.go b/vendor/github.com/influxdata/ifql/functions/window_test.go new file mode 100644 index 000000000..0ce933265 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/window_test.go @@ -0,0 +1,645 @@ +package functions_test + +import ( + "sort" + "strconv" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + 
"github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/querytest" +) + +func TestWindow_NewQuery(t *testing.T) { + tests := []querytest.NewQueryTestCase{ + { + Name: "from with window", + Raw: `from(db:"mydb") |> window(start:-4h, every:1h)`, + Want: &query.Spec{ + Operations: []*query.Operation{ + { + ID: "from0", + Spec: &functions.FromOpSpec{ + Database: "mydb", + }, + }, + { + ID: "window1", + Spec: &functions.WindowOpSpec{ + Start: query.Time{ + Relative: -4 * time.Hour, + IsRelative: true, + }, + Every: query.Duration(time.Hour), + Period: query.Duration(time.Hour), + }, + }, + }, + Edges: []query.Edge{ + {Parent: "from0", Child: "window1"}, + }, + }, + }, + } + for _, tc := range tests { + tc := tc + t.Run(tc.Name, func(t *testing.T) { + t.Parallel() + querytest.NewQueryTestHelper(t, tc) + }) + } +} + +func TestWindowOperation_Marshaling(t *testing.T) { + //TODO: Test marshalling of triggerspec + data := []byte(`{"id":"window","kind":"window","spec":{"every":"1m","period":"1h","start":"-4h","round":"1s"}}`) + op := &query.Operation{ + ID: "window", + Spec: &functions.WindowOpSpec{ + Every: query.Duration(time.Minute), + Period: query.Duration(time.Hour), + Start: query.Time{ + Relative: -4 * time.Hour, + IsRelative: true, + }, + Round: query.Duration(time.Second), + }, + } + + querytest.OperationMarshalingTestHelper(t, data, op) +} + +func TestFixedWindow_PassThrough(t *testing.T) { + executetest.TransformationPassThroughTestHelper(t, func(d execute.Dataset, c execute.BlockBuilderCache) execute.Transformation { + fw := functions.NewFixedWindowTransformation( + d, + c, + execute.Bounds{}, + execute.Window{ + Every: execute.Duration(time.Minute), + Period: execute.Duration(time.Minute), + }, + ) + return fw + }) +} + +func TestFixedWindow_Process(t *testing.T) { + testCases := []struct { + name string + valueCol execute.ColMeta + start execute.Time + every, period execute.Duration + num int + want func(start execute.Time) 
[]*executetest.Block + }{ + { + name: "nonoverlapping_nonaligned", + valueCol: execute.ColMeta{Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + // Use a time that is *not* aligned with the every/period durations of the window + start: execute.Time(time.Date(2017, 10, 10, 10, 10, 10, 10, time.UTC).UnixNano()), + every: execute.Duration(time.Minute), + period: execute.Duration(time.Minute), + num: 15, + want: func(start execute.Time) []*executetest.Block { + return []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: start, + Stop: start + execute.Time(time.Minute), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {start, 0.0}, + {start + execute.Time(10*time.Second), 1.0}, + {start + execute.Time(20*time.Second), 2.0}, + {start + execute.Time(30*time.Second), 3.0}, + {start + execute.Time(40*time.Second), 4.0}, + {start + execute.Time(50*time.Second), 5.0}, + }, + }, + { + Bnds: execute.Bounds{ + Start: start + execute.Time(1*time.Minute), + Stop: start + execute.Time(2*time.Minute), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {start + execute.Time(60*time.Second), 6.0}, + {start + execute.Time(70*time.Second), 7.0}, + {start + execute.Time(80*time.Second), 8.0}, + {start + execute.Time(90*time.Second), 9.0}, + {start + execute.Time(100*time.Second), 10.0}, + {start + execute.Time(110*time.Second), 11.0}, + }, + }, + { + Bnds: execute.Bounds{ + Start: start + execute.Time(2*time.Minute), + Stop: start + execute.Time(3*time.Minute), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: 
[][]interface{}{ + {start + execute.Time(120*time.Second), 12.0}, + {start + execute.Time(130*time.Second), 13.0}, + {start + execute.Time(140*time.Second), 14.0}, + }, + }, + } + }, + }, + { + name: "nonoverlapping_aligned", + valueCol: execute.ColMeta{Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + // Use a time that is aligned with the every/period durations of the window + start: execute.Time(time.Date(2017, 10, 10, 10, 0, 0, 0, time.UTC).UnixNano()), + every: execute.Duration(time.Minute), + period: execute.Duration(time.Minute), + num: 15, + want: func(start execute.Time) []*executetest.Block { + return []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: start, + Stop: start + execute.Time(time.Minute), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {start, 0.0}, + {start + execute.Time(10*time.Second), 1.0}, + {start + execute.Time(20*time.Second), 2.0}, + {start + execute.Time(30*time.Second), 3.0}, + {start + execute.Time(40*time.Second), 4.0}, + {start + execute.Time(50*time.Second), 5.0}, + }, + }, + { + Bnds: execute.Bounds{ + Start: start + execute.Time(1*time.Minute), + Stop: start + execute.Time(2*time.Minute), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {start + execute.Time(60*time.Second), 6.0}, + {start + execute.Time(70*time.Second), 7.0}, + {start + execute.Time(80*time.Second), 8.0}, + {start + execute.Time(90*time.Second), 9.0}, + {start + execute.Time(100*time.Second), 10.0}, + {start + execute.Time(110*time.Second), 11.0}, + }, + }, + { + Bnds: execute.Bounds{ + Start: start + execute.Time(2*time.Minute), + Stop: start + execute.Time(3*time.Minute), + }, + ColMeta: []execute.ColMeta{ + {Label: 
"_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {start + execute.Time(120*time.Second), 12.0}, + {start + execute.Time(130*time.Second), 13.0}, + {start + execute.Time(140*time.Second), 14.0}, + }, + }, + } + }, + }, + { + name: "overlapping_nonaligned", + valueCol: execute.ColMeta{Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + // Use a time that is *not* aligned with the every/period durations of the window + start: execute.Time(time.Date(2017, 10, 10, 10, 10, 10, 10, time.UTC).UnixNano()), + every: execute.Duration(time.Minute), + period: execute.Duration(2 * time.Minute), + num: 15, + want: func(start execute.Time) []*executetest.Block { + return []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: start, + Stop: start + execute.Time(time.Minute), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {start, 0.0}, + {start + execute.Time(10*time.Second), 1.0}, + {start + execute.Time(20*time.Second), 2.0}, + {start + execute.Time(30*time.Second), 3.0}, + {start + execute.Time(40*time.Second), 4.0}, + {start + execute.Time(50*time.Second), 5.0}, + }, + }, + { + Bnds: execute.Bounds{ + Start: start, + Stop: start + execute.Time(2*time.Minute), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {start, 0.0}, + {start + execute.Time(10*time.Second), 1.0}, + {start + execute.Time(20*time.Second), 2.0}, + {start + execute.Time(30*time.Second), 3.0}, + {start + execute.Time(40*time.Second), 4.0}, + {start + execute.Time(50*time.Second), 5.0}, + {start + execute.Time(60*time.Second), 6.0}, + {start + execute.Time(70*time.Second), 
7.0}, + {start + execute.Time(80*time.Second), 8.0}, + {start + execute.Time(90*time.Second), 9.0}, + {start + execute.Time(100*time.Second), 10.0}, + {start + execute.Time(110*time.Second), 11.0}, + }, + }, + { + Bnds: execute.Bounds{ + Start: start + execute.Time(1*time.Minute), + Stop: start + execute.Time(3*time.Minute), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {start + execute.Time(60*time.Second), 6.0}, + {start + execute.Time(70*time.Second), 7.0}, + {start + execute.Time(80*time.Second), 8.0}, + {start + execute.Time(90*time.Second), 9.0}, + {start + execute.Time(100*time.Second), 10.0}, + {start + execute.Time(110*time.Second), 11.0}, + {start + execute.Time(120*time.Second), 12.0}, + {start + execute.Time(130*time.Second), 13.0}, + {start + execute.Time(140*time.Second), 14.0}, + }, + }, + { + Bnds: execute.Bounds{ + Start: start + execute.Time(2*time.Minute), + Stop: start + execute.Time(4*time.Minute), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {start + execute.Time(120*time.Second), 12.0}, + {start + execute.Time(130*time.Second), 13.0}, + {start + execute.Time(140*time.Second), 14.0}, + }, + }, + } + }, + }, + { + name: "overlapping_aligned", + valueCol: execute.ColMeta{Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + // Use a time that is aligned with the every/period durations of the window + start: execute.Time(time.Date(2017, 10, 10, 10, 0, 0, 0, time.UTC).UnixNano()), + every: execute.Duration(time.Minute), + period: execute.Duration(2 * time.Minute), + num: 15, + want: func(start execute.Time) []*executetest.Block { + return []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: start, + Stop: start 
+ execute.Time(time.Minute), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {start, 0.0}, + {start + execute.Time(10*time.Second), 1.0}, + {start + execute.Time(20*time.Second), 2.0}, + {start + execute.Time(30*time.Second), 3.0}, + {start + execute.Time(40*time.Second), 4.0}, + {start + execute.Time(50*time.Second), 5.0}, + }, + }, + { + Bnds: execute.Bounds{ + Start: start, + Stop: start + execute.Time(2*time.Minute), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {start, 0.0}, + {start + execute.Time(10*time.Second), 1.0}, + {start + execute.Time(20*time.Second), 2.0}, + {start + execute.Time(30*time.Second), 3.0}, + {start + execute.Time(40*time.Second), 4.0}, + {start + execute.Time(50*time.Second), 5.0}, + {start + execute.Time(60*time.Second), 6.0}, + {start + execute.Time(70*time.Second), 7.0}, + {start + execute.Time(80*time.Second), 8.0}, + {start + execute.Time(90*time.Second), 9.0}, + {start + execute.Time(100*time.Second), 10.0}, + {start + execute.Time(110*time.Second), 11.0}, + }, + }, + { + Bnds: execute.Bounds{ + Start: start + execute.Time(1*time.Minute), + Stop: start + execute.Time(3*time.Minute), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {start + execute.Time(60*time.Second), 6.0}, + {start + execute.Time(70*time.Second), 7.0}, + {start + execute.Time(80*time.Second), 8.0}, + {start + execute.Time(90*time.Second), 9.0}, + {start + execute.Time(100*time.Second), 10.0}, + {start + execute.Time(110*time.Second), 11.0}, + {start + execute.Time(120*time.Second), 12.0}, + 
{start + execute.Time(130*time.Second), 13.0}, + {start + execute.Time(140*time.Second), 14.0}, + }, + }, + { + Bnds: execute.Bounds{ + Start: start + execute.Time(2*time.Minute), + Stop: start + execute.Time(4*time.Minute), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {start + execute.Time(120*time.Second), 12.0}, + {start + execute.Time(130*time.Second), 13.0}, + {start + execute.Time(140*time.Second), 14.0}, + }, + }, + } + }, + }, + { + name: "underlapping_nonaligned", + valueCol: execute.ColMeta{Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + // Use a time that is *not* aligned with the every/period durations of the window + start: execute.Time(time.Date(2017, 10, 10, 10, 10, 10, 10, time.UTC).UnixNano()), + every: execute.Duration(2 * time.Minute), + period: execute.Duration(time.Minute), + num: 24, + want: func(start execute.Time) []*executetest.Block { + return []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: start + 1*execute.Time(time.Minute), + Stop: start + 2*execute.Time(time.Minute), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {start + execute.Time(60*time.Second), 6.0}, + {start + execute.Time(70*time.Second), 7.0}, + {start + execute.Time(80*time.Second), 8.0}, + {start + execute.Time(90*time.Second), 9.0}, + {start + execute.Time(100*time.Second), 10.0}, + {start + execute.Time(110*time.Second), 11.0}, + }, + }, + { + Bnds: execute.Bounds{ + Start: start + execute.Time(3*time.Minute), + Stop: start + execute.Time(4*time.Minute), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, 
+ }, + Data: [][]interface{}{ + {start + execute.Time(180*time.Second), 18.0}, + {start + execute.Time(190*time.Second), 19.0}, + {start + execute.Time(200*time.Second), 20.0}, + {start + execute.Time(210*time.Second), 21.0}, + {start + execute.Time(220*time.Second), 22.0}, + {start + execute.Time(230*time.Second), 23.0}, + }, + }, + } + }, + }, + { + name: "underlapping_aligned", + valueCol: execute.ColMeta{Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + // Use a time that is aligned with the every/period durations of the window + start: execute.Time(time.Date(2017, 10, 10, 10, 0, 0, 0, time.UTC).UnixNano()), + every: execute.Duration(2 * time.Minute), + period: execute.Duration(time.Minute), + num: 24, + want: func(start execute.Time) []*executetest.Block { + return []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: start + 1*execute.Time(time.Minute), + Stop: start + 2*execute.Time(time.Minute), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {start + execute.Time(60*time.Second), 6.0}, + {start + execute.Time(70*time.Second), 7.0}, + {start + execute.Time(80*time.Second), 8.0}, + {start + execute.Time(90*time.Second), 9.0}, + {start + execute.Time(100*time.Second), 10.0}, + {start + execute.Time(110*time.Second), 11.0}, + }, + }, + { + Bnds: execute.Bounds{ + Start: start + execute.Time(3*time.Minute), + Stop: start + execute.Time(4*time.Minute), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {start + execute.Time(180*time.Second), 18.0}, + {start + execute.Time(190*time.Second), 19.0}, + {start + execute.Time(200*time.Second), 20.0}, + {start + execute.Time(210*time.Second), 21.0}, + {start + 
execute.Time(220*time.Second), 22.0}, + {start + execute.Time(230*time.Second), 23.0}, + }, + }, + } + }, + }, + { + name: "nonoverlapping_aligned_int", + valueCol: execute.ColMeta{Label: "_value", Type: execute.TInt, Kind: execute.ValueColKind}, + // Use a time that is aligned with the every/period durations of the window + start: execute.Time(time.Date(2017, 10, 10, 10, 0, 0, 0, time.UTC).UnixNano()), + every: execute.Duration(time.Minute), + period: execute.Duration(time.Minute), + num: 15, + want: func(start execute.Time) []*executetest.Block { + return []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: start, + Stop: start + execute.Time(time.Minute), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {start, int64(0.0)}, + {start + execute.Time(10*time.Second), int64(1)}, + {start + execute.Time(20*time.Second), int64(2)}, + {start + execute.Time(30*time.Second), int64(3)}, + {start + execute.Time(40*time.Second), int64(4)}, + {start + execute.Time(50*time.Second), int64(5)}, + }, + }, + { + Bnds: execute.Bounds{ + Start: start + execute.Time(1*time.Minute), + Stop: start + execute.Time(2*time.Minute), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {start + execute.Time(60*time.Second), int64(6)}, + {start + execute.Time(70*time.Second), int64(7)}, + {start + execute.Time(80*time.Second), int64(8)}, + {start + execute.Time(90*time.Second), int64(9)}, + {start + execute.Time(100*time.Second), int64(10)}, + {start + execute.Time(110*time.Second), int64(11)}, + }, + }, + { + Bnds: execute.Bounds{ + Start: start + execute.Time(2*time.Minute), + Stop: start + execute.Time(3*time.Minute), + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: 
execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {start + execute.Time(120*time.Second), int64(12)}, + {start + execute.Time(130*time.Second), int64(13)}, + {start + execute.Time(140*time.Second), int64(14)}, + }, + }, + } + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + start := tc.start + stop := start + execute.Time(time.Hour) + + d := executetest.NewDataset(executetest.RandomDatasetID()) + c := execute.NewBlockBuilderCache(executetest.UnlimitedAllocator) + c.SetTriggerSpec(execute.DefaultTriggerSpec) + + fw := functions.NewFixedWindowTransformation( + d, + c, + execute.Bounds{ + Start: start, + Stop: stop, + }, + execute.Window{ + Every: tc.every, + Period: tc.period, + Start: start, + }, + ) + + block0 := &executetest.Block{ + Bnds: execute.Bounds{ + Start: start, + Stop: stop, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + tc.valueCol, + }, + } + + for i := 0; i < tc.num; i++ { + var v interface{} + switch tc.valueCol.Type { + case execute.TBool: + v = bool(i%2 == 0) + case execute.TInt: + v = int64(i) + case execute.TUInt: + v = uint64(i) + case execute.TFloat: + v = float64(i) + case execute.TString: + v = strconv.Itoa(i) + } + block0.Data = append(block0.Data, []interface{}{ + start + execute.Time(time.Duration(i)*10*time.Second), + v, + }) + } + + parentID := executetest.RandomDatasetID() + if err := fw.Process(parentID, block0); err != nil { + t.Fatal(err) + } + + got := executetest.BlocksFromCache(c) + + sort.Sort(executetest.SortedBlocks(got)) + want := tc.want(start) + sort.Sort(executetest.SortedBlocks(want)) + + if !cmp.Equal(want, got) { + t.Errorf("unexpected blocks -want/+got\n%s", cmp.Diff(want, got)) + } + }) + } +} diff --git a/vendor/github.com/influxdata/ifql/functions/yield.go b/vendor/github.com/influxdata/ifql/functions/yield.go new file 
mode 100644 index 000000000..12e19f8bf --- /dev/null +++ b/vendor/github.com/influxdata/ifql/functions/yield.go @@ -0,0 +1,81 @@ +package functions + +import ( + "fmt" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" +) + +const YieldKind = "yield" + +type YieldOpSpec struct { + Name string `json:"name"` +} + +var yieldSignature = semantic.FunctionSignature{ + Params: map[string]semantic.Type{ + query.TableParameter: query.TableObjectType, + "name": semantic.String, + }, + ReturnType: query.TableObjectType, + PipeArgument: query.TableParameter, +} + +func init() { + query.RegisterFunction(YieldKind, createYieldOpSpec, yieldSignature) + query.RegisterOpSpec(YieldKind, newYieldOp) + plan.RegisterProcedureSpec(YieldKind, newYieldProcedure, YieldKind) +} + +func createYieldOpSpec(args query.Arguments, a *query.Administration) (query.OperationSpec, error) { + if err := a.AddParentFromArgs(args); err != nil { + return nil, err + } + + spec := new(YieldOpSpec) + + name, ok, err := args.GetString("name") + if err != nil { + return nil, err + } else if ok { + spec.Name = name + } else { + spec.Name = "_result" + } + + return spec, nil +} + +func newYieldOp() query.OperationSpec { + return new(YieldOpSpec) +} + +func (s *YieldOpSpec) Kind() query.OperationKind { + return YieldKind +} + +type YieldProcedureSpec struct { + Name string `json:"name"` +} + +func newYieldProcedure(qs query.OperationSpec, _ plan.Administration) (plan.ProcedureSpec, error) { + if spec, ok := qs.(*YieldOpSpec); ok { + return &YieldProcedureSpec{Name: spec.Name}, nil + } + + return nil, fmt.Errorf("invalid spec type %T", qs) +} + +func (s *YieldProcedureSpec) Kind() plan.ProcedureKind { + return YieldKind +} + +func (s *YieldProcedureSpec) Copy() plan.ProcedureSpec { + return &YieldProcedureSpec{Name: s.Name} +} + +func (s *YieldProcedureSpec) YieldName() string { + return s.Name +} diff --git 
a/vendor/github.com/influxdata/ifql/interpreter/doc.go b/vendor/github.com/influxdata/ifql/interpreter/doc.go new file mode 100644 index 000000000..6860c315d --- /dev/null +++ b/vendor/github.com/influxdata/ifql/interpreter/doc.go @@ -0,0 +1,2 @@ +// Package interpreter provides an implementation of an IFQL interpreter. +package interpreter diff --git a/vendor/github.com/influxdata/ifql/interpreter/interpreter.go b/vendor/github.com/influxdata/ifql/interpreter/interpreter.go new file mode 100644 index 000000000..5ffc0747f --- /dev/null +++ b/vendor/github.com/influxdata/ifql/interpreter/interpreter.go @@ -0,0 +1,1441 @@ +package interpreter + +import ( + "fmt" + "regexp" + "strconv" + "time" + + "github.com/influxdata/ifql/ast" + "github.com/influxdata/ifql/semantic" + "github.com/pkg/errors" +) + +func Eval(program *semantic.Program, scope *Scope, d Domain) error { + itrp := interpreter{ + d: d, + } + return itrp.eval(program, scope) +} + +// Domain represents any specific domain being used during evaluation. 
+type Domain interface{} + +type interpreter struct { + d Domain +} + +func (itrp interpreter) eval(program *semantic.Program, scope *Scope) error { + for _, stmt := range program.Body { + if err := itrp.doStatement(stmt, scope); err != nil { + return err + } + } + return nil +} + +func (itrp interpreter) doStatement(stmt semantic.Statement, scope *Scope) error { + scope.SetReturn(value{t: semantic.Invalid}) + switch s := stmt.(type) { + case *semantic.NativeVariableDeclaration: + if err := itrp.doVariableDeclaration(s, scope); err != nil { + return err + } + case *semantic.ExpressionStatement: + v, err := itrp.doExpression(s.Expression, scope) + if err != nil { + return err + } + scope.SetReturn(v) + case *semantic.BlockStatement: + nested := scope.Nest() + for i, stmt := range s.Body { + if err := itrp.doStatement(stmt, nested); err != nil { + return err + } + // Validate a return statement is the last statement + if _, ok := stmt.(*semantic.ReturnStatement); ok { + if i != len(s.Body)-1 { + return errors.New("return statement is not the last statement in the block") + } + } + } + // Propgate any return value from the nested scope out. + // Since a return statement is always last we do not have to worry about overriding an existing return value. 
+ scope.SetReturn(nested.Return()) + case *semantic.ReturnStatement: + v, err := itrp.doExpression(s.Argument, scope) + if err != nil { + return err + } + scope.SetReturn(v) + default: + return fmt.Errorf("unsupported statement type %T", stmt) + } + return nil +} + +func (itrp interpreter) doVariableDeclaration(declaration *semantic.NativeVariableDeclaration, scope *Scope) error { + value, err := itrp.doExpression(declaration.Init, scope) + if err != nil { + return err + } + scope.Set(declaration.Identifier.Name, value) + return nil +} + +func (itrp interpreter) doExpression(expr semantic.Expression, scope *Scope) (Value, error) { + switch e := expr.(type) { + case semantic.Literal: + return itrp.doLiteral(e) + case *semantic.ArrayExpression: + return itrp.doArray(e, scope) + case *semantic.IdentifierExpression: + value, ok := scope.Lookup(e.Name) + if !ok { + return nil, fmt.Errorf("undefined identifier %q", e.Name) + } + return value, nil + case *semantic.CallExpression: + v, err := itrp.doCall(e, scope) + if err != nil { + // Determine function name + return nil, errors.Wrapf(err, "error calling function %q", functionName(e)) + } + return v, nil + case *semantic.MemberExpression: + obj, err := itrp.doExpression(e.Object, scope) + if err != nil { + return nil, err + } + return obj.Property(e.Property) + case *semantic.ObjectExpression: + return itrp.doObject(e, scope) + case *semantic.UnaryExpression: + v, err := itrp.doExpression(e.Argument, scope) + if err != nil { + return nil, err + } + switch e.Operator { + case ast.NotOperator: + if v.Type() != semantic.Bool { + return nil, fmt.Errorf("operand to unary expression is not a boolean value, got %v", v.Type()) + } + return NewBoolValue(!v.Value().(bool)), nil + case ast.SubtractionOperator: + switch t := v.Type(); t { + case semantic.Int: + return NewIntValue(-v.Value().(int64)), nil + case semantic.Float: + return NewFloatValue(-v.Value().(float64)), nil + case semantic.Duration: + return 
NewDurationValue(-v.Value().(time.Duration)), nil + default: + return nil, fmt.Errorf("operand to unary expression is not a number value, got %v", v.Type()) + } + default: + return nil, fmt.Errorf("unsupported operator %q to unary expression", e.Operator) + } + + case *semantic.BinaryExpression: + l, err := itrp.doExpression(e.Left, scope) + if err != nil { + return nil, err + } + + r, err := itrp.doExpression(e.Right, scope) + if err != nil { + return nil, err + } + + bf, ok := binaryFuncLookup[binaryFuncSignature{ + operator: e.Operator, + left: l.Type(), + right: r.Type(), + }] + if !ok { + return nil, fmt.Errorf("unsupported binary operation: %v %v %v", l.Type(), e.Operator, r.Type()) + } + return bf(l, r), nil + case *semantic.LogicalExpression: + l, err := itrp.doExpression(e.Left, scope) + if err != nil { + return nil, err + } + if l.Type() != semantic.Bool { + return nil, fmt.Errorf("left operand to logcial expression is not a boolean value, got %v", l.Type()) + } + left := l.Value().(bool) + + if e.Operator == ast.AndOperator && !left { + // Early return + return NewBoolValue(false), nil + } else if e.Operator == ast.OrOperator && left { + // Early return + return NewBoolValue(true), nil + } + + r, err := itrp.doExpression(e.Right, scope) + if err != nil { + return nil, err + } + if r.Type() != semantic.Bool { + return nil, errors.New("right operand to logcial expression is not a boolean value") + } + right := r.Value().(bool) + + switch e.Operator { + case ast.AndOperator: + return NewBoolValue(left && right), nil + case ast.OrOperator: + return NewBoolValue(left || right), nil + default: + return nil, fmt.Errorf("invalid logical operator %v", e.Operator) + } + case *semantic.FunctionExpression: + return value{ + t: semantic.Function, + v: arrowFunc{ + e: e, + scope: scope.Nest(), + }, + }, nil + default: + return nil, fmt.Errorf("unsupported expression %T", expr) + } +} + +func (itrp interpreter) doArray(a *semantic.ArrayExpression, scope *Scope) (Value, 
error) { + array := Array{ + Elements: make([]Value, len(a.Elements)), + } + elementType := semantic.EmptyArrayType.ElementType() + for i, el := range a.Elements { + v, err := itrp.doExpression(el, scope) + if err != nil { + return nil, err + } + if i == 0 { + elementType = v.Type() + } + if elementType != v.Type() { + return nil, fmt.Errorf("cannot mix types in an array, found both %v and %v", elementType, v.Type()) + } + array.Elements[i] = v + } + array.typ = semantic.NewArrayType(elementType) + return array, nil +} + +func (itrp interpreter) doObject(m *semantic.ObjectExpression, scope *Scope) (Value, error) { + obj := Object{ + Properties: make(map[string]Value, len(m.Properties)), + } + for _, p := range m.Properties { + v, err := itrp.doExpression(p.Value, scope) + if err != nil { + return nil, err + } + if _, ok := obj.Properties[p.Key.Name]; ok { + return nil, fmt.Errorf("duplicate key in object: %q", p.Key.Name) + } + obj.Properties[p.Key.Name] = v + } + return obj, nil +} + +func (itrp interpreter) doLiteral(lit semantic.Literal) (Value, error) { + switch l := lit.(type) { + case *semantic.DateTimeLiteral: + return value{ + t: semantic.Time, + v: l.Value, + }, nil + case *semantic.DurationLiteral: + return value{ + t: semantic.Duration, + v: l.Value, + }, nil + case *semantic.FloatLiteral: + return value{ + t: semantic.Float, + v: l.Value, + }, nil + case *semantic.IntegerLiteral: + return value{ + t: semantic.Int, + v: l.Value, + }, nil + case *semantic.UnsignedIntegerLiteral: + return value{ + t: semantic.UInt, + v: l.Value, + }, nil + case *semantic.StringLiteral: + return value{ + t: semantic.String, + v: l.Value, + }, nil + case *semantic.RegexpLiteral: + return value{ + t: semantic.Regexp, + v: l.Value, + }, nil + case *semantic.BooleanLiteral: + return value{ + t: semantic.Bool, + v: l.Value, + }, nil + // semantic.TODO(nathanielc): Support lists and objects + default: + return nil, fmt.Errorf("unknown literal type %T", lit) + } +} + +func 
functionName(call *semantic.CallExpression) string { + switch callee := call.Callee.(type) { + case *semantic.IdentifierExpression: + return callee.Name + case *semantic.MemberExpression: + return callee.Property + default: + return "" + } +} + +func (itrp interpreter) doCall(call *semantic.CallExpression, scope *Scope) (Value, error) { + callee, err := itrp.doExpression(call.Callee, scope) + if err != nil { + return nil, err + } + if callee.Type() != semantic.Function { + return nil, fmt.Errorf("cannot call function, value is of type %v", callee.Type()) + } + f := callee.Value().(Function) + arguments, err := itrp.doArguments(call.Arguments, scope) + if err != nil { + return nil, err + } + + // Check if the function is an arrowFunc and rebind it. + if af, ok := f.(arrowFunc); ok { + af.itrp = itrp + f = af + } + + // Call the function + v, err := f.Call(arguments, itrp.d) + if err != nil { + return nil, err + } + if unused := arguments.listUnused(); len(unused) > 0 { + return nil, fmt.Errorf("unused arguments %s", unused) + } + return v, nil +} + +func (itrp interpreter) doArguments(args *semantic.ObjectExpression, scope *Scope) (Arguments, error) { + if args == nil || len(args.Properties) == 0 { + return newArguments(nil), nil + } + paramsMap := make(map[string]Value, len(args.Properties)) + for _, p := range args.Properties { + value, err := itrp.doExpression(p.Value, scope) + if err != nil { + return nil, err + } + if _, ok := paramsMap[p.Key.Name]; ok { + return nil, fmt.Errorf("duplicate keyword parameter specified: %q", p.Key.Name) + } + paramsMap[p.Key.Name] = value + } + return newArguments(paramsMap), nil +} + +type Scope struct { + parent *Scope + values map[string]Value + returnValue Value +} + +func NewScope() *Scope { + return &Scope{ + values: make(map[string]Value), + returnValue: value{t: semantic.Invalid}, + } +} + +func (s *Scope) Lookup(name string) (Value, bool) { + if s == nil { + return nil, false + } + v, ok := s.values[name] + if !ok { + 
return s.parent.Lookup(name) + } + return v, ok +} + +func (s *Scope) Set(name string, value Value) { + s.values[name] = value +} + +// SetReturn sets the return value of this scope. +func (s *Scope) SetReturn(value Value) { + s.returnValue = value +} + +// Return reports the return value for this scope. If no return value has been set a value with type semantic.TInvalid is returned. +func (s *Scope) Return() Value { + return s.returnValue +} + +func (s *Scope) Names() []string { + if s == nil { + return nil + } + names := s.parent.Names() + for k := range s.values { + names = append(names, k) + } + return names +} + +// Nest returns a new nested scope. +func (s *Scope) Nest() *Scope { + c := NewScope() + c.parent = s + return c +} + +// Copy returns a copy of the scope and its parents. +func (s *Scope) Copy() *Scope { + c := NewScope() + + // copy parent values into new scope + curr := s + for curr != nil { + // copy values + for k, v := range curr.values { + c.values[k] = v + } + curr = curr.parent + } + return c +} + +// Value represents any value that can be the result of evaluating any expression. +type Value interface { + // Type reports the type of value + Type() semantic.Type + // Value returns the actual value represented. + Value() interface{} + // Property returns a new value which is a property of this value. 
+ Property(name string) (Value, error) +} + +type value struct { + t semantic.Type + v interface{} +} + +func (v value) Type() semantic.Type { + return v.t +} +func (v value) Value() interface{} { + return v.v +} +func (v value) Property(name string) (Value, error) { + return nil, fmt.Errorf("property %q does not exist", name) +} +func (v value) String() string { + return fmt.Sprintf("%v", v.v) +} + +func NewBoolValue(v bool) Value { + return value{ + t: semantic.Bool, + v: v, + } +} +func NewIntValue(v int64) Value { + return value{ + t: semantic.Int, + v: v, + } +} +func NewUIntValue(v uint64) Value { + return value{ + t: semantic.UInt, + v: v, + } +} +func NewFloatValue(v float64) Value { + return value{ + t: semantic.Float, + v: v, + } +} +func NewStringValue(v string) Value { + return value{ + t: semantic.String, + v: v, + } +} +func NewTimeValue(v time.Time) Value { + return value{ + t: semantic.Time, + v: v, + } +} +func NewDurationValue(v time.Duration) Value { + return value{ + t: semantic.Duration, + v: v, + } +} + +// Function represents a callable type +type Function interface { + Call(args Arguments, d Domain) (Value, error) + // Resolve rewrites the function resolving any identifiers not listed in the function params. 
+ Resolve() (*semantic.FunctionExpression, error) +} + +type arrowFunc struct { + e *semantic.FunctionExpression + scope *Scope + call func(Arguments, Domain) (Value, error) + + itrp interpreter +} + +func (f arrowFunc) Call(args Arguments, d Domain) (Value, error) { + for _, p := range f.e.Params { + if p.Default == nil { + v, err := args.GetRequired(p.Key.Name) + if err != nil { + return nil, err + } + f.scope.Set(p.Key.Name, v) + } else { + v, ok := args.Get(p.Key.Name) + if !ok { + // Use default value + var err error + v, err = f.itrp.doExpression(p.Default, f.scope) + if err != nil { + return nil, err + } + } + f.scope.Set(p.Key.Name, v) + } + } + switch n := f.e.Body.(type) { + case semantic.Expression: + return f.itrp.doExpression(n, f.scope) + case semantic.Statement: + err := f.itrp.doStatement(n, f.scope) + if err != nil { + return nil, err + } + v := f.scope.Return() + if v.Type() == semantic.Invalid { + return nil, errors.New("arrow function has no return value") + } + return v, nil + default: + return nil, fmt.Errorf("unsupported arrow function body type %T", f.e.Body) + } +} + +// Resolve rewrites the function resolving any identifiers not listed in the function params. 
+func (f arrowFunc) Resolve() (*semantic.FunctionExpression, error) { + n := f.e.Copy() + node, err := f.resolveIdentifiers(n) + if err != nil { + return nil, err + } + return node.(*semantic.FunctionExpression), nil +} + +func (f arrowFunc) resolveIdentifiers(n semantic.Node) (semantic.Node, error) { + switch n := n.(type) { + case *semantic.IdentifierExpression: + for _, p := range f.e.Params { + if n.Name == p.Key.Name { + // Identifier is a parameter do not resolve + return n, nil + } + } + v, ok := f.scope.Lookup(n.Name) + if !ok { + return nil, fmt.Errorf("name %q does not exist in scope", n.Name) + } + return resolveValue(v) + case *semantic.BlockStatement: + for i, s := range n.Body { + node, err := f.resolveIdentifiers(s) + if err != nil { + return nil, err + } + n.Body[i] = node.(semantic.Statement) + } + case *semantic.ExpressionStatement: + node, err := f.resolveIdentifiers(n.Expression) + if err != nil { + return nil, err + } + n.Expression = node.(semantic.Expression) + case *semantic.ReturnStatement: + node, err := f.resolveIdentifiers(n.Argument) + if err != nil { + return nil, err + } + n.Argument = node.(semantic.Expression) + case *semantic.NativeVariableDeclaration: + node, err := f.resolveIdentifiers(n.Init) + if err != nil { + return nil, err + } + n.Init = node.(semantic.Expression) + case *semantic.CallExpression: + node, err := f.resolveIdentifiers(n.Arguments) + if err != nil { + return nil, err + } + n.Arguments = node.(*semantic.ObjectExpression) + case *semantic.FunctionExpression: + node, err := f.resolveIdentifiers(n.Body) + if err != nil { + return nil, err + } + n.Body = node + case *semantic.BinaryExpression: + node, err := f.resolveIdentifiers(n.Left) + if err != nil { + return nil, err + } + n.Left = node.(semantic.Expression) + + node, err = f.resolveIdentifiers(n.Right) + if err != nil { + return nil, err + } + n.Right = node.(semantic.Expression) + case *semantic.UnaryExpression: + node, err := f.resolveIdentifiers(n.Argument) 
+ if err != nil { + return nil, err + } + n.Argument = node.(semantic.Expression) + case *semantic.LogicalExpression: + node, err := f.resolveIdentifiers(n.Left) + if err != nil { + return nil, err + } + n.Left = node.(semantic.Expression) + node, err = f.resolveIdentifiers(n.Right) + if err != nil { + return nil, err + } + n.Right = node.(semantic.Expression) + case *semantic.ArrayExpression: + for i, el := range n.Elements { + node, err := f.resolveIdentifiers(el) + if err != nil { + return nil, err + } + n.Elements[i] = node.(semantic.Expression) + } + case *semantic.ObjectExpression: + for i, p := range n.Properties { + node, err := f.resolveIdentifiers(p) + if err != nil { + return nil, err + } + n.Properties[i] = node.(*semantic.Property) + } + case *semantic.ConditionalExpression: + node, err := f.resolveIdentifiers(n.Test) + if err != nil { + return nil, err + } + n.Test = node.(semantic.Expression) + + node, err = f.resolveIdentifiers(n.Alternate) + if err != nil { + return nil, err + } + n.Alternate = node.(semantic.Expression) + + node, err = f.resolveIdentifiers(n.Consequent) + if err != nil { + return nil, err + } + n.Consequent = node.(semantic.Expression) + case *semantic.Property: + node, err := f.resolveIdentifiers(n.Value) + if err != nil { + return nil, err + } + n.Value = node.(semantic.Expression) + } + return n, nil +} + +func resolveValue(v Value) (semantic.Node, error) { + switch t := v.Type(); t { + case semantic.String: + return &semantic.StringLiteral{ + Value: v.Value().(string), + }, nil + case semantic.Int: + return &semantic.IntegerLiteral{ + Value: v.Value().(int64), + }, nil + case semantic.UInt: + return &semantic.UnsignedIntegerLiteral{ + Value: v.Value().(uint64), + }, nil + case semantic.Float: + return &semantic.FloatLiteral{ + Value: v.Value().(float64), + }, nil + case semantic.Bool: + return &semantic.BooleanLiteral{ + Value: v.Value().(bool), + }, nil + case semantic.Time: + return &semantic.DateTimeLiteral{ + Value: 
v.Value().(time.Time), + }, nil + case semantic.Regexp: + return &semantic.RegexpLiteral{ + Value: v.Value().(*regexp.Regexp), + }, nil + case semantic.Duration: + return &semantic.DurationLiteral{ + Value: v.Value().(time.Duration), + }, nil + case semantic.Function: + return v.Value().(Function).Resolve() + case semantic.Array: + arr := v.Value().(Array) + node := new(semantic.ArrayExpression) + node.Elements = make([]semantic.Expression, len(arr.Elements)) + for i, el := range arr.Elements { + n, err := resolveValue(el) + if err != nil { + return nil, err + } + node.Elements[i] = n.(semantic.Expression) + } + return node, nil + case semantic.Object: + m := v.Value().(Object) + node := new(semantic.ObjectExpression) + node.Properties = make([]*semantic.Property, 0, len(m.Properties)) + for k, el := range m.Properties { + n, err := resolveValue(el) + if err != nil { + return nil, err + } + node.Properties = append(node.Properties, &semantic.Property{ + Key: &semantic.Identifier{Name: k}, + Value: n.(semantic.Expression), + }) + } + return node, nil + default: + return nil, fmt.Errorf("cannot resove value of type %v", t) + } +} + +// Array represents an sequence of elements +// All elements must be the same type +type Array struct { + Elements []Value + typ semantic.Type +} + +func NewArray(elementType semantic.Type) Array { + return Array{ + typ: semantic.NewArrayType(elementType), + } +} + +func (a Array) Type() semantic.Type { + return a.typ +} + +func (a Array) Value() interface{} { + return a +} + +func (a Array) Property(name string) (Value, error) { + i, err := strconv.Atoi(name) + if err != nil { + return nil, err + } + if i < 0 || i >= len(a.Elements) { + return nil, fmt.Errorf("out of bounds index %d, length: %d", i, len(a.Elements)) + } + return a.Elements[i], nil +} + +func (a Array) AsStrings() []string { + if a.typ.ElementType() != semantic.String { + return nil + } + strs := make([]string, len(a.Elements)) + for i, v := range a.Elements { + strs[i] = 
v.Value().(string) + } + return strs +} + +// Object represents an association of keys to values. +// Object values may be of any type. +type Object struct { + Properties map[string]Value +} + +func (m Object) Type() semantic.Type { + propertyTypes := make(map[string]semantic.Type) + for k, v := range m.Properties { + propertyTypes[k] = v.Type() + } + return semantic.NewObjectType(propertyTypes) +} +func (m Object) Value() interface{} { + return m +} +func (m Object) Property(name string) (Value, error) { + v, ok := m.Properties[name] + if ok { + return v, nil + } + return nil, fmt.Errorf("property %q does not exist", name) +} + +// Arguments provides access to the keyword arguments passed to a function. +// semantic.The Get{Type} methods return three values: the typed value of the arg, +// whether the argument was specified and any errors about the argument type. +// semantic.The GetRequired{Type} methods return only two values, the typed value of the arg and any errors, a missing argument is considered an error in this case. +type Arguments interface { + Get(name string) (Value, bool) + GetRequired(name string) (Value, error) + + GetString(name string) (string, bool, error) + GetInt(name string) (int64, bool, error) + GetFloat(name string) (float64, bool, error) + GetBool(name string) (bool, bool, error) + GetFunction(name string) (Function, bool, error) + GetArray(name string, t semantic.Kind) (Array, bool, error) + GetObject(name string) (Object, bool, error) + + GetRequiredString(name string) (string, error) + GetRequiredInt(name string) (int64, error) + GetRequiredFloat(name string) (float64, error) + GetRequiredBool(name string) (bool, error) + GetRequiredFunction(name string) (Function, error) + GetRequiredArray(name string, t semantic.Kind) (Array, error) + GetRequiredObject(name string) (Object, error) + + // listUnused returns the list of provided arguments that were not used by the function. 
+ listUnused() []string +} + +type arguments struct { + params map[string]Value + used map[string]bool +} + +func newArguments(params map[string]Value) *arguments { + return &arguments{ + params: params, + used: make(map[string]bool, len(params)), + } +} + +func (a *arguments) Get(name string) (Value, bool) { + a.used[name] = true + v, ok := a.params[name] + return v, ok +} + +func (a *arguments) GetRequired(name string) (Value, error) { + a.used[name] = true + v, ok := a.params[name] + if !ok { + return nil, fmt.Errorf("missing required keyword argument %q", name) + } + return v, nil +} + +func (a *arguments) GetString(name string) (string, bool, error) { + v, ok, err := a.get(name, semantic.String, false) + if err != nil || !ok { + return "", ok, err + } + return v.Value().(string), ok, nil +} +func (a *arguments) GetRequiredString(name string) (string, error) { + v, _, err := a.get(name, semantic.String, true) + if err != nil { + return "", err + } + return v.Value().(string), nil +} +func (a *arguments) GetInt(name string) (int64, bool, error) { + v, ok, err := a.get(name, semantic.Int, false) + if err != nil || !ok { + return 0, ok, err + } + return v.Value().(int64), ok, nil +} +func (a *arguments) GetRequiredInt(name string) (int64, error) { + v, _, err := a.get(name, semantic.Int, true) + if err != nil { + return 0, err + } + return v.Value().(int64), nil +} +func (a *arguments) GetFloat(name string) (float64, bool, error) { + v, ok, err := a.get(name, semantic.Float, false) + if err != nil || !ok { + return 0, ok, err + } + return v.Value().(float64), ok, nil +} +func (a *arguments) GetRequiredFloat(name string) (float64, error) { + v, _, err := a.get(name, semantic.Float, true) + if err != nil { + return 0, err + } + return v.Value().(float64), nil +} +func (a *arguments) GetBool(name string) (bool, bool, error) { + v, ok, err := a.get(name, semantic.Bool, false) + if err != nil || !ok { + return false, ok, err + } + return v.Value().(bool), ok, nil +} 
+func (a *arguments) GetRequiredBool(name string) (bool, error) { + v, _, err := a.get(name, semantic.Bool, true) + if err != nil { + return false, err + } + return v.Value().(bool), nil +} + +func (a *arguments) GetArray(name string, t semantic.Kind) (Array, bool, error) { + v, ok, err := a.get(name, semantic.Array, false) + if err != nil || !ok { + return Array{}, ok, err + } + arr := v.Value().(Array) + if arr.Type().ElementType() != t { + return Array{}, true, fmt.Errorf("keyword argument %q should be of an array of type %v, but got an array of type %v", name, t, arr.Type()) + } + return v.Value().(Array), ok, nil +} +func (a *arguments) GetRequiredArray(name string, t semantic.Kind) (Array, error) { + v, _, err := a.get(name, semantic.Array, true) + if err != nil { + return Array{}, err + } + arr := v.Value().(Array) + if arr.Type().ElementType() != t { + return Array{}, fmt.Errorf("keyword argument %q should be of an array of type %v, but got an array of type %v", name, t, arr.Type()) + } + return arr, nil +} +func (a *arguments) GetFunction(name string) (Function, bool, error) { + v, ok, err := a.get(name, semantic.Function, false) + if err != nil || !ok { + return nil, ok, err + } + return v.Value().(Function), ok, nil +} +func (a *arguments) GetRequiredFunction(name string) (Function, error) { + v, _, err := a.get(name, semantic.Function, true) + if err != nil { + return nil, err + } + return v.Value().(Function), nil +} + +func (a *arguments) GetObject(name string) (Object, bool, error) { + v, ok, err := a.get(name, semantic.Object, false) + if err != nil || !ok { + return Object{}, ok, err + } + return v.Value().(Object), ok, nil +} +func (a *arguments) GetRequiredObject(name string) (Object, error) { + v, _, err := a.get(name, semantic.Object, true) + if err != nil { + return Object{}, err + } + return v.Value().(Object), nil +} + +func (a *arguments) get(name string, kind semantic.Kind, required bool) (Value, bool, error) { + a.used[name] = true + v, 
ok := a.params[name] + if !ok { + if required { + return nil, false, fmt.Errorf("missing required keyword argument %q", name) + } + return nil, false, nil + } + if v.Type().Kind() != kind { + return nil, true, fmt.Errorf("keyword argument %q should be of kind %v, but got %v", name, kind, v.Type().Kind()) + } + return v, true, nil +} + +func (a *arguments) listUnused() []string { + var unused []string + for k := range a.params { + if !a.used[k] { + unused = append(unused, k) + } + } + + return unused +} + +type binaryFunc func(l, r Value) Value + +type binaryFuncSignature struct { + operator ast.OperatorKind + left, right semantic.Type +} + +var binaryFuncLookup = map[binaryFuncSignature]binaryFunc{ + //--------------- + // Math Operators + //--------------- + {operator: ast.AdditionOperator, left: semantic.Int, right: semantic.Int}: func(lv, rv Value) Value { + l := lv.Value().(int64) + r := rv.Value().(int64) + return NewIntValue(l + r) + }, + {operator: ast.AdditionOperator, left: semantic.UInt, right: semantic.UInt}: func(lv, rv Value) Value { + l := lv.Value().(uint64) + r := rv.Value().(uint64) + return NewUIntValue(l + r) + }, + {operator: ast.AdditionOperator, left: semantic.Float, right: semantic.Float}: func(lv, rv Value) Value { + l := lv.Value().(float64) + r := rv.Value().(float64) + return NewFloatValue(l + r) + }, + {operator: ast.SubtractionOperator, left: semantic.Int, right: semantic.Int}: func(lv, rv Value) Value { + l := lv.Value().(int64) + r := rv.Value().(int64) + return NewIntValue(l - r) + }, + {operator: ast.SubtractionOperator, left: semantic.UInt, right: semantic.UInt}: func(lv, rv Value) Value { + l := lv.Value().(uint64) + r := rv.Value().(uint64) + return NewUIntValue(l - r) + }, + {operator: ast.SubtractionOperator, left: semantic.Float, right: semantic.Float}: func(lv, rv Value) Value { + l := lv.Value().(float64) + r := rv.Value().(float64) + return NewFloatValue(l - r) + }, + {operator: ast.MultiplicationOperator, left: 
semantic.Int, right: semantic.Int}: func(lv, rv Value) Value { + l := lv.Value().(int64) + r := rv.Value().(int64) + return NewIntValue(l * r) + }, + {operator: ast.MultiplicationOperator, left: semantic.UInt, right: semantic.UInt}: func(lv, rv Value) Value { + l := lv.Value().(uint64) + r := rv.Value().(uint64) + return NewUIntValue(l * r) + }, + {operator: ast.MultiplicationOperator, left: semantic.Float, right: semantic.Float}: func(lv, rv Value) Value { + l := lv.Value().(float64) + r := rv.Value().(float64) + return NewFloatValue(l * r) + }, + {operator: ast.DivisionOperator, left: semantic.Int, right: semantic.Int}: func(lv, rv Value) Value { + l := lv.Value().(int64) + r := rv.Value().(int64) + return NewIntValue(l / r) + }, + {operator: ast.DivisionOperator, left: semantic.UInt, right: semantic.UInt}: func(lv, rv Value) Value { + l := lv.Value().(uint64) + r := rv.Value().(uint64) + return NewUIntValue(l / r) + }, + {operator: ast.DivisionOperator, left: semantic.Float, right: semantic.Float}: func(lv, rv Value) Value { + l := lv.Value().(float64) + r := rv.Value().(float64) + return NewFloatValue(l / r) + }, + + //--------------------- + // Comparison Operators + //--------------------- + + // LessThanEqualOperator + + {operator: ast.LessThanEqualOperator, left: semantic.Int, right: semantic.Int}: func(lv, rv Value) Value { + l := lv.Value().(int64) + r := rv.Value().(int64) + return NewBoolValue(l <= r) + }, + {operator: ast.LessThanEqualOperator, left: semantic.Int, right: semantic.UInt}: func(lv, rv Value) Value { + l := lv.Value().(int64) + r := rv.Value().(uint64) + if l < 0 { + return NewBoolValue(true) + } + return NewBoolValue(uint64(l) <= r) + }, + {operator: ast.LessThanEqualOperator, left: semantic.Int, right: semantic.Float}: func(lv, rv Value) Value { + l := lv.Value().(int64) + r := rv.Value().(float64) + return NewBoolValue(float64(l) <= r) + }, + {operator: ast.LessThanEqualOperator, left: semantic.UInt, right: semantic.Int}: func(lv, rv 
Value) Value { + l := lv.Value().(uint64) + r := rv.Value().(int64) + if r < 0 { + return NewBoolValue(false) + } + return NewBoolValue(l <= uint64(r)) + }, + {operator: ast.LessThanEqualOperator, left: semantic.UInt, right: semantic.UInt}: func(lv, rv Value) Value { + l := lv.Value().(uint64) + r := rv.Value().(uint64) + return NewBoolValue(l <= r) + }, + {operator: ast.LessThanEqualOperator, left: semantic.UInt, right: semantic.Float}: func(lv, rv Value) Value { + l := lv.Value().(uint64) + r := rv.Value().(float64) + return NewBoolValue(float64(l) <= r) + }, + {operator: ast.LessThanEqualOperator, left: semantic.Float, right: semantic.Int}: func(lv, rv Value) Value { + l := lv.Value().(float64) + r := rv.Value().(int64) + return NewBoolValue(l <= float64(r)) + }, + {operator: ast.LessThanEqualOperator, left: semantic.Float, right: semantic.UInt}: func(lv, rv Value) Value { + l := lv.Value().(float64) + r := rv.Value().(uint64) + return NewBoolValue(l <= float64(r)) + }, + {operator: ast.LessThanEqualOperator, left: semantic.Float, right: semantic.Float}: func(lv, rv Value) Value { + l := lv.Value().(float64) + r := rv.Value().(float64) + return NewBoolValue(l <= r) + }, + + // LessThanOperator + + {operator: ast.LessThanOperator, left: semantic.Int, right: semantic.Int}: func(lv, rv Value) Value { + l := lv.Value().(int64) + r := rv.Value().(int64) + return NewBoolValue(l < r) + }, + {operator: ast.LessThanOperator, left: semantic.Int, right: semantic.UInt}: func(lv, rv Value) Value { + l := lv.Value().(int64) + r := rv.Value().(uint64) + if l < 0 { + return NewBoolValue(true) + } + return NewBoolValue(uint64(l) < r) + }, + {operator: ast.LessThanOperator, left: semantic.Int, right: semantic.Float}: func(lv, rv Value) Value { + l := lv.Value().(int64) + r := rv.Value().(float64) + return NewBoolValue(float64(l) < r) + }, + {operator: ast.LessThanOperator, left: semantic.UInt, right: semantic.Int}: func(lv, rv Value) Value { + l := lv.Value().(uint64) + r := 
rv.Value().(int64) + if r < 0 { + return NewBoolValue(false) + } + return NewBoolValue(l < uint64(r)) + }, + {operator: ast.LessThanOperator, left: semantic.UInt, right: semantic.UInt}: func(lv, rv Value) Value { + l := lv.Value().(uint64) + r := rv.Value().(uint64) + return NewBoolValue(l < r) + }, + {operator: ast.LessThanOperator, left: semantic.UInt, right: semantic.Float}: func(lv, rv Value) Value { + l := lv.Value().(uint64) + r := rv.Value().(float64) + return NewBoolValue(float64(l) < r) + }, + {operator: ast.LessThanOperator, left: semantic.Float, right: semantic.Int}: func(lv, rv Value) Value { + l := lv.Value().(float64) + r := rv.Value().(int64) + return NewBoolValue(l < float64(r)) + }, + {operator: ast.LessThanOperator, left: semantic.Float, right: semantic.UInt}: func(lv, rv Value) Value { + l := lv.Value().(float64) + r := rv.Value().(uint64) + return NewBoolValue(l < float64(r)) + }, + {operator: ast.LessThanOperator, left: semantic.Float, right: semantic.Float}: func(lv, rv Value) Value { + l := lv.Value().(float64) + r := rv.Value().(float64) + return NewBoolValue(l < r) + }, + + // GreaterThanEqualOperator + + {operator: ast.GreaterThanEqualOperator, left: semantic.Int, right: semantic.Int}: func(lv, rv Value) Value { + l := lv.Value().(int64) + r := rv.Value().(int64) + return NewBoolValue(l >= r) + }, + {operator: ast.GreaterThanEqualOperator, left: semantic.Int, right: semantic.UInt}: func(lv, rv Value) Value { + l := lv.Value().(int64) + r := rv.Value().(uint64) + if l < 0 { + return NewBoolValue(true) + } + return NewBoolValue(uint64(l) >= r) + }, + {operator: ast.GreaterThanEqualOperator, left: semantic.Int, right: semantic.Float}: func(lv, rv Value) Value { + l := lv.Value().(int64) + r := rv.Value().(float64) + return NewBoolValue(float64(l) >= r) + }, + {operator: ast.GreaterThanEqualOperator, left: semantic.UInt, right: semantic.Int}: func(lv, rv Value) Value { + l := lv.Value().(uint64) + r := rv.Value().(int64) + if r < 0 { + return 
NewBoolValue(false) + } + return NewBoolValue(l >= uint64(r)) + }, + {operator: ast.GreaterThanEqualOperator, left: semantic.UInt, right: semantic.UInt}: func(lv, rv Value) Value { + l := lv.Value().(uint64) + r := rv.Value().(uint64) + return NewBoolValue(l >= r) + }, + {operator: ast.GreaterThanEqualOperator, left: semantic.UInt, right: semantic.Float}: func(lv, rv Value) Value { + l := lv.Value().(uint64) + r := rv.Value().(float64) + return NewBoolValue(float64(l) >= r) + }, + {operator: ast.GreaterThanEqualOperator, left: semantic.Float, right: semantic.Int}: func(lv, rv Value) Value { + l := lv.Value().(float64) + r := rv.Value().(int64) + return NewBoolValue(l >= float64(r)) + }, + {operator: ast.GreaterThanEqualOperator, left: semantic.Float, right: semantic.UInt}: func(lv, rv Value) Value { + l := lv.Value().(float64) + r := rv.Value().(uint64) + return NewBoolValue(l >= float64(r)) + }, + {operator: ast.GreaterThanEqualOperator, left: semantic.Float, right: semantic.Float}: func(lv, rv Value) Value { + l := lv.Value().(float64) + r := rv.Value().(float64) + return NewBoolValue(l >= r) + }, + + // GreaterThanOperator + + {operator: ast.GreaterThanOperator, left: semantic.Int, right: semantic.Int}: func(lv, rv Value) Value { + l := lv.Value().(int64) + r := rv.Value().(int64) + return NewBoolValue(l > r) + }, + {operator: ast.GreaterThanOperator, left: semantic.Int, right: semantic.UInt}: func(lv, rv Value) Value { + l := lv.Value().(int64) + r := rv.Value().(uint64) + if l < 0 { + return NewBoolValue(true) + } + return NewBoolValue(uint64(l) > r) + }, + {operator: ast.GreaterThanOperator, left: semantic.Int, right: semantic.Float}: func(lv, rv Value) Value { + l := lv.Value().(int64) + r := rv.Value().(float64) + return NewBoolValue(float64(l) > r) + }, + {operator: ast.GreaterThanOperator, left: semantic.UInt, right: semantic.Int}: func(lv, rv Value) Value { + l := lv.Value().(uint64) + r := rv.Value().(int64) + if r < 0 { + return NewBoolValue(false) + } 
+ return NewBoolValue(l > uint64(r)) + }, + {operator: ast.GreaterThanOperator, left: semantic.UInt, right: semantic.UInt}: func(lv, rv Value) Value { + l := lv.Value().(uint64) + r := rv.Value().(uint64) + return NewBoolValue(l > r) + }, + {operator: ast.GreaterThanOperator, left: semantic.UInt, right: semantic.Float}: func(lv, rv Value) Value { + l := lv.Value().(uint64) + r := rv.Value().(float64) + return NewBoolValue(float64(l) > r) + }, + {operator: ast.GreaterThanOperator, left: semantic.Float, right: semantic.Int}: func(lv, rv Value) Value { + l := lv.Value().(float64) + r := rv.Value().(int64) + return NewBoolValue(l > float64(r)) + }, + {operator: ast.GreaterThanOperator, left: semantic.Float, right: semantic.UInt}: func(lv, rv Value) Value { + l := lv.Value().(float64) + r := rv.Value().(uint64) + return NewBoolValue(l > float64(r)) + }, + {operator: ast.GreaterThanOperator, left: semantic.Float, right: semantic.Float}: func(lv, rv Value) Value { + l := lv.Value().(float64) + r := rv.Value().(float64) + return NewBoolValue(l > r) + }, + + // EqualOperator + + {operator: ast.EqualOperator, left: semantic.Int, right: semantic.Int}: func(lv, rv Value) Value { + l := lv.Value().(int64) + r := rv.Value().(int64) + return NewBoolValue(l == r) + }, + {operator: ast.EqualOperator, left: semantic.Int, right: semantic.UInt}: func(lv, rv Value) Value { + l := lv.Value().(int64) + r := rv.Value().(uint64) + if l < 0 { + return NewBoolValue(false) + } + return NewBoolValue(uint64(l) == r) + }, + {operator: ast.EqualOperator, left: semantic.Int, right: semantic.Float}: func(lv, rv Value) Value { + l := lv.Value().(int64) + r := rv.Value().(float64) + return NewBoolValue(float64(l) == r) + }, + {operator: ast.EqualOperator, left: semantic.UInt, right: semantic.Int}: func(lv, rv Value) Value { + l := lv.Value().(uint64) + r := rv.Value().(int64) + if r < 0 { + return NewBoolValue(false) + } + return NewBoolValue(l == uint64(r)) + }, + {operator: ast.EqualOperator, left: 
semantic.UInt, right: semantic.UInt}: func(lv, rv Value) Value { + l := lv.Value().(uint64) + r := rv.Value().(uint64) + return NewBoolValue(l == r) + }, + {operator: ast.EqualOperator, left: semantic.UInt, right: semantic.Float}: func(lv, rv Value) Value { + l := lv.Value().(uint64) + r := rv.Value().(float64) + return NewBoolValue(float64(l) == r) + }, + {operator: ast.EqualOperator, left: semantic.Float, right: semantic.Int}: func(lv, rv Value) Value { + l := lv.Value().(float64) + r := rv.Value().(int64) + return NewBoolValue(l == float64(r)) + }, + {operator: ast.EqualOperator, left: semantic.Float, right: semantic.UInt}: func(lv, rv Value) Value { + l := lv.Value().(float64) + r := rv.Value().(uint64) + return NewBoolValue(l == float64(r)) + }, + {operator: ast.EqualOperator, left: semantic.Float, right: semantic.Float}: func(lv, rv Value) Value { + l := lv.Value().(float64) + r := rv.Value().(float64) + return NewBoolValue(l == r) + }, + {operator: ast.EqualOperator, left: semantic.String, right: semantic.String}: func(lv, rv Value) Value { + l := lv.Value().(string) + r := rv.Value().(string) + return NewBoolValue(l == r) + }, + + // NotEqualOperator + + {operator: ast.NotEqualOperator, left: semantic.Int, right: semantic.Int}: func(lv, rv Value) Value { + l := lv.Value().(int64) + r := rv.Value().(int64) + return NewBoolValue(l != r) + }, + {operator: ast.NotEqualOperator, left: semantic.Int, right: semantic.UInt}: func(lv, rv Value) Value { + l := lv.Value().(int64) + r := rv.Value().(uint64) + if l < 0 { + return NewBoolValue(true) + } + return NewBoolValue(uint64(l) != r) + }, + {operator: ast.NotEqualOperator, left: semantic.Int, right: semantic.Float}: func(lv, rv Value) Value { + l := lv.Value().(int64) + r := rv.Value().(float64) + return NewBoolValue(float64(l) != r) + }, + {operator: ast.NotEqualOperator, left: semantic.UInt, right: semantic.Int}: func(lv, rv Value) Value { + l := lv.Value().(uint64) + r := rv.Value().(int64) + if r < 0 { + return 
NewBoolValue(true) + } + return NewBoolValue(l != uint64(r)) + }, + {operator: ast.NotEqualOperator, left: semantic.UInt, right: semantic.UInt}: func(lv, rv Value) Value { + l := lv.Value().(uint64) + r := rv.Value().(uint64) + return NewBoolValue(l != r) + }, + {operator: ast.NotEqualOperator, left: semantic.UInt, right: semantic.Float}: func(lv, rv Value) Value { + l := lv.Value().(uint64) + r := rv.Value().(float64) + return NewBoolValue(float64(l) != r) + }, + {operator: ast.NotEqualOperator, left: semantic.Float, right: semantic.Int}: func(lv, rv Value) Value { + l := lv.Value().(float64) + r := rv.Value().(int64) + return NewBoolValue(l != float64(r)) + }, + {operator: ast.NotEqualOperator, left: semantic.Float, right: semantic.UInt}: func(lv, rv Value) Value { + l := lv.Value().(float64) + r := rv.Value().(uint64) + return NewBoolValue(l != float64(r)) + }, + {operator: ast.NotEqualOperator, left: semantic.Float, right: semantic.Float}: func(lv, rv Value) Value { + l := lv.Value().(float64) + r := rv.Value().(float64) + return NewBoolValue(l != r) + }, + {operator: ast.NotEqualOperator, left: semantic.String, right: semantic.String}: func(lv, rv Value) Value { + l := lv.Value().(string) + r := rv.Value().(string) + return NewBoolValue(l != r) + }, + {operator: ast.RegexpMatchOperator, left: semantic.String, right: semantic.Regexp}: func(lv, rv Value) Value { + l := lv.Value().(string) + r := rv.Value().(*regexp.Regexp) + return NewBoolValue(r.MatchString(l)) + }, + {operator: ast.RegexpMatchOperator, left: semantic.Regexp, right: semantic.String}: func(lv, rv Value) Value { + l := lv.Value().(*regexp.Regexp) + r := rv.Value().(string) + return NewBoolValue(l.MatchString(r)) + }, + {operator: ast.NotRegexpMatchOperator, left: semantic.String, right: semantic.Regexp}: func(lv, rv Value) Value { + l := lv.Value().(string) + r := rv.Value().(*regexp.Regexp) + return NewBoolValue(!r.MatchString(l)) + }, + {operator: ast.NotRegexpMatchOperator, left: 
semantic.Regexp, right: semantic.String}: func(lv, rv Value) Value { + l := lv.Value().(*regexp.Regexp) + r := rv.Value().(string) + return NewBoolValue(!l.MatchString(r)) + }, +} diff --git a/vendor/github.com/influxdata/ifql/interpreter/interpreter_test.go b/vendor/github.com/influxdata/ifql/interpreter/interpreter_test.go new file mode 100644 index 000000000..4f2874466 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/interpreter/interpreter_test.go @@ -0,0 +1,308 @@ +package interpreter_test + +import ( + "errors" + "fmt" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/influxdata/ifql/ast" + "github.com/influxdata/ifql/interpreter" + "github.com/influxdata/ifql/parser" + "github.com/influxdata/ifql/semantic" + "github.com/influxdata/ifql/semantic/semantictest" +) + +var testScope = interpreter.NewScope() +var testDeclarations = make(map[string]semantic.VariableDeclaration) + +func init() { + testScope.Set("fortyTwo", function{ + name: "fortyTwo", + call: func(args interpreter.Arguments, d interpreter.Domain) (interpreter.Value, error) { + return interpreter.NewFloatValue(42.0), nil + }, + }) + testScope.Set("six", function{ + name: "six", + call: func(args interpreter.Arguments, d interpreter.Domain) (interpreter.Value, error) { + return interpreter.NewFloatValue(6.0), nil + }, + }) + testScope.Set("nine", function{ + name: "nine", + call: func(args interpreter.Arguments, d interpreter.Domain) (interpreter.Value, error) { + return interpreter.NewFloatValue(9.0), nil + }, + }) + testScope.Set("fail", function{ + name: "fail", + call: func(args interpreter.Arguments, d interpreter.Domain) (interpreter.Value, error) { + return nil, errors.New("fail") + }, + }) + testScope.Set("plusOne", function{ + name: "plusOne", + call: func(args interpreter.Arguments, d interpreter.Domain) (interpreter.Value, error) { + v, err := args.GetRequiredFloat("x") + if err != nil { + return nil, err + } + return interpreter.NewFloatValue(v + 1), nil + }, + }) + 
testDeclarations["plusOne"] = semantic.NewExternalVariableDeclaration( + "plusOne", + semantic.NewFunctionType(semantic.FunctionSignature{ + Params: map[string]semantic.Type{"x": semantic.Float}, + ReturnType: semantic.Float, + PipeArgument: "x", + }), + ) +} + +// TestEval tests whether a program can run to completion or not +func TestEval(t *testing.T) { + testCases := []struct { + name string + query string + wantErr bool + }{ + { + name: "call function", + query: "six()", + }, + { + name: "call function with fail", + query: "fail()", + wantErr: true, + }, + { + name: "reassign nested scope", + query: ` + six = six() + six() + `, + wantErr: true, + }, + { + name: "binary expressions", + query: ` + six = six() + nine = nine() + + answer = fortyTwo() == six * nine + `, + }, + { + name: "logical expressions short circuit", + query: ` + six = six() + nine = nine() + + answer = (not (fortyTwo() == six * nine)) or fail() + `, + }, + { + name: "arrow function", + query: ` + plusSix = (r) => r + six() + plusSix(r:1.0) == 7.0 or fail() + `, + }, + { + name: "arrow function block", + query: ` + f = (r) => { + r2 = r * r + return (r - r2) / r2 + } + f(r:2.0) == -0.5 or fail() + `, + }, + { + name: "arrow function with default param", + query: ` + addN = (r,n=4) => r + n + addN(r:2) == 6 or fail() + addN(r:3,n:1) == 4 or fail() + `, + }, + { + name: "scope closing", + query: ` + x = 5 + plusX = (r) => r + x + plusX(r:2) == 7 or fail() + `, + }, + { + name: "scope closing mutable", + query: ` + x = 5 + plusX = (r) => r + x + plusX(r:2) == 7 or fail() + x = 1 + plusX(r:2) == 3 or fail() + `, + }, + { + name: "return map from func", + query: ` + toMap = (a,b) => ({ + a: a, + b: b, + }) + m = toMap(a:1, b:false) + m.a == 1 or fail() + not m.b or fail() + `, + }, + { + name: "pipe expression", + query: ` + add = (a=<-,b) => a + b + one = 1 + one |> add(b:2) == 3 or fail() + `, + }, + { + name: "ignore pipe default", + query: ` + add = (a=<-,b) => a + b + add(a:1, b:2) == 3 or 
fail() + `, + }, + { + name: "pipe expression function", + query: ` + add = (a=<-,b) => a + b + six() |> add(b:2.0) == 8.0 or fail() + `, + }, + { + name: "pipe builtin function", + query: ` + six() |> plusOne() == 7.0 or fail() + `, + }, + { + name: "regex match", + query: ` + "abba" =~ /^a.*a$/ or fail() + `, + }, + { + name: "regex not match", + query: ` + "abc" =~ /^a.*a$/ and fail() + `, + }, + { + name: "not regex match", + query: ` + "abc" !~ /^a.*a$/ or fail() + `, + }, + { + name: "not regex not match", + query: ` + "abba" !~ /^a.*a$/ and fail() + `, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + program, err := parser.NewAST(tc.query) + if err != nil { + t.Fatal(err) + } + graph, err := semantic.New(program, testDeclarations) + if err != nil { + t.Fatal(err) + } + + err = interpreter.Eval(graph, testScope.Nest(), nil) + if !tc.wantErr && err != nil { + t.Fatal(err) + } else if tc.wantErr && err == nil { + t.Fatal("expected error") + } + }) + } + +} +func TestFunction_Resolve(t *testing.T) { + var got *semantic.FunctionExpression + scope := interpreter.NewScope() + scope.Set("resolver", function{ + name: "resolver", + call: func(args interpreter.Arguments, d interpreter.Domain) (interpreter.Value, error) { + f, err := args.GetRequiredFunction("f") + if err != nil { + return nil, err + } + got, err = f.Resolve() + if err != nil { + return nil, err + } + return nil, nil + }, + }) + + program, err := parser.NewAST(` + x = 42 + resolver(f: (r) => r + x) +`) + if err != nil { + t.Fatal(err) + } + + graph, err := semantic.New(program, testDeclarations) + if err != nil { + t.Fatal(err) + } + + if err := interpreter.Eval(graph, scope, nil); err != nil { + t.Fatal(err) + } + + want := &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "r"}}}, + Body: &semantic.BinaryExpression{ + Operator: ast.AdditionOperator, + Left: &semantic.IdentifierExpression{Name: "r"}, + Right: 
&semantic.IntegerLiteral{Value: 42}, + }, + } + if !cmp.Equal(want, got, semantictest.CmpOptions...) { + t.Errorf("unexpected resoved function: -want/+got\n%s", cmp.Diff(want, got, semantictest.CmpOptions...)) + } +} + +type function struct { + name string + call func(args interpreter.Arguments, d interpreter.Domain) (interpreter.Value, error) +} + +func (f function) Type() semantic.Type { + //TODO(nathanielc): Return a complete function type + return semantic.Function +} + +func (f function) Value() interface{} { + return f +} +func (f function) Property(name string) (interpreter.Value, error) { + return nil, fmt.Errorf("property %q does not exist", name) +} + +func (f function) Call(args interpreter.Arguments, d interpreter.Domain) (interpreter.Value, error) { + return f.call(args, d) +} + +func (f function) Resolve() (*semantic.FunctionExpression, error) { + return nil, fmt.Errorf("function %q cannot be resolved", f.name) +} diff --git a/vendor/github.com/influxdata/ifql/parser/Makefile b/vendor/github.com/influxdata/ifql/parser/Makefile new file mode 100644 index 000000000..36f7bb293 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/parser/Makefile @@ -0,0 +1,10 @@ +all: ifql.go + + +ifql.go: ifql.peg parser.go parser_debug.go ../bin/pigeon + PATH=../bin:${PATH} $(GO_GENERATE) -x ./... + +clean: + rm -f ifql.go + +.PHONY: all clean diff --git a/vendor/github.com/influxdata/ifql/parser/doc.go b/vendor/github.com/influxdata/ifql/parser/doc.go new file mode 100644 index 000000000..7da5badde --- /dev/null +++ b/vendor/github.com/influxdata/ifql/parser/doc.go @@ -0,0 +1,3 @@ +// Package parser provides a PEG parser for parsing IFQL. +// Parsing an IFQL script produces an AST. 
+package parser diff --git a/vendor/github.com/influxdata/ifql/parser/ifql.go b/vendor/github.com/influxdata/ifql/parser/ifql.go new file mode 100644 index 000000000..cd67034d3 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/parser/ifql.go @@ -0,0 +1,7868 @@ +package parser + +import ( + "bytes" + "errors" + "fmt" + "io" + "io/ioutil" + "math" + "os" + "sort" + "strings" + "unicode" + "unicode/utf8" +) + +// DO NOT EDIT: This file is auto generated by the pigeon PEG parser generator. + +var g = &grammar{ + rules: []*rule{ + { + name: "Start", + pos: position{line: 8, col: 1, offset: 102}, + expr: &actionExpr{ + pos: position{line: 9, col: 5, offset: 112}, + run: (*parser).callonStart1, + expr: &seqExpr{ + pos: position{line: 9, col: 5, offset: 112}, + exprs: []interface{}{ + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 9, col: 8, offset: 115}, + label: "program", + expr: &ruleRefExpr{ + pos: position{line: 9, col: 16, offset: 123}, + name: "Program", + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 
468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + ¬Expr{ + pos: position{line: 483, col: 5, offset: 8851}, + expr: &anyMatcher{ + line: 483, col: 6, offset: 8852, + }, + }, + }, + }, + }, + }, + { + name: "Program", + pos: position{line: 13, col: 1, offset: 173}, + expr: &actionExpr{ + pos: position{line: 14, col: 5, offset: 185}, + run: (*parser).callonProgram1, + expr: &labeledExpr{ + pos: position{line: 14, col: 5, offset: 185}, + label: "body", + expr: &ruleRefExpr{ + pos: position{line: 14, col: 10, offset: 190}, + name: "SourceElements", + }, + }, + }, + }, + { + name: "SourceElements", + pos: position{line: 18, col: 1, offset: 256}, + expr: &actionExpr{ + pos: position{line: 19, col: 5, offset: 275}, + run: (*parser).callonSourceElements1, + expr: &seqExpr{ + pos: position{line: 19, col: 5, offset: 275}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 19, col: 5, offset: 275}, + label: "head", + expr: &ruleRefExpr{ + pos: position{line: 19, col: 10, offset: 280}, + name: "SourceElement", + }, + }, + &labeledExpr{ + pos: position{line: 19, col: 24, offset: 294}, + label: "tail", + expr: &zeroOrMoreExpr{ + pos: position{line: 19, col: 29, offset: 299}, + expr: &seqExpr{ + pos: position{line: 19, col: 30, 
offset: 300}, + exprs: []interface{}{ + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 19, col: 33, offset: 303}, + name: "SourceElement", + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + 
}, + { + name: "SourceElement", + pos: position{line: 23, col: 1, offset: 365}, + expr: &ruleRefExpr{ + pos: position{line: 24, col: 5, offset: 383}, + name: "Statement", + }, + }, + { + name: "Statement", + pos: position{line: 26, col: 1, offset: 394}, + expr: &choiceExpr{ + pos: position{line: 27, col: 5, offset: 408}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 27, col: 5, offset: 408}, + name: "VariableStatement", + }, + &ruleRefExpr{ + pos: position{line: 28, col: 5, offset: 430}, + name: "ReturnStatement", + }, + &ruleRefExpr{ + pos: position{line: 29, col: 5, offset: 450}, + name: "ExpressionStatement", + }, + &ruleRefExpr{ + pos: position{line: 30, col: 5, offset: 474}, + name: "BlockStatement", + }, + }, + }, + }, + { + name: "VariableStatement", + pos: position{line: 33, col: 1, offset: 491}, + expr: &actionExpr{ + pos: position{line: 34, col: 5, offset: 513}, + run: (*parser).callonVariableStatement1, + expr: &labeledExpr{ + pos: position{line: 34, col: 5, offset: 513}, + label: "declaration", + expr: &ruleRefExpr{ + pos: position{line: 34, col: 17, offset: 525}, + name: "VariableDeclaration", + }, + }, + }, + }, + { + name: "ReturnStatement", + pos: position{line: 38, col: 1, offset: 604}, + expr: &actionExpr{ + pos: position{line: 39, col: 5, offset: 624}, + run: (*parser).callonReturnStatement1, + expr: &seqExpr{ + pos: position{line: 39, col: 5, offset: 624}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 39, col: 5, offset: 624}, + val: "return", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + 
&litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 39, col: 17, offset: 636}, + label: "argument", + expr: &ruleRefExpr{ + pos: position{line: 39, col: 26, offset: 645}, + name: "Expr", + }, + }, + }, + }, + }, + }, + { + name: "ExpressionStatement", + pos: position{line: 43, col: 1, offset: 708}, + expr: &actionExpr{ + pos: position{line: 44, col: 5, offset: 732}, + run: (*parser).callonExpressionStatement1, + expr: &labeledExpr{ + pos: position{line: 44, col: 5, offset: 732}, + label: "expr", + expr: &ruleRefExpr{ + pos: position{line: 44, col: 10, offset: 737}, + name: "Expr", + }, + }, + }, + }, + { + name: "BlockStatement", + pos: position{line: 48, col: 1, offset: 796}, + expr: &actionExpr{ + pos: position{line: 49, col: 5, offset: 815}, + run: (*parser).callonBlockStatement1, + expr: &seqExpr{ + pos: position{line: 49, col: 5, offset: 815}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 49, col: 5, offset: 815}, + val: "{", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + 
&zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 49, col: 12, offset: 822}, + label: "body", + expr: &zeroOrMoreExpr{ + pos: position{line: 49, col: 17, offset: 827}, + expr: &seqExpr{ + pos: position{line: 49, col: 19, offset: 829}, + exprs: []interface{}{ + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 49, col: 22, offset: 832}, + name: "Statement", + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: 
position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 49, col: 41, offset: 851}, + val: "}", + ignoreCase: false, + }, + }, + }, + }, + }, + { + name: "VariableDeclaration", + pos: position{line: 53, col: 1, offset: 908}, + expr: &actionExpr{ + pos: position{line: 54, col: 5, offset: 932}, + run: (*parser).callonVariableDeclaration1, + expr: &seqExpr{ + pos: position{line: 54, col: 5, offset: 932}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 54, col: 5, offset: 932}, + label: "id", + expr: &actionExpr{ + pos: 
position{line: 460, col: 5, offset: 8638}, + run: (*parser).callonVariableDeclaration4, + expr: &seqExpr{ + pos: position{line: 460, col: 5, offset: 8638}, + exprs: []interface{}{ + &charClassMatcher{ + pos: position{line: 460, col: 5, offset: 8638}, + val: "[_\\pL]", + chars: []rune{'_'}, + classes: []*unicode.RangeTable{rangeTable("L")}, + ignoreCase: false, + inverted: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 460, col: 11, offset: 8644}, + expr: &charClassMatcher{ + pos: position{line: 460, col: 11, offset: 8644}, + val: "[_0-9\\pL]", + chars: []rune{'_'}, + ranges: []rune{'0', '9'}, + classes: []*unicode.RangeTable{rangeTable("L")}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 54, col: 22, offset: 949}, + val: "=", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ 
\\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 54, col: 29, offset: 956}, + label: "init", + expr: &ruleRefExpr{ + pos: position{line: 54, col: 34, offset: 961}, + name: "Expr", + }, + }, + }, + }, + }, + }, + { + name: "MemberExpressions", + pos: position{line: 59, col: 1, offset: 1022}, + expr: &actionExpr{ + pos: position{line: 60, col: 5, offset: 1044}, + run: (*parser).callonMemberExpressions1, + expr: &seqExpr{ + pos: position{line: 60, col: 5, offset: 1044}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 60, col: 5, offset: 1044}, + label: "head", + expr: &actionExpr{ + pos: position{line: 460, col: 5, offset: 8638}, + run: (*parser).callonMemberExpressions4, + expr: &seqExpr{ + pos: position{line: 460, col: 5, offset: 8638}, + exprs: []interface{}{ + &charClassMatcher{ + pos: position{line: 460, col: 5, offset: 8638}, + val: "[_\\pL]", + chars: []rune{'_'}, + classes: []*unicode.RangeTable{rangeTable("L")}, + ignoreCase: false, + inverted: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 460, col: 11, offset: 8644}, + expr: &charClassMatcher{ + pos: position{line: 460, col: 11, offset: 8644}, + val: "[_0-9\\pL]", + chars: []rune{'_'}, + ranges: []rune{'0', '9'}, + classes: []*unicode.RangeTable{rangeTable("L")}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ 
+ pos: position{line: 61, col: 5, offset: 1091}, + label: "tail", + expr: &zeroOrMoreExpr{ + pos: position{line: 61, col: 10, offset: 1096}, + expr: &actionExpr{ + pos: position{line: 62, col: 10, offset: 1107}, + run: (*parser).callonMemberExpressions11, + expr: &seqExpr{ + pos: position{line: 62, col: 10, offset: 1107}, + exprs: []interface{}{ + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 62, col: 13, offset: 1110}, + label: "property", + expr: &ruleRefExpr{ + pos: position{line: 62, col: 22, offset: 1119}, + name: "MemberExpressionProperty", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "MemberExpressionProperty", + pos: position{line: 70, col: 1, offset: 1259}, + expr: &choiceExpr{ + pos: position{line: 71, col: 5, offset: 1288}, + alternatives: []interface{}{ + &actionExpr{ + pos: position{line: 71, col: 5, offset: 1288}, + run: (*parser).callonMemberExpressionProperty2, + expr: &seqExpr{ + pos: position{line: 71, col: 5, offset: 1288}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 71, col: 5, offset: 1288}, + val: ".", + ignoreCase: 
false, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 71, col: 12, offset: 1295}, + label: "property", + expr: &actionExpr{ + pos: position{line: 460, col: 5, offset: 8638}, + run: (*parser).callonMemberExpressionProperty14, + expr: &seqExpr{ + pos: position{line: 460, col: 5, offset: 8638}, + exprs: []interface{}{ + &charClassMatcher{ + pos: position{line: 460, col: 5, offset: 8638}, + val: "[_\\pL]", + chars: []rune{'_'}, + classes: []*unicode.RangeTable{rangeTable("L")}, + ignoreCase: false, + inverted: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 460, col: 11, offset: 8644}, + expr: &charClassMatcher{ + pos: position{line: 460, col: 11, offset: 8644}, + val: "[_0-9\\pL]", + chars: []rune{'_'}, + ranges: []rune{'0', '9'}, + classes: []*unicode.RangeTable{rangeTable("L")}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 74, col: 7, offset: 1356}, + run: (*parser).callonMemberExpressionProperty19, + expr: &seqExpr{ + pos: position{line: 74, col: 7, offset: 1356}, + exprs: []interface{}{ + 
&litMatcher{ + pos: position{line: 74, col: 7, offset: 1356}, + val: "[", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 74, col: 14, offset: 1363}, + label: "property", + expr: &ruleRefExpr{ + pos: position{line: 74, col: 23, offset: 1372}, + name: "Primary", + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + 
&litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 74, col: 34, offset: 1383}, + val: "]", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "CallExpression", + pos: position{line: 78, col: 1, offset: 1426}, + expr: &actionExpr{ + pos: position{line: 79, col: 5, offset: 1445}, + run: (*parser).callonCallExpression1, + expr: &seqExpr{ + pos: position{line: 79, col: 5, offset: 1445}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 79, col: 5, offset: 1445}, + label: "head", + expr: &actionExpr{ + pos: position{line: 80, col: 7, offset: 1458}, + run: (*parser).callonCallExpression4, + expr: &seqExpr{ + pos: position{line: 80, col: 7, offset: 1458}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 80, col: 7, offset: 1458}, + label: "callee", + expr: &ruleRefExpr{ + pos: position{line: 80, col: 14, offset: 1465}, + name: "MemberExpressions", + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 
8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 80, col: 35, offset: 1486}, + label: "args", + expr: &ruleRefExpr{ + pos: position{line: 80, col: 40, offset: 1491}, + name: "Arguments", + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 84, col: 5, offset: 1574}, + label: "tail", + expr: &zeroOrMoreExpr{ + pos: position{line: 84, col: 10, offset: 1579}, + expr: &choiceExpr{ + pos: position{line: 85, col: 9, offset: 1589}, + alternatives: []interface{}{ + &actionExpr{ + pos: position{line: 85, col: 9, offset: 1589}, + run: (*parser).callonCallExpression21, + expr: &seqExpr{ + pos: position{line: 85, col: 9, offset: 1589}, + exprs: []interface{}{ + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, 
offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 85, col: 12, offset: 1592}, + label: "args", + expr: &ruleRefExpr{ + pos: position{line: 85, col: 17, offset: 1597}, + name: "Arguments", + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 88, col: 10, offset: 1680}, + run: (*parser).callonCallExpression33, + expr: &seqExpr{ + pos: position{line: 88, col: 10, offset: 1680}, + exprs: []interface{}{ + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 88, col: 13, offset: 1683}, + label: "property", + expr: &ruleRefExpr{ + pos: position{line: 88, col: 22, offset: 1692}, + name: "MemberExpressionProperty", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + 
}, + { + name: "PipeExpression", + pos: position{line: 96, col: 1, offset: 1857}, + expr: &actionExpr{ + pos: position{line: 97, col: 5, offset: 1876}, + run: (*parser).callonPipeExpression1, + expr: &seqExpr{ + pos: position{line: 97, col: 5, offset: 1876}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 97, col: 5, offset: 1876}, + label: "head", + expr: &ruleRefExpr{ + pos: position{line: 97, col: 10, offset: 1881}, + name: "PipeExpressionHead", + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 97, col: 32, offset: 1903}, + label: "tail", + expr: &oneOrMoreExpr{ + pos: position{line: 97, col: 37, offset: 1908}, + expr: &seqExpr{ + pos: position{line: 97, col: 38, offset: 1909}, + exprs: []interface{}{ + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + 
&seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 97, col: 41, offset: 1912}, + name: "PipeExpressionPipe", + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "PipeExpressionHead", + pos: position{line: 101, col: 1, offset: 1995}, + expr: &choiceExpr{ + pos: position{line: 102, col: 5, offset: 2018}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 102, col: 5, offset: 2018}, + name: "CallExpression", + }, + &actionExpr{ + pos: position{line: 378, col: 5, offset: 7123}, + run: 
(*parser).callonPipeExpressionHead3, + expr: &seqExpr{ + pos: position{line: 378, col: 7, offset: 7125}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 378, col: 7, offset: 7125}, + val: "\"", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 378, col: 11, offset: 7129}, + expr: &choiceExpr{ + pos: position{line: 386, col: 5, offset: 7338}, + alternatives: []interface{}{ + &seqExpr{ + pos: position{line: 386, col: 5, offset: 7338}, + exprs: []interface{}{ + ¬Expr{ + pos: position{line: 386, col: 5, offset: 7338}, + expr: &charClassMatcher{ + pos: position{line: 386, col: 8, offset: 7341}, + val: "[\"\\\\\\n]", + chars: []rune{'"', '\\', '\n'}, + ignoreCase: false, + inverted: false, + }, + }, + &anyMatcher{ + line: 466, col: 5, offset: 8719, + }, + }, + }, + &seqExpr{ + pos: position{line: 387, col: 5, offset: 7375}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 387, col: 5, offset: 7375}, + val: "\\", + ignoreCase: false, + }, + &choiceExpr{ + pos: position{line: 390, col: 5, offset: 7423}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 390, col: 5, offset: 7423}, + val: "\"", + ignoreCase: false, + }, + &actionExpr{ + pos: position{line: 391, col: 5, offset: 7431}, + run: (*parser).callonPipeExpressionHead16, + expr: &choiceExpr{ + pos: position{line: 391, col: 7, offset: 7433}, + alternatives: []interface{}{ + &anyMatcher{ + line: 466, col: 5, offset: 8719, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + ¬Expr{ + pos: position{line: 483, col: 5, offset: 8851}, + expr: &anyMatcher{ + line: 483, col: 6, offset: 8852, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 378, col: 29, offset: 7147}, + val: "\"", + ignoreCase: false, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 381, col: 5, offset: 7207}, + run: (*parser).callonPipeExpressionHead23, + expr: &seqExpr{ + pos: 
position{line: 381, col: 7, offset: 7209}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 381, col: 7, offset: 7209}, + val: "\"", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 381, col: 11, offset: 7213}, + expr: &choiceExpr{ + pos: position{line: 386, col: 5, offset: 7338}, + alternatives: []interface{}{ + &seqExpr{ + pos: position{line: 386, col: 5, offset: 7338}, + exprs: []interface{}{ + ¬Expr{ + pos: position{line: 386, col: 5, offset: 7338}, + expr: &charClassMatcher{ + pos: position{line: 386, col: 8, offset: 7341}, + val: "[\"\\\\\\n]", + chars: []rune{'"', '\\', '\n'}, + ignoreCase: false, + inverted: false, + }, + }, + &anyMatcher{ + line: 466, col: 5, offset: 8719, + }, + }, + }, + &seqExpr{ + pos: position{line: 387, col: 5, offset: 7375}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 387, col: 5, offset: 7375}, + val: "\\", + ignoreCase: false, + }, + &choiceExpr{ + pos: position{line: 390, col: 5, offset: 7423}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 390, col: 5, offset: 7423}, + val: "\"", + ignoreCase: false, + }, + &actionExpr{ + pos: position{line: 391, col: 5, offset: 7431}, + run: (*parser).callonPipeExpressionHead36, + expr: &choiceExpr{ + pos: position{line: 391, col: 7, offset: 7433}, + alternatives: []interface{}{ + &anyMatcher{ + line: 466, col: 5, offset: 8719, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + ¬Expr{ + pos: position{line: 483, col: 5, offset: 8851}, + expr: &anyMatcher{ + line: 483, col: 6, offset: 8852, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &choiceExpr{ + pos: position{line: 381, col: 31, offset: 7233}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + ¬Expr{ + pos: position{line: 483, col: 5, offset: 8851}, + expr: &anyMatcher{ + line: 483, col: 6, offset: 8852, + }, + }, + 
}, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 426, col: 5, offset: 8041}, + run: (*parser).callonPipeExpressionHead46, + expr: &seqExpr{ + pos: position{line: 426, col: 5, offset: 8041}, + exprs: []interface{}{ + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 426, col: 8, offset: 8044}, + val: "true", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: 
false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 429, col: 5, offset: 8115}, + run: (*parser).callonPipeExpressionHead65, + expr: &seqExpr{ + pos: position{line: 429, col: 5, offset: 8115}, + exprs: []interface{}{ + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 429, col: 8, offset: 8118}, + val: "false", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, 
offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 397, col: 5, offset: 7543}, + run: (*parser).callonPipeExpressionHead84, + expr: &seqExpr{ + pos: position{line: 397, col: 5, offset: 7543}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 397, col: 5, offset: 7543}, + val: "/", + ignoreCase: false, + }, + &labeledExpr{ + pos: position{line: 397, col: 9, offset: 7547}, + label: "pattern", + expr: &actionExpr{ + pos: position{line: 402, col: 5, offset: 7624}, + run: (*parser).callonPipeExpressionHead88, + expr: &labeledExpr{ + pos: position{line: 402, col: 5, offset: 7624}, + label: "chars", + expr: &oneOrMoreExpr{ + pos: position{line: 402, col: 11, offset: 7630}, + expr: &choiceExpr{ + pos: position{line: 407, col: 5, offset: 7714}, + alternatives: []interface{}{ + &actionExpr{ + pos: position{line: 407, col: 5, offset: 7714}, + run: (*parser).callonPipeExpressionHead92, + expr: &seqExpr{ + pos: position{line: 407, col: 5, offset: 7714}, + exprs: []interface{}{ + ¬Expr{ + pos: position{line: 407, col: 5, offset: 7714}, + expr: &charClassMatcher{ + pos: position{line: 407, col: 6, offset: 7715}, + val: "[\\\\/]", + chars: []rune{'\\', '/'}, + ignoreCase: false, + inverted: false, + }, + }, + &labeledExpr{ + pos: position{line: 407, col: 12, offset: 7721}, + label: "re", + expr: &actionExpr{ + pos: position{line: 421, col: 5, offset: 7961}, + run: (*parser).callonPipeExpressionHead97, + expr: &seqExpr{ + pos: position{line: 421, col: 5, offset: 7961}, + exprs: []interface{}{ + ¬Expr{ + pos: position{line: 421, col: 5, offset: 7961}, + expr: &charClassMatcher{ + pos: position{line: 477, col: 5, offset: 8821}, + val: "[\\n\\r]", + chars: 
[]rune{'\n', '\r'}, + ignoreCase: false, + inverted: false, + }, + }, + &anyMatcher{ + line: 466, col: 5, offset: 8719, + }, + }, + }, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 413, col: 5, offset: 7830}, + run: (*parser).callonPipeExpressionHead102, + expr: &litMatcher{ + pos: position{line: 413, col: 5, offset: 7830}, + val: "\\/", + ignoreCase: false, + }, + }, + &actionExpr{ + pos: position{line: 416, col: 5, offset: 7878}, + run: (*parser).callonPipeExpressionHead104, + expr: &seqExpr{ + pos: position{line: 416, col: 5, offset: 7878}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 416, col: 5, offset: 7878}, + val: "\\", + ignoreCase: false, + }, + &actionExpr{ + pos: position{line: 421, col: 5, offset: 7961}, + run: (*parser).callonPipeExpressionHead107, + expr: &seqExpr{ + pos: position{line: 421, col: 5, offset: 7961}, + exprs: []interface{}{ + ¬Expr{ + pos: position{line: 421, col: 5, offset: 7961}, + expr: &charClassMatcher{ + pos: position{line: 477, col: 5, offset: 8821}, + val: "[\\n\\r]", + chars: []rune{'\n', '\r'}, + ignoreCase: false, + inverted: false, + }, + }, + &anyMatcher{ + line: 466, col: 5, offset: 8719, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 397, col: 28, offset: 7566}, + val: "/", + ignoreCase: false, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 453, col: 5, offset: 8452}, + run: (*parser).callonPipeExpressionHead113, + expr: &litMatcher{ + pos: position{line: 453, col: 5, offset: 8452}, + val: "<-", + ignoreCase: false, + }, + }, + &actionExpr{ + pos: position{line: 373, col: 5, offset: 7036}, + run: (*parser).callonPipeExpressionHead115, + expr: &oneOrMoreExpr{ + pos: position{line: 373, col: 5, offset: 7036}, + expr: &seqExpr{ + pos: position{line: 370, col: 5, offset: 6993}, + exprs: []interface{}{ + &choiceExpr{ + pos: position{line: 439, col: 6, offset: 8288}, + alternatives: []interface{}{ + &litMatcher{ + pos: 
position{line: 439, col: 6, offset: 8288}, + val: "0", + ignoreCase: false, + }, + &seqExpr{ + pos: position{line: 439, col: 12, offset: 8294}, + exprs: []interface{}{ + &charClassMatcher{ + pos: position{line: 447, col: 5, offset: 8412}, + val: "[1-9]", + ranges: []rune{'1', '9'}, + ignoreCase: false, + inverted: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 439, col: 25, offset: 8307}, + expr: &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + }, + &choiceExpr{ + pos: position{line: 361, col: 9, offset: 6843}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 342, col: 5, offset: 6676}, + val: "ns", + ignoreCase: false, + }, + &litMatcher{ + pos: position{line: 345, col: 6, offset: 6704}, + val: "us", + ignoreCase: false, + }, + &litMatcher{ + pos: position{line: 345, col: 13, offset: 6711}, + val: "µs", + ignoreCase: false, + }, + &litMatcher{ + pos: position{line: 345, col: 20, offset: 6719}, + val: "μs", + ignoreCase: false, + }, + &litMatcher{ + pos: position{line: 348, col: 5, offset: 6748}, + val: "ms", + ignoreCase: false, + }, + &charClassMatcher{ + pos: position{line: 351, col: 5, offset: 6770}, + val: "[smh]", + chars: []rune{'s', 'm', 'h'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 337, col: 5, offset: 6588}, + run: (*parser).callonPipeExpressionHead131, + expr: &seqExpr{ + pos: position{line: 337, col: 5, offset: 6588}, + exprs: []interface{}{ + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + 
val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &litMatcher{ + pos: position{line: 331, col: 18, offset: 6503}, + val: "-", + ignoreCase: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &litMatcher{ + pos: position{line: 331, col: 32, offset: 6517}, + val: "-", + ignoreCase: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &litMatcher{ + pos: position{line: 337, col: 14, offset: 6597}, + val: "T", + ignoreCase: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &litMatcher{ + pos: position{line: 328, col: 14, offset: 6433}, + val: ":", + ignoreCase: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &litMatcher{ + pos: position{line: 328, col: 29, 
offset: 6448}, + val: ":", + ignoreCase: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &zeroOrOneExpr{ + pos: position{line: 328, col: 44, offset: 6463}, + expr: &seqExpr{ + pos: position{line: 319, col: 5, offset: 6303}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 319, col: 5, offset: 6303}, + val: ".", + ignoreCase: false, + }, + &oneOrMoreExpr{ + pos: position{line: 319, col: 9, offset: 6307}, + expr: &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + &choiceExpr{ + pos: position{line: 325, col: 6, offset: 6386}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 325, col: 6, offset: 6386}, + val: "Z", + ignoreCase: false, + }, + &seqExpr{ + pos: position{line: 322, col: 5, offset: 6333}, + exprs: []interface{}{ + &charClassMatcher{ + pos: position{line: 322, col: 6, offset: 6334}, + val: "[+-]", + chars: []rune{'+', '-'}, + ignoreCase: false, + inverted: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &litMatcher{ + pos: position{line: 322, col: 26, offset: 6354}, + val: ":", + ignoreCase: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: 
"[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 434, col: 5, offset: 8206}, + run: (*parser).callonPipeExpressionHead166, + expr: &seqExpr{ + pos: position{line: 434, col: 5, offset: 8206}, + exprs: []interface{}{ + &choiceExpr{ + pos: position{line: 439, col: 6, offset: 8288}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 439, col: 6, offset: 8288}, + val: "0", + ignoreCase: false, + }, + &seqExpr{ + pos: position{line: 439, col: 12, offset: 8294}, + exprs: []interface{}{ + &charClassMatcher{ + pos: position{line: 447, col: 5, offset: 8412}, + val: "[1-9]", + ranges: []rune{'1', '9'}, + ignoreCase: false, + inverted: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 439, col: 25, offset: 8307}, + expr: &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 434, col: 13, offset: 8214}, + val: ".", + ignoreCase: false, + }, + &oneOrMoreExpr{ + pos: position{line: 434, col: 17, offset: 8218}, + expr: &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 442, col: 5, offset: 8335}, + run: (*parser).callonPipeExpressionHead177, + expr: &choiceExpr{ + pos: position{line: 439, col: 6, offset: 8288}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 439, col: 6, offset: 8288}, + val: "0", + ignoreCase: false, + }, + &seqExpr{ + pos: position{line: 439, col: 12, offset: 8294}, + exprs: []interface{}{ + &charClassMatcher{ + pos: position{line: 447, col: 5, offset: 8412}, + val: "[1-9]", + ranges: []rune{'1', '9'}, + ignoreCase: false, + inverted: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 439, col: 
25, offset: 8307}, + expr: &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 104, col: 5, offset: 2171}, + name: "Array", + }, + &ruleRefExpr{ + pos: position{line: 105, col: 5, offset: 2181}, + name: "MemberExpressions", + }, + &actionExpr{ + pos: position{line: 460, col: 5, offset: 8638}, + run: (*parser).callonPipeExpressionHead186, + expr: &seqExpr{ + pos: position{line: 460, col: 5, offset: 8638}, + exprs: []interface{}{ + &charClassMatcher{ + pos: position{line: 460, col: 5, offset: 8638}, + val: "[_\\pL]", + chars: []rune{'_'}, + classes: []*unicode.RangeTable{rangeTable("L")}, + ignoreCase: false, + inverted: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 460, col: 11, offset: 8644}, + expr: &charClassMatcher{ + pos: position{line: 460, col: 11, offset: 8644}, + val: "[_0-9\\pL]", + chars: []rune{'_'}, + ranges: []rune{'0', '9'}, + classes: []*unicode.RangeTable{rangeTable("L")}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 107, col: 5, offset: 2218}, + name: "ObjectExpression", + }, + &ruleRefExpr{ + pos: position{line: 108, col: 5, offset: 2239}, + name: "ArrowFunctionExpression", + }, + &ruleRefExpr{ + pos: position{line: 109, col: 5, offset: 2267}, + name: "Parens", + }, + }, + }, + }, + { + name: "PipeExpressionPipe", + pos: position{line: 111, col: 1, offset: 2275}, + expr: &actionExpr{ + pos: position{line: 112, col: 5, offset: 2298}, + run: (*parser).callonPipeExpressionPipe1, + expr: &seqExpr{ + pos: position{line: 112, col: 5, offset: 2298}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 112, col: 5, offset: 2298}, + val: "|>", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + 
alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 112, col: 13, offset: 2306}, + label: "call", + expr: &ruleRefExpr{ + pos: position{line: 112, col: 18, offset: 2311}, + name: "CallExpression", + }, + }, + }, + }, + }, + }, + { + name: "Arguments", + pos: position{line: 116, col: 1, offset: 2388}, + expr: &actionExpr{ + pos: position{line: 117, col: 5, offset: 2402}, + run: (*parser).callonArguments1, + expr: &seqExpr{ + pos: position{line: 117, col: 5, offset: 2402}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 117, col: 5, offset: 2402}, + val: "(", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ 
+ pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 117, col: 12, offset: 2409}, + label: "args", + expr: &zeroOrOneExpr{ + pos: position{line: 117, col: 17, offset: 2414}, + expr: &ruleRefExpr{ + pos: position{line: 117, col: 18, offset: 2415}, + name: "ObjectProperties", + }, + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 117, col: 40, offset: 2437}, + val: ")", + ignoreCase: false, + }, + }, + }, + }, + }, + { + name: "ArrowFunctionExpression", + pos: position{line: 121, col: 1, offset: 2473}, + expr: &actionExpr{ + pos: position{line: 122, col: 5, offset: 2501}, + run: (*parser).callonArrowFunctionExpression1, + expr: &seqExpr{ + pos: position{line: 122, col: 5, offset: 2501}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 122, col: 5, offset: 2501}, + val: "(", + ignoreCase: false, + }, + 
&zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 122, col: 12, offset: 2508}, + label: "params", + expr: &zeroOrOneExpr{ + pos: position{line: 122, col: 19, offset: 2515}, + expr: &ruleRefExpr{ + pos: position{line: 122, col: 19, offset: 2515}, + name: "ArrowFunctionParams", + }, + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + 
pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 122, col: 43, offset: 2539}, + val: ")", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 122, col: 50, offset: 2546}, + val: "=>", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + 
inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 122, col: 58, offset: 2554}, + label: "body", + expr: &ruleRefExpr{ + pos: position{line: 122, col: 63, offset: 2559}, + name: "ArrowFunctionBody", + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "ArrowFunctionParams", + pos: position{line: 126, col: 1, offset: 2646}, + expr: &actionExpr{ + pos: position{line: 127, col: 5, offset: 2670}, + run: (*parser).callonArrowFunctionParams1, + expr: &seqExpr{ + pos: position{line: 127, col: 5, offset: 2670}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 127, col: 5, offset: 2670}, + label: "first", + expr: &ruleRefExpr{ + pos: position{line: 127, col: 11, offset: 2676}, + name: "ArrowFunctionParam", + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 
5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 127, col: 33, offset: 2698}, + label: "rest", + expr: &zeroOrMoreExpr{ + pos: position{line: 127, col: 38, offset: 2703}, + expr: &ruleRefExpr{ + pos: position{line: 127, col: 38, offset: 2703}, + name: "ArrowFunctionParamsRest", + }, + }, + }, + &zeroOrOneExpr{ + pos: position{line: 127, col: 63, offset: 2728}, + expr: &litMatcher{ + pos: position{line: 127, col: 63, offset: 2728}, + val: ",", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + { + name: "ArrowFunctionParamsRest", + pos: position{line: 131, col: 1, offset: 2813}, + expr: &actionExpr{ + pos: position{line: 132, col: 5, offset: 2841}, + run: (*parser).callonArrowFunctionParamsRest1, + expr: &seqExpr{ + pos: position{line: 132, col: 5, offset: 2841}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 132, col: 5, offset: 2841}, + val: ",", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + 
exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 132, col: 13, offset: 2849}, + label: "arg", + expr: &ruleRefExpr{ + pos: position{line: 132, col: 17, offset: 2853}, + name: "ArrowFunctionParam", + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "ArrowFunctionParam", + pos: position{line: 136, col: 1, offset: 2906}, + expr: &choiceExpr{ + pos: position{line: 137, col: 5, offset: 2929}, + alternatives: []interface{}{ + &actionExpr{ + pos: position{line: 137, col: 5, offset: 2929}, + run: (*parser).callonArrowFunctionParam2, + expr: &seqExpr{ + pos: position{line: 137, col: 5, offset: 2929}, + 
exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 137, col: 5, offset: 2929}, + label: "key", + expr: &actionExpr{ + pos: position{line: 460, col: 5, offset: 8638}, + run: (*parser).callonArrowFunctionParam5, + expr: &seqExpr{ + pos: position{line: 460, col: 5, offset: 8638}, + exprs: []interface{}{ + &charClassMatcher{ + pos: position{line: 460, col: 5, offset: 8638}, + val: "[_\\pL]", + chars: []rune{'_'}, + classes: []*unicode.RangeTable{rangeTable("L")}, + ignoreCase: false, + inverted: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 460, col: 11, offset: 8644}, + expr: &charClassMatcher{ + pos: position{line: 460, col: 11, offset: 8644}, + val: "[_0-9\\pL]", + chars: []rune{'_'}, + ranges: []rune{'0', '9'}, + classes: []*unicode.RangeTable{rangeTable("L")}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 137, col: 23, offset: 2947}, + val: "=", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 
7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 137, col: 30, offset: 2954}, + label: "value", + expr: &ruleRefExpr{ + pos: position{line: 137, col: 36, offset: 2960}, + name: "Primary", + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 140, col: 5, offset: 3033}, + run: 
(*parser).callonArrowFunctionParam37, + expr: &seqExpr{ + pos: position{line: 140, col: 5, offset: 3033}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 140, col: 5, offset: 3033}, + label: "key", + expr: &actionExpr{ + pos: position{line: 460, col: 5, offset: 8638}, + run: (*parser).callonArrowFunctionParam40, + expr: &seqExpr{ + pos: position{line: 460, col: 5, offset: 8638}, + exprs: []interface{}{ + &charClassMatcher{ + pos: position{line: 460, col: 5, offset: 8638}, + val: "[_\\pL]", + chars: []rune{'_'}, + classes: []*unicode.RangeTable{rangeTable("L")}, + ignoreCase: false, + inverted: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 460, col: 11, offset: 8644}, + expr: &charClassMatcher{ + pos: position{line: 460, col: 11, offset: 8644}, + val: "[_0-9\\pL]", + chars: []rune{'_'}, + ranges: []rune{'0', '9'}, + classes: []*unicode.RangeTable{rangeTable("L")}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "ArrowFunctionBody", + pos: position{line: 145, col: 1, offset: 3109}, + 
expr: &choiceExpr{ + pos: position{line: 146, col: 5, offset: 3131}, + alternatives: []interface{}{ + &actionExpr{ + pos: position{line: 146, col: 5, offset: 3131}, + run: (*parser).callonArrowFunctionBody2, + expr: &labeledExpr{ + pos: position{line: 146, col: 5, offset: 3131}, + label: "body", + expr: &ruleRefExpr{ + pos: position{line: 146, col: 10, offset: 3136}, + name: "Expr", + }, + }, + }, + &actionExpr{ + pos: position{line: 149, col: 5, offset: 3176}, + run: (*parser).callonArrowFunctionBody5, + expr: &labeledExpr{ + pos: position{line: 149, col: 5, offset: 3176}, + label: "body", + expr: &ruleRefExpr{ + pos: position{line: 149, col: 10, offset: 3181}, + name: "BlockStatement", + }, + }, + }, + }, + }, + }, + { + name: "ObjectExpression", + pos: position{line: 153, col: 1, offset: 3224}, + expr: &actionExpr{ + pos: position{line: 154, col: 5, offset: 3245}, + run: (*parser).callonObjectExpression1, + expr: &seqExpr{ + pos: position{line: 154, col: 5, offset: 3245}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 154, col: 5, offset: 3245}, + val: "{", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: 
false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 154, col: 12, offset: 3252}, + label: "object", + expr: &zeroOrOneExpr{ + pos: position{line: 154, col: 19, offset: 3259}, + expr: &ruleRefExpr{ + pos: position{line: 154, col: 20, offset: 3260}, + name: "ObjectProperties", + }, + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 154, col: 42, offset: 3282}, + val: "}", + ignoreCase: false, + }, + }, + }, + }, + }, + { + name: "ObjectProperties", + pos: position{line: 158, col: 1, offset: 3316}, + expr: &actionExpr{ + pos: position{line: 159, col: 5, offset: 3337}, + run: (*parser).callonObjectProperties1, + expr: &seqExpr{ + pos: position{line: 159, col: 5, offset: 3337}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 159, col: 5, offset: 3337}, + label: "first", + expr: &ruleRefExpr{ + pos: position{line: 159, col: 11, offset: 3343}, + name: "Property", + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + 
&charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 159, col: 23, offset: 3355}, + label: "rest", + expr: &zeroOrMoreExpr{ + pos: position{line: 159, col: 28, offset: 3360}, + expr: &ruleRefExpr{ + pos: position{line: 159, col: 28, offset: 3360}, + name: "PropertiesRest", + }, + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &zeroOrOneExpr{ + pos: position{line: 159, col: 47, offset: 3379}, 
+ expr: &litMatcher{ + pos: position{line: 159, col: 47, offset: 3379}, + val: ",", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + { + name: "PropertiesRest", + pos: position{line: 163, col: 1, offset: 3445}, + expr: &actionExpr{ + pos: position{line: 164, col: 5, offset: 3464}, + run: (*parser).callonPropertiesRest1, + expr: &seqExpr{ + pos: position{line: 164, col: 5, offset: 3464}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 164, col: 5, offset: 3464}, + val: ",", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 164, col: 13, offset: 3472}, + label: "arg", + expr: &ruleRefExpr{ + pos: position{line: 164, col: 17, offset: 3476}, + name: "Property", + }, + }, + }, + }, + }, + }, + { + name: "Property", + pos: position{line: 168, col: 1, offset: 3516}, + expr: &actionExpr{ + pos: position{line: 169, col: 5, offset: 3529}, + run: (*parser).callonProperty1, + expr: &seqExpr{ + pos: position{line: 169, col: 5, offset: 3529}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 169, col: 5, offset: 3529}, + label: "key", + 
expr: &actionExpr{ + pos: position{line: 460, col: 5, offset: 8638}, + run: (*parser).callonProperty4, + expr: &seqExpr{ + pos: position{line: 460, col: 5, offset: 8638}, + exprs: []interface{}{ + &charClassMatcher{ + pos: position{line: 460, col: 5, offset: 8638}, + val: "[_\\pL]", + chars: []rune{'_'}, + classes: []*unicode.RangeTable{rangeTable("L")}, + ignoreCase: false, + inverted: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 460, col: 11, offset: 8644}, + expr: &charClassMatcher{ + pos: position{line: 460, col: 11, offset: 8644}, + val: "[_0-9\\pL]", + chars: []rune{'_'}, + ranges: []rune{'0', '9'}, + classes: []*unicode.RangeTable{rangeTable("L")}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 169, col: 24, offset: 3548}, + val: ":", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 
8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 169, col: 31, offset: 3555}, + label: "value", + expr: &ruleRefExpr{ + pos: position{line: 169, col: 37, offset: 3561}, + name: "Expr", + }, + }, + }, + }, + }, + }, + { + name: "Expr", + pos: position{line: 180, col: 1, offset: 3811}, + expr: &ruleRefExpr{ + pos: position{line: 181, col: 5, offset: 3820}, + name: "LogicalExpression", + }, + }, + { + name: "LogicalExpression", + pos: position{line: 188, col: 1, offset: 3921}, + expr: &actionExpr{ + pos: position{line: 189, col: 5, offset: 3943}, + run: (*parser).callonLogicalExpression1, + expr: &seqExpr{ + pos: position{line: 189, col: 5, offset: 3943}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 189, col: 5, offset: 3943}, + label: "head", + expr: &ruleRefExpr{ + pos: position{line: 189, col: 10, offset: 3948}, + name: "Equality", + }, + }, + &labeledExpr{ + pos: position{line: 189, col: 19, offset: 3957}, + label: "tail", + expr: &zeroOrMoreExpr{ + pos: position{line: 189, col: 24, offset: 3962}, + expr: &seqExpr{ + pos: position{line: 189, col: 26, offset: 3964}, + exprs: []interface{}{ + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: 
position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 184, col: 5, offset: 3860}, + run: (*parser).callonLogicalExpression16, + expr: &choiceExpr{ + pos: position{line: 184, col: 6, offset: 3861}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 184, col: 6, offset: 3861}, + val: "or", + ignoreCase: true, + }, + &litMatcher{ + pos: position{line: 184, col: 14, offset: 3869}, + val: "and", + ignoreCase: true, + }, + }, + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 
8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 189, col: 51, offset: 3989}, + name: "Equality", + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "Equality", + pos: position{line: 198, col: 1, offset: 4157}, + expr: &actionExpr{ + pos: position{line: 199, col: 5, offset: 4170}, + run: (*parser).callonEquality1, + expr: &seqExpr{ + pos: position{line: 199, col: 5, offset: 4170}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 199, col: 5, offset: 4170}, + label: "head", + expr: &ruleRefExpr{ + pos: position{line: 199, col: 10, offset: 4175}, + name: "Relational", + }, + }, + &labeledExpr{ + pos: position{line: 199, col: 21, offset: 4186}, + label: "tail", + expr: &zeroOrMoreExpr{ + pos: position{line: 199, col: 26, offset: 4191}, + expr: &seqExpr{ + pos: position{line: 199, col: 28, offset: 4193}, + exprs: []interface{}{ + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 194, col: 5, offset: 4090}, + run: (*parser).callonEquality16, + expr: &choiceExpr{ + pos: position{line: 194, col: 6, offset: 
4091}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 194, col: 6, offset: 4091}, + val: "==", + ignoreCase: false, + }, + &litMatcher{ + pos: position{line: 194, col: 13, offset: 4098}, + val: "!=", + ignoreCase: false, + }, + &litMatcher{ + pos: position{line: 194, col: 20, offset: 4105}, + val: "=~", + ignoreCase: false, + }, + &litMatcher{ + pos: position{line: 194, col: 27, offset: 4112}, + val: "!~", + ignoreCase: false, + }, + }, + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 199, col: 52, offset: 4217}, + name: "Relational", + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "Relational", + pos: position{line: 216, col: 1, offset: 4490}, + expr: &actionExpr{ + pos: position{line: 217, col: 5, offset: 4505}, + run: (*parser).callonRelational1, + expr: &seqExpr{ + pos: position{line: 217, col: 5, offset: 4505}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 217, col: 5, offset: 4505}, + label: "head", + expr: &ruleRefExpr{ + pos: position{line: 217, col: 10, offset: 4510}, + name: "Additive", + }, + }, + &labeledExpr{ + 
pos: position{line: 217, col: 19, offset: 4519}, + label: "tail", + expr: &zeroOrMoreExpr{ + pos: position{line: 217, col: 24, offset: 4524}, + expr: &seqExpr{ + pos: position{line: 217, col: 26, offset: 4526}, + exprs: []interface{}{ + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 204, col: 5, offset: 4321}, + run: (*parser).callonRelational16, + expr: &choiceExpr{ + pos: position{line: 204, col: 9, offset: 4325}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 204, col: 9, offset: 4325}, + val: "<=", + ignoreCase: false, + }, + &litMatcher{ + pos: position{line: 205, col: 9, offset: 4338}, + val: "<", + ignoreCase: false, + }, + &litMatcher{ + pos: position{line: 206, col: 9, offset: 4350}, + val: ">=", + ignoreCase: false, + }, + &litMatcher{ + pos: position{line: 207, col: 9, offset: 4363}, + val: ">", + ignoreCase: false, + }, + &litMatcher{ + pos: position{line: 208, col: 9, offset: 4375}, + val: "startswith", + ignoreCase: true, + }, + &litMatcher{ + pos: position{line: 209, col: 9, offset: 4397}, + val: "in", + ignoreCase: true, + }, + &litMatcher{ + 
pos: position{line: 210, col: 9, offset: 4411}, + val: "not empty", + ignoreCase: true, + }, + &litMatcher{ + pos: position{line: 211, col: 9, offset: 4432}, + val: "empty", + ignoreCase: true, + }, + }, + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 217, col: 52, offset: 4552}, + name: "Additive", + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "Additive", + pos: position{line: 226, col: 1, offset: 4706}, + expr: &actionExpr{ + pos: position{line: 227, col: 5, offset: 4719}, + run: (*parser).callonAdditive1, + expr: &seqExpr{ + pos: position{line: 227, col: 5, offset: 4719}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 227, col: 5, offset: 4719}, + label: "head", + expr: &ruleRefExpr{ + pos: position{line: 227, col: 10, offset: 4724}, + name: "Multiplicative", + }, + }, + &labeledExpr{ + pos: position{line: 227, col: 25, offset: 4739}, + label: "tail", + expr: &zeroOrMoreExpr{ + pos: position{line: 227, col: 30, offset: 4744}, + expr: &seqExpr{ + pos: position{line: 227, col: 32, offset: 4746}, + exprs: []interface{}{ + &zeroOrMoreExpr{ + 
pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 222, col: 5, offset: 4651}, + run: (*parser).callonAdditive16, + expr: &charClassMatcher{ + pos: position{line: 222, col: 6, offset: 4652}, + val: "[+-]", + chars: []rune{'+', '-'}, + ignoreCase: false, + inverted: false, + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, 
offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 227, col: 55, offset: 4769}, + name: "Multiplicative", + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "Multiplicative", + pos: position{line: 236, col: 1, offset: 4931}, + expr: &actionExpr{ + pos: position{line: 237, col: 5, offset: 4950}, + run: (*parser).callonMultiplicative1, + expr: &seqExpr{ + pos: position{line: 237, col: 5, offset: 4950}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 237, col: 5, offset: 4950}, + label: "head", + expr: &ruleRefExpr{ + pos: position{line: 237, col: 10, offset: 4955}, + name: "UnaryExpression", + }, + }, + &labeledExpr{ + pos: position{line: 237, col: 26, offset: 4971}, + label: "tail", + expr: &zeroOrMoreExpr{ + pos: position{line: 237, col: 31, offset: 4976}, + expr: &seqExpr{ + pos: position{line: 237, col: 33, offset: 4978}, + exprs: []interface{}{ + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 232, col: 5, offset: 4880}, + run: (*parser).callonMultiplicative16, + expr: &charClassMatcher{ + 
pos: position{line: 232, col: 6, offset: 4881}, + val: "[*/]", + chars: []rune{'*', '/'}, + ignoreCase: false, + inverted: false, + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 237, col: 62, offset: 5007}, + name: "UnaryExpression", + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "UnaryExpression", + pos: position{line: 246, col: 1, offset: 5163}, + expr: &choiceExpr{ + pos: position{line: 247, col: 5, offset: 5183}, + alternatives: []interface{}{ + &actionExpr{ + pos: position{line: 247, col: 5, offset: 5183}, + run: (*parser).callonUnaryExpression2, + expr: &seqExpr{ + pos: position{line: 247, col: 5, offset: 5183}, + exprs: []interface{}{ + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: 
[]interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 247, col: 8, offset: 5186}, + label: "op", + expr: &actionExpr{ + pos: position{line: 242, col: 5, offset: 5110}, + run: (*parser).callonUnaryExpression13, + expr: &choiceExpr{ + pos: position{line: 242, col: 6, offset: 5111}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 242, col: 6, offset: 5111}, + val: "-", + ignoreCase: false, + }, + &litMatcher{ + pos: position{line: 242, col: 12, offset: 5117}, + val: "not", + ignoreCase: false, + }, + }, + }, + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 247, col: 28, offset: 
5206}, + label: "argument", + expr: &ruleRefExpr{ + pos: position{line: 247, col: 37, offset: 5215}, + name: "Primary", + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 250, col: 5, offset: 5296}, + name: "Primary", + }, + }, + }, + }, + { + name: "Primary", + pos: position{line: 252, col: 1, offset: 5305}, + expr: &choiceExpr{ + pos: position{line: 253, col: 5, offset: 5317}, + alternatives: []interface{}{ + &ruleRefExpr{ + pos: position{line: 253, col: 5, offset: 5317}, + name: "PipeExpression", + }, + &ruleRefExpr{ + pos: position{line: 254, col: 5, offset: 5336}, + name: "Array", + }, + &actionExpr{ + pos: position{line: 378, col: 5, offset: 7123}, + run: (*parser).callonPrimary4, + expr: &seqExpr{ + pos: position{line: 378, col: 7, offset: 7125}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 378, col: 7, offset: 7125}, + val: "\"", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 378, col: 11, offset: 7129}, + expr: &choiceExpr{ + pos: position{line: 386, col: 5, offset: 7338}, + alternatives: []interface{}{ + 
&seqExpr{ + pos: position{line: 386, col: 5, offset: 7338}, + exprs: []interface{}{ + ¬Expr{ + pos: position{line: 386, col: 5, offset: 7338}, + expr: &charClassMatcher{ + pos: position{line: 386, col: 8, offset: 7341}, + val: "[\"\\\\\\n]", + chars: []rune{'"', '\\', '\n'}, + ignoreCase: false, + inverted: false, + }, + }, + &anyMatcher{ + line: 466, col: 5, offset: 8719, + }, + }, + }, + &seqExpr{ + pos: position{line: 387, col: 5, offset: 7375}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 387, col: 5, offset: 7375}, + val: "\\", + ignoreCase: false, + }, + &choiceExpr{ + pos: position{line: 390, col: 5, offset: 7423}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 390, col: 5, offset: 7423}, + val: "\"", + ignoreCase: false, + }, + &actionExpr{ + pos: position{line: 391, col: 5, offset: 7431}, + run: (*parser).callonPrimary17, + expr: &choiceExpr{ + pos: position{line: 391, col: 7, offset: 7433}, + alternatives: []interface{}{ + &anyMatcher{ + line: 466, col: 5, offset: 8719, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + ¬Expr{ + pos: position{line: 483, col: 5, offset: 8851}, + expr: &anyMatcher{ + line: 483, col: 6, offset: 8852, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 378, col: 29, offset: 7147}, + val: "\"", + ignoreCase: false, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 381, col: 5, offset: 7207}, + run: (*parser).callonPrimary24, + expr: &seqExpr{ + pos: position{line: 381, col: 7, offset: 7209}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 381, col: 7, offset: 7209}, + val: "\"", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 381, col: 11, offset: 7213}, + expr: &choiceExpr{ + pos: position{line: 386, col: 5, offset: 7338}, + alternatives: []interface{}{ + &seqExpr{ + pos: position{line: 386, col: 5, offset: 7338}, + exprs: []interface{}{ + ¬Expr{ + 
pos: position{line: 386, col: 5, offset: 7338}, + expr: &charClassMatcher{ + pos: position{line: 386, col: 8, offset: 7341}, + val: "[\"\\\\\\n]", + chars: []rune{'"', '\\', '\n'}, + ignoreCase: false, + inverted: false, + }, + }, + &anyMatcher{ + line: 466, col: 5, offset: 8719, + }, + }, + }, + &seqExpr{ + pos: position{line: 387, col: 5, offset: 7375}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 387, col: 5, offset: 7375}, + val: "\\", + ignoreCase: false, + }, + &choiceExpr{ + pos: position{line: 390, col: 5, offset: 7423}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 390, col: 5, offset: 7423}, + val: "\"", + ignoreCase: false, + }, + &actionExpr{ + pos: position{line: 391, col: 5, offset: 7431}, + run: (*parser).callonPrimary37, + expr: &choiceExpr{ + pos: position{line: 391, col: 7, offset: 7433}, + alternatives: []interface{}{ + &anyMatcher{ + line: 466, col: 5, offset: 8719, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + ¬Expr{ + pos: position{line: 483, col: 5, offset: 8851}, + expr: &anyMatcher{ + line: 483, col: 6, offset: 8852, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &choiceExpr{ + pos: position{line: 381, col: 31, offset: 7233}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + ¬Expr{ + pos: position{line: 483, col: 5, offset: 8851}, + expr: &anyMatcher{ + line: 483, col: 6, offset: 8852, + }, + }, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 426, col: 5, offset: 8041}, + run: (*parser).callonPrimary47, + expr: &seqExpr{ + pos: position{line: 426, col: 5, offset: 8041}, + exprs: []interface{}{ + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 
8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 426, col: 8, offset: 8044}, + val: "true", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 429, col: 5, offset: 8115}, + run: (*parser).callonPrimary66, + expr: &seqExpr{ + pos: position{line: 429, col: 5, offset: 8115}, + exprs: []interface{}{ + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + 
pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 429, col: 8, offset: 8118}, + val: "false", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 397, col: 5, offset: 7543}, + run: (*parser).callonPrimary85, + expr: &seqExpr{ + pos: position{line: 
397, col: 5, offset: 7543}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 397, col: 5, offset: 7543}, + val: "/", + ignoreCase: false, + }, + &labeledExpr{ + pos: position{line: 397, col: 9, offset: 7547}, + label: "pattern", + expr: &actionExpr{ + pos: position{line: 402, col: 5, offset: 7624}, + run: (*parser).callonPrimary89, + expr: &labeledExpr{ + pos: position{line: 402, col: 5, offset: 7624}, + label: "chars", + expr: &oneOrMoreExpr{ + pos: position{line: 402, col: 11, offset: 7630}, + expr: &choiceExpr{ + pos: position{line: 407, col: 5, offset: 7714}, + alternatives: []interface{}{ + &actionExpr{ + pos: position{line: 407, col: 5, offset: 7714}, + run: (*parser).callonPrimary93, + expr: &seqExpr{ + pos: position{line: 407, col: 5, offset: 7714}, + exprs: []interface{}{ + ¬Expr{ + pos: position{line: 407, col: 5, offset: 7714}, + expr: &charClassMatcher{ + pos: position{line: 407, col: 6, offset: 7715}, + val: "[\\\\/]", + chars: []rune{'\\', '/'}, + ignoreCase: false, + inverted: false, + }, + }, + &labeledExpr{ + pos: position{line: 407, col: 12, offset: 7721}, + label: "re", + expr: &actionExpr{ + pos: position{line: 421, col: 5, offset: 7961}, + run: (*parser).callonPrimary98, + expr: &seqExpr{ + pos: position{line: 421, col: 5, offset: 7961}, + exprs: []interface{}{ + ¬Expr{ + pos: position{line: 421, col: 5, offset: 7961}, + expr: &charClassMatcher{ + pos: position{line: 477, col: 5, offset: 8821}, + val: "[\\n\\r]", + chars: []rune{'\n', '\r'}, + ignoreCase: false, + inverted: false, + }, + }, + &anyMatcher{ + line: 466, col: 5, offset: 8719, + }, + }, + }, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 413, col: 5, offset: 7830}, + run: (*parser).callonPrimary103, + expr: &litMatcher{ + pos: position{line: 413, col: 5, offset: 7830}, + val: "\\/", + ignoreCase: false, + }, + }, + &actionExpr{ + pos: position{line: 416, col: 5, offset: 7878}, + run: (*parser).callonPrimary105, + expr: &seqExpr{ + pos: position{line: 416, 
col: 5, offset: 7878}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 416, col: 5, offset: 7878}, + val: "\\", + ignoreCase: false, + }, + &actionExpr{ + pos: position{line: 421, col: 5, offset: 7961}, + run: (*parser).callonPrimary108, + expr: &seqExpr{ + pos: position{line: 421, col: 5, offset: 7961}, + exprs: []interface{}{ + ¬Expr{ + pos: position{line: 421, col: 5, offset: 7961}, + expr: &charClassMatcher{ + pos: position{line: 477, col: 5, offset: 8821}, + val: "[\\n\\r]", + chars: []rune{'\n', '\r'}, + ignoreCase: false, + inverted: false, + }, + }, + &anyMatcher{ + line: 466, col: 5, offset: 8719, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 397, col: 28, offset: 7566}, + val: "/", + ignoreCase: false, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 453, col: 5, offset: 8452}, + run: (*parser).callonPrimary114, + expr: &litMatcher{ + pos: position{line: 453, col: 5, offset: 8452}, + val: "<-", + ignoreCase: false, + }, + }, + &actionExpr{ + pos: position{line: 373, col: 5, offset: 7036}, + run: (*parser).callonPrimary116, + expr: &oneOrMoreExpr{ + pos: position{line: 373, col: 5, offset: 7036}, + expr: &seqExpr{ + pos: position{line: 370, col: 5, offset: 6993}, + exprs: []interface{}{ + &choiceExpr{ + pos: position{line: 439, col: 6, offset: 8288}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 439, col: 6, offset: 8288}, + val: "0", + ignoreCase: false, + }, + &seqExpr{ + pos: position{line: 439, col: 12, offset: 8294}, + exprs: []interface{}{ + &charClassMatcher{ + pos: position{line: 447, col: 5, offset: 8412}, + val: "[1-9]", + ranges: []rune{'1', '9'}, + ignoreCase: false, + inverted: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 439, col: 25, offset: 8307}, + expr: &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + }, 
+ &choiceExpr{ + pos: position{line: 361, col: 9, offset: 6843}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 342, col: 5, offset: 6676}, + val: "ns", + ignoreCase: false, + }, + &litMatcher{ + pos: position{line: 345, col: 6, offset: 6704}, + val: "us", + ignoreCase: false, + }, + &litMatcher{ + pos: position{line: 345, col: 13, offset: 6711}, + val: "µs", + ignoreCase: false, + }, + &litMatcher{ + pos: position{line: 345, col: 20, offset: 6719}, + val: "μs", + ignoreCase: false, + }, + &litMatcher{ + pos: position{line: 348, col: 5, offset: 6748}, + val: "ms", + ignoreCase: false, + }, + &charClassMatcher{ + pos: position{line: 351, col: 5, offset: 6770}, + val: "[smh]", + chars: []rune{'s', 'm', 'h'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 337, col: 5, offset: 6588}, + run: (*parser).callonPrimary132, + expr: &seqExpr{ + pos: position{line: 337, col: 5, offset: 6588}, + exprs: []interface{}{ + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &litMatcher{ + pos: position{line: 331, col: 18, offset: 6503}, + val: "-", + ignoreCase: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + 
ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &litMatcher{ + pos: position{line: 331, col: 32, offset: 6517}, + val: "-", + ignoreCase: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &litMatcher{ + pos: position{line: 337, col: 14, offset: 6597}, + val: "T", + ignoreCase: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &litMatcher{ + pos: position{line: 328, col: 14, offset: 6433}, + val: ":", + ignoreCase: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &litMatcher{ + pos: position{line: 328, col: 29, offset: 6448}, + val: ":", + ignoreCase: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &zeroOrOneExpr{ + pos: position{line: 328, col: 44, offset: 6463}, + expr: &seqExpr{ + pos: position{line: 319, col: 5, offset: 6303}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 319, col: 5, offset: 6303}, + val: 
".", + ignoreCase: false, + }, + &oneOrMoreExpr{ + pos: position{line: 319, col: 9, offset: 6307}, + expr: &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + &choiceExpr{ + pos: position{line: 325, col: 6, offset: 6386}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 325, col: 6, offset: 6386}, + val: "Z", + ignoreCase: false, + }, + &seqExpr{ + pos: position{line: 322, col: 5, offset: 6333}, + exprs: []interface{}{ + &charClassMatcher{ + pos: position{line: 322, col: 6, offset: 6334}, + val: "[+-]", + chars: []rune{'+', '-'}, + ignoreCase: false, + inverted: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &litMatcher{ + pos: position{line: 322, col: 26, offset: 6354}, + val: ":", + ignoreCase: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 434, col: 5, offset: 8206}, + run: (*parser).callonPrimary167, + expr: &seqExpr{ + pos: position{line: 434, col: 5, offset: 8206}, + exprs: []interface{}{ + &choiceExpr{ + pos: position{line: 439, col: 6, offset: 8288}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 439, col: 6, offset: 8288}, + val: "0", + ignoreCase: false, + }, + &seqExpr{ + pos: position{line: 439, col: 12, offset: 8294}, + exprs: []interface{}{ + 
&charClassMatcher{ + pos: position{line: 447, col: 5, offset: 8412}, + val: "[1-9]", + ranges: []rune{'1', '9'}, + ignoreCase: false, + inverted: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 439, col: 25, offset: 8307}, + expr: &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 434, col: 13, offset: 8214}, + val: ".", + ignoreCase: false, + }, + &oneOrMoreExpr{ + pos: position{line: 434, col: 17, offset: 8218}, + expr: &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + &actionExpr{ + pos: position{line: 442, col: 5, offset: 8335}, + run: (*parser).callonPrimary178, + expr: &choiceExpr{ + pos: position{line: 439, col: 6, offset: 8288}, + alternatives: []interface{}{ + &litMatcher{ + pos: position{line: 439, col: 6, offset: 8288}, + val: "0", + ignoreCase: false, + }, + &seqExpr{ + pos: position{line: 439, col: 12, offset: 8294}, + exprs: []interface{}{ + &charClassMatcher{ + pos: position{line: 447, col: 5, offset: 8412}, + val: "[1-9]", + ranges: []rune{'1', '9'}, + ignoreCase: false, + inverted: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 439, col: 25, offset: 8307}, + expr: &charClassMatcher{ + pos: position{line: 450, col: 5, offset: 8429}, + val: "[0-9]", + ranges: []rune{'0', '9'}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 256, col: 5, offset: 5358}, + name: "CallExpression", + }, + &ruleRefExpr{ + pos: position{line: 257, col: 5, offset: 5377}, + name: "MemberExpressions", + }, + &actionExpr{ + pos: position{line: 460, col: 5, offset: 8638}, + run: (*parser).callonPrimary187, + expr: &seqExpr{ + pos: position{line: 460, col: 5, offset: 8638}, + exprs: []interface{}{ + 
&charClassMatcher{ + pos: position{line: 460, col: 5, offset: 8638}, + val: "[_\\pL]", + chars: []rune{'_'}, + classes: []*unicode.RangeTable{rangeTable("L")}, + ignoreCase: false, + inverted: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 460, col: 11, offset: 8644}, + expr: &charClassMatcher{ + pos: position{line: 460, col: 11, offset: 8644}, + val: "[_0-9\\pL]", + chars: []rune{'_'}, + ranges: []rune{'0', '9'}, + classes: []*unicode.RangeTable{rangeTable("L")}, + ignoreCase: false, + inverted: false, + }, + }, + }, + }, + }, + &ruleRefExpr{ + pos: position{line: 259, col: 5, offset: 5414}, + name: "ObjectExpression", + }, + &ruleRefExpr{ + pos: position{line: 260, col: 5, offset: 5435}, + name: "ArrowFunctionExpression", + }, + &ruleRefExpr{ + pos: position{line: 261, col: 5, offset: 5463}, + name: "Parens", + }, + }, + }, + }, + { + name: "Parens", + pos: position{line: 273, col: 1, offset: 5628}, + expr: &actionExpr{ + pos: position{line: 274, col: 5, offset: 5639}, + run: (*parser).callonParens1, + expr: &seqExpr{ + pos: position{line: 274, col: 5, offset: 5639}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 274, col: 5, offset: 5639}, + val: "(", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, 
+ }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 274, col: 12, offset: 5646}, + label: "expr", + expr: &ruleRefExpr{ + pos: position{line: 274, col: 17, offset: 5651}, + name: "Expr", + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 274, col: 25, offset: 5659}, + val: ")", + ignoreCase: false, + }, + }, + }, + }, + }, + { + name: "Array", + pos: position{line: 278, col: 1, offset: 5695}, + expr: &actionExpr{ + pos: position{line: 279, col: 5, offset: 5705}, + run: (*parser).callonArray1, + expr: &seqExpr{ + pos: position{line: 279, col: 5, offset: 5705}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 279, col: 5, offset: 5705}, + val: "[", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: 
[]rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 279, col: 12, offset: 5712}, + label: "elements", + expr: &zeroOrOneExpr{ + pos: position{line: 279, col: 21, offset: 5721}, + expr: &ruleRefExpr{ + pos: position{line: 279, col: 21, offset: 5721}, + name: "ArrayElements", + }, + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &litMatcher{ + pos: position{line: 279, col: 39, offset: 5739}, + val: "]", + ignoreCase: false, + }, + }, + }, + }, + }, + { + name: "ArrayElements", + pos: 
position{line: 283, col: 1, offset: 5779}, + expr: &actionExpr{ + pos: position{line: 284, col: 5, offset: 5797}, + run: (*parser).callonArrayElements1, + expr: &seqExpr{ + pos: position{line: 284, col: 5, offset: 5797}, + exprs: []interface{}{ + &labeledExpr{ + pos: position{line: 284, col: 5, offset: 5797}, + label: "first", + expr: &ruleRefExpr{ + pos: position{line: 284, col: 11, offset: 5803}, + name: "Primary", + }, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 284, col: 22, offset: 5814}, + label: "rest", + expr: &zeroOrMoreExpr{ + pos: position{line: 284, col: 27, offset: 5819}, + expr: &ruleRefExpr{ + pos: position{line: 284, col: 27, offset: 5819}, + name: "ArrayRest", + }, + }, + }, + }, + }, + }, + }, + { + name: "ArrayRest", + pos: position{line: 288, col: 1, offset: 5891}, + expr: &actionExpr{ + pos: position{line: 289, col: 5, offset: 5905}, + run: (*parser).callonArrayRest1, + expr: &seqExpr{ + pos: position{line: 289, col: 5, offset: 5905}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 289, col: 5, offset: 5905}, + val: ",", + ignoreCase: 
false, + }, + &zeroOrMoreExpr{ + pos: position{line: 468, col: 5, offset: 8728}, + expr: &choiceExpr{ + pos: position{line: 468, col: 7, offset: 8730}, + alternatives: []interface{}{ + &charClassMatcher{ + pos: position{line: 474, col: 5, offset: 8791}, + val: "[ \\t\\r\\n]", + chars: []rune{' ', '\t', '\r', '\n'}, + ignoreCase: false, + inverted: false, + }, + &seqExpr{ + pos: position{line: 471, col: 5, offset: 8765}, + exprs: []interface{}{ + &litMatcher{ + pos: position{line: 471, col: 5, offset: 8765}, + val: "//", + ignoreCase: false, + }, + &zeroOrMoreExpr{ + pos: position{line: 471, col: 10, offset: 8770}, + expr: &charClassMatcher{ + pos: position{line: 471, col: 10, offset: 8770}, + val: "[^\\r\\n]", + chars: []rune{'\r', '\n'}, + ignoreCase: false, + inverted: true, + }, + }, + &litMatcher{ + pos: position{line: 480, col: 5, offset: 8837}, + val: "\n", + ignoreCase: false, + }, + }, + }, + }, + }, + }, + &labeledExpr{ + pos: position{line: 289, col: 12, offset: 5912}, + label: "element", + expr: &ruleRefExpr{ + pos: position{line: 289, col: 20, offset: 5920}, + name: "Primary", + }, + }, + }, + }, + }, + }, + }, +} + +func (c *current) onStart1(program interface{}) (interface{}, error) { + return program, nil + +} + +func (p *parser) callonStart1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onStart1(stack["program"]) +} + +func (c *current) onProgram1(body interface{}) (interface{}, error) { + return program(body, c.text, c.pos) + +} + +func (p *parser) callonProgram1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onProgram1(stack["body"]) +} + +func (c *current) onSourceElements1(head, tail interface{}) (interface{}, error) { + return srcElems(head, tail) + +} + +func (p *parser) callonSourceElements1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onSourceElements1(stack["head"], stack["tail"]) +} + +func (c *current) 
onVariableStatement1(declaration interface{}) (interface{}, error) { + return varstmt(declaration, c.text, c.pos) + +} + +func (p *parser) callonVariableStatement1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onVariableStatement1(stack["declaration"]) +} + +func (c *current) onReturnStatement1(argument interface{}) (interface{}, error) { + return returnstmt(argument, c.text, c.pos) + +} + +func (p *parser) callonReturnStatement1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onReturnStatement1(stack["argument"]) +} + +func (c *current) onExpressionStatement1(expr interface{}) (interface{}, error) { + return exprstmt(expr, c.text, c.pos) + +} + +func (p *parser) callonExpressionStatement1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onExpressionStatement1(stack["expr"]) +} + +func (c *current) onBlockStatement1(body interface{}) (interface{}, error) { + return blockstmt(body, c.text, c.pos) + +} + +func (p *parser) callonBlockStatement1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onBlockStatement1(stack["body"]) +} + +func (c *current) onVariableDeclaration4() (interface{}, error) { + return identifier(c.text, c.pos) + +} + +func (p *parser) callonVariableDeclaration4() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onVariableDeclaration4() +} + +func (c *current) onVariableDeclaration1(id, init interface{}) (interface{}, error) { + return vardecl(id, init, c.text, c.pos) + +} + +func (p *parser) callonVariableDeclaration1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onVariableDeclaration1(stack["id"], stack["init"]) +} + +func (c *current) onMemberExpressions4() (interface{}, error) { + return identifier(c.text, c.pos) + +} + +func (p *parser) callonMemberExpressions4() (interface{}, error) { + stack := 
p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onMemberExpressions4() +} + +func (c *current) onMemberExpressions11(property interface{}) (interface{}, error) { + return property, nil + +} + +func (p *parser) callonMemberExpressions11() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onMemberExpressions11(stack["property"]) +} + +func (c *current) onMemberExpressions1(head, tail interface{}) (interface{}, error) { + return memberexprs(head, tail, c.text, c.pos) + +} + +func (p *parser) callonMemberExpressions1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onMemberExpressions1(stack["head"], stack["tail"]) +} + +func (c *current) onMemberExpressionProperty14() (interface{}, error) { + return identifier(c.text, c.pos) + +} + +func (p *parser) callonMemberExpressionProperty14() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onMemberExpressionProperty14() +} + +func (c *current) onMemberExpressionProperty2(property interface{}) (interface{}, error) { + return property, nil + +} + +func (p *parser) callonMemberExpressionProperty2() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onMemberExpressionProperty2(stack["property"]) +} + +func (c *current) onMemberExpressionProperty19(property interface{}) (interface{}, error) { + return property, nil + +} + +func (p *parser) callonMemberExpressionProperty19() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onMemberExpressionProperty19(stack["property"]) +} + +func (c *current) onCallExpression4(callee, args interface{}) (interface{}, error) { + return callexpr(callee, args, c.text, c.pos) + +} + +func (p *parser) callonCallExpression4() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onCallExpression4(stack["callee"], stack["args"]) +} + +func (c *current) onCallExpression21(args 
interface{}) (interface{}, error) { + return callexpr(nil, args, c.text, c.pos) + +} + +func (p *parser) callonCallExpression21() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onCallExpression21(stack["args"]) +} + +func (c *current) onCallExpression33(property interface{}) (interface{}, error) { + return memberexpr(nil, property, c.text, c.pos) + +} + +func (p *parser) callonCallExpression33() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onCallExpression33(stack["property"]) +} + +func (c *current) onCallExpression1(head, tail interface{}) (interface{}, error) { + return callexprs(head, tail, c.text, c.pos) + +} + +func (p *parser) callonCallExpression1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onCallExpression1(stack["head"], stack["tail"]) +} + +func (c *current) onPipeExpression1(head, tail interface{}) (interface{}, error) { + return pipeExprs(head, tail, c.text, c.pos) + +} + +func (p *parser) callonPipeExpression1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPipeExpression1(stack["head"], stack["tail"]) +} + +func (c *current) onPipeExpressionHead16() (interface{}, error) { + return nil, errors.New("invalid escape character") + +} + +func (p *parser) callonPipeExpressionHead16() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPipeExpressionHead16() +} + +func (c *current) onPipeExpressionHead3() (interface{}, error) { + return stringLiteral(c.text, c.pos) + +} + +func (p *parser) callonPipeExpressionHead3() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPipeExpressionHead3() +} + +func (c *current) onPipeExpressionHead36() (interface{}, error) { + return nil, errors.New("invalid escape character") + +} + +func (p *parser) callonPipeExpressionHead36() (interface{}, error) { + stack := 
p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPipeExpressionHead36() +} + +func (c *current) onPipeExpressionHead23() (interface{}, error) { + return "", errors.New("string literal not terminated") + +} + +func (p *parser) callonPipeExpressionHead23() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPipeExpressionHead23() +} + +func (c *current) onPipeExpressionHead46() (interface{}, error) { + return booleanLiteral(true, c.text, c.pos) + +} + +func (p *parser) callonPipeExpressionHead46() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPipeExpressionHead46() +} + +func (c *current) onPipeExpressionHead65() (interface{}, error) { + return booleanLiteral(false, c.text, c.pos) + +} + +func (p *parser) callonPipeExpressionHead65() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPipeExpressionHead65() +} + +func (c *current) onPipeExpressionHead97() (interface{}, error) { + return c.text, nil + +} + +func (p *parser) callonPipeExpressionHead97() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPipeExpressionHead97() +} + +func (c *current) onPipeExpressionHead92(re interface{}) (interface{}, error) { + return re, nil + +} + +func (p *parser) callonPipeExpressionHead92() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPipeExpressionHead92(stack["re"]) +} + +func (c *current) onPipeExpressionHead102() (interface{}, error) { + return []byte{'/'}, nil + +} + +func (p *parser) callonPipeExpressionHead102() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPipeExpressionHead102() +} + +func (c *current) onPipeExpressionHead107() (interface{}, error) { + return c.text, nil + +} + +func (p *parser) callonPipeExpressionHead107() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return 
p.cur.onPipeExpressionHead107() +} + +func (c *current) onPipeExpressionHead104() (interface{}, error) { + return c.text, nil + +} + +func (p *parser) callonPipeExpressionHead104() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPipeExpressionHead104() +} + +func (c *current) onPipeExpressionHead88(chars interface{}) (interface{}, error) { + return regexLiteral(chars, c.text, c.pos) + +} + +func (p *parser) callonPipeExpressionHead88() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPipeExpressionHead88(stack["chars"]) +} + +func (c *current) onPipeExpressionHead84(pattern interface{}) (interface{}, error) { + return pattern, nil + +} + +func (p *parser) callonPipeExpressionHead84() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPipeExpressionHead84(stack["pattern"]) +} + +func (c *current) onPipeExpressionHead113() (interface{}, error) { + return pipeLiteral(c.text, c.pos), nil + +} + +func (p *parser) callonPipeExpressionHead113() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPipeExpressionHead113() +} + +func (c *current) onPipeExpressionHead115() (interface{}, error) { + return durationLiteral(c.text, c.pos) + +} + +func (p *parser) callonPipeExpressionHead115() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPipeExpressionHead115() +} + +func (c *current) onPipeExpressionHead131() (interface{}, error) { + return datetime(c.text, c.pos) + +} + +func (p *parser) callonPipeExpressionHead131() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPipeExpressionHead131() +} + +func (c *current) onPipeExpressionHead166() (interface{}, error) { + return numberLiteral(c.text, c.pos) + +} + +func (p *parser) callonPipeExpressionHead166() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return 
p.cur.onPipeExpressionHead166() +} + +func (c *current) onPipeExpressionHead177() (interface{}, error) { + return integerLiteral(c.text, c.pos) + +} + +func (p *parser) callonPipeExpressionHead177() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPipeExpressionHead177() +} + +func (c *current) onPipeExpressionHead186() (interface{}, error) { + return identifier(c.text, c.pos) + +} + +func (p *parser) callonPipeExpressionHead186() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPipeExpressionHead186() +} + +func (c *current) onPipeExpressionPipe1(call interface{}) (interface{}, error) { + return incompletePipeExpr(call, c.text, c.pos) + +} + +func (p *parser) callonPipeExpressionPipe1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPipeExpressionPipe1(stack["call"]) +} + +func (c *current) onArguments1(args interface{}) (interface{}, error) { + return args, nil + +} + +func (p *parser) callonArguments1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onArguments1(stack["args"]) +} + +func (c *current) onArrowFunctionExpression1(params, body interface{}) (interface{}, error) { + return arrowfunc(params, body, c.text, c.pos), nil + +} + +func (p *parser) callonArrowFunctionExpression1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onArrowFunctionExpression1(stack["params"], stack["body"]) +} + +func (c *current) onArrowFunctionParams1(first, rest interface{}) (interface{}, error) { + return append([]interface{}{first}, toIfaceSlice(rest)...), nil + +} + +func (p *parser) callonArrowFunctionParams1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onArrowFunctionParams1(stack["first"], stack["rest"]) +} + +func (c *current) onArrowFunctionParamsRest1(arg interface{}) (interface{}, error) { + return arg, nil + +} + +func (p 
*parser) callonArrowFunctionParamsRest1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onArrowFunctionParamsRest1(stack["arg"]) +} + +func (c *current) onArrowFunctionParam5() (interface{}, error) { + return identifier(c.text, c.pos) + +} + +func (p *parser) callonArrowFunctionParam5() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onArrowFunctionParam5() +} + +func (c *current) onArrowFunctionParam2(key, value interface{}) (interface{}, error) { + return property(key, value, c.text, c.pos) + +} + +func (p *parser) callonArrowFunctionParam2() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onArrowFunctionParam2(stack["key"], stack["value"]) +} + +func (c *current) onArrowFunctionParam40() (interface{}, error) { + return identifier(c.text, c.pos) + +} + +func (p *parser) callonArrowFunctionParam40() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onArrowFunctionParam40() +} + +func (c *current) onArrowFunctionParam37(key interface{}) (interface{}, error) { + return property(key, nil, c.text, c.pos) + +} + +func (p *parser) callonArrowFunctionParam37() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onArrowFunctionParam37(stack["key"]) +} + +func (c *current) onArrowFunctionBody2(body interface{}) (interface{}, error) { + return body, nil + +} + +func (p *parser) callonArrowFunctionBody2() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onArrowFunctionBody2(stack["body"]) +} + +func (c *current) onArrowFunctionBody5(body interface{}) (interface{}, error) { + return body, nil + +} + +func (p *parser) callonArrowFunctionBody5() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onArrowFunctionBody5(stack["body"]) +} + +func (c *current) onObjectExpression1(object interface{}) (interface{}, error) { + 
return object, nil + +} + +func (p *parser) callonObjectExpression1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onObjectExpression1(stack["object"]) +} + +func (c *current) onObjectProperties1(first, rest interface{}) (interface{}, error) { + return objectexpr(first, rest, c.text, c.pos) + +} + +func (p *parser) callonObjectProperties1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onObjectProperties1(stack["first"], stack["rest"]) +} + +func (c *current) onPropertiesRest1(arg interface{}) (interface{}, error) { + return arg, nil + +} + +func (p *parser) callonPropertiesRest1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPropertiesRest1(stack["arg"]) +} + +func (c *current) onProperty4() (interface{}, error) { + return identifier(c.text, c.pos) + +} + +func (p *parser) callonProperty4() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onProperty4() +} + +func (c *current) onProperty1(key, value interface{}) (interface{}, error) { + return property(key, value, c.text, c.pos) + +} + +func (p *parser) callonProperty1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onProperty1(stack["key"], stack["value"]) +} + +func (c *current) onLogicalExpression16() (interface{}, error) { + return logicalOp(c.text) + +} + +func (p *parser) callonLogicalExpression16() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onLogicalExpression16() +} + +func (c *current) onLogicalExpression1(head, tail interface{}) (interface{}, error) { + return logicalExpression(head, tail, c.text, c.pos) + +} + +func (p *parser) callonLogicalExpression1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onLogicalExpression1(stack["head"], stack["tail"]) +} + +func (c *current) onEquality16() (interface{}, error) { + return 
operator(c.text) + +} + +func (p *parser) callonEquality16() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onEquality16() +} + +func (c *current) onEquality1(head, tail interface{}) (interface{}, error) { + return binaryExpression(head, tail, c.text, c.pos) + +} + +func (p *parser) callonEquality1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onEquality1(stack["head"], stack["tail"]) +} + +func (c *current) onRelational16() (interface{}, error) { + return operator(c.text) + +} + +func (p *parser) callonRelational16() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onRelational16() +} + +func (c *current) onRelational1(head, tail interface{}) (interface{}, error) { + return binaryExpression(head, tail, c.text, c.pos) + +} + +func (p *parser) callonRelational1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onRelational1(stack["head"], stack["tail"]) +} + +func (c *current) onAdditive16() (interface{}, error) { + return operator(c.text) + +} + +func (p *parser) callonAdditive16() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onAdditive16() +} + +func (c *current) onAdditive1(head, tail interface{}) (interface{}, error) { + return binaryExpression(head, tail, c.text, c.pos) + +} + +func (p *parser) callonAdditive1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onAdditive1(stack["head"], stack["tail"]) +} + +func (c *current) onMultiplicative16() (interface{}, error) { + return operator(c.text) + +} + +func (p *parser) callonMultiplicative16() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onMultiplicative16() +} + +func (c *current) onMultiplicative1(head, tail interface{}) (interface{}, error) { + return binaryExpression(head, tail, c.text, c.pos) + +} + +func (p *parser) 
callonMultiplicative1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onMultiplicative1(stack["head"], stack["tail"]) +} + +func (c *current) onUnaryExpression13() (interface{}, error) { + return operator(c.text) + +} + +func (p *parser) callonUnaryExpression13() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onUnaryExpression13() +} + +func (c *current) onUnaryExpression2(op, argument interface{}) (interface{}, error) { + return unaryExpression(op, argument, c.text, c.pos) + +} + +func (p *parser) callonUnaryExpression2() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onUnaryExpression2(stack["op"], stack["argument"]) +} + +func (c *current) onPrimary17() (interface{}, error) { + return nil, errors.New("invalid escape character") + +} + +func (p *parser) callonPrimary17() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPrimary17() +} + +func (c *current) onPrimary4() (interface{}, error) { + return stringLiteral(c.text, c.pos) + +} + +func (p *parser) callonPrimary4() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPrimary4() +} + +func (c *current) onPrimary37() (interface{}, error) { + return nil, errors.New("invalid escape character") + +} + +func (p *parser) callonPrimary37() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPrimary37() +} + +func (c *current) onPrimary24() (interface{}, error) { + return "", errors.New("string literal not terminated") + +} + +func (p *parser) callonPrimary24() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPrimary24() +} + +func (c *current) onPrimary47() (interface{}, error) { + return booleanLiteral(true, c.text, c.pos) + +} + +func (p *parser) callonPrimary47() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return 
p.cur.onPrimary47() +} + +func (c *current) onPrimary66() (interface{}, error) { + return booleanLiteral(false, c.text, c.pos) + +} + +func (p *parser) callonPrimary66() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPrimary66() +} + +func (c *current) onPrimary98() (interface{}, error) { + return c.text, nil + +} + +func (p *parser) callonPrimary98() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPrimary98() +} + +func (c *current) onPrimary93(re interface{}) (interface{}, error) { + return re, nil + +} + +func (p *parser) callonPrimary93() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPrimary93(stack["re"]) +} + +func (c *current) onPrimary103() (interface{}, error) { + return []byte{'/'}, nil + +} + +func (p *parser) callonPrimary103() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPrimary103() +} + +func (c *current) onPrimary108() (interface{}, error) { + return c.text, nil + +} + +func (p *parser) callonPrimary108() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPrimary108() +} + +func (c *current) onPrimary105() (interface{}, error) { + return c.text, nil + +} + +func (p *parser) callonPrimary105() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPrimary105() +} + +func (c *current) onPrimary89(chars interface{}) (interface{}, error) { + return regexLiteral(chars, c.text, c.pos) + +} + +func (p *parser) callonPrimary89() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPrimary89(stack["chars"]) +} + +func (c *current) onPrimary85(pattern interface{}) (interface{}, error) { + return pattern, nil + +} + +func (p *parser) callonPrimary85() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPrimary85(stack["pattern"]) +} + +func (c 
*current) onPrimary114() (interface{}, error) { + return pipeLiteral(c.text, c.pos), nil + +} + +func (p *parser) callonPrimary114() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPrimary114() +} + +func (c *current) onPrimary116() (interface{}, error) { + return durationLiteral(c.text, c.pos) + +} + +func (p *parser) callonPrimary116() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPrimary116() +} + +func (c *current) onPrimary132() (interface{}, error) { + return datetime(c.text, c.pos) + +} + +func (p *parser) callonPrimary132() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPrimary132() +} + +func (c *current) onPrimary167() (interface{}, error) { + return numberLiteral(c.text, c.pos) + +} + +func (p *parser) callonPrimary167() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPrimary167() +} + +func (c *current) onPrimary178() (interface{}, error) { + return integerLiteral(c.text, c.pos) + +} + +func (p *parser) callonPrimary178() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPrimary178() +} + +func (c *current) onPrimary187() (interface{}, error) { + return identifier(c.text, c.pos) + +} + +func (p *parser) callonPrimary187() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onPrimary187() +} + +func (c *current) onParens1(expr interface{}) (interface{}, error) { + return expr, nil + +} + +func (p *parser) callonParens1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onParens1(stack["expr"]) +} + +func (c *current) onArray1(elements interface{}) (interface{}, error) { + return elements, nil + +} + +func (p *parser) callonArray1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onArray1(stack["elements"]) +} + +func (c *current) 
onArrayElements1(first, rest interface{}) (interface{}, error) { + return array(first, rest, c.text, c.pos), nil + +} + +func (p *parser) callonArrayElements1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onArrayElements1(stack["first"], stack["rest"]) +} + +func (c *current) onArrayRest1(element interface{}) (interface{}, error) { + return element, nil + +} + +func (p *parser) callonArrayRest1() (interface{}, error) { + stack := p.vstack[len(p.vstack)-1] + _ = stack + return p.cur.onArrayRest1(stack["element"]) +} + +var ( + // errNoRule is returned when the grammar to parse has no rule. + errNoRule = errors.New("grammar has no rule") + + // errInvalidEncoding is returned when the source is not properly + // utf8-encoded. + errInvalidEncoding = errors.New("invalid encoding") + + // errMaxExprCnt is used to signal that the maximum number of + // expressions have been parsed. + errMaxExprCnt = errors.New("max number of expresssions parsed") +) + +// Option is a function that can set an option on the parser. It returns +// the previous setting as an Option. +type Option func(*parser) Option + +// MaxExpressions creates an Option to stop parsing after the provided +// number of expressions have been parsed, if the value is 0 then the parser will +// parse for as many steps as needed (possibly an infinite number). +// +// The default for maxExprCnt is 0. +func MaxExpressions(maxExprCnt uint64) Option { + return func(p *parser) Option { + oldMaxExprCnt := p.maxExprCnt + p.maxExprCnt = maxExprCnt + return MaxExpressions(oldMaxExprCnt) + } +} + +// Recover creates an Option to set the recover flag to b. When set to +// true, this causes the parser to recover from panics and convert it +// to an error. Setting it to false can be useful while debugging to +// access the full stack trace. +// +// The default is true. 
+func Recover(b bool) Option { + return func(p *parser) Option { + old := p.recover + p.recover = b + return Recover(old) + } +} + +// GlobalStore creates an Option to set a key to a certain value in +// the globalStore. +func GlobalStore(key string, value interface{}) Option { + return func(p *parser) Option { + old := p.cur.globalStore[key] + p.cur.globalStore[key] = value + return GlobalStore(key, old) + } +} + +// ParseFile parses the file identified by filename. +func ParseFile(filename string, opts ...Option) (i interface{}, err error) { + f, err := os.Open(filename) + if err != nil { + return nil, err + } + defer func() { + if closeErr := f.Close(); closeErr != nil { + err = closeErr + } + }() + return ParseReader(filename, f, opts...) +} + +// ParseReader parses the data from r using filename as information in the +// error messages. +func ParseReader(filename string, r io.Reader, opts ...Option) (interface{}, error) { + b, err := ioutil.ReadAll(r) + if err != nil { + return nil, err + } + + return Parse(filename, b, opts...) +} + +// Parse parses the data from b using filename as information in the +// error messages. +func Parse(filename string, b []byte, opts ...Option) (interface{}, error) { + return newParser(filename, b, opts...).parse(g) +} + +// position records a position in the text. +type position struct { + line, col, offset int +} + +func (p position) String() string { + return fmt.Sprintf("%d:%d [%d]", p.line, p.col, p.offset) +} + +// savepoint stores all state required to go back to this point in the +// parser. +type savepoint struct { + position + rn rune + w int +} + +type current struct { + pos position // start position of the match + text []byte // raw text of the match + + // the globalStore allows the parser to store arbitrary values + globalStore map[string]interface{} +} + +// the AST types... 
+ +type grammar struct { + pos position + rules []*rule +} + +type rule struct { + pos position + name string + displayName string + expr interface{} +} + +type choiceExpr struct { + pos position + alternatives []interface{} +} + +type actionExpr struct { + pos position + expr interface{} + run func(*parser) (interface{}, error) +} + +type recoveryExpr struct { + pos position + expr interface{} + recoverExpr interface{} + failureLabel []string +} + +type seqExpr struct { + pos position + exprs []interface{} +} + +type throwExpr struct { + pos position + label string +} + +type labeledExpr struct { + pos position + label string + expr interface{} +} + +type expr struct { + pos position + expr interface{} +} + +type andExpr expr +type notExpr expr +type zeroOrOneExpr expr +type zeroOrMoreExpr expr +type oneOrMoreExpr expr + +type ruleRefExpr struct { + pos position + name string +} + +type andCodeExpr struct { + pos position + run func(*parser) (bool, error) +} + +type notCodeExpr struct { + pos position + run func(*parser) (bool, error) +} + +type litMatcher struct { + pos position + val string + ignoreCase bool +} + +type charClassMatcher struct { + pos position + val string + basicLatinChars [128]bool + chars []rune + ranges []rune + classes []*unicode.RangeTable + ignoreCase bool + inverted bool +} + +type anyMatcher position + +// errList cumulates the errors found by the parser. 
+type errList []error + +func (e *errList) add(err error) { + *e = append(*e, err) +} + +func (e errList) err() error { + if len(e) == 0 { + return nil + } + e.dedupe() + return e +} + +func (e *errList) dedupe() { + var cleaned []error + set := make(map[string]bool) + for _, err := range *e { + if msg := err.Error(); !set[msg] { + set[msg] = true + cleaned = append(cleaned, err) + } + } + *e = cleaned +} + +func (e errList) Error() string { + switch len(e) { + case 0: + return "" + case 1: + return e[0].Error() + default: + var buf bytes.Buffer + + for i, err := range e { + if i > 0 { + buf.WriteRune('\n') + } + buf.WriteString(err.Error()) + } + return buf.String() + } +} + +// parserError wraps an error with a prefix indicating the rule in which +// the error occurred. The original error is stored in the Inner field. +type parserError struct { + Inner error + pos position + prefix string + expected []string +} + +// Error returns the error message. +func (p *parserError) Error() string { + return p.prefix + ": " + p.Inner.Error() +} + +// newParser creates a parser with the specified input source and options. +func newParser(filename string, b []byte, opts ...Option) *parser { + stats := Stats{ + ChoiceAltCnt: make(map[string]map[string]int), + } + + p := &parser{ + filename: filename, + errs: new(errList), + data: b, + pt: savepoint{position: position{line: 1}}, + recover: true, + cur: current{ + globalStore: make(map[string]interface{}), + }, + maxFailPos: position{col: 1, line: 1}, + maxFailExpected: make([]string, 0, 20), + Stats: &stats, + } + p.setOptions(opts) + + if p.maxExprCnt == 0 { + p.maxExprCnt = math.MaxUint64 + } + + return p +} + +// setOptions applies the options to the parser. 
+func (p *parser) setOptions(opts []Option) { + for _, opt := range opts { + opt(p) + } +} + +type resultTuple struct { + v interface{} + b bool + end savepoint +} + +const choiceNoMatch = -1 + +// Stats stores some statistics, gathered during parsing +type Stats struct { + // ExprCnt counts the number of expressions processed during parsing + // This value is compared to the maximum number of expressions allowed + // (set by the MaxExpressions option). + ExprCnt uint64 + + // ChoiceAltCnt is used to count for each ordered choice expression, + // which alternative is used how may times. + // These numbers allow to optimize the order of the ordered choice expression + // to increase the performance of the parser + // + // The outer key of ChoiceAltCnt is composed of the name of the rule as well + // as the line and the column of the ordered choice. + // The inner key of ChoiceAltCnt is the number (one-based) of the matching alternative. + // For each alternative the number of matches are counted. If an ordered choice does not + // match, a special counter is incremented. The name of this counter is set with + // the parser option Statistics. + // For an alternative to be included in ChoiceAltCnt, it has to match at least once. 
+ ChoiceAltCnt map[string]map[string]int +} + +type parser struct { + filename string + pt savepoint + cur current + + data []byte + errs *errList + + depth int + recover bool + + // rules table, maps the rule identifier to the rule node + rules map[string]*rule + // variables stack, map of label to value + vstack []map[string]interface{} + // rule stack, allows identification of the current rule in errors + rstack []*rule + + // parse fail + maxFailPos position + maxFailExpected []string + maxFailInvertExpected bool + + // max number of expressions to be parsed + maxExprCnt uint64 + + *Stats + + choiceNoMatch string + // recovery expression stack, keeps track of the currently available recovery expression, these are traversed in reverse + recoveryStack []map[string]interface{} +} + +// push a variable set on the vstack. +func (p *parser) pushV() { + if cap(p.vstack) == len(p.vstack) { + // create new empty slot in the stack + p.vstack = append(p.vstack, nil) + } else { + // slice to 1 more + p.vstack = p.vstack[:len(p.vstack)+1] + } + + // get the last args set + m := p.vstack[len(p.vstack)-1] + if m != nil && len(m) == 0 { + // empty map, all good + return + } + + m = make(map[string]interface{}) + p.vstack[len(p.vstack)-1] = m +} + +// pop a variable set from the vstack. 
+func (p *parser) popV() { + // if the map is not empty, clear it + m := p.vstack[len(p.vstack)-1] + if len(m) > 0 { + // GC that map + p.vstack[len(p.vstack)-1] = nil + } + p.vstack = p.vstack[:len(p.vstack)-1] +} + +// push a recovery expression with its labels to the recoveryStack +func (p *parser) pushRecovery(labels []string, expr interface{}) { + if cap(p.recoveryStack) == len(p.recoveryStack) { + // create new empty slot in the stack + p.recoveryStack = append(p.recoveryStack, nil) + } else { + // slice to 1 more + p.recoveryStack = p.recoveryStack[:len(p.recoveryStack)+1] + } + + m := make(map[string]interface{}, len(labels)) + for _, fl := range labels { + m[fl] = expr + } + p.recoveryStack[len(p.recoveryStack)-1] = m +} + +// pop a recovery expression from the recoveryStack +func (p *parser) popRecovery() { + // GC that map + p.recoveryStack[len(p.recoveryStack)-1] = nil + + p.recoveryStack = p.recoveryStack[:len(p.recoveryStack)-1] +} + +func (p *parser) addErr(err error) { + p.addErrAt(err, p.pt.position, []string{}) +} + +func (p *parser) addErrAt(err error, pos position, expected []string) { + var buf bytes.Buffer + if p.filename != "" { + buf.WriteString(p.filename) + } + if buf.Len() > 0 { + buf.WriteString(":") + } + buf.WriteString(fmt.Sprintf("%d:%d (%d)", pos.line, pos.col, pos.offset)) + if len(p.rstack) > 0 { + if buf.Len() > 0 { + buf.WriteString(": ") + } + rule := p.rstack[len(p.rstack)-1] + if rule.displayName != "" { + buf.WriteString("rule " + rule.displayName) + } else { + buf.WriteString("rule " + rule.name) + } + } + pe := &parserError{Inner: err, pos: pos, prefix: buf.String(), expected: expected} + p.errs.add(pe) +} + +func (p *parser) failAt(fail bool, pos position, want string) { + // process fail if parsing fails and not inverted or parsing succeeds and invert is set + if fail == p.maxFailInvertExpected { + if pos.offset < p.maxFailPos.offset { + return + } + + if pos.offset > p.maxFailPos.offset { + p.maxFailPos = pos + 
p.maxFailExpected = p.maxFailExpected[:0] + } + + if p.maxFailInvertExpected { + want = "!" + want + } + p.maxFailExpected = append(p.maxFailExpected, want) + } +} + +// read advances the parser to the next rune. +func (p *parser) read() { + p.pt.offset += p.pt.w + rn, n := utf8.DecodeRune(p.data[p.pt.offset:]) + p.pt.rn = rn + p.pt.w = n + p.pt.col++ + if rn == '\n' { + p.pt.line++ + p.pt.col = 0 + } + + if rn == utf8.RuneError { + if n == 1 { + p.addErr(errInvalidEncoding) + } + } +} + +// restore parser position to the savepoint pt. +func (p *parser) restore(pt savepoint) { + if pt.offset == p.pt.offset { + return + } + p.pt = pt +} + +// get the slice of bytes from the savepoint start to the current position. +func (p *parser) sliceFrom(start savepoint) []byte { + return p.data[start.position.offset:p.pt.position.offset] +} + +func (p *parser) buildRulesTable(g *grammar) { + p.rules = make(map[string]*rule, len(g.rules)) + for _, r := range g.rules { + p.rules[r.name] = r + } +} + +func (p *parser) parse(g *grammar) (val interface{}, err error) { + if len(g.rules) == 0 { + p.addErr(errNoRule) + return nil, p.errs.err() + } + + // TODO : not super critical but this could be generated + p.buildRulesTable(g) + + if p.recover { + // panic can be used in action code to stop parsing immediately + // and return the panic as an error. + defer func() { + if e := recover(); e != nil { + val = nil + switch e := e.(type) { + case error: + p.addErr(e) + default: + p.addErr(fmt.Errorf("%v", e)) + } + err = p.errs.err() + } + }() + } + + // start rule is rule [0] + p.read() // advance to first rune + val, ok := p.parseRule(g.rules[0]) + if !ok { + if len(*p.errs) == 0 { + // If parsing fails, but no errors have been recorded, the expected values + // for the farthest parser position are returned as error. 
+ maxFailExpectedMap := make(map[string]struct{}, len(p.maxFailExpected)) + for _, v := range p.maxFailExpected { + maxFailExpectedMap[v] = struct{}{} + } + expected := make([]string, 0, len(maxFailExpectedMap)) + eof := false + if _, ok := maxFailExpectedMap["!."]; ok { + delete(maxFailExpectedMap, "!.") + eof = true + } + for k := range maxFailExpectedMap { + expected = append(expected, k) + } + sort.Strings(expected) + if eof { + expected = append(expected, "EOF") + } + p.addErrAt(errors.New("no match found, expected: "+listJoin(expected, ", ", "or")), p.maxFailPos, expected) + } + + return nil, p.errs.err() + } + return val, p.errs.err() +} + +func listJoin(list []string, sep string, lastSep string) string { + switch len(list) { + case 0: + return "" + case 1: + return list[0] + default: + return fmt.Sprintf("%s %s %s", strings.Join(list[:len(list)-1], sep), lastSep, list[len(list)-1]) + } +} + +func (p *parser) parseRule(rule *rule) (interface{}, bool) { + p.rstack = append(p.rstack, rule) + p.pushV() + val, ok := p.parseExpr(rule.expr) + p.popV() + p.rstack = p.rstack[:len(p.rstack)-1] + return val, ok +} + +func (p *parser) parseExpr(expr interface{}) (interface{}, bool) { + + p.ExprCnt++ + if p.ExprCnt > p.maxExprCnt { + panic(errMaxExprCnt) + } + + var val interface{} + var ok bool + switch expr := expr.(type) { + case *actionExpr: + val, ok = p.parseActionExpr(expr) + case *andCodeExpr: + val, ok = p.parseAndCodeExpr(expr) + case *andExpr: + val, ok = p.parseAndExpr(expr) + case *anyMatcher: + val, ok = p.parseAnyMatcher(expr) + case *charClassMatcher: + val, ok = p.parseCharClassMatcher(expr) + case *choiceExpr: + val, ok = p.parseChoiceExpr(expr) + case *labeledExpr: + val, ok = p.parseLabeledExpr(expr) + case *litMatcher: + val, ok = p.parseLitMatcher(expr) + case *notCodeExpr: + val, ok = p.parseNotCodeExpr(expr) + case *notExpr: + val, ok = p.parseNotExpr(expr) + case *oneOrMoreExpr: + val, ok = p.parseOneOrMoreExpr(expr) + case *recoveryExpr: + val, 
ok = p.parseRecoveryExpr(expr) + case *ruleRefExpr: + val, ok = p.parseRuleRefExpr(expr) + case *seqExpr: + val, ok = p.parseSeqExpr(expr) + case *throwExpr: + val, ok = p.parseThrowExpr(expr) + case *zeroOrMoreExpr: + val, ok = p.parseZeroOrMoreExpr(expr) + case *zeroOrOneExpr: + val, ok = p.parseZeroOrOneExpr(expr) + default: + panic(fmt.Sprintf("unknown expression type %T", expr)) + } + return val, ok +} + +func (p *parser) parseActionExpr(act *actionExpr) (interface{}, bool) { + start := p.pt + val, ok := p.parseExpr(act.expr) + if ok { + p.cur.pos = start.position + p.cur.text = p.sliceFrom(start) + actVal, err := act.run(p) + if err != nil { + p.addErrAt(err, start.position, []string{}) + } + val = actVal + } + return val, ok +} + +func (p *parser) parseAndCodeExpr(and *andCodeExpr) (interface{}, bool) { + ok, err := and.run(p) + if err != nil { + p.addErr(err) + } + return nil, ok +} + +func (p *parser) parseAndExpr(and *andExpr) (interface{}, bool) { + pt := p.pt + p.pushV() + _, ok := p.parseExpr(and.expr) + p.popV() + p.restore(pt) + return nil, ok +} + +func (p *parser) parseAnyMatcher(any *anyMatcher) (interface{}, bool) { + if p.pt.rn != utf8.RuneError { + start := p.pt + p.read() + p.failAt(true, start.position, ".") + return p.sliceFrom(start), true + } + p.failAt(false, p.pt.position, ".") + return nil, false +} + +func (p *parser) parseCharClassMatcher(chr *charClassMatcher) (interface{}, bool) { + cur := p.pt.rn + start := p.pt + + // can't match EOF + if cur == utf8.RuneError { + p.failAt(false, start.position, chr.val) + return nil, false + } + + if chr.ignoreCase { + cur = unicode.ToLower(cur) + } + + // try to match in the list of available chars + for _, rn := range chr.chars { + if rn == cur { + if chr.inverted { + p.failAt(false, start.position, chr.val) + return nil, false + } + p.read() + p.failAt(true, start.position, chr.val) + return p.sliceFrom(start), true + } + } + + // try to match in the list of ranges + for i := 0; i < 
len(chr.ranges); i += 2 { + if cur >= chr.ranges[i] && cur <= chr.ranges[i+1] { + if chr.inverted { + p.failAt(false, start.position, chr.val) + return nil, false + } + p.read() + p.failAt(true, start.position, chr.val) + return p.sliceFrom(start), true + } + } + + // try to match in the list of Unicode classes + for _, cl := range chr.classes { + if unicode.Is(cl, cur) { + if chr.inverted { + p.failAt(false, start.position, chr.val) + return nil, false + } + p.read() + p.failAt(true, start.position, chr.val) + return p.sliceFrom(start), true + } + } + + if chr.inverted { + p.read() + p.failAt(true, start.position, chr.val) + return p.sliceFrom(start), true + } + p.failAt(false, start.position, chr.val) + return nil, false +} + +func (p *parser) parseChoiceExpr(ch *choiceExpr) (interface{}, bool) { + for altI, alt := range ch.alternatives { + // dummy assignment to prevent compile error if optimized + _ = altI + + p.pushV() + val, ok := p.parseExpr(alt) + p.popV() + if ok { + return val, ok + } + } + return nil, false +} + +func (p *parser) parseLabeledExpr(lab *labeledExpr) (interface{}, bool) { + p.pushV() + val, ok := p.parseExpr(lab.expr) + p.popV() + if ok && lab.label != "" { + m := p.vstack[len(p.vstack)-1] + m[lab.label] = val + } + return val, ok +} + +func (p *parser) parseLitMatcher(lit *litMatcher) (interface{}, bool) { + ignoreCase := "" + if lit.ignoreCase { + ignoreCase = "i" + } + val := fmt.Sprintf("%q%s", lit.val, ignoreCase) + start := p.pt + for _, want := range lit.val { + cur := p.pt.rn + if lit.ignoreCase { + cur = unicode.ToLower(cur) + } + if cur != want { + p.failAt(false, start.position, val) + p.restore(start) + return nil, false + } + p.read() + } + p.failAt(true, start.position, val) + return p.sliceFrom(start), true +} + +func (p *parser) parseNotCodeExpr(not *notCodeExpr) (interface{}, bool) { + ok, err := not.run(p) + if err != nil { + p.addErr(err) + } + return nil, !ok +} + +func (p *parser) parseNotExpr(not *notExpr) 
(interface{}, bool) { + pt := p.pt + p.pushV() + p.maxFailInvertExpected = !p.maxFailInvertExpected + _, ok := p.parseExpr(not.expr) + p.maxFailInvertExpected = !p.maxFailInvertExpected + p.popV() + p.restore(pt) + return nil, !ok +} + +func (p *parser) parseOneOrMoreExpr(expr *oneOrMoreExpr) (interface{}, bool) { + var vals []interface{} + + for { + p.pushV() + val, ok := p.parseExpr(expr.expr) + p.popV() + if !ok { + if len(vals) == 0 { + // did not match once, no match + return nil, false + } + return vals, true + } + vals = append(vals, val) + } +} + +func (p *parser) parseRecoveryExpr(recover *recoveryExpr) (interface{}, bool) { + + p.pushRecovery(recover.failureLabel, recover.recoverExpr) + val, ok := p.parseExpr(recover.expr) + p.popRecovery() + + return val, ok +} + +func (p *parser) parseRuleRefExpr(ref *ruleRefExpr) (interface{}, bool) { + if ref.name == "" { + panic(fmt.Sprintf("%s: invalid rule: missing name", ref.pos)) + } + + rule := p.rules[ref.name] + if rule == nil { + p.addErr(fmt.Errorf("undefined rule: %s", ref.name)) + return nil, false + } + return p.parseRule(rule) +} + +func (p *parser) parseSeqExpr(seq *seqExpr) (interface{}, bool) { + vals := make([]interface{}, 0, len(seq.exprs)) + + pt := p.pt + for _, expr := range seq.exprs { + val, ok := p.parseExpr(expr) + if !ok { + p.restore(pt) + return nil, false + } + vals = append(vals, val) + } + return vals, true +} + +func (p *parser) parseThrowExpr(expr *throwExpr) (interface{}, bool) { + + for i := len(p.recoveryStack) - 1; i >= 0; i-- { + if recoverExpr, ok := p.recoveryStack[i][expr.label]; ok { + if val, ok := p.parseExpr(recoverExpr); ok { + return val, ok + } + } + } + + return nil, false +} + +func (p *parser) parseZeroOrMoreExpr(expr *zeroOrMoreExpr) (interface{}, bool) { + var vals []interface{} + + for { + p.pushV() + val, ok := p.parseExpr(expr.expr) + p.popV() + if !ok { + return vals, true + } + vals = append(vals, val) + } +} + +func (p *parser) parseZeroOrOneExpr(expr 
*zeroOrOneExpr) (interface{}, bool) { + p.pushV() + val, _ := p.parseExpr(expr.expr) + p.popV() + // whether it matched or not, consider it a match + return val, true +} + +func rangeTable(class string) *unicode.RangeTable { + if rt, ok := unicode.Categories[class]; ok { + return rt + } + if rt, ok := unicode.Properties[class]; ok { + return rt + } + if rt, ok := unicode.Scripts[class]; ok { + return rt + } + + // cannot happen + panic(fmt.Sprintf("invalid Unicode class: %s", class)) +} diff --git a/vendor/github.com/influxdata/ifql/parser/ifql.peg b/vendor/github.com/influxdata/ifql/parser/ifql.peg new file mode 100644 index 000000000..b084df52b --- /dev/null +++ b/vendor/github.com/influxdata/ifql/parser/ifql.peg @@ -0,0 +1,483 @@ +{ +package parser + +// DO NOT EDIT: This file is auto generated by the pigeon PEG parser generator. + +} + +Start + = __ program:Program __ EOF { + return program, nil + } + +Program + = body:SourceElements { + return program(body, c.text, c.pos) + } + +SourceElements + = head:SourceElement tail:(__ SourceElement __)* { + return srcElems(head, tail) + } + +SourceElement + = Statement + +Statement + = VariableStatement + / ReturnStatement + / ExpressionStatement + / BlockStatement + + +VariableStatement + = declaration:VariableDeclaration { + return varstmt(declaration, c.text, c.pos) + } + +ReturnStatement + = "return" __ argument:Expr { + return returnstmt(argument, c.text, c.pos) + } + +ExpressionStatement + = expr:Expr { + return exprstmt(expr, c.text, c.pos) + } + +BlockStatement + = "{" __ body:( __ Statement __ )* __ "}" { + return blockstmt(body, c.text, c.pos) + } + +VariableDeclaration + = id:Identifier __ "=" __ init:Expr { + return vardecl(id, init, c.text, c.pos) + } + + +MemberExpressions + = head:Identifier // TODO: should be primary + tail:( + __ property:MemberExpressionProperty { + return property, nil + } + )* + { + return memberexprs(head, tail, c.text, c.pos) + } + +MemberExpressionProperty + = "." 
__ property:Identifier { + return property, nil + } + / "[" __ property:Primary __ "]" __ { + return property, nil + } + +CallExpression + = head:( + callee:MemberExpressions __ args:Arguments { + return callexpr(callee, args, c.text, c.pos) + } + ) + tail:( + __ args:Arguments { + return callexpr(nil, args, c.text, c.pos) + } + / __ property:MemberExpressionProperty { + return memberexpr(nil, property, c.text, c.pos) + } + )* + { + return callexprs(head, tail, c.text, c.pos) + } + +PipeExpression + = head:PipeExpressionHead __ tail:(__ PipeExpressionPipe __)+ { + return pipeExprs(head, tail, c.text, c.pos) + } + +PipeExpressionHead + = CallExpression // TODO(nathanielc): Allow for more expressions as pipe heads. Cannot use Expr because it causes infinite left recursion. + / Literal + / Array + / MemberExpressions + / Identifier + / ObjectExpression + / ArrowFunctionExpression + / Parens + +PipeExpressionPipe + = "|>" __ call:CallExpression { + return incompletePipeExpr(call, c.text, c.pos) + } + +Arguments + = "(" __ args:(ObjectProperties)? __ ")" { + return args, nil + } + +ArrowFunctionExpression + = "(" __ params:ArrowFunctionParams? __ ")" __ "=>" __ body:ArrowFunctionBody __ { + return arrowfunc(params, body, c.text, c.pos), nil + } + +ArrowFunctionParams + = first:ArrowFunctionParam __ rest:ArrowFunctionParamsRest* ","? { + return append([]interface{}{first}, toIfaceSlice(rest)...), nil + } + +ArrowFunctionParamsRest + = "," __ arg:ArrowFunctionParam __ { + return arg, nil + } + +ArrowFunctionParam + = key:Identifier __ "=" __ value:Primary __ { + return property(key, value, c.text, c.pos) + } + / key:Identifier __ { + return property(key, nil, c.text, c.pos) + } + + +ArrowFunctionBody + = body:Expr { + return body, nil + } + / body:BlockStatement { + return body, nil + } + +ObjectExpression + = "{" __ object:(ObjectProperties)? __ "}" { + return object, nil + } + +ObjectProperties + = first:Property __ rest:PropertiesRest* __ ","? 
{ + return objectexpr(first, rest, c.text, c.pos) + } + +PropertiesRest + = "," __ arg:Property { + return arg, nil + } + +Property + = key:Identifier __ ":" __ value:Expr { + return property(key, value, c.text, c.pos) + } + +// + - +// <= < >= > startsWith IN NOT EMPTY EMPTY +// == != +// and or +// Lowest to Highest Priority. +// Highest Priority includes the valid primary +// primary contains the Lowest Priority +Expr + = LogicalExpression + +LogicalOperators + = ("or"i / "and"i) { + return logicalOp(c.text) + } + +LogicalExpression + = head:Equality tail:( __ LogicalOperators __ Equality )* { + return logicalExpression(head, tail, c.text, c.pos) + } + +EqualityOperators + = ("==" / "!=" / "=~" / "!~") { + return operator(c.text) + } + +Equality + = head:Relational tail:( __ EqualityOperators __ Relational )* { + return binaryExpression(head, tail, c.text, c.pos) + } + +RelationalOperators + = ( "<=" + / "<" + / ">=" + / ">" + / "startswith"i + / "in"i + / "not empty"i + / "empty"i + ) { + return operator(c.text) + } + +Relational + = head:Additive tail:( __ RelationalOperators __ Additive )* { + return binaryExpression(head, tail, c.text, c.pos) + } + +AdditiveOperator + = ("+" / "-") { + return operator(c.text) + } + +Additive + = head:Multiplicative tail:( __ AdditiveOperator __ Multiplicative )* { + return binaryExpression(head, tail, c.text, c.pos) + } + +MultiplicativeOperator + = ("*" / "/") { + return operator(c.text) + } + +Multiplicative + = head:UnaryExpression tail:( __ MultiplicativeOperator __ UnaryExpression )* { + return binaryExpression(head, tail, c.text, c.pos) + } + +UnaryOperator + = ("-" / "not") { + return operator(c.text) + } + +UnaryExpression + = __ op:UnaryOperator __ argument:Primary __ { + return unaryExpression(op, argument, c.text, c.pos) + } + / Primary + +Primary + = PipeExpression + / Array + / Literal + / CallExpression + / MemberExpressions + / Identifier + / ObjectExpression + / ArrowFunctionExpression + / Parens + +Literal + 
= StringLiteral + / BooleanLiteral + / RegexpLiteral + / PipeLiteral + / DurationLiteral + / DateTimeLiteral + / NumberLiteral + / IntegerLiteral + +Parens + = "(" __ expr:Expr __ ")" { + return expr, nil + } + +Array + = "[" __ elements:ArrayElements? __ "]" { + return elements, nil + } + +ArrayElements + = first:Primary __ rest:ArrayRest* { + return array(first, rest, c.text, c.pos), nil + } + +ArrayRest + = "," __ element:Primary { + return element, nil + } + +DateFullYear + = Digit Digit Digit Digit + +DateMonth + // 01-12 + = Digit Digit + +DateMDay + // 01-28, 01-29, 01-30, 01-31 based on + // month/year + = Digit Digit + +TimeHour + // 00-23 + = Digit Digit + +TimeMinute + // 00-59 + = Digit Digit + +TimeSecond + // 00-58, 00-59, 00-60 based on leap second + // rules + = Digit Digit + +TimeSecFrac + = "." Digit+ + +TimeNumOffset + = ("+" / "-") TimeHour ":" TimeMinute + +TimeOffset + = ("Z" / TimeNumOffset) + +PartialTime + = TimeHour ":" TimeMinute ":" TimeSecond TimeSecFrac? + +FullDate + = DateFullYear "-" DateMonth "-" DateMDay + +FullTime + = PartialTime TimeOffset + +DateTimeLiteral + = FullDate "T" FullTime { + return datetime(c.text, c.pos) + } + +NanoSecondUnits + = "ns" + +MicroSecondUnits + = ("us" / "µs" / "μs") + +MilliSecondUnits + = "ms" + +SecondUnits + = "s" + +MinuteUnits + = "m" + +HourUnits + = "h" + +DurationUnits + = ( + NanoSecondUnits + / MicroSecondUnits + / MilliSecondUnits + / SecondUnits + / MinuteUnits + / HourUnits + ) + +SingleDuration + = Integer DurationUnits + +DurationLiteral + = SingleDuration+ { + return durationLiteral(c.text, c.pos) + } + +StringLiteral + = ( '"' DoubleStringChar* '"' ) { + return stringLiteral(c.text, c.pos) + } + / ( '"' DoubleStringChar* ( EOL / EOF ) ) { + return "", errors.New("string literal not terminated") + } + +DoubleStringChar + = !( '"' / "\\" / EOL ) SourceChar + / "\\" DoubleStringEscape + +DoubleStringEscape + = '"' + / ( SourceChar / EOL / EOF ) { + return nil, errors.New("invalid escape 
character") + } + + +RegexpLiteral + = "/" pattern:RegexpBody "/" { + return pattern, nil + } + +RegexpBody + = chars:RegexpChar+ { + return regexLiteral(chars, c.text, c.pos) + } + +RegexpChar + = ![\\/] re:RegexpNonTerminator { + return re, nil + } + / RegexpBackslashSequence + +RegexpBackslashSequence + = "\\/" { + return []byte{'/'}, nil + } + / "\\" RegexpNonTerminator { + return c.text, nil + } + +RegexpNonTerminator + = !LineTerminator SourceChar { + return c.text, nil + } + +BooleanLiteral + = __ "true" __ { + return booleanLiteral(true, c.text, c.pos) + } + / __ "false" __ { + return booleanLiteral(false, c.text, c.pos) + } + +NumberLiteral + = Integer "." Digit+ { + return numberLiteral(c.text, c.pos) + } + +Integer + = ("0" / NonZeroDigit Digit*) + +IntegerLiteral + = Integer { + return integerLiteral(c.text, c.pos) + } + +NonZeroDigit + = [1-9] + +Digit + = [0-9] + +PipeLiteral + = "<-" { + return pipeLiteral(c.text, c.pos), nil + } + + +Identifier + // Allow any unicode letter possibly followed by any number of unicode letters, underscores and numbers. + = [_\pL][_0-9\pL]* { + return identifier(c.text, c.pos) + } + + +SourceChar + = . +__ + = ( ws / EOL / Comment )* + +Comment + = "//" [^\r\n]* EOL + +ws + = [ \t\r\n] + +LineTerminator + = [\n\r] + +EOL + = "\n" + +EOF + = !. diff --git a/vendor/github.com/influxdata/ifql/parser/parser.go b/vendor/github.com/influxdata/ifql/parser/parser.go new file mode 100644 index 000000000..31d640bdd --- /dev/null +++ b/vendor/github.com/influxdata/ifql/parser/parser.go @@ -0,0 +1,18 @@ +// +build !parser_debug + +package parser + +//go:generate pigeon -optimize-parser -optimize-grammar -o ifql.go ifql.peg + +import ( + "github.com/influxdata/ifql/ast" +) + +// NewAST parses ifql query and produces an ast.Program +func NewAST(ifql string, opts ...Option) (*ast.Program, error) { + f, err := Parse("", []byte(ifql), opts...) 
+ if err != nil { + return nil, err + } + return f.(*ast.Program), nil +} diff --git a/vendor/github.com/influxdata/ifql/parser/parser_debug.go b/vendor/github.com/influxdata/ifql/parser/parser_debug.go new file mode 100644 index 000000000..45f57e3f2 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/parser/parser_debug.go @@ -0,0 +1,21 @@ +// +build parser_debug + +package parser + +//go:generate pigeon -optimize-grammar -o ifql.go ifql.peg + +import ( + "github.com/influxdata/ifql/ast" +) + +// NewAST parses ifql query and produces an ast.Program +func NewAST(ifql string, opts ...Option) (*ast.Program, error) { + // Turn on Debugging + opts = append(opts, Debug(true)) + + f, err := Parse("", []byte(ifql), opts...) + if err != nil { + return nil, err + } + return f.(*ast.Program), nil +} diff --git a/vendor/github.com/influxdata/ifql/parser/parser_test.go b/vendor/github.com/influxdata/ifql/parser/parser_test.go new file mode 100644 index 000000000..3b3eb461f --- /dev/null +++ b/vendor/github.com/influxdata/ifql/parser/parser_test.go @@ -0,0 +1,1609 @@ +package parser_test + +import ( + "regexp" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/influxdata/ifql/ast" + "github.com/influxdata/ifql/ast/asttest" + "github.com/influxdata/ifql/parser" +) + +func TestParse(t *testing.T) { + tests := []struct { + name string + raw string + want *ast.Program + wantErr bool + }{ + { + name: "from", + raw: `from()`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.ExpressionStatement{ + Expression: &ast.CallExpression{ + Callee: &ast.Identifier{ + Name: "from", + }, + }, + }, + }, + }, + }, + { + name: "comment", + raw: `// Comment + from()`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.ExpressionStatement{ + Expression: &ast.CallExpression{ + Callee: &ast.Identifier{ + Name: "from", + }, + }, + }, + }, + }, + }, + { + name: "identifier with number", + raw: `tan2()`, + want: &ast.Program{ + Body: []ast.Statement{ + 
&ast.ExpressionStatement{ + Expression: &ast.CallExpression{ + Callee: &ast.Identifier{ + Name: "tan2", + }, + }, + }, + }, + }, + }, + { + name: "regex literal", + raw: `/.*/`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.ExpressionStatement{ + Expression: &ast.RegexpLiteral{ + Value: regexp.MustCompile(".*"), + }, + }, + }, + }, + }, + { + name: "regex literal with escape sequence", + raw: `/a\/b\\c\d/`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.ExpressionStatement{ + Expression: &ast.RegexpLiteral{ + Value: regexp.MustCompile(`a/b\\c\d`), + }, + }, + }, + }, + }, + { + name: "regex match operators", + raw: `"a" =~ /.*/ and "b" !~ /c/`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.ExpressionStatement{ + Expression: &ast.LogicalExpression{ + Operator: ast.AndOperator, + Left: &ast.BinaryExpression{ + Operator: ast.RegexpMatchOperator, + Left: &ast.StringLiteral{Value: "a"}, + Right: &ast.RegexpLiteral{Value: regexp.MustCompile(".*")}, + }, + Right: &ast.BinaryExpression{ + Operator: ast.NotRegexpMatchOperator, + Left: &ast.StringLiteral{Value: "b"}, + Right: &ast.RegexpLiteral{Value: regexp.MustCompile("c")}, + }, + }, + }, + }, + }, + }, + { + name: "declare variable as an int", + raw: `howdy = 1`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{Name: "howdy"}, + Init: &ast.IntegerLiteral{Value: 1}, + }}, + }, + }, + }, + }, + { + name: "declare variable as a float", + raw: `howdy = 1.1`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{Name: "howdy"}, + Init: &ast.FloatLiteral{Value: 1.1}, + }}, + }, + }, + }, + }, + { + name: "declare variable as an array", + raw: `howdy = [1, 2, 3, 4]`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{Name: "howdy"}, + Init: 
&ast.ArrayExpression{ + Elements: []ast.Expression{ + &ast.IntegerLiteral{Value: 1}, + &ast.IntegerLiteral{Value: 2}, + &ast.IntegerLiteral{Value: 3}, + &ast.IntegerLiteral{Value: 4}, + }, + }, + }}, + }, + }, + }, + }, + { + name: "use variable to declare something", + raw: `howdy = 1 + from()`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{Name: "howdy"}, + Init: &ast.IntegerLiteral{Value: 1}, + }}, + }, + &ast.ExpressionStatement{ + Expression: &ast.CallExpression{ + Callee: &ast.Identifier{ + Name: "from", + }, + }, + }, + }, + }, + }, + { + name: "variable is from statement", + raw: `howdy = from() + howdy.count()`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "howdy", + }, + Init: &ast.CallExpression{ + Callee: &ast.Identifier{ + Name: "from", + }, + }, + }}, + }, + &ast.ExpressionStatement{ + Expression: &ast.CallExpression{ + Callee: &ast.MemberExpression{ + Object: &ast.Identifier{ + Name: "howdy", + }, + Property: &ast.Identifier{ + Name: "count", + }, + }, + }, + }, + }, + }, + }, + { + name: "pipe expression", + raw: `from() |> count()`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.ExpressionStatement{ + Expression: &ast.PipeExpression{ + Argument: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "from"}, + Arguments: nil, + }, + Call: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "count"}, + Arguments: nil, + }, + }, + }, + }, + }, + }, + { + name: "literal pipe expression", + raw: `5 |> pow2()`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.ExpressionStatement{ + Expression: &ast.PipeExpression{ + Argument: &ast.IntegerLiteral{Value: 5}, + Call: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "pow2"}, + Arguments: nil, + }, + }, + }, + }, + }, + }, + { + name: "member expression pipe expression", + raw: `foo.bar |> baz()`, 
+ want: &ast.Program{ + Body: []ast.Statement{ + &ast.ExpressionStatement{ + Expression: &ast.PipeExpression{ + Argument: &ast.MemberExpression{ + Object: &ast.Identifier{Name: "foo"}, + Property: &ast.Identifier{Name: "bar"}, + }, + Call: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "baz"}, + Arguments: nil, + }, + }, + }, + }, + }, + }, + { + name: "multiple pipe expressions", + raw: `from() |> range() |> filter() |> count()`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.ExpressionStatement{ + Expression: &ast.PipeExpression{ + Argument: &ast.PipeExpression{ + Argument: &ast.PipeExpression{ + Argument: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "from"}, + }, + Call: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "range"}, + }, + }, + Call: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "filter"}, + }, + }, + Call: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "count"}, + }, + }, + }, + }, + }, + }, + { + name: "two variables for two froms", + raw: `howdy = from() + doody = from() + howdy|>count() + doody|>sum()`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "howdy", + }, + Init: &ast.CallExpression{ + Callee: &ast.Identifier{ + Name: "from", + }, + }, + }}, + }, + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "doody", + }, + Init: &ast.CallExpression{ + Callee: &ast.Identifier{ + Name: "from", + }, + }, + }}, + }, + &ast.ExpressionStatement{ + Expression: &ast.PipeExpression{ + Argument: &ast.Identifier{Name: "howdy"}, + Call: &ast.CallExpression{ + Callee: &ast.Identifier{ + Name: "count", + }, + }, + }, + }, + &ast.ExpressionStatement{ + Expression: &ast.PipeExpression{ + Argument: &ast.Identifier{Name: "doody"}, + Call: &ast.CallExpression{ + Callee: &ast.Identifier{ + Name: "sum", + }, + }, + }, + }, + }, + }, + }, + { + name: "from with database", + raw: 
`from(db:"telegraf")`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.ExpressionStatement{ + Expression: &ast.CallExpression{ + Callee: &ast.Identifier{ + Name: "from", + }, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{ + Name: "db", + }, + Value: &ast.StringLiteral{ + Value: "telegraf", + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "map member expressions", + raw: `m = {key1: 1, key2:"value2"} + m.key1 + m["key2"] + `, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "m", + }, + Init: &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{Name: "key1"}, + Value: &ast.IntegerLiteral{Value: 1}, + }, + { + Key: &ast.Identifier{Name: "key2"}, + Value: &ast.StringLiteral{Value: "value2"}, + }, + }, + }, + }}, + }, + &ast.ExpressionStatement{ + Expression: &ast.MemberExpression{ + Object: &ast.Identifier{Name: "m"}, + Property: &ast.Identifier{Name: "key1"}, + }, + }, + &ast.ExpressionStatement{ + Expression: &ast.MemberExpression{ + Object: &ast.Identifier{Name: "m"}, + Property: &ast.StringLiteral{Value: "key2"}, + }, + }, + }, + }, + }, + { + name: "var as binary expression of other vars", + raw: `a = 1 + b = 2 + c = a + b + d = a`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "a", + }, + Init: &ast.IntegerLiteral{Value: 1}, + }}, + }, + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "b", + }, + Init: &ast.IntegerLiteral{Value: 2}, + }}, + }, + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "c", + }, + Init: &ast.BinaryExpression{ + Operator: ast.AdditionOperator, + Left: &ast.Identifier{Name: "a"}, + Right: &ast.Identifier{Name: "b"}, + }, + 
}}, + }, + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "d", + }, + Init: &ast.Identifier{Name: "a"}, + }}, + }, + }, + }, + }, + { + name: "var as unary expression of other vars", + raw: `a = 5 + c = -a`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "a", + }, + Init: &ast.IntegerLiteral{Value: 5}, + }}, + }, + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "c", + }, + Init: &ast.UnaryExpression{ + Operator: ast.SubtractionOperator, + Argument: &ast.Identifier{Name: "a"}, + }, + }}, + }, + }, + }, + }, + { + name: "var as both binary and unary expressions", + raw: `a = 5 + c = 10 * -a`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "a", + }, + Init: &ast.IntegerLiteral{Value: 5}, + }}, + }, + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "c", + }, + Init: &ast.BinaryExpression{ + Operator: ast.MultiplicationOperator, + Left: &ast.IntegerLiteral{Value: 10}, + Right: &ast.UnaryExpression{ + Operator: ast.SubtractionOperator, + Argument: &ast.Identifier{Name: "a"}, + }, + }, + }}, + }, + }, + }, + }, + { + name: "unary expressions within logical expression", + raw: `a = 5.0 + 10.0 * -a == -0.5 or a == 6.0`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "a", + }, + Init: &ast.FloatLiteral{Value: 5}, + }}, + }, + &ast.ExpressionStatement{ + Expression: &ast.LogicalExpression{ + Operator: ast.OrOperator, + Left: &ast.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &ast.BinaryExpression{ + Operator: ast.MultiplicationOperator, + Left: &ast.FloatLiteral{Value: 10}, + Right: &ast.UnaryExpression{ 
+ Operator: ast.SubtractionOperator, + Argument: &ast.Identifier{Name: "a"}, + }, + }, + Right: &ast.UnaryExpression{ + Operator: ast.SubtractionOperator, + Argument: &ast.FloatLiteral{Value: 0.5}, + }, + }, + Right: &ast.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &ast.Identifier{Name: "a"}, + Right: &ast.FloatLiteral{Value: 6}, + }, + }, + }, + }, + }, + }, + { + name: "unary expressions with too many comments", + raw: `// define a +a = 5.0 +// eval this +10.0 * -a == -0.5 + // or this + or a == 6.0`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "a", + }, + Init: &ast.FloatLiteral{Value: 5}, + }}, + }, + &ast.ExpressionStatement{ + Expression: &ast.LogicalExpression{ + Operator: ast.OrOperator, + Left: &ast.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &ast.BinaryExpression{ + Operator: ast.MultiplicationOperator, + Left: &ast.FloatLiteral{Value: 10}, + Right: &ast.UnaryExpression{ + Operator: ast.SubtractionOperator, + Argument: &ast.Identifier{Name: "a"}, + }, + }, + Right: &ast.UnaryExpression{ + Operator: ast.SubtractionOperator, + Argument: &ast.FloatLiteral{Value: 0.5}, + }, + }, + Right: &ast.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &ast.Identifier{Name: "a"}, + Right: &ast.FloatLiteral{Value: 6}, + }, + }, + }, + }, + }, + }, + { + name: "expressions with function calls", + raw: `a = foo() == 10`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "a", + }, + Init: &ast.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "foo"}, + }, + Right: &ast.IntegerLiteral{Value: 10}, + }, + }}, + }, + }, + }, + }, + { + name: "mix unary logical and binary expressions", + raw: ` + not (f() == 6.0 * x) or fail()`, + want: &ast.Program{ + Body: []ast.Statement{ + 
&ast.ExpressionStatement{ + Expression: &ast.LogicalExpression{ + Operator: ast.OrOperator, + Left: &ast.UnaryExpression{ + Operator: ast.NotOperator, + Argument: &ast.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "f"}, + }, + Right: &ast.BinaryExpression{ + Operator: ast.MultiplicationOperator, + Left: &ast.FloatLiteral{Value: 6}, + Right: &ast.Identifier{Name: "x"}, + }, + }, + }, + Right: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "fail"}, + }, + }, + }, + }, + }, + }, + { + name: "mix unary logical and binary expressions with extra parens", + raw: ` + (not (f() == 6.0 * x) or fail())`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.ExpressionStatement{ + Expression: &ast.LogicalExpression{ + Operator: ast.OrOperator, + Left: &ast.UnaryExpression{ + Operator: ast.NotOperator, + Argument: &ast.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "f"}, + }, + Right: &ast.BinaryExpression{ + Operator: ast.MultiplicationOperator, + Left: &ast.FloatLiteral{Value: 6}, + Right: &ast.Identifier{Name: "x"}, + }, + }, + }, + Right: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "fail"}, + }, + }, + }, + }, + }, + }, + { + name: "arrow function called", + raw: `plusOne = (r) => r + 1 + plusOne(r:5) + `, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "plusOne", + }, + Init: &ast.ArrowFunctionExpression{ + Params: []*ast.Property{{Key: &ast.Identifier{Name: "r"}}}, + Body: &ast.BinaryExpression{ + Operator: ast.AdditionOperator, + Left: &ast.Identifier{Name: "r"}, + Right: &ast.IntegerLiteral{Value: 1}, + }, + }, + }}, + }, + &ast.ExpressionStatement{ + Expression: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "plusOne"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: 
&ast.Identifier{ + Name: "r", + }, + Value: &ast.IntegerLiteral{ + Value: 5, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "arrow function return map", + raw: `toMap = (r) =>({r:r})`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "toMap", + }, + Init: &ast.ArrowFunctionExpression{ + Params: []*ast.Property{{Key: &ast.Identifier{Name: "r"}}}, + Body: &ast.ObjectExpression{ + Properties: []*ast.Property{{ + Key: &ast.Identifier{Name: "r"}, + Value: &ast.Identifier{Name: "r"}, + }}, + }, + }, + }}, + }, + }, + }, + }, + { + name: "arrow function with default arg", + raw: `addN = (r, n=5) => r + n`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "addN", + }, + Init: &ast.ArrowFunctionExpression{ + Params: []*ast.Property{ + {Key: &ast.Identifier{Name: "r"}}, + {Key: &ast.Identifier{Name: "n"}, Value: &ast.IntegerLiteral{Value: 5}}, + }, + Body: &ast.BinaryExpression{ + Operator: ast.AdditionOperator, + Left: &ast.Identifier{Name: "r"}, + Right: &ast.Identifier{Name: "n"}, + }, + }, + }}, + }, + }, + }, + }, + { + name: "arrow function called in binary expression", + raw: ` + plusOne = (r) => r + 1 + plusOne(r:5) == 6 or die() + `, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "plusOne", + }, + Init: &ast.ArrowFunctionExpression{ + Params: []*ast.Property{{Key: &ast.Identifier{Name: "r"}}}, + Body: &ast.BinaryExpression{ + Operator: ast.AdditionOperator, + Left: &ast.Identifier{Name: "r"}, + Right: &ast.IntegerLiteral{Value: 1}, + }, + }, + }}, + }, + &ast.ExpressionStatement{ + Expression: &ast.LogicalExpression{ + Operator: ast.OrOperator, + Left: &ast.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &ast.CallExpression{ + Callee: 
&ast.Identifier{Name: "plusOne"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{ + Name: "r", + }, + Value: &ast.IntegerLiteral{ + Value: 5, + }, + }, + }, + }, + }, + }, + Right: &ast.IntegerLiteral{Value: 6}, + }, + Right: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "die"}, + }, + }, + }, + }, + }, + }, + { + name: "arrow function as single expression", + raw: `f = (r) => r["_measurement"] == "cpu"`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "f", + }, + Init: &ast.ArrowFunctionExpression{ + Params: []*ast.Property{{Key: &ast.Identifier{Name: "r"}}}, + Body: &ast.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &ast.MemberExpression{ + Object: &ast.Identifier{Name: "r"}, + Property: &ast.StringLiteral{Value: "_measurement"}, + }, + Right: &ast.StringLiteral{Value: "cpu"}, + }, + }, + }}, + }, + }, + }, + }, + { + name: "arrow function as block", + raw: `f = (r) => { + m = r["_measurement"] + return m == "cpu" + }`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "f", + }, + Init: &ast.ArrowFunctionExpression{ + Params: []*ast.Property{{Key: &ast.Identifier{Name: "r"}}}, + Body: &ast.BlockStatement{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "m", + }, + Init: &ast.MemberExpression{ + Object: &ast.Identifier{Name: "r"}, + Property: &ast.StringLiteral{Value: "_measurement"}, + }, + }}, + }, + &ast.ReturnStatement{ + Argument: &ast.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &ast.Identifier{Name: "m"}, + Right: &ast.StringLiteral{Value: "cpu"}, + }, + }, + }, + }, + }, + }}, + }, + }, + }, + }, + { + name: "from with filter with no parens", + raw: `from(db:"telegraf").filter(fn: (r) 
=> r["other"]=="mem" and r["this"]=="that" or r["these"]!="those")`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.ExpressionStatement{ + Expression: &ast.CallExpression{ + Callee: &ast.MemberExpression{ + Property: &ast.Identifier{Name: "filter"}, + Object: &ast.CallExpression{ + Callee: &ast.Identifier{ + Name: "from", + }, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{Name: "db"}, + Value: &ast.StringLiteral{Value: "telegraf"}, + }, + }, + }, + }, + }, + }, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{Name: "fn"}, + Value: &ast.ArrowFunctionExpression{ + Params: []*ast.Property{{Key: &ast.Identifier{Name: "r"}}}, + Body: &ast.LogicalExpression{ + Operator: ast.OrOperator, + Left: &ast.LogicalExpression{ + Operator: ast.AndOperator, + Left: &ast.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &ast.MemberExpression{ + Object: &ast.Identifier{Name: "r"}, + Property: &ast.StringLiteral{Value: "other"}, + }, + Right: &ast.StringLiteral{Value: "mem"}, + }, + Right: &ast.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &ast.MemberExpression{ + Object: &ast.Identifier{Name: "r"}, + Property: &ast.StringLiteral{Value: "this"}, + }, + Right: &ast.StringLiteral{Value: "that"}, + }, + }, + Right: &ast.BinaryExpression{ + Operator: ast.NotEqualOperator, + Left: &ast.MemberExpression{ + Object: &ast.Identifier{Name: "r"}, + Property: &ast.StringLiteral{Value: "these"}, + }, + Right: &ast.StringLiteral{Value: "those"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "from with range", + raw: `from(db:"telegraf")|>range(start:-1h, end:10m)`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.ExpressionStatement{ + Expression: &ast.PipeExpression{ + Argument: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "from"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: 
[]*ast.Property{ + { + Key: &ast.Identifier{Name: "db"}, + Value: &ast.StringLiteral{Value: "telegraf"}, + }, + }, + }, + }, + }, + Call: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "range"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{Name: "start"}, + Value: &ast.UnaryExpression{ + Operator: ast.SubtractionOperator, + Argument: &ast.DurationLiteral{Value: time.Hour}, + }, + }, + { + Key: &ast.Identifier{Name: "end"}, + Value: &ast.DurationLiteral{Value: 10 * time.Minute}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "from with limit", + raw: `from(db:"telegraf")|>limit(limit:100, offset:10)`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.ExpressionStatement{ + Expression: &ast.PipeExpression{ + Argument: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "from"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{Name: "db"}, + Value: &ast.StringLiteral{Value: "telegraf"}, + }, + }, + }, + }, + }, + Call: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "limit"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{Name: "limit"}, + Value: &ast.IntegerLiteral{Value: 100}, + }, + { + Key: &ast.Identifier{Name: "offset"}, + Value: &ast.IntegerLiteral{Value: 10}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "from with range and count", + raw: `from(db:"mydb") + |> range(start:-4h, stop:-2h) + |> count()`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.ExpressionStatement{ + Expression: &ast.PipeExpression{ + Argument: &ast.PipeExpression{ + Argument: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "from"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{Name: "db"}, + Value: &ast.StringLiteral{Value: "mydb"}, + }, + }, + }, + }, + }, + Call: 
&ast.CallExpression{ + Callee: &ast.Identifier{Name: "range"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{Name: "start"}, + Value: &ast.UnaryExpression{ + Operator: ast.SubtractionOperator, + Argument: &ast.DurationLiteral{Value: 4 * time.Hour}, + }, + }, + { + Key: &ast.Identifier{Name: "stop"}, + Value: &ast.UnaryExpression{ + Operator: ast.SubtractionOperator, + Argument: &ast.DurationLiteral{Value: 2 * time.Hour}, + }, + }, + }, + }, + }, + }, + }, + Call: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "count"}, + }, + }, + }, + }, + }, + }, + { + name: "from with range, limit and count", + raw: `from(db:"mydb") + |> range(start:-4h, stop:-2h) + |> limit(n:10) + |> count()`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.ExpressionStatement{ + Expression: &ast.PipeExpression{ + Argument: &ast.PipeExpression{ + Argument: &ast.PipeExpression{ + Argument: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "from"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{Name: "db"}, + Value: &ast.StringLiteral{Value: "mydb"}, + }, + }, + }, + }, + }, + Call: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "range"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{Name: "start"}, + Value: &ast.UnaryExpression{ + Operator: ast.SubtractionOperator, + Argument: &ast.DurationLiteral{Value: 4 * time.Hour}, + }, + }, + { + Key: &ast.Identifier{Name: "stop"}, + Value: &ast.UnaryExpression{ + Operator: ast.SubtractionOperator, + Argument: &ast.DurationLiteral{Value: 2 * time.Hour}, + }, + }, + }, + }, + }, + }, + }, + Call: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "limit"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{{ + Key: &ast.Identifier{Name: "n"}, + Value: &ast.IntegerLiteral{Value: 10}, + }}, + }, + }, + 
}, + }, + Call: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "count"}, + }, + }, + }, + }, + }, + }, + { + name: "from with join", + raw: ` +a = from(db:"dbA") |> range(start:-1h) +b = from(db:"dbB") |> range(start:-1h) +join(tables:[a,b], on:["host"], fn: (a,b) => a["_field"] + b["_field"])`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "a", + }, + Init: &ast.PipeExpression{ + Argument: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "from"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{Name: "db"}, + Value: &ast.StringLiteral{Value: "dbA"}, + }, + }, + }, + }, + }, + Call: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "range"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{Name: "start"}, + Value: &ast.UnaryExpression{ + Operator: ast.SubtractionOperator, + Argument: &ast.DurationLiteral{Value: 1 * time.Hour}, + }, + }, + }, + }, + }, + }, + }, + }}, + }, + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "b", + }, + Init: &ast.PipeExpression{ + Argument: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "from"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{Name: "db"}, + Value: &ast.StringLiteral{Value: "dbB"}, + }, + }, + }, + }, + }, + Call: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "range"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{Name: "start"}, + Value: &ast.UnaryExpression{ + Operator: ast.SubtractionOperator, + Argument: &ast.DurationLiteral{Value: 1 * time.Hour}, + }, + }, + }, + }, + }, + }, + }, + }}, + }, + &ast.ExpressionStatement{ + Expression: &ast.CallExpression{ + Callee: 
&ast.Identifier{Name: "join"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{Name: "tables"}, + Value: &ast.ArrayExpression{ + Elements: []ast.Expression{ + &ast.Identifier{Name: "a"}, + &ast.Identifier{Name: "b"}, + }, + }, + }, + { + Key: &ast.Identifier{Name: "on"}, + Value: &ast.ArrayExpression{ + Elements: []ast.Expression{&ast.StringLiteral{Value: "host"}}, + }, + }, + { + Key: &ast.Identifier{Name: "fn"}, + Value: &ast.ArrowFunctionExpression{ + Params: []*ast.Property{ + {Key: &ast.Identifier{Name: "a"}}, + {Key: &ast.Identifier{Name: "b"}}, + }, + Body: &ast.BinaryExpression{ + Operator: ast.AdditionOperator, + Left: &ast.MemberExpression{ + Object: &ast.Identifier{Name: "a"}, + Property: &ast.StringLiteral{Value: "_field"}, + }, + Right: &ast.MemberExpression{ + Object: &ast.Identifier{Name: "b"}, + Property: &ast.StringLiteral{Value: "_field"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "from with join with complex expression", + raw: ` +a = from(db:"ifql") + |> filter(fn: (r) => r["_measurement"] == "a") + |> range(start:-1h) + +b = from(db:"ifql") + |> filter(fn: (r) => r["_measurement"] == "b") + |> range(start:-1h) + +join(tables:[a,b], on:["t1"], fn: (a,b) => (a["_field"] - b["_field"]) / b["_field"]) +`, + want: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "a", + }, + Init: &ast.PipeExpression{ + Argument: &ast.PipeExpression{ + Argument: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "from"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{Name: "db"}, + Value: &ast.StringLiteral{Value: "ifql"}, + }, + }, + }, + }, + }, + Call: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "filter"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: 
&ast.Identifier{Name: "fn"}, + Value: &ast.ArrowFunctionExpression{ + Params: []*ast.Property{{Key: &ast.Identifier{Name: "r"}}}, + Body: &ast.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &ast.MemberExpression{ + Object: &ast.Identifier{Name: "r"}, + Property: &ast.StringLiteral{Value: "_measurement"}, + }, + Right: &ast.StringLiteral{Value: "a"}, + }, + }, + }, + }, + }, + }, + }, + }, + Call: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "range"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{Name: "start"}, + Value: &ast.UnaryExpression{ + Operator: ast.SubtractionOperator, + Argument: &ast.DurationLiteral{Value: 1 * time.Hour}, + }, + }, + }, + }, + }, + }, + }, + }}, + }, + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{ + Name: "b", + }, + Init: &ast.PipeExpression{ + Argument: &ast.PipeExpression{ + Argument: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "from"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{Name: "db"}, + Value: &ast.StringLiteral{Value: "ifql"}, + }, + }, + }, + }, + }, + Call: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "filter"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{Name: "fn"}, + Value: &ast.ArrowFunctionExpression{ + Params: []*ast.Property{{Key: &ast.Identifier{Name: "r"}}}, + Body: &ast.BinaryExpression{ + Operator: ast.EqualOperator, + Left: &ast.MemberExpression{ + Object: &ast.Identifier{Name: "r"}, + Property: &ast.StringLiteral{Value: "_measurement"}, + }, + Right: &ast.StringLiteral{Value: "b"}, + }, + }, + }, + }, + }, + }, + }, + }, + Call: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "range"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{Name: "start"}, + Value: 
&ast.UnaryExpression{ + Operator: ast.SubtractionOperator, + Argument: &ast.DurationLiteral{Value: 1 * time.Hour}, + }, + }, + }, + }, + }, + }, + }, + }}, + }, + &ast.ExpressionStatement{ + Expression: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "join"}, + Arguments: []ast.Expression{ + &ast.ObjectExpression{ + Properties: []*ast.Property{ + { + Key: &ast.Identifier{Name: "tables"}, + Value: &ast.ArrayExpression{ + Elements: []ast.Expression{ + &ast.Identifier{Name: "a"}, + &ast.Identifier{Name: "b"}, + }, + }, + }, + { + Key: &ast.Identifier{Name: "on"}, + Value: &ast.ArrayExpression{ + Elements: []ast.Expression{ + &ast.StringLiteral{ + Value: "t1", + }, + }, + }, + }, + { + Key: &ast.Identifier{Name: "fn"}, + Value: &ast.ArrowFunctionExpression{ + Params: []*ast.Property{ + {Key: &ast.Identifier{Name: "a"}}, + {Key: &ast.Identifier{Name: "b"}}, + }, + Body: &ast.BinaryExpression{ + Operator: ast.DivisionOperator, + Left: &ast.BinaryExpression{ + Operator: ast.SubtractionOperator, + Left: &ast.MemberExpression{ + Object: &ast.Identifier{Name: "a"}, + Property: &ast.StringLiteral{Value: "_field"}, + }, + Right: &ast.MemberExpression{ + Object: &ast.Identifier{Name: "b"}, + Property: &ast.StringLiteral{Value: "_field"}, + }, + }, + Right: &ast.MemberExpression{ + Object: &ast.Identifier{Name: "b"}, + Property: &ast.StringLiteral{Value: "_field"}, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + }, + { + name: "parse error extra gibberish", + raw: `from(db:"ifql") &^*&H#IUJBN`, + wantErr: true, + }, + { + name: "parse error extra gibberish and valid content", + raw: `from(db:"ifql") &^*&H#IUJBN from(db:"other")`, + wantErr: true, + }, + } + for _, tt := range tests { + tt := tt + t.Run(tt.name, func(t *testing.T) { + t.Parallel() + // Set the env var`GO_TAGS=parser_debug` in order + // to turn on parser debugging as it is turned off by default. 
+ got, err := parser.NewAST(tt.raw) + if (err != nil) != tt.wantErr { + t.Errorf("ifql.NewAST() error = %v, wantErr %v", err, tt.wantErr) + return + } + if tt.wantErr { + return + } + if !cmp.Equal(tt.want, got, asttest.CompareOptions...) { + t.Errorf("ifql.NewAST() = -want/+got %s", cmp.Diff(tt.want, got, asttest.CompareOptions...)) + } + }) + } +} + +var benchmarkQuery = []byte(` +start = -10s + +do = (cpu) => + from(db:"telegraf") + .filter(fn: (r) => + r["_measurement"] == "cpu" + and + r["cpu"] == cpu) + .range(start:start) + +cpu0 = do(cpu:"cpu0") +cpu1 = do(cpu:"cpu1") + +join( + tables:[cpu0, cpu1], + on:["_measurement","_field","host"], + fn: (a,b) => a["_value"] - b["_value"], +) +`) + +var benchmarkProgram interface{} + +func BenchmarkParse(b *testing.B) { + b.ReportAllocs() + var err error + for n := 0; n < b.N; n++ { + benchmarkProgram, err = parser.Parse("", benchmarkQuery) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/influxdata/ifql/parser/types.go b/vendor/github.com/influxdata/ifql/parser/types.go new file mode 100644 index 000000000..771cf9fa4 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/parser/types.go @@ -0,0 +1,363 @@ +package parser + +import ( + "regexp" + "strconv" + "strings" + "time" + + "github.com/influxdata/ifql/ast" +) + +func toIfaceSlice(v interface{}) []interface{} { + if v == nil { + return nil + } + return v.([]interface{}) +} + +func program(body interface{}, text []byte, pos position) (*ast.Program, error) { + return &ast.Program{ + Body: body.([]ast.Statement), + BaseNode: base(text, pos), + }, nil +} + +func srcElems(head, tails interface{}) ([]ast.Statement, error) { + elems := []ast.Statement{head.(ast.Statement)} + for _, tail := range toIfaceSlice(tails) { + elem := toIfaceSlice(tail)[1] // Skip whitespace + elems = append(elems, elem.(ast.Statement)) + } + return elems, nil +} + +func blockstmt(body interface{}, text []byte, pos position) (*ast.BlockStatement, error) { + bodySlice := 
toIfaceSlice(body) + statements := make([]ast.Statement, len(bodySlice)) + for i, s := range bodySlice { + stmt := toIfaceSlice(s)[1] // Skip whitespace + statements[i] = stmt.(ast.Statement) + } + return &ast.BlockStatement{ + BaseNode: base(text, pos), + Body: statements, + }, nil +} + +func varstmt(declaration interface{}, text []byte, pos position) (*ast.VariableDeclaration, error) { + return &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{declaration.(*ast.VariableDeclarator)}, + BaseNode: base(text, pos), + }, nil +} + +func vardecl(id, initializer interface{}, text []byte, pos position) (*ast.VariableDeclarator, error) { + return &ast.VariableDeclarator{ + ID: id.(*ast.Identifier), + Init: initializer.(ast.Expression), + }, nil +} + +func exprstmt(expr interface{}, text []byte, pos position) (*ast.ExpressionStatement, error) { + return &ast.ExpressionStatement{ + Expression: expr.(ast.Expression), + BaseNode: base(text, pos), + }, nil +} + +func returnstmt(argument interface{}, text []byte, pos position) (*ast.ReturnStatement, error) { + return &ast.ReturnStatement{ + BaseNode: base(text, pos), + Argument: argument.(ast.Expression), + }, nil +} + +func pipeExprs(head, tail interface{}, text []byte, pos position) (*ast.PipeExpression, error) { + var arg ast.Expression + arg = head.(ast.Expression) + + var pe *ast.PipeExpression + for _, t := range toIfaceSlice(tail) { + pe = toIfaceSlice(t)[1].(*ast.PipeExpression) + pe.Argument = arg + arg = pe + } + return pe, nil +} + +func incompletePipeExpr(call interface{}, text []byte, pos position) (*ast.PipeExpression, error) { + return &ast.PipeExpression{ + Call: call.(*ast.CallExpression), + BaseNode: base(text, pos), + }, nil +} + +func memberexprs(head, tail interface{}, text []byte, pos position) (ast.Expression, error) { + res := head.(ast.Expression) + for _, prop := range toIfaceSlice(tail) { + res = &ast.MemberExpression{ + Object: res, + Property: prop.(ast.Expression), + BaseNode: 
base(text, pos), + } + } + return res, nil +} + +func memberexpr(object, property interface{}, text []byte, pos position) (*ast.MemberExpression, error) { + m := &ast.MemberExpression{ + BaseNode: base(text, pos), + } + + if object != nil { + m.Object = object.(ast.Expression) + } + + if property != nil { + m.Property = property.(*ast.Identifier) + } + + return m, nil +} + +func callexpr(callee, args interface{}, text []byte, pos position) (*ast.CallExpression, error) { + c := &ast.CallExpression{ + BaseNode: base(text, pos), + } + + if callee != nil { + c.Callee = callee.(ast.Expression) + } + + if args != nil { + c.Arguments = []ast.Expression{args.(*ast.ObjectExpression)} + } + return c, nil +} + +func callexprs(head, tail interface{}, text []byte, pos position) (ast.Expression, error) { + expr := head.(ast.Expression) + for _, i := range toIfaceSlice(tail) { + switch elem := i.(type) { + case *ast.CallExpression: + elem.Callee = expr + expr = elem + case *ast.MemberExpression: + elem.Object = expr + expr = elem + } + } + return expr, nil +} + +func arrowfunc(params interface{}, body interface{}, text []byte, pos position) *ast.ArrowFunctionExpression { + paramsSlice := toIfaceSlice(params) + paramsList := make([]*ast.Property, len(paramsSlice)) + for i, p := range paramsSlice { + paramsList[i] = p.(*ast.Property) + } + return &ast.ArrowFunctionExpression{ + BaseNode: base(text, pos), + Params: paramsList, + Body: body.(ast.Node), + } +} + +func objectexpr(first, rest interface{}, text []byte, pos position) (*ast.ObjectExpression, error) { + props := []*ast.Property{first.(*ast.Property)} + if rest != nil { + for _, prop := range toIfaceSlice(rest) { + props = append(props, prop.(*ast.Property)) + } + } + + return &ast.ObjectExpression{ + Properties: props, + BaseNode: base(text, pos), + }, nil +} + +func property(key, value interface{}, text []byte, pos position) (*ast.Property, error) { + var v ast.Expression + if value != nil { + v = value.(ast.Expression) + 
} + return &ast.Property{ + Key: key.(*ast.Identifier), + Value: v, + BaseNode: base(text, pos), + }, nil +} + +func identifier(text []byte, pos position) (*ast.Identifier, error) { + return &ast.Identifier{ + Name: string(text), + BaseNode: base(text, pos), + }, nil +} + +func array(first, rest interface{}, text []byte, pos position) *ast.ArrayExpression { + var elements []ast.Expression + if first != nil { + elements = append(elements, first.(ast.Expression)) + } + if rest != nil { + for _, el := range rest.([]interface{}) { + elements = append(elements, el.(ast.Expression)) + } + } + return &ast.ArrayExpression{ + Elements: elements, + BaseNode: base(text, pos), + } +} + +func logicalExpression(head, tails interface{}, text []byte, pos position) (ast.Expression, error) { + res := head.(ast.Expression) + for _, tail := range toIfaceSlice(tails) { + right := toIfaceSlice(tail) + res = &ast.LogicalExpression{ + Left: res, + Right: right[3].(ast.Expression), + Operator: right[1].(ast.LogicalOperatorKind), + BaseNode: base(text, pos), + } + } + return res, nil +} + +func logicalOp(text []byte) (ast.LogicalOperatorKind, error) { + return ast.LogicalOperatorLookup(strings.ToLower(string(text))), nil +} + +func binaryExpression(head, tails interface{}, text []byte, pos position) (ast.Expression, error) { + res := head.(ast.Expression) + for _, tail := range toIfaceSlice(tails) { + right := toIfaceSlice(tail) + res = &ast.BinaryExpression{ + Left: res, + Right: right[3].(ast.Expression), + Operator: right[1].(ast.OperatorKind), + BaseNode: base(text, pos), + } + } + return res, nil +} + +func unaryExpression(op, argument interface{}, text []byte, pos position) (*ast.UnaryExpression, error) { + return &ast.UnaryExpression{ + Operator: op.(ast.OperatorKind), + Argument: argument.(ast.Expression), + BaseNode: base(text, pos), + }, nil +} + +func operator(text []byte) (ast.OperatorKind, error) { + return ast.OperatorLookup(strings.ToLower(string(text))), nil +} + +func 
stringLiteral(text []byte, pos position) (*ast.StringLiteral, error) { + s, err := strconv.Unquote(string(text)) + if err != nil { + return nil, err + } + return &ast.StringLiteral{ + BaseNode: base(text, pos), + Value: s, + }, nil +} + +func pipeLiteral(text []byte, pos position) *ast.PipeLiteral { + return &ast.PipeLiteral{ + BaseNode: base(text, pos), + } +} + +func booleanLiteral(b bool, text []byte, pos position) (*ast.BooleanLiteral, error) { + return &ast.BooleanLiteral{ + BaseNode: base(text, pos), + Value: b, + }, nil +} + +func integerLiteral(text []byte, pos position) (*ast.IntegerLiteral, error) { + n, err := strconv.ParseInt(string(text), 10, 64) + if err != nil { + return nil, err + } + return &ast.IntegerLiteral{ + BaseNode: base(text, pos), + Value: n, + }, nil +} + +func numberLiteral(text []byte, pos position) (*ast.FloatLiteral, error) { + n, err := strconv.ParseFloat(string(text), 64) + if err != nil { + return nil, err + } + return &ast.FloatLiteral{ + BaseNode: base(text, pos), + Value: n, + }, nil +} + +func regexLiteral(chars interface{}, text []byte, pos position) (*ast.RegexpLiteral, error) { + b := new(strings.Builder) + for _, char := range toIfaceSlice(chars) { + b.Write(char.([]byte)) + } + + r, err := regexp.Compile(b.String()) + if err != nil { + return nil, err + } + return &ast.RegexpLiteral{ + BaseNode: base(text, pos), + Value: r, + }, nil +} + +func durationLiteral(text []byte, pos position) (*ast.DurationLiteral, error) { + d, err := time.ParseDuration(string(text)) + if err != nil { + return nil, err + } + return &ast.DurationLiteral{ + BaseNode: base(text, pos), + Value: d, + }, nil +} + +func datetime(text []byte, pos position) (*ast.DateTimeLiteral, error) { + t, err := time.Parse(time.RFC3339Nano, string(text)) + if err != nil { + return nil, err + } + return &ast.DateTimeLiteral{ + BaseNode: base(text, pos), + Value: t, + }, nil +} + +func base(text []byte, pos position) *ast.BaseNode { + return &ast.BaseNode{ + Loc: 
&ast.SourceLocation{ + Start: ast.Position{ + Line: pos.line, + Column: pos.col, + }, + End: ast.Position{ + Line: pos.line, + Column: pos.col + len(text), + }, + Source: source(text), + }, + } +} + +func source(text []byte) *string { + str := string(text) + return &str +} diff --git a/vendor/github.com/influxdata/ifql/query.go b/vendor/github.com/influxdata/ifql/query.go new file mode 100644 index 000000000..cf45be690 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query.go @@ -0,0 +1,63 @@ +/* +Package ifql contains the parser, query engine, query functions +and a basic server and HTTP client for the IFQL query language and +engine. +*/ +package ifql + +import ( + + // Import functions + + "github.com/influxdata/ifql/complete" + _ "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + + "github.com/influxdata/ifql/query/control" + "github.com/influxdata/ifql/query/execute" + "github.com/pkg/errors" +) + +func init() { + query.FinalizeRegistration() +} + +type Config struct { + Hosts []string + + ConcurrencyQuota int + MemoryBytesQuota int + + Verbose bool +} + +// Use type aliases to expose simple API for entire project + +// Controller provides a central location to manage all incoming queries. +// The controller is responsible for queueing, planning, and executing queries. +type Controller = control.Controller + +// Query represents a single request. 
+type Query = control.Query + +func NewController(conf Config) (*Controller, error) { + s, err := execute.NewStorageReader(conf.Hosts) + if err != nil { + return nil, errors.Wrap(err, "failed to create storage reader") + } + c := control.Config{ + ConcurrencyQuota: conf.ConcurrencyQuota, + MemoryBytesQuota: int64(conf.MemoryBytesQuota), + ExecutorConfig: execute.Config{ + StorageReader: s, + }, + Verbose: conf.Verbose, + } + return control.New(c), nil +} + +// DefaultCompleter create a completer with builtin scope and declarations +func DefaultCompleter() complete.Completer { + scope, declarations := query.BuiltIns() + return complete.NewCompleter(scope, declarations) +} diff --git a/vendor/github.com/influxdata/ifql/query/compile.go b/vendor/github.com/influxdata/ifql/query/compile.go new file mode 100644 index 000000000..74877de57 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/compile.go @@ -0,0 +1,432 @@ +package query + +import ( + "context" + "fmt" + "log" + "sort" + "time" + + "github.com/influxdata/ifql/interpreter" + "github.com/influxdata/ifql/parser" + "github.com/influxdata/ifql/semantic" + opentracing "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" +) + +const ( + TableParameter = "table" + tableIDKey = "id" + tableKindKey = "kind" + tableParentsKey = "parents" + tableSpecKey = "spec" +) + +type Option func(*options) + +func Verbose(v bool) Option { + return func(o *options) { + o.verbose = v + } +} + +type options struct { + verbose bool +} + +// Compile evaluates an IFQL script producing a query Spec. 
+func Compile(ctx context.Context, q string, opts ...Option) (*Spec, error) { + o := new(options) + for _, opt := range opts { + opt(o) + } + s, _ := opentracing.StartSpanFromContext(ctx, "parse") + astProg, err := parser.NewAST(q) + if err != nil { + return nil, err + } + s.Finish() + s, _ = opentracing.StartSpanFromContext(ctx, "compile") + defer s.Finish() + + // Convert AST program to a semantic program + semProg, err := semantic.New(astProg, builtinDeclarations.Copy()) + if err != nil { + return nil, err + } + + // Create top-level builtin scope + scope := builtinScope.Nest() + + // Create new query domain + d := new(queryDomain) + if err := interpreter.Eval(semProg, scope, d); err != nil { + return nil, err + } + spec := d.ToSpec() + + if o.verbose { + log.Println("Query Spec: ", Formatted(spec, FmtJSON)) + } + return spec, nil +} + +type CreateOperationSpec func(args Arguments, a *Administration) (OperationSpec, error) + +var functionsMap = make(map[string]function) + +// RegisterFunction adds a new builtin top level function. +func RegisterFunction(name string, c CreateOperationSpec, sig semantic.FunctionSignature) { + if finalized { + panic(errors.New("already finalized, cannot register function")) + } + if _, ok := functionsMap[name]; ok { + panic(fmt.Errorf("duplicate registration for function %q", name)) + } + f := function{ + name: name, + createOpSpec: c, + } + functionsMap[name] = f + builtinScope.Set(name, f) + builtinDeclarations[name] = semantic.NewExternalVariableDeclaration( + name, + semantic.NewFunctionType(sig), + ) +} + +var TableObjectType = semantic.NewObjectType(map[string]semantic.Type{ + tableIDKey: semantic.String, + tableKindKey: semantic.String, + // TODO(nathanielc): The spec types vary significantly making type comparisons impossible, for now the solution is to state the type as an empty object. + tableSpecKey: semantic.EmptyObject, + // TODO(nathanielc): Support recursive types, for now we state that the array has empty objects. 
+ tableParentsKey: semantic.NewArrayType(semantic.EmptyObject), +}) + +type TableObject struct { + interpreter.Object +} + +func NewTableObject(t interpreter.Object) (TableObject, error) { + if typ := t.Type(); typ != TableObjectType { + return TableObject{}, fmt.Errorf("cannot create table object, wrong type: %v exp: %v", typ, TableObjectType) + } + return TableObject{ + Object: t, + }, nil +} + +func (t TableObject) ID() OperationID { + return OperationID(t.Properties[tableIDKey].Value().(string)) +} + +func (t TableObject) Kind() OperationKind { + return OperationKind(t.Properties[tableKindKey].Value().(string)) +} + +func (t TableObject) Spec() OperationSpec { + return t.Properties[tableSpecKey].Value().(OperationSpec) +} +func (t TableObject) Operation() *Operation { + return &Operation{ + ID: t.ID(), + Spec: t.Spec(), + } +} + +func (t TableObject) String() string { + return fmt.Sprintf("{id: %q, kind: %q}", t.ID(), t.Kind()) +} + +func (t TableObject) ToSpec() *Spec { + visited := make(map[OperationID]bool) + spec := new(Spec) + t.buildSpec(spec, visited) + return spec +} + +func (t TableObject) buildSpec(spec *Spec, visited map[OperationID]bool) { + id := t.ID() + parents := t.Properties[tableParentsKey].(interpreter.Array).Elements + for i := range parents { + p := parents[i].(TableObject) + if !visited[p.ID()] { + // rescurse up parents + p.buildSpec(spec, visited) + } + + spec.Edges = append(spec.Edges, Edge{ + Parent: p.ID(), + Child: id, + }) + } + + visited[id] = true + spec.Operations = append(spec.Operations, t.Operation()) +} + +// DefaultFunctionSignature returns a FunctionSignature for standard functions which accept a table piped argument. +// It is safe to modify the returned signature. 
+func DefaultFunctionSignature() semantic.FunctionSignature { + return semantic.FunctionSignature{ + Params: map[string]semantic.Type{ + TableParameter: TableObjectType, + }, + ReturnType: TableObjectType, + PipeArgument: TableParameter, + } +} + +var builtinScope = interpreter.NewScope() +var builtinDeclarations = make(semantic.DeclarationScope) + +// list of builtin scripts +var builtins = make(map[string]string) +var finalized bool + +// RegisterBuiltIn adds any variable declarations in the script to the builtin scope. +func RegisterBuiltIn(name, script string) { + if finalized { + panic(errors.New("already finalized, cannot register builtin")) + } + builtins[name] = script +} + +// FinalizeRegistration must be called to complete registration. +// Future calls to RegisterFunction or RegisterBuiltIn will panic. +func FinalizeRegistration() { + finalized = true + for name, script := range builtins { + astProg, err := parser.NewAST(script) + if err != nil { + panic(errors.Wrapf(err, "failed to parse builtin %q", name)) + } + semProg, err := semantic.New(astProg, builtinDeclarations) + if err != nil { + panic(errors.Wrapf(err, "failed to create semantic graph for builtin %q", name)) + } + + // Create new query domain + d := new(queryDomain) + + if err := interpreter.Eval(semProg, builtinScope, d); err != nil { + panic(errors.Wrapf(err, "failed to evaluate builtin %q", name)) + } + } + // free builtins list + builtins = nil +} + +func BuiltIns() (*interpreter.Scope, semantic.DeclarationScope) { + return builtinScope.Nest(), builtinDeclarations.Copy() +} + +type Administration struct { + id OperationID + parents interpreter.Array +} + +func newAdministration(id OperationID) *Administration { + return &Administration{ + id: id, + // TODO(nathanielc): Once we can support recursive types change this to, + // interpreter.NewArray(TableObjectType) + parents: interpreter.NewArray(semantic.EmptyObject), + } +} + +// AddParentFromArgs reads the args for the `table` argument 
and adds the value as a parent. +func (a *Administration) AddParentFromArgs(args Arguments) error { + parent, err := args.GetRequiredObject(TableParameter) + if err != nil { + return err + } + p, err := NewTableObject(parent) + if err != nil { + return err + } + a.AddParent(p) + return nil +} + +// AddParent instructs the evaluation Context that a new edge should be created from the parent to the current operation. +// Duplicate parents will be removed, so the caller need not concern itself with which parents have already been added. +func (a *Administration) AddParent(np TableObject) { + // Check for duplicates + for _, p := range a.parents.Elements { + if p.(TableObject).ID() == np.ID() { + return + } + } + a.parents.Elements = append(a.parents.Elements, np) +} + +type Domain interface { + interpreter.Domain + ToSpec() *Spec +} + +func NewDomain() Domain { + return new(queryDomain) +} + +type queryDomain struct { + id int + + operations []TableObject +} + +func (d *queryDomain) NewID(name string) OperationID { + return OperationID(fmt.Sprintf("%s%d", name, d.nextID())) +} + +func (d *queryDomain) nextID() int { + id := d.id + d.id++ + return id +} + +func (d *queryDomain) ToSpec() *Spec { + spec := new(Spec) + visited := make(map[OperationID]bool) + for _, t := range d.operations { + t.buildSpec(spec, visited) + } + return spec +} + +type function struct { + name string + createOpSpec CreateOperationSpec +} + +func (f function) Type() semantic.Type { + //TODO(nathanielc): Return a complete function type + return semantic.Function +} + +func (f function) Value() interface{} { + return f +} +func (f function) Property(name string) (interpreter.Value, error) { + return nil, fmt.Errorf("property %q does not exist", name) +} +func (f function) Resolve() (*semantic.FunctionExpression, error) { + return nil, fmt.Errorf("function %q cannot be resolved", f.name) +} + +func (f function) Call(args interpreter.Arguments, d interpreter.Domain) (interpreter.Value, error) { + 
qd := d.(*queryDomain) + id := qd.NewID(f.name) + + a := newAdministration(id) + + spec, err := f.createOpSpec(Arguments{Arguments: args}, a) + if err != nil { + return nil, err + } + + if len(a.parents.Elements) > 1 { + // Always add parents in a consistent order + sort.Slice(a.parents.Elements, func(i, j int) bool { + return a.parents.Elements[i].(TableObject).ID() < a.parents.Elements[j].(TableObject).ID() + }) + } + + t, err := NewTableObject(interpreter.Object{ + Properties: map[string]interpreter.Value{ + tableIDKey: interpreter.NewStringValue(string(id)), + tableKindKey: interpreter.NewStringValue(string(spec.Kind())), + tableSpecKey: specValue{spec: spec}, + tableParentsKey: a.parents, + }, + }) + if err != nil { + return nil, err + } + qd.operations = append(qd.operations, t) + return t, nil +} + +type specValue struct { + spec OperationSpec +} + +func (v specValue) Type() semantic.Type { + return semantic.EmptyObject +} + +func (v specValue) Value() interface{} { + return v.spec +} + +func (v specValue) Property(name string) (interpreter.Value, error) { + return nil, errors.New("spec does not have properties") +} + +type Arguments struct { + interpreter.Arguments +} + +func (a Arguments) GetTime(name string) (Time, bool, error) { + v, ok := a.Get(name) + if !ok { + return Time{}, false, nil + } + qt, err := ToQueryTime(v) + if err != nil { + return Time{}, ok, err + } + return qt, ok, nil +} + +func (a Arguments) GetRequiredTime(name string) (Time, error) { + qt, ok, err := a.GetTime(name) + if err != nil { + return Time{}, err + } + if !ok { + return Time{}, fmt.Errorf("missing required keyword argument %q", name) + } + return qt, nil +} + +func (a Arguments) GetDuration(name string) (Duration, bool, error) { + v, ok := a.Get(name) + if !ok { + return 0, false, nil + } + return (Duration)(v.Value().(time.Duration)), ok, nil +} + +func (a Arguments) GetRequiredDuration(name string) (Duration, error) { + d, ok, err := a.GetDuration(name) + if err != nil { 
+ return 0, err + } + if !ok { + return 0, fmt.Errorf("missing required keyword argument %q", name) + } + return d, nil +} + +func ToQueryTime(value interpreter.Value) (Time, error) { + switch v := value.Value().(type) { + case time.Time: + return Time{ + Absolute: v, + }, nil + case time.Duration: + return Time{ + Relative: v, + IsRelative: true, + }, nil + case int64: + return Time{ + Absolute: time.Unix(v, 0), + }, nil + default: + return Time{}, fmt.Errorf("value is not a time, got %v", value.Type()) + } +} diff --git a/vendor/github.com/influxdata/ifql/query/control/controller.go b/vendor/github.com/influxdata/ifql/query/control/controller.go new file mode 100644 index 000000000..95591b169 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/control/controller.go @@ -0,0 +1,554 @@ +package control + +import ( + "context" + "log" + "math" + "sync" + "time" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/plan" + opentracing "github.com/opentracing/opentracing-go" + "github.com/pkg/errors" +) + +// Controller provides a central location to manage all incoming queries. +// The controller is responsible for queueing, planning, and executing queries. 
+type Controller struct { + newQueries chan *Query + lastID QueryID + queriesMu sync.RWMutex + queries map[QueryID]*Query + queryDone chan *Query + cancelRequest chan QueryID + + verbose bool + + lplanner plan.LogicalPlanner + pplanner plan.Planner + executor execute.Executor + + maxConcurrency int + availableConcurrency int + availableMemory int64 +} + +type Config struct { + ConcurrencyQuota int + MemoryBytesQuota int64 + ExecutorConfig execute.Config + Verbose bool +} + +type QueryID uint64 + +func New(c Config) *Controller { + ctrl := &Controller{ + newQueries: make(chan *Query), + queries: make(map[QueryID]*Query), + queryDone: make(chan *Query), + cancelRequest: make(chan QueryID), + maxConcurrency: c.ConcurrencyQuota, + availableConcurrency: c.ConcurrencyQuota, + availableMemory: c.MemoryBytesQuota, + lplanner: plan.NewLogicalPlanner(), + pplanner: plan.NewPlanner(), + executor: execute.NewExecutor(c.ExecutorConfig), + verbose: c.Verbose, + } + go ctrl.run() + return ctrl +} + +// QueryWithCompile submits a query for execution returning immediately. +// The query will first be compiled before submitting for execution. +// Done must be called on any returned Query objects. +func (c *Controller) QueryWithCompile(ctx context.Context, queryStr string) (*Query, error) { + q := c.createQuery(ctx) + err := c.compileQuery(q, queryStr) + if err != nil { + return nil, err + } + err = c.enqueueQuery(q) + return q, err +} + +// Query submits a query for execution returning immediately. +// The spec must not be modified while the query is still active. +// Done must be called on any returned Query objects. 
+func (c *Controller) Query(ctx context.Context, qSpec *query.Spec) (*Query, error) { + q := c.createQuery(ctx) + q.Spec = *qSpec + err := c.enqueueQuery(q) + return q, err +} + +func (c *Controller) createQuery(ctx context.Context) *Query { + id := c.nextID() + cctx, cancel := context.WithCancel(ctx) + ready := make(chan map[string]execute.Result, 1) + return &Query{ + id: id, + state: Created, + c: c, + now: time.Now().UTC(), + ready: ready, + Ready: ready, + parentCtx: cctx, + cancel: cancel, + } +} + +func (c *Controller) compileQuery(q *Query, queryStr string) error { + q.compile() + spec, err := query.Compile(q.compilingCtx, queryStr, query.Verbose(c.verbose)) + if err != nil { + return errors.Wrap(err, "failed to compile query") + } + q.Spec = *spec + return nil +} + +func (c *Controller) enqueueQuery(q *Query) error { + if c.verbose { + log.Println("query", query.Formatted(&q.Spec, query.FmtJSON)) + } + q.queue() + if err := q.Spec.Validate(); err != nil { + return errors.Wrap(err, "invalid query") + } + // Add query to the queue + c.newQueries <- q + return nil +} + +func (c *Controller) nextID() QueryID { + c.queriesMu.RLock() + defer c.queriesMu.RUnlock() + ok := true + for ok { + c.lastID++ + _, ok = c.queries[c.lastID] + } + return c.lastID +} + +// Queries reports the active queries. 
+func (c *Controller) Queries() []*Query { + c.queriesMu.RLock() + defer c.queriesMu.RUnlock() + queries := make([]*Query, 0, len(c.queries)) + for _, q := range c.queries { + queries = append(queries, q) + } + return queries +} + +func (c *Controller) run() { + pq := newPriorityQueue() + for { + select { + // Wait for resources to free + case q := <-c.queryDone: + c.free(q) + c.queriesMu.Lock() + delete(c.queries, q.id) + c.queriesMu.Unlock() + // Wait for new queries + case q := <-c.newQueries: + pq.Push(q) + c.queriesMu.Lock() + c.queries[q.id] = q + c.queriesMu.Unlock() + // Wait for cancel query requests + case id := <-c.cancelRequest: + c.queriesMu.RLock() + q := c.queries[id] + c.queriesMu.RUnlock() + q.Cancel() + } + + // Peek at head of priority queue + q := pq.Peek() + if q != nil { + err := c.processQuery(pq, q) + if err != nil { + go q.setErr(err) + } + } + } +} + +func (c *Controller) processQuery(pq *PriorityQueue, q *Query) error { + if q.tryPlan() { + // Plan query to determine needed resources + lp, err := c.lplanner.Plan(&q.Spec) + if err != nil { + return errors.Wrap(err, "failed to create logical plan") + } + if c.verbose { + log.Println("logical plan", plan.Formatted(lp)) + } + + p, err := c.pplanner.Plan(lp, nil, q.now) + if err != nil { + return errors.Wrap(err, "failed to create physical plan") + } + q.plan = p + q.concurrency = p.Resources.ConcurrencyQuota + if q.concurrency > c.maxConcurrency { + q.concurrency = c.maxConcurrency + } + q.memory = p.Resources.MemoryBytesQuota + if c.verbose { + log.Println("physical plan", plan.Formatted(q.plan)) + } + } + + // Check if we have enough resources + if c.check(q) { + // Update resource gauges + c.consume(q) + + // Remove the query from the queue + pq.Pop() + + // Execute query + if q.tryExec() { + r, err := c.executor.Execute(q.executeCtx, q.plan) + if err != nil { + return errors.Wrap(err, "failed to execute query") + } + q.setResults(r) + } + } else { + // update state to queueing + 
q.tryRequeue() + } + return nil +} + +func (c *Controller) check(q *Query) bool { + return c.availableConcurrency >= q.concurrency && (q.memory == math.MaxInt64 || c.availableMemory >= q.memory) +} +func (c *Controller) consume(q *Query) { + c.availableConcurrency -= q.concurrency + + if q.memory != math.MaxInt64 { + c.availableMemory -= q.memory + } +} + +func (c *Controller) free(q *Query) { + c.availableConcurrency += q.concurrency + + if q.memory != math.MaxInt64 { + c.availableMemory += q.memory + } +} + +// Query represents a single request. +type Query struct { + id QueryID + c *Controller + + Spec query.Spec + now time.Time + + err error + + ready chan<- map[string]execute.Result + // Ready is a channel that will deliver the query results. + // The channel may be closed before any results arrive, in which case the query should be + // inspected for an error using Err(). + Ready <-chan map[string]execute.Result + + mu sync.Mutex + state State + cancel func() + + parentCtx, + compilingCtx, + queueCtx, + planCtx, + requeueCtx, + executeCtx context.Context + + compilingSpan, + queueSpan, + planSpan, + requeueSpan, + executeSpan *span + + plan *plan.PlanSpec + + concurrency int + memory int64 +} + +// ID reports an ephemeral unique ID for the query. +func (q *Query) ID() QueryID { + return q.id +} + +// Cancel will stop the query execution. +// Done must still be called to free resources. +func (q *Query) Cancel() { + q.mu.Lock() + defer q.mu.Unlock() + q.cancel() + if q.state != Errored { + q.state = Canceled + } + // Finish the query immediately. + // This allows for receiving from the Ready channel in the same goroutine + // that has called defer q.Done() + q.finish() +} + +// finish informs the controller and the Ready channel that the query is finished. +func (q *Query) finish() { + q.c.queryDone <- q + close(q.ready) + q.recordMetrics() +} + +// Done must always be called to free resources. 
+func (q *Query) Done() { + q.mu.Lock() + defer q.mu.Unlock() + switch q.state { + case Queueing: + queueingGauge.Dec() + case Planning: + planningGauge.Dec() + case Requeueing: + requeueingGauge.Dec() + case Executing: + q.executeSpan.Finish() + executingGauge.Dec() + + q.state = Finished + case Errored: + // The query has already been finished in the call to setErr. + return + case Canceled: + // The query has already been finished in the call to Cancel. + return + default: + panic("unreachable, all states have been accounted for") + } + q.finish() +} + +func (q *Query) recordMetrics() { + if q.compilingSpan != nil { + compilingHist.Observe(q.compilingSpan.Duration.Seconds()) + } + if q.queueSpan != nil { + queueingHist.Observe(q.queueSpan.Duration.Seconds()) + } + if q.requeueSpan != nil { + requeueingHist.Observe(q.requeueSpan.Duration.Seconds()) + } + if q.planSpan != nil { + planningHist.Observe(q.planSpan.Duration.Seconds()) + } + if q.executeSpan != nil { + executingHist.Observe(q.executeSpan.Duration.Seconds()) + } +} + +// State reports the current state of the query. +func (q *Query) State() State { + q.mu.Lock() + s := q.state + q.mu.Unlock() + return s +} + +func (q *Query) isOK() bool { + q.mu.Lock() + ok := q.state != Canceled && q.state != Errored + q.mu.Unlock() + return ok +} + +// Err reports any error the query may have encountered. +func (q *Query) Err() error { + q.mu.Lock() + err := q.err + q.mu.Unlock() + return err +} +func (q *Query) setErr(err error) { + q.mu.Lock() + defer q.mu.Unlock() + q.err = err + q.state = Errored + + // Finish the query immediately. + // This allows for receiving from the Ready channel in the same goroutine + // that has called defer q.Done() + q.finish() +} + +func (q *Query) setResults(r map[string]execute.Result) { + q.mu.Lock() + if q.state == Executing { + q.ready <- r + } + q.mu.Unlock() +} + +// compile transitions the query into the Compiling state. 
+func (q *Query) compile() { + q.mu.Lock() + + q.compilingSpan, q.compilingCtx = StartSpanFromContext(q.parentCtx, "compiling") + compilingGauge.Inc() + + q.state = Compiling + q.mu.Unlock() +} + +// queue transitions the query into the Queueing state. +func (q *Query) queue() { + q.mu.Lock() + if q.state == Compiling { + q.compilingSpan.Finish() + compilingGauge.Dec() + } + q.queueSpan, q.queueCtx = StartSpanFromContext(q.parentCtx, "queueing") + queueingGauge.Inc() + + q.state = Queueing + q.mu.Unlock() +} + +// tryRequeue attempts to transition the query into the Requeueing state. +func (q *Query) tryRequeue() bool { + q.mu.Lock() + if q.state == Planning { + q.planSpan.Finish() + planningGauge.Dec() + + q.requeueSpan, q.requeueCtx = StartSpanFromContext(q.parentCtx, "requeueing") + requeueingGauge.Inc() + + q.state = Requeueing + q.mu.Unlock() + return true + } + q.mu.Unlock() + return false +} + +// tryPlan attempts to transition the query into the Planning state. +func (q *Query) tryPlan() bool { + q.mu.Lock() + if q.state == Queueing { + q.queueSpan.Finish() + queueingGauge.Dec() + + q.planSpan, q.planCtx = StartSpanFromContext(q.parentCtx, "planning") + planningGauge.Inc() + + q.state = Planning + q.mu.Unlock() + return true + } + q.mu.Unlock() + return false +} + +// tryExec attempts to transition the query into the Executing state. +func (q *Query) tryExec() bool { + q.mu.Lock() + if q.state == Requeueing || q.state == Planning { + switch q.state { + case Requeueing: + q.requeueSpan.Finish() + requeueingGauge.Dec() + case Planning: + q.planSpan.Finish() + planningGauge.Dec() + } + + q.executeSpan, q.executeCtx = StartSpanFromContext(q.parentCtx, "executing") + executingGauge.Inc() + + q.state = Executing + q.mu.Unlock() + return true + } + q.mu.Unlock() + return false +} + +// State is the query state. 
+type State int
+
+const (
+	Created State = iota
+	Compiling
+	Queueing
+	Planning
+	Requeueing
+	Executing
+	Errored
+	Finished
+	Canceled
+)
+
+func (s State) String() string {
+	switch s {
+	case Created:
+		return "created"
+	case Compiling:
+		return "compiling"
+	case Queueing:
+		return "queueing"
+	case Planning:
+		return "planning"
+	case Requeueing:
+		return "requeueing"
+	case Executing:
+		return "executing"
+	case Errored:
+		return "errored"
+	case Finished:
+		return "finished"
+	case Canceled:
+		return "canceled"
+	default:
+		return "unknown"
+	}
+}
+
+// span is a simple wrapper around opentracing.Span in order to
+// get access to the duration of the span for metrics reporting.
+type span struct {
+	s        opentracing.Span
+	start    time.Time
+	Duration time.Duration
+}
+
+func StartSpanFromContext(ctx context.Context, operationName string) (*span, context.Context) {
+	start := time.Now()
+	s, sctx := opentracing.StartSpanFromContext(ctx, operationName, opentracing.StartTime(start))
+	return &span{
+		s:     s,
+		start: start,
+	}, sctx
+}
+
+func (s *span) Finish() {
+	finish := time.Now()
+	s.Duration = finish.Sub(s.start)
+	s.s.FinishWithOptions(opentracing.FinishOptions{
+		FinishTime: finish,
+	})
+}
diff --git a/vendor/github.com/influxdata/ifql/query/control/metrics.go b/vendor/github.com/influxdata/ifql/query/control/metrics.go
new file mode 100644
index 000000000..e0bd9daa5
--- /dev/null
+++ b/vendor/github.com/influxdata/ifql/query/control/metrics.go
@@ -0,0 +1,62 @@
+package control
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var compilingGauge = prometheus.NewGauge(prometheus.GaugeOpts{
+	Name: "ifql_control_current_compiling",
+	Help: "Number of queries currently compiling",
+})
+var queueingGauge = prometheus.NewGauge(prometheus.GaugeOpts{
+	Name: "ifql_control_current_queueing",
+	Help: "Number of queries currently queueing",
+})
+var requeueingGauge = prometheus.NewGauge(prometheus.GaugeOpts{
+	Name: 
"ifql_control_current_requeueing",
+	Help: "Number of queries currently requeueing",
+})
+var planningGauge = prometheus.NewGauge(prometheus.GaugeOpts{
+	Name: "ifql_control_current_planning",
+	Help: "Number of queries currently planning",
+})
+var executingGauge = prometheus.NewGauge(prometheus.GaugeOpts{
+	Name: "ifql_control_current_executing",
+	Help: "Number of queries currently executing",
+})
+
+var compilingHist = prometheus.NewHistogram(prometheus.HistogramOpts{
+	Name:    "ifql_control_compiling",
+	Help:    "Histogram of compiling durations",
+	Buckets: prometheus.ExponentialBuckets(1e-3, 5, 7),
+})
+var queueingHist = prometheus.NewHistogram(prometheus.HistogramOpts{
+	Name:    "ifql_control_queueing",
+	Help:    "Histogram of queueing durations",
+	Buckets: prometheus.ExponentialBuckets(1e-3, 5, 7),
+})
+var requeueingHist = prometheus.NewHistogram(prometheus.HistogramOpts{
+	Name:    "ifql_control_requeueing",
+	Help:    "Histogram of requeueing durations",
+	Buckets: prometheus.ExponentialBuckets(1e-3, 5, 7),
+})
+var planningHist = prometheus.NewHistogram(prometheus.HistogramOpts{
+	Name:    "ifql_control_planning",
+	Help:    "Histogram of planning durations",
+	Buckets: prometheus.ExponentialBuckets(1e-5, 5, 7),
+})
+var executingHist = prometheus.NewHistogram(prometheus.HistogramOpts{
+	Name:    "ifql_control_executing",
+	Help:    "Histogram of executing durations",
+	Buckets: prometheus.ExponentialBuckets(1e-3, 5, 7),
+})
+
+func init() {
+	prometheus.MustRegister(compilingGauge)
+	prometheus.MustRegister(queueingGauge)
+	prometheus.MustRegister(requeueingGauge)
+	prometheus.MustRegister(planningGauge)
+	prometheus.MustRegister(executingGauge)
+
+	prometheus.MustRegister(compilingHist)
+	prometheus.MustRegister(queueingHist)
+	prometheus.MustRegister(requeueingHist)
+	prometheus.MustRegister(planningHist)
+	prometheus.MustRegister(executingHist)
+}
diff --git a/vendor/github.com/influxdata/ifql/query/control/queue.go b/vendor/github.com/influxdata/ifql/query/control/queue.go
new file mode 100644
index 000000000..b1e123df0
--- /dev/null
+++ 
b/vendor/github.com/influxdata/ifql/query/control/queue.go @@ -0,0 +1,67 @@ +package control + +import "container/heap" + +// priorityQueue implements heap.Interface and holds Query objects. +type priorityQueue []*Query + +func (pq priorityQueue) Len() int { return len(pq) } + +func (pq priorityQueue) Less(i, j int) bool { + return pq[i].Spec.Resources.Priority < pq[j].Spec.Resources.Priority +} + +func (pq priorityQueue) Swap(i, j int) { + pq[i], pq[j] = pq[j], pq[i] +} + +func (pq *priorityQueue) Push(x interface{}) { + q := x.(*Query) + *pq = append(*pq, q) +} + +func (pq *priorityQueue) Pop() interface{} { + old := *pq + n := len(old) + q := old[n-1] + *pq = old[0 : n-1] + return q +} + +type PriorityQueue struct { + queue priorityQueue +} + +func newPriorityQueue() *PriorityQueue { + return &PriorityQueue{ + queue: make(priorityQueue, 0, 100), + } +} + +func (p *PriorityQueue) Push(q *Query) { + heap.Push(&p.queue, q) +} + +func (p *PriorityQueue) Peek() *Query { + for { + if p.queue.Len() == 0 { + return nil + } + q := p.queue[0] + if q.isOK() { + return q + } + heap.Pop(&p.queue) + } +} +func (p *PriorityQueue) Pop() *Query { + for { + if p.queue.Len() == 0 { + return nil + } + q := heap.Pop(&p.queue).(*Query) + if q.isOK() { + return q + } + } +} diff --git a/vendor/github.com/influxdata/ifql/query/execute/aggergate_test.go b/vendor/github.com/influxdata/ifql/query/execute/aggergate_test.go new file mode 100644 index 000000000..97f2e3270 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/aggergate_test.go @@ -0,0 +1,379 @@ +package execute_test + +import ( + "sort" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" +) + +func TestAggregate_Process(t *testing.T) { + sumAgg := new(functions.SumAgg) + countAgg := new(functions.CountAgg) + testCases := []struct { + 
name string + bounds execute.Bounds + agg execute.Aggregate + data []*executetest.Block + want func(b execute.Bounds) []*executetest.Block + }{ + { + name: "single", + bounds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + agg: sumAgg, + data: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(0), 0.0}, + {execute.Time(10), 1.0}, + {execute.Time(20), 2.0}, + {execute.Time(30), 3.0}, + {execute.Time(40), 4.0}, + {execute.Time(50), 5.0}, + {execute.Time(60), 6.0}, + {execute.Time(70), 7.0}, + {execute.Time(80), 8.0}, + {execute.Time(90), 9.0}, + }, + }}, + want: func(b execute.Bounds) []*executetest.Block { + return []*executetest.Block{{ + Bnds: b, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(100), 45.0}, + }, + }} + }, + }, + { + name: "multiple blocks", + bounds: execute.Bounds{ + Start: 0, + Stop: 200, + }, + agg: sumAgg, + data: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(0), 0.0}, + {execute.Time(10), 1.0}, + {execute.Time(20), 2.0}, + {execute.Time(30), 3.0}, + {execute.Time(40), 4.0}, + {execute.Time(50), 5.0}, + {execute.Time(60), 6.0}, + {execute.Time(70), 7.0}, + {execute.Time(80), 8.0}, + {execute.Time(90), 9.0}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 100, + Stop: 200, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: 
execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(100), 10.0}, + {execute.Time(110), 11.0}, + {execute.Time(120), 12.0}, + {execute.Time(130), 13.0}, + {execute.Time(140), 14.0}, + {execute.Time(150), 15.0}, + {execute.Time(160), 16.0}, + {execute.Time(170), 17.0}, + {execute.Time(180), 18.0}, + {execute.Time(190), 19.0}, + }, + }, + }, + want: func(b execute.Bounds) []*executetest.Block { + return []*executetest.Block{{ + Bnds: b, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(100), 45.0}, + {execute.Time(200), 145.0}, + }, + }} + }, + }, + { + name: "multiple blocks with tags", + bounds: execute.Bounds{ + Start: 0, + Stop: 200, + }, + agg: sumAgg, + data: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(0), 0.0, "a"}, + {execute.Time(10), 1.0, "a"}, + {execute.Time(20), 2.0, "a"}, + {execute.Time(30), 3.0, "a"}, + {execute.Time(40), 4.0, "a"}, + {execute.Time(50), 5.0, "a"}, + {execute.Time(60), 6.0, "a"}, + {execute.Time(70), 7.0, "a"}, + {execute.Time(80), 8.0, "a"}, + {execute.Time(90), 9.0, "a"}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(0), 0.3, "b"}, + {execute.Time(10), 1.3, "b"}, + {execute.Time(20), 2.3, "b"}, + 
{execute.Time(30), 3.3, "b"}, + {execute.Time(40), 4.3, "b"}, + {execute.Time(50), 5.3, "b"}, + {execute.Time(60), 6.3, "b"}, + {execute.Time(70), 7.3, "b"}, + {execute.Time(80), 8.3, "b"}, + {execute.Time(90), 9.3, "b"}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 100, + Stop: 200, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(100), 10.0, "a"}, + {execute.Time(110), 11.0, "a"}, + {execute.Time(120), 12.0, "a"}, + {execute.Time(130), 13.0, "a"}, + {execute.Time(140), 14.0, "a"}, + {execute.Time(150), 15.0, "a"}, + {execute.Time(160), 16.0, "a"}, + {execute.Time(170), 17.0, "a"}, + {execute.Time(180), 18.0, "a"}, + {execute.Time(190), 19.0, "a"}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 100, + Stop: 200, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(100), 10.3, "b"}, + {execute.Time(110), 11.3, "b"}, + {execute.Time(120), 12.3, "b"}, + {execute.Time(130), 13.3, "b"}, + {execute.Time(140), 14.3, "b"}, + {execute.Time(150), 15.3, "b"}, + {execute.Time(160), 16.3, "b"}, + {execute.Time(170), 17.3, "b"}, + {execute.Time(180), 18.3, "b"}, + {execute.Time(190), 19.3, "b"}, + }, + }, + }, + want: func(b execute.Bounds) []*executetest.Block { + return []*executetest.Block{ + { + Bnds: b, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + 
{execute.Time(100), 45.0, "a"}, + {execute.Time(200), 145.0, "a"}, + }, + }, + { + Bnds: b, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + }, + Data: [][]interface{}{ + {execute.Time(100), 48.0, "b"}, + {execute.Time(200), 148.0, "b"}, + }, + }, + } + }, + }, + { + name: "multiple values", + bounds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + agg: sumAgg, + data: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "x", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "y", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(0), 0.0, 0.0}, + {execute.Time(10), 1.0, -1.0}, + {execute.Time(20), 2.0, -2.0}, + {execute.Time(30), 3.0, -3.0}, + {execute.Time(40), 4.0, -4.0}, + {execute.Time(50), 5.0, -5.0}, + {execute.Time(60), 6.0, -6.0}, + {execute.Time(70), 7.0, -7.0}, + {execute.Time(80), 8.0, -8.0}, + {execute.Time(90), 9.0, -9.0}, + }, + }}, + want: func(b execute.Bounds) []*executetest.Block { + return []*executetest.Block{{ + Bnds: b, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "x", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "y", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(100), 45.0, -45.0}, + }, + }} + }, + }, + { + name: "multiple values changing types", + bounds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + agg: countAgg, + data: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "x", Type: 
execute.TFloat, Kind: execute.ValueColKind}, + {Label: "y", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(0), 0.0, 0.0}, + {execute.Time(10), 1.0, -1.0}, + {execute.Time(20), 2.0, -2.0}, + {execute.Time(30), 3.0, -3.0}, + {execute.Time(40), 4.0, -4.0}, + {execute.Time(50), 5.0, -5.0}, + {execute.Time(60), 6.0, -6.0}, + {execute.Time(70), 7.0, -7.0}, + {execute.Time(80), 8.0, -8.0}, + {execute.Time(90), 9.0, -9.0}, + }, + }}, + want: func(b execute.Bounds) []*executetest.Block { + return []*executetest.Block{{ + Bnds: b, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "x", Type: execute.TInt, Kind: execute.ValueColKind}, + {Label: "y", Type: execute.TInt, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(100), int64(10), int64(10)}, + }, + }} + }, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + d := executetest.NewDataset(executetest.RandomDatasetID()) + c := execute.NewBlockBuilderCache(executetest.UnlimitedAllocator) + c.SetTriggerSpec(execute.DefaultTriggerSpec) + + agg := execute.NewAggregateTransformation(d, c, tc.bounds, tc.agg) + + parentID := executetest.RandomDatasetID() + for _, b := range tc.data { + if err := agg.Process(parentID, b); err != nil { + t.Fatal(err) + } + } + + want := tc.want(tc.bounds) + got := executetest.BlocksFromCache(c) + + sort.Sort(executetest.SortedBlocks(got)) + sort.Sort(executetest.SortedBlocks(want)) + + if !cmp.Equal(want, got, cmpopts.EquateNaNs()) { + t.Errorf("unexpected blocks -want/+got\n%s", cmp.Diff(want, got)) + } + }) + } +} diff --git a/vendor/github.com/influxdata/ifql/query/execute/aggregate.go b/vendor/github.com/influxdata/ifql/query/execute/aggregate.go new file mode 100644 index 000000000..c3781a3db --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/aggregate.go @@ -0,0 +1,190 @@ +package execute + +type 
aggregateTransformation struct { + d Dataset + cache BlockBuilderCache + bounds Bounds + agg Aggregate +} + +func NewAggregateTransformation(d Dataset, c BlockBuilderCache, bounds Bounds, agg Aggregate) *aggregateTransformation { + return &aggregateTransformation{ + d: d, + cache: c, + bounds: bounds, + agg: agg, + } +} + +func NewAggregateTransformationAndDataset(id DatasetID, mode AccumulationMode, bounds Bounds, agg Aggregate, a *Allocator) (*aggregateTransformation, Dataset) { + cache := NewBlockBuilderCache(a) + d := NewDataset(id, mode, cache) + return NewAggregateTransformation(d, cache, bounds, agg), d +} + +func (t *aggregateTransformation) RetractBlock(id DatasetID, meta BlockMetadata) error { + //TODO(nathanielc): Store intermediate state for retractions + key := ToBlockKey(meta) + return t.d.RetractBlock(key) +} + +func (t *aggregateTransformation) Process(id DatasetID, b Block) error { + builder, new := t.cache.BlockBuilder(blockMetadata{ + bounds: t.bounds, + tags: b.Tags(), + }) + if new { + cols := b.Cols() + for j, c := range cols { + switch c.Kind { + case TimeColKind: + builder.AddCol(c) + case TagColKind: + if c.Common { + builder.AddCol(c) + builder.SetCommonString(j, b.Tags()[c.Label]) + } + case ValueColKind: + var vf ValueFunc + switch c.Type { + case TBool: + vf = t.agg.NewBoolAgg() + case TInt: + vf = t.agg.NewIntAgg() + case TUInt: + vf = t.agg.NewUIntAgg() + case TFloat: + vf = t.agg.NewFloatAgg() + case TString: + vf = t.agg.NewStringAgg() + } + builder.AddCol(ColMeta{ + Label: c.Label, + Type: vf.Type(), + Kind: ValueColKind, + }) + } + } + } + // Add row for aggregate values + timeIdx := TimeIdx(builder.Cols()) + builder.AppendTime(timeIdx, b.Bounds().Stop) + + for j, c := range b.Cols() { + if c.Kind != ValueColKind { + continue + } + + // TODO(nathanielc): This reads the block multiple times (once per value column), is that OK? 
+ values := b.Col(j) + var vf ValueFunc + switch c.Type { + case TBool: + f := t.agg.NewBoolAgg() + values.DoBool(func(vs []bool, _ RowReader) { + f.DoBool(vs) + }) + vf = f + case TInt: + f := t.agg.NewIntAgg() + values.DoInt(func(vs []int64, _ RowReader) { + f.DoInt(vs) + }) + vf = f + case TUInt: + f := t.agg.NewUIntAgg() + values.DoUInt(func(vs []uint64, _ RowReader) { + f.DoUInt(vs) + }) + vf = f + case TFloat: + f := t.agg.NewFloatAgg() + values.DoFloat(func(vs []float64, _ RowReader) { + f.DoFloat(vs) + }) + vf = f + case TString: + f := t.agg.NewStringAgg() + values.DoString(func(vs []string, _ RowReader) { + f.DoString(vs) + }) + vf = f + } + switch vf.Type() { + case TBool: + v := vf.(BoolValueFunc) + builder.AppendBool(j, v.ValueBool()) + case TInt: + v := vf.(IntValueFunc) + builder.AppendInt(j, v.ValueInt()) + case TUInt: + v := vf.(UIntValueFunc) + builder.AppendUInt(j, v.ValueUInt()) + case TFloat: + v := vf.(FloatValueFunc) + builder.AppendFloat(j, v.ValueFloat()) + case TString: + v := vf.(StringValueFunc) + builder.AppendString(j, v.ValueString()) + } + } + return nil +} + +func (t *aggregateTransformation) UpdateWatermark(id DatasetID, mark Time) error { + return t.d.UpdateWatermark(mark) +} +func (t *aggregateTransformation) UpdateProcessingTime(id DatasetID, pt Time) error { + return t.d.UpdateProcessingTime(pt) +} +func (t *aggregateTransformation) Finish(id DatasetID, err error) { + t.d.Finish(err) +} + +type Aggregate interface { + NewBoolAgg() DoBoolAgg + NewIntAgg() DoIntAgg + NewUIntAgg() DoUIntAgg + NewFloatAgg() DoFloatAgg + NewStringAgg() DoStringAgg +} + +type ValueFunc interface { + Type() DataType +} +type DoBoolAgg interface { + ValueFunc + DoBool([]bool) +} +type DoFloatAgg interface { + ValueFunc + DoFloat([]float64) +} +type DoIntAgg interface { + ValueFunc + DoInt([]int64) +} +type DoUIntAgg interface { + ValueFunc + DoUInt([]uint64) +} +type DoStringAgg interface { + ValueFunc + DoString([]string) +} + +type BoolValueFunc 
interface { + ValueBool() bool +} +type FloatValueFunc interface { + ValueFloat() float64 +} +type IntValueFunc interface { + ValueInt() int64 +} +type UIntValueFunc interface { + ValueUInt() uint64 +} +type StringValueFunc interface { + ValueString() string +} diff --git a/vendor/github.com/influxdata/ifql/query/execute/allocator.go b/vendor/github.com/influxdata/ifql/query/execute/allocator.go new file mode 100644 index 000000000..48ffecf70 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/allocator.go @@ -0,0 +1,170 @@ +package execute + +import ( + "fmt" + "sync/atomic" +) + +const ( + boolSize = 1 + int64Size = 8 + uint64Size = 8 + float64Size = 8 + stringSize = 16 + timeSize = 8 +) + +// Allocator tracks the amount of memory being consumed by a query. +// The allocator provides methods similar to make and append, to allocate large slices of data. +// The allocator also provides a Free method to account for when memory will be freed. +type Allocator struct { + Limit int64 + bytesAllocated int64 + maxAllocated int64 +} + +func (a *Allocator) count(n, size int) (c int64) { + c = atomic.AddInt64(&a.bytesAllocated, int64(n*size)) + for max := atomic.LoadInt64(&a.maxAllocated); c > max; max = atomic.LoadInt64(&a.maxAllocated) { + if atomic.CompareAndSwapInt64(&a.maxAllocated, max, c) { + return + } + } + return +} + +// Free informs the allocator that memory has been freed. +func (a *Allocator) Free(n, size int) { + a.count(-n, size) +} + +// Max reports the maximum amount of allocated memory at any point in the query. +func (a *Allocator) Max() int64 { + return atomic.LoadInt64(&a.maxAllocated) +} + +func (a *Allocator) account(n, size int) { + if want := a.count(n, size); want > a.Limit { + allocated := a.count(-n, size) + panic(AllocError{ + Limit: a.Limit, + Allocated: allocated, + Wanted: want - allocated, + }) + } +} + +// Bools makes a slice of bool values. 
+func (a *Allocator) Bools(l, c int) []bool { + a.account(c, boolSize) + return make([]bool, l, c) +} + +// AppendBools appends bools to a slice +func (a *Allocator) AppendBools(slice []bool, vs ...bool) []bool { + if cap(slice)-len(slice) > len(vs) { + return append(slice, vs...) + } + s := append(slice, vs...) + diff := cap(s) - cap(slice) + a.account(diff, boolSize) + return s +} + +// Ints makes a slice of int64 values. +func (a *Allocator) Ints(l, c int) []int64 { + a.account(c, int64Size) + return make([]int64, l, c) +} + +// AppendInts appends int64s to a slice +func (a *Allocator) AppendInts(slice []int64, vs ...int64) []int64 { + if cap(slice)-len(slice) > len(vs) { + return append(slice, vs...) + } + s := append(slice, vs...) + diff := cap(s) - cap(slice) + a.account(diff, int64Size) + return s +} + +// UInts makes a slice of uint64 values. +func (a *Allocator) UInts(l, c int) []uint64 { + a.account(c, uint64Size) + return make([]uint64, l, c) +} + +// AppendUInts appends uint64s to a slice +func (a *Allocator) AppendUInts(slice []uint64, vs ...uint64) []uint64 { + if cap(slice)-len(slice) > len(vs) { + return append(slice, vs...) + } + s := append(slice, vs...) + diff := cap(s) - cap(slice) + a.account(diff, uint64Size) + return s +} + +// Floats makes a slice of float64 values. +func (a *Allocator) Floats(l, c int) []float64 { + a.account(c, float64Size) + return make([]float64, l, c) +} + +// AppendFloats appends float64s to a slice +func (a *Allocator) AppendFloats(slice []float64, vs ...float64) []float64 { + if cap(slice)-len(slice) > len(vs) { + return append(slice, vs...) + } + s := append(slice, vs...) + diff := cap(s) - cap(slice) + a.account(diff, float64Size) + return s +} + +// Strings makes a slice of string values. +// Only the string headers are accounted for. +func (a *Allocator) Strings(l, c int) []string { + a.account(c, stringSize) + return make([]string, l, c) +} + +// AppendStrings appends strings to a slice. 
+// Only the string headers are accounted for. +func (a *Allocator) AppendStrings(slice []string, vs ...string) []string { + //TODO(nathanielc): Account for actual size of strings + if cap(slice)-len(slice) > len(vs) { + return append(slice, vs...) + } + s := append(slice, vs...) + diff := cap(s) - cap(slice) + a.account(diff, stringSize) + return s +} + +// Times makes a slice of Time values. +func (a *Allocator) Times(l, c int) []Time { + a.account(c, timeSize) + return make([]Time, l, c) +} + +// AppendTimes appends Times to a slice +func (a *Allocator) AppendTimes(slice []Time, vs ...Time) []Time { + if cap(slice)-len(slice) > len(vs) { + return append(slice, vs...) + } + s := append(slice, vs...) + diff := cap(s) - cap(slice) + a.account(diff, timeSize) + return s +} + +type AllocError struct { + Limit int64 + Allocated int64 + Wanted int64 +} + +func (a AllocError) Error() string { + return fmt.Sprintf("allocation limit reached: limit %d, allocated: %d, wanted: %d", a.Limit, a.Allocated, a.Wanted) +} diff --git a/vendor/github.com/influxdata/ifql/query/execute/block.go b/vendor/github.com/influxdata/ifql/query/execute/block.go new file mode 100644 index 000000000..8e78b3ddd --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/block.go @@ -0,0 +1,1303 @@ +package execute + +import ( + "bytes" + "fmt" + "sort" + "sync/atomic" + + "github.com/influxdata/ifql/query" +) + +type BlockMetadata interface { + Bounds() Bounds + Tags() Tags +} + +type BlockKey string + +func ToBlockKey(meta BlockMetadata) BlockKey { + // TODO: Make this not a hack + return BlockKey(fmt.Sprintf("%s:%d-%d", meta.Tags().Key(), meta.Bounds().Start, meta.Bounds().Stop)) +} + +type Block interface { + BlockMetadata + + Cols() []ColMeta + // Col returns an iterator to consume the values for a given column. + Col(c int) ValueIterator + + // Times returns an iterator to consume the values for the "_time" column. 
+ Times() ValueIterator + // Values returns an iterator to consume the values for the "_value" column. + // If no column exists and error is returned + Values() (ValueIterator, error) + + // RefCount modifies the reference count on the block by n. + // When the RefCount goes to zero, the block is freed. + RefCount(n int) +} + +// OneTimeBlock is a Block that permits reading data only once. +// Specifically the ValueIterator may only be consumed once from any of the columns. +type OneTimeBlock interface { + Block + onetime() +} + +// CacheOneTimeBlock returns a block that can be read multiple times. +// If the block is not a OneTimeBlock it is returned directly. +// Otherwise its contents are read into a new block. +func CacheOneTimeBlock(b Block, a *Allocator) Block { + _, ok := b.(OneTimeBlock) + if !ok { + return b + } + return CopyBlock(b, a) +} + +// CopyBlock returns a copy of the block and is OneTimeBlock safe. +func CopyBlock(b Block, a *Allocator) Block { + builder := NewColListBlockBuilder(a) + builder.SetBounds(b.Bounds()) + + cols := b.Cols() + colMap := make([]int, len(cols)) + for j, c := range cols { + colMap[j] = j + builder.AddCol(c) + if c.IsTag() && c.Common { + builder.SetCommonString(j, b.Tags()[c.Label]) + } + } + + AppendBlock(b, builder, colMap) + // ColListBlockBuilders do not error + nb, _ := builder.Block() + return nb +} + +// AddBlockCols adds the columns of b onto builder. +func AddBlockCols(b Block, builder BlockBuilder) { + cols := b.Cols() + for j, c := range cols { + builder.AddCol(c) + if c.IsTag() && c.Common { + builder.SetCommonString(j, b.Tags()[c.Label]) + } + } +} + +// AddNewCols adds the columns of b onto builder that did not already exist. +// Returns the mapping of builder cols to block cols. 
+func AddNewCols(b Block, builder BlockBuilder) []int {
+	cols := b.Cols()
+	existing := builder.Cols()
+	colMap := make([]int, len(existing))
+	for j, c := range cols {
+		found := false
+		for ej, ec := range existing {
+			if c.Label == ec.Label {
+				colMap[ej] = j
+				found = true
+				break
+			}
+		}
+		if !found {
+			nj := builder.AddCol(c)
+			colMap = append(colMap, j)
+
+			if c.IsTag() && c.Common {
+				// nj indexes the builder's new column; j indexes b's columns
+				// and may refer to a different builder column.
+				builder.SetCommonString(nj, b.Tags()[c.Label])
+			}
+		}
+	}
+	return colMap
+}
+
+// AppendBlock appends data from block b onto builder.
+// The colMap is a map of builder column index to block column index.
+// AppendBlock is OneTimeBlock safe.
+func AppendBlock(b Block, builder BlockBuilder, colMap []int) {
+	times := b.Times()
+
+	cols := builder.Cols()
+	timeIdx := TimeIdx(cols)
+	times.DoTime(func(ts []Time, rr RowReader) {
+		builder.AppendTimes(timeIdx, ts)
+		for j, c := range cols {
+			if j == timeIdx || c.Common {
+				continue
+			}
+			for i := range ts {
+				switch c.Type {
+				case TBool:
+					builder.AppendBool(j, rr.AtBool(i, colMap[j]))
+				case TInt:
+					builder.AppendInt(j, rr.AtInt(i, colMap[j]))
+				case TUInt:
+					builder.AppendUInt(j, rr.AtUInt(i, colMap[j]))
+				case TFloat:
+					builder.AppendFloat(j, rr.AtFloat(i, colMap[j]))
+				case TString:
+					builder.AppendString(j, rr.AtString(i, colMap[j]))
+				case TTime:
+					builder.AppendTime(j, rr.AtTime(i, colMap[j]))
+				default:
+					PanicUnknownType(c.Type)
+				}
+			}
+		}
+	})
+}
+
+// AppendRow appends a single row from rr onto builder.
+// The colMap is a map of builder column index to rr column index.
+func AppendRow(i int, rr RowReader, builder BlockBuilder, colMap []int) { + for j, c := range builder.Cols() { + switch c.Type { + case TBool: + builder.AppendBool(j, rr.AtBool(i, colMap[j])) + case TInt: + builder.AppendInt(j, rr.AtInt(i, colMap[j])) + case TUInt: + builder.AppendUInt(j, rr.AtUInt(i, colMap[j])) + case TFloat: + builder.AppendFloat(j, rr.AtFloat(i, colMap[j])) + case TString: + builder.AppendString(j, rr.AtString(i, colMap[j])) + case TTime: + builder.AppendTime(j, rr.AtTime(i, colMap[j])) + default: + PanicUnknownType(c.Type) + } + } +} + +// AppendRowForCols appends a single row from rr onto builder for the specified cols. +// The colMap is a map of builder columnm index to rr column index. +func AppendRowForCols(i int, rr RowReader, builder BlockBuilder, cols []ColMeta, colMap []int) { + for j, c := range cols { + switch c.Type { + case TBool: + builder.AppendBool(j, rr.AtBool(i, colMap[j])) + case TInt: + builder.AppendInt(j, rr.AtInt(i, colMap[j])) + case TUInt: + builder.AppendUInt(j, rr.AtUInt(i, colMap[j])) + case TFloat: + builder.AppendFloat(j, rr.AtFloat(i, colMap[j])) + case TString: + builder.AppendString(j, rr.AtString(i, colMap[j])) + case TTime: + builder.AppendTime(j, rr.AtTime(i, colMap[j])) + default: + PanicUnknownType(c.Type) + } + } +} + +// AddTags add columns to the builder for the given tags. +// It is assumed that all tags are common to all rows of this block. 
+func AddTags(t Tags, b BlockBuilder) { + keys := t.Keys() + for _, k := range keys { + j := b.AddCol(ColMeta{ + Label: k, + Type: TString, + Kind: TagColKind, + Common: true, + }) + b.SetCommonString(j, t[k]) + } +} + +var NoDefaultValueColumn = fmt.Errorf("no default value column %q found.", DefaultValueColLabel) + +func ValueCol(cols []ColMeta) (ColMeta, error) { + for _, c := range cols { + if c.Label == DefaultValueColLabel { + return c, nil + } + } + return ColMeta{}, NoDefaultValueColumn +} +func ValueIdx(cols []ColMeta) int { + return ColIdx(DefaultValueColLabel, cols) +} +func TimeIdx(cols []ColMeta) int { + return ColIdx(TimeColLabel, cols) +} +func ColIdx(label string, cols []ColMeta) int { + for j, c := range cols { + if c.Label == label { + return j + } + } + return -1 +} + +// BlockBuilder builds blocks that can be used multiple times +type BlockBuilder interface { + SetBounds(Bounds) + + BlockMetadata + + NRows() int + NCols() int + Cols() []ColMeta + + // AddCol increases the size of the block by one column. + // The index of the column is returned. + AddCol(ColMeta) int + + // Set sets the value at the specified coordinates + // The rows and columns must exist before calling set, otherwise Set panics. + SetBool(i, j int, value bool) + SetInt(i, j int, value int64) + SetUInt(i, j int, value uint64) + SetFloat(i, j int, value float64) + SetString(i, j int, value string) + SetTime(i, j int, value Time) + + // SetCommonString sets a single value for the entire column. + SetCommonString(j int, value string) + + AppendBool(j int, value bool) + AppendInt(j int, value int64) + AppendUInt(j int, value uint64) + AppendFloat(j int, value float64) + AppendString(j int, value string) + AppendTime(j int, value Time) + + AppendFloats(j int, values []float64) + AppendStrings(j int, values []string) + AppendTimes(j int, values []Time) + + // Sort the rows of the by the values of the columns in the order listed. 
+ Sort(cols []string, desc bool) + + // Clear removes all rows, while preserving the column meta data. + ClearData() + + // Block returns the block that has been built. + // Further modifications of the builder will not effect the returned block. + Block() (Block, error) +} + +type DataType int + +const ( + TInvalid DataType = iota + TBool + TInt + TUInt + TFloat + TString + TTime +) + +func (t DataType) String() string { + switch t { + case TInvalid: + return "invalid" + case TBool: + return "bool" + case TInt: + return "int" + case TUInt: + return "uint" + case TFloat: + return "float" + case TString: + return "string" + case TTime: + return "time" + default: + return "unknown" + } +} + +type ColMeta struct { + Label string + Type DataType + Kind ColKind + // Common indicates that the value for the column is shared by all rows. + Common bool +} + +func (c ColMeta) IsTime() bool { + return c.Kind == TimeColKind +} +func (c ColMeta) IsTag() bool { + return c.Kind == TagColKind +} +func (c ColMeta) IsValue() bool { + return c.Kind == ValueColKind +} + +const ( + DefaultValueColLabel = "_value" + TimeColLabel = "_time" +) + +type ColKind int + +const ( + InvalidColKind = iota + TimeColKind + TagColKind + ValueColKind +) + +func (k ColKind) String() string { + switch k { + case InvalidColKind: + return "invalid" + case TimeColKind: + return "time" + case TagColKind: + return "tag" + case ValueColKind: + return "value" + default: + return "unknown" + } +} + +var ( + TimeCol = ColMeta{ + Label: TimeColLabel, + Type: TTime, + Kind: TimeColKind, + } +) + +type BlockIterator interface { + Do(f func(Block) error) error +} + +type ValueIterator interface { + DoBool(f func([]bool, RowReader)) + DoInt(f func([]int64, RowReader)) + DoUInt(f func([]uint64, RowReader)) + DoFloat(f func([]float64, RowReader)) + DoString(f func([]string, RowReader)) + DoTime(f func([]Time, RowReader)) +} + +type RowReader interface { + Cols() []ColMeta + // AtBool returns the bool value of another 
column and given index. + AtBool(i, j int) bool + // AtInt returns the int value of another column and given index. + AtInt(i, j int) int64 + // AtUInt returns the uint value of another column and given index. + AtUInt(i, j int) uint64 + // AtFloat returns the float value of another column and given index. + AtFloat(i, j int) float64 + // AtString returns the string value of another column and given index. + AtString(i, j int) string + // AtTime returns the time value of another column and given index. + AtTime(i, j int) Time +} + +func TagsForRow(i int, rr RowReader) Tags { + cols := rr.Cols() + tags := make(Tags, len(cols)) + for j, c := range cols { + if c.IsTag() { + tags[c.Label] = rr.AtString(i, j) + } + } + return tags +} + +type Tags map[string]string + +func (t Tags) Copy() Tags { + nt := make(Tags, len(t)) + for k, v := range t { + nt[k] = v + } + return nt +} + +func (t Tags) Equal(o Tags) bool { + if len(t) != len(o) { + return false + } + for k, v := range t { + if o[k] != v { + return false + } + } + return true +} + +func (t Tags) Keys() []string { + keys := make([]string, 0, len(t)) + for k := range t { + keys = append(keys, k) + } + sort.Strings(keys) + return keys +} + +type TagsKey string + +func (t Tags) Key() TagsKey { + keys := make([]string, 0, len(t)) + for k := range t { + keys = append(keys, k) + } + sort.Strings(keys) + return TagsToKey(keys, t) +} + +// Subset creates a new Tags that is a subset of t, using the list of keys. +// If a keys is provided that does not exist on t, then a subset is not possible and +// the boolean return value is false. 
+func (t Tags) Subset(keys []string) (Tags, bool) { + subset := make(Tags, len(keys)) + for _, k := range keys { + v, ok := t[k] + if !ok { + return nil, false + } + subset[k] = v + } + return subset, true +} + +func (t Tags) IntersectingSubset(keys []string) Tags { + subset := make(Tags, len(keys)) + for _, k := range keys { + v, ok := t[k] + if ok { + subset[k] = v + } + } + return subset +} + +func TagsToKey(order []string, t Tags) TagsKey { + var buf bytes.Buffer + for i, k := range order { + if i > 0 { + buf.WriteRune(',') + } + buf.WriteString(k) + buf.WriteRune('=') + buf.WriteString(t[k]) + } + return TagsKey(buf.String()) +} + +type blockMetadata struct { + tags Tags + bounds Bounds +} + +func (m blockMetadata) Tags() Tags { + return m.tags +} +func (m blockMetadata) Bounds() Bounds { + return m.bounds +} + +type ColListBlockBuilder struct { + blk *ColListBlock + key BlockKey + alloc *Allocator +} + +func NewColListBlockBuilder(a *Allocator) *ColListBlockBuilder { + return &ColListBlockBuilder{ + blk: new(ColListBlock), + alloc: a, + } +} + +func (b ColListBlockBuilder) SetBounds(bounds Bounds) { + b.blk.bounds = bounds +} +func (b ColListBlockBuilder) Bounds() Bounds { + return b.blk.bounds +} + +func (b ColListBlockBuilder) Tags() Tags { + return b.blk.tags +} +func (b ColListBlockBuilder) NRows() int { + return b.blk.nrows +} +func (b ColListBlockBuilder) NCols() int { + return len(b.blk.cols) +} +func (b ColListBlockBuilder) Cols() []ColMeta { + return b.blk.colMeta +} + +func (b ColListBlockBuilder) AddCol(c ColMeta) int { + var col column + switch c.Type { + case TBool: + col = &boolColumn{ + ColMeta: c, + alloc: b.alloc, + } + case TInt: + col = &intColumn{ + ColMeta: c, + alloc: b.alloc, + } + case TUInt: + col = &uintColumn{ + ColMeta: c, + alloc: b.alloc, + } + case TFloat: + col = &floatColumn{ + ColMeta: c, + alloc: b.alloc, + } + case TString: + if c.Common { + col = &commonStrColumn{ + ColMeta: c, + } + } else { + col = &stringColumn{ + 
ColMeta: c, + alloc: b.alloc, + } + } + case TTime: + col = &timeColumn{ + ColMeta: c, + alloc: b.alloc, + } + default: + PanicUnknownType(c.Type) + } + b.blk.colMeta = append(b.blk.colMeta, c) + b.blk.cols = append(b.blk.cols, col) + return len(b.blk.cols) - 1 +} + +func (b ColListBlockBuilder) SetBool(i int, j int, value bool) { + b.checkColType(j, TBool) + b.blk.cols[j].(*boolColumn).data[i] = value +} +func (b ColListBlockBuilder) AppendBool(j int, value bool) { + b.checkColType(j, TBool) + col := b.blk.cols[j].(*boolColumn) + col.data = b.alloc.AppendBools(col.data, value) + b.blk.nrows = len(col.data) +} +func (b ColListBlockBuilder) AppendBools(j int, values []bool) { + b.checkColType(j, TBool) + col := b.blk.cols[j].(*boolColumn) + col.data = b.alloc.AppendBools(col.data, values...) + b.blk.nrows = len(col.data) +} + +func (b ColListBlockBuilder) SetInt(i int, j int, value int64) { + b.checkColType(j, TInt) + b.blk.cols[j].(*intColumn).data[i] = value +} +func (b ColListBlockBuilder) AppendInt(j int, value int64) { + b.checkColType(j, TInt) + col := b.blk.cols[j].(*intColumn) + col.data = b.alloc.AppendInts(col.data, value) + b.blk.nrows = len(col.data) +} +func (b ColListBlockBuilder) AppendInts(j int, values []int64) { + b.checkColType(j, TInt) + col := b.blk.cols[j].(*intColumn) + col.data = b.alloc.AppendInts(col.data, values...) + b.blk.nrows = len(col.data) +} + +func (b ColListBlockBuilder) SetUInt(i int, j int, value uint64) { + b.checkColType(j, TUInt) + b.blk.cols[j].(*uintColumn).data[i] = value +} +func (b ColListBlockBuilder) AppendUInt(j int, value uint64) { + b.checkColType(j, TUInt) + col := b.blk.cols[j].(*uintColumn) + col.data = b.alloc.AppendUInts(col.data, value) + b.blk.nrows = len(col.data) +} +func (b ColListBlockBuilder) AppendUInts(j int, values []uint64) { + b.checkColType(j, TUInt) + col := b.blk.cols[j].(*uintColumn) + col.data = b.alloc.AppendUInts(col.data, values...) 
+ b.blk.nrows = len(col.data) +} + +func (b ColListBlockBuilder) SetFloat(i int, j int, value float64) { + b.checkColType(j, TFloat) + b.blk.cols[j].(*floatColumn).data[i] = value +} +func (b ColListBlockBuilder) AppendFloat(j int, value float64) { + b.checkColType(j, TFloat) + col := b.blk.cols[j].(*floatColumn) + col.data = b.alloc.AppendFloats(col.data, value) + b.blk.nrows = len(col.data) +} +func (b ColListBlockBuilder) AppendFloats(j int, values []float64) { + b.checkColType(j, TFloat) + col := b.blk.cols[j].(*floatColumn) + col.data = b.alloc.AppendFloats(col.data, values...) + b.blk.nrows = len(col.data) +} + +func (b ColListBlockBuilder) SetString(i int, j int, value string) { + b.checkColType(j, TString) + b.blk.cols[j].(*stringColumn).data[i] = value +} +func (b ColListBlockBuilder) AppendString(j int, value string) { + meta := b.blk.cols[j].Meta() + checkColType(meta, TString) + if meta.Common { + v := b.blk.cols[j].(*commonStrColumn).value + if value != v { + panic(fmt.Errorf("attempting to append a different value to the column %s, which has all common values", meta.Label)) + } + return + } + col := b.blk.cols[j].(*stringColumn) + col.data = b.alloc.AppendStrings(col.data, value) + b.blk.nrows = len(col.data) +} +func (b ColListBlockBuilder) AppendStrings(j int, values []string) { + b.checkColType(j, TString) + col := b.blk.cols[j].(*stringColumn) + col.data = b.alloc.AppendStrings(col.data, values...) 
+ b.blk.nrows = len(col.data) +} +func (b ColListBlockBuilder) SetCommonString(j int, value string) { + meta := b.blk.cols[j].Meta() + checkColType(meta, TString) + if !meta.Common { + panic(fmt.Errorf("cannot set common value for column %s, column is not marked as common", meta.Label)) + } + b.blk.cols[j].(*commonStrColumn).value = value + if meta.IsTag() { + if b.blk.tags == nil { + b.blk.tags = make(Tags) + } + b.blk.tags[meta.Label] = value + } +} + +func (b ColListBlockBuilder) SetTime(i int, j int, value Time) { + b.checkColType(j, TTime) + b.blk.cols[j].(*timeColumn).data[i] = value +} +func (b ColListBlockBuilder) AppendTime(j int, value Time) { + b.checkColType(j, TTime) + col := b.blk.cols[j].(*timeColumn) + col.data = b.alloc.AppendTimes(col.data, value) + b.blk.nrows = len(col.data) +} +func (b ColListBlockBuilder) AppendTimes(j int, values []Time) { + b.checkColType(j, TTime) + col := b.blk.cols[j].(*timeColumn) + col.data = b.alloc.AppendTimes(col.data, values...) + b.blk.nrows = len(col.data) +} + +func (b ColListBlockBuilder) checkColType(j int, typ DataType) { + checkColType(b.blk.colMeta[j], typ) +} + +func checkColType(col ColMeta, typ DataType) { + if col.Type != typ { + panic(fmt.Errorf("column %s is not of type %v", col.Label, typ)) + } +} + +func PanicUnknownType(typ DataType) { + panic(fmt.Errorf("unknown type %v", typ)) +} + +func (b ColListBlockBuilder) Block() (Block, error) { + // Create copy in mutable state + return b.blk.Copy(), nil +} + +// RawBlock returns the underlying block being constructed. +// The Block returned will be modified by future calls to any BlockBuilder methods. 
+func (b ColListBlockBuilder) RawBlock() *ColListBlock { + // Create copy in mutable state + return b.blk +} + +func (b ColListBlockBuilder) ClearData() { + for _, c := range b.blk.cols { + c.Clear() + } + b.blk.nrows = 0 +} + +func (b ColListBlockBuilder) Sort(cols []string, desc bool) { + colIdxs := make([]int, len(cols)) + for i, label := range cols { + for j, c := range b.blk.colMeta { + if c.Label == label { + colIdxs[i] = j + break + } + } + } + s := colListBlockSorter{cols: colIdxs, desc: desc, b: b.blk} + sort.Sort(s) +} + +// ColListBlock implements Block using list of columns. +// All data for the block is stored in RAM. +// As a result At* methods are provided directly on the block for easy access. +type ColListBlock struct { + bounds Bounds + tags Tags + + colMeta []ColMeta + cols []column + nrows int + + refCount int32 +} + +func (b *ColListBlock) RefCount(n int) { + c := atomic.AddInt32(&b.refCount, int32(n)) + if c == 0 { + for _, c := range b.cols { + c.Clear() + } + } +} + +func (b *ColListBlock) Bounds() Bounds { + return b.bounds +} + +func (b *ColListBlock) Tags() Tags { + return b.tags +} + +func (b *ColListBlock) Cols() []ColMeta { + return b.colMeta +} +func (b ColListBlock) NRows() int { + return b.nrows +} + +func (b *ColListBlock) Col(c int) ValueIterator { + return colListValueIterator{ + col: c, + colMeta: b.colMeta, + cols: b.cols, + nrows: b.nrows, + } +} + +func (b *ColListBlock) Values() (ValueIterator, error) { + j := ValueIdx(b.colMeta) + if j >= 0 { + return colListValueIterator{ + col: j, + colMeta: b.colMeta, + cols: b.cols, + nrows: b.nrows, + }, nil + } + return nil, NoDefaultValueColumn +} + +func (b *ColListBlock) Times() ValueIterator { + j := TimeIdx(b.colMeta) + if j >= 0 { + return colListValueIterator{ + col: j, + colMeta: b.colMeta, + cols: b.cols, + nrows: b.nrows, + } + } + return nil +} +func (b *ColListBlock) AtBool(i, j int) bool { + checkColType(b.colMeta[j], TBool) + return b.cols[j].(*boolColumn).data[i] +} 
+func (b *ColListBlock) AtInt(i, j int) int64 {
+	checkColType(b.colMeta[j], TInt)
+	return b.cols[j].(*intColumn).data[i]
+}
+func (b *ColListBlock) AtUInt(i, j int) uint64 {
+	checkColType(b.colMeta[j], TUInt)
+	return b.cols[j].(*uintColumn).data[i]
+}
+func (b *ColListBlock) AtFloat(i, j int) float64 {
+	checkColType(b.colMeta[j], TFloat)
+	return b.cols[j].(*floatColumn).data[i]
+}
+func (b *ColListBlock) AtString(i, j int) string {
+	meta := b.colMeta[j]
+	checkColType(meta, TString)
+	if meta.IsTag() && meta.Common {
+		return b.cols[j].(*commonStrColumn).value
+	}
+	return b.cols[j].(*stringColumn).data[i]
+}
+func (b *ColListBlock) AtTime(i, j int) Time {
+	checkColType(b.colMeta[j], TTime)
+	return b.cols[j].(*timeColumn).data[i]
+}
+
+func (b *ColListBlock) Copy() *ColListBlock {
+	cpy := new(ColListBlock)
+	cpy.bounds = b.bounds
+	cpy.tags = b.tags.Copy()
+	cpy.nrows = b.nrows
+
+	cpy.colMeta = make([]ColMeta, len(b.colMeta))
+	copy(cpy.colMeta, b.colMeta)
+
+	cpy.cols = make([]column, len(b.cols))
+	for i, c := range b.cols {
+		cpy.cols[i] = c.Copy()
+	}
+
+	return cpy
+}
+
+type colListValueIterator struct {
+	col     int
+	cols    []column
+	colMeta []ColMeta
+	nrows   int
+}
+
+func (itr colListValueIterator) Cols() []ColMeta {
+	return itr.colMeta
+}
+func (itr colListValueIterator) DoBool(f func([]bool, RowReader)) {
+	checkColType(itr.colMeta[itr.col], TBool)
+	f(itr.cols[itr.col].(*boolColumn).data, itr)
+}
+func (itr colListValueIterator) DoInt(f func([]int64, RowReader)) {
+	checkColType(itr.colMeta[itr.col], TInt)
+	f(itr.cols[itr.col].(*intColumn).data, itr)
+}
+func (itr colListValueIterator) DoUInt(f func([]uint64, RowReader)) {
+	checkColType(itr.colMeta[itr.col], TUInt)
+	f(itr.cols[itr.col].(*uintColumn).data, itr)
+}
+func (itr colListValueIterator) DoFloat(f func([]float64, RowReader)) {
+	checkColType(itr.colMeta[itr.col], TFloat)
+	f(itr.cols[itr.col].(*floatColumn).data, itr)
+}
+func (itr colListValueIterator) DoString(f func([]string, RowReader)) {
+	meta := itr.colMeta[itr.col]
+	checkColType(meta, TString)
+	if meta.IsTag() && meta.Common {
+		value := itr.cols[itr.col].(*commonStrColumn).value
+		strs := make([]string, itr.nrows)
+		for i := range strs {
+			strs[i] = value
+		}
+		f(strs, itr)
+		// Must return here: a common column is a *commonStrColumn, so the
+		// *stringColumn assertion below would panic if we fell through.
+		return
+	}
+	f(itr.cols[itr.col].(*stringColumn).data, itr)
+}
+func (itr colListValueIterator) DoTime(f func([]Time, RowReader)) {
+	checkColType(itr.colMeta[itr.col], TTime)
+	f(itr.cols[itr.col].(*timeColumn).data, itr)
+}
+func (itr colListValueIterator) AtBool(i, j int) bool {
+	checkColType(itr.colMeta[j], TBool)
+	return itr.cols[j].(*boolColumn).data[i]
+}
+func (itr colListValueIterator) AtInt(i, j int) int64 {
+	checkColType(itr.colMeta[j], TInt)
+	return itr.cols[j].(*intColumn).data[i]
+}
+func (itr colListValueIterator) AtUInt(i, j int) uint64 {
+	checkColType(itr.colMeta[j], TUInt)
+	return itr.cols[j].(*uintColumn).data[i]
+}
+func (itr colListValueIterator) AtFloat(i, j int) float64 {
+	checkColType(itr.colMeta[j], TFloat)
+	return itr.cols[j].(*floatColumn).data[i]
+}
+func (itr colListValueIterator) AtString(i, j int) string {
+	meta := itr.colMeta[j]
+	checkColType(meta, TString)
+	if meta.IsTag() && meta.Common {
+		return itr.cols[j].(*commonStrColumn).value
+	}
+	return itr.cols[j].(*stringColumn).data[i]
+}
+func (itr colListValueIterator) AtTime(i, j int) Time {
+	checkColType(itr.colMeta[j], TTime)
+	return itr.cols[j].(*timeColumn).data[i]
+}
+
+type colListBlockSorter struct {
+	cols []int
+	desc bool
+	b    *ColListBlock
+}
+
+func (c colListBlockSorter) Len() int {
+	return c.b.nrows
+}
+
+func (c colListBlockSorter) Less(x int, y int) (less bool) {
+	for _, j := range c.cols {
+		if !c.b.cols[j].Equal(x, y) {
+			less = c.b.cols[j].Less(x, y)
+			break
+		}
+	}
+	if c.desc {
+		less = !less
+	}
+	return
+}
+
+func (c colListBlockSorter) Swap(x int, y int) {
+	for _, col := range c.b.cols {
+		col.Swap(x, y)
+	}
+}
+
+type column interface {
+	Meta() ColMeta
+	Clear()
+	Copy() column
+	Equal(i, j
int) bool + Less(i, j int) bool + Swap(i, j int) +} + +type boolColumn struct { + ColMeta + data []bool + alloc *Allocator +} + +func (c *boolColumn) Meta() ColMeta { + return c.ColMeta +} + +func (c *boolColumn) Clear() { + c.alloc.Free(len(c.data), boolSize) + c.data = c.data[0:0] +} +func (c *boolColumn) Copy() column { + cpy := &boolColumn{ + ColMeta: c.ColMeta, + alloc: c.alloc, + } + l := len(c.data) + cpy.data = c.alloc.Bools(l, l) + copy(cpy.data, c.data) + return cpy +} +func (c *boolColumn) Equal(i, j int) bool { + return c.data[i] == c.data[j] +} +func (c *boolColumn) Less(i, j int) bool { + if c.data[i] == c.data[j] { + return false + } + return c.data[i] +} +func (c *boolColumn) Swap(i, j int) { + c.data[i], c.data[j] = c.data[j], c.data[i] +} + +type intColumn struct { + ColMeta + data []int64 + alloc *Allocator +} + +func (c *intColumn) Meta() ColMeta { + return c.ColMeta +} + +func (c *intColumn) Clear() { + c.alloc.Free(len(c.data), int64Size) + c.data = c.data[0:0] +} +func (c *intColumn) Copy() column { + cpy := &intColumn{ + ColMeta: c.ColMeta, + alloc: c.alloc, + } + l := len(c.data) + cpy.data = c.alloc.Ints(l, l) + copy(cpy.data, c.data) + return cpy +} +func (c *intColumn) Equal(i, j int) bool { + return c.data[i] == c.data[j] +} +func (c *intColumn) Less(i, j int) bool { + return c.data[i] < c.data[j] +} +func (c *intColumn) Swap(i, j int) { + c.data[i], c.data[j] = c.data[j], c.data[i] +} + +type uintColumn struct { + ColMeta + data []uint64 + alloc *Allocator +} + +func (c *uintColumn) Meta() ColMeta { + return c.ColMeta +} + +func (c *uintColumn) Clear() { + c.alloc.Free(len(c.data), uint64Size) + c.data = c.data[0:0] +} +func (c *uintColumn) Copy() column { + cpy := &uintColumn{ + ColMeta: c.ColMeta, + alloc: c.alloc, + } + l := len(c.data) + cpy.data = c.alloc.UInts(l, l) + copy(cpy.data, c.data) + return cpy +} +func (c *uintColumn) Equal(i, j int) bool { + return c.data[i] == c.data[j] +} +func (c *uintColumn) Less(i, j int) bool { + 
return c.data[i] < c.data[j] +} +func (c *uintColumn) Swap(i, j int) { + c.data[i], c.data[j] = c.data[j], c.data[i] +} + +type floatColumn struct { + ColMeta + data []float64 + alloc *Allocator +} + +func (c *floatColumn) Meta() ColMeta { + return c.ColMeta +} + +func (c *floatColumn) Clear() { + c.alloc.Free(len(c.data), float64Size) + c.data = c.data[0:0] +} +func (c *floatColumn) Copy() column { + cpy := &floatColumn{ + ColMeta: c.ColMeta, + alloc: c.alloc, + } + l := len(c.data) + cpy.data = c.alloc.Floats(l, l) + copy(cpy.data, c.data) + return cpy +} +func (c *floatColumn) Equal(i, j int) bool { + return c.data[i] == c.data[j] +} +func (c *floatColumn) Less(i, j int) bool { + return c.data[i] < c.data[j] +} +func (c *floatColumn) Swap(i, j int) { + c.data[i], c.data[j] = c.data[j], c.data[i] +} + +type stringColumn struct { + ColMeta + data []string + alloc *Allocator +} + +func (c *stringColumn) Meta() ColMeta { + return c.ColMeta +} + +func (c *stringColumn) Clear() { + c.alloc.Free(len(c.data), stringSize) + c.data = c.data[0:0] +} +func (c *stringColumn) Copy() column { + cpy := &stringColumn{ + ColMeta: c.ColMeta, + alloc: c.alloc, + } + + l := len(c.data) + cpy.data = c.alloc.Strings(l, l) + copy(cpy.data, c.data) + return cpy +} +func (c *stringColumn) Equal(i, j int) bool { + return c.data[i] == c.data[j] +} +func (c *stringColumn) Less(i, j int) bool { + return c.data[i] < c.data[j] +} +func (c *stringColumn) Swap(i, j int) { + c.data[i], c.data[j] = c.data[j], c.data[i] +} + +type timeColumn struct { + ColMeta + data []Time + alloc *Allocator +} + +func (c *timeColumn) Meta() ColMeta { + return c.ColMeta +} + +func (c *timeColumn) Clear() { + c.alloc.Free(len(c.data), timeSize) + c.data = c.data[0:0] +} +func (c *timeColumn) Copy() column { + cpy := &timeColumn{ + ColMeta: c.ColMeta, + alloc: c.alloc, + } + l := len(c.data) + cpy.data = c.alloc.Times(l, l) + copy(cpy.data, c.data) + return cpy +} +func (c *timeColumn) Equal(i, j int) bool { + 
return c.data[i] == c.data[j] +} +func (c *timeColumn) Less(i, j int) bool { + return c.data[i] < c.data[j] +} +func (c *timeColumn) Swap(i, j int) { + c.data[i], c.data[j] = c.data[j], c.data[i] +} + +//commonStrColumn has the same string value for all rows +type commonStrColumn struct { + ColMeta + value string +} + +func (c *commonStrColumn) Meta() ColMeta { + return c.ColMeta +} +func (c *commonStrColumn) Clear() { +} +func (c *commonStrColumn) Copy() column { + cpy := new(commonStrColumn) + *cpy = *c + return cpy +} +func (c *commonStrColumn) Equal(i, j int) bool { + return true +} +func (c *commonStrColumn) Less(i, j int) bool { + return false +} +func (c *commonStrColumn) Swap(i, j int) {} + +type BlockBuilderCache interface { + // BlockBuilder returns an existing or new BlockBuilder for the given meta data. + // The boolean return value indicates if BlockBuilder is new. + BlockBuilder(meta BlockMetadata) (BlockBuilder, bool) + ForEachBuilder(f func(BlockKey, BlockBuilder)) +} + +type blockBuilderCache struct { + blocks map[BlockKey]blockState + alloc *Allocator + + triggerSpec query.TriggerSpec +} + +func NewBlockBuilderCache(a *Allocator) *blockBuilderCache { + return &blockBuilderCache{ + blocks: make(map[BlockKey]blockState), + alloc: a, + } +} + +type blockState struct { + builder BlockBuilder + trigger Trigger +} + +func (d *blockBuilderCache) SetTriggerSpec(ts query.TriggerSpec) { + d.triggerSpec = ts +} + +func (d *blockBuilderCache) Block(key BlockKey) (Block, error) { + return d.blocks[key].builder.Block() +} +func (d *blockBuilderCache) BlockMetadata(key BlockKey) BlockMetadata { + return d.blocks[key].builder +} + +// BlockBuilder will return the builder for the specified block. +// If no builder exists, one will be created. 
+func (d *blockBuilderCache) BlockBuilder(meta BlockMetadata) (BlockBuilder, bool) {
+	key := ToBlockKey(meta)
+	b, ok := d.blocks[key]
+	if !ok {
+		builder := NewColListBlockBuilder(d.alloc)
+		builder.SetBounds(meta.Bounds())
+		t := NewTriggerFromSpec(d.triggerSpec)
+		b = blockState{
+			builder: builder,
+			trigger: t,
+		}
+		d.blocks[key] = b
+	}
+	return b.builder, !ok
+}
+
+func (d *blockBuilderCache) ForEachBuilder(f func(BlockKey, BlockBuilder)) {
+	for k, b := range d.blocks {
+		f(k, b.builder)
+	}
+}
+
+func (d *blockBuilderCache) DiscardBlock(key BlockKey) {
+	d.blocks[key].builder.ClearData()
+}
+func (d *blockBuilderCache) ExpireBlock(key BlockKey) {
+	d.blocks[key].builder.ClearData()
+	delete(d.blocks, key)
+}
+
+func (d *blockBuilderCache) ForEach(f func(BlockKey)) {
+	for bk := range d.blocks {
+		f(bk)
+	}
+}
+
+func (d *blockBuilderCache) ForEachWithContext(f func(BlockKey, Trigger, BlockContext)) {
+	for bk, b := range d.blocks {
+		f(bk, b.trigger, BlockContext{
+			Bounds: b.builder.Bounds(),
+			Count:  b.builder.NRows(),
+		})
+	}
+}
diff --git a/vendor/github.com/influxdata/ifql/query/execute/bounds.go b/vendor/github.com/influxdata/ifql/query/execute/bounds.go
new file mode 100644
index 000000000..eea8ebb72
--- /dev/null
+++ b/vendor/github.com/influxdata/ifql/query/execute/bounds.go
@@ -0,0 +1,35 @@
+package execute
+
+import "fmt"
+
+type Bounds struct {
+	Start Time
+	Stop  Time
+}
+
+var AllTime = Bounds{
+	Start: MinTime,
+	Stop:  MaxTime,
+}
+
+func (b Bounds) String() string {
+	return fmt.Sprintf("[%v, %v)", b.Start, b.Stop)
+}
+
+func (b Bounds) Contains(t Time) bool {
+	return t >= b.Start && t < b.Stop
+}
+
+// Overlaps reports whether the half-open ranges b and o share any instant.
+// Checking each range's Start against the other covers containment in both
+// directions and avoids false positives from o's exclusive Stop endpoint.
+func (b Bounds) Overlaps(o Bounds) bool {
+	return b.Contains(o.Start) || o.Contains(b.Start)
+}
+
+func (b Bounds) Equal(o Bounds) bool {
+	return b == o
+}
+
+func (b Bounds) Shift(d Duration) Bounds {
+	return Bounds{Start: b.Start.Add(d), Stop: b.Stop.Add(d)}
+}
diff --git a/vendor/github.com/influxdata/ifql/query/execute/dataset.go
b/vendor/github.com/influxdata/ifql/query/execute/dataset.go new file mode 100644 index 000000000..3ecd2ce37 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/dataset.go @@ -0,0 +1,189 @@ +package execute + +import ( + "github.com/influxdata/ifql/query" + uuid "github.com/satori/go.uuid" +) + +// Dataset represents the set of data produced by a transformation. +type Dataset interface { + Node + + RetractBlock(key BlockKey) error + UpdateProcessingTime(t Time) error + UpdateWatermark(mark Time) error + Finish(error) + + SetTriggerSpec(t query.TriggerSpec) +} + +// DataCache holds all working data for a transformation. +type DataCache interface { + BlockMetadata(BlockKey) BlockMetadata + Block(BlockKey) (Block, error) + + ForEach(func(BlockKey)) + ForEachWithContext(func(BlockKey, Trigger, BlockContext)) + + DiscardBlock(BlockKey) + ExpireBlock(BlockKey) + + SetTriggerSpec(t query.TriggerSpec) +} + +type AccumulationMode int + +const ( + DiscardingMode AccumulationMode = iota + AccumulatingMode + AccumulatingRetractingMode +) + +type DatasetID uuid.UUID + +func (id DatasetID) String() string { + return uuid.UUID(id).String() +} + +var ZeroDatasetID DatasetID + +func (id DatasetID) IsZero() bool { + return id == ZeroDatasetID +} + +type dataset struct { + id DatasetID + + ts []Transformation + accMode AccumulationMode + + watermark Time + processingTime Time + + cache DataCache +} + +func NewDataset(id DatasetID, accMode AccumulationMode, cache DataCache) *dataset { + return &dataset{ + id: id, + accMode: accMode, + cache: cache, + } +} + +func (d *dataset) AddTransformation(t Transformation) { + d.ts = append(d.ts, t) +} + +func (d *dataset) SetTriggerSpec(spec query.TriggerSpec) { + d.cache.SetTriggerSpec(spec) +} + +func (d *dataset) UpdateWatermark(mark Time) error { + d.watermark = mark + if err := d.evalTriggers(); err != nil { + return err + } + for _, t := range d.ts { + if err := t.UpdateWatermark(d.id, mark); err != nil { + return err + } + 
} + return nil +} + +func (d *dataset) UpdateProcessingTime(time Time) error { + d.processingTime = time + if err := d.evalTriggers(); err != nil { + return err + } + for _, t := range d.ts { + if err := t.UpdateProcessingTime(d.id, time); err != nil { + return err + } + } + return nil +} + +func (d *dataset) evalTriggers() (err error) { + d.cache.ForEachWithContext(func(bk BlockKey, trigger Trigger, bc BlockContext) { + if err != nil { + // Skip the rest once we have encountered an error + return + } + c := TriggerContext{ + Block: bc, + Watermark: d.watermark, + CurrentProcessingTime: d.processingTime, + } + + if trigger.Triggered(c) { + err = d.triggerBlock(bk) + } + if trigger.Finished() { + d.expireBlock(bk) + } + }) + return err +} + +func (d *dataset) triggerBlock(key BlockKey) error { + b, err := d.cache.Block(key) + if err != nil { + return err + } + b.RefCount(len(d.ts)) + switch d.accMode { + case DiscardingMode: + for _, t := range d.ts { + if err := t.Process(d.id, b); err != nil { + return err + } + } + d.cache.DiscardBlock(key) + case AccumulatingRetractingMode: + for _, t := range d.ts { + if err := t.RetractBlock(d.id, b); err != nil { + return err + } + } + fallthrough + case AccumulatingMode: + for _, t := range d.ts { + if err := t.Process(d.id, b); err != nil { + return err + } + } + } + return nil +} + +func (d *dataset) expireBlock(key BlockKey) { + d.cache.ExpireBlock(key) +} + +func (d *dataset) RetractBlock(key BlockKey) error { + d.cache.DiscardBlock(key) + for _, t := range d.ts { + if err := t.RetractBlock(d.id, d.cache.BlockMetadata(key)); err != nil { + return err + } + } + return nil +} + +func (d *dataset) Finish(err error) { + if err == nil { + // Only trigger blocks we if we not finishing because of an error. 
+ d.cache.ForEach(func(bk BlockKey) { + if err != nil { + return + } + err = d.triggerBlock(bk) + d.cache.ExpireBlock(bk) + }) + } + for _, t := range d.ts { + t.Finish(d.id, err) + } +} diff --git a/vendor/github.com/influxdata/ifql/query/execute/dispatcher.go b/vendor/github.com/influxdata/ifql/query/execute/dispatcher.go new file mode 100644 index 000000000..42f8169f2 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/dispatcher.go @@ -0,0 +1,119 @@ +package execute + +import ( + "context" + "fmt" + "runtime/debug" + "sync" +) + +// Dispatcher schedules work for a query. +// Each transformation submits work to be done to the dispatcher. +// Then the dispatcher schedules to work based on the available resources. +type Dispatcher interface { + // Schedule fn to be executed + Schedule(fn ScheduleFunc) +} + +// ScheduleFunc is a function that represents work to do. +// The throughput is the maximum number of messages to process for this scheduling. +type ScheduleFunc func(throughput int) + +// poolDispatcher implements Dispatcher using a pool of goroutines. 
+type poolDispatcher struct { + work chan ScheduleFunc + + throughput int + + mu sync.Mutex + closed bool + closing chan struct{} + wg sync.WaitGroup + err error + errC chan error +} + +func newPoolDispatcher(throughput int) *poolDispatcher { + return &poolDispatcher{ + throughput: throughput, + work: make(chan ScheduleFunc, 100), + closing: make(chan struct{}), + errC: make(chan error, 1), + } +} + +func (d *poolDispatcher) Schedule(fn ScheduleFunc) { + select { + case d.work <- fn: + case <-d.closing: + } +} + +func (d *poolDispatcher) Start(n int, ctx context.Context) { + d.wg.Add(n) + for i := 0; i < n; i++ { + go func() { + defer d.wg.Done() + // Setup panic handling on the worker goroutines + defer func() { + if e := recover(); e != nil { + var err error + switch e := e.(type) { + case error: + err = e + default: + err = fmt.Errorf("%v", e) + } + d.setErr(fmt.Errorf("panic: %v\n%s", err, debug.Stack())) + } + }() + d.run(ctx) + }() + } +} + +// Err returns a channel with will produce an error if encountered. +func (d *poolDispatcher) Err() <-chan error { + d.mu.Lock() + defer d.mu.Unlock() + return d.errC +} + +func (d *poolDispatcher) setErr(err error) { + d.mu.Lock() + defer d.mu.Unlock() + // TODO(nathanielc): Collect all error information. + if d.err == nil { + d.err = err + d.errC <- err + } +} + +//Stop the dispatcher. +func (d *poolDispatcher) Stop() error { + d.mu.Lock() + defer d.mu.Unlock() + if d.closed { + return d.err + } + d.closed = true + close(d.closing) + d.wg.Wait() + return d.err +} + +// run is the logic executed by each worker goroutine in the pool. +func (d *poolDispatcher) run(ctx context.Context) { + for { + select { + case <-ctx.Done(): + // Immediately return, do not process any more work + return + case <-d.closing: + // We are done, nothing left to do. 
+ return + case fn := <-d.work: + fn(d.throughput) + } + } +} diff --git a/vendor/github.com/influxdata/ifql/query/execute/executor.go b/vendor/github.com/influxdata/ifql/query/execute/executor.go new file mode 100644 index 000000000..58ebb1e4b --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/executor.go @@ -0,0 +1,227 @@ +package execute + +import ( + "context" + "fmt" + "runtime/debug" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/plan" + "github.com/pkg/errors" +) + +type Executor interface { + Execute(context.Context, *plan.PlanSpec) (map[string]Result, error) +} + +type executor struct { + c Config +} + +type Config struct { + StorageReader StorageReader +} + +func NewExecutor(c Config) Executor { + e := &executor{ + c: c, + } + return e +} + +type executionState struct { + p *plan.PlanSpec + c *Config + + alloc *Allocator + + resources query.ResourceManagement + + bounds Bounds + + results map[string]Result + sources []Source + + transports []Transport + + dispatcher *poolDispatcher +} + +func (e *executor) Execute(ctx context.Context, p *plan.PlanSpec) (map[string]Result, error) { + es, err := e.createExecutionState(ctx, p) + if err != nil { + return nil, errors.Wrap(err, "failed to initialize execute state") + } + es.do(ctx) + return es.results, nil +} + +func validatePlan(p *plan.PlanSpec) error { + if p.Resources.ConcurrencyQuota == 0 { + return errors.New("plan must have a non-zero concurrency quota") + } + return nil +} + +func (e *executor) createExecutionState(ctx context.Context, p *plan.PlanSpec) (*executionState, error) { + if err := validatePlan(p); err != nil { + return nil, errors.Wrap(err, "invalid plan") + } + es := &executionState{ + p: p, + c: &e.c, + alloc: &Allocator{ + Limit: p.Resources.MemoryBytesQuota, + }, + resources: p.Resources, + results: make(map[string]Result, len(p.Results)), + // TODO(nathanielc): Have the planner specify the dispatcher throughput + dispatcher: 
newPoolDispatcher(10), + bounds: Bounds{ + Start: Time(p.Bounds.Start.Time(p.Now).UnixNano()), + Stop: Time(p.Bounds.Stop.Time(p.Now).UnixNano()), + }, + } + for name, yield := range p.Results { + ds, err := es.createNode(ctx, p.Procedures[yield.ID]) + if err != nil { + return nil, err + } + rs := newResultSink(yield) + ds.AddTransformation(rs) + es.results[name] = rs + } + return es, nil +} + +// DefaultTriggerSpec defines the triggering that should be used for datasets +// whose parent transformation is not a windowing transformation. +var DefaultTriggerSpec = query.AfterWatermarkTriggerSpec{} + +type triggeringSpec interface { + TriggerSpec() query.TriggerSpec +} + +func (es *executionState) createNode(ctx context.Context, pr *plan.Procedure) (Node, error) { + // Build execution context + ec := executionContext{ + es: es, + } + if len(pr.Parents) > 0 { + ec.parents = make([]DatasetID, len(pr.Parents)) + for i, parentID := range pr.Parents { + ec.parents[i] = DatasetID(parentID) + } + } + + // If source create source + if createS, ok := procedureToSource[pr.Spec.Kind()]; ok { + s := createS(pr.Spec, DatasetID(pr.ID), es.c.StorageReader, ec) + es.sources = append(es.sources, s) + return s, nil + } + + createT, ok := procedureToTransformation[pr.Spec.Kind()] + if !ok { + return nil, fmt.Errorf("unsupported procedure %v", pr.Spec.Kind()) + } + + // Create the transformation + t, ds, err := createT(DatasetID(pr.ID), AccumulatingMode, pr.Spec, ec) + if err != nil { + return nil, err + } + + // Setup triggering + var ts query.TriggerSpec = DefaultTriggerSpec + if t, ok := pr.Spec.(triggeringSpec); ok { + ts = t.TriggerSpec() + } + ds.SetTriggerSpec(ts) + + // Recurse creating parents + for _, parentID := range pr.Parents { + parent, err := es.createNode(ctx, es.p.Procedures[parentID]) + if err != nil { + return nil, err + } + transport := newConescutiveTransport(es.dispatcher, t) + es.transports = append(es.transports, transport) + parent.AddTransformation(transport) + 
} + + return ds, nil +} + +func (es *executionState) abort(err error) { + for _, r := range es.results { + r.abort(err) + } +} + +func (es *executionState) do(ctx context.Context) { + for _, src := range es.sources { + go func(src Source) { + // Setup panic handling on the source goroutines + defer func() { + if e := recover(); e != nil { + // We had a panic, abort the entire execution. + var err error + switch e := e.(type) { + case error: + err = e + default: + err = fmt.Errorf("%v", e) + } + es.abort(fmt.Errorf("panic: %v\n%s", err, debug.Stack())) + } + }() + src.Run(ctx) + }(src) + } + es.dispatcher.Start(es.resources.ConcurrencyQuota, ctx) + go func() { + // Wait for all transports to finish + for _, t := range es.transports { + select { + case <-t.Finished(): + case <-ctx.Done(): + es.abort(errors.New("context done")) + case err := <-es.dispatcher.Err(): + if err != nil { + es.abort(err) + } + } + } + // Check for any errors on the dispatcher + err := es.dispatcher.Stop() + if err != nil { + es.abort(err) + } + }() +} + +type executionContext struct { + es *executionState + parents []DatasetID +} + +// Satisfy the ExecutionContext interface + +func (ec executionContext) ResolveTime(qt query.Time) Time { + return Time(qt.Time(ec.es.p.Now).UnixNano()) +} +func (ec executionContext) Bounds() Bounds { + return ec.es.bounds +} + +func (ec executionContext) Allocator() *Allocator { + return ec.es.alloc +} + +func (ec executionContext) Parents() []DatasetID { + return ec.parents +} +func (ec executionContext) ConvertID(id plan.ProcedureID) DatasetID { + return DatasetID(id) +} diff --git a/vendor/github.com/influxdata/ifql/query/execute/executor_test.go b/vendor/github.com/influxdata/ifql/query/execute/executor_test.go new file mode 100644 index 000000000..9b1d873af --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/executor_test.go @@ -0,0 +1,401 @@ +package execute_test + +import ( + "context" + "math" + "testing" + "time" + + 
"github.com/google/go-cmp/cmp" + "github.com/influxdata/ifql/ast" + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/semantic" +) + +var epoch = time.Unix(0, 0) + +func TestExecutor_Execute(t *testing.T) { + testCases := []struct { + name string + src []execute.Block + plan *plan.PlanSpec + exp map[string][]*executetest.Block + }{ + { + name: "simple aggregate", + src: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + execute.TimeCol, + execute.ColMeta{ + Label: execute.DefaultValueColLabel, + Type: execute.TFloat, + Kind: execute.ValueColKind, + }, + }, + Data: [][]interface{}{ + {execute.Time(0), 1.0}, + {execute.Time(1), 2.0}, + {execute.Time(2), 3.0}, + {execute.Time(3), 4.0}, + {execute.Time(4), 5.0}, + }, + }}, + plan: &plan.PlanSpec{ + Now: epoch.Add(5), + Resources: query.ResourceManagement{ + ConcurrencyQuota: 1, + MemoryBytesQuota: math.MaxInt64, + }, + Bounds: plan.BoundsSpec{ + Start: query.Time{Absolute: time.Unix(0, 1)}, + Stop: query.Time{Absolute: time.Unix(0, 5)}, + }, + Procedures: map[plan.ProcedureID]*plan.Procedure{ + plan.ProcedureIDFromOperationID("from"): { + ID: plan.ProcedureIDFromOperationID("from"), + Spec: &functions.FromProcedureSpec{ + Database: "mydb", + BoundsSet: true, + Bounds: plan.BoundsSpec{ + Start: query.Time{ + Relative: -5, + IsRelative: true, + }, + }, + }, + Parents: nil, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("sum")}, + }, + plan.ProcedureIDFromOperationID("sum"): { + ID: plan.ProcedureIDFromOperationID("sum"), + Spec: &functions.SumProcedureSpec{}, + Parents: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("from"), + }, + Children: nil, + }, + }, + Results: map[string]plan.YieldSpec{ + plan.DefaultYieldName: {ID: 
plan.ProcedureIDFromOperationID("sum")}, + }, + }, + exp: map[string][]*executetest.Block{ + plan.DefaultYieldName: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + execute.TimeCol, + execute.ColMeta{ + Label: execute.DefaultValueColLabel, + Type: execute.TFloat, + Kind: execute.ValueColKind, + }, + }, + Data: [][]interface{}{ + {execute.Time(5), 15.0}, + }, + }}, + }, + }, + { + name: "simple join", + src: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + execute.TimeCol, + execute.ColMeta{ + Label: execute.DefaultValueColLabel, + Type: execute.TInt, + Kind: execute.ValueColKind, + }, + }, + Data: [][]interface{}{ + {execute.Time(0), int64(1)}, + {execute.Time(1), int64(2)}, + {execute.Time(2), int64(3)}, + {execute.Time(3), int64(4)}, + {execute.Time(4), int64(5)}, + }, + }}, + plan: &plan.PlanSpec{ + Now: epoch.Add(5), + Resources: query.ResourceManagement{ + ConcurrencyQuota: 1, + MemoryBytesQuota: math.MaxInt64, + }, + Bounds: plan.BoundsSpec{ + Start: query.Time{Absolute: time.Unix(0, 1)}, + Stop: query.Time{Absolute: time.Unix(0, 5)}, + }, + Procedures: map[plan.ProcedureID]*plan.Procedure{ + plan.ProcedureIDFromOperationID("from"): { + ID: plan.ProcedureIDFromOperationID("from"), + Spec: &functions.FromProcedureSpec{ + Database: "mydb", + BoundsSet: true, + Bounds: plan.BoundsSpec{ + Start: query.Time{ + Relative: -5, + IsRelative: true, + }, + }, + }, + Parents: nil, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("sum")}, + }, + plan.ProcedureIDFromOperationID("sum"): { + ID: plan.ProcedureIDFromOperationID("sum"), + Spec: &functions.SumProcedureSpec{}, + Parents: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("from"), + }, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("join")}, + }, + plan.ProcedureIDFromOperationID("count"): { + ID: plan.ProcedureIDFromOperationID("count"), + Spec: 
&functions.CountProcedureSpec{}, + Parents: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("from"), + }, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("join")}, + }, + plan.ProcedureIDFromOperationID("join"): { + ID: plan.ProcedureIDFromOperationID("join"), + Spec: &functions.MergeJoinProcedureSpec{ + TableNames: map[plan.ProcedureID]string{ + plan.ProcedureIDFromOperationID("sum"): "sum", + plan.ProcedureIDFromOperationID("count"): "count", + }, + Fn: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "t"}}}, + Body: &semantic.BinaryExpression{ + Operator: ast.DivisionOperator, + Left: &semantic.MemberExpression{ + Object: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "t", + }, + Property: "sum", + }, + Property: "_value", + }, + Right: &semantic.MemberExpression{ + Object: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{ + Name: "t", + }, + Property: "count", + }, + Property: "_value", + }, + }, + }, + }, + Parents: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("sum"), + plan.ProcedureIDFromOperationID("count"), + }, + Children: nil, + }, + }, + Results: map[string]plan.YieldSpec{ + plan.DefaultYieldName: {ID: plan.ProcedureIDFromOperationID("join")}, + }, + }, + exp: map[string][]*executetest.Block{ + plan.DefaultYieldName: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + execute.TimeCol, + execute.ColMeta{ + Label: execute.DefaultValueColLabel, + Type: execute.TInt, + Kind: execute.ValueColKind, + }, + }, + Data: [][]interface{}{ + {execute.Time(5), int64(3)}, + }, + }}, + }, + }, + { + name: "multiple aggregates", + src: []execute.Block{&executetest.Block{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + execute.TimeCol, + execute.ColMeta{ + Label: execute.DefaultValueColLabel, + Type: execute.TFloat, + Kind: execute.ValueColKind, + }, + 
}, + Data: [][]interface{}{ + {execute.Time(0), 1.0}, + {execute.Time(1), 2.0}, + {execute.Time(2), 3.0}, + {execute.Time(3), 4.0}, + {execute.Time(4), 5.0}, + }, + }}, + plan: &plan.PlanSpec{ + Now: epoch.Add(5), + Resources: query.ResourceManagement{ + ConcurrencyQuota: 1, + MemoryBytesQuota: math.MaxInt64, + }, + Bounds: plan.BoundsSpec{ + Start: query.Time{Absolute: time.Unix(0, 1)}, + Stop: query.Time{Absolute: time.Unix(0, 5)}, + }, + Procedures: map[plan.ProcedureID]*plan.Procedure{ + plan.ProcedureIDFromOperationID("from"): { + ID: plan.ProcedureIDFromOperationID("from"), + Spec: &functions.FromProcedureSpec{ + Database: "mydb", + BoundsSet: true, + Bounds: plan.BoundsSpec{ + Start: query.Time{ + Relative: -5, + IsRelative: true, + }, + }, + }, + Parents: nil, + Children: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("sum"), + plan.ProcedureIDFromOperationID("mean"), + }, + }, + plan.ProcedureIDFromOperationID("sum"): { + ID: plan.ProcedureIDFromOperationID("sum"), + Spec: &functions.SumProcedureSpec{}, + Parents: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("from"), + }, + Children: nil, + }, + plan.ProcedureIDFromOperationID("mean"): { + ID: plan.ProcedureIDFromOperationID("mean"), + Spec: &functions.MeanProcedureSpec{}, + Parents: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("from"), + }, + Children: nil, + }, + }, + Results: map[string]plan.YieldSpec{ + "sum": {ID: plan.ProcedureIDFromOperationID("sum")}, + "mean": {ID: plan.ProcedureIDFromOperationID("mean")}, + }, + }, + exp: map[string][]*executetest.Block{ + "sum": []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: []execute.ColMeta{ + execute.TimeCol, + execute.ColMeta{ + Label: execute.DefaultValueColLabel, + Type: execute.TFloat, + Kind: execute.ValueColKind, + }, + }, + Data: [][]interface{}{ + {execute.Time(5), 15.0}, + }, + }}, + "mean": []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 1, + Stop: 5, + }, + ColMeta: 
[]execute.ColMeta{ + execute.TimeCol, + execute.ColMeta{ + Label: execute.DefaultValueColLabel, + Type: execute.TFloat, + Kind: execute.ValueColKind, + }, + }, + Data: [][]interface{}{ + {execute.Time(5), 3.0}, + }, + }}, + }, + }, + } + + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + c := execute.Config{ + StorageReader: &storageReader{blocks: tc.src}, + } + exe := execute.NewExecutor(c) + results, err := exe.Execute(context.Background(), tc.plan) + if err != nil { + t.Fatal(err) + } + got := make(map[string][]*executetest.Block, len(results)) + for name, r := range results { + if err := r.Blocks().Do(func(b execute.Block) error { + got[name] = append(got[name], executetest.ConvertBlock(b)) + return nil + }); err != nil { + t.Fatal(err) + } + } + + if !cmp.Equal(got, tc.exp) { + t.Error("unexpected results -want/+got", cmp.Diff(tc.exp, got)) + } + }) + } +} + +type storageReader struct { + blocks []execute.Block +} + +func (s storageReader) Close() {} +func (s storageReader) Read(context.Context, map[string]string, execute.ReadSpec, execute.Time, execute.Time) (execute.BlockIterator, error) { + return &storageBlockIterator{ + s: s, + }, nil +} + +type storageBlockIterator struct { + s storageReader +} + +func (bi *storageBlockIterator) Do(f func(execute.Block) error) error { + for _, b := range bi.s.blocks { + if err := f(b); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/influxdata/ifql/query/execute/expression_internal_test.go b/vendor/github.com/influxdata/ifql/query/execute/expression_internal_test.go new file mode 100644 index 000000000..0d380309f --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/expression_internal_test.go @@ -0,0 +1,285 @@ +package execute + +//func TestBinaryFuncs(t *testing.T) { +// testCases := []struct { +// op expression.Operator +// l, r interface{} +// want interface{} +// noFunc bool +// }{ +// {op: expression.AdditionOperator, l: int64(6), r: 
int64(7), want: int64(13)}, +// {op: expression.AdditionOperator, l: int64(6), r: uint64(7), noFunc: true}, +// {op: expression.AdditionOperator, l: int64(6), r: float64(7), noFunc: true}, +// {op: expression.AdditionOperator, l: int64(6), r: bool(false), noFunc: true}, +// {op: expression.AdditionOperator, l: uint64(6), r: int64(7), noFunc: true}, +// {op: expression.AdditionOperator, l: uint64(6), r: uint64(7), want: uint64(13)}, +// {op: expression.AdditionOperator, l: uint64(6), r: float64(7), noFunc: true}, +// {op: expression.AdditionOperator, l: uint64(6), r: bool(false), noFunc: true}, +// {op: expression.AdditionOperator, l: float64(6), r: int64(7), noFunc: true}, +// {op: expression.AdditionOperator, l: float64(6), r: uint64(7), noFunc: true}, +// {op: expression.AdditionOperator, l: float64(6), r: float64(7), want: float64(13)}, +// {op: expression.AdditionOperator, l: float64(6), r: bool(false), noFunc: true}, +// {op: expression.AdditionOperator, l: bool(true), r: int64(7), noFunc: true}, +// {op: expression.AdditionOperator, l: bool(true), r: uint64(7), noFunc: true}, +// {op: expression.AdditionOperator, l: bool(true), r: float64(7), noFunc: true}, +// {op: expression.AdditionOperator, l: bool(true), r: bool(false), noFunc: true}, +// {op: expression.SubtractionOperator, l: int64(6), r: int64(7), want: int64(-1)}, +// {op: expression.SubtractionOperator, l: int64(6), r: uint64(7), noFunc: true}, +// {op: expression.SubtractionOperator, l: int64(6), r: float64(7), noFunc: true}, +// {op: expression.SubtractionOperator, l: int64(6), r: bool(false), noFunc: true}, +// {op: expression.SubtractionOperator, l: uint64(6), r: int64(7), noFunc: true}, +// {op: expression.SubtractionOperator, l: uint64(7), r: uint64(6), want: uint64(1)}, +// {op: expression.SubtractionOperator, l: uint64(6), r: float64(7), noFunc: true}, +// {op: expression.SubtractionOperator, l: uint64(6), r: bool(false), noFunc: true}, +// {op: expression.SubtractionOperator, l: float64(6), 
r: int64(7), noFunc: true}, +// {op: expression.SubtractionOperator, l: float64(6), r: uint64(7), noFunc: true}, +// {op: expression.SubtractionOperator, l: float64(6), r: float64(7), want: float64(-1)}, +// {op: expression.SubtractionOperator, l: float64(6), r: bool(false), noFunc: true}, +// {op: expression.SubtractionOperator, l: bool(true), r: int64(7), noFunc: true}, +// {op: expression.SubtractionOperator, l: bool(true), r: uint64(7), noFunc: true}, +// {op: expression.SubtractionOperator, l: bool(true), r: float64(7), noFunc: true}, +// {op: expression.SubtractionOperator, l: bool(true), r: bool(false), noFunc: true}, +// {op: expression.MultiplicationOperator, l: int64(6), r: int64(7), want: int64(42)}, +// {op: expression.MultiplicationOperator, l: int64(6), r: uint64(7), noFunc: true}, +// {op: expression.MultiplicationOperator, l: int64(6), r: float64(7), noFunc: true}, +// {op: expression.MultiplicationOperator, l: int64(6), r: bool(false), noFunc: true}, +// {op: expression.MultiplicationOperator, l: uint64(6), r: int64(7), noFunc: true}, +// {op: expression.MultiplicationOperator, l: uint64(6), r: uint64(7), want: uint64(42)}, +// {op: expression.MultiplicationOperator, l: uint64(6), r: float64(7), noFunc: true}, +// {op: expression.MultiplicationOperator, l: uint64(6), r: bool(false), noFunc: true}, +// {op: expression.MultiplicationOperator, l: float64(6), r: int64(7), noFunc: true}, +// {op: expression.MultiplicationOperator, l: float64(6), r: uint64(7), noFunc: true}, +// {op: expression.MultiplicationOperator, l: float64(6), r: float64(7), want: float64(42)}, +// {op: expression.MultiplicationOperator, l: float64(6), r: bool(false), noFunc: true}, +// {op: expression.MultiplicationOperator, l: bool(true), r: int64(7), noFunc: true}, +// {op: expression.MultiplicationOperator, l: bool(true), r: uint64(7), noFunc: true}, +// {op: expression.MultiplicationOperator, l: bool(true), r: float64(7), noFunc: true}, +// {op: 
expression.MultiplicationOperator, l: bool(true), r: bool(false), noFunc: true}, +// {op: expression.DivisionOperator, l: int64(6), r: int64(3), want: int64(2)}, +// {op: expression.DivisionOperator, l: int64(6), r: uint64(7), noFunc: true}, +// {op: expression.DivisionOperator, l: int64(6), r: float64(7), noFunc: true}, +// {op: expression.DivisionOperator, l: int64(6), r: bool(false), noFunc: true}, +// {op: expression.DivisionOperator, l: uint64(6), r: int64(7), noFunc: true}, +// {op: expression.DivisionOperator, l: uint64(6), r: uint64(2), want: uint64(3)}, +// {op: expression.DivisionOperator, l: uint64(6), r: float64(7), noFunc: true}, +// {op: expression.DivisionOperator, l: uint64(6), r: bool(false), noFunc: true}, +// {op: expression.DivisionOperator, l: float64(6), r: int64(7), noFunc: true}, +// {op: expression.DivisionOperator, l: float64(6), r: uint64(7), noFunc: true}, +// {op: expression.DivisionOperator, l: float64(6), r: float64(7), want: float64(6.0 / 7.0)}, +// {op: expression.DivisionOperator, l: float64(6), r: bool(false), noFunc: true}, +// {op: expression.DivisionOperator, l: bool(true), r: int64(7), noFunc: true}, +// {op: expression.DivisionOperator, l: bool(true), r: uint64(7), noFunc: true}, +// {op: expression.DivisionOperator, l: bool(true), r: float64(7), noFunc: true}, +// {op: expression.DivisionOperator, l: bool(true), r: bool(false), noFunc: true}, +// {op: expression.LessThanEqualOperator, l: int64(6), r: int64(7), want: true}, +// {op: expression.LessThanEqualOperator, l: int64(6), r: uint64(7), want: true}, +// {op: expression.LessThanEqualOperator, l: int64(6), r: float64(7), want: true}, +// {op: expression.LessThanEqualOperator, l: int64(6), r: bool(false), noFunc: true}, +// {op: expression.LessThanEqualOperator, l: uint64(6), r: int64(7), want: true}, +// {op: expression.LessThanEqualOperator, l: uint64(6), r: uint64(7), want: true}, +// {op: expression.LessThanEqualOperator, l: uint64(6), r: float64(7), want: true}, +// 
{op: expression.LessThanEqualOperator, l: uint64(6), r: bool(false), noFunc: true}, +// {op: expression.LessThanEqualOperator, l: float64(6), r: int64(7), want: true}, +// {op: expression.LessThanEqualOperator, l: float64(6), r: uint64(7), want: true}, +// {op: expression.LessThanEqualOperator, l: float64(6), r: float64(7), want: true}, +// {op: expression.LessThanEqualOperator, l: float64(6), r: bool(false), noFunc: true}, +// {op: expression.LessThanEqualOperator, l: bool(true), r: int64(7), noFunc: true}, +// {op: expression.LessThanEqualOperator, l: bool(true), r: uint64(7), noFunc: true}, +// {op: expression.LessThanEqualOperator, l: bool(true), r: float64(7), noFunc: true}, +// {op: expression.LessThanEqualOperator, l: bool(true), r: bool(false), noFunc: true}, +// {op: expression.LessThanOperator, l: int64(6), r: int64(7), want: true}, +// {op: expression.LessThanOperator, l: int64(6), r: uint64(7), want: true}, +// {op: expression.LessThanOperator, l: int64(6), r: float64(7), want: true}, +// {op: expression.LessThanOperator, l: int64(6), r: bool(false), noFunc: true}, +// {op: expression.LessThanOperator, l: uint64(6), r: int64(7), want: true}, +// {op: expression.LessThanOperator, l: uint64(6), r: uint64(7), want: true}, +// {op: expression.LessThanOperator, l: uint64(6), r: float64(7), want: true}, +// {op: expression.LessThanOperator, l: uint64(6), r: bool(false), noFunc: true}, +// {op: expression.LessThanOperator, l: float64(6), r: int64(7), want: true}, +// {op: expression.LessThanOperator, l: float64(6), r: uint64(7), want: true}, +// {op: expression.LessThanOperator, l: float64(6), r: float64(7), want: true}, +// {op: expression.LessThanOperator, l: float64(6), r: bool(false), noFunc: true}, +// {op: expression.LessThanOperator, l: bool(true), r: int64(7), noFunc: true}, +// {op: expression.LessThanOperator, l: bool(true), r: uint64(7), noFunc: true}, +// {op: expression.LessThanOperator, l: bool(true), r: float64(7), noFunc: true}, +// {op: 
expression.LessThanOperator, l: bool(true), r: bool(false), noFunc: true}, +// {op: expression.GreaterThanEqualOperator, l: int64(6), r: int64(7), want: false}, +// {op: expression.GreaterThanEqualOperator, l: int64(6), r: uint64(7), want: false}, +// {op: expression.GreaterThanEqualOperator, l: int64(6), r: float64(7), want: false}, +// {op: expression.GreaterThanEqualOperator, l: int64(6), r: bool(false), noFunc: true}, +// {op: expression.GreaterThanEqualOperator, l: uint64(6), r: int64(7), want: false}, +// {op: expression.GreaterThanEqualOperator, l: uint64(6), r: uint64(7), want: false}, +// {op: expression.GreaterThanEqualOperator, l: uint64(6), r: float64(7), want: false}, +// {op: expression.GreaterThanEqualOperator, l: uint64(6), r: bool(false), noFunc: true}, +// {op: expression.GreaterThanEqualOperator, l: float64(6), r: int64(7), want: false}, +// {op: expression.GreaterThanEqualOperator, l: float64(6), r: uint64(7), want: false}, +// {op: expression.GreaterThanEqualOperator, l: float64(6), r: float64(7), want: false}, +// {op: expression.GreaterThanEqualOperator, l: float64(6), r: bool(false), noFunc: true}, +// {op: expression.GreaterThanEqualOperator, l: bool(true), r: int64(7), noFunc: true}, +// {op: expression.GreaterThanEqualOperator, l: bool(true), r: uint64(7), noFunc: true}, +// {op: expression.GreaterThanEqualOperator, l: bool(true), r: float64(7), noFunc: true}, +// {op: expression.GreaterThanEqualOperator, l: bool(true), r: bool(false), noFunc: true}, +// {op: expression.GreaterThanOperator, l: int64(6), r: int64(7), want: false}, +// {op: expression.GreaterThanOperator, l: int64(6), r: uint64(7), want: false}, +// {op: expression.GreaterThanOperator, l: int64(6), r: float64(7), want: false}, +// {op: expression.GreaterThanOperator, l: int64(6), r: bool(false), noFunc: true}, +// {op: expression.GreaterThanOperator, l: uint64(6), r: int64(7), want: false}, +// {op: expression.GreaterThanOperator, l: uint64(6), r: uint64(7), want: false}, 
+// {op: expression.GreaterThanOperator, l: uint64(6), r: float64(7), want: false}, +// {op: expression.GreaterThanOperator, l: uint64(6), r: bool(false), noFunc: true}, +// {op: expression.GreaterThanOperator, l: float64(6), r: int64(7), want: false}, +// {op: expression.GreaterThanOperator, l: float64(6), r: uint64(7), want: false}, +// {op: expression.GreaterThanOperator, l: float64(6), r: float64(7), want: false}, +// {op: expression.GreaterThanOperator, l: float64(6), r: bool(false), noFunc: true}, +// {op: expression.GreaterThanOperator, l: bool(true), r: int64(7), noFunc: true}, +// {op: expression.GreaterThanOperator, l: bool(true), r: uint64(7), noFunc: true}, +// {op: expression.GreaterThanOperator, l: bool(true), r: float64(7), noFunc: true}, +// {op: expression.GreaterThanOperator, l: bool(true), r: bool(false), noFunc: true}, +// {op: expression.EqualOperator, l: int64(6), r: int64(7), want: false}, +// {op: expression.EqualOperator, l: int64(6), r: uint64(7), want: false}, +// {op: expression.EqualOperator, l: int64(6), r: float64(7), want: false}, +// {op: expression.EqualOperator, l: int64(6), r: bool(false), noFunc: true}, +// {op: expression.EqualOperator, l: uint64(6), r: int64(7), want: false}, +// {op: expression.EqualOperator, l: uint64(6), r: uint64(7), want: false}, +// {op: expression.EqualOperator, l: uint64(6), r: float64(7), want: false}, +// {op: expression.EqualOperator, l: uint64(6), r: bool(false), noFunc: true}, +// {op: expression.EqualOperator, l: float64(6), r: int64(7), want: false}, +// {op: expression.EqualOperator, l: float64(6), r: uint64(7), want: false}, +// {op: expression.EqualOperator, l: float64(6), r: float64(7), want: false}, +// {op: expression.EqualOperator, l: float64(6), r: bool(false), noFunc: true}, +// {op: expression.EqualOperator, l: bool(true), r: int64(7), noFunc: true}, +// {op: expression.EqualOperator, l: bool(true), r: uint64(7), noFunc: true}, +// {op: expression.EqualOperator, l: bool(true), r: 
float64(7), noFunc: true}, +// {op: expression.EqualOperator, l: bool(true), r: bool(false), noFunc: true}, +// {op: expression.NotEqualOperator, l: int64(6), r: int64(7), want: true}, +// {op: expression.NotEqualOperator, l: int64(6), r: uint64(7), want: true}, +// {op: expression.NotEqualOperator, l: int64(6), r: float64(7), want: true}, +// {op: expression.NotEqualOperator, l: int64(6), r: bool(true), noFunc: true}, +// {op: expression.NotEqualOperator, l: uint64(6), r: int64(7), want: true}, +// {op: expression.NotEqualOperator, l: uint64(6), r: uint64(7), want: true}, +// {op: expression.NotEqualOperator, l: uint64(6), r: float64(7), want: true}, +// {op: expression.NotEqualOperator, l: uint64(6), r: bool(true), noFunc: true}, +// {op: expression.NotEqualOperator, l: float64(6), r: int64(7), want: true}, +// {op: expression.NotEqualOperator, l: float64(6), r: uint64(7), want: true}, +// {op: expression.NotEqualOperator, l: float64(6), r: float64(7), want: true}, +// {op: expression.NotEqualOperator, l: float64(6), r: bool(false), noFunc: true}, +// {op: expression.NotEqualOperator, l: bool(true), r: int64(7), noFunc: true}, +// {op: expression.NotEqualOperator, l: bool(true), r: uint64(7), noFunc: true}, +// {op: expression.NotEqualOperator, l: bool(true), r: float64(7), noFunc: true}, +// {op: expression.NotEqualOperator, l: bool(true), r: bool(false), noFunc: true}, +// {op: expression.AndOperator, l: int64(6), r: int64(7), noFunc: true}, +// {op: expression.AndOperator, l: int64(6), r: uint64(7), noFunc: true}, +// {op: expression.AndOperator, l: int64(6), r: float64(7), noFunc: true}, +// {op: expression.AndOperator, l: int64(6), r: bool(false), noFunc: true}, +// {op: expression.AndOperator, l: uint64(6), r: int64(7), noFunc: true}, +// {op: expression.AndOperator, l: uint64(6), r: uint64(7), noFunc: true}, +// {op: expression.AndOperator, l: uint64(6), r: float64(7), noFunc: true}, +// {op: expression.AndOperator, l: uint64(6), r: bool(false), noFunc: 
true}, +// {op: expression.AndOperator, l: float64(6), r: int64(7), noFunc: true}, +// {op: expression.AndOperator, l: float64(6), r: uint64(7), noFunc: true}, +// {op: expression.AndOperator, l: float64(6), r: float64(7), noFunc: true}, +// {op: expression.AndOperator, l: float64(6), r: bool(false), noFunc: true}, +// {op: expression.AndOperator, l: bool(true), r: int64(7), noFunc: true}, +// {op: expression.AndOperator, l: bool(true), r: uint64(7), noFunc: true}, +// {op: expression.AndOperator, l: bool(true), r: float64(7), noFunc: true}, +// {op: expression.AndOperator, l: bool(true), r: bool(false), want: false}, +// {op: expression.OrOperator, l: int64(6), r: int64(7), noFunc: true}, +// {op: expression.OrOperator, l: int64(6), r: uint64(7), noFunc: true}, +// {op: expression.OrOperator, l: int64(6), r: float64(7), noFunc: true}, +// {op: expression.OrOperator, l: int64(6), r: bool(false), noFunc: true}, +// {op: expression.OrOperator, l: uint64(6), r: int64(7), noFunc: true}, +// {op: expression.OrOperator, l: uint64(6), r: uint64(7), noFunc: true}, +// {op: expression.OrOperator, l: uint64(6), r: float64(7), noFunc: true}, +// {op: expression.OrOperator, l: uint64(6), r: bool(false), noFunc: true}, +// {op: expression.OrOperator, l: float64(6), r: int64(7), noFunc: true}, +// {op: expression.OrOperator, l: float64(6), r: uint64(7), noFunc: true}, +// {op: expression.OrOperator, l: float64(6), r: float64(7), noFunc: true}, +// {op: expression.OrOperator, l: float64(6), r: bool(false), noFunc: true}, +// {op: expression.OrOperator, l: bool(true), r: int64(7), noFunc: true}, +// {op: expression.OrOperator, l: bool(true), r: uint64(7), noFunc: true}, +// {op: expression.OrOperator, l: bool(true), r: float64(7), noFunc: true}, +// {op: expression.OrOperator, l: bool(true), r: bool(false), want: true}, +// } +// for i, tc := range testCases { +// tc := tc +// t.Run(fmt.Sprintf("%d: %v %v %v", i, tc.l, tc.op, tc.r), func(t *testing.T) { +// lt := typeOf(tc.l) +// 
rt := typeOf(tc.r) +// sig := binarySignature{ +// Operator: tc.op, +// Left: lt, +// Right: rt, +// } +// f, ok := binaryFuncs[sig] +// if !ok { +// if !tc.noFunc { +// t.Fatal("could not find matching function") +// } +// return +// } else if tc.noFunc { +// t.Fatal("expected to not find function") +// } +// left := evaluator{ +// Value: tc.l, +// } +// right := evaluator{ +// Value: tc.r, +// } +// +// got := f.Func(nil, left, right) +// want := Value{ +// Type: typeOf(tc.want), +// Value: tc.want, +// } +// +// if !cmp.Equal(got, want) { +// t.Errorf("unexpected value: -want/+got\n%s", cmp.Diff(want, got)) +// } +// }) +// } +//} +// +//func typeOf(v interface{}) DataType { +// switch v.(type) { +// case bool: +// return TBool +// case int64: +// return TInt +// case uint64: +// return TUInt +// case float64: +// return TFloat +// case string: +// return TString +// case Time: +// return TTime +// default: +// return TInvalid +// } +//} +// +//type evaluator struct { +// Value interface{} +//} +// +//func (v evaluator) Type() DataType { +// return typeOf(v.Value) +//} +//func (v evaluator) EvalBool(Scope) bool { +// return v.Value.(bool) +//} +//func (v evaluator) EvalInt(Scope) int64 { +// return v.Value.(int64) +//} +//func (v evaluator) EvalUInt(Scope) uint64 { +// return v.Value.(uint64) +//} +//func (v evaluator) EvalFloat(Scope) float64 { +// return v.Value.(float64) +//} +//func (v evaluator) EvalString(Scope) string { +// return v.Value.(string) +//} +//func (v evaluator) EvalTime(Scope) Time { +// return v.Value.(Time) +//} diff --git a/vendor/github.com/influxdata/ifql/query/execute/expression_test.go b/vendor/github.com/influxdata/ifql/query/execute/expression_test.go new file mode 100644 index 000000000..058e31ec1 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/expression_test.go @@ -0,0 +1,266 @@ +package execute_test + +//func TestCompileExpression(t *testing.T) { +// testCases := []struct { +// name string +// expr 
expression.Expression +// types map[string]execute.DataType +// wantErr bool +// }{ +// { +// name: "integer literal", +// expr: expression.Expression{ +// Root: &expression.IntegerLiteralNode{ +// Value: 42, +// }, +// }, +// wantErr: false, +// }, +// { +// name: "negate string", +// expr: expression.Expression{ +// Root: &expression.UnaryNode{ +// Operator: expression.SubtractionOperator, +// Node: &expression.StringLiteralNode{ +// Value: "hello", +// }, +// }, +// }, +// wantErr: true, +// }, +// { +// name: "missing type info", +// expr: expression.Expression{ +// Root: &expression.ReferenceNode{ +// Name: "a", +// }, +// }, +// wantErr: true, +// }, +// } +// for _, tc := range testCases { +// tc := tc +// t.Run(tc.name, func(t *testing.T) { +// _, err := execute.CompileExpression(tc.expr, tc.types) +// if err != nil { +// if !tc.wantErr { +// t.Errorf("unexpected compliation error: %s", err) +// } +// } else if tc.wantErr { +// t.Error("expected compliation error") +// } +// }) +// } +//} +//func TestEvaluateCompiledExpression(t *testing.T) { +// testCases := []struct { +// name string +// expr expression.Expression +// types map[string]execute.DataType +// scope execute.Scope +// want execute.Value +// wantErr bool +// }{ +// { +// name: "integer literal", +// expr: expression.Expression{ +// Root: &expression.IntegerLiteralNode{ +// Value: 42, +// }, +// }, +// want: execute.Value{ +// Type: execute.TInt, +// Value: int64(42), +// }, +// }, +// { +// name: "integer addition", +// expr: expression.Expression{ +// Root: &expression.BinaryNode{ +// Operator: expression.AdditionOperator, +// Left: &expression.IntegerLiteralNode{ +// Value: 18, +// }, +// Right: &expression.IntegerLiteralNode{ +// Value: 24, +// }, +// }, +// }, +// want: execute.Value{ +// Type: execute.TInt, +// Value: int64(42), +// }, +// }, +// { +// name: "integer addition using scope", +// expr: expression.Expression{ +// Root: &expression.BinaryNode{ +// Operator: 
expression.AdditionOperator, +// Left: &expression.ReferenceNode{ +// Name: "a", +// }, +// Right: &expression.ReferenceNode{ +// Name: "b", +// }, +// }, +// }, +// types: map[string]execute.DataType{ +// "a": execute.TInt, +// "b": execute.TInt, +// }, +// scope: map[string]execute.Value{ +// "a": {Type: execute.TInt, Value: int64(18)}, +// "b": {Type: execute.TInt, Value: int64(24)}, +// }, +// want: execute.Value{ +// Type: execute.TInt, +// Value: int64(42), +// }, +// }, +// { +// name: "integer addition missing scope", +// expr: expression.Expression{ +// Root: &expression.BinaryNode{ +// Operator: expression.AdditionOperator, +// Left: &expression.ReferenceNode{ +// Name: "a", +// }, +// Right: &expression.ReferenceNode{ +// Name: "b", +// }, +// }, +// }, +// types: map[string]execute.DataType{ +// "a": execute.TInt, +// "b": execute.TInt, +// }, +// scope: map[string]execute.Value{ +// "a": {Type: execute.TInt, Value: int64(18)}, +// }, +// wantErr: true, +// }, +// { +// name: "integer addition incorrect scope", +// expr: expression.Expression{ +// Root: &expression.BinaryNode{ +// Operator: expression.AdditionOperator, +// Left: &expression.ReferenceNode{ +// Name: "a", +// }, +// Right: &expression.ReferenceNode{ +// Name: "b", +// }, +// }, +// }, +// types: map[string]execute.DataType{ +// "a": execute.TInt, +// "b": execute.TInt, +// }, +// scope: map[string]execute.Value{ +// "a": {Type: execute.TInt, Value: int64(18)}, +// "b": {Type: execute.TFloat, Value: float64(18)}, +// }, +// wantErr: true, +// }, +// { +// name: "unsigned integer addition", +// expr: expression.Expression{ +// Root: &expression.BinaryNode{ +// Operator: expression.AdditionOperator, +// Left: &expression.ReferenceNode{ +// Name: "a", +// }, +// Right: &expression.ReferenceNode{ +// Name: "b", +// }, +// }, +// }, +// types: map[string]execute.DataType{ +// "a": execute.TUInt, +// "b": execute.TUInt, +// }, +// scope: map[string]execute.Value{ +// "a": {Type: execute.TUInt, 
Value: uint64(18)}, +// "b": {Type: execute.TUInt, Value: uint64(24)}, +// }, +// want: execute.Value{ +// Type: execute.TUInt, +// Value: uint64(42), +// }, +// }, +// { +// name: "float addition", +// expr: expression.Expression{ +// Root: &expression.BinaryNode{ +// Operator: expression.AdditionOperator, +// Left: &expression.FloatLiteralNode{ +// Value: 18, +// }, +// Right: &expression.FloatLiteralNode{ +// Value: 24, +// }, +// }, +// }, +// want: execute.Value{ +// Type: execute.TFloat, +// Value: float64(42), +// }, +// }, +// { +// name: "boolean and", +// expr: expression.Expression{ +// Root: &expression.BinaryNode{ +// Operator: expression.AndOperator, +// Left: &expression.BooleanLiteralNode{ +// Value: true, +// }, +// Right: &expression.BooleanLiteralNode{ +// Value: true, +// }, +// }, +// }, +// want: execute.Value{ +// Type: execute.TBool, +// Value: true, +// }, +// }, +// { +// name: "boolean or", +// expr: expression.Expression{ +// Root: &expression.BinaryNode{ +// Operator: expression.OrOperator, +// Left: &expression.BooleanLiteralNode{ +// Value: false, +// }, +// Right: &expression.BooleanLiteralNode{ +// Value: true, +// }, +// }, +// }, +// want: execute.Value{ +// Type: execute.TBool, +// Value: true, +// }, +// }, +// } +// for _, tc := range testCases { +// tc := tc +// t.Run(tc.name, func(t *testing.T) { +// ce, err := execute.CompileExpression(tc.expr, tc.types) +// if err != nil { +// t.Fatal(err) +// } +// got, err := ce.Eval(tc.scope) +// if err != nil { +// if !tc.wantErr { +// t.Fatal(err) +// } +// } else if tc.wantErr { +// t.Fatal("expected evaluation error") +// } +// if !cmp.Equal(got, tc.want) { +// t.Errorf("unexpected value: -want/+got\n%s", cmp.Diff(tc.want, got)) +// } +// }) +// } +//} diff --git a/vendor/github.com/influxdata/ifql/query/execute/format.go b/vendor/github.com/influxdata/ifql/query/execute/format.go new file mode 100644 index 000000000..7cc613554 --- /dev/null +++ 
package execute

import (
	"io"
	"sort"
	"strconv"
	"strings"
)

// Formatter writes a block to a Writer.
//
// It renders the block as a fixed-width text table: a one-line block header
// (tag keys and bounds), a column-header row, a dashed separator, then one
// row per time value. Column widths are computed from the first batch of
// rows and only ever grow.
type Formatter struct {
	b Block
	// widths holds the current print width of each column, indexed by the
	// block's (unsorted) column index.
	widths   []int
	maxWidth int
	// newWidths accumulates widths observed since the last header repeat,
	// so the table can re-align when headers are reprinted.
	newWidths []int
	// pad and dash are reusable runs of ' ' and '-' of length maxWidth.
	pad  []byte
	dash []byte
	// fmtBuf is used to format values
	fmtBuf [64]byte

	opts FormatOptions

	// cols is the display ordering of the block's columns (time first,
	// tags, then value); see orderedCols below.
	cols orderedCols
}

// FormatOptions controls optional Formatter behavior.
type FormatOptions struct {
	// RepeatHeaderCount is the number of rows to print before printing the header again.
	// If zero then the headers are not repeated.
	RepeatHeaderCount int
}

// DefaultFormatOptions returns the zero-value options (no header repetition).
func DefaultFormatOptions() *FormatOptions {
	return &FormatOptions{}
}

var eol = []byte{'\n'}

// NewFormatter creates a Formatter for a given block.
// If opts is nil, the DefaultFormatOptions are used.
func NewFormatter(b Block, opts *FormatOptions) *Formatter {
	if opts == nil {
		opts = DefaultFormatOptions()
	}
	return &Formatter{
		b:    b,
		opts: *opts,
	}
}

// writeToHelper wraps an io.Writer, accumulating the byte count and latching
// the first error so callers can write unconditionally and check once.
type writeToHelper struct {
	w   io.Writer
	n   int64
	err error
}

// write forwards data to the underlying writer unless a previous write failed.
func (w *writeToHelper) write(data []byte) {
	if w.err != nil {
		return
	}
	n, err := w.w.Write(data)
	w.n += int64(n)
	w.err = err
}

// minWidthsByType gives the minimum print width per column type, so numeric
// columns stay aligned even when early values are short.
var minWidthsByType = map[DataType]int{
	TBool:    7,
	TInt:     22,
	TUInt:    22,
	TFloat:   22,
	TString:  15,
	TTime:    len(fixedWidthTimeFmt),
	TInvalid: 10,
}

// WriteTo writes the formatted block data to w.
// It implements io.WriterTo: the returned count is the total bytes written
// and the error is the first write error encountered, if any.
func (f *Formatter) WriteTo(out io.Writer) (int64, error) {
	w := &writeToHelper{w: out}

	// Sort cols
	cols := f.b.Cols()
	f.cols = newOrderedCols(cols)
	sort.Sort(f.cols)

	// Compute header widths
	f.widths = make([]int, len(cols))
	for j, c := range cols {
		l := len(c.Label)
		min := minWidthsByType[c.Type]
		if min > l {
			l = min
		}
		if l > f.widths[j] {
			f.widths[j] = l
		}
		if l > f.maxWidth {
			f.maxWidth = l
		}
	}

	// Write Block header
	w.write([]byte("Block: keys: ["))
	w.write([]byte(strings.Join(f.b.Tags().Keys(), ", ")))
	w.write([]byte("] bounds: "))
	w.write([]byte(f.b.Bounds().String()))
	w.write(eol)

	// Check err and return early
	if w.err != nil {
		return w.n, w.err
	}

	// Write rows
	r := 0
	f.b.Times().DoTime(func(ts []Time, rr RowReader) {
		if r == 0 {
			// First batch only: grow widths to fit the actual values,
			// then emit the column headers once.
			for i := range ts {
				for oj, c := range f.cols.cols {
					j := f.cols.Idx(oj)
					buf := f.valueBuf(i, j, c.Type, rr)
					l := len(buf)
					if l > f.widths[j] {
						f.widths[j] = l
					}
					if l > f.maxWidth {
						f.maxWidth = l
					}
				}
			}
			f.makePaddingBuffers()
			f.writeHeader(w)
			f.writeHeaderSeparator(w)
			f.newWidths = make([]int, len(f.widths))
			copy(f.newWidths, f.widths)
		}
		for i := range ts {
			for oj, c := range f.cols.cols {
				j := f.cols.Idx(oj)
				buf := f.valueBuf(i, j, c.Type, rr)
				l := len(buf)
				padding := f.widths[j] - l
				if padding >= 0 {
					// Right-align within the column width.
					w.write(f.pad[:padding])
					w.write(buf)
				} else {
					// Value wider than the column: truncate with ellipsis.
					// Safe because widths are >= the per-type minimums (>= 7).
					//TODO make unicode friendly
					w.write(buf[:f.widths[j]-3])
					w.write([]byte{'.', '.', '.'})
				}
				w.write(f.pad[:2])
				if l > f.newWidths[j] {
					f.newWidths[j] = l
				}
				if l > f.maxWidth {
					f.maxWidth = l
				}
			}
			w.write(eol)
			r++
			if f.opts.RepeatHeaderCount > 0 && r%f.opts.RepeatHeaderCount == 0 {
				// Re-align to any wider values seen since the last header,
				// then reprint the header between separators.
				copy(f.widths, f.newWidths)
				f.makePaddingBuffers()
				f.writeHeaderSeparator(w)
				f.writeHeader(w)
				f.writeHeaderSeparator(w)
			}
		}
	})
	return w.n, w.err
}

// makePaddingBuffers (re)allocates the shared space and dash runs whenever
// maxWidth has changed since they were last built.
func (f *Formatter) makePaddingBuffers() {
	if len(f.pad) != f.maxWidth {
		f.pad = make([]byte, f.maxWidth)
		for i := range f.pad {
			f.pad[i] = ' '
		}
	}
	if len(f.dash) != f.maxWidth {
		f.dash = make([]byte, f.maxWidth)
		for i := range f.dash {
			f.dash[i] = '-'
		}
	}
}

// writeHeader emits one right-aligned label per column in display order.
func (f *Formatter) writeHeader(w *writeToHelper) {
	for oj, c := range f.cols.cols {
		j := f.cols.Idx(oj)
		buf := []byte(c.Label)
		w.write(f.pad[:f.widths[j]-len(buf)])
		w.write(buf)
		w.write(f.pad[:2])
	}
	w.write(eol)
}

// writeHeaderSeparator emits a dashed rule matching each column's width.
func (f *Formatter) writeHeaderSeparator(w *writeToHelper) {
	for oj := range f.cols.cols {
		j := f.cols.Idx(oj)
		w.write(f.dash[:f.widths[j]])
		w.write(f.pad[:2])
	}
	w.write(eol)
}

// valueBuf formats the value at (row i, column j) into f.fmtBuf and returns
// the formatted bytes. Unknown types return an empty slice.
// Note the returned slice aliases fmtBuf (or the string data) and is only
// valid until the next call.
func (f *Formatter) valueBuf(i, j int, typ DataType, rr RowReader) (buf []byte) {
	switch typ {
	case TBool:
		buf = strconv.AppendBool(f.fmtBuf[0:0], rr.AtBool(i, j))
	case TInt:
		buf = strconv.AppendInt(f.fmtBuf[0:0], rr.AtInt(i, j), 10)
	case TUInt:
		buf = strconv.AppendUint(f.fmtBuf[0:0], rr.AtUInt(i, j), 10)
	case TFloat:
		// TODO allow specifying format and precision
		buf = strconv.AppendFloat(f.fmtBuf[0:0], rr.AtFloat(i, j), 'f', -1, 64)
	case TString:
		buf = []byte(rr.AtString(i, j))
	case TTime:
		buf = []byte(rr.AtTime(i, j).String())
	}
	return
}

// orderedCols sorts a list of columns:
//
// * time
// * common tags sorted by label
// * other tags sorted by label
// * value
//
// indexMap tracks, for each display position, the column's index in the
// original (unsorted) slice so values can still be fetched by that index.
type orderedCols struct {
	indexMap []int
	cols     []ColMeta
}

// newOrderedCols copies cols (so sorting does not disturb the block's own
// metadata) and pairs it with an identity index map.
func newOrderedCols(cols []ColMeta) orderedCols {
	indexMap := make([]int, len(cols))
	for i := range indexMap {
		indexMap[i] = i
	}
	cpy := make([]ColMeta, len(cols))
	copy(cpy, cols)
	return orderedCols{
		indexMap: indexMap,
		cols:     cpy,
	}
}

// Idx maps a display-order position back to the original column index.
func (o orderedCols) Idx(oj int) int {
	return o.indexMap[oj]
}

// Len, Swap and Less implement sort.Interface; Swap keeps the index map in
// lock-step with the column slice.
func (o orderedCols) Len() int { return len(o.cols) }
func (o orderedCols) Swap(i int, j int) {
	o.cols[i], o.cols[j] = o.cols[j], o.cols[i]
	o.indexMap[i], o.indexMap[j] = o.indexMap[j], o.indexMap[i]
}

func (o orderedCols) Less(i int, j int) bool {
	// Time column is always first
	if o.cols[i].Label == TimeColLabel {
		return true
	}
	if o.cols[j].Label == TimeColLabel {
		return false
	}

	// Value column is always last
	if o.cols[i].Label == DefaultValueColLabel {
		return false
	}
	if o.cols[j].Label == DefaultValueColLabel {
		return true
	}

	// Common tags before other tags
	if o.cols[i].IsTag() && o.cols[i].Common && o.cols[j].IsTag() && !o.cols[j].Common {
		return true
	}
	if o.cols[i].IsTag() && !o.cols[i].Common && o.cols[j].IsTag() && o.cols[j].Common {
		return false
	}

	// Tags before values
	if o.cols[i].IsTag() && !o.cols[j].IsTag() {
		return true
	}
	if !o.cols[i].IsTag() && o.cols[j].IsTag() {
		return false
	}

	// within a class sort by label
	return o.cols[i].Label < o.cols[j].Label
}
+type MessageQueue interface { + Push(Message) + Pop() Message +} + +type unboundedMessageQueue struct { + buf []Message + head int + tail int + mu sync.Mutex + len int32 +} + +func newMessageQueue(n int) *unboundedMessageQueue { + return &unboundedMessageQueue{ + buf: make([]Message, n), + } +} + +func (q *unboundedMessageQueue) Push(m Message) { + q.mu.Lock() + size := len(q.buf) + q.tail = (q.tail + 1) % size + if q.tail == q.head { + // Resize + buf := make([]Message, size*2) + copy(buf, q.buf[q.head:]) + copy(buf[size-q.head:], q.buf[:q.head]) + q.head = 0 + q.tail = size + q.buf = buf + } + atomic.AddInt32(&q.len, 1) + q.buf[q.tail] = m + q.mu.Unlock() +} + +func (q *unboundedMessageQueue) Len() int { + return int(atomic.LoadInt32(&q.len)) +} + +func (q *unboundedMessageQueue) Pop() Message { + if q.Len() == 0 { + return nil + } + + q.mu.Lock() + size := len(q.buf) + q.head = (q.head + 1) % size + m := q.buf[q.head] + q.buf[q.head] = nil + atomic.AddInt32(&q.len, -1) + q.mu.Unlock() + return m +} diff --git a/vendor/github.com/influxdata/ifql/query/execute/result.go b/vendor/github.com/influxdata/ifql/query/execute/result.go new file mode 100644 index 000000000..55d4e31f6 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/result.go @@ -0,0 +1,118 @@ +package execute + +import ( + "sync" + + "github.com/influxdata/ifql/query/plan" +) + +type Result interface { + Blocks() BlockIterator + abort(error) +} + +// resultSink implements both the Transformation and Result interfaces, +// mapping the pushed based Transformation API to the pull based Result interface. 
+type resultSink struct { + mu sync.Mutex + blocks chan resultMessage + + abortErr chan error + aborted chan struct{} +} + +type resultMessage struct { + block Block + err error +} + +func newResultSink(plan.YieldSpec) *resultSink { + return &resultSink{ + // TODO(nathanielc): Currently this buffer needs to be big enough hold all result blocks :( + blocks: make(chan resultMessage, 1000), + abortErr: make(chan error, 1), + aborted: make(chan struct{}), + } +} + +func (s *resultSink) RetractBlock(DatasetID, BlockMetadata) error { + //TODO implement + return nil +} + +func (s *resultSink) Process(id DatasetID, b Block) error { + select { + case s.blocks <- resultMessage{ + block: b, + }: + case <-s.aborted: + } + return nil +} + +func (s *resultSink) Blocks() BlockIterator { + return s +} + +func (s *resultSink) Do(f func(Block) error) error { + for { + select { + case err := <-s.abortErr: + return err + case msg, more := <-s.blocks: + if !more { + return nil + } + if msg.err != nil { + return msg.err + } + if err := f(msg.block); err != nil { + return err + } + } + } +} + +func (s *resultSink) UpdateWatermark(id DatasetID, mark Time) error { + //Nothing to do + return nil +} +func (s *resultSink) UpdateProcessingTime(id DatasetID, t Time) error { + //Nothing to do + return nil +} + +func (s *resultSink) setTrigger(Trigger) { + //TODO: Change interfaces so that resultSink, does not need to implement this method. 
+} + +func (s *resultSink) Finish(id DatasetID, err error) { + if err != nil { + select { + case s.blocks <- resultMessage{ + err: err, + }: + case <-s.aborted: + } + } + close(s.blocks) +} + +func (s *resultSink) abort(err error) { + s.mu.Lock() + defer s.mu.Unlock() + + // Check if we have already aborted + aborted := false + select { + case <-s.aborted: + aborted = true + default: + } + if aborted { + return // already aborted + } + + s.abortErr <- err + close(s.aborted) +} diff --git a/vendor/github.com/influxdata/ifql/query/execute/row_fn.go b/vendor/github.com/influxdata/ifql/query/execute/row_fn.go new file mode 100644 index 000000000..19913932f --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/row_fn.go @@ -0,0 +1,264 @@ +package execute + +import ( + "fmt" + + "github.com/influxdata/ifql/compiler" + "github.com/influxdata/ifql/semantic" + "github.com/pkg/errors" +) + +type rowFn struct { + fn *semantic.FunctionExpression + compilationCache *compiler.CompilationCache + scope compiler.Scope + + preparedFn compiler.Func + + recordName string + record *compiler.Object + + recordCols map[string]int + references []string +} + +func newRowFn(fn *semantic.FunctionExpression) (rowFn, error) { + if len(fn.Params) != 1 { + return rowFn{}, fmt.Errorf("function should only have a single parameter, got %d", len(fn.Params)) + } + return rowFn{ + compilationCache: compiler.NewCompilationCache(fn), + scope: make(compiler.Scope, 1), + recordName: fn.Params[0].Key.Name, + references: findColReferences(fn), + recordCols: make(map[string]int), + record: compiler.NewObject(), + }, nil +} + +func (f *rowFn) prepare(cols []ColMeta) error { + // Prepare types and recordCols + propertyTypes := make(map[string]semantic.Type, len(f.references)) + for _, r := range f.references { + found := false + for j, c := range cols { + if r == c.Label { + f.recordCols[r] = j + found = true + propertyTypes[r] = ConvertToKind(c.Type) + break + } + } + if !found { + return 
fmt.Errorf("function references unknown column %q", r) + } + } + // Compile fn for given types + fn, err := f.compilationCache.Compile(map[string]semantic.Type{ + f.recordName: semantic.NewObjectType(propertyTypes), + }) + if err != nil { + return err + } + f.preparedFn = fn + return nil +} + +func ConvertToKind(t DataType) semantic.Kind { + // TODO make this an array lookup. + switch t { + case TInvalid: + return semantic.Invalid + case TBool: + return semantic.Bool + case TInt: + return semantic.Int + case TUInt: + return semantic.UInt + case TFloat: + return semantic.Float + case TString: + return semantic.String + case TTime: + return semantic.Time + default: + return semantic.Invalid + } +} + +func ConvertFromKind(k semantic.Kind) DataType { + // TODO make this an array lookup. + switch k { + case semantic.Invalid: + return TInvalid + case semantic.Bool: + return TBool + case semantic.Int: + return TInt + case semantic.UInt: + return TUInt + case semantic.Float: + return TFloat + case semantic.String: + return TString + case semantic.Time: + return TTime + default: + return TInvalid + } +} + +func (f *rowFn) eval(row int, rr RowReader) (compiler.Value, error) { + for _, r := range f.references { + f.record.Set(r, ValueForRow(row, f.recordCols[r], rr)) + } + f.scope[f.recordName] = f.record + return f.preparedFn.Eval(f.scope) +} + +type RowPredicateFn struct { + rowFn +} + +func NewRowPredicateFn(fn *semantic.FunctionExpression) (*RowPredicateFn, error) { + r, err := newRowFn(fn) + if err != nil { + return nil, err + } + return &RowPredicateFn{ + rowFn: r, + }, nil +} + +func (f *RowPredicateFn) Prepare(cols []ColMeta) error { + err := f.rowFn.prepare(cols) + if err != nil { + return err + } + if f.preparedFn.Type() != semantic.Bool { + return errors.New("row predicate function does not evaluate to a boolean") + } + return nil +} + +func (f *RowPredicateFn) Eval(row int, rr RowReader) (bool, error) { + v, err := f.rowFn.eval(row, rr) + if err != nil { + return 
false, err + } + return v.Bool(), nil +} + +type RowMapFn struct { + rowFn + + isWrap bool + wrapObj *compiler.Object +} + +func NewRowMapFn(fn *semantic.FunctionExpression) (*RowMapFn, error) { + r, err := newRowFn(fn) + if err != nil { + return nil, err + } + return &RowMapFn{ + rowFn: r, + wrapObj: compiler.NewObject(), + }, nil +} + +func (f *RowMapFn) Prepare(cols []ColMeta) error { + err := f.rowFn.prepare(cols) + if err != nil { + return err + } + k := f.preparedFn.Type().Kind() + f.isWrap = k != semantic.Object + if f.isWrap { + f.wrapObj.SetPropertyType(DefaultValueColLabel, f.preparedFn.Type()) + } + return nil +} + +func (f *RowMapFn) Type() semantic.Type { + if f.isWrap { + return f.wrapObj.Type() + } + return f.preparedFn.Type() +} + +func (f *RowMapFn) Eval(row int, rr RowReader) (*compiler.Object, error) { + v, err := f.rowFn.eval(row, rr) + if err != nil { + return nil, err + } + if f.isWrap { + f.wrapObj.Set(DefaultValueColLabel, v) + return f.wrapObj, nil + } + return v.Object(), nil +} + +func ValueForRow(i, j int, rr RowReader) compiler.Value { + t := rr.Cols()[j].Type + switch t { + case TBool: + return compiler.NewBool(rr.AtBool(i, j)) + case TInt: + return compiler.NewInt(rr.AtInt(i, j)) + case TUInt: + return compiler.NewUInt(rr.AtUInt(i, j)) + case TFloat: + return compiler.NewFloat(rr.AtFloat(i, j)) + case TString: + return compiler.NewString(rr.AtString(i, j)) + case TTime: + return compiler.NewTime(compiler.Time(rr.AtTime(i, j))) + default: + PanicUnknownType(t) + return nil + } +} + +func AppendValue(builder BlockBuilder, j int, v compiler.Value) { + switch k := v.Type().Kind(); k { + case semantic.Bool: + builder.AppendBool(j, v.Bool()) + case semantic.Int: + builder.AppendInt(j, v.Int()) + case semantic.UInt: + builder.AppendUInt(j, v.UInt()) + case semantic.Float: + builder.AppendFloat(j, v.Float()) + case semantic.String: + builder.AppendString(j, v.Str()) + case semantic.Time: + builder.AppendTime(j, Time(v.Time())) + default: + 
PanicUnknownType(ConvertFromKind(k)) + } +} + +func findColReferences(fn *semantic.FunctionExpression) []string { + v := &colReferenceVisitor{ + recordName: fn.Params[0].Key.Name, + } + semantic.Walk(v, fn) + return v.refs +} + +type colReferenceVisitor struct { + recordName string + refs []string +} + +func (c *colReferenceVisitor) Visit(node semantic.Node) semantic.Visitor { + if me, ok := node.(*semantic.MemberExpression); ok { + if obj, ok := me.Object.(*semantic.IdentifierExpression); ok && obj.Name == c.recordName { + c.refs = append(c.refs, me.Property) + } + } + return c +} + +func (c *colReferenceVisitor) Done() {} diff --git a/vendor/github.com/influxdata/ifql/query/execute/selector.go b/vendor/github.com/influxdata/ifql/query/execute/selector.go new file mode 100644 index 000000000..54e2df0ae --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/selector.go @@ -0,0 +1,309 @@ +package execute + +import ( + "fmt" +) + +type selectorTransformation struct { + d Dataset + cache BlockBuilderCache + bounds Bounds + useRowTime bool + + colLabel string +} + +type rowSelectorTransformation struct { + selectorTransformation + selector RowSelector +} +type indexSelectorTransformation struct { + selectorTransformation + selector IndexSelector +} + +func NewRowSelectorTransformationAndDataset(id DatasetID, mode AccumulationMode, bounds Bounds, selector RowSelector, colLabel string, useRowTime bool, a *Allocator) (*rowSelectorTransformation, Dataset) { + cache := NewBlockBuilderCache(a) + d := NewDataset(id, mode, cache) + return NewRowSelectorTransformation(d, cache, bounds, selector, colLabel, useRowTime), d +} +func NewRowSelectorTransformation(d Dataset, c BlockBuilderCache, bounds Bounds, selector RowSelector, colLabel string, useRowTime bool) *rowSelectorTransformation { + return &rowSelectorTransformation{ + selectorTransformation: newSelectorTransformation(d, c, bounds, colLabel, useRowTime), + selector: selector, + } +} + +func 
NewIndexSelectorTransformationAndDataset(id DatasetID, mode AccumulationMode, bounds Bounds, selector IndexSelector, colLabel string, useRowTime bool, a *Allocator) (*indexSelectorTransformation, Dataset) { + cache := NewBlockBuilderCache(a) + d := NewDataset(id, mode, cache) + return NewIndexSelectorTransformation(d, cache, bounds, selector, colLabel, useRowTime), d +} +func NewIndexSelectorTransformation(d Dataset, c BlockBuilderCache, bounds Bounds, selector IndexSelector, colLabel string, useRowTime bool) *indexSelectorTransformation { + return &indexSelectorTransformation{ + selectorTransformation: newSelectorTransformation(d, c, bounds, colLabel, useRowTime), + selector: selector, + } +} + +func newSelectorTransformation(d Dataset, c BlockBuilderCache, bounds Bounds, colLabel string, useRowTime bool) selectorTransformation { + if colLabel == "" { + colLabel = DefaultValueColLabel + } + return selectorTransformation{ + d: d, + cache: c, + bounds: bounds, + colLabel: colLabel, + useRowTime: useRowTime, + } +} + +func (t *selectorTransformation) RetractBlock(id DatasetID, meta BlockMetadata) error { + //TODO(nathanielc): Store intermediate state for retractions + key := ToBlockKey(meta) + return t.d.RetractBlock(key) +} +func (t *selectorTransformation) UpdateWatermark(id DatasetID, mark Time) error { + return t.d.UpdateWatermark(mark) +} +func (t *selectorTransformation) UpdateProcessingTime(id DatasetID, pt Time) error { + return t.d.UpdateProcessingTime(pt) +} +func (t *selectorTransformation) Finish(id DatasetID, err error) { + t.d.Finish(err) +} + +func (t *selectorTransformation) setupBuilder(b Block) (BlockBuilder, int) { + builder, new := t.cache.BlockBuilder(blockMetadata{ + bounds: t.bounds, + tags: b.Tags(), + }) + if new { + AddBlockCols(b, builder) + } + + cols := builder.Cols() + valueIdx := ColIdx(t.colLabel, cols) + return builder, valueIdx +} + +func (t *indexSelectorTransformation) Process(id DatasetID, b Block) error { + builder, valueIdx := 
t.setupBuilder(b) + valueCol := builder.Cols()[valueIdx] + + values := b.Col(valueIdx) + switch valueCol.Type { + case TBool: + s := t.selector.NewBoolSelector() + values.DoBool(func(vs []bool, rr RowReader) { + selected := s.DoBool(vs) + t.appendSelected(selected, builder, rr, b.Bounds().Stop) + }) + case TInt: + s := t.selector.NewIntSelector() + values.DoInt(func(vs []int64, rr RowReader) { + selected := s.DoInt(vs) + t.appendSelected(selected, builder, rr, b.Bounds().Stop) + }) + case TUInt: + s := t.selector.NewUIntSelector() + values.DoUInt(func(vs []uint64, rr RowReader) { + selected := s.DoUInt(vs) + t.appendSelected(selected, builder, rr, b.Bounds().Stop) + }) + case TFloat: + s := t.selector.NewFloatSelector() + values.DoFloat(func(vs []float64, rr RowReader) { + selected := s.DoFloat(vs) + t.appendSelected(selected, builder, rr, b.Bounds().Stop) + }) + case TString: + s := t.selector.NewStringSelector() + values.DoString(func(vs []string, rr RowReader) { + selected := s.DoString(vs) + t.appendSelected(selected, builder, rr, b.Bounds().Stop) + }) + } + return nil +} + +func (t *rowSelectorTransformation) Process(id DatasetID, b Block) error { + builder, valueIdx := t.setupBuilder(b) + if valueIdx < 0 { + return fmt.Errorf("no column %q exists", t.colLabel) + } + valueCol := builder.Cols()[valueIdx] + + values := b.Col(valueIdx) + var rower Rower + switch valueCol.Type { + case TBool: + s := t.selector.NewBoolSelector() + values.DoBool(s.DoBool) + rower = s + case TInt: + s := t.selector.NewIntSelector() + values.DoInt(s.DoInt) + rower = s + case TUInt: + s := t.selector.NewUIntSelector() + values.DoUInt(s.DoUInt) + rower = s + case TFloat: + s := t.selector.NewFloatSelector() + values.DoFloat(s.DoFloat) + rower = s + case TString: + s := t.selector.NewStringSelector() + values.DoString(s.DoString) + rower = s + } + + rows := rower.Rows() + t.appendRows(builder, rows, b.Bounds().Stop) + return nil +} + +func (t *indexSelectorTransformation) 
appendSelected(selected []int, builder BlockBuilder, rr RowReader, stop Time) { + if len(selected) == 0 { + return + } + cols := builder.Cols() + for j, c := range cols { + for _, i := range selected { + switch c.Type { + case TBool: + builder.AppendBool(j, rr.AtBool(i, j)) + case TInt: + builder.AppendInt(j, rr.AtInt(i, j)) + case TUInt: + builder.AppendUInt(j, rr.AtUInt(i, j)) + case TFloat: + builder.AppendFloat(j, rr.AtFloat(i, j)) + case TString: + builder.AppendString(j, rr.AtString(i, j)) + case TTime: + time := stop + if t.useRowTime { + time = rr.AtTime(i, j) + } + builder.AppendTime(j, time) + default: + PanicUnknownType(c.Type) + } + } + } +} + +func (t *rowSelectorTransformation) appendRows(builder BlockBuilder, rows []Row, stop Time) { + cols := builder.Cols() + for j, c := range cols { + for _, row := range rows { + v := row.Values[j] + switch c.Type { + case TBool: + builder.AppendBool(j, v.(bool)) + case TInt: + builder.AppendInt(j, v.(int64)) + case TUInt: + builder.AppendUInt(j, v.(uint64)) + case TFloat: + builder.AppendFloat(j, v.(float64)) + case TString: + builder.AppendString(j, v.(string)) + case TTime: + if t.useRowTime { + builder.AppendTime(j, v.(Time)) + } else { + builder.AppendTime(j, stop) + } + default: + PanicUnknownType(c.Type) + } + } + } +} + +type IndexSelector interface { + NewBoolSelector() DoBoolIndexSelector + NewIntSelector() DoIntIndexSelector + NewUIntSelector() DoUIntIndexSelector + NewFloatSelector() DoFloatIndexSelector + NewStringSelector() DoStringIndexSelector +} +type DoBoolIndexSelector interface { + DoBool([]bool) []int +} +type DoIntIndexSelector interface { + DoInt([]int64) []int +} +type DoUIntIndexSelector interface { + DoUInt([]uint64) []int +} +type DoFloatIndexSelector interface { + DoFloat([]float64) []int +} +type DoStringIndexSelector interface { + DoString([]string) []int +} + +type RowSelector interface { + NewBoolSelector() DoBoolRowSelector + NewIntSelector() DoIntRowSelector + NewUIntSelector() 
DoUIntRowSelector + NewFloatSelector() DoFloatRowSelector + NewStringSelector() DoStringRowSelector +} + +type Rower interface { + Rows() []Row +} + +type DoBoolRowSelector interface { + Rower + // What if the selector doesn't know yet and needs to wait all is finalized? + DoBool(vs []bool, rr RowReader) +} +type DoIntRowSelector interface { + Rower + DoInt(vs []int64, rr RowReader) +} +type DoUIntRowSelector interface { + Rower + DoUInt(vs []uint64, rr RowReader) +} +type DoFloatRowSelector interface { + Rower + DoFloat(vs []float64, rr RowReader) +} +type DoStringRowSelector interface { + Rower + DoString(vs []string, rr RowReader) +} + +type Row struct { + Values []interface{} +} + +func ReadRow(i int, rr RowReader) (row Row) { + cols := rr.Cols() + row.Values = make([]interface{}, len(cols)) + for j, c := range cols { + switch c.Type { + case TBool: + row.Values[j] = rr.AtBool(i, j) + case TInt: + row.Values[j] = rr.AtInt(i, j) + case TUInt: + row.Values[j] = rr.AtUInt(i, j) + case TFloat: + row.Values[j] = rr.AtFloat(i, j) + case TString: + row.Values[j] = rr.AtString(i, j) + case TTime: + row.Values[j] = rr.AtTime(i, j) + } + } + return +} diff --git a/vendor/github.com/influxdata/ifql/query/execute/selector_test.go b/vendor/github.com/influxdata/ifql/query/execute/selector_test.go new file mode 100644 index 000000000..cab901e94 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/selector_test.go @@ -0,0 +1,703 @@ +package execute_test + +import ( + "sort" + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query/execute" + "github.com/influxdata/ifql/query/execute/executetest" +) + +func TestRowSelector_Process(t *testing.T) { + // All test cases use a simple MinSelector + testCases := []struct { + name string + bounds execute.Bounds + colLabel string + useRowTime bool + data []*executetest.Block + want func(b execute.Bounds) 
[]*executetest.Block + }{ + { + name: "single", + bounds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + data: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(0), 0.0}, + {execute.Time(10), 1.0}, + {execute.Time(20), 2.0}, + {execute.Time(30), 3.0}, + {execute.Time(40), 4.0}, + {execute.Time(50), 5.0}, + {execute.Time(60), 6.0}, + {execute.Time(70), 7.0}, + {execute.Time(80), 8.0}, + {execute.Time(90), 9.0}, + }, + }}, + want: func(b execute.Bounds) []*executetest.Block { + return []*executetest.Block{{ + Bnds: b, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(100), 0.0}, + }, + }} + }, + }, + { + name: "single useRowTime", + useRowTime: true, + bounds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + data: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(0), 0.0}, + {execute.Time(10), 1.0}, + {execute.Time(20), 2.0}, + {execute.Time(30), 3.0}, + {execute.Time(40), 4.0}, + {execute.Time(50), 5.0}, + {execute.Time(60), 6.0}, + {execute.Time(70), 7.0}, + {execute.Time(80), 8.0}, + {execute.Time(90), 9.0}, + }, + }}, + want: func(b execute.Bounds) []*executetest.Block { + return []*executetest.Block{{ + Bnds: b, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + 
{execute.Time(0), 0.0}, + }, + }} + }, + }, + { + name: "single custom column", + colLabel: "x", + bounds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + data: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "x", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(0), 0.0}, + {execute.Time(10), 1.0}, + {execute.Time(20), 2.0}, + {execute.Time(30), 3.0}, + {execute.Time(40), 4.0}, + {execute.Time(50), 5.0}, + {execute.Time(60), 6.0}, + {execute.Time(70), 7.0}, + {execute.Time(80), 8.0}, + {execute.Time(90), 9.0}, + }, + }}, + want: func(b execute.Bounds) []*executetest.Block { + return []*executetest.Block{{ + Bnds: b, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "x", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(100), 0.0}, + }, + }} + }, + }, + { + name: "multiple blocks", + bounds: execute.Bounds{ + Start: 0, + Stop: 200, + }, + data: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(0), 0.0}, + {execute.Time(10), 1.0}, + {execute.Time(20), 2.0}, + {execute.Time(30), 3.0}, + {execute.Time(40), 4.0}, + {execute.Time(50), 5.0}, + {execute.Time(60), 6.0}, + {execute.Time(70), 7.0}, + {execute.Time(80), 8.0}, + {execute.Time(90), 9.0}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 100, + Stop: 200, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(100), 10.0}, + 
{execute.Time(110), 11.0}, + {execute.Time(120), 12.0}, + {execute.Time(130), 13.0}, + {execute.Time(140), 14.0}, + {execute.Time(150), 15.0}, + {execute.Time(160), 16.0}, + {execute.Time(170), 17.0}, + {execute.Time(180), 18.0}, + {execute.Time(190), 19.0}, + }, + }, + }, + want: func(b execute.Bounds) []*executetest.Block { + return []*executetest.Block{{ + Bnds: b, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(100), 0.0}, + {execute.Time(200), 10.0}, + }, + }} + }, + }, + { + name: "multiple blocks with tags and useRowTime", + useRowTime: true, + bounds: execute.Bounds{ + Start: 0, + Stop: 200, + }, + data: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(0), 4.0, "a", "x"}, + {execute.Time(10), 3.0, "a", "y"}, + {execute.Time(20), 6.0, "a", "x"}, + {execute.Time(30), 3.0, "a", "y"}, + {execute.Time(40), 1.0, "a", "x"}, + {execute.Time(50), 4.0, "a", "y"}, + {execute.Time(60), 7.0, "a", "x"}, + {execute.Time(70), 7.0, "a", "y"}, + {execute.Time(80), 2.0, "a", "x"}, + {execute.Time(90), 7.0, "a", "y"}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, 
+ Data: [][]interface{}{ + {execute.Time(0), 3.3, "b", "x"}, + {execute.Time(10), 5.3, "b", "y"}, + {execute.Time(20), 2.3, "b", "x"}, + {execute.Time(30), 7.3, "b", "y"}, + {execute.Time(40), 4.3, "b", "x"}, + {execute.Time(50), 6.3, "b", "y"}, + {execute.Time(60), 6.3, "b", "x"}, + {execute.Time(70), 5.3, "b", "y"}, + {execute.Time(80), 8.3, "b", "x"}, + {execute.Time(90), 1.3, "b", "y"}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 100, + Stop: 200, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(100), 14.0, "a", "y"}, + {execute.Time(110), 13.0, "a", "x"}, + {execute.Time(120), 17.0, "a", "y"}, + {execute.Time(130), 13.0, "a", "x"}, + {execute.Time(140), 14.0, "a", "y"}, + {execute.Time(150), 14.0, "a", "x"}, + {execute.Time(160), 11.0, "a", "y"}, + {execute.Time(170), 15.0, "a", "x"}, + {execute.Time(180), 12.0, "a", "y"}, + {execute.Time(190), 14.0, "a", "x"}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 100, + Stop: 200, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(100), 12.3, "b", "y"}, + {execute.Time(110), 11.3, "b", "x"}, + {execute.Time(120), 14.3, "b", "y"}, + {execute.Time(130), 15.3, "b", "x"}, + {execute.Time(140), 14.3, "b", "y"}, + {execute.Time(150), 13.3, "b", "x"}, + {execute.Time(160), 16.3, "b", "y"}, + {execute.Time(170), 13.3, "b", "x"}, + {execute.Time(180), 12.3, "b", "y"}, + 
{execute.Time(190), 17.3, "b", "x"}, + }, + }, + }, + want: func(b execute.Bounds) []*executetest.Block { + return []*executetest.Block{ + { + Bnds: b, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(40), 1.0, "a", "x"}, + {execute.Time(160), 11.0, "a", "y"}, + }, + }, + { + Bnds: b, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(90), 1.3, "b", "y"}, + {execute.Time(110), 11.3, "b", "x"}, + }, + }, + } + }, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + d := executetest.NewDataset(executetest.RandomDatasetID()) + c := execute.NewBlockBuilderCache(executetest.UnlimitedAllocator) + c.SetTriggerSpec(execute.DefaultTriggerSpec) + + selector := execute.NewRowSelectorTransformation(d, c, tc.bounds, new(functions.MinSelector), tc.colLabel, tc.useRowTime) + + parentID := executetest.RandomDatasetID() + for _, b := range tc.data { + if err := selector.Process(parentID, b); err != nil { + t.Fatal(err) + } + } + + want := tc.want(tc.bounds) + got := executetest.BlocksFromCache(c) + + sort.Sort(executetest.SortedBlocks(got)) + sort.Sort(executetest.SortedBlocks(want)) + + if !cmp.Equal(want, got, cmpopts.EquateNaNs()) { + t.Errorf("unexpected blocks -want/+got\n%s", cmp.Diff(want, got)) + } + }) + } +} + +func TestIndexSelector_Process(t *testing.T) { + // All test cases use a simple 
FirstSelector + testCases := []struct { + name string + bounds execute.Bounds + useRowTime bool + data []*executetest.Block + want func(b execute.Bounds) []*executetest.Block + }{ + { + name: "single", + bounds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + data: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(0), 0.0}, + {execute.Time(10), 1.0}, + {execute.Time(20), 2.0}, + {execute.Time(30), 3.0}, + {execute.Time(40), 4.0}, + {execute.Time(50), 5.0}, + {execute.Time(60), 6.0}, + {execute.Time(70), 7.0}, + {execute.Time(80), 8.0}, + {execute.Time(90), 9.0}, + }, + }}, + want: func(b execute.Bounds) []*executetest.Block { + return []*executetest.Block{{ + Bnds: b, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(100), 0.0}, + }, + }} + }, + }, + { + name: "single useRowTime", + useRowTime: true, + bounds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + data: []*executetest.Block{{ + Bnds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(0), 0.0}, + {execute.Time(10), 1.0}, + {execute.Time(20), 2.0}, + {execute.Time(30), 3.0}, + {execute.Time(40), 4.0}, + {execute.Time(50), 5.0}, + {execute.Time(60), 6.0}, + {execute.Time(70), 7.0}, + {execute.Time(80), 8.0}, + {execute.Time(90), 9.0}, + }, + }}, + want: func(b execute.Bounds) []*executetest.Block { + return []*executetest.Block{{ + Bnds: b, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: 
execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(0), 0.0}, + }, + }} + }, + }, + { + name: "multiple blocks", + bounds: execute.Bounds{ + Start: 0, + Stop: 200, + }, + data: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(0), 0.0}, + {execute.Time(10), 1.0}, + {execute.Time(20), 2.0}, + {execute.Time(30), 3.0}, + {execute.Time(40), 4.0}, + {execute.Time(50), 5.0}, + {execute.Time(60), 6.0}, + {execute.Time(70), 7.0}, + {execute.Time(80), 8.0}, + {execute.Time(90), 9.0}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 100, + Stop: 200, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(100), 10.0}, + {execute.Time(110), 11.0}, + {execute.Time(120), 12.0}, + {execute.Time(130), 13.0}, + {execute.Time(140), 14.0}, + {execute.Time(150), 15.0}, + {execute.Time(160), 16.0}, + {execute.Time(170), 17.0}, + {execute.Time(180), 18.0}, + {execute.Time(190), 19.0}, + }, + }, + }, + want: func(b execute.Bounds) []*executetest.Block { + return []*executetest.Block{{ + Bnds: b, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + }, + Data: [][]interface{}{ + {execute.Time(100), 0.0}, + {execute.Time(200), 10.0}, + }, + }} + }, + }, + { + name: "multiple blocks with tags and useRowTime", + useRowTime: true, + bounds: execute.Bounds{ + Start: 0, + Stop: 200, + }, + data: []*executetest.Block{ + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 100, + }, 
+ ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(0), 4.0, "a", "x"}, + {execute.Time(10), 3.0, "a", "y"}, + {execute.Time(20), 6.0, "a", "x"}, + {execute.Time(30), 3.0, "a", "y"}, + {execute.Time(40), 1.0, "a", "x"}, + {execute.Time(50), 4.0, "a", "y"}, + {execute.Time(60), 7.0, "a", "x"}, + {execute.Time(70), 7.0, "a", "y"}, + {execute.Time(80), 2.0, "a", "x"}, + {execute.Time(90), 7.0, "a", "y"}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 0, + Stop: 100, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(0), 3.3, "b", "x"}, + {execute.Time(10), 5.3, "b", "y"}, + {execute.Time(20), 2.3, "b", "x"}, + {execute.Time(30), 7.3, "b", "y"}, + {execute.Time(40), 4.3, "b", "x"}, + {execute.Time(50), 6.3, "b", "y"}, + {execute.Time(60), 6.3, "b", "x"}, + {execute.Time(70), 5.3, "b", "y"}, + {execute.Time(80), 8.3, "b", "x"}, + {execute.Time(90), 1.3, "b", "y"}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 100, + Stop: 200, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(100), 14.0, "a", "y"}, + 
{execute.Time(110), 13.0, "a", "x"}, + {execute.Time(120), 17.0, "a", "y"}, + {execute.Time(130), 13.0, "a", "x"}, + {execute.Time(140), 14.0, "a", "y"}, + {execute.Time(150), 14.0, "a", "x"}, + {execute.Time(160), 11.0, "a", "y"}, + {execute.Time(170), 15.0, "a", "x"}, + {execute.Time(180), 12.0, "a", "y"}, + {execute.Time(190), 14.0, "a", "x"}, + }, + }, + { + Bnds: execute.Bounds{ + Start: 100, + Stop: 200, + }, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(100), 12.3, "b", "y"}, + {execute.Time(110), 11.3, "b", "x"}, + {execute.Time(120), 14.3, "b", "y"}, + {execute.Time(130), 15.3, "b", "x"}, + {execute.Time(140), 14.3, "b", "y"}, + {execute.Time(150), 13.3, "b", "x"}, + {execute.Time(160), 16.3, "b", "y"}, + {execute.Time(170), 13.3, "b", "x"}, + {execute.Time(180), 12.3, "b", "y"}, + {execute.Time(190), 17.3, "b", "x"}, + }, + }, + }, + want: func(b execute.Bounds) []*executetest.Block { + return []*executetest.Block{ + { + Bnds: b, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + {Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(0), 4.0, "a", "x"}, + {execute.Time(100), 14.0, "a", "y"}, + }, + }, + { + Bnds: b, + ColMeta: []execute.ColMeta{ + {Label: "_time", Type: execute.TTime, Kind: execute.TimeColKind}, + {Label: "_value", Type: execute.TFloat, Kind: execute.ValueColKind}, + {Label: "t1", Type: execute.TString, Kind: execute.TagColKind, Common: true}, + 
{Label: "t2", Type: execute.TString, Kind: execute.TagColKind, Common: false}, + }, + Data: [][]interface{}{ + {execute.Time(0), 3.3, "b", "x"}, + {execute.Time(100), 12.3, "b", "y"}, + }, + }, + } + }, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + d := executetest.NewDataset(executetest.RandomDatasetID()) + c := execute.NewBlockBuilderCache(executetest.UnlimitedAllocator) + c.SetTriggerSpec(execute.DefaultTriggerSpec) + + selector := execute.NewIndexSelectorTransformation(d, c, tc.bounds, new(functions.FirstSelector), "_value", tc.useRowTime) + + parentID := executetest.RandomDatasetID() + for _, b := range tc.data { + if err := selector.Process(parentID, b); err != nil { + t.Fatal(err) + } + } + + want := tc.want(tc.bounds) + got := executetest.BlocksFromCache(c) + + sort.Sort(executetest.SortedBlocks(got)) + sort.Sort(executetest.SortedBlocks(want)) + + if !cmp.Equal(want, got, cmpopts.EquateNaNs()) { + t.Errorf("unexpected blocks -want/+got\n%s", cmp.Diff(want, got)) + } + }) + } +} diff --git a/vendor/github.com/influxdata/ifql/query/execute/source.go b/vendor/github.com/influxdata/ifql/query/execute/source.go new file mode 100644 index 000000000..c41c3763f --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/source.go @@ -0,0 +1,122 @@ +package execute + +import ( + "context" + "fmt" + "log" + + "github.com/influxdata/ifql/query/plan" + "github.com/opentracing/opentracing-go" +) + +type Node interface { + AddTransformation(t Transformation) +} + +type Source interface { + Node + Run(ctx context.Context) +} + +type CreateSource func(spec plan.ProcedureSpec, id DatasetID, sr StorageReader, ctx Administration) Source + +var procedureToSource = make(map[plan.ProcedureKind]CreateSource) + +func RegisterSource(k plan.ProcedureKind, c CreateSource) { + if procedureToSource[k] != nil { + panic(fmt.Errorf("duplicate registration for source with procedure kind %v", k)) + } + procedureToSource[k] = c +} + +// 
storageSource performs storage reads +type storageSource struct { + id DatasetID + reader StorageReader + readSpec ReadSpec + window Window + bounds Bounds + + ts []Transformation + + currentTime Time +} + +func NewStorageSource(id DatasetID, r StorageReader, readSpec ReadSpec, bounds Bounds, w Window, currentTime Time) Source { + return &storageSource{ + id: id, + reader: r, + readSpec: readSpec, + bounds: bounds, + window: w, + currentTime: currentTime, + } +} + +func (s *storageSource) AddTransformation(t Transformation) { + s.ts = append(s.ts, t) +} + +func (s *storageSource) Run(ctx context.Context) { + err := s.run(ctx) + for _, t := range s.ts { + t.Finish(s.id, err) + } +} +func (s *storageSource) run(ctx context.Context) error { + + var trace map[string]string + if span := opentracing.SpanFromContext(ctx); span != nil { + trace = make(map[string]string) + span = opentracing.StartSpan("storage_source.run", opentracing.ChildOf(span.Context())) + _ = opentracing.GlobalTracer().Inject(span.Context(), opentracing.TextMap, opentracing.TextMapCarrier(trace)) + } + + //TODO(nathanielc): Pass through context to actual network I/O. + for blocks, mark, ok := s.Next(ctx, trace); ok; blocks, mark, ok = s.Next(ctx, trace) { + err := blocks.Do(func(b Block) error { + for _, t := range s.ts { + if err := t.Process(s.id, b); err != nil { + return err + } + //TODO(nathanielc): Also add mechanism to send UpdateProcessingTime calls, when no data is arriving. + // This is probably not needed for this source, but other sources should do so. 
+ if err := t.UpdateProcessingTime(s.id, Now()); err != nil { + return err + } + } + return nil + }) + if err != nil { + return err + } + for _, t := range s.ts { + if err := t.UpdateWatermark(s.id, mark); err != nil { + return err + } + } + } + return nil +} + +func (s *storageSource) Next(ctx context.Context, trace map[string]string) (BlockIterator, Time, bool) { + start := s.currentTime - Time(s.window.Period) + stop := s.currentTime + + s.currentTime = s.currentTime + Time(s.window.Every) + if stop > s.bounds.Stop { + return nil, 0, false + } + bi, err := s.reader.Read( + ctx, + trace, + s.readSpec, + start, + stop, + ) + if err != nil { + log.Println("E!", err) + return nil, 0, false + } + return bi, stop, true +} diff --git a/vendor/github.com/influxdata/ifql/query/execute/storage.go b/vendor/github.com/influxdata/ifql/query/execute/storage.go new file mode 100644 index 000000000..ffaf7aef6 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/storage.go @@ -0,0 +1,1044 @@ +package execute + +import ( + "bytes" + "context" + "fmt" + "io" + "strings" + + "github.com/influxdata/ifql/ast" + "github.com/influxdata/ifql/query/execute/storage" + "github.com/influxdata/ifql/semantic" + "github.com/influxdata/yarpc" + "github.com/pkg/errors" +) + +type StorageReader interface { + Read(ctx context.Context, trace map[string]string, rs ReadSpec, start, stop Time) (BlockIterator, error) + Close() +} + +type ReadSpec struct { + RAMLimit uint64 + Database string + Hosts []string + Predicate *semantic.FunctionExpression + PointsLimit int64 + SeriesLimit int64 + SeriesOffset int64 + Descending bool + + AggregateMethod string + + // OrderByTime indicates that series reads should produce all + // series for a time before producing any series for a larger time. + // By default this is false meaning all values of time are produced for a given series, + // before any values are produced from the next series. 
+ OrderByTime bool + // MergeAll indicates that all series should be merged into a single group + MergeAll bool + // GroupKeys is the list of dimensions along which to group + GroupKeys []string + // GroupExcept is the list of dimensions along which to not group + GroupExcept []string + // GroupKeep is the list of tags to keep but not group by. + GroupKeep []string +} + +func NewStorageReader(hosts []string) (StorageReader, error) { + if len(hosts) == 0 { + return nil, errors.New("must provide at least one storage host") + } + conns := make([]connection, len(hosts)) + for i, h := range hosts { + conn, err := yarpc.Dial(h) + if err != nil { + return nil, err + } + conns[i] = connection{ + host: h, + conn: conn, + client: storage.NewStorageClient(conn), + } + } + return &storageReader{ + conns: conns, + }, nil +} + +type storageReader struct { + conns []connection +} + +type connection struct { + host string + conn *yarpc.ClientConn + client storage.StorageClient +} + +func (sr *storageReader) Read(ctx context.Context, trace map[string]string, readSpec ReadSpec, start, stop Time) (BlockIterator, error) { + var predicate *storage.Predicate + if readSpec.Predicate != nil { + p, err := ToStoragePredicate(readSpec.Predicate) + if err != nil { + return nil, err + } + predicate = p + } + + bi := &storageBlockIterator{ + ctx: ctx, + trace: trace, + bounds: Bounds{ + Start: start, + Stop: stop, + }, + conns: sr.conns, + readSpec: readSpec, + predicate: predicate, + } + return bi, nil +} + +func (sr *storageReader) Close() { + for _, conn := range sr.conns { + _ = conn.conn.Close() + } +} + +type storageBlockIterator struct { + ctx context.Context + trace map[string]string + bounds Bounds + conns []connection + readSpec ReadSpec + predicate *storage.Predicate +} + +func (bi *storageBlockIterator) Do(f func(Block) error) error { + // Setup read request + var req storage.ReadRequest + req.Database = bi.readSpec.Database + req.Predicate = bi.predicate + req.Descending = 
bi.readSpec.Descending + req.TimestampRange.Start = int64(bi.bounds.Start) + req.TimestampRange.End = int64(bi.bounds.Stop) + req.Grouping = bi.readSpec.GroupKeys + + req.SeriesLimit = uint64(bi.readSpec.SeriesLimit) + req.PointsLimit = uint64(bi.readSpec.PointsLimit) + req.SeriesOffset = uint64(bi.readSpec.SeriesOffset) + req.Trace = bi.trace + + if agg, err := determineAggregateMethod(bi.readSpec.AggregateMethod); err != nil { + return err + } else if agg != storage.AggregateTypeNone { + req.Aggregate = &storage.Aggregate{Type: agg} + } + + streams := make([]*streamState, 0, len(bi.conns)) + for _, c := range bi.conns { + if len(bi.readSpec.Hosts) > 0 { + // Filter down to only hosts provided + found := false + for _, h := range bi.readSpec.Hosts { + if c.host == h { + found = true + break + } + } + if !found { + continue + } + } + stream, err := c.client.Read(bi.ctx, &req) + if err != nil { + return err + } + streams = append(streams, &streamState{ + stream: stream, + readSpec: &bi.readSpec, + }) + } + ms := &mergedStreams{ + streams: streams, + } + + for ms.more() { + if p := ms.peek(); readFrameType(p) != seriesType { + //This means the consumer didn't read all the data off the block + return errors.New("internal error: short read") + } + frame := ms.next() + s := frame.GetSeries() + typ := convertDataType(s.DataType) + tags, keptTags := bi.determineBlockTags(s) + k := appendSeriesKey(nil, s, &bi.readSpec) + block := newStorageBlock(bi.bounds, tags, keptTags, k, ms, &bi.readSpec, typ) + + if err := f(block); err != nil { + // TODO(nathanielc): Close streams since we have abandoned the request + return err + } + // Wait until the block has been read. 
+ block.wait() + } + return nil +} + +func determineAggregateMethod(agg string) (storage.Aggregate_AggregateType, error) { + if agg == "" { + return storage.AggregateTypeNone, nil + } + + if t, ok := storage.Aggregate_AggregateType_value[strings.ToUpper(agg)]; ok { + return storage.Aggregate_AggregateType(t), nil + } + return 0, fmt.Errorf("unknown aggregate type %q", agg) +} +func convertDataType(t storage.ReadResponse_DataType) DataType { + switch t { + case storage.DataTypeFloat: + return TFloat + case storage.DataTypeInteger: + return TInt + case storage.DataTypeUnsigned: + return TUInt + case storage.DataTypeBoolean: + return TBool + case storage.DataTypeString: + return TString + default: + return TInvalid + } +} + +func (bi *storageBlockIterator) determineBlockTags(s *storage.ReadResponse_SeriesFrame) (tags, keptTags Tags) { + if len(bi.readSpec.GroupKeys) > 0 { + tags = make(Tags, len(bi.readSpec.GroupKeys)) + for _, key := range bi.readSpec.GroupKeys { + for _, tag := range s.Tags { + if string(tag.Key) == key { + tags[key] = string(tag.Value) + break + } + } + } + if len(bi.readSpec.GroupKeep) > 0 { + keptTags = make(Tags, len(bi.readSpec.GroupKeep)) + for _, key := range bi.readSpec.GroupKeep { + for _, tag := range s.Tags { + if string(tag.Key) == key { + keptTags[key] = string(tag.Value) + break + } + } + } + } + } else if len(bi.readSpec.GroupExcept) > 0 { + tags = make(Tags, len(s.Tags)-len(bi.readSpec.GroupExcept)) + keptTags = make(Tags, len(bi.readSpec.GroupKeep)) + TAGS: + for _, t := range s.Tags { + k := string(t.Key) + for _, key := range bi.readSpec.GroupKeep { + if k == key { + keptTags[key] = string(t.Value) + continue TAGS + } + } + for _, key := range bi.readSpec.GroupExcept { + if k == key { + continue TAGS + } + } + tags[k] = string(t.Value) + } + } else if !bi.readSpec.MergeAll { + tags = make(Tags, len(s.Tags)) + for _, t := range s.Tags { + tags[string(t.Key)] = string(t.Value) + } + } else { + keptTags = make(Tags, 
len(bi.readSpec.GroupKeep)) + for _, t := range s.Tags { + k := string(t.Key) + for _, key := range bi.readSpec.GroupKeep { + if k == key { + keptTags[key] = string(t.Value) + } + } + } + } + return +} + +func appendSeriesKey(b key, s *storage.ReadResponse_SeriesFrame, readSpec *ReadSpec) key { + appendTag := func(t storage.Tag) { + b = append(b, t.Key...) + b = append(b, '=') + b = append(b, t.Value...) + } + if len(readSpec.GroupKeys) > 0 { + for i, key := range readSpec.GroupKeys { + if i != 0 { + b = append(b, ',') + } + for _, tag := range s.Tags { + if string(tag.Key) == key { + appendTag(tag) + break + } + } + } + } else if len(readSpec.GroupExcept) > 0 { + i := 0 + TAGS: + for _, t := range s.Tags { + k := string(t.Key) + for _, key := range readSpec.GroupKeep { + if k == key { + continue TAGS + } + } + for _, key := range readSpec.GroupExcept { + if k == key { + continue TAGS + } + } + if i != 0 { + b = append(b, ',') + } + appendTag(t) + i++ + } + } else if !readSpec.MergeAll { + for i, t := range s.Tags { + if i != 0 { + b = append(b, ',') + } + appendTag(t) + } + } + return b +} + +// storageBlock implement OneTimeBlock as it can only be read once. +// Since it can only be read once it is also a ValueIterator for itself. +type storageBlock struct { + bounds Bounds + tags Tags + tagKey key + // keptTags is a set of non common tags. + keptTags Tags + // colMeta always has at least two columns, where the first is a TimeCol + // and the second is any Value column. + colMeta []ColMeta + + readSpec *ReadSpec + + done chan struct{} + + ms *mergedStreams + + // The index of the column to iterate + col int + // colBufs are the buffers for the given columns. 
+	colBufs [2]interface{}
+
+	// reusable buffer for the time column
+	timeBuf []Time
+
+	// reusable buffers for the different types of values
+	boolBuf   []bool
+	intBuf    []int64
+	uintBuf   []uint64
+	floatBuf  []float64
+	stringBuf []string
+}
+
+func newStorageBlock(bounds Bounds, tags, keptTags Tags, tagKey key, ms *mergedStreams, readSpec *ReadSpec, typ DataType) *storageBlock {
+	colMeta := make([]ColMeta, 2, 2+len(tags)+len(keptTags))
+	colMeta[0] = TimeCol
+	colMeta[1] = ColMeta{
+		Label: DefaultValueColLabel,
+		Type:  typ,
+		Kind:  ValueColKind,
+	}
+
+	for _, k := range tags.Keys() {
+		colMeta = append(colMeta, ColMeta{
+			Label:  k,
+			Type:   TString,
+			Kind:   TagColKind,
+			Common: true,
+		})
+	}
+	for _, k := range keptTags.Keys() {
+		colMeta = append(colMeta, ColMeta{
+			Label:  k,
+			Type:   TString,
+			Kind:   TagColKind,
+			Common: false,
+		})
+	}
+	return &storageBlock{
+		bounds:   bounds,
+		tagKey:   tagKey,
+		tags:     tags,
+		keptTags: keptTags,
+		colMeta:  colMeta,
+		readSpec: readSpec,
+		ms:       ms,
+		done:     make(chan struct{}),
+	}
+}
+
+func (b *storageBlock) RefCount(n int) {
+	//TODO(nathanielc): Have the storageBlock consume the Allocator,
+	// once we have zero-copy serialization over the network
+}
+
+func (b *storageBlock) wait() {
+	<-b.done
+}
+
+// onetime satisfies the OneTimeBlock interface since this block may only be read once.
+func (b *storageBlock) onetime() {} + +func (b *storageBlock) Bounds() Bounds { + return b.bounds +} +func (b *storageBlock) Tags() Tags { + return b.tags +} +func (b *storageBlock) Cols() []ColMeta { + return b.colMeta +} + +func (b *storageBlock) Col(c int) ValueIterator { + b.col = c + return b +} + +func (b *storageBlock) Times() ValueIterator { + return b.Col(0) +} +func (b *storageBlock) Values() (ValueIterator, error) { + return b.Col(1), nil +} + +func (b *storageBlock) DoBool(f func([]bool, RowReader)) { + checkColType(b.colMeta[b.col], TBool) + for b.advance() { + f(b.colBufs[b.col].([]bool), b) + } + close(b.done) +} +func (b *storageBlock) DoInt(f func([]int64, RowReader)) { + checkColType(b.colMeta[b.col], TInt) + for b.advance() { + f(b.colBufs[b.col].([]int64), b) + } + close(b.done) +} +func (b *storageBlock) DoUInt(f func([]uint64, RowReader)) { + checkColType(b.colMeta[b.col], TUInt) + for b.advance() { + f(b.colBufs[b.col].([]uint64), b) + } + close(b.done) +} +func (b *storageBlock) DoFloat(f func([]float64, RowReader)) { + checkColType(b.colMeta[b.col], TFloat) + for b.advance() { + f(b.colBufs[b.col].([]float64), b) + } + close(b.done) +} +func (b *storageBlock) DoString(f func([]string, RowReader)) { + defer close(b.done) + + meta := b.colMeta[b.col] + checkColType(meta, TString) + if meta.IsTag() { + // Handle creating a strs slice that can be ranged according to actual data received. + var strs []string + var value string + if meta.Common { + value = b.tags[meta.Label] + } else { + value = b.keptTags[meta.Label] + } + for b.advance() { + l := len(b.timeBuf) + if cap(strs) < l { + strs = make([]string, l) + for i := range strs { + strs[i] = value + } + } else if len(strs) < l { + new := strs[len(strs)-1 : l] + for i := range new { + new[i] = value + } + strs = strs[0:l] + } else { + strs = strs[0:l] + } + f(strs, b) + } + return + } + // Do ordinary range over column data. 
+ for b.advance() { + f(b.colBufs[b.col].([]string), b) + } +} +func (b *storageBlock) DoTime(f func([]Time, RowReader)) { + checkColType(b.colMeta[b.col], TTime) + for b.advance() { + f(b.colBufs[b.col].([]Time), b) + } + close(b.done) +} + +func (b *storageBlock) AtBool(i, j int) bool { + checkColType(b.colMeta[j], TBool) + return b.colBufs[j].([]bool)[i] +} +func (b *storageBlock) AtInt(i, j int) int64 { + checkColType(b.colMeta[j], TInt) + return b.colBufs[j].([]int64)[i] +} +func (b *storageBlock) AtUInt(i, j int) uint64 { + checkColType(b.colMeta[j], TUInt) + return b.colBufs[j].([]uint64)[i] +} +func (b *storageBlock) AtFloat(i, j int) float64 { + checkColType(b.colMeta[j], TFloat) + return b.colBufs[j].([]float64)[i] +} +func (b *storageBlock) AtString(i, j int) string { + meta := b.colMeta[j] + checkColType(meta, TString) + if meta.IsTag() { + if meta.Common { + return b.tags[meta.Label] + } + return b.keptTags[meta.Label] + } + return b.colBufs[j].([]string)[i] +} +func (b *storageBlock) AtTime(i, j int) Time { + checkColType(b.colMeta[j], TTime) + return b.colBufs[j].([]Time)[i] +} + +func (b *storageBlock) advance() bool { + for b.ms.more() { + //reset buffers + b.timeBuf = b.timeBuf[0:0] + b.boolBuf = b.boolBuf[0:0] + b.intBuf = b.intBuf[0:0] + b.uintBuf = b.uintBuf[0:0] + b.stringBuf = b.stringBuf[0:0] + b.floatBuf = b.floatBuf[0:0] + + switch p := b.ms.peek(); readFrameType(p) { + case seriesType: + if b.ms.key().Compare(b.tagKey) != 0 { + // We have reached the end of data for this block + return false + } + s := p.GetSeries() + // Populate keptTags with new series values + b.keptTags = make(Tags, len(b.readSpec.GroupKeep)) + for _, t := range s.Tags { + k := string(t.Key) + for _, key := range b.readSpec.GroupKeep { + if k == key { + b.keptTags[key] = string(t.Value) + } + } + } + // Advance to next frame + b.ms.next() + case boolPointsType: + if b.colMeta[1].Type != TBool { + // TODO: Add error handling + // Type changed, + return false + } + // 
read next frame
+			frame := b.ms.next()
+			p := frame.GetBooleanPoints()
+			l := len(p.Timestamps)
+			if l > cap(b.timeBuf) {
+				b.timeBuf = make([]Time, l)
+			} else {
+				b.timeBuf = b.timeBuf[:l]
+			}
+			if l > cap(b.boolBuf) {
+				b.boolBuf = make([]bool, l)
+			} else {
+				b.boolBuf = b.boolBuf[:l]
+			}
+
+			for i, c := range p.Timestamps {
+				b.timeBuf[i] = Time(c)
+				b.boolBuf[i] = p.Values[i]
+			}
+			b.colBufs[0] = b.timeBuf
+			b.colBufs[1] = b.boolBuf
+			return true
+		case intPointsType:
+			if b.colMeta[1].Type != TInt {
+				// TODO: Add error handling
+				// Type changed,
+				return false
+			}
+			// read next frame
+			frame := b.ms.next()
+			p := frame.GetIntegerPoints()
+			l := len(p.Timestamps)
+			if l > cap(b.timeBuf) {
+				b.timeBuf = make([]Time, l)
+			} else {
+				b.timeBuf = b.timeBuf[:l]
+			}
+			if l > cap(b.intBuf) {
+				b.intBuf = make([]int64, l)
+			} else {
+				b.intBuf = b.intBuf[:l]
+			}
+
+			for i, c := range p.Timestamps {
+				b.timeBuf[i] = Time(c)
+				b.intBuf[i] = p.Values[i]
+			}
+			b.colBufs[0] = b.timeBuf
+			b.colBufs[1] = b.intBuf
+			return true
+		case uintPointsType:
+			if b.colMeta[1].Type != TUInt {
+				// TODO: Add error handling
+				// Type changed,
+				return false
+			}
+			// read next frame
+			frame := b.ms.next()
+			p := frame.GetUnsignedPoints()
+			l := len(p.Timestamps)
+			if l > cap(b.timeBuf) {
+				b.timeBuf = make([]Time, l)
+			} else {
+				b.timeBuf = b.timeBuf[:l]
+			}
+			if l > cap(b.uintBuf) {
+				b.uintBuf = make([]uint64, l)
+			} else {
+				b.uintBuf = b.uintBuf[:l]
+			}
+
+			for i, c := range p.Timestamps {
+				b.timeBuf[i] = Time(c)
+				b.uintBuf[i] = p.Values[i]
+			}
+			b.colBufs[0] = b.timeBuf
+			b.colBufs[1] = b.uintBuf
+			return true
+		case floatPointsType:
+			if b.colMeta[1].Type != TFloat {
+				// TODO: Add error handling
+				// Type changed,
+				return false
+			}
+			// read next frame
+			frame := b.ms.next()
+			p := frame.GetFloatPoints()
+
+			l := len(p.Timestamps)
+			if l > cap(b.timeBuf) {
+				b.timeBuf = make([]Time, l)
+			} else {
+				b.timeBuf = b.timeBuf[:l]
+			}
+			if l > cap(b.floatBuf) {
+				b.floatBuf = 
make([]float64, l) + } else { + b.floatBuf = b.floatBuf[:l] + } + + for i, c := range p.Timestamps { + b.timeBuf[i] = Time(c) + b.floatBuf[i] = p.Values[i] + } + b.colBufs[0] = b.timeBuf + b.colBufs[1] = b.floatBuf + return true + case stringPointsType: + if b.colMeta[1].Type != TString { + // TODO: Add error handling + // Type changed, + return false + } + // read next frame + frame := b.ms.next() + p := frame.GetStringPoints() + + l := len(p.Timestamps) + if l > cap(b.timeBuf) { + b.timeBuf = make([]Time, l) + } else { + b.timeBuf = b.timeBuf[:l] + } + if l > cap(b.stringBuf) { + b.stringBuf = make([]string, l) + } else { + b.stringBuf = b.stringBuf[:l] + } + + for i, c := range p.Timestamps { + b.timeBuf[i] = Time(c) + b.stringBuf[i] = p.Values[i] + } + b.colBufs[0] = b.timeBuf + b.colBufs[1] = b.stringBuf + return true + } + } + return false +} + +type streamState struct { + stream storage.Storage_ReadClient + rep storage.ReadResponse + currentKey key + readSpec *ReadSpec + finished bool +} + +func (s *streamState) peek() storage.ReadResponse_Frame { + return s.rep.Frames[0] +} + +func (s *streamState) more() bool { + if s.finished { + return false + } + if len(s.rep.Frames) > 0 { + return true + } + if err := s.stream.RecvMsg(&s.rep); err != nil { + s.finished = true + if err == io.EOF { + // We are done + return false + } + //TODO add proper error handling + return false + } + if len(s.rep.Frames) == 0 { + return false + } + s.computeKey() + return true +} + +func (s *streamState) key() key { + return s.currentKey +} + +func (s *streamState) computeKey() { + // Determine new currentKey + if p := s.peek(); readFrameType(p) == seriesType { + series := p.GetSeries() + s.currentKey = appendSeriesKey(s.currentKey[0:0], series, s.readSpec) + } +} +func (s *streamState) next() storage.ReadResponse_Frame { + frame := s.rep.Frames[0] + s.rep.Frames = s.rep.Frames[1:] + if len(s.rep.Frames) > 0 { + s.computeKey() + } + return frame +} + +type key []byte + +// Compare 
keys, a nil key is always greater.
+func (k key) Compare(o key) int {
+	if k == nil && o == nil {
+		return 0
+	}
+	if k == nil {
+		return 1
+	}
+	if o == nil {
+		return -1
+	}
+	return bytes.Compare([]byte(k), []byte(o))
+}
+
+type mergedStreams struct {
+	streams    []*streamState
+	currentKey key
+	i          int
+}
+
+func (s *mergedStreams) key() key {
+	if len(s.streams) == 1 {
+		return s.streams[0].key()
+	}
+	return s.currentKey
+}
+func (s *mergedStreams) peek() storage.ReadResponse_Frame {
+	return s.streams[s.i].peek()
+}
+
+func (s *mergedStreams) next() storage.ReadResponse_Frame {
+	return s.streams[s.i].next()
+}
+
+func (s *mergedStreams) more() bool {
+	// Optimize for the case of just one stream
+	if len(s.streams) == 1 {
+		return s.streams[0].more()
+	}
+	if s.i < 0 {
+		return false
+	}
+	if s.currentKey == nil {
+		return s.determineNewKey()
+	}
+	if s.streams[s.i].more() {
+		cmp := s.streams[s.i].key().Compare(s.currentKey)
+		switch cmp {
+		case 0:
+			return true
+		case 1:
+			return s.advance()
+		case -1:
+			panic(errors.New("found smaller key, this should not be possible"))
+		}
+	}
+	return s.advance()
+}
+
+func (s *mergedStreams) advance() bool {
+	s.i++
+	if s.i == len(s.streams) {
+		if !s.determineNewKey() {
+			// no new data on any stream
+			return false
+		}
+	}
+	return s.more()
+}
+
+func (s *mergedStreams) determineNewKey() bool {
+	minIdx := -1
+	var minKey key
+	for i, stream := range s.streams {
+		if !stream.more() {
+			continue
+		}
+		k := stream.key()
+		if k.Compare(minKey) < 0 {
+			minIdx = i
+			minKey = k
+		}
+	}
+	l := len(minKey)
+	if cap(s.currentKey) < l {
+		s.currentKey = make(key, l)
+	} else {
+		s.currentKey = s.currentKey[:l]
+	}
+	copy(s.currentKey, minKey)
+	s.i = minIdx
+	return s.i >= 0
+}
+
+type frameType int
+
+const (
+	seriesType frameType = iota
+	boolPointsType
+	intPointsType
+	uintPointsType
+	floatPointsType
+	stringPointsType
+)
+
+func readFrameType(frame storage.ReadResponse_Frame) frameType {
+	switch frame.Data.(type) {
+	
case *storage.ReadResponse_Frame_Series: + return seriesType + case *storage.ReadResponse_Frame_BooleanPoints: + return boolPointsType + case *storage.ReadResponse_Frame_IntegerPoints: + return intPointsType + case *storage.ReadResponse_Frame_UnsignedPoints: + return uintPointsType + case *storage.ReadResponse_Frame_FloatPoints: + return floatPointsType + case *storage.ReadResponse_Frame_StringPoints: + return stringPointsType + default: + panic(fmt.Errorf("unknown read response frame type: %T", frame.Data)) + } +} + +func ToStoragePredicate(f *semantic.FunctionExpression) (*storage.Predicate, error) { + if len(f.Params) != 1 { + return nil, errors.New("storage predicate functions must have exactly one parameter") + } + + root, err := toStoragePredicate(f.Body.(semantic.Expression), f.Params[0].Key.Name) + if err != nil { + return nil, err + } + + return &storage.Predicate{ + Root: root, + }, nil +} + +func toStoragePredicate(n semantic.Expression, objectName string) (*storage.Node, error) { + switch n := n.(type) { + case *semantic.LogicalExpression: + left, err := toStoragePredicate(n.Left, objectName) + if err != nil { + return nil, errors.Wrap(err, "left hand side") + } + right, err := toStoragePredicate(n.Right, objectName) + if err != nil { + return nil, errors.Wrap(err, "right hand side") + } + children := []*storage.Node{left, right} + switch n.Operator { + case ast.AndOperator: + return &storage.Node{ + NodeType: storage.NodeTypeLogicalExpression, + Value: &storage.Node_Logical_{Logical: storage.LogicalAnd}, + Children: children, + }, nil + case ast.OrOperator: + return &storage.Node{ + NodeType: storage.NodeTypeLogicalExpression, + Value: &storage.Node_Logical_{Logical: storage.LogicalOr}, + Children: children, + }, nil + default: + return nil, fmt.Errorf("unknown logical operator %v", n.Operator) + } + case *semantic.BinaryExpression: + left, err := toStoragePredicate(n.Left, objectName) + if err != nil { + return nil, errors.Wrap(err, "left hand side") 
+ } + right, err := toStoragePredicate(n.Right, objectName) + if err != nil { + return nil, errors.Wrap(err, "right hand side") + } + children := []*storage.Node{left, right} + op, err := toComparisonOperator(n.Operator) + if err != nil { + return nil, err + } + return &storage.Node{ + NodeType: storage.NodeTypeComparisonExpression, + Value: &storage.Node_Comparison_{Comparison: op}, + Children: children, + }, nil + case *semantic.StringLiteral: + return &storage.Node{ + NodeType: storage.NodeTypeLiteral, + Value: &storage.Node_StringValue{ + StringValue: n.Value, + }, + }, nil + case *semantic.IntegerLiteral: + return &storage.Node{ + NodeType: storage.NodeTypeLiteral, + Value: &storage.Node_IntegerValue{ + IntegerValue: n.Value, + }, + }, nil + case *semantic.BooleanLiteral: + return &storage.Node{ + NodeType: storage.NodeTypeLiteral, + Value: &storage.Node_BooleanValue{ + BooleanValue: n.Value, + }, + }, nil + case *semantic.FloatLiteral: + return &storage.Node{ + NodeType: storage.NodeTypeLiteral, + Value: &storage.Node_FloatValue{ + FloatValue: n.Value, + }, + }, nil + case *semantic.RegexpLiteral: + return &storage.Node{ + NodeType: storage.NodeTypeLiteral, + Value: &storage.Node_RegexValue{ + RegexValue: n.Value.String(), + }, + }, nil + case *semantic.MemberExpression: + // Sanity check that the object is the objectName identifier + if ident, ok := n.Object.(*semantic.IdentifierExpression); !ok || ident.Name != objectName { + return nil, fmt.Errorf("unknown object %q", n.Object) + } + if n.Property == "_value" { + return &storage.Node{ + NodeType: storage.NodeTypeFieldRef, + Value: &storage.Node_FieldRefValue{ + FieldRefValue: "_value", + }, + }, nil + } + return &storage.Node{ + NodeType: storage.NodeTypeTagRef, + Value: &storage.Node_TagRefValue{ + TagRefValue: n.Property, + }, + }, nil + case *semantic.DurationLiteral: + return nil, errors.New("duration literals not supported in storage predicates") + case *semantic.DateTimeLiteral: + return nil, 
errors.New("time literals not supported in storage predicates") + default: + return nil, fmt.Errorf("unsupported semantic expression type %T", n) + } +} + +func toComparisonOperator(o ast.OperatorKind) (storage.Node_Comparison, error) { + switch o { + case ast.EqualOperator: + return storage.ComparisonEqual, nil + case ast.NotEqualOperator: + return storage.ComparisonNotEqual, nil + case ast.RegexpMatchOperator: + return storage.ComparisonRegex, nil + case ast.NotRegexpMatchOperator: + return storage.ComparisonNotRegex, nil + case ast.StartsWithOperator: + return storage.ComparisonStartsWith, nil + case ast.LessThanOperator: + return storage.ComparisonLess, nil + case ast.LessThanEqualOperator: + return storage.ComparisonLessEqual, nil + case ast.GreaterThanOperator: + return storage.ComparisonGreater, nil + case ast.GreaterThanEqualOperator: + return storage.ComparisonGreaterEqual, nil + default: + return 0, fmt.Errorf("unknown operator %v", o) + } +} diff --git a/vendor/github.com/influxdata/ifql/query/execute/storage/predicate.pb.go b/vendor/github.com/influxdata/ifql/query/execute/storage/predicate.pb.go new file mode 100644 index 000000000..67f8847eb --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/storage/predicate.pb.go @@ -0,0 +1,1344 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: predicate.proto + +package storage + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +type Node_Type int32 + +const ( + NodeTypeLogicalExpression Node_Type = 0 + NodeTypeComparisonExpression Node_Type = 1 + NodeTypeParenExpression Node_Type = 2 + NodeTypeTagRef Node_Type = 3 + NodeTypeLiteral Node_Type = 4 + NodeTypeFieldRef Node_Type = 5 +) + +var Node_Type_name = map[int32]string{ + 0: "LOGICAL_EXPRESSION", + 1: "COMPARISON_EXPRESSION", + 2: "PAREN_EXPRESSION", + 3: "TAG_REF", + 4: "LITERAL", + 5: "FIELD_REF", +} +var Node_Type_value = map[string]int32{ + "LOGICAL_EXPRESSION": 0, + "COMPARISON_EXPRESSION": 1, + "PAREN_EXPRESSION": 2, + "TAG_REF": 3, + "LITERAL": 4, + "FIELD_REF": 5, +} + +func (x Node_Type) String() string { + return proto.EnumName(Node_Type_name, int32(x)) +} +func (Node_Type) EnumDescriptor() ([]byte, []int) { return fileDescriptorPredicate, []int{0, 0} } + +type Node_Comparison int32 + +const ( + ComparisonEqual Node_Comparison = 0 + ComparisonNotEqual Node_Comparison = 1 + ComparisonStartsWith Node_Comparison = 2 + ComparisonRegex Node_Comparison = 3 + ComparisonNotRegex Node_Comparison = 4 + ComparisonLess Node_Comparison = 5 + ComparisonLessEqual Node_Comparison = 6 + ComparisonGreater Node_Comparison = 7 + ComparisonGreaterEqual Node_Comparison = 8 +) + +var Node_Comparison_name = map[int32]string{ + 0: "EQUAL", + 1: "NOT_EQUAL", + 2: "STARTS_WITH", + 3: "REGEX", + 4: "NOT_REGEX", + 5: "LT", + 6: "LTE", + 7: "GT", + 8: "GTE", +} +var Node_Comparison_value = map[string]int32{ + "EQUAL": 0, + "NOT_EQUAL": 1, + "STARTS_WITH": 2, + "REGEX": 3, + "NOT_REGEX": 4, + "LT": 5, + "LTE": 6, + "GT": 7, + "GTE": 8, +} + +func (x Node_Comparison) String() string { + return proto.EnumName(Node_Comparison_name, int32(x)) +} +func (Node_Comparison) EnumDescriptor() ([]byte, []int) { return fileDescriptorPredicate, []int{0, 1} } + +// Logical operators apply to boolean values and combine to produce a single boolean result. 
+type Node_Logical int32 + +const ( + LogicalAnd Node_Logical = 0 + LogicalOr Node_Logical = 1 +) + +var Node_Logical_name = map[int32]string{ + 0: "AND", + 1: "OR", +} +var Node_Logical_value = map[string]int32{ + "AND": 0, + "OR": 1, +} + +func (x Node_Logical) String() string { + return proto.EnumName(Node_Logical_name, int32(x)) +} +func (Node_Logical) EnumDescriptor() ([]byte, []int) { return fileDescriptorPredicate, []int{0, 2} } + +type Node struct { + NodeType Node_Type `protobuf:"varint,1,opt,name=node_type,json=nodeType,proto3,enum=storage.Node_Type" json:"nodeType"` + Children []*Node `protobuf:"bytes,2,rep,name=children" json:"children,omitempty"` + // Types that are valid to be assigned to Value: + // *Node_StringValue + // *Node_BooleanValue + // *Node_IntegerValue + // *Node_UnsignedValue + // *Node_FloatValue + // *Node_RegexValue + // *Node_TagRefValue + // *Node_FieldRefValue + // *Node_Logical_ + // *Node_Comparison_ + Value isNode_Value `protobuf_oneof:"value"` +} + +func (m *Node) Reset() { *m = Node{} } +func (m *Node) String() string { return proto.CompactTextString(m) } +func (*Node) ProtoMessage() {} +func (*Node) Descriptor() ([]byte, []int) { return fileDescriptorPredicate, []int{0} } + +type isNode_Value interface { + isNode_Value() + MarshalTo([]byte) (int, error) + Size() int +} + +type Node_StringValue struct { + StringValue string `protobuf:"bytes,3,opt,name=string_value,json=stringValue,proto3,oneof"` +} +type Node_BooleanValue struct { + BooleanValue bool `protobuf:"varint,4,opt,name=bool_value,json=boolValue,proto3,oneof"` +} +type Node_IntegerValue struct { + IntegerValue int64 `protobuf:"varint,5,opt,name=int_value,json=intValue,proto3,oneof"` +} +type Node_UnsignedValue struct { + UnsignedValue uint64 `protobuf:"varint,6,opt,name=uint_value,json=uintValue,proto3,oneof"` +} +type Node_FloatValue struct { + FloatValue float64 `protobuf:"fixed64,7,opt,name=float_value,json=floatValue,proto3,oneof"` +} +type Node_RegexValue struct 
{ + RegexValue string `protobuf:"bytes,8,opt,name=regex_value,json=regexValue,proto3,oneof"` +} +type Node_TagRefValue struct { + TagRefValue string `protobuf:"bytes,9,opt,name=tag_ref_value,json=tagRefValue,proto3,oneof"` +} +type Node_FieldRefValue struct { + FieldRefValue string `protobuf:"bytes,10,opt,name=field_ref_value,json=fieldRefValue,proto3,oneof"` +} +type Node_Logical_ struct { + Logical Node_Logical `protobuf:"varint,11,opt,name=logical,proto3,enum=storage.Node_Logical,oneof"` +} +type Node_Comparison_ struct { + Comparison Node_Comparison `protobuf:"varint,12,opt,name=comparison,proto3,enum=storage.Node_Comparison,oneof"` +} + +func (*Node_StringValue) isNode_Value() {} +func (*Node_BooleanValue) isNode_Value() {} +func (*Node_IntegerValue) isNode_Value() {} +func (*Node_UnsignedValue) isNode_Value() {} +func (*Node_FloatValue) isNode_Value() {} +func (*Node_RegexValue) isNode_Value() {} +func (*Node_TagRefValue) isNode_Value() {} +func (*Node_FieldRefValue) isNode_Value() {} +func (*Node_Logical_) isNode_Value() {} +func (*Node_Comparison_) isNode_Value() {} + +func (m *Node) GetValue() isNode_Value { + if m != nil { + return m.Value + } + return nil +} + +func (m *Node) GetNodeType() Node_Type { + if m != nil { + return m.NodeType + } + return NodeTypeLogicalExpression +} + +func (m *Node) GetChildren() []*Node { + if m != nil { + return m.Children + } + return nil +} + +func (m *Node) GetStringValue() string { + if x, ok := m.GetValue().(*Node_StringValue); ok { + return x.StringValue + } + return "" +} + +func (m *Node) GetBooleanValue() bool { + if x, ok := m.GetValue().(*Node_BooleanValue); ok { + return x.BooleanValue + } + return false +} + +func (m *Node) GetIntegerValue() int64 { + if x, ok := m.GetValue().(*Node_IntegerValue); ok { + return x.IntegerValue + } + return 0 +} + +func (m *Node) GetUnsignedValue() uint64 { + if x, ok := m.GetValue().(*Node_UnsignedValue); ok { + return x.UnsignedValue + } + return 0 +} + +func (m *Node) 
GetFloatValue() float64 { + if x, ok := m.GetValue().(*Node_FloatValue); ok { + return x.FloatValue + } + return 0 +} + +func (m *Node) GetRegexValue() string { + if x, ok := m.GetValue().(*Node_RegexValue); ok { + return x.RegexValue + } + return "" +} + +func (m *Node) GetTagRefValue() string { + if x, ok := m.GetValue().(*Node_TagRefValue); ok { + return x.TagRefValue + } + return "" +} + +func (m *Node) GetFieldRefValue() string { + if x, ok := m.GetValue().(*Node_FieldRefValue); ok { + return x.FieldRefValue + } + return "" +} + +func (m *Node) GetLogical() Node_Logical { + if x, ok := m.GetValue().(*Node_Logical_); ok { + return x.Logical + } + return LogicalAnd +} + +func (m *Node) GetComparison() Node_Comparison { + if x, ok := m.GetValue().(*Node_Comparison_); ok { + return x.Comparison + } + return ComparisonEqual +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*Node) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Node_OneofMarshaler, _Node_OneofUnmarshaler, _Node_OneofSizer, []interface{}{ + (*Node_StringValue)(nil), + (*Node_BooleanValue)(nil), + (*Node_IntegerValue)(nil), + (*Node_UnsignedValue)(nil), + (*Node_FloatValue)(nil), + (*Node_RegexValue)(nil), + (*Node_TagRefValue)(nil), + (*Node_FieldRefValue)(nil), + (*Node_Logical_)(nil), + (*Node_Comparison_)(nil), + } +} + +func _Node_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Node) + // value + switch x := m.Value.(type) { + case *Node_StringValue: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.StringValue) + case *Node_BooleanValue: + t := uint64(0) + if x.BooleanValue { + t = 1 + } + _ = b.EncodeVarint(4<<3 | proto.WireVarint) + _ = b.EncodeVarint(t) + case *Node_IntegerValue: + _ = b.EncodeVarint(5<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.IntegerValue)) + 
case *Node_UnsignedValue: + _ = b.EncodeVarint(6<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.UnsignedValue)) + case *Node_FloatValue: + _ = b.EncodeVarint(7<<3 | proto.WireFixed64) + _ = b.EncodeFixed64(math.Float64bits(x.FloatValue)) + case *Node_RegexValue: + _ = b.EncodeVarint(8<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.RegexValue) + case *Node_TagRefValue: + _ = b.EncodeVarint(9<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.TagRefValue) + case *Node_FieldRefValue: + _ = b.EncodeVarint(10<<3 | proto.WireBytes) + _ = b.EncodeStringBytes(x.FieldRefValue) + case *Node_Logical_: + _ = b.EncodeVarint(11<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Logical)) + case *Node_Comparison_: + _ = b.EncodeVarint(12<<3 | proto.WireVarint) + _ = b.EncodeVarint(uint64(x.Comparison)) + case nil: + default: + return fmt.Errorf("Node.Value has unexpected type %T", x) + } + return nil +} + +func _Node_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Node) + switch tag { + case 3: // value.string_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Value = &Node_StringValue{x} + return true, err + case 4: // value.bool_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &Node_BooleanValue{x != 0} + return true, err + case 5: // value.int_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &Node_IntegerValue{int64(x)} + return true, err + case 6: // value.uint_value + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &Node_UnsignedValue{x} + return true, err + case 7: // value.float_value + if wire != proto.WireFixed64 { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeFixed64() + m.Value = 
&Node_FloatValue{math.Float64frombits(x)} + return true, err + case 8: // value.regex_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Value = &Node_RegexValue{x} + return true, err + case 9: // value.tag_ref_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Value = &Node_TagRefValue{x} + return true, err + case 10: // value.field_ref_value + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Value = &Node_FieldRefValue{x} + return true, err + case 11: // value.logical + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &Node_Logical_{Node_Logical(x)} + return true, err + case 12: // value.comparison + if wire != proto.WireVarint { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeVarint() + m.Value = &Node_Comparison_{Node_Comparison(x)} + return true, err + default: + return false, nil + } +} + +func _Node_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Node) + // value + switch x := m.Value.(type) { + case *Node_StringValue: + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.StringValue))) + n += len(x.StringValue) + case *Node_BooleanValue: + n += proto.SizeVarint(4<<3 | proto.WireVarint) + n += 1 + case *Node_IntegerValue: + n += proto.SizeVarint(5<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.IntegerValue)) + case *Node_UnsignedValue: + n += proto.SizeVarint(6<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.UnsignedValue)) + case *Node_FloatValue: + n += proto.SizeVarint(7<<3 | proto.WireFixed64) + n += 8 + case *Node_RegexValue: + n += proto.SizeVarint(8<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.RegexValue))) + n += len(x.RegexValue) + case *Node_TagRefValue: + n += proto.SizeVarint(9<<3 | 
proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.TagRefValue))) + n += len(x.TagRefValue) + case *Node_FieldRefValue: + n += proto.SizeVarint(10<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.FieldRefValue))) + n += len(x.FieldRefValue) + case *Node_Logical_: + n += proto.SizeVarint(11<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Logical)) + case *Node_Comparison_: + n += proto.SizeVarint(12<<3 | proto.WireVarint) + n += proto.SizeVarint(uint64(x.Comparison)) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type Predicate struct { + Root *Node `protobuf:"bytes,1,opt,name=root" json:"root,omitempty"` +} + +func (m *Predicate) Reset() { *m = Predicate{} } +func (m *Predicate) String() string { return proto.CompactTextString(m) } +func (*Predicate) ProtoMessage() {} +func (*Predicate) Descriptor() ([]byte, []int) { return fileDescriptorPredicate, []int{1} } + +func (m *Predicate) GetRoot() *Node { + if m != nil { + return m.Root + } + return nil +} + +func init() { + proto.RegisterType((*Node)(nil), "storage.Node") + proto.RegisterType((*Predicate)(nil), "storage.Predicate") + proto.RegisterEnum("storage.Node_Type", Node_Type_name, Node_Type_value) + proto.RegisterEnum("storage.Node_Comparison", Node_Comparison_name, Node_Comparison_value) + proto.RegisterEnum("storage.Node_Logical", Node_Logical_name, Node_Logical_value) +} +func (m *Node) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Node) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.NodeType != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintPredicate(dAtA, i, uint64(m.NodeType)) + } + if len(m.Children) > 0 { + for _, msg := range m.Children { + dAtA[i] = 0x12 + i++ + i = encodeVarintPredicate(dAtA, i, uint64(msg.Size())) + n, err := 
msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.Value != nil { + nn1, err := m.Value.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn1 + } + return i, nil +} + +func (m *Node_StringValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x1a + i++ + i = encodeVarintPredicate(dAtA, i, uint64(len(m.StringValue))) + i += copy(dAtA[i:], m.StringValue) + return i, nil +} +func (m *Node_BooleanValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x20 + i++ + if m.BooleanValue { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + return i, nil +} +func (m *Node_IntegerValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x28 + i++ + i = encodeVarintPredicate(dAtA, i, uint64(m.IntegerValue)) + return i, nil +} +func (m *Node_UnsignedValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x30 + i++ + i = encodeVarintPredicate(dAtA, i, uint64(m.UnsignedValue)) + return i, nil +} +func (m *Node_FloatValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x39 + i++ + i = encodeFixed64Predicate(dAtA, i, uint64(math.Float64bits(float64(m.FloatValue)))) + return i, nil +} +func (m *Node_RegexValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x42 + i++ + i = encodeVarintPredicate(dAtA, i, uint64(len(m.RegexValue))) + i += copy(dAtA[i:], m.RegexValue) + return i, nil +} +func (m *Node_TagRefValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x4a + i++ + i = encodeVarintPredicate(dAtA, i, uint64(len(m.TagRefValue))) + i += copy(dAtA[i:], m.TagRefValue) + return i, nil +} +func (m *Node_FieldRefValue) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x52 + i++ + i = encodeVarintPredicate(dAtA, i, uint64(len(m.FieldRefValue))) + i += copy(dAtA[i:], m.FieldRefValue) + return i, nil +} +func (m *Node_Logical_) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x58 + i++ + i = encodeVarintPredicate(dAtA, i, uint64(m.Logical)) 
+ return i, nil +} +func (m *Node_Comparison_) MarshalTo(dAtA []byte) (int, error) { + i := 0 + dAtA[i] = 0x60 + i++ + i = encodeVarintPredicate(dAtA, i, uint64(m.Comparison)) + return i, nil +} +func (m *Predicate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Predicate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Root != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintPredicate(dAtA, i, uint64(m.Root.Size())) + n2, err := m.Root.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + return i, nil +} + +func encodeFixed64Predicate(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Predicate(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintPredicate(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Node) Size() (n int) { + var l int + _ = l + if m.NodeType != 0 { + n += 1 + sovPredicate(uint64(m.NodeType)) + } + if len(m.Children) > 0 { + for _, e := range m.Children { + l = e.Size() + n += 1 + l + sovPredicate(uint64(l)) + } + } + if m.Value != nil { + n += m.Value.Size() + } + return n +} + +func (m *Node_StringValue) Size() (n int) { + var l int + _ = l + l = len(m.StringValue) + n += 1 + l + sovPredicate(uint64(l)) + return n +} +func (m *Node_BooleanValue) Size() (n int) { + var l int 
+ _ = l + n += 2 + return n +} +func (m *Node_IntegerValue) Size() (n int) { + var l int + _ = l + n += 1 + sovPredicate(uint64(m.IntegerValue)) + return n +} +func (m *Node_UnsignedValue) Size() (n int) { + var l int + _ = l + n += 1 + sovPredicate(uint64(m.UnsignedValue)) + return n +} +func (m *Node_FloatValue) Size() (n int) { + var l int + _ = l + n += 9 + return n +} +func (m *Node_RegexValue) Size() (n int) { + var l int + _ = l + l = len(m.RegexValue) + n += 1 + l + sovPredicate(uint64(l)) + return n +} +func (m *Node_TagRefValue) Size() (n int) { + var l int + _ = l + l = len(m.TagRefValue) + n += 1 + l + sovPredicate(uint64(l)) + return n +} +func (m *Node_FieldRefValue) Size() (n int) { + var l int + _ = l + l = len(m.FieldRefValue) + n += 1 + l + sovPredicate(uint64(l)) + return n +} +func (m *Node_Logical_) Size() (n int) { + var l int + _ = l + n += 1 + sovPredicate(uint64(m.Logical)) + return n +} +func (m *Node_Comparison_) Size() (n int) { + var l int + _ = l + n += 1 + sovPredicate(uint64(m.Comparison)) + return n +} +func (m *Predicate) Size() (n int) { + var l int + _ = l + if m.Root != nil { + l = m.Root.Size() + n += 1 + l + sovPredicate(uint64(l)) + } + return n +} + +func sovPredicate(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozPredicate(x uint64) (n int) { + return sovPredicate(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Node) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPredicate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Node: wiretype end group for non-group") + } + if fieldNum <= 0 { + return 
fmt.Errorf("proto: Node: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field NodeType", wireType) + } + m.NodeType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPredicate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.NodeType |= (Node_Type(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Children", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPredicate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPredicate + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Children = append(m.Children, &Node{}) + if err := m.Children[len(m.Children)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPredicate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPredicate + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = &Node_StringValue{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 4: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field BooleanValue", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if 
shift >= 64 { + return ErrIntOverflowPredicate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + b := bool(v != 0) + m.Value = &Node_BooleanValue{b} + case 5: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field IntegerValue", wireType) + } + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPredicate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = &Node_IntegerValue{v} + case 6: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field UnsignedValue", wireType) + } + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPredicate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = &Node_UnsignedValue{v} + case 7: + if wireType != 1 { + return fmt.Errorf("proto: wrong wireType = %d for field FloatValue", wireType) + } + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + m.Value = &Node_FloatValue{float64(math.Float64frombits(v))} + case 8: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field RegexValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPredicate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + 
intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPredicate + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = &Node_RegexValue{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TagRefValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPredicate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPredicate + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = &Node_TagRefValue{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FieldRefValue", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPredicate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthPredicate + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = &Node_FieldRefValue{string(dAtA[iNdEx:postIndex])} + iNdEx = postIndex + case 11: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Logical", wireType) + } + var v Node_Logical + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPredicate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (Node_Logical(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = &Node_Logical_{v} + 
case 12: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Comparison", wireType) + } + var v Node_Comparison + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPredicate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (Node_Comparison(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Value = &Node_Comparison_{v} + default: + iNdEx = preIndex + skippy, err := skipPredicate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPredicate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Predicate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPredicate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Predicate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Predicate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Root", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowPredicate + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthPredicate + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Root == nil { + m.Root = &Node{} + } + if err := 
m.Root.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipPredicate(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthPredicate + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipPredicate(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPredicate + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPredicate + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPredicate + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthPredicate + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowPredicate + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipPredicate(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + 
return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthPredicate = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowPredicate = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("predicate.proto", fileDescriptorPredicate) } + +var fileDescriptorPredicate = []byte{ + // 845 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x94, 0xcf, 0x6e, 0xdb, 0x46, + 0x10, 0xc6, 0x45, 0x49, 0xb6, 0xc4, 0x91, 0x65, 0x33, 0x9b, 0x38, 0x56, 0xd9, 0x46, 0xda, 0x3a, + 0x28, 0xa0, 0x1c, 0x2a, 0xc3, 0x6e, 0x73, 0x69, 0x0e, 0x05, 0xe5, 0xd0, 0xb2, 0x00, 0x56, 0x52, + 0x29, 0xa6, 0xc9, 0x4d, 0xa0, 0xa5, 0x15, 0x4d, 0x80, 0xe1, 0xaa, 0xcb, 0x55, 0x91, 0xbc, 0x41, + 0xc1, 0x53, 0xef, 0x05, 0x4f, 0x7d, 0x99, 0x02, 0x45, 0x81, 0x3e, 0x81, 0x50, 0xa8, 0xb7, 0x3e, + 0x45, 0xc1, 0xe5, 0x3f, 0xa9, 0xc9, 0x6d, 0x67, 0xbe, 0xef, 0x37, 0xb3, 0xbb, 0x1c, 0x2e, 0x9c, + 0xac, 0x18, 0x59, 0xb8, 0x73, 0x9b, 0x93, 0xde, 0x8a, 0x51, 0x4e, 0x51, 0x2d, 0xe0, 0x94, 0xd9, + 0x0e, 0x51, 0xbf, 0x74, 0x5c, 0x7e, 0xbf, 0xbe, 0xeb, 0xcd, 0xe9, 0xdb, 0x0b, 0x87, 0x3a, 0xf4, + 0x42, 0xe8, 0x77, 0xeb, 0xa5, 0x88, 0x44, 0x20, 0x56, 0x09, 0x77, 0xfe, 0x07, 0x40, 0x75, 0x44, + 0x17, 0x04, 0x0d, 0x41, 0xf6, 0xe9, 0x82, 0xcc, 0xf8, 0xfb, 0x15, 0x69, 0x49, 0x58, 0xea, 0x1e, + 0x5f, 0xa1, 0x5e, 0x5a, 0xb4, 0x17, 0x3b, 0x7a, 0xd6, 0xfb, 0x15, 0xe9, 0xb7, 0xb6, 0x9b, 0x4e, + 0x3d, 0x0e, 0xe3, 0xe8, 0xdf, 0x4d, 0xa7, 0xee, 0xa7, 0x6b, 0x33, 0x5f, 0xa1, 0x67, 0x50, 0x9f, + 0xdf, 0xbb, 0xde, 0x82, 0x11, 0xbf, 0x55, 0xc6, 0x95, 0x6e, 0xe3, 0xaa, 0xb9, 0x57, 0xc9, 0xcc, + 0x65, 0xf4, 0x35, 0x1c, 0x05, 0x9c, 0xb9, 0xbe, 0x33, 0xfb, 0xc9, 0xf6, 0xd6, 0xa4, 0x55, 0xc1, + 0x52, 0x57, 0xee, 0x9f, 0x6c, 0x37, 0x9d, 0xc6, 0x54, 0xe4, 0x7f, 0x88, 0xd3, 0xb7, 
0x25, 0xb3, + 0x11, 0x14, 0x21, 0xba, 0x04, 0xb8, 0xa3, 0xd4, 0x4b, 0x99, 0x2a, 0x96, 0xba, 0xf5, 0xbe, 0xb2, + 0xdd, 0x74, 0x8e, 0xfa, 0x94, 0x7a, 0xc4, 0xf6, 0x33, 0x48, 0x8e, 0x5d, 0x09, 0x72, 0x01, 0xb2, + 0xeb, 0xf3, 0x94, 0x38, 0xc0, 0x52, 0xb7, 0x92, 0x10, 0x43, 0x9f, 0x13, 0x87, 0xb0, 0x8c, 0xa8, + 0xbb, 0x3e, 0x4f, 0x80, 0x2b, 0x80, 0x75, 0x41, 0x1c, 0x62, 0xa9, 0x5b, 0xed, 0x3f, 0xd8, 0x6e, + 0x3a, 0xcd, 0x57, 0x7e, 0xe0, 0x3a, 0x3e, 0x59, 0xe4, 0x4d, 0xd6, 0x39, 0x73, 0x09, 0x8d, 0xa5, + 0x47, 0xed, 0x0c, 0xaa, 0x61, 0xa9, 0x2b, 0xf5, 0x8f, 0xb7, 0x9b, 0x0e, 0xdc, 0xc4, 0xe9, 0x8c, + 0x80, 0x65, 0x1e, 0xc5, 0x08, 0x23, 0x0e, 0x79, 0x97, 0x22, 0x75, 0x71, 0x7e, 0x81, 0x98, 0x71, + 0x3a, 0x47, 0x58, 0x1e, 0xa1, 0xe7, 0xd0, 0xe4, 0xb6, 0x33, 0x63, 0x64, 0x99, 0x42, 0x72, 0x71, + 0x69, 0x96, 0xed, 0x98, 0x64, 0x99, 0x5f, 0x1a, 0x2f, 0x42, 0xf4, 0x02, 0x4e, 0x96, 0x2e, 0xf1, + 0x16, 0x3b, 0x20, 0x08, 0x50, 0x9c, 0xea, 0x26, 0x96, 0x76, 0xd0, 0xe6, 0x72, 0x37, 0x81, 0x2e, + 0xa1, 0xe6, 0x51, 0xc7, 0x9d, 0xdb, 0x5e, 0xab, 0x21, 0x66, 0xe3, 0x74, 0x7f, 0x36, 0x8c, 0x44, + 0xbc, 0x2d, 0x99, 0x99, 0x0f, 0x7d, 0x03, 0x30, 0xa7, 0x6f, 0x57, 0x36, 0x73, 0x03, 0xea, 0xb7, + 0x8e, 0x04, 0xd5, 0xda, 0xa7, 0xae, 0x73, 0x3d, 0x3e, 0x62, 0xe1, 0x3e, 0xff, 0xb5, 0x0c, 0x55, + 0x31, 0x4a, 0xcf, 0x01, 0x19, 0xe3, 0xc1, 0xf0, 0x5a, 0x33, 0x66, 0xfa, 0x9b, 0x89, 0xa9, 0x4f, + 0xa7, 0xc3, 0xf1, 0x48, 0x29, 0xa9, 0x4f, 0xc2, 0x08, 0x7f, 0x92, 0x8d, 0x61, 0xda, 0x5c, 0x7f, + 0xb7, 0x62, 0x24, 0x08, 0x5c, 0xea, 0xa3, 0x17, 0x70, 0x7a, 0x3d, 0xfe, 0x6e, 0xa2, 0x99, 0xc3, + 0xe9, 0x78, 0xb4, 0x4b, 0x4a, 0x2a, 0x0e, 0x23, 0xfc, 0x59, 0x46, 0x16, 0x1b, 0xd8, 0x81, 0x2f, + 0x41, 0x99, 0x68, 0xa6, 0xbe, 0xc7, 0x95, 0xd5, 0x4f, 0xc3, 0x08, 0x9f, 0x65, 0xdc, 0xc4, 0x66, + 0x64, 0x17, 0xe9, 0x40, 0xcd, 0xd2, 0x06, 0x33, 0x53, 0xbf, 0x51, 0x2a, 0x2a, 0x0a, 0x23, 0x7c, + 0x9c, 0x39, 0x93, 0x0f, 0x82, 0x30, 0xd4, 0x8c, 0xa1, 0xa5, 0x9b, 0x9a, 0xa1, 0x54, 0xd5, 0x87, + 0x61, 0x84, 0x4f, 0xf2, 
0xcd, 0xbb, 0x9c, 0x30, 0xdb, 0x43, 0x4f, 0x41, 0xbe, 0x19, 0xea, 0xc6, + 0x4b, 0x51, 0xe4, 0x40, 0x7d, 0x14, 0x46, 0x58, 0xc9, 0x3c, 0xd9, 0xc7, 0x51, 0xab, 0x3f, 0xff, + 0xd6, 0x2e, 0x9d, 0xff, 0x59, 0x06, 0x28, 0x76, 0x8e, 0xda, 0x70, 0xa0, 0x7f, 0xff, 0x4a, 0x33, + 0x94, 0x52, 0x52, 0x79, 0xe7, 0x50, 0x3f, 0xae, 0x6d, 0x0f, 0x7d, 0x01, 0xf2, 0x68, 0x6c, 0xcd, + 0x12, 0x8f, 0xa4, 0x3e, 0x0e, 0x23, 0x8c, 0x0a, 0xcf, 0x88, 0xf2, 0xc4, 0xf6, 0x0c, 0x1a, 0x53, + 0x4b, 0x33, 0xad, 0xe9, 0xec, 0xf5, 0xd0, 0xba, 0x55, 0xca, 0x6a, 0x2b, 0x8c, 0xf0, 0xa3, 0xc2, + 0x38, 0xe5, 0x36, 0xe3, 0xc1, 0x6b, 0x97, 0xdf, 0xc7, 0x1d, 0x4d, 0x7d, 0xa0, 0xbf, 0x51, 0x2a, + 0xff, 0xef, 0x28, 0x86, 0x36, 0xeb, 0x98, 0x78, 0xaa, 0x1f, 0xe9, 0x98, 0xd8, 0x54, 0x28, 0x1b, + 0x96, 0x72, 0x90, 0x5c, 0x58, 0xa1, 0x1b, 0x24, 0x08, 0x10, 0x86, 0x8a, 0x61, 0xe9, 0xca, 0xa1, + 0x7a, 0x16, 0x46, 0xf8, 0xe1, 0xbe, 0x98, 0xec, 0xf7, 0x09, 0x94, 0x07, 0x96, 0x52, 0x53, 0x4f, + 0xc3, 0x08, 0x3f, 0x28, 0x0c, 0x03, 0x46, 0x6c, 0x4e, 0x18, 0x7a, 0x0a, 0x95, 0x81, 0xa5, 0x2b, + 0x75, 0x55, 0x0d, 0x23, 0xfc, 0xf8, 0x03, 0x5d, 0xd4, 0x48, 0xef, 0xf3, 0x5b, 0xa8, 0xa5, 0x23, + 0x84, 0xce, 0xa0, 0xa2, 0x8d, 0x5e, 0x2a, 0x25, 0xf5, 0x38, 0x8c, 0x30, 0xa4, 0x59, 0xcd, 0x5f, + 0xa0, 0x53, 0x28, 0x8f, 0x4d, 0x45, 0x52, 0x9b, 0x61, 0x84, 0xe5, 0x34, 0x3f, 0x66, 0x49, 0x81, + 0x7e, 0x0d, 0x0e, 0xc4, 0x0f, 0x75, 0xde, 0x03, 0x79, 0x92, 0x3d, 0xcc, 0xe8, 0x73, 0xa8, 0x32, + 0x4a, 0xb9, 0x78, 0x4c, 0x3f, 0x78, 0x02, 0x85, 0xd4, 0x57, 0x7e, 0xdf, 0xb6, 0xa5, 0xbf, 0xb6, + 0x6d, 0xe9, 0xef, 0x6d, 0x5b, 0xfa, 0xe5, 0x9f, 0x76, 0xe9, 0xee, 0x50, 0x3c, 0xcb, 0x5f, 0xfd, + 0x17, 0x00, 0x00, 0xff, 0xff, 0xfb, 0xde, 0x9f, 0x18, 0xe1, 0x05, 0x00, 0x00, +} diff --git a/vendor/github.com/influxdata/ifql/query/execute/storage/storage.pb.go b/vendor/github.com/influxdata/ifql/query/execute/storage/storage.pb.go new file mode 100644 index 000000000..f83cfcb89 --- /dev/null +++ 
b/vendor/github.com/influxdata/ifql/query/execute/storage/storage.pb.go @@ -0,0 +1,3871 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: storage.proto + +/* + Package storage is a generated protocol buffer package. + + It is generated from these files: + storage.proto + predicate.proto + + It has these top-level messages: + ReadRequest + Aggregate + Tag + ReadResponse + CapabilitiesResponse + HintsResponse + TimestampRange + Node + Predicate +*/ +package storage + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import _ "github.com/gogo/protobuf/types" +import _ "github.com/influxdata/yarpc/yarpcproto" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Aggregate_AggregateType int32 + +const ( + AggregateTypeNone Aggregate_AggregateType = 0 + AggregateTypeSum Aggregate_AggregateType = 1 + AggregateTypeCount Aggregate_AggregateType = 2 +) + +var Aggregate_AggregateType_name = map[int32]string{ + 0: "NONE", + 1: "SUM", + 2: "COUNT", +} +var Aggregate_AggregateType_value = map[string]int32{ + "NONE": 0, + "SUM": 1, + "COUNT": 2, +} + +func (x Aggregate_AggregateType) String() string { + return proto.EnumName(Aggregate_AggregateType_name, int32(x)) +} +func (Aggregate_AggregateType) EnumDescriptor() ([]byte, []int) { + return fileDescriptorStorage, []int{1, 0} +} + +type ReadResponse_FrameType int32 + +const ( + FrameTypeSeries ReadResponse_FrameType = 0 + FrameTypePoints ReadResponse_FrameType = 1 +) + +var ReadResponse_FrameType_name = map[int32]string{ + 0: "SERIES", + 1: "POINTS", +} +var ReadResponse_FrameType_value = map[string]int32{ + "SERIES": 0, + "POINTS": 1, +} + +func (x ReadResponse_FrameType) String() string { + return proto.EnumName(ReadResponse_FrameType_name, int32(x)) +} +func (ReadResponse_FrameType) EnumDescriptor() ([]byte, []int) { + return fileDescriptorStorage, []int{3, 0} +} + +type ReadResponse_DataType int32 + +const ( + DataTypeFloat ReadResponse_DataType = 0 + DataTypeInteger ReadResponse_DataType = 1 + DataTypeUnsigned ReadResponse_DataType = 2 + DataTypeBoolean ReadResponse_DataType = 3 + DataTypeString ReadResponse_DataType = 4 +) + +var ReadResponse_DataType_name = map[int32]string{ + 0: "FLOAT", + 1: "INTEGER", + 2: "UNSIGNED", + 3: "BOOLEAN", + 4: "STRING", +} +var ReadResponse_DataType_value = map[string]int32{ + "FLOAT": 0, + "INTEGER": 1, + "UNSIGNED": 2, + "BOOLEAN": 3, + "STRING": 4, +} + +func (x ReadResponse_DataType) String() string { + return proto.EnumName(ReadResponse_DataType_name, int32(x)) +} +func (ReadResponse_DataType) EnumDescriptor() ([]byte, []int) { + return 
fileDescriptorStorage, []int{3, 1} +} + +// Request message for Storage.Read. +type ReadRequest struct { + // Database specifies the name of the database to issue the read request. + Database string `protobuf:"bytes,1,opt,name=database,proto3" json:"database,omitempty"` + TimestampRange TimestampRange `protobuf:"bytes,2,opt,name=timestamp_range,json=timestampRange" json:"timestamp_range"` + // Descending indicates whether points should be returned in descending order. + Descending bool `protobuf:"varint,3,opt,name=descending,proto3" json:"descending,omitempty"` + // Grouping specifies a list of tags used to order the data + Grouping []string `protobuf:"bytes,4,rep,name=grouping" json:"grouping,omitempty"` + // Aggregate specifies an optional aggregate to apply to the data. + // TODO(sgc): switch to slice for multiple aggregates in a single request + Aggregate *Aggregate `protobuf:"bytes,9,opt,name=aggregate" json:"aggregate,omitempty"` + Predicate *Predicate `protobuf:"bytes,5,opt,name=predicate" json:"predicate,omitempty"` + // SeriesLimit determines the maximum number of series to be returned for the request. Specify 0 for no limit. + SeriesLimit uint64 `protobuf:"varint,6,opt,name=series_limit,json=seriesLimit,proto3" json:"series_limit,omitempty"` + // SeriesOffset determines how many series to skip before processing the request. + SeriesOffset uint64 `protobuf:"varint,7,opt,name=series_offset,json=seriesOffset,proto3" json:"series_offset,omitempty"` + // PointsLimit determines the maximum number of values per series to be returned for the request. + // Specify 0 for no limit. + PointsLimit uint64 `protobuf:"varint,8,opt,name=points_limit,json=pointsLimit,proto3" json:"points_limit,omitempty"` + // Trace contains opaque data if a trace is active. 
+ Trace map[string]string `protobuf:"bytes,10,rep,name=trace" json:"trace,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *ReadRequest) Reset() { *m = ReadRequest{} } +func (m *ReadRequest) String() string { return proto.CompactTextString(m) } +func (*ReadRequest) ProtoMessage() {} +func (*ReadRequest) Descriptor() ([]byte, []int) { return fileDescriptorStorage, []int{0} } + +type Aggregate struct { + Type Aggregate_AggregateType `protobuf:"varint,1,opt,name=type,proto3,enum=storage.Aggregate_AggregateType" json:"type,omitempty"` +} + +func (m *Aggregate) Reset() { *m = Aggregate{} } +func (m *Aggregate) String() string { return proto.CompactTextString(m) } +func (*Aggregate) ProtoMessage() {} +func (*Aggregate) Descriptor() ([]byte, []int) { return fileDescriptorStorage, []int{1} } + +type Tag struct { + Key []byte `protobuf:"bytes,1,opt,name=key,proto3" json:"key,omitempty"` + Value []byte `protobuf:"bytes,2,opt,name=value,proto3" json:"value,omitempty"` +} + +func (m *Tag) Reset() { *m = Tag{} } +func (m *Tag) String() string { return proto.CompactTextString(m) } +func (*Tag) ProtoMessage() {} +func (*Tag) Descriptor() ([]byte, []int) { return fileDescriptorStorage, []int{2} } + +// Response message for Storage.Read. 
+type ReadResponse struct { + Frames []ReadResponse_Frame `protobuf:"bytes,1,rep,name=frames" json:"frames"` +} + +func (m *ReadResponse) Reset() { *m = ReadResponse{} } +func (m *ReadResponse) String() string { return proto.CompactTextString(m) } +func (*ReadResponse) ProtoMessage() {} +func (*ReadResponse) Descriptor() ([]byte, []int) { return fileDescriptorStorage, []int{3} } + +type ReadResponse_Frame struct { + // Types that are valid to be assigned to Data: + // *ReadResponse_Frame_Series + // *ReadResponse_Frame_FloatPoints + // *ReadResponse_Frame_IntegerPoints + // *ReadResponse_Frame_UnsignedPoints + // *ReadResponse_Frame_BooleanPoints + // *ReadResponse_Frame_StringPoints + Data isReadResponse_Frame_Data `protobuf_oneof:"data"` +} + +func (m *ReadResponse_Frame) Reset() { *m = ReadResponse_Frame{} } +func (m *ReadResponse_Frame) String() string { return proto.CompactTextString(m) } +func (*ReadResponse_Frame) ProtoMessage() {} +func (*ReadResponse_Frame) Descriptor() ([]byte, []int) { return fileDescriptorStorage, []int{3, 0} } + +type isReadResponse_Frame_Data interface { + isReadResponse_Frame_Data() + MarshalTo([]byte) (int, error) + Size() int +} + +type ReadResponse_Frame_Series struct { + Series *ReadResponse_SeriesFrame `protobuf:"bytes,1,opt,name=series,oneof"` +} +type ReadResponse_Frame_FloatPoints struct { + FloatPoints *ReadResponse_FloatPointsFrame `protobuf:"bytes,2,opt,name=float_points,json=floatPoints,oneof"` +} +type ReadResponse_Frame_IntegerPoints struct { + IntegerPoints *ReadResponse_IntegerPointsFrame `protobuf:"bytes,3,opt,name=integer_points,json=integerPoints,oneof"` +} +type ReadResponse_Frame_UnsignedPoints struct { + UnsignedPoints *ReadResponse_UnsignedPointsFrame `protobuf:"bytes,4,opt,name=unsigned_points,json=unsignedPoints,oneof"` +} +type ReadResponse_Frame_BooleanPoints struct { + BooleanPoints *ReadResponse_BooleanPointsFrame `protobuf:"bytes,5,opt,name=boolean_points,json=booleanPoints,oneof"` +} +type 
ReadResponse_Frame_StringPoints struct { + StringPoints *ReadResponse_StringPointsFrame `protobuf:"bytes,6,opt,name=string_points,json=stringPoints,oneof"` +} + +func (*ReadResponse_Frame_Series) isReadResponse_Frame_Data() {} +func (*ReadResponse_Frame_FloatPoints) isReadResponse_Frame_Data() {} +func (*ReadResponse_Frame_IntegerPoints) isReadResponse_Frame_Data() {} +func (*ReadResponse_Frame_UnsignedPoints) isReadResponse_Frame_Data() {} +func (*ReadResponse_Frame_BooleanPoints) isReadResponse_Frame_Data() {} +func (*ReadResponse_Frame_StringPoints) isReadResponse_Frame_Data() {} + +func (m *ReadResponse_Frame) GetData() isReadResponse_Frame_Data { + if m != nil { + return m.Data + } + return nil +} + +func (m *ReadResponse_Frame) GetSeries() *ReadResponse_SeriesFrame { + if x, ok := m.GetData().(*ReadResponse_Frame_Series); ok { + return x.Series + } + return nil +} + +func (m *ReadResponse_Frame) GetFloatPoints() *ReadResponse_FloatPointsFrame { + if x, ok := m.GetData().(*ReadResponse_Frame_FloatPoints); ok { + return x.FloatPoints + } + return nil +} + +func (m *ReadResponse_Frame) GetIntegerPoints() *ReadResponse_IntegerPointsFrame { + if x, ok := m.GetData().(*ReadResponse_Frame_IntegerPoints); ok { + return x.IntegerPoints + } + return nil +} + +func (m *ReadResponse_Frame) GetUnsignedPoints() *ReadResponse_UnsignedPointsFrame { + if x, ok := m.GetData().(*ReadResponse_Frame_UnsignedPoints); ok { + return x.UnsignedPoints + } + return nil +} + +func (m *ReadResponse_Frame) GetBooleanPoints() *ReadResponse_BooleanPointsFrame { + if x, ok := m.GetData().(*ReadResponse_Frame_BooleanPoints); ok { + return x.BooleanPoints + } + return nil +} + +func (m *ReadResponse_Frame) GetStringPoints() *ReadResponse_StringPointsFrame { + if x, ok := m.GetData().(*ReadResponse_Frame_StringPoints); ok { + return x.StringPoints + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*ReadResponse_Frame) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _ReadResponse_Frame_OneofMarshaler, _ReadResponse_Frame_OneofUnmarshaler, _ReadResponse_Frame_OneofSizer, []interface{}{ + (*ReadResponse_Frame_Series)(nil), + (*ReadResponse_Frame_FloatPoints)(nil), + (*ReadResponse_Frame_IntegerPoints)(nil), + (*ReadResponse_Frame_UnsignedPoints)(nil), + (*ReadResponse_Frame_BooleanPoints)(nil), + (*ReadResponse_Frame_StringPoints)(nil), + } +} + +func _ReadResponse_Frame_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*ReadResponse_Frame) + // data + switch x := m.Data.(type) { + case *ReadResponse_Frame_Series: + _ = b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Series); err != nil { + return err + } + case *ReadResponse_Frame_FloatPoints: + _ = b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.FloatPoints); err != nil { + return err + } + case *ReadResponse_Frame_IntegerPoints: + _ = b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.IntegerPoints); err != nil { + return err + } + case *ReadResponse_Frame_UnsignedPoints: + _ = b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.UnsignedPoints); err != nil { + return err + } + case *ReadResponse_Frame_BooleanPoints: + _ = b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.BooleanPoints); err != nil { + return err + } + case *ReadResponse_Frame_StringPoints: + _ = b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.StringPoints); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("ReadResponse_Frame.Data has unexpected type %T", x) + } + return nil +} + +func _ReadResponse_Frame_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*ReadResponse_Frame) + switch 
tag { + case 1: // data.series + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ReadResponse_SeriesFrame) + err := b.DecodeMessage(msg) + m.Data = &ReadResponse_Frame_Series{msg} + return true, err + case 2: // data.float_points + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ReadResponse_FloatPointsFrame) + err := b.DecodeMessage(msg) + m.Data = &ReadResponse_Frame_FloatPoints{msg} + return true, err + case 3: // data.integer_points + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ReadResponse_IntegerPointsFrame) + err := b.DecodeMessage(msg) + m.Data = &ReadResponse_Frame_IntegerPoints{msg} + return true, err + case 4: // data.unsigned_points + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ReadResponse_UnsignedPointsFrame) + err := b.DecodeMessage(msg) + m.Data = &ReadResponse_Frame_UnsignedPoints{msg} + return true, err + case 5: // data.boolean_points + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ReadResponse_BooleanPointsFrame) + err := b.DecodeMessage(msg) + m.Data = &ReadResponse_Frame_BooleanPoints{msg} + return true, err + case 6: // data.string_points + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ReadResponse_StringPointsFrame) + err := b.DecodeMessage(msg) + m.Data = &ReadResponse_Frame_StringPoints{msg} + return true, err + default: + return false, nil + } +} + +func _ReadResponse_Frame_OneofSizer(msg proto.Message) (n int) { + m := msg.(*ReadResponse_Frame) + // data + switch x := m.Data.(type) { + case *ReadResponse_Frame_Series: + s := proto.Size(x.Series) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ReadResponse_Frame_FloatPoints: + s := proto.Size(x.FloatPoints) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case *ReadResponse_Frame_IntegerPoints: + s := proto.Size(x.IntegerPoints) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ReadResponse_Frame_UnsignedPoints: + s := proto.Size(x.UnsignedPoints) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ReadResponse_Frame_BooleanPoints: + s := proto.Size(x.BooleanPoints) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *ReadResponse_Frame_StringPoints: + s := proto.Size(x.StringPoints) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +type ReadResponse_SeriesFrame struct { + Tags []Tag `protobuf:"bytes,1,rep,name=tags" json:"tags"` + DataType ReadResponse_DataType `protobuf:"varint,2,opt,name=data_type,json=dataType,proto3,enum=storage.ReadResponse_DataType" json:"data_type,omitempty"` +} + +func (m *ReadResponse_SeriesFrame) Reset() { *m = ReadResponse_SeriesFrame{} } +func (m *ReadResponse_SeriesFrame) String() string { return proto.CompactTextString(m) } +func (*ReadResponse_SeriesFrame) ProtoMessage() {} +func (*ReadResponse_SeriesFrame) Descriptor() ([]byte, []int) { + return fileDescriptorStorage, []int{3, 1} +} + +type ReadResponse_FloatPointsFrame struct { + Timestamps []int64 `protobuf:"fixed64,1,rep,packed,name=timestamps" json:"timestamps,omitempty"` + Values []float64 `protobuf:"fixed64,2,rep,packed,name=values" json:"values,omitempty"` +} + +func (m *ReadResponse_FloatPointsFrame) Reset() { *m = ReadResponse_FloatPointsFrame{} } +func (m *ReadResponse_FloatPointsFrame) String() string { return proto.CompactTextString(m) } +func (*ReadResponse_FloatPointsFrame) ProtoMessage() {} +func (*ReadResponse_FloatPointsFrame) Descriptor() ([]byte, []int) { + return fileDescriptorStorage, 
[]int{3, 2} +} + +type ReadResponse_IntegerPointsFrame struct { + Timestamps []int64 `protobuf:"fixed64,1,rep,packed,name=timestamps" json:"timestamps,omitempty"` + Values []int64 `protobuf:"varint,2,rep,packed,name=values" json:"values,omitempty"` +} + +func (m *ReadResponse_IntegerPointsFrame) Reset() { *m = ReadResponse_IntegerPointsFrame{} } +func (m *ReadResponse_IntegerPointsFrame) String() string { return proto.CompactTextString(m) } +func (*ReadResponse_IntegerPointsFrame) ProtoMessage() {} +func (*ReadResponse_IntegerPointsFrame) Descriptor() ([]byte, []int) { + return fileDescriptorStorage, []int{3, 3} +} + +type ReadResponse_UnsignedPointsFrame struct { + Timestamps []int64 `protobuf:"fixed64,1,rep,packed,name=timestamps" json:"timestamps,omitempty"` + Values []uint64 `protobuf:"varint,2,rep,packed,name=values" json:"values,omitempty"` +} + +func (m *ReadResponse_UnsignedPointsFrame) Reset() { *m = ReadResponse_UnsignedPointsFrame{} } +func (m *ReadResponse_UnsignedPointsFrame) String() string { return proto.CompactTextString(m) } +func (*ReadResponse_UnsignedPointsFrame) ProtoMessage() {} +func (*ReadResponse_UnsignedPointsFrame) Descriptor() ([]byte, []int) { + return fileDescriptorStorage, []int{3, 4} +} + +type ReadResponse_BooleanPointsFrame struct { + Timestamps []int64 `protobuf:"fixed64,1,rep,packed,name=timestamps" json:"timestamps,omitempty"` + Values []bool `protobuf:"varint,2,rep,packed,name=values" json:"values,omitempty"` +} + +func (m *ReadResponse_BooleanPointsFrame) Reset() { *m = ReadResponse_BooleanPointsFrame{} } +func (m *ReadResponse_BooleanPointsFrame) String() string { return proto.CompactTextString(m) } +func (*ReadResponse_BooleanPointsFrame) ProtoMessage() {} +func (*ReadResponse_BooleanPointsFrame) Descriptor() ([]byte, []int) { + return fileDescriptorStorage, []int{3, 5} +} + +type ReadResponse_StringPointsFrame struct { + Timestamps []int64 `protobuf:"fixed64,1,rep,packed,name=timestamps" json:"timestamps,omitempty"` + 
Values []string `protobuf:"bytes,2,rep,name=values" json:"values,omitempty"` +} + +func (m *ReadResponse_StringPointsFrame) Reset() { *m = ReadResponse_StringPointsFrame{} } +func (m *ReadResponse_StringPointsFrame) String() string { return proto.CompactTextString(m) } +func (*ReadResponse_StringPointsFrame) ProtoMessage() {} +func (*ReadResponse_StringPointsFrame) Descriptor() ([]byte, []int) { + return fileDescriptorStorage, []int{3, 6} +} + +type CapabilitiesResponse struct { + Caps map[string]string `protobuf:"bytes,1,rep,name=caps" json:"caps,omitempty" protobuf_key:"bytes,1,opt,name=key,proto3" protobuf_val:"bytes,2,opt,name=value,proto3"` +} + +func (m *CapabilitiesResponse) Reset() { *m = CapabilitiesResponse{} } +func (m *CapabilitiesResponse) String() string { return proto.CompactTextString(m) } +func (*CapabilitiesResponse) ProtoMessage() {} +func (*CapabilitiesResponse) Descriptor() ([]byte, []int) { return fileDescriptorStorage, []int{4} } + +type HintsResponse struct { +} + +func (m *HintsResponse) Reset() { *m = HintsResponse{} } +func (m *HintsResponse) String() string { return proto.CompactTextString(m) } +func (*HintsResponse) ProtoMessage() {} +func (*HintsResponse) Descriptor() ([]byte, []int) { return fileDescriptorStorage, []int{5} } + +// Specifies a continuous range of nanosecond timestamps. +type TimestampRange struct { + // Start defines the inclusive lower bound. + Start int64 `protobuf:"varint,1,opt,name=start,proto3" json:"start,omitempty"` + // End defines the inclusive upper bound. 
+ End int64 `protobuf:"varint,2,opt,name=end,proto3" json:"end,omitempty"` +} + +func (m *TimestampRange) Reset() { *m = TimestampRange{} } +func (m *TimestampRange) String() string { return proto.CompactTextString(m) } +func (*TimestampRange) ProtoMessage() {} +func (*TimestampRange) Descriptor() ([]byte, []int) { return fileDescriptorStorage, []int{6} } + +func init() { + proto.RegisterType((*ReadRequest)(nil), "storage.ReadRequest") + proto.RegisterType((*Aggregate)(nil), "storage.Aggregate") + proto.RegisterType((*Tag)(nil), "storage.Tag") + proto.RegisterType((*ReadResponse)(nil), "storage.ReadResponse") + proto.RegisterType((*ReadResponse_Frame)(nil), "storage.ReadResponse.Frame") + proto.RegisterType((*ReadResponse_SeriesFrame)(nil), "storage.ReadResponse.SeriesFrame") + proto.RegisterType((*ReadResponse_FloatPointsFrame)(nil), "storage.ReadResponse.FloatPointsFrame") + proto.RegisterType((*ReadResponse_IntegerPointsFrame)(nil), "storage.ReadResponse.IntegerPointsFrame") + proto.RegisterType((*ReadResponse_UnsignedPointsFrame)(nil), "storage.ReadResponse.UnsignedPointsFrame") + proto.RegisterType((*ReadResponse_BooleanPointsFrame)(nil), "storage.ReadResponse.BooleanPointsFrame") + proto.RegisterType((*ReadResponse_StringPointsFrame)(nil), "storage.ReadResponse.StringPointsFrame") + proto.RegisterType((*CapabilitiesResponse)(nil), "storage.CapabilitiesResponse") + proto.RegisterType((*HintsResponse)(nil), "storage.HintsResponse") + proto.RegisterType((*TimestampRange)(nil), "storage.TimestampRange") + proto.RegisterEnum("storage.Aggregate_AggregateType", Aggregate_AggregateType_name, Aggregate_AggregateType_value) + proto.RegisterEnum("storage.ReadResponse_FrameType", ReadResponse_FrameType_name, ReadResponse_FrameType_value) + proto.RegisterEnum("storage.ReadResponse_DataType", ReadResponse_DataType_name, ReadResponse_DataType_value) +} +func (m *ReadRequest) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := 
m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadRequest) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Database) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintStorage(dAtA, i, uint64(len(m.Database))) + i += copy(dAtA[i:], m.Database) + } + dAtA[i] = 0x12 + i++ + i = encodeVarintStorage(dAtA, i, uint64(m.TimestampRange.Size())) + n1, err := m.TimestampRange.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n1 + if m.Descending { + dAtA[i] = 0x18 + i++ + if m.Descending { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + if len(m.Grouping) > 0 { + for _, s := range m.Grouping { + dAtA[i] = 0x22 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + if m.Predicate != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintStorage(dAtA, i, uint64(m.Predicate.Size())) + n2, err := m.Predicate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n2 + } + if m.SeriesLimit != 0 { + dAtA[i] = 0x30 + i++ + i = encodeVarintStorage(dAtA, i, uint64(m.SeriesLimit)) + } + if m.SeriesOffset != 0 { + dAtA[i] = 0x38 + i++ + i = encodeVarintStorage(dAtA, i, uint64(m.SeriesOffset)) + } + if m.PointsLimit != 0 { + dAtA[i] = 0x40 + i++ + i = encodeVarintStorage(dAtA, i, uint64(m.PointsLimit)) + } + if m.Aggregate != nil { + dAtA[i] = 0x4a + i++ + i = encodeVarintStorage(dAtA, i, uint64(m.Aggregate.Size())) + n3, err := m.Aggregate.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n3 + } + if len(m.Trace) > 0 { + for k, _ := range m.Trace { + dAtA[i] = 0x52 + i++ + v := m.Trace[k] + mapSize := 1 + len(k) + sovStorage(uint64(len(k))) + 1 + len(v) + sovStorage(uint64(len(v))) + i = encodeVarintStorage(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintStorage(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = 
encodeVarintStorage(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *Aggregate) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Aggregate) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Type != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintStorage(dAtA, i, uint64(m.Type)) + } + return i, nil +} + +func (m *Tag) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Tag) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Key) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintStorage(dAtA, i, uint64(len(m.Key))) + i += copy(dAtA[i:], m.Key) + } + if len(m.Value) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintStorage(dAtA, i, uint64(len(m.Value))) + i += copy(dAtA[i:], m.Value) + } + return i, nil +} + +func (m *ReadResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Frames) > 0 { + for _, msg := range m.Frames { + dAtA[i] = 0xa + i++ + i = encodeVarintStorage(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + return i, nil +} + +func (m *ReadResponse_Frame) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadResponse_Frame) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Data != nil { + 
nn4, err := m.Data.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += nn4 + } + return i, nil +} + +func (m *ReadResponse_Frame_Series) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.Series != nil { + dAtA[i] = 0xa + i++ + i = encodeVarintStorage(dAtA, i, uint64(m.Series.Size())) + n5, err := m.Series.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n5 + } + return i, nil +} +func (m *ReadResponse_Frame_FloatPoints) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.FloatPoints != nil { + dAtA[i] = 0x12 + i++ + i = encodeVarintStorage(dAtA, i, uint64(m.FloatPoints.Size())) + n6, err := m.FloatPoints.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n6 + } + return i, nil +} +func (m *ReadResponse_Frame_IntegerPoints) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.IntegerPoints != nil { + dAtA[i] = 0x1a + i++ + i = encodeVarintStorage(dAtA, i, uint64(m.IntegerPoints.Size())) + n7, err := m.IntegerPoints.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n7 + } + return i, nil +} +func (m *ReadResponse_Frame_UnsignedPoints) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.UnsignedPoints != nil { + dAtA[i] = 0x22 + i++ + i = encodeVarintStorage(dAtA, i, uint64(m.UnsignedPoints.Size())) + n8, err := m.UnsignedPoints.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n8 + } + return i, nil +} +func (m *ReadResponse_Frame_BooleanPoints) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.BooleanPoints != nil { + dAtA[i] = 0x2a + i++ + i = encodeVarintStorage(dAtA, i, uint64(m.BooleanPoints.Size())) + n9, err := m.BooleanPoints.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n9 + } + return i, nil +} +func (m *ReadResponse_Frame_StringPoints) MarshalTo(dAtA []byte) (int, error) { + i := 0 + if m.StringPoints != nil { + dAtA[i] = 0x32 + i++ + i = encodeVarintStorage(dAtA, i, uint64(m.StringPoints.Size())) + n10, err := m.StringPoints.MarshalTo(dAtA[i:]) + if 
err != nil { + return 0, err + } + i += n10 + } + return i, nil +} +func (m *ReadResponse_SeriesFrame) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadResponse_SeriesFrame) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Tags) > 0 { + for _, msg := range m.Tags { + dAtA[i] = 0xa + i++ + i = encodeVarintStorage(dAtA, i, uint64(msg.Size())) + n, err := msg.MarshalTo(dAtA[i:]) + if err != nil { + return 0, err + } + i += n + } + } + if m.DataType != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintStorage(dAtA, i, uint64(m.DataType)) + } + return i, nil +} + +func (m *ReadResponse_FloatPointsFrame) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadResponse_FloatPointsFrame) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Timestamps) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintStorage(dAtA, i, uint64(len(m.Timestamps)*8)) + for _, num := range m.Timestamps { + dAtA[i] = uint8(num) + i++ + dAtA[i] = uint8(num >> 8) + i++ + dAtA[i] = uint8(num >> 16) + i++ + dAtA[i] = uint8(num >> 24) + i++ + dAtA[i] = uint8(num >> 32) + i++ + dAtA[i] = uint8(num >> 40) + i++ + dAtA[i] = uint8(num >> 48) + i++ + dAtA[i] = uint8(num >> 56) + i++ + } + } + if len(m.Values) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintStorage(dAtA, i, uint64(len(m.Values)*8)) + for _, num := range m.Values { + f11 := math.Float64bits(float64(num)) + dAtA[i] = uint8(f11) + i++ + dAtA[i] = uint8(f11 >> 8) + i++ + dAtA[i] = uint8(f11 >> 16) + i++ + dAtA[i] = uint8(f11 >> 24) + i++ + dAtA[i] = uint8(f11 >> 32) + i++ + dAtA[i] = uint8(f11 >> 40) + i++ + dAtA[i] = uint8(f11 >> 48) + i++ + dAtA[i] = uint8(f11 >> 56) + i++ + } + } + return i, nil 
+} + +func (m *ReadResponse_IntegerPointsFrame) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadResponse_IntegerPointsFrame) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Timestamps) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintStorage(dAtA, i, uint64(len(m.Timestamps)*8)) + for _, num := range m.Timestamps { + dAtA[i] = uint8(num) + i++ + dAtA[i] = uint8(num >> 8) + i++ + dAtA[i] = uint8(num >> 16) + i++ + dAtA[i] = uint8(num >> 24) + i++ + dAtA[i] = uint8(num >> 32) + i++ + dAtA[i] = uint8(num >> 40) + i++ + dAtA[i] = uint8(num >> 48) + i++ + dAtA[i] = uint8(num >> 56) + i++ + } + } + if len(m.Values) > 0 { + dAtA13 := make([]byte, len(m.Values)*10) + var j12 int + for _, num1 := range m.Values { + num := uint64(num1) + for num >= 1<<7 { + dAtA13[j12] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j12++ + } + dAtA13[j12] = uint8(num) + j12++ + } + dAtA[i] = 0x12 + i++ + i = encodeVarintStorage(dAtA, i, uint64(j12)) + i += copy(dAtA[i:], dAtA13[:j12]) + } + return i, nil +} + +func (m *ReadResponse_UnsignedPointsFrame) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadResponse_UnsignedPointsFrame) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Timestamps) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintStorage(dAtA, i, uint64(len(m.Timestamps)*8)) + for _, num := range m.Timestamps { + dAtA[i] = uint8(num) + i++ + dAtA[i] = uint8(num >> 8) + i++ + dAtA[i] = uint8(num >> 16) + i++ + dAtA[i] = uint8(num >> 24) + i++ + dAtA[i] = uint8(num >> 32) + i++ + dAtA[i] = uint8(num >> 40) + i++ + dAtA[i] = uint8(num >> 48) + i++ + dAtA[i] = uint8(num >> 56) + i++ + } + } + if len(m.Values) > 0 { + dAtA15 
:= make([]byte, len(m.Values)*10) + var j14 int + for _, num := range m.Values { + for num >= 1<<7 { + dAtA15[j14] = uint8(uint64(num)&0x7f | 0x80) + num >>= 7 + j14++ + } + dAtA15[j14] = uint8(num) + j14++ + } + dAtA[i] = 0x12 + i++ + i = encodeVarintStorage(dAtA, i, uint64(j14)) + i += copy(dAtA[i:], dAtA15[:j14]) + } + return i, nil +} + +func (m *ReadResponse_BooleanPointsFrame) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadResponse_BooleanPointsFrame) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Timestamps) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintStorage(dAtA, i, uint64(len(m.Timestamps)*8)) + for _, num := range m.Timestamps { + dAtA[i] = uint8(num) + i++ + dAtA[i] = uint8(num >> 8) + i++ + dAtA[i] = uint8(num >> 16) + i++ + dAtA[i] = uint8(num >> 24) + i++ + dAtA[i] = uint8(num >> 32) + i++ + dAtA[i] = uint8(num >> 40) + i++ + dAtA[i] = uint8(num >> 48) + i++ + dAtA[i] = uint8(num >> 56) + i++ + } + } + if len(m.Values) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintStorage(dAtA, i, uint64(len(m.Values))) + for _, b := range m.Values { + if b { + dAtA[i] = 1 + } else { + dAtA[i] = 0 + } + i++ + } + } + return i, nil +} + +func (m *ReadResponse_StringPointsFrame) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *ReadResponse_StringPointsFrame) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Timestamps) > 0 { + dAtA[i] = 0xa + i++ + i = encodeVarintStorage(dAtA, i, uint64(len(m.Timestamps)*8)) + for _, num := range m.Timestamps { + dAtA[i] = uint8(num) + i++ + dAtA[i] = uint8(num >> 8) + i++ + dAtA[i] = uint8(num >> 16) + i++ + dAtA[i] = uint8(num >> 24) + i++ + dAtA[i] = uint8(num >> 
32) + i++ + dAtA[i] = uint8(num >> 40) + i++ + dAtA[i] = uint8(num >> 48) + i++ + dAtA[i] = uint8(num >> 56) + i++ + } + } + if len(m.Values) > 0 { + for _, s := range m.Values { + dAtA[i] = 0x12 + i++ + l = len(s) + for l >= 1<<7 { + dAtA[i] = uint8(uint64(l)&0x7f | 0x80) + l >>= 7 + i++ + } + dAtA[i] = uint8(l) + i++ + i += copy(dAtA[i:], s) + } + } + return i, nil +} + +func (m *CapabilitiesResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *CapabilitiesResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if len(m.Caps) > 0 { + for k, _ := range m.Caps { + dAtA[i] = 0xa + i++ + v := m.Caps[k] + mapSize := 1 + len(k) + sovStorage(uint64(len(k))) + 1 + len(v) + sovStorage(uint64(len(v))) + i = encodeVarintStorage(dAtA, i, uint64(mapSize)) + dAtA[i] = 0xa + i++ + i = encodeVarintStorage(dAtA, i, uint64(len(k))) + i += copy(dAtA[i:], k) + dAtA[i] = 0x12 + i++ + i = encodeVarintStorage(dAtA, i, uint64(len(v))) + i += copy(dAtA[i:], v) + } + } + return i, nil +} + +func (m *HintsResponse) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *HintsResponse) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + return i, nil +} + +func (m *TimestampRange) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *TimestampRange) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Start != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintStorage(dAtA, i, uint64(m.Start)) + } + if m.End != 0 { + dAtA[i] = 0x10 + i++ + i = encodeVarintStorage(dAtA, i, uint64(m.End)) + } 
+ return i, nil +} + +func encodeFixed64Storage(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Storage(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintStorage(dAtA []byte, offset int, v uint64) int { + for v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *ReadRequest) Size() (n int) { + var l int + _ = l + l = len(m.Database) + if l > 0 { + n += 1 + l + sovStorage(uint64(l)) + } + l = m.TimestampRange.Size() + n += 1 + l + sovStorage(uint64(l)) + if m.Descending { + n += 2 + } + if len(m.Grouping) > 0 { + for _, s := range m.Grouping { + l = len(s) + n += 1 + l + sovStorage(uint64(l)) + } + } + if m.Predicate != nil { + l = m.Predicate.Size() + n += 1 + l + sovStorage(uint64(l)) + } + if m.SeriesLimit != 0 { + n += 1 + sovStorage(uint64(m.SeriesLimit)) + } + if m.SeriesOffset != 0 { + n += 1 + sovStorage(uint64(m.SeriesOffset)) + } + if m.PointsLimit != 0 { + n += 1 + sovStorage(uint64(m.PointsLimit)) + } + if m.Aggregate != nil { + l = m.Aggregate.Size() + n += 1 + l + sovStorage(uint64(l)) + } + if len(m.Trace) > 0 { + for k, v := range m.Trace { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovStorage(uint64(len(k))) + 1 + len(v) + sovStorage(uint64(len(v))) + n += mapEntrySize + 1 + sovStorage(uint64(mapEntrySize)) + } + } + return n +} + +func (m *Aggregate) Size() (n int) { + var l int + _ = l + if m.Type != 0 { + n += 1 + sovStorage(uint64(m.Type)) + } + return n +} + +func (m *Tag) Size() (n int) { + var l int + _ = l + l 
= len(m.Key) + if l > 0 { + n += 1 + l + sovStorage(uint64(l)) + } + l = len(m.Value) + if l > 0 { + n += 1 + l + sovStorage(uint64(l)) + } + return n +} + +func (m *ReadResponse) Size() (n int) { + var l int + _ = l + if len(m.Frames) > 0 { + for _, e := range m.Frames { + l = e.Size() + n += 1 + l + sovStorage(uint64(l)) + } + } + return n +} + +func (m *ReadResponse_Frame) Size() (n int) { + var l int + _ = l + if m.Data != nil { + n += m.Data.Size() + } + return n +} + +func (m *ReadResponse_Frame_Series) Size() (n int) { + var l int + _ = l + if m.Series != nil { + l = m.Series.Size() + n += 1 + l + sovStorage(uint64(l)) + } + return n +} +func (m *ReadResponse_Frame_FloatPoints) Size() (n int) { + var l int + _ = l + if m.FloatPoints != nil { + l = m.FloatPoints.Size() + n += 1 + l + sovStorage(uint64(l)) + } + return n +} +func (m *ReadResponse_Frame_IntegerPoints) Size() (n int) { + var l int + _ = l + if m.IntegerPoints != nil { + l = m.IntegerPoints.Size() + n += 1 + l + sovStorage(uint64(l)) + } + return n +} +func (m *ReadResponse_Frame_UnsignedPoints) Size() (n int) { + var l int + _ = l + if m.UnsignedPoints != nil { + l = m.UnsignedPoints.Size() + n += 1 + l + sovStorage(uint64(l)) + } + return n +} +func (m *ReadResponse_Frame_BooleanPoints) Size() (n int) { + var l int + _ = l + if m.BooleanPoints != nil { + l = m.BooleanPoints.Size() + n += 1 + l + sovStorage(uint64(l)) + } + return n +} +func (m *ReadResponse_Frame_StringPoints) Size() (n int) { + var l int + _ = l + if m.StringPoints != nil { + l = m.StringPoints.Size() + n += 1 + l + sovStorage(uint64(l)) + } + return n +} +func (m *ReadResponse_SeriesFrame) Size() (n int) { + var l int + _ = l + if len(m.Tags) > 0 { + for _, e := range m.Tags { + l = e.Size() + n += 1 + l + sovStorage(uint64(l)) + } + } + if m.DataType != 0 { + n += 1 + sovStorage(uint64(m.DataType)) + } + return n +} + +func (m *ReadResponse_FloatPointsFrame) Size() (n int) { + var l int + _ = l + if len(m.Timestamps) > 0 { + 
n += 1 + sovStorage(uint64(len(m.Timestamps)*8)) + len(m.Timestamps)*8 + } + if len(m.Values) > 0 { + n += 1 + sovStorage(uint64(len(m.Values)*8)) + len(m.Values)*8 + } + return n +} + +func (m *ReadResponse_IntegerPointsFrame) Size() (n int) { + var l int + _ = l + if len(m.Timestamps) > 0 { + n += 1 + sovStorage(uint64(len(m.Timestamps)*8)) + len(m.Timestamps)*8 + } + if len(m.Values) > 0 { + l = 0 + for _, e := range m.Values { + l += sovStorage(uint64(e)) + } + n += 1 + sovStorage(uint64(l)) + l + } + return n +} + +func (m *ReadResponse_UnsignedPointsFrame) Size() (n int) { + var l int + _ = l + if len(m.Timestamps) > 0 { + n += 1 + sovStorage(uint64(len(m.Timestamps)*8)) + len(m.Timestamps)*8 + } + if len(m.Values) > 0 { + l = 0 + for _, e := range m.Values { + l += sovStorage(uint64(e)) + } + n += 1 + sovStorage(uint64(l)) + l + } + return n +} + +func (m *ReadResponse_BooleanPointsFrame) Size() (n int) { + var l int + _ = l + if len(m.Timestamps) > 0 { + n += 1 + sovStorage(uint64(len(m.Timestamps)*8)) + len(m.Timestamps)*8 + } + if len(m.Values) > 0 { + n += 1 + sovStorage(uint64(len(m.Values))) + len(m.Values)*1 + } + return n +} + +func (m *ReadResponse_StringPointsFrame) Size() (n int) { + var l int + _ = l + if len(m.Timestamps) > 0 { + n += 1 + sovStorage(uint64(len(m.Timestamps)*8)) + len(m.Timestamps)*8 + } + if len(m.Values) > 0 { + for _, s := range m.Values { + l = len(s) + n += 1 + l + sovStorage(uint64(l)) + } + } + return n +} + +func (m *CapabilitiesResponse) Size() (n int) { + var l int + _ = l + if len(m.Caps) > 0 { + for k, v := range m.Caps { + _ = k + _ = v + mapEntrySize := 1 + len(k) + sovStorage(uint64(len(k))) + 1 + len(v) + sovStorage(uint64(len(v))) + n += mapEntrySize + 1 + sovStorage(uint64(mapEntrySize)) + } + } + return n +} + +func (m *HintsResponse) Size() (n int) { + var l int + _ = l + return n +} + +func (m *TimestampRange) Size() (n int) { + var l int + _ = l + if m.Start != 0 { + n += 1 + sovStorage(uint64(m.Start)) + } 
+ if m.End != 0 { + n += 1 + sovStorage(uint64(m.End)) + } + return n +} + +func sovStorage(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozStorage(x uint64) (n int) { + return sovStorage(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *ReadRequest) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadRequest: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadRequest: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Database", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Database = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field TimestampRange", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + 
return ErrInvalidLengthStorage + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if err := m.TimestampRange.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 3: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Descending", wireType) + } + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Descending = bool(v != 0) + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Grouping", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Grouping = append(m.Grouping, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Predicate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Predicate == nil { + m.Predicate = &Predicate{} + } + if err := m.Predicate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 6: + if wireType != 0 { + 
return fmt.Errorf("proto: wrong wireType = %d for field SeriesLimit", wireType) + } + m.SeriesLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SeriesLimit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 7: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field SeriesOffset", wireType) + } + m.SeriesOffset = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.SeriesOffset |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 8: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field PointsLimit", wireType) + } + m.PointsLimit = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.PointsLimit |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 9: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Aggregate", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Aggregate == nil { + m.Aggregate = &Aggregate{} + } + if err := m.Aggregate.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 10: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Trace", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 
64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Trace == nil { + m.Trace = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthStorage + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthStorage + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = 
string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := skipStorage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStorage + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Trace[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStorage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStorage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Aggregate) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Aggregate: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Aggregate: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Type", wireType) + } + m.Type = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Type |= (Aggregate_AggregateType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipStorage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStorage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx 
+= skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *Tag) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Tag: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Tag: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Key", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Key = append(m.Key[:0], dAtA[iNdEx:postIndex]...) + if m.Key == nil { + m.Key = []byte{} + } + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Value", wireType) + } + var byteLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + byteLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if byteLen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + byteLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Value = append(m.Value[:0], dAtA[iNdEx:postIndex]...) 
+ if m.Value == nil { + m.Value = []byte{} + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStorage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStorage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: ReadResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: ReadResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Frames", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Frames = append(m.Frames, ReadResponse_Frame{}) + if err := m.Frames[len(m.Frames)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStorage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStorage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + 
} + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadResponse_Frame) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Frame: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Frame: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Series", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ReadResponse_SeriesFrame{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &ReadResponse_Frame_Series{v} + iNdEx = postIndex + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field FloatPoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ReadResponse_FloatPointsFrame{} + if err := 
v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &ReadResponse_Frame_FloatPoints{v} + iNdEx = postIndex + case 3: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field IntegerPoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ReadResponse_IntegerPointsFrame{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &ReadResponse_Frame_IntegerPoints{v} + iNdEx = postIndex + case 4: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field UnsignedPoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ReadResponse_UnsignedPointsFrame{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &ReadResponse_Frame_UnsignedPoints{v} + iNdEx = postIndex + case 5: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field BooleanPoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx 
+ msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ReadResponse_BooleanPointsFrame{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &ReadResponse_Frame_BooleanPoints{v} + iNdEx = postIndex + case 6: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field StringPoints", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + v := &ReadResponse_StringPointsFrame{} + if err := v.Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + m.Data = &ReadResponse_Frame_StringPoints{v} + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStorage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStorage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadResponse_SeriesFrame) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: SeriesFrame: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: SeriesFrame: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + 
return fmt.Errorf("proto: wrong wireType = %d for field Tags", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Tags = append(m.Tags, Tag{}) + if err := m.Tags[len(m.Tags)-1].Unmarshal(dAtA[iNdEx:postIndex]); err != nil { + return err + } + iNdEx = postIndex + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field DataType", wireType) + } + m.DataType = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.DataType |= (ReadResponse_DataType(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipStorage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStorage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadResponse_FloatPointsFrame) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: FloatPointsFrame: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: FloatPointsFrame: illegal tag %d (wire type 
%d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 1 { + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = int64(dAtA[iNdEx-8]) + v |= int64(dAtA[iNdEx-7]) << 8 + v |= int64(dAtA[iNdEx-6]) << 16 + v |= int64(dAtA[iNdEx-5]) << 24 + v |= int64(dAtA[iNdEx-4]) << 32 + v |= int64(dAtA[iNdEx-3]) << 40 + v |= int64(dAtA[iNdEx-2]) << 48 + v |= int64(dAtA[iNdEx-1]) << 56 + m.Timestamps = append(m.Timestamps, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = int64(dAtA[iNdEx-8]) + v |= int64(dAtA[iNdEx-7]) << 8 + v |= int64(dAtA[iNdEx-6]) << 16 + v |= int64(dAtA[iNdEx-5]) << 24 + v |= int64(dAtA[iNdEx-4]) << 32 + v |= int64(dAtA[iNdEx-3]) << 40 + v |= int64(dAtA[iNdEx-2]) << 48 + v |= int64(dAtA[iNdEx-1]) << 56 + m.Timestamps = append(m.Timestamps, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType) + } + case 2: + if wireType == 1 { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.Values = append(m.Values, v2) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 
ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = uint64(dAtA[iNdEx-8]) + v |= uint64(dAtA[iNdEx-7]) << 8 + v |= uint64(dAtA[iNdEx-6]) << 16 + v |= uint64(dAtA[iNdEx-5]) << 24 + v |= uint64(dAtA[iNdEx-4]) << 32 + v |= uint64(dAtA[iNdEx-3]) << 40 + v |= uint64(dAtA[iNdEx-2]) << 48 + v |= uint64(dAtA[iNdEx-1]) << 56 + v2 := float64(math.Float64frombits(v)) + m.Values = append(m.Values, v2) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipStorage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStorage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadResponse_IntegerPointsFrame) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: IntegerPointsFrame: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: IntegerPointsFrame: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 1 { + var v int64 + if (iNdEx + 8) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += 8 + v = int64(dAtA[iNdEx-8]) + v |= int64(dAtA[iNdEx-7]) << 8 + v |= int64(dAtA[iNdEx-6]) << 16 + v |= int64(dAtA[iNdEx-5]) << 24 + v |= int64(dAtA[iNdEx-4]) << 32 + v |= int64(dAtA[iNdEx-3]) << 40 + v |= int64(dAtA[iNdEx-2]) << 48 + v |= int64(dAtA[iNdEx-1]) << 56 + m.Timestamps = append(m.Timestamps, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = int64(dAtA[iNdEx-8]) + v |= int64(dAtA[iNdEx-7]) << 8 + v |= int64(dAtA[iNdEx-6]) << 16 + v |= int64(dAtA[iNdEx-5]) << 24 + v |= int64(dAtA[iNdEx-4]) << 32 + v |= int64(dAtA[iNdEx-3]) << 40 + v |= int64(dAtA[iNdEx-2]) << 48 + v |= int64(dAtA[iNdEx-1]) << 56 + m.Timestamps = append(m.Timestamps, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType) + } + case 2: + if wireType == 0 { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = append(m.Values, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + packedLen + if 
postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v int64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = append(m.Values, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipStorage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStorage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadResponse_UnsignedPointsFrame) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: UnsignedPointsFrame: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: UnsignedPointsFrame: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 1 { + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = int64(dAtA[iNdEx-8]) + v |= int64(dAtA[iNdEx-7]) << 8 + v |= int64(dAtA[iNdEx-6]) << 16 + v |= int64(dAtA[iNdEx-5]) << 24 + v |= int64(dAtA[iNdEx-4]) << 32 + v |= int64(dAtA[iNdEx-3]) << 40 + v |= int64(dAtA[iNdEx-2]) << 48 + v |= int64(dAtA[iNdEx-1]) << 56 + m.Timestamps = append(m.Timestamps, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + 
if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = int64(dAtA[iNdEx-8]) + v |= int64(dAtA[iNdEx-7]) << 8 + v |= int64(dAtA[iNdEx-6]) << 16 + v |= int64(dAtA[iNdEx-5]) << 24 + v |= int64(dAtA[iNdEx-4]) << 32 + v |= int64(dAtA[iNdEx-3]) << 40 + v |= int64(dAtA[iNdEx-2]) << 48 + v |= int64(dAtA[iNdEx-1]) << 56 + m.Timestamps = append(m.Timestamps, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType) + } + case 2: + if wireType == 0 { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = append(m.Values, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = append(m.Values, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d 
for field Values", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipStorage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStorage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadResponse_BooleanPointsFrame) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: BooleanPointsFrame: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: BooleanPointsFrame: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 1 { + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = int64(dAtA[iNdEx-8]) + v |= int64(dAtA[iNdEx-7]) << 8 + v |= int64(dAtA[iNdEx-6]) << 16 + v |= int64(dAtA[iNdEx-5]) << 24 + v |= int64(dAtA[iNdEx-4]) << 32 + v |= int64(dAtA[iNdEx-3]) << 40 + v |= int64(dAtA[iNdEx-2]) << 48 + v |= int64(dAtA[iNdEx-1]) << 56 + m.Timestamps = append(m.Timestamps, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v int64 + if (iNdEx + 8) > l { + return 
io.ErrUnexpectedEOF + } + iNdEx += 8 + v = int64(dAtA[iNdEx-8]) + v |= int64(dAtA[iNdEx-7]) << 8 + v |= int64(dAtA[iNdEx-6]) << 16 + v |= int64(dAtA[iNdEx-5]) << 24 + v |= int64(dAtA[iNdEx-4]) << 32 + v |= int64(dAtA[iNdEx-3]) << 40 + v |= int64(dAtA[iNdEx-2]) << 48 + v |= int64(dAtA[iNdEx-1]) << 56 + m.Timestamps = append(m.Timestamps, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Timestamps", wireType) + } + case 2: + if wireType == 0 { + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = append(m.Values, bool(v != 0)) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + v |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + m.Values = append(m.Values, bool(v != 0)) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + default: + iNdEx = preIndex + skippy, err := skipStorage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStorage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *ReadResponse_StringPointsFrame) Unmarshal(dAtA 
[]byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: StringPointsFrame: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: StringPointsFrame: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType == 1 { + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = int64(dAtA[iNdEx-8]) + v |= int64(dAtA[iNdEx-7]) << 8 + v |= int64(dAtA[iNdEx-6]) << 16 + v |= int64(dAtA[iNdEx-5]) << 24 + v |= int64(dAtA[iNdEx-4]) << 32 + v |= int64(dAtA[iNdEx-3]) << 40 + v |= int64(dAtA[iNdEx-2]) << 48 + v |= int64(dAtA[iNdEx-1]) << 56 + m.Timestamps = append(m.Timestamps, v) + } else if wireType == 2 { + var packedLen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + packedLen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if packedLen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + packedLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + for iNdEx < postIndex { + var v int64 + if (iNdEx + 8) > l { + return io.ErrUnexpectedEOF + } + iNdEx += 8 + v = int64(dAtA[iNdEx-8]) + v |= int64(dAtA[iNdEx-7]) << 8 + v |= int64(dAtA[iNdEx-6]) << 16 + v |= int64(dAtA[iNdEx-5]) << 24 + v |= int64(dAtA[iNdEx-4]) << 32 + v |= int64(dAtA[iNdEx-3]) << 40 + v |= int64(dAtA[iNdEx-2]) << 48 + v |= int64(dAtA[iNdEx-1]) << 56 + m.Timestamps = append(m.Timestamps, v) + } + } else { + return fmt.Errorf("proto: wrong wireType = %d for field 
Timestamps", wireType) + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Values", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if intStringLen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Values = append(m.Values, string(dAtA[iNdEx:postIndex])) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStorage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStorage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *CapabilitiesResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: CapabilitiesResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: CapabilitiesResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Caps", wireType) + } + var msglen int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ 
+ msglen |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + if msglen < 0 { + return ErrInvalidLengthStorage + } + postIndex := iNdEx + msglen + if postIndex > l { + return io.ErrUnexpectedEOF + } + if m.Caps == nil { + m.Caps = make(map[string]string) + } + var mapkey string + var mapvalue string + for iNdEx < postIndex { + entryPreIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + if fieldNum == 1 { + var stringLenmapkey uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapkey |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapkey := int(stringLenmapkey) + if intStringLenmapkey < 0 { + return ErrInvalidLengthStorage + } + postStringIndexmapkey := iNdEx + intStringLenmapkey + if postStringIndexmapkey > l { + return io.ErrUnexpectedEOF + } + mapkey = string(dAtA[iNdEx:postStringIndexmapkey]) + iNdEx = postStringIndexmapkey + } else if fieldNum == 2 { + var stringLenmapvalue uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLenmapvalue |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLenmapvalue := int(stringLenmapvalue) + if intStringLenmapvalue < 0 { + return ErrInvalidLengthStorage + } + postStringIndexmapvalue := iNdEx + intStringLenmapvalue + if postStringIndexmapvalue > l { + return io.ErrUnexpectedEOF + } + mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue]) + iNdEx = postStringIndexmapvalue + } else { + iNdEx = entryPreIndex + skippy, err := 
skipStorage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStorage + } + if (iNdEx + skippy) > postIndex { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + m.Caps[mapkey] = mapvalue + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStorage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStorage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *HintsResponse) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: HintsResponse: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: HintsResponse: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + default: + iNdEx = preIndex + skippy, err := skipStorage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStorage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func (m *TimestampRange) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + 
wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: TimestampRange: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: TimestampRange: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Start", wireType) + } + m.Start = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Start |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field End", wireType) + } + m.End = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStorage + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.End |= (int64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + default: + iNdEx = preIndex + skippy, err := skipStorage(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStorage + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipStorage(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStorage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStorage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + 
return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStorage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthStorage + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStorage + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := skipStorage(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthStorage = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowStorage = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("storage.proto", fileDescriptorStorage) } + +var fileDescriptorStorage = []byte{ + // 1206 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0x41, 0x8f, 0xdb, 0x44, + 0x14, 0xb6, 0xd7, 0x4e, 0x76, 0xf3, 0x92, 0xec, 0x7a, 0xa7, 0xdb, 0x25, 0x72, 0x69, 0xe2, 0xe6, + 0x50, 0xc2, 0xa1, 0x69, 0x15, 0x40, 0x14, 0x2a, 0x24, 0x9a, 0x36, 0xed, 0x2e, 0xdd, 0x26, 0xd5, + 0x24, 0x2b, 0x71, 0x40, 0x5a, 0x26, 0x9b, 0x89, 0x6b, 0x91, 0xd8, 0xc6, 0x9e, 0xa0, 0xee, 0x8d, + 0x23, 0x5a, 0x71, 0xe0, 0xc0, 0x35, 0x27, 0x7e, 0x03, 0x5c, 0x90, 0x38, 0x70, 0xea, 0x91, 0x23, + 0xa7, 0x08, 0xc2, 0x1f, 0x41, 0x33, 0x63, 0x3b, 0xf6, 0x6e, 0x5a, 0x69, 0x2f, 
0xd1, 0xbc, 0xf7, + 0xbe, 0xf7, 0xbd, 0xf7, 0x66, 0xde, 0x7b, 0x31, 0x94, 0x43, 0xe6, 0x05, 0xc4, 0xa6, 0x4d, 0x3f, + 0xf0, 0x98, 0x87, 0x36, 0x23, 0xd1, 0xbc, 0x63, 0x3b, 0xec, 0xe5, 0x6c, 0xd8, 0x3c, 0xf5, 0xa6, + 0x77, 0x6d, 0xcf, 0xf6, 0xee, 0x0a, 0xfb, 0x70, 0x36, 0x16, 0x92, 0x10, 0xc4, 0x49, 0xfa, 0x99, + 0x37, 0x6c, 0xcf, 0xb3, 0x27, 0x74, 0x85, 0xa2, 0x53, 0x9f, 0x9d, 0x45, 0xc6, 0x56, 0x8a, 0xcb, + 0x71, 0xc7, 0x93, 0xd9, 0xab, 0x11, 0x61, 0xe4, 0xee, 0x19, 0x09, 0xfc, 0x53, 0xf9, 0x2b, 0xf9, + 0xc4, 0x31, 0xf2, 0xd9, 0xf1, 0x03, 0x3a, 0x72, 0x4e, 0x09, 0x8b, 0x32, 0xab, 0xff, 0xa1, 0x43, + 0x11, 0x53, 0x32, 0xc2, 0xf4, 0xdb, 0x19, 0x0d, 0x19, 0x32, 0x61, 0x8b, 0xb3, 0x0c, 0x49, 0x48, + 0x2b, 0xaa, 0xa5, 0x36, 0x0a, 0x38, 0x91, 0xd1, 0x97, 0xb0, 0xc3, 0x9c, 0x29, 0x0d, 0x19, 0x99, + 0xfa, 0x27, 0x01, 0x71, 0x6d, 0x5a, 0xd9, 0xb0, 0xd4, 0x46, 0xb1, 0xf5, 0x4e, 0x33, 0x2e, 0x77, + 0x10, 0xdb, 0x31, 0x37, 0xb7, 0xf7, 0x5f, 0x2f, 0x6a, 0xca, 0x72, 0x51, 0xdb, 0xce, 0xea, 0xf1, + 0x36, 0xcb, 0xc8, 0xa8, 0x0a, 0x30, 0xa2, 0xe1, 0x29, 0x75, 0x47, 0x8e, 0x6b, 0x57, 0x34, 0x4b, + 0x6d, 0x6c, 0xe1, 0x94, 0x86, 0x67, 0x65, 0x07, 0xde, 0xcc, 0xe7, 0x56, 0xdd, 0xd2, 0x78, 0x56, + 0xb1, 0x8c, 0xee, 0x41, 0x21, 0x29, 0xaa, 0x92, 0x13, 0xf9, 0xa0, 0x24, 0x9f, 0x17, 0xb1, 0x05, + 0xaf, 0x40, 0xa8, 0x05, 0xa5, 0x90, 0x06, 0x0e, 0x0d, 0x4f, 0x26, 0xce, 0xd4, 0x61, 0x95, 0xbc, + 0xa5, 0x36, 0xf4, 0xf6, 0xce, 0x72, 0x51, 0x2b, 0xf6, 0x85, 0xfe, 0x88, 0xab, 0x71, 0x31, 0x5c, + 0x09, 0xe8, 0x23, 0x28, 0x47, 0x3e, 0xde, 0x78, 0x1c, 0x52, 0x56, 0xd9, 0x14, 0x4e, 0xc6, 0x72, + 0x51, 0x2b, 0x49, 0xa7, 0x9e, 0xd0, 0xe3, 0x88, 0x5a, 0x4a, 0x3c, 0x94, 0xef, 0x39, 0x2e, 0x8b, + 0x43, 0x6d, 0xad, 0x42, 0xbd, 0x10, 0xfa, 0x28, 0x94, 0xbf, 0x12, 0x78, 0x41, 0xc4, 0xb6, 0x03, + 0x6a, 0xf3, 0x82, 0x0a, 0x17, 0x0a, 0x7a, 0x18, 0x5b, 0xf0, 0x0a, 0x84, 0x3e, 0x87, 0x1c, 0x0b, + 0xc8, 0x29, 0xad, 0x80, 0xa5, 0x35, 0x8a, 0xad, 0x5a, 0x82, 0x4e, 0xbd, 0x6c, 0x73, 0xc0, 0x11, + 0x1d, 0x97, 0x05, 
0x67, 0xed, 0xc2, 0x72, 0x51, 0xcb, 0x09, 0x19, 0x4b, 0x47, 0xf3, 0x3e, 0xc0, + 0xca, 0x8e, 0x0c, 0xd0, 0xbe, 0xa1, 0x67, 0xd1, 0xfb, 0xf3, 0x23, 0xda, 0x83, 0xdc, 0x77, 0x64, + 0x32, 0x93, 0x0f, 0x5e, 0xc0, 0x52, 0xf8, 0x74, 0xe3, 0xbe, 0x5a, 0xff, 0x5d, 0x85, 0x42, 0x92, + 0x14, 0xfa, 0x10, 0x74, 0x76, 0xe6, 0xcb, 0xd6, 0xd9, 0x6e, 0x59, 0x97, 0xd3, 0x5e, 0x9d, 0x06, + 0x67, 0x3e, 0xc5, 0x02, 0x5d, 0x7f, 0x05, 0xe5, 0x8c, 0x1a, 0xd5, 0x40, 0xef, 0xf6, 0xba, 0x1d, + 0x43, 0x31, 0xaf, 0x9f, 0xcf, 0xad, 0xdd, 0x8c, 0xb1, 0xeb, 0xb9, 0x14, 0xdd, 0x04, 0xad, 0x7f, + 0xfc, 0xdc, 0x50, 0xcd, 0xbd, 0xf3, 0xb9, 0x65, 0x64, 0xec, 0xfd, 0xd9, 0x14, 0xdd, 0x82, 0xdc, + 0xa3, 0xde, 0x71, 0x77, 0x60, 0x6c, 0x98, 0xfb, 0xe7, 0x73, 0x0b, 0x65, 0x00, 0x8f, 0xbc, 0x99, + 0xcb, 0x4c, 0xfd, 0x87, 0x5f, 0xaa, 0x4a, 0xfd, 0x0e, 0x68, 0x03, 0x62, 0xa7, 0x0b, 0x2e, 0xad, + 0x29, 0xb8, 0x14, 0x15, 0x5c, 0xff, 0xb9, 0x08, 0x25, 0x79, 0xa7, 0xa1, 0xef, 0xb9, 0x21, 0x45, + 0x9f, 0x40, 0x7e, 0x1c, 0x90, 0x29, 0x0d, 0x2b, 0xaa, 0xb8, 0xfa, 0x1b, 0x17, 0xae, 0x5e, 0xc2, + 0x9a, 0x4f, 0x38, 0xa6, 0xad, 0xf3, 0x69, 0xc0, 0x91, 0x83, 0xf9, 0xa7, 0x0e, 0x39, 0xa1, 0x47, + 0x0f, 0x20, 0x2f, 0x9b, 0x46, 0x24, 0x50, 0x6c, 0xdd, 0x5a, 0x4f, 0x22, 0xdb, 0x4c, 0xb8, 0x1c, + 0x28, 0x38, 0x72, 0x41, 0x5f, 0x41, 0x69, 0x3c, 0xf1, 0x08, 0x3b, 0x91, 0x2d, 0x14, 0x4d, 0xe4, + 0xed, 0x37, 0xe4, 0xc1, 0x91, 0xb2, 0xf1, 0x64, 0x4a, 0xa2, 0x13, 0x53, 0xda, 0x03, 0x05, 0x17, + 0xc7, 0x2b, 0x11, 0x8d, 0x60, 0xdb, 0x71, 0x19, 0xb5, 0x69, 0x10, 0xf3, 0x6b, 0x82, 0xbf, 0xb1, + 0x9e, 0xff, 0x50, 0x62, 0xd3, 0x11, 0x76, 0x97, 0x8b, 0x5a, 0x39, 0xa3, 0x3f, 0x50, 0x70, 0xd9, + 0x49, 0x2b, 0xd0, 0x4b, 0xd8, 0x99, 0xb9, 0xa1, 0x63, 0xbb, 0x74, 0x14, 0x87, 0xd1, 0x45, 0x98, + 0xf7, 0xd7, 0x87, 0x39, 0x8e, 0xc0, 0xe9, 0x38, 0x88, 0xaf, 0x99, 0xac, 0xe1, 0x40, 0xc1, 0xdb, + 0xb3, 0x8c, 0x86, 0xd7, 0x33, 0xf4, 0xbc, 0x09, 0x25, 0x6e, 0x1c, 0x28, 0xf7, 0xb6, 0x7a, 0xda, + 0x12, 0x7b, 0xa9, 0x9e, 0x8c, 0x9e, 0xd7, 0x33, 0x4c, 
0x2b, 0xd0, 0xd7, 0x7c, 0xff, 0x07, 0x8e, + 0x6b, 0xc7, 0x41, 0xf2, 0x22, 0xc8, 0x7b, 0x6f, 0x78, 0x57, 0x01, 0x4d, 0xc7, 0x90, 0x5b, 0x25, + 0xa5, 0x3e, 0x50, 0x70, 0x29, 0x4c, 0xc9, 0xed, 0x3c, 0xe8, 0x7c, 0x2d, 0x9b, 0x01, 0x14, 0x53, + 0x6d, 0x81, 0x6e, 0x83, 0xce, 0x88, 0x1d, 0x37, 0x63, 0x69, 0xb5, 0x96, 0x89, 0x1d, 0x75, 0x9f, + 0xb0, 0xa3, 0x07, 0x50, 0xe0, 0xee, 0x27, 0x62, 0x56, 0x37, 0xc4, 0xac, 0x56, 0xd7, 0x27, 0xf7, + 0x98, 0x30, 0x22, 0x26, 0x55, 0xfc, 0x0d, 0xf0, 0x93, 0xf9, 0x05, 0x18, 0x17, 0xfb, 0x88, 0x2f, + 0xf0, 0x64, 0xa5, 0xcb, 0xf0, 0x06, 0x4e, 0x69, 0xd0, 0x3e, 0xe4, 0xc5, 0x04, 0xf1, 0xfe, 0xd4, + 0x1a, 0x2a, 0x8e, 0x24, 0xf3, 0x08, 0xd0, 0xe5, 0x9e, 0xb9, 0x22, 0x9b, 0x96, 0xb0, 0x3d, 0x87, + 0x6b, 0x6b, 0x5a, 0xe3, 0x8a, 0x74, 0x7a, 0x3a, 0xb9, 0xcb, 0x0d, 0x70, 0x45, 0xb6, 0xad, 0x84, + 0xed, 0x19, 0xec, 0x5e, 0x7a, 0xe9, 0x2b, 0x92, 0x15, 0x62, 0xb2, 0x7a, 0x1f, 0x0a, 0x82, 0x20, + 0xda, 0x96, 0xf9, 0x7e, 0x07, 0x1f, 0x76, 0xfa, 0x86, 0x62, 0x5e, 0x3b, 0x9f, 0x5b, 0x3b, 0x89, + 0x49, 0xf6, 0x06, 0x07, 0xbc, 0xe8, 0x1d, 0x76, 0x07, 0x7d, 0x43, 0xbd, 0x00, 0x90, 0xb9, 0x44, + 0xcb, 0xf0, 0x37, 0x15, 0xb6, 0xe2, 0xf7, 0x46, 0xef, 0x42, 0xee, 0xc9, 0x51, 0xef, 0xe1, 0xc0, + 0x50, 0xcc, 0xdd, 0xf3, 0xb9, 0x55, 0x8e, 0x0d, 0xe2, 0xe9, 0x91, 0x05, 0x9b, 0x87, 0xdd, 0x41, + 0xe7, 0x69, 0x07, 0xc7, 0x94, 0xb1, 0x3d, 0x7a, 0x4e, 0x54, 0x87, 0xad, 0xe3, 0x6e, 0xff, 0xf0, + 0x69, 0xb7, 0xf3, 0xd8, 0xd8, 0x90, 0x6b, 0x3a, 0x86, 0xc4, 0x6f, 0xc4, 0x59, 0xda, 0xbd, 0xde, + 0x51, 0xe7, 0x61, 0xd7, 0xd0, 0xb2, 0x2c, 0xd1, 0xbd, 0xa3, 0x2a, 0xe4, 0xfb, 0x03, 0x7c, 0xd8, + 0x7d, 0x6a, 0xe8, 0x26, 0x3a, 0x9f, 0x5b, 0xdb, 0x31, 0x40, 0x5e, 0x65, 0x94, 0xf8, 0x8f, 0x2a, + 0xec, 0x3d, 0x22, 0x3e, 0x19, 0x3a, 0x13, 0x87, 0x39, 0x34, 0x4c, 0xd6, 0xf3, 0x03, 0xd0, 0x4f, + 0x89, 0x1f, 0xcf, 0xc3, 0x6a, 0xfe, 0xd6, 0x81, 0xb9, 0x32, 0x14, 0xff, 0x7f, 0x58, 0x38, 0x99, + 0x1f, 0x43, 0x21, 0x51, 0x5d, 0xe9, 0x2f, 0x71, 0x07, 0xca, 0x07, 0xfc, 0x5a, 0x63, 0xe6, 
0xfa, + 0x7d, 0xb8, 0xf0, 0x01, 0xc4, 0x9d, 0x43, 0x46, 0x02, 0x26, 0x08, 0x35, 0x2c, 0x05, 0x1e, 0x84, + 0xba, 0x23, 0x41, 0xa8, 0x61, 0x7e, 0x6c, 0xfd, 0xad, 0xc2, 0x66, 0x5f, 0x26, 0xcd, 0x8b, 0xe1, + 0xa3, 0x89, 0xf6, 0xd6, 0xfd, 0xbd, 0x9b, 0xd7, 0xd7, 0xce, 0x6f, 0x5d, 0xff, 0xfe, 0xd7, 0x8a, + 0x72, 0x4f, 0x45, 0xcf, 0xa0, 0x94, 0x2e, 0x1a, 0xed, 0x37, 0xe5, 0xa7, 0x65, 0x33, 0xfe, 0xb4, + 0x6c, 0x76, 0xf8, 0xa7, 0xa5, 0x79, 0xf3, 0xad, 0x77, 0x24, 0xe8, 0x54, 0xf4, 0x19, 0xe4, 0x44, + 0x81, 0x6f, 0x64, 0xd9, 0x4f, 0x58, 0xb2, 0x17, 0xc1, 0xdd, 0x37, 0x4c, 0x91, 0x53, 0x7b, 0xef, + 0xf5, 0xbf, 0x55, 0xe5, 0xf5, 0xb2, 0xaa, 0xfe, 0xb5, 0xac, 0xaa, 0xff, 0x2c, 0xab, 0xea, 0x4f, + 0xff, 0x55, 0x95, 0x61, 0x5e, 0x30, 0x7d, 0xf0, 0x7f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x91, 0xdc, + 0x3a, 0xb6, 0x41, 0x0b, 0x00, 0x00, +} diff --git a/vendor/github.com/influxdata/ifql/query/execute/storage/storage.yarpc.go b/vendor/github.com/influxdata/ifql/query/execute/storage/storage.yarpc.go new file mode 100644 index 000000000..4c4374ed6 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/storage/storage.yarpc.go @@ -0,0 +1,191 @@ +// Code generated by protoc-gen-yarpc. DO NOT EDIT. +// source: storage.proto + +/* +Package storage is a generated protocol buffer package. + +It is generated from these files: + storage.proto + predicate.proto + +It has these top-level messages: + ReadRequest + Aggregate + Tag + ReadResponse + CapabilitiesResponse + HintsResponse + TimestampRange + Node + Predicate +*/ +package storage + +import ( + context "context" + + yarpc "github.com/influxdata/yarpc" +) + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import google_protobuf1 "github.com/gogo/protobuf/types" +import _ "github.com/influxdata/yarpc/yarpcproto" + +// Reference imports to suppress errors if they are not otherwise used. 
+var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ yarpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the yarpc package it is being compiled against. +const _ = yarpc.SupportPackageIsVersion1 + +// Client API for Storage service + +type StorageClient interface { + // Read performs a read operation using the given ReadRequest + Read(ctx context.Context, in *ReadRequest) (Storage_ReadClient, error) + // Capabilities returns a map of keys and values identifying the capabilities supported by the storage engine + Capabilities(ctx context.Context, in *google_protobuf1.Empty) (*CapabilitiesResponse, error) + Hints(ctx context.Context, in *google_protobuf1.Empty) (*HintsResponse, error) +} + +type storageClient struct { + cc *yarpc.ClientConn +} + +func NewStorageClient(cc *yarpc.ClientConn) StorageClient { + return &storageClient{cc} +} + +func (c *storageClient) Read(ctx context.Context, in *ReadRequest) (Storage_ReadClient, error) { + stream, err := yarpc.NewClientStream(ctx, &_Storage_serviceDesc.Streams[0], c.cc, 0x0000) + if err != nil { + return nil, err + } + x := &storageReadClient{stream} + if err := x.ClientStream.SendMsg(in); err != nil { + return nil, err + } + return x, nil +} + +type Storage_ReadClient interface { + Recv() (*ReadResponse, error) + yarpc.ClientStream +} + +type storageReadClient struct { + yarpc.ClientStream +} + +func (x *storageReadClient) Recv() (*ReadResponse, error) { + m := new(ReadResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +func (c *storageClient) Capabilities(ctx context.Context, in *google_protobuf1.Empty) (*CapabilitiesResponse, error) { + out := new(CapabilitiesResponse) + err := yarpc.Invoke(ctx, 0x0001, in, out, c.cc) + if err != nil { + return nil, err + } + return out, nil +} + +func (c 
*storageClient) Hints(ctx context.Context, in *google_protobuf1.Empty) (*HintsResponse, error) { + out := new(HintsResponse) + err := yarpc.Invoke(ctx, 0x0002, in, out, c.cc) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Storage service + +type StorageServer interface { + // Read performs a read operation using the given ReadRequest + Read(*ReadRequest, Storage_ReadServer) error + // Capabilities returns a map of keys and values identifying the capabilities supported by the storage engine + Capabilities(context.Context, *google_protobuf1.Empty) (*CapabilitiesResponse, error) + Hints(context.Context, *google_protobuf1.Empty) (*HintsResponse, error) +} + +func RegisterStorageServer(s *yarpc.Server, srv StorageServer) { + s.RegisterService(&_Storage_serviceDesc, srv) +} + +func _Storage_Read_Handler(srv interface{}, stream yarpc.ServerStream) error { + m := new(ReadRequest) + if err := stream.RecvMsg(m); err != nil { + return err + } + return srv.(StorageServer).Read(m, &storageReadServer{stream}) +} + +type Storage_ReadServer interface { + Send(*ReadResponse) error + yarpc.ServerStream +} + +type storageReadServer struct { + yarpc.ServerStream +} + +func (x *storageReadServer) Send(m *ReadResponse) error { + return x.ServerStream.SendMsg(m) +} + +func _Storage_Capabilities_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { + in := new(google_protobuf1.Empty) + if err := dec(in); err != nil { + return nil, err + } + return srv.(StorageServer).Capabilities(ctx, in) +} + +func _Storage_Hints_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) { + in := new(google_protobuf1.Empty) + if err := dec(in); err != nil { + return nil, err + } + return srv.(StorageServer).Hints(ctx, in) +} + +var _Storage_serviceDesc = yarpc.ServiceDesc{ + ServiceName: "storage.Storage", + Index: 0, + HandlerType: (*StorageServer)(nil), + Methods: []yarpc.MethodDesc{ + { 
+ MethodName: "Capabilities", + Index: 1, + Handler: _Storage_Capabilities_Handler, + }, + { + MethodName: "Hints", + Index: 2, + Handler: _Storage_Hints_Handler, + }, + }, + Streams: []yarpc.StreamDesc{ + { + StreamName: "Read", + Index: 0, + Handler: _Storage_Read_Handler, + ServerStreams: true, + }, + }, + Metadata: "storage.proto", +} diff --git a/vendor/github.com/influxdata/ifql/query/execute/time.go b/vendor/github.com/influxdata/ifql/query/execute/time.go new file mode 100644 index 000000000..a033d685b --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/time.go @@ -0,0 +1,66 @@ +package execute + +import ( + "math" + "time" +) + +type Time int64 +type Duration int64 + +const ( + MaxTime = math.MaxInt64 + MinTime = math.MinInt64 + + fixedWidthTimeFmt = "2006-01-02T15:04:05.000000000Z" +) + +func (t Time) Round(d Duration) Time { + if d <= 0 { + return t + } + r := remainder(t, d) + if lessThanHalf(r, d) { + return t - Time(r) + } + return t + Time(d-r) +} + +func (t Time) Truncate(d Duration) Time { + if d <= 0 { + return t + } + r := remainder(t, d) + return t - Time(r) +} + +func (t Time) Add(d Duration) Time { + return t + Time(d) +} + +func Now() Time { + return Time(time.Now().UnixNano()) +} + +// lessThanHalf reports whether x+x < y but avoids overflow, +// assuming x and y are both positive (Duration is signed). +func lessThanHalf(x, y Duration) bool { + return uint64(x)+uint64(x) < uint64(y) +} + +// remainder divides t by d and returns the remainder. 
+func remainder(t Time, d Duration) (r Duration) { + return Duration(int64(t) % int64(d)) +} + +func (t Time) String() string { + return t.Time().Format(fixedWidthTimeFmt) +} + +func (t Time) Time() time.Time { + return time.Unix(0, int64(t)).UTC() +} + +func (d Duration) String() string { + return time.Duration(d).String() +} diff --git a/vendor/github.com/influxdata/ifql/query/execute/transformation.go b/vendor/github.com/influxdata/ifql/query/execute/transformation.go new file mode 100644 index 000000000..c80f1bcfd --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/transformation.go @@ -0,0 +1,35 @@ +package execute + +import ( + "fmt" + + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/plan" +) + +type Transformation interface { + RetractBlock(id DatasetID, meta BlockMetadata) error + Process(id DatasetID, b Block) error + UpdateWatermark(id DatasetID, t Time) error + UpdateProcessingTime(id DatasetID, t Time) error + Finish(id DatasetID, err error) +} + +type Administration interface { + ResolveTime(qt query.Time) Time + Bounds() Bounds + Allocator() *Allocator + Parents() []DatasetID + ConvertID(plan.ProcedureID) DatasetID +} + +type CreateTransformation func(id DatasetID, mode AccumulationMode, spec plan.ProcedureSpec, a Administration) (Transformation, Dataset, error) + +var procedureToTransformation = make(map[plan.ProcedureKind]CreateTransformation) + +func RegisterTransformation(k plan.ProcedureKind, c CreateTransformation) { + if procedureToTransformation[k] != nil { + panic(fmt.Errorf("duplicate registration for transformation with procedure kind %v", k)) + } + procedureToTransformation[k] = c +} diff --git a/vendor/github.com/influxdata/ifql/query/execute/transport.go b/vendor/github.com/influxdata/ifql/query/execute/transport.go new file mode 100644 index 000000000..11fedd115 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/transport.go @@ -0,0 +1,314 @@ +package execute + +import ( + "sync" 
+ "sync/atomic" +) + +type Transport interface { + Transformation + // Finished reports when the Transport has completed and there is no more work to do. + Finished() <-chan struct{} +} + +// consecutiveTransport implements Transport by transporting data consecutively to the downstream Transformation. +type consecutiveTransport struct { + dispatcher Dispatcher + + t Transformation + messages MessageQueue + + finished chan struct{} + errMu sync.Mutex + errValue error + + schedulerState int32 + inflight int32 +} + +func newConescutiveTransport(dispatcher Dispatcher, t Transformation) *consecutiveTransport { + return &consecutiveTransport{ + dispatcher: dispatcher, + t: t, + // TODO(nathanielc): Have planner specify message queue initial buffer size. + messages: newMessageQueue(64), + finished: make(chan struct{}), + } +} + +func (t *consecutiveTransport) setErr(err error) { + t.errMu.Lock() + t.errValue = err + t.errMu.Unlock() +} +func (t *consecutiveTransport) err() error { + t.errMu.Lock() + err := t.errValue + t.errMu.Unlock() + return err +} + +func (t *consecutiveTransport) Finished() <-chan struct{} { + return t.finished +} + +func (t *consecutiveTransport) RetractBlock(id DatasetID, meta BlockMetadata) error { + select { + case <-t.finished: + return t.err() + default: + } + t.pushMsg(&retractBlockMsg{ + srcMessage: srcMessage(id), + blockMetadata: meta, + }) + return nil +} + +func (t *consecutiveTransport) Process(id DatasetID, b Block) error { + select { + case <-t.finished: + return t.err() + default: + } + t.pushMsg(&processMsg{ + srcMessage: srcMessage(id), + block: b, + }) + return nil +} + +func (t *consecutiveTransport) UpdateWatermark(id DatasetID, time Time) error { + select { + case <-t.finished: + return t.err() + default: + } + t.pushMsg(&updateWatermarkMsg{ + srcMessage: srcMessage(id), + time: time, + }) + return nil +} + +func (t *consecutiveTransport) UpdateProcessingTime(id DatasetID, time Time) error { + select { + case <-t.finished: + 
return t.err() + default: + } + t.pushMsg(&updateProcessingTimeMsg{ + srcMessage: srcMessage(id), + time: time, + }) + return nil +} + +func (t *consecutiveTransport) Finish(id DatasetID, err error) { + select { + case <-t.finished: + return + default: + } + t.pushMsg(&finishMsg{ + srcMessage: srcMessage(id), + err: err, + }) +} + +func (t *consecutiveTransport) pushMsg(m Message) { + t.messages.Push(m) + atomic.AddInt32(&t.inflight, 1) + t.schedule() +} + +const ( + // consecutiveTransport schedule states + idle int32 = iota + running + finished +) + +// schedule indicates that there is work available to schedule. +func (t *consecutiveTransport) schedule() { + if t.tryTransition(idle, running) { + t.dispatcher.Schedule(t.processMessages) + } +} + +// tryTransition attempts to transition into the new state and returns true on success. +func (t *consecutiveTransport) tryTransition(old, new int32) bool { + return atomic.CompareAndSwapInt32(&t.schedulerState, old, new) +} + +// transition sets the new state. +func (t *consecutiveTransport) transition(new int32) { + atomic.StoreInt32(&t.schedulerState, new) +} + +func (t *consecutiveTransport) processMessages(throughput int) { +PROCESS: + i := 0 + for m := t.messages.Pop(); m != nil; m = t.messages.Pop() { + atomic.AddInt32(&t.inflight, -1) + if f, err := processMessage(t.t, m); err != nil || f { + // Set the error if there was any + t.setErr(err) + + // Transition to the finished state. + if t.tryTransition(running, finished) { + // Call Finish if we have not already + if !f { + t.t.Finish(m.SrcDatasetID(), err) + } + // We are finished + close(t.finished) + return + } + } + i++ + if i >= throughput { + // We have done enough work. + // Transition to the idle state and reschedule for later. + t.transition(idle) + t.schedule() + return + } + } + + t.transition(idle) + // Check if more messages arrived after the above loop finished. + // This check must happen in the idle state. 
+ if atomic.LoadInt32(&t.inflight) > 0 { + if t.tryTransition(idle, running) { + goto PROCESS + } // else we have already been scheduled again, we can return + } +} + +// processMessage processes the message on t. +// The return value is true if the message was a FinishMsg. +func processMessage(t Transformation, m Message) (finished bool, err error) { + switch m := m.(type) { + case RetractBlockMsg: + err = t.RetractBlock(m.SrcDatasetID(), m.BlockMetadata()) + case ProcessMsg: + b := m.Block() + err = t.Process(m.SrcDatasetID(), b) + b.RefCount(-1) + case UpdateWatermarkMsg: + err = t.UpdateWatermark(m.SrcDatasetID(), m.WatermarkTime()) + case UpdateProcessingTimeMsg: + err = t.UpdateProcessingTime(m.SrcDatasetID(), m.ProcessingTime()) + case FinishMsg: + t.Finish(m.SrcDatasetID(), m.Error()) + finished = true + } + return +} + +type Message interface { + Type() MessageType + SrcDatasetID() DatasetID +} + +type MessageType int + +const ( + RetractBlockType MessageType = iota + ProcessType + UpdateWatermarkType + UpdateProcessingTimeType + FinishType +) + +type srcMessage DatasetID + +func (m srcMessage) SrcDatasetID() DatasetID { + return DatasetID(m) +} + +type RetractBlockMsg interface { + Message + BlockMetadata() BlockMetadata +} + +type retractBlockMsg struct { + srcMessage + blockMetadata BlockMetadata +} + +func (m *retractBlockMsg) Type() MessageType { + return RetractBlockType +} +func (m *retractBlockMsg) BlockMetadata() BlockMetadata { + return m.blockMetadata +} + +type ProcessMsg interface { + Message + Block() Block +} + +type processMsg struct { + srcMessage + block Block +} + +func (m *processMsg) Type() MessageType { + return ProcessType +} +func (m *processMsg) Block() Block { + return m.block +} + +type UpdateWatermarkMsg interface { + Message + WatermarkTime() Time +} + +type updateWatermarkMsg struct { + srcMessage + time Time +} + +func (m *updateWatermarkMsg) Type() MessageType { + return UpdateWatermarkType +} +func (m *updateWatermarkMsg) 
WatermarkTime() Time { + return m.time +} + +type UpdateProcessingTimeMsg interface { + Message + ProcessingTime() Time +} + +type updateProcessingTimeMsg struct { + srcMessage + time Time +} + +func (m *updateProcessingTimeMsg) Type() MessageType { + return UpdateProcessingTimeType +} +func (m *updateProcessingTimeMsg) ProcessingTime() Time { + return m.time +} + +type FinishMsg interface { + Message + Error() error +} + +type finishMsg struct { + srcMessage + err error +} + +func (m *finishMsg) Type() MessageType { + return FinishType +} +func (m *finishMsg) Error() error { + return m.err +} diff --git a/vendor/github.com/influxdata/ifql/query/execute/trigger.go b/vendor/github.com/influxdata/ifql/query/execute/trigger.go new file mode 100644 index 000000000..ce69a3534 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/trigger.go @@ -0,0 +1,148 @@ +package execute + +import ( + "fmt" + + "github.com/influxdata/ifql/query" +) + +type Trigger interface { + Triggered(TriggerContext) bool + Finished() bool + Reset() +} + +type TriggerContext struct { + Block BlockContext + Watermark Time + CurrentProcessingTime Time +} + +type BlockContext struct { + Bounds Bounds + Count int +} + +func NewTriggerFromSpec(spec query.TriggerSpec) Trigger { + switch s := spec.(type) { + case query.AfterWatermarkTriggerSpec: + return &afterWatermarkTrigger{ + allowedLateness: Duration(s.AllowedLateness), + } + case query.RepeatedTriggerSpec: + return &repeatedlyForever{ + t: NewTriggerFromSpec(s.Trigger), + } + case query.AfterProcessingTimeTriggerSpec: + return &afterProcessingTimeTrigger{ + duration: Duration(s.Duration), + } + case query.AfterAtLeastCountTriggerSpec: + return &afterAtLeastCount{ + atLeast: s.Count, + } + case query.OrFinallyTriggerSpec: + return &orFinally{ + main: NewTriggerFromSpec(s.Main), + finally: NewTriggerFromSpec(s.Finally), + } + default: + //TODO(nathanielc): Add proper error handling here. 
+ // Maybe separate validation of a spec and creation of a spec so we know we cannot error during creation? + panic(fmt.Sprintf("unsupported trigger spec provided %T", spec)) + } +} + +// afterWatermarkTrigger triggers once the watermark is greater than the bounds of the block. +type afterWatermarkTrigger struct { + allowedLateness Duration + finished bool +} + +func (t *afterWatermarkTrigger) Triggered(c TriggerContext) bool { + if c.Watermark >= c.Block.Bounds.Stop+Time(t.allowedLateness) { + t.finished = true + } + return c.Watermark >= c.Block.Bounds.Stop +} +func (t *afterWatermarkTrigger) Finished() bool { + return t.finished +} +func (t *afterWatermarkTrigger) Reset() { + t.finished = false +} + +type repeatedlyForever struct { + t Trigger +} + +func (t *repeatedlyForever) Triggered(c TriggerContext) bool { + return t.t.Triggered(c) +} +func (t *repeatedlyForever) Finished() bool { + if t.t.Finished() { + t.Reset() + } + return false +} +func (t *repeatedlyForever) Reset() { + t.t.Reset() +} + +type afterProcessingTimeTrigger struct { + duration Duration + triggerTimeSet bool + triggerTime Time + current Time +} + +func (t *afterProcessingTimeTrigger) Triggered(c TriggerContext) bool { + if !t.triggerTimeSet { + t.triggerTimeSet = true + t.triggerTime = c.CurrentProcessingTime + Time(t.duration) + } + t.current = c.CurrentProcessingTime + return t.current >= t.triggerTime +} +func (t *afterProcessingTimeTrigger) Finished() bool { + return t.triggerTimeSet && t.current >= t.triggerTime +} +func (t *afterProcessingTimeTrigger) Reset() { + t.triggerTimeSet = false +} + +type afterAtLeastCount struct { + n, atLeast int +} + +func (t *afterAtLeastCount) Triggered(c TriggerContext) bool { + t.n = c.Block.Count + return t.n >= t.atLeast +} +func (t *afterAtLeastCount) Finished() bool { + return t.n >= t.atLeast +} +func (t *afterAtLeastCount) Reset() { + t.n = 0 +} + +type orFinally struct { + main Trigger + finally Trigger + finished bool +} + +func (t *orFinally) 
Triggered(c TriggerContext) bool { + if t.finally.Triggered(c) { + t.finished = true + return true + } + return t.main.Triggered(c) +} + +func (t *orFinally) Finished() bool { + return t.finished +} +func (t *orFinally) Reset() { + t.finished = false +} diff --git a/vendor/github.com/influxdata/ifql/query/execute/window.go b/vendor/github.com/influxdata/ifql/query/execute/window.go new file mode 100644 index 000000000..5d2059def --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/execute/window.go @@ -0,0 +1,8 @@ +package execute + +type Window struct { + Every Duration + Period Duration + Round Duration + Start Time +} diff --git a/vendor/github.com/influxdata/ifql/query/format.go b/vendor/github.com/influxdata/ifql/query/format.go new file mode 100644 index 000000000..fcbd7a8ff --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/format.go @@ -0,0 +1,55 @@ +package query + +import ( + "encoding/json" + "fmt" +) + +// TODO(nathanielc): Add better options for formatting plans as Graphviz dot format. 
+type FormatOption func(*formatter) + +func Formatted(q *Spec, opts ...FormatOption) fmt.Formatter { + f := formatter{ + q: q, + } + for _, o := range opts { + o(&f) + } + return f +} + +func FmtJSON(f *formatter) { f.json = true } + +type formatter struct { + q *Spec + json bool +} + +func (f formatter) Format(fs fmt.State, c rune) { + if c == 'v' && fs.Flag('#') { + fmt.Fprintf(fs, "%#v", f.q) + return + } + if f.json { + f.formatJSON(fs) + } else { + f.formatDAG(fs) + } +} +func (f formatter) formatJSON(fs fmt.State) { + e := json.NewEncoder(fs) + e.SetIndent("", " ") + e.Encode(f.q) +} + +func (f formatter) formatDAG(fs fmt.State) { + fmt.Fprint(fs, "digraph QuerySpec {\n") + _ = f.q.Walk(func(o *Operation) error { + fmt.Fprintf(fs, "%s[kind=%q];\n", o.ID, o.Spec.Kind()) + for _, child := range f.q.Children(o.ID) { + fmt.Fprintf(fs, "%s->%s;\n", o.ID, child.ID) + } + return nil + }) + fmt.Fprintln(fs, "}") +} diff --git a/vendor/github.com/influxdata/ifql/query/operation.go b/vendor/github.com/influxdata/ifql/query/operation.go new file mode 100644 index 000000000..5fabecf9a --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/operation.go @@ -0,0 +1,95 @@ +package query + +import ( + "encoding/json" + "fmt" + + "github.com/pkg/errors" +) + +// Operation denotes a single operation in a query. 
+type Operation struct { + ID OperationID `json:"id"` + Spec OperationSpec `json:"spec"` +} + +func (o *Operation) UnmarshalJSON(data []byte) error { + type Alias Operation + raw := struct { + *Alias + Kind OperationKind `json:"kind"` + Spec json.RawMessage `json:"spec"` + }{} + err := json.Unmarshal(data, &raw) + if err != nil { + return err + } + if raw.Alias != nil { + *o = *(*Operation)(raw.Alias) + } + spec, err := unmarshalOpSpec(raw.Kind, raw.Spec) + if err != nil { + return errors.Wrapf(err, "failed to unmarshal operation %q", o.ID) + } + o.Spec = spec + return nil +} + +func unmarshalOpSpec(k OperationKind, data []byte) (OperationSpec, error) { + createOpSpec, ok := kindToOp[k] + if !ok { + return nil, fmt.Errorf("unknown operation spec kind %v", k) + } + spec := createOpSpec() + + if len(data) > 0 { + err := json.Unmarshal(data, spec) + if err != nil { + return nil, err + } + } + return spec, nil +} + +func (o Operation) MarshalJSON() ([]byte, error) { + type Alias Operation + raw := struct { + Kind OperationKind `json:"kind"` + Alias + }{ + Kind: o.Spec.Kind(), + Alias: (Alias)(o), + } + return json.Marshal(raw) +} + +type NewOperationSpec func() OperationSpec + +// OperationSpec specifies an operation as part of a query. +type OperationSpec interface { + // Kind returns the kind of the operation. + Kind() OperationKind +} + +// OperationID is a unique ID within a query for the operation. +type OperationID string + +// OperationKind denotes the kind of operations. +type OperationKind string + +var kindToOp = make(map[OperationKind]NewOperationSpec) + +// RegisterOpSpec registers an operation spec with a given kind. +// If the kind has already been registered the call panics. 
+// +// TODO:(nathanielc) make this part of RegisterMethod/RegisterFunction +func RegisterOpSpec(k OperationKind, c NewOperationSpec) { + if kindToOp[k] != nil { + panic(fmt.Errorf("duplicate registration for operation kind %v", k)) + } + kindToOp[k] = c +} + +func NumberOfOperations() int { + return len(kindToOp) +} diff --git a/vendor/github.com/influxdata/ifql/query/plan/format.go b/vendor/github.com/influxdata/ifql/query/plan/format.go new file mode 100644 index 000000000..5b5622872 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/plan/format.go @@ -0,0 +1,64 @@ +package plan + +import "fmt" + +// TODO(nathanielc): Add better options for formatting plans as Graphviz dot format. +type FormatOption func(*formatter) + +func Formatted(p PlanReader, opts ...FormatOption) fmt.Formatter { + f := formatter{ + p: p, + } + for _, o := range opts { + o(&f) + } + return f +} + +func UseIDs() FormatOption { + return func(f *formatter) { + f.useIDs = true + } +} + +type PlanReader interface { + Do(func(*Procedure)) + lookup(id ProcedureID) *Procedure +} + +type formatter struct { + p PlanReader + useIDs bool +} + +func (f formatter) Format(fs fmt.State, c rune) { + if c == 'v' && fs.Flag('#') { + fmt.Fprintf(fs, "%#v", f.p) + return + } + f.format(fs) +} + +func (f formatter) format(fs fmt.State) { + fmt.Fprint(fs, "digraph PlanSpec {\n") + f.p.Do(func(pr *Procedure) { + if f.useIDs { + fmt.Fprintf(fs, "%s[kind=%q];\n", pr.ID, pr.Spec.Kind()) + } else { + fmt.Fprintf(fs, "%s[id=%q];\n", pr.Spec.Kind(), pr.ID) + } + for _, child := range pr.Children { + if f.useIDs { + fmt.Fprintf(fs, "%s->%s;\n", pr.ID, child) + } else { + c := f.p.lookup(child) + if c != nil { + fmt.Fprintf(fs, "%s->%s;\n", pr.Spec.Kind(), c.Spec.Kind()) + } else { + fmt.Fprintf(fs, "%s->%s;\n", pr.Spec.Kind(), child) + } + } + } + }) + fmt.Fprintln(fs, "}") +} diff --git a/vendor/github.com/influxdata/ifql/query/plan/logical.go b/vendor/github.com/influxdata/ifql/query/plan/logical.go new file 
mode 100644 index 000000000..9d44a5cae --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/plan/logical.go @@ -0,0 +1,98 @@ +package plan + +import ( + "fmt" + + "github.com/influxdata/ifql/query" + uuid "github.com/satori/go.uuid" +) + +var NilUUID uuid.UUID +var RootUUID = NilUUID + +type LogicalPlanSpec struct { + Procedures map[ProcedureID]*Procedure + Order []ProcedureID + Resources query.ResourceManagement +} + +func (lp *LogicalPlanSpec) Do(f func(pr *Procedure)) { + for _, id := range lp.Order { + f(lp.Procedures[id]) + } +} + +func (lp *LogicalPlanSpec) lookup(id ProcedureID) *Procedure { + return lp.Procedures[id] +} + +type LogicalPlanner interface { + Plan(*query.Spec) (*LogicalPlanSpec, error) +} + +type logicalPlanner struct { + plan *LogicalPlanSpec + q *query.Spec +} + +func NewLogicalPlanner() LogicalPlanner { + return new(logicalPlanner) +} + +func (p *logicalPlanner) Plan(q *query.Spec) (*LogicalPlanSpec, error) { + p.q = q + p.plan = &LogicalPlanSpec{ + Procedures: make(map[ProcedureID]*Procedure), + Resources: q.Resources, + } + err := q.Walk(p.walkQuery) + if err != nil { + return nil, err + } + return p.plan, nil +} + +func ProcedureIDFromOperationID(id query.OperationID) ProcedureID { + return ProcedureID(uuid.NewV5(RootUUID, string(id))) +} +func ProcedureIDFromParentID(id ProcedureID) ProcedureID { + return ProcedureID(uuid.NewV5(RootUUID, id.String())) +} + +func (p *logicalPlanner) walkQuery(o *query.Operation) error { + spec, err := p.createSpec(o.Spec.Kind(), o.Spec) + if err != nil { + return err + } + + pr := &Procedure{ + ID: ProcedureIDFromOperationID(o.ID), + Spec: spec, + } + p.plan.Order = append(p.plan.Order, pr.ID) + p.plan.Procedures[pr.ID] = pr + + // Link parent/child relations + parentOps := p.q.Parents(o.ID) + for _, parentOp := range parentOps { + parentID := ProcedureIDFromOperationID(parentOp.ID) + parentPr := p.plan.Procedures[parentID] + parentPr.Children = append(parentPr.Children, pr.ID) + pr.Parents = 
append(pr.Parents, parentID) + } + + return nil +} + +func (p *logicalPlanner) createSpec(qk query.OperationKind, spec query.OperationSpec) (ProcedureSpec, error) { + createPs, ok := queryOpToProcedure[qk] + if !ok { + return nil, fmt.Errorf("unknown query operation %v", qk) + } + //TODO(nathanielc): Support adding all procedures to logical plan instead of only the first + return createPs[0](spec, p) +} + +func (p *logicalPlanner) ConvertID(qid query.OperationID) ProcedureID { + return ProcedureIDFromOperationID(qid) +} diff --git a/vendor/github.com/influxdata/ifql/query/plan/logical_test.go b/vendor/github.com/influxdata/ifql/query/plan/logical_test.go new file mode 100644 index 000000000..be7277209 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/plan/logical_test.go @@ -0,0 +1,254 @@ +package plan_test + +import ( + "strconv" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/query/plan/plantest" +) + +func TestLogicalPlanner_Plan(t *testing.T) { + testCases := []struct { + q *query.Spec + ap *plan.LogicalPlanSpec + }{ + { + q: &query.Spec{ + Operations: []*query.Operation{ + { + ID: "0", + Spec: &functions.FromOpSpec{ + Database: "mydb", + }, + }, + { + ID: "1", + Spec: &functions.RangeOpSpec{ + Start: query.Time{Relative: -1 * time.Hour}, + Stop: query.Time{}, + }, + }, + { + ID: "2", + Spec: &functions.CountOpSpec{}, + }, + }, + Edges: []query.Edge{ + {Parent: "0", Child: "1"}, + {Parent: "1", Child: "2"}, + }, + }, + ap: &plan.LogicalPlanSpec{ + Procedures: map[plan.ProcedureID]*plan.Procedure{ + plan.ProcedureIDFromOperationID("0"): { + ID: plan.ProcedureIDFromOperationID("0"), + Spec: &functions.FromProcedureSpec{ + Database: "mydb", + }, + Parents: nil, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("1")}, + }, + plan.ProcedureIDFromOperationID("1"): { + ID: 
plan.ProcedureIDFromOperationID("1"), + Spec: &functions.RangeProcedureSpec{ + Bounds: plan.BoundsSpec{ + Start: query.Time{Relative: -1 * time.Hour}, + }, + }, + Parents: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("0"), + }, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("2")}, + }, + plan.ProcedureIDFromOperationID("2"): { + ID: plan.ProcedureIDFromOperationID("2"), + Spec: &functions.CountProcedureSpec{}, + Parents: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("1"), + }, + Children: nil, + }, + }, + Order: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("0"), + plan.ProcedureIDFromOperationID("1"), + plan.ProcedureIDFromOperationID("2"), + }, + }, + }, + { + q: benchmarkQuery, + ap: &plan.LogicalPlanSpec{ + Procedures: map[plan.ProcedureID]*plan.Procedure{ + plan.ProcedureIDFromOperationID("select0"): { + ID: plan.ProcedureIDFromOperationID("select0"), + Spec: &functions.FromProcedureSpec{ + Database: "mydb", + }, + Parents: nil, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("range0")}, + }, + plan.ProcedureIDFromOperationID("range0"): { + ID: plan.ProcedureIDFromOperationID("range0"), + Spec: &functions.RangeProcedureSpec{ + Bounds: plan.BoundsSpec{ + Start: query.Time{Relative: -1 * time.Hour}, + }, + }, + Parents: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("select0"), + }, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("count0")}, + }, + plan.ProcedureIDFromOperationID("count0"): { + ID: plan.ProcedureIDFromOperationID("count0"), + Spec: &functions.CountProcedureSpec{}, + Parents: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("range0"), + }, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("join")}, + }, + plan.ProcedureIDFromOperationID("select1"): { + ID: plan.ProcedureIDFromOperationID("select1"), + Spec: &functions.FromProcedureSpec{ + Database: "mydb", + }, + Parents: nil, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("range1")}, + 
}, + plan.ProcedureIDFromOperationID("range1"): { + ID: plan.ProcedureIDFromOperationID("range1"), + Spec: &functions.RangeProcedureSpec{ + Bounds: plan.BoundsSpec{ + Start: query.Time{Relative: -1 * time.Hour}, + }, + }, + Parents: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("select1"), + }, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("sum1")}, + }, + plan.ProcedureIDFromOperationID("sum1"): { + ID: plan.ProcedureIDFromOperationID("sum1"), + Spec: &functions.SumProcedureSpec{}, + Parents: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("range1"), + }, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("join")}, + }, + plan.ProcedureIDFromOperationID("join"): { + ID: plan.ProcedureIDFromOperationID("join"), + Spec: &functions.MergeJoinProcedureSpec{ + TableNames: map[plan.ProcedureID]string{ + plan.ProcedureIDFromOperationID("sum1"): "sum", + plan.ProcedureIDFromOperationID("count0"): "count", + }, + }, + Parents: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("count0"), + plan.ProcedureIDFromOperationID("sum1"), + }, + Children: nil, + }, + }, + Order: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("select1"), + plan.ProcedureIDFromOperationID("range1"), + plan.ProcedureIDFromOperationID("sum1"), + plan.ProcedureIDFromOperationID("select0"), + plan.ProcedureIDFromOperationID("range0"), + plan.ProcedureIDFromOperationID("count0"), + plan.ProcedureIDFromOperationID("join"), + }, + }, + }, + } + for i, tc := range testCases { + tc := tc + t.Run(strconv.Itoa(i), func(t *testing.T) { + planner := plan.NewLogicalPlanner() + got, err := planner.Plan(tc.q) + if err != nil { + t.Fatal(err) + } + if !cmp.Equal(got, tc.ap, plantest.CmpOptions...) 
{ + t.Errorf("unexpected logical plan -want/+got %s", cmp.Diff(tc.ap, got, plantest.CmpOptions...)) + } + }) + } +} + +var benchmarkQuery = &query.Spec{ + Operations: []*query.Operation{ + { + ID: "select0", + Spec: &functions.FromOpSpec{ + Database: "mydb", + }, + }, + { + ID: "range0", + Spec: &functions.RangeOpSpec{ + Start: query.Time{Relative: -1 * time.Hour}, + Stop: query.Time{}, + }, + }, + { + ID: "count0", + Spec: &functions.CountOpSpec{}, + }, + { + ID: "select1", + Spec: &functions.FromOpSpec{ + Database: "mydb", + }, + }, + { + ID: "range1", + Spec: &functions.RangeOpSpec{ + Start: query.Time{Relative: -1 * time.Hour}, + Stop: query.Time{}, + }, + }, + { + ID: "sum1", + Spec: &functions.SumOpSpec{}, + }, + { + ID: "join", + Spec: &functions.JoinOpSpec{ + TableNames: map[query.OperationID]string{ + "count0": "count", + "sum1": "sum", + }, + }, + }, + }, + Edges: []query.Edge{ + {Parent: "select0", Child: "range0"}, + {Parent: "range0", Child: "count0"}, + {Parent: "select1", Child: "range1"}, + {Parent: "range1", Child: "sum1"}, + {Parent: "count0", Child: "join"}, + {Parent: "sum1", Child: "join"}, + }, +} + +var benchLogicalPlan *plan.LogicalPlanSpec + +func BenchmarkLogicalPlan(b *testing.B) { + var err error + planner := plan.NewLogicalPlanner() + b.ResetTimer() + for n := 0; n < b.N; n++ { + benchLogicalPlan, err = planner.Plan(benchmarkQuery) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/influxdata/ifql/query/plan/physical.go b/vendor/github.com/influxdata/ifql/query/plan/physical.go new file mode 100644 index 000000000..d13ceb3ee --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/plan/physical.go @@ -0,0 +1,377 @@ +package plan + +import ( + "fmt" + "math" + "time" + + "github.com/influxdata/ifql/query" + "github.com/pkg/errors" + uuid "github.com/satori/go.uuid" +) + +// DefaultYieldName is the yield name to use in cases where no explicit yield name was specified. 
+const DefaultYieldName = "_result" + +type PlanSpec struct { + // Now represents the relative currentl time of the plan. + Now time.Time + Bounds BoundsSpec + // Procedures is a set of all operations + Procedures map[ProcedureID]*Procedure + Order []ProcedureID + // Results is a list of datasets that are the result of the plan + Results map[string]YieldSpec + + Resources query.ResourceManagement +} + +// YieldSpec defines how data should be yielded. +type YieldSpec struct { + ID ProcedureID +} + +func (p *PlanSpec) Do(f func(pr *Procedure)) { + for _, id := range p.Order { + f(p.Procedures[id]) + } +} +func (p *PlanSpec) lookup(id ProcedureID) *Procedure { + return p.Procedures[id] +} + +type Planner interface { + // Plan create a plan from the logical plan and available storage. + Plan(p *LogicalPlanSpec, s Storage, now time.Time) (*PlanSpec, error) +} + +type PlanRewriter interface { + IsolatePath(parent, child *Procedure) (*Procedure, error) + RemoveBranch(pr *Procedure) error + AddChild(parent *Procedure, childSpec ProcedureSpec) +} + +type planner struct { + plan *PlanSpec + + modified bool +} + +func NewPlanner() Planner { + return new(planner) +} + +func (p *planner) Plan(lp *LogicalPlanSpec, s Storage, now time.Time) (*PlanSpec, error) { + p.plan = &PlanSpec{ + Now: now, + Procedures: make(map[ProcedureID]*Procedure, len(lp.Procedures)), + Order: make([]ProcedureID, 0, len(lp.Order)), + Resources: lp.Resources, + Results: make(map[string]YieldSpec), + } + + lp.Do(func(pr *Procedure) { + pr.plan = p.plan + p.plan.Procedures[pr.ID] = pr + p.plan.Order = append(p.plan.Order, pr.ID) + }) + + // Find Limit+Where+Range+Select to push down time bounds and predicate + var order []ProcedureID + p.modified = true + for p.modified { + p.modified = false + if cap(order) < len(p.plan.Order) { + order = make([]ProcedureID, len(p.plan.Order)) + } else { + order = order[:len(p.plan.Order)] + } + copy(order, p.plan.Order) + for _, id := range order { + pr := 
p.plan.Procedures[id] + if pr == nil { + // Procedure was removed + continue + } + if pd, ok := pr.Spec.(PushDownProcedureSpec); ok { + rules := pd.PushDownRules() + for _, rule := range rules { + if remove, err := p.pushDownAndSearch(pr, rule, pd.PushDown); err != nil { + return nil, err + } else if remove { + if err := p.removeProcedure(pr); err != nil { + return nil, errors.Wrap(err, "failed to remove procedure") + } + } + } + } + } + } + + // Apply all rewrite rules + p.modified = true + for p.modified { + p.modified = false + for _, rule := range rewriteRules { + kind := rule.Root() + p.plan.Do(func(pr *Procedure) { + if pr == nil { + // Procedure was removed + return + } + if pr.Spec.Kind() == kind { + rule.Rewrite(pr, p) + } + }) + } + } + + // Now that plan is complete find results and time bounds + var leaves []ProcedureID + var yields []*Procedure + for _, id := range p.plan.Order { + pr := p.plan.Procedures[id] + if bounded, ok := pr.Spec.(BoundedProcedureSpec); ok { + bounds := bounded.TimeBounds() + p.plan.Bounds = p.plan.Bounds.Union(bounds, now) + } + if yield, ok := pr.Spec.(YieldProcedureSpec); ok { + if len(pr.Parents) != 1 { + return nil, errors.New("yield procedures must have exactly one parent") + } + parent := pr.Parents[0] + name := yield.YieldName() + _, ok := p.plan.Results[name] + if ok { + return nil, fmt.Errorf("found duplicate yield name %q", name) + } + p.plan.Results[name] = YieldSpec{ID: parent} + yields = append(yields, pr) + } else if len(pr.Children) == 0 { + // Capture non yield leaves + leaves = append(leaves, pr.ID) + } + } + + for _, pr := range yields { + // remove yield procedure + p.removeProcedure(pr) + } + + if len(p.plan.Results) == 0 { + if len(leaves) == 1 { + p.plan.Results[DefaultYieldName] = YieldSpec{ID: leaves[0]} + } else { + return nil, errors.New("query must specify explicit yields when there is more than one result.") + } + } + + if p.plan.Bounds.Start.IsZero() && p.plan.Bounds.Stop.IsZero() { + return nil, 
errors.New("unbounded queries are not supported. Add a 'range' call to bound the query.") + } + + // Update concurrency quota + if p.plan.Resources.ConcurrencyQuota == 0 { + p.plan.Resources.ConcurrencyQuota = len(p.plan.Procedures) + } + // Update memory quota + if p.plan.Resources.MemoryBytesQuota == 0 { + p.plan.Resources.MemoryBytesQuota = math.MaxInt64 + } + + return p.plan, nil +} + +func hasKind(kind ProcedureKind, kinds []ProcedureKind) bool { + for _, k := range kinds { + if k == kind { + return true + } + } + return false +} + +func (p *planner) pushDownAndSearch(pr *Procedure, rule PushDownRule, do func(parent *Procedure, dup func() *Procedure)) (bool, error) { + matched := false + for _, parent := range pr.Parents { + pp := p.plan.Procedures[parent] + pk := pp.Spec.Kind() + if pk == rule.Root { + if rule.Match == nil || rule.Match(pp.Spec) { + isolatedParent, err := p.IsolatePath(pp, pr) + if err != nil { + return false, err + } + if pp != isolatedParent { + // Wait to call push down function when the duplicate is found + return false, nil + } + do(pp, func() *Procedure { return p.duplicate(pp, false) }) + matched = true + } + } else if hasKind(pk, rule.Through) { + if _, err := p.pushDownAndSearch(pp, rule, do); err != nil { + return false, err + } + } + } + return matched, nil +} + +// IsolatePath ensures that the child is an only child of the parent. +// The return value is the parent procedure who has an only child. +func (p *planner) IsolatePath(parent, child *Procedure) (*Procedure, error) { + if len(parent.Children) == 1 { + return parent, nil + } + // Duplicate just this child branch + dup := p.duplicateChildBranch(parent, child.ID) + // Remove this entire branch since it has been duplicated. 
+ if err := p.RemoveBranch(child); err != nil { + return nil, err + } + return dup, nil +} + +func (p *planner) AddChild(parent *Procedure, childSpec ProcedureSpec) { + child := &Procedure{ + plan: p.plan, + ID: ProcedureIDFromParentID(parent.ID), + Spec: childSpec, + } + parent.Children = append(parent.Children, child.ID) + child.Parents = []ProcedureID{parent.ID} + + p.plan.Procedures[child.ID] = child + p.plan.Order = insertAfter(p.plan.Order, parent.ID, child.ID) +} + +func (p *planner) removeProcedure(pr *Procedure) error { + // It only makes sense to remove a procedure that has a single parent. + if len(pr.Parents) > 1 { + return errors.New("cannot remove a procedure that has more than one parent") + } + + p.modified = true + delete(p.plan.Procedures, pr.ID) + p.plan.Order = removeID(p.plan.Order, pr.ID) + + for _, id := range pr.Parents { + parent := p.plan.Procedures[id] + parent.Children = removeID(parent.Children, pr.ID) + parent.Children = append(parent.Children, pr.Children...) + } + for _, id := range pr.Children { + child := p.plan.Procedures[id] + child.Parents = removeID(child.Parents, pr.ID) + child.Parents = append(child.Parents, pr.Parents...) + + if len(pr.Parents) == 1 { + if pa, ok := child.Spec.(ParentAwareProcedureSpec); ok { + pa.ParentChanged(pr.ID, pr.Parents[0]) + } + } + } + return nil +} + +func (p *planner) RemoveBranch(pr *Procedure) error { + // It only makes sense to remove a procedure that has a single parent. 
+ if len(pr.Parents) > 1 { + return errors.New("cannot remove a branch that has more than one parent") + } + p.modified = true + delete(p.plan.Procedures, pr.ID) + p.plan.Order = removeID(p.plan.Order, pr.ID) + + for _, id := range pr.Parents { + parent := p.plan.Procedures[id] + // Check that parent hasn't already been removed + if parent != nil { + parent.Children = removeID(parent.Children, pr.ID) + } + } + + for _, id := range pr.Children { + child := p.plan.Procedures[id] + if err := p.RemoveBranch(child); err != nil { + return err + } + } + return nil +} + +func ProcedureIDForDuplicate(id ProcedureID) ProcedureID { + return ProcedureID(uuid.NewV5(RootUUID, id.String())) +} + +func (p *planner) duplicateChildBranch(pr *Procedure, child ProcedureID) *Procedure { + return p.duplicate(pr, true, child) +} +func (p *planner) duplicate(pr *Procedure, skipParents bool, onlyChildren ...ProcedureID) *Procedure { + p.modified = true + np := pr.Copy() + np.ID = ProcedureIDForDuplicate(pr.ID) + p.plan.Procedures[np.ID] = np + p.plan.Order = insertAfter(p.plan.Order, pr.ID, np.ID) + + if !skipParents { + for _, id := range np.Parents { + parent := p.plan.Procedures[id] + parent.Children = append(parent.Children, np.ID) + } + } + + newChildren := make([]ProcedureID, 0, len(np.Children)) + for _, id := range np.Children { + if len(onlyChildren) > 0 && !hasID(onlyChildren, id) { + continue + } + child := p.plan.Procedures[id] + newChild := p.duplicate(child, true) + newChild.Parents = removeID(newChild.Parents, pr.ID) + newChild.Parents = append(newChild.Parents, np.ID) + + newChildren = append(newChildren, newChild.ID) + + if pa, ok := newChild.Spec.(ParentAwareProcedureSpec); ok { + pa.ParentChanged(pr.ID, np.ID) + } + } + np.Children = newChildren + return np +} + +func hasID(ids []ProcedureID, id ProcedureID) bool { + for _, i := range ids { + if i == id { + return true + } + } + return false +} +func removeID(ids []ProcedureID, remove ProcedureID) []ProcedureID { + 
filtered := ids[0:0] + for i, id := range ids { + if id == remove { + filtered = append(filtered, ids[0:i]...) + filtered = append(filtered, ids[i+1:]...) + break + } + } + return filtered +} +func insertAfter(ids []ProcedureID, after, new ProcedureID) []ProcedureID { + var newIds []ProcedureID + for i, id := range ids { + if id == after { + newIds = append(newIds, ids[:i+1]...) + newIds = append(newIds, new) + if i+1 < len(ids) { + newIds = append(newIds, ids[i+1:]...) + } + break + } + } + return newIds +} diff --git a/vendor/github.com/influxdata/ifql/query/plan/physical_test.go b/vendor/github.com/influxdata/ifql/query/plan/physical_test.go new file mode 100644 index 000000000..476f08aa8 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/plan/physical_test.go @@ -0,0 +1,792 @@ +package plan_test + +import ( + "math" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" + "github.com/influxdata/ifql/query/plan" + "github.com/influxdata/ifql/query/plan/plantest" +) + +func TestPhysicalPlanner_Plan(t *testing.T) { + testCases := []struct { + name string + lp *plan.LogicalPlanSpec + pp *plan.PlanSpec + }{ + { + name: "single push down", + lp: &plan.LogicalPlanSpec{ + Resources: query.ResourceManagement{ + ConcurrencyQuota: 1, + MemoryBytesQuota: 10000, + }, + Procedures: map[plan.ProcedureID]*plan.Procedure{ + plan.ProcedureIDFromOperationID("from"): { + ID: plan.ProcedureIDFromOperationID("from"), + Spec: &functions.FromProcedureSpec{ + Database: "mydb", + }, + Parents: nil, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("range")}, + }, + plan.ProcedureIDFromOperationID("range"): { + ID: plan.ProcedureIDFromOperationID("range"), + Spec: &functions.RangeProcedureSpec{ + Bounds: plan.BoundsSpec{ + Start: query.Time{ + IsRelative: true, + Relative: -1 * time.Hour, + }, + }, + }, + Parents: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("from"), + }, + 
Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("count")}, + }, + plan.ProcedureIDFromOperationID("count"): { + ID: plan.ProcedureIDFromOperationID("count"), + Spec: &functions.CountProcedureSpec{}, + Parents: []plan.ProcedureID{ + (plan.ProcedureIDFromOperationID("range")), + }, + Children: nil, + }, + }, + Order: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("from"), + plan.ProcedureIDFromOperationID("range"), + plan.ProcedureIDFromOperationID("count"), + }, + }, + pp: &plan.PlanSpec{ + Now: time.Date(2017, 8, 8, 0, 0, 0, 0, time.UTC), + Resources: query.ResourceManagement{ + ConcurrencyQuota: 1, + MemoryBytesQuota: 10000, + }, + Bounds: plan.BoundsSpec{ + Start: query.Time{ + IsRelative: true, + Relative: -1 * time.Hour, + }, + }, + Procedures: map[plan.ProcedureID]*plan.Procedure{ + plan.ProcedureIDFromOperationID("from"): { + ID: plan.ProcedureIDFromOperationID("from"), + Spec: &functions.FromProcedureSpec{ + Database: "mydb", + BoundsSet: true, + Bounds: plan.BoundsSpec{ + Start: query.Time{ + IsRelative: true, + Relative: -1 * time.Hour, + }, + }, + AggregateSet: true, + AggregateMethod: "count", + }, + Parents: nil, + Children: []plan.ProcedureID{}, + }, + }, + Results: map[string]plan.YieldSpec{ + plan.DefaultYieldName: {ID: plan.ProcedureIDFromOperationID("from")}, + }, + Order: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("from"), + }, + }, + }, + { + name: "single push down with match", + lp: &plan.LogicalPlanSpec{ + Procedures: map[plan.ProcedureID]*plan.Procedure{ + plan.ProcedureIDFromOperationID("from"): { + ID: plan.ProcedureIDFromOperationID("from"), + Spec: &functions.FromProcedureSpec{ + Database: "mydb", + }, + Parents: nil, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("last")}, + }, + plan.ProcedureIDFromOperationID("last"): { + ID: plan.ProcedureIDFromOperationID("last"), + Spec: &functions.LastProcedureSpec{}, + Parents: []plan.ProcedureID{ + (plan.ProcedureIDFromOperationID("from")), + }, + 
Children: nil, + }, + }, + Order: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("from"), + plan.ProcedureIDFromOperationID("last"), + }, + }, + pp: &plan.PlanSpec{ + Resources: query.ResourceManagement{ + ConcurrencyQuota: 1, + MemoryBytesQuota: math.MaxInt64, + }, + Now: time.Date(2017, 8, 8, 0, 0, 0, 0, time.UTC), + Bounds: plan.BoundsSpec{ + Start: query.MinTime, + Stop: query.Now, + }, + Procedures: map[plan.ProcedureID]*plan.Procedure{ + plan.ProcedureIDFromOperationID("from"): { + ID: plan.ProcedureIDFromOperationID("from"), + Spec: &functions.FromProcedureSpec{ + Database: "mydb", + BoundsSet: true, + Bounds: plan.BoundsSpec{ + Start: query.MinTime, + Stop: query.Now, + }, + LimitSet: true, + PointsLimit: 1, + DescendingSet: true, + Descending: true, + }, + Parents: nil, + Children: []plan.ProcedureID{}, + }, + }, + Results: map[string]plan.YieldSpec{ + plan.DefaultYieldName: {ID: plan.ProcedureIDFromOperationID("from")}, + }, + Order: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("from"), + }, + }, + }, + { + name: "multiple push down", + lp: &plan.LogicalPlanSpec{ + Procedures: map[plan.ProcedureID]*plan.Procedure{ + plan.ProcedureIDFromOperationID("from"): { + ID: plan.ProcedureIDFromOperationID("from"), + Spec: &functions.FromProcedureSpec{ + Database: "mydb", + }, + Parents: nil, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("range")}, + }, + plan.ProcedureIDFromOperationID("range"): { + ID: plan.ProcedureIDFromOperationID("range"), + Spec: &functions.RangeProcedureSpec{ + Bounds: plan.BoundsSpec{ + Start: query.Time{ + IsRelative: true, + Relative: -1 * time.Hour, + }, + }, + }, + Parents: []plan.ProcedureID{ + (plan.ProcedureIDFromOperationID("from")), + }, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("limit")}, + }, + plan.ProcedureIDFromOperationID("limit"): { + ID: plan.ProcedureIDFromOperationID("limit"), + Spec: &functions.LimitProcedureSpec{ + N: 10, + }, + Parents: []plan.ProcedureID{ + 
(plan.ProcedureIDFromOperationID("range")), + }, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("mean")}, + }, + plan.ProcedureIDFromOperationID("mean"): { + ID: plan.ProcedureIDFromOperationID("mean"), + Spec: &functions.MeanProcedureSpec{}, + Parents: []plan.ProcedureID{ + (plan.ProcedureIDFromOperationID("limit")), + }, + Children: nil, + }, + }, + Order: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("from"), + plan.ProcedureIDFromOperationID("range"), + plan.ProcedureIDFromOperationID("limit"), + plan.ProcedureIDFromOperationID("mean"), + }, + }, + pp: &plan.PlanSpec{ + Now: time.Date(2017, 8, 8, 0, 0, 0, 0, time.UTC), + Resources: query.ResourceManagement{ + ConcurrencyQuota: 2, + MemoryBytesQuota: math.MaxInt64, + }, + Bounds: plan.BoundsSpec{ + Start: query.Time{ + IsRelative: true, + Relative: -1 * time.Hour, + }, + }, + Procedures: map[plan.ProcedureID]*plan.Procedure{ + plan.ProcedureIDFromOperationID("from"): { + ID: plan.ProcedureIDFromOperationID("from"), + Spec: &functions.FromProcedureSpec{ + Database: "mydb", + BoundsSet: true, + Bounds: plan.BoundsSpec{ + Start: query.Time{ + IsRelative: true, + Relative: -1 * time.Hour, + }, + }, + LimitSet: true, + PointsLimit: 10, + }, + Parents: nil, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("mean")}, + }, + plan.ProcedureIDFromOperationID("mean"): { + ID: plan.ProcedureIDFromOperationID("mean"), + Spec: &functions.MeanProcedureSpec{}, + Parents: []plan.ProcedureID{ + (plan.ProcedureIDFromOperationID("from")), + }, + Children: nil, + }, + }, + Results: map[string]plan.YieldSpec{ + plan.DefaultYieldName: {ID: plan.ProcedureIDFromOperationID("mean")}, + }, + Order: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("from"), + plan.ProcedureIDFromOperationID("mean"), + }, + }, + }, + { + name: "multiple yield", + lp: &plan.LogicalPlanSpec{ + Resources: query.ResourceManagement{ + ConcurrencyQuota: 1, + MemoryBytesQuota: 10000, + }, + Procedures: 
map[plan.ProcedureID]*plan.Procedure{ + plan.ProcedureIDFromOperationID("from"): { + ID: plan.ProcedureIDFromOperationID("from"), + Spec: &functions.FromProcedureSpec{ + Database: "mydb", + }, + Parents: nil, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("range")}, + }, + plan.ProcedureIDFromOperationID("range"): { + ID: plan.ProcedureIDFromOperationID("range"), + Spec: &functions.RangeProcedureSpec{ + Bounds: plan.BoundsSpec{ + Start: query.Time{ + IsRelative: true, + Relative: -1 * time.Hour, + }, + }, + }, + Parents: []plan.ProcedureID{plan.ProcedureIDFromOperationID("from")}, + Children: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("stddev"), + plan.ProcedureIDFromOperationID("skew"), + }, + }, + plan.ProcedureIDFromOperationID("stddev"): { + ID: plan.ProcedureIDFromOperationID("stddev"), + Spec: &functions.StddevProcedureSpec{}, + Parents: []plan.ProcedureID{plan.ProcedureIDFromOperationID("range")}, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("yieldStddev")}, + }, + plan.ProcedureIDFromOperationID("yieldStddev"): { + ID: plan.ProcedureIDFromOperationID("yieldStddev"), + Spec: &functions.YieldProcedureSpec{Name: "stddev"}, + Parents: []plan.ProcedureID{plan.ProcedureIDFromOperationID("stddev")}, + Children: nil, + }, + plan.ProcedureIDFromOperationID("skew"): { + ID: plan.ProcedureIDFromOperationID("skew"), + Spec: &functions.SkewProcedureSpec{}, + Parents: []plan.ProcedureID{plan.ProcedureIDFromOperationID("range")}, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("yieldSkew")}, + }, + plan.ProcedureIDFromOperationID("yieldSkew"): { + ID: plan.ProcedureIDFromOperationID("yieldSkew"), + Spec: &functions.YieldProcedureSpec{Name: "skew"}, + Parents: []plan.ProcedureID{plan.ProcedureIDFromOperationID("skew")}, + Children: nil, + }, + }, + Order: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("from"), + plan.ProcedureIDFromOperationID("range"), + plan.ProcedureIDFromOperationID("stddev"), + 
plan.ProcedureIDFromOperationID("yieldStddev"), + plan.ProcedureIDFromOperationID("skew"), + plan.ProcedureIDFromOperationID("yieldSkew"), + }, + }, + pp: &plan.PlanSpec{ + Now: time.Date(2017, 8, 8, 0, 0, 0, 0, time.UTC), + Resources: query.ResourceManagement{ + ConcurrencyQuota: 1, + MemoryBytesQuota: 10000, + }, + Bounds: plan.BoundsSpec{ + Start: query.Time{ + IsRelative: true, + Relative: -1 * time.Hour, + }, + }, + Procedures: map[plan.ProcedureID]*plan.Procedure{ + plan.ProcedureIDFromOperationID("from"): { + ID: plan.ProcedureIDFromOperationID("from"), + Spec: &functions.FromProcedureSpec{ + Database: "mydb", + BoundsSet: true, + Bounds: plan.BoundsSpec{ + Start: query.Time{ + IsRelative: true, + Relative: -1 * time.Hour, + }, + }, + }, + Parents: nil, + Children: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("stddev"), + plan.ProcedureIDFromOperationID("skew"), + }, + }, + plan.ProcedureIDFromOperationID("stddev"): { + ID: plan.ProcedureIDFromOperationID("stddev"), + Spec: &functions.StddevProcedureSpec{}, + Parents: []plan.ProcedureID{plan.ProcedureIDFromOperationID("from")}, + Children: []plan.ProcedureID{}, + }, + plan.ProcedureIDFromOperationID("skew"): { + ID: plan.ProcedureIDFromOperationID("skew"), + Spec: &functions.SkewProcedureSpec{}, + Parents: []plan.ProcedureID{plan.ProcedureIDFromOperationID("from")}, + Children: []plan.ProcedureID{}, + }, + }, + Results: map[string]plan.YieldSpec{ + "stddev": {ID: plan.ProcedureIDFromOperationID("stddev")}, + "skew": {ID: plan.ProcedureIDFromOperationID("skew")}, + }, + Order: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("from"), + plan.ProcedureIDFromOperationID("stddev"), + plan.ProcedureIDFromOperationID("skew"), + }, + }, + }, + { + name: "group with aggregate", + lp: &plan.LogicalPlanSpec{ + Resources: query.ResourceManagement{ + ConcurrencyQuota: 1, + MemoryBytesQuota: 10000, + }, + Procedures: map[plan.ProcedureID]*plan.Procedure{ + plan.ProcedureIDFromOperationID("from"): { + ID: 
plan.ProcedureIDFromOperationID("from"), + Spec: &functions.FromProcedureSpec{ + Database: "mydb", + }, + Parents: nil, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("range")}, + }, + plan.ProcedureIDFromOperationID("range"): { + ID: plan.ProcedureIDFromOperationID("range"), + Spec: &functions.RangeProcedureSpec{ + Bounds: plan.BoundsSpec{ + Start: query.Time{ + IsRelative: true, + Relative: -1 * time.Hour, + }, + }, + }, + Parents: []plan.ProcedureID{plan.ProcedureIDFromOperationID("from")}, + Children: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("group"), + }, + }, + plan.ProcedureIDFromOperationID("group"): { + ID: plan.ProcedureIDFromOperationID("group"), + Spec: &functions.GroupProcedureSpec{ + By: []string{"host", "region"}, + }, + Parents: []plan.ProcedureID{plan.ProcedureIDFromOperationID("range")}, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("sum")}, + }, + plan.ProcedureIDFromOperationID("sum"): { + ID: plan.ProcedureIDFromOperationID("sum"), + Spec: &functions.SumProcedureSpec{}, + Parents: []plan.ProcedureID{plan.ProcedureIDFromOperationID("group")}, + }, + }, + Order: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("from"), + plan.ProcedureIDFromOperationID("range"), + plan.ProcedureIDFromOperationID("group"), + plan.ProcedureIDFromOperationID("sum"), + }, + }, + pp: &plan.PlanSpec{ + Now: time.Date(2017, 8, 8, 0, 0, 0, 0, time.UTC), + Resources: query.ResourceManagement{ + ConcurrencyQuota: 1, + MemoryBytesQuota: 10000, + }, + Bounds: plan.BoundsSpec{ + Start: query.Time{ + IsRelative: true, + Relative: -1 * time.Hour, + }, + }, + Procedures: map[plan.ProcedureID]*plan.Procedure{ + plan.ProcedureIDFromOperationID("from"): { + ID: plan.ProcedureIDFromOperationID("from"), + Spec: &functions.FromProcedureSpec{ + Database: "mydb", + BoundsSet: true, + Bounds: plan.BoundsSpec{ + Start: query.Time{ + IsRelative: true, + Relative: -1 * time.Hour, + }, + }, + GroupingSet: true, + GroupKeys: []string{"host", 
"region"}, + AggregateSet: true, + AggregateMethod: "sum", + }, + Parents: nil, + Children: []plan.ProcedureID{ + plan.ProcedureIDFromParentID(plan.ProcedureIDFromOperationID("from")), + }, + }, + plan.ProcedureIDFromParentID(plan.ProcedureIDFromOperationID("from")): { + ID: plan.ProcedureIDFromParentID(plan.ProcedureIDFromOperationID("from")), + Spec: &functions.SumProcedureSpec{}, + Parents: []plan.ProcedureID{plan.ProcedureIDFromOperationID("from")}, + }, + }, + Results: map[string]plan.YieldSpec{ + "_result": {ID: plan.ProcedureIDFromParentID(plan.ProcedureIDFromOperationID("from"))}, + }, + Order: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("from"), + plan.ProcedureIDFromParentID(plan.ProcedureIDFromOperationID("from")), + }, + }, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + PhysicalPlanTestHelper(t, tc.lp, tc.pp) + }) + } +} + +func TestPhysicalPlanner_Plan_PushDown_Branch(t *testing.T) { + lp := &plan.LogicalPlanSpec{ + Procedures: map[plan.ProcedureID]*plan.Procedure{ + plan.ProcedureIDFromOperationID("from"): { + ID: plan.ProcedureIDFromOperationID("from"), + Spec: &functions.FromProcedureSpec{ + Database: "mydb", + }, + Parents: nil, + Children: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("first"), + plan.ProcedureIDFromOperationID("last"), + }, + }, + plan.ProcedureIDFromOperationID("first"): { + ID: plan.ProcedureIDFromOperationID("first"), + Spec: &functions.FirstProcedureSpec{}, + Parents: []plan.ProcedureID{plan.ProcedureIDFromOperationID("from")}, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("yieldFirst")}, + }, + plan.ProcedureIDFromOperationID("yieldFirst"): { + ID: plan.ProcedureIDFromOperationID("yieldFirst"), + Spec: &functions.YieldProcedureSpec{Name: "first"}, + Parents: []plan.ProcedureID{plan.ProcedureIDFromOperationID("first")}, + Children: nil, + }, + plan.ProcedureIDFromOperationID("last"): { + ID: plan.ProcedureIDFromOperationID("last"), + Spec: 
&functions.LastProcedureSpec{}, + Parents: []plan.ProcedureID{plan.ProcedureIDFromOperationID("from")}, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("yieldLast")}, + }, + plan.ProcedureIDFromOperationID("yieldLast"): { + ID: plan.ProcedureIDFromOperationID("yieldLast"), + Spec: &functions.YieldProcedureSpec{Name: "last"}, + Parents: []plan.ProcedureID{plan.ProcedureIDFromOperationID("last")}, + Children: nil, + }, + }, + Order: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("from"), + plan.ProcedureIDFromOperationID("first"), + plan.ProcedureIDFromOperationID("yieldFirst"), + plan.ProcedureIDFromOperationID("last"), // last is last so it will be duplicated + plan.ProcedureIDFromOperationID("yieldLast"), + }, + } + + fromID := plan.ProcedureIDFromOperationID("from") + fromIDDup := plan.ProcedureIDForDuplicate(fromID) + want := &plan.PlanSpec{ + Bounds: plan.BoundsSpec{ + Start: query.MinTime, + Stop: query.Now, + }, + Resources: query.ResourceManagement{ + ConcurrencyQuota: 2, + MemoryBytesQuota: math.MaxInt64, + }, + Procedures: map[plan.ProcedureID]*plan.Procedure{ + fromID: { + ID: fromID, + Spec: &functions.FromProcedureSpec{ + Database: "mydb", + BoundsSet: true, + Bounds: plan.BoundsSpec{ + Start: query.MinTime, + Stop: query.Now, + }, + LimitSet: true, + PointsLimit: 1, + DescendingSet: true, + Descending: true, // last + }, + Children: []plan.ProcedureID{}, + }, + fromIDDup: { + ID: fromIDDup, + Spec: &functions.FromProcedureSpec{ + Database: "mydb", + BoundsSet: true, + Bounds: plan.BoundsSpec{ + Start: query.MinTime, + Stop: query.Now, + }, + LimitSet: true, + PointsLimit: 1, + DescendingSet: true, + Descending: false, // first + }, + Parents: []plan.ProcedureID{}, + Children: []plan.ProcedureID{}, + }, + }, + Results: map[string]plan.YieldSpec{ + "first": {ID: fromIDDup}, + "last": {ID: fromID}, + }, + Order: []plan.ProcedureID{ + fromID, + fromIDDup, + }, + } + + PhysicalPlanTestHelper(t, lp, want) +} + +func 
TestPhysicalPlanner_Plan_PushDown_Mixed(t *testing.T) { + lp := &plan.LogicalPlanSpec{ + Procedures: map[plan.ProcedureID]*plan.Procedure{ + plan.ProcedureIDFromOperationID("from"): { + ID: plan.ProcedureIDFromOperationID("from"), + Spec: &functions.FromProcedureSpec{ + Database: "mydb", + }, + Parents: nil, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("range")}, + }, + plan.ProcedureIDFromOperationID("range"): { + ID: plan.ProcedureIDFromOperationID("range"), + Spec: &functions.RangeProcedureSpec{ + Bounds: plan.BoundsSpec{ + Start: query.Time{ + IsRelative: true, + Relative: -1 * time.Hour, + }, + }, + }, + Parents: []plan.ProcedureID{ + (plan.ProcedureIDFromOperationID("from")), + }, + Children: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("sum"), + plan.ProcedureIDFromOperationID("mean"), + }, + }, + plan.ProcedureIDFromOperationID("sum"): { + ID: plan.ProcedureIDFromOperationID("sum"), + Spec: &functions.SumProcedureSpec{}, + Parents: []plan.ProcedureID{plan.ProcedureIDFromOperationID("range")}, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("yieldSum")}, + }, + plan.ProcedureIDFromOperationID("yieldSum"): { + ID: plan.ProcedureIDFromOperationID("yieldSum"), + Spec: &functions.YieldProcedureSpec{Name: "sum"}, + Parents: []plan.ProcedureID{plan.ProcedureIDFromOperationID("sum")}, + Children: nil, + }, + plan.ProcedureIDFromOperationID("mean"): { + ID: plan.ProcedureIDFromOperationID("mean"), + Spec: &functions.MeanProcedureSpec{}, + Parents: []plan.ProcedureID{plan.ProcedureIDFromOperationID("range")}, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("yieldMean")}, + }, + plan.ProcedureIDFromOperationID("yieldMean"): { + ID: plan.ProcedureIDFromOperationID("yieldMean"), + Spec: &functions.YieldProcedureSpec{Name: "mean"}, + Parents: []plan.ProcedureID{plan.ProcedureIDFromOperationID("mean")}, + Children: nil, + }, + }, + Order: []plan.ProcedureID{ + plan.ProcedureIDFromOperationID("from"), + 
plan.ProcedureIDFromOperationID("range"), + plan.ProcedureIDFromOperationID("sum"), + plan.ProcedureIDFromOperationID("yieldSum"), + plan.ProcedureIDFromOperationID("mean"), // Mean can't be pushed down, but sum can + plan.ProcedureIDFromOperationID("yieldMean"), + }, + } + + fromID := plan.ProcedureIDFromOperationID("from") + fromIDDup := plan.ProcedureIDForDuplicate(fromID) + want := &plan.PlanSpec{ + Bounds: plan.BoundsSpec{ + Start: query.Time{ + IsRelative: true, + Relative: -1 * time.Hour, + }, + }, + Resources: query.ResourceManagement{ + ConcurrencyQuota: 3, + MemoryBytesQuota: math.MaxInt64, + }, + Procedures: map[plan.ProcedureID]*plan.Procedure{ + fromIDDup: { + ID: fromIDDup, + Spec: &functions.FromProcedureSpec{ + Database: "mydb", + BoundsSet: true, + Bounds: plan.BoundsSpec{ + Start: query.Time{ + IsRelative: true, + Relative: -1 * time.Hour, + }, + }, + AggregateSet: true, + AggregateMethod: "sum", + }, + Parents: []plan.ProcedureID{}, + Children: []plan.ProcedureID{}, + }, + plan.ProcedureIDFromOperationID("from"): { + ID: plan.ProcedureIDFromOperationID("from"), + Spec: &functions.FromProcedureSpec{ + Database: "mydb", + BoundsSet: true, + Bounds: plan.BoundsSpec{ + Start: query.Time{ + IsRelative: true, + Relative: -1 * time.Hour, + }, + }, + }, + Children: []plan.ProcedureID{plan.ProcedureIDFromOperationID("mean")}, + }, + plan.ProcedureIDFromOperationID("mean"): { + ID: plan.ProcedureIDFromOperationID("mean"), + Spec: &functions.MeanProcedureSpec{}, + Parents: []plan.ProcedureID{plan.ProcedureIDFromOperationID("from")}, + Children: []plan.ProcedureID{}, + }, + }, + Results: map[string]plan.YieldSpec{ + "sum": {ID: fromIDDup}, + "mean": {ID: plan.ProcedureIDFromOperationID("mean")}, + }, + Order: []plan.ProcedureID{ + fromID, + fromIDDup, + plan.ProcedureIDFromOperationID("mean"), + }, + } + + PhysicalPlanTestHelper(t, lp, want) +} + +func PhysicalPlanTestHelper(t *testing.T, lp *plan.LogicalPlanSpec, want *plan.PlanSpec) { + t.Helper() + // 
Setup expected now time + now := time.Now() + want.Now = now + + planner := plan.NewPlanner() + got, err := planner.Plan(lp, nil, now) + if err != nil { + t.Fatal(err) + } + + if !cmp.Equal(got, want, plantest.CmpOptions...) { + t.Log("Logical:", plan.Formatted(lp)) + t.Log("Want Physical:", plan.Formatted(want)) + t.Log("Got Physical:", plan.Formatted(got)) + t.Errorf("unexpected physical plan -want/+got:\n%s", cmp.Diff(want, got, plantest.CmpOptions...)) + } +} + +var benchmarkPhysicalPlan *plan.PlanSpec + +func BenchmarkPhysicalPlan(b *testing.B) { + var err error + lp, err := plan.NewLogicalPlanner().Plan(benchmarkQuery) + if err != nil { + b.Fatal(err) + } + planner := plan.NewPlanner() + now := time.Date(2017, 8, 8, 0, 0, 0, 0, time.UTC) + for n := 0; n < b.N; n++ { + benchmarkPhysicalPlan, err = planner.Plan(lp, nil, now) + if err != nil { + b.Fatal(err) + } + } +} + +var benchmarkQueryToPhysicalPlan *plan.PlanSpec + +func BenchmarkQueryToPhysicalPlan(b *testing.B) { + lp := plan.NewLogicalPlanner() + pp := plan.NewPlanner() + now := time.Date(2017, 8, 8, 0, 0, 0, 0, time.UTC) + for n := 0; n < b.N; n++ { + lp, err := lp.Plan(benchmarkQuery) + if err != nil { + b.Fatal(err) + } + benchmarkQueryToPhysicalPlan, err = pp.Plan(lp, nil, now) + if err != nil { + b.Fatal(err) + } + } +} diff --git a/vendor/github.com/influxdata/ifql/query/plan/procedure.go b/vendor/github.com/influxdata/ifql/query/plan/procedure.go new file mode 100644 index 000000000..623734715 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/plan/procedure.go @@ -0,0 +1,141 @@ +package plan + +import ( + "fmt" + "time" + + "github.com/influxdata/ifql/query" + uuid "github.com/satori/go.uuid" +) + +type ProcedureID uuid.UUID + +func (id ProcedureID) String() string { + return uuid.UUID(id).String() +} + +var ZeroProcedureID ProcedureID + +type Procedure struct { + plan *PlanSpec + ID ProcedureID + Parents []ProcedureID + Children []ProcedureID + Spec ProcedureSpec +} + +func (p 
*Procedure) Copy() *Procedure { + np := new(Procedure) + np.ID = p.ID + + np.plan = p.plan + + np.Parents = make([]ProcedureID, len(p.Parents)) + copy(np.Parents, p.Parents) + + np.Children = make([]ProcedureID, len(p.Children)) + copy(np.Children, p.Children) + + np.Spec = p.Spec.Copy() + + return np +} + +func (p *Procedure) DoChildren(f func(pr *Procedure)) { + for _, id := range p.Children { + f(p.plan.Procedures[id]) + } +} +func (p *Procedure) DoParents(f func(pr *Procedure)) { + for _, id := range p.Parents { + f(p.plan.Procedures[id]) + } +} +func (p *Procedure) Child(i int) *Procedure { + return p.plan.Procedures[p.Children[i]] +} + +type Administration interface { + ConvertID(query.OperationID) ProcedureID +} + +type CreateProcedureSpec func(query.OperationSpec, Administration) (ProcedureSpec, error) + +// ProcedureSpec specifies an operation as part of a query. +type ProcedureSpec interface { + // Kind returns the kind of the procedure. + Kind() ProcedureKind + Copy() ProcedureSpec +} + +type PushDownProcedureSpec interface { + PushDownRules() []PushDownRule + PushDown(root *Procedure, dup func() *Procedure) +} + +type BoundedProcedureSpec interface { + TimeBounds() BoundsSpec +} + +type YieldProcedureSpec interface { + YieldName() string +} +type AggregateProcedureSpec interface { + // AggregateMethod specifies which aggregate method to push down to the storage layer. + AggregateMethod() string + // ReAggregateSpec specifies an aggregate procedure to use when aggregating the individual pushed down results. + ReAggregateSpec() ProcedureSpec +} + +type ParentAwareProcedureSpec interface { + ParentChanged(old, new ProcedureID) +} + +// TODO(nathanielc): make this more formal using commute/associative properties +type PushDownRule struct { + Root ProcedureKind + Through []ProcedureKind + Match func(ProcedureSpec) bool +} + +// ProcedureKind denotes the kind of operations. 
+type ProcedureKind string + +type BoundsSpec struct { + Start query.Time + Stop query.Time +} + +func (b BoundsSpec) Union(o BoundsSpec, now time.Time) (u BoundsSpec) { + u.Start = b.Start + if u.Start.IsZero() || (!o.Start.IsZero() && o.Start.Time(now).Before(b.Start.Time(now))) { + u.Start = o.Start + } + u.Stop = b.Stop + if u.Stop.IsZero() || (!o.Start.IsZero() && o.Stop.Time(now).After(b.Stop.Time(now))) { + u.Stop = o.Stop + } + return +} + +type WindowSpec struct { + Every query.Duration + Period query.Duration + Round query.Duration + Start query.Time +} + +var kindToProcedure = make(map[ProcedureKind]CreateProcedureSpec) +var queryOpToProcedure = make(map[query.OperationKind][]CreateProcedureSpec) + +// RegisterProcedureSpec registers a new procedure with the specified kind. +// The call panics if the kind is not unique. +func RegisterProcedureSpec(k ProcedureKind, c CreateProcedureSpec, qks ...query.OperationKind) { + if kindToProcedure[k] != nil { + panic(fmt.Errorf("duplicate registration for procedure kind %v", k)) + } + kindToProcedure[k] = c + for _, qk := range qks { + queryOpToProcedure[qk] = append(queryOpToProcedure[qk], c) + } +} diff --git a/vendor/github.com/influxdata/ifql/query/plan/rules.go b/vendor/github.com/influxdata/ifql/query/plan/rules.go new file mode 100644 index 000000000..41cc0defb --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/plan/rules.go @@ -0,0 +1,12 @@ +package plan + +type RewriteRule interface { + Root() ProcedureKind + Rewrite(*Procedure, PlanRewriter) error +} + +var rewriteRules []RewriteRule + +func RegisterRewriteRule(r RewriteRule) { + rewriteRules = append(rewriteRules, r) +} diff --git a/vendor/github.com/influxdata/ifql/query/plan/storage.go b/vendor/github.com/influxdata/ifql/query/plan/storage.go new file mode 100644 index 000000000..a87cad8d3 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/plan/storage.go @@ -0,0 +1,20 @@ +package plan + +import "time" + +type Storage interface { + 
ShardMapping() ShardMap +} + +// ShardMap is a mapping of database names to list of shards for that database. +type ShardMap map[string][]Shard + +type Shard struct { + Node string + Range TimeRange +} + +type TimeRange struct { + Start time.Time + Stop time.Time +} diff --git a/vendor/github.com/influxdata/ifql/query/query.go b/vendor/github.com/influxdata/ifql/query/query.go new file mode 100644 index 000000000..c76612745 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/query.go @@ -0,0 +1,175 @@ +package query + +import ( + "errors" + "fmt" +) + +// Spec specifies a query. +type Spec struct { + Operations []*Operation `json:"operations"` + Edges []Edge `json:"edges"` + Resources ResourceManagement `json:"resources"` + + sorted []*Operation + children map[OperationID][]*Operation + parents map[OperationID][]*Operation +} + +// Edge is a data flow relationship between a parent and a child +type Edge struct { + Parent OperationID `json:"parent"` + Child OperationID `json:"child"` +} + +// Walk calls f on each operation exactly once. +// The function f will be called on an operation only after +// all of its parents have already been passed to f. +func (q *Spec) Walk(f func(o *Operation) error) error { + if len(q.sorted) == 0 { + if err := q.prepare(); err != nil { + return err + } + } + for _, o := range q.sorted { + err := f(o) + if err != nil { + return err + } + } + return nil +} + +// Validate ensures the query is a valid DAG. +func (q *Spec) Validate() error { + return q.prepare() +} + +// Children returns a list of children for a given operation. +// If the query is invalid no children will be returned. +func (q *Spec) Children(id OperationID) []*Operation { + if q.children == nil { + err := q.prepare() + if err != nil { + return nil + } + } + return q.children[id] +} + +// Parents returns a list of parents for a given operation. +// If the query is invalid no parents will be returned. 
+func (q *Spec) Parents(id OperationID) []*Operation { + if q.parents == nil { + err := q.prepare() + if err != nil { + return nil + } + } + return q.parents[id] +} + +// prepare populates the internal datastructure needed to quickly navigate the query DAG. +// As a result the query DAG is validated. +func (q *Spec) prepare() error { + q.sorted = q.sorted[0:0] + + parents, children, roots, err := q.determineParentsChildrenAndRoots() + if err != nil { + return err + } + if len(roots) == 0 { + return errors.New("query has no root nodes") + } + + q.parents = parents + q.children = children + + tMarks := make(map[OperationID]bool) + pMarks := make(map[OperationID]bool) + + for _, r := range roots { + if err := q.visit(tMarks, pMarks, r); err != nil { + return err + } + } + //reverse q.sorted + for i, j := 0, len(q.sorted)-1; i < j; i, j = i+1, j-1 { + q.sorted[i], q.sorted[j] = q.sorted[j], q.sorted[i] + } + return nil +} + +func (q *Spec) computeLookup() (map[OperationID]*Operation, error) { + lookup := make(map[OperationID]*Operation, len(q.Operations)) + for _, o := range q.Operations { + if _, ok := lookup[o.ID]; ok { + return nil, fmt.Errorf("found duplicate operation ID %q", o.ID) + } + lookup[o.ID] = o + } + return lookup, nil +} + +func (q *Spec) determineParentsChildrenAndRoots() (parents, children map[OperationID][]*Operation, roots []*Operation, _ error) { + lookup, err := q.computeLookup() + if err != nil { + return nil, nil, nil, err + } + children = make(map[OperationID][]*Operation, len(q.Operations)) + parents = make(map[OperationID][]*Operation, len(q.Operations)) + for _, e := range q.Edges { + // Build children map + c, ok := lookup[e.Child] + if !ok { + return nil, nil, nil, fmt.Errorf("edge references unknown child operation %q", e.Child) + } + children[e.Parent] = append(children[e.Parent], c) + + // Build parents map + p, ok := lookup[e.Parent] + if !ok { + return nil, nil, nil, fmt.Errorf("edge references unknown parent operation %q", e.Parent) 
+ } + parents[e.Child] = append(parents[e.Child], p) + } + // Find roots, i.e operations with no parents. + for _, o := range q.Operations { + if len(parents[o.ID]) == 0 { + roots = append(roots, o) + } + } + return +} + +// Depth first search topological sorting of a DAG. +// https://en.wikipedia.org/wiki/Topological_sorting#Algorithms +func (q *Spec) visit(tMarks, pMarks map[OperationID]bool, o *Operation) error { + id := o.ID + if tMarks[id] { + return errors.New("found cycle in query") + } + + if !pMarks[id] { + tMarks[id] = true + for _, c := range q.children[id] { + if err := q.visit(tMarks, pMarks, c); err != nil { + return err + } + } + pMarks[id] = true + tMarks[id] = false + q.sorted = append(q.sorted, o) + } + return nil +} + +// Functions return the names of all functions used in the plan +func (q *Spec) Functions() ([]string, error) { + funcs := []string{} + err := q.Walk(func(o *Operation) error { + funcs = append(funcs, string(o.Spec.Kind())) + return nil + }) + return funcs, err +} diff --git a/vendor/github.com/influxdata/ifql/query/query_test.go b/vendor/github.com/influxdata/ifql/query/query_test.go new file mode 100644 index 000000000..1bd989b88 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/query_test.go @@ -0,0 +1,268 @@ +package query_test + +import ( + "encoding/json" + "errors" + "strconv" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/google/go-cmp/cmp/cmpopts" + "github.com/influxdata/ifql/functions" + "github.com/influxdata/ifql/query" +) + +var ignoreUnexportedQuerySpec = cmpopts.IgnoreUnexported(query.Spec{}) + +func TestQuery_JSON(t *testing.T) { + srcData := []byte(` +{ + "operations":[ + { + "id": "from", + "kind": "from", + "spec": { + "database":"mydb" + } + }, + { + "id": "range", + "kind": "range", + "spec": { + "start": "-4h", + "stop": "now" + } + }, + { + "id": "sum", + "kind": "sum" + } + ], + "edges":[ + {"parent":"from","child":"range"}, + {"parent":"range","child":"sum"} + ] +} + `) + + 
// Ensure we can properly unmarshal a query + gotQ := query.Spec{} + if err := json.Unmarshal(srcData, &gotQ); err != nil { + t.Fatal(err) + } + expQ := query.Spec{ + Operations: []*query.Operation{ + { + ID: "from", + Spec: &functions.FromOpSpec{ + Database: "mydb", + }, + }, + { + ID: "range", + Spec: &functions.RangeOpSpec{ + Start: query.Time{ + Relative: -4 * time.Hour, + IsRelative: true, + }, + Stop: query.Time{ + IsRelative: true, + }, + }, + }, + { + ID: "sum", + Spec: &functions.SumOpSpec{}, + }, + }, + Edges: []query.Edge{ + {Parent: "from", Child: "range"}, + {Parent: "range", Child: "sum"}, + }, + } + if !cmp.Equal(gotQ, expQ, ignoreUnexportedQuerySpec) { + t.Errorf("unexpected query:\n%s", cmp.Diff(gotQ, expQ, ignoreUnexportedQuerySpec)) + } + + // Ensure we can properly marshal a query + data, err := json.Marshal(expQ) + if err != nil { + t.Fatal(err) + } + if err := json.Unmarshal(data, &gotQ); err != nil { + t.Fatal(err) + } + if !cmp.Equal(gotQ, expQ, ignoreUnexportedQuerySpec) { + t.Errorf("unexpected query after marshalling: -want/+got %s", cmp.Diff(expQ, gotQ, ignoreUnexportedQuerySpec)) + } +} + +func TestQuery_Walk(t *testing.T) { + testCases := []struct { + query *query.Spec + walkOrder []query.OperationID + err error + }{ + { + query: &query.Spec{}, + err: errors.New("query has no root nodes"), + }, + { + query: &query.Spec{ + Operations: []*query.Operation{ + {ID: "a"}, + {ID: "b"}, + }, + Edges: []query.Edge{ + {Parent: "a", Child: "b"}, + {Parent: "a", Child: "c"}, + }, + }, + err: errors.New("edge references unknown child operation \"c\""), + }, + { + query: &query.Spec{ + Operations: []*query.Operation{ + {ID: "a"}, + {ID: "b"}, + {ID: "b"}, + }, + Edges: []query.Edge{ + {Parent: "a", Child: "b"}, + {Parent: "a", Child: "b"}, + }, + }, + err: errors.New("found duplicate operation ID \"b\""), + }, + { + query: &query.Spec{ + Operations: []*query.Operation{ + {ID: "a"}, + {ID: "b"}, + {ID: "c"}, + }, + Edges: []query.Edge{ + {Parent: 
"a", Child: "b"}, + {Parent: "b", Child: "c"}, + {Parent: "c", Child: "b"}, + }, + }, + err: errors.New("found cycle in query"), + }, + { + query: &query.Spec{ + Operations: []*query.Operation{ + {ID: "a"}, + {ID: "b"}, + {ID: "c"}, + {ID: "d"}, + }, + Edges: []query.Edge{ + {Parent: "a", Child: "b"}, + {Parent: "b", Child: "c"}, + {Parent: "c", Child: "d"}, + {Parent: "d", Child: "b"}, + }, + }, + err: errors.New("found cycle in query"), + }, + { + query: &query.Spec{ + Operations: []*query.Operation{ + {ID: "a"}, + {ID: "b"}, + {ID: "c"}, + {ID: "d"}, + }, + Edges: []query.Edge{ + {Parent: "a", Child: "b"}, + {Parent: "b", Child: "c"}, + {Parent: "c", Child: "d"}, + }, + }, + walkOrder: []query.OperationID{ + "a", "b", "c", "d", + }, + }, + { + query: &query.Spec{ + Operations: []*query.Operation{ + {ID: "a"}, + {ID: "b"}, + {ID: "c"}, + {ID: "d"}, + }, + Edges: []query.Edge{ + {Parent: "a", Child: "b"}, + {Parent: "a", Child: "c"}, + {Parent: "b", Child: "d"}, + {Parent: "c", Child: "d"}, + }, + }, + walkOrder: []query.OperationID{ + "a", "c", "b", "d", + }, + }, + { + query: &query.Spec{ + Operations: []*query.Operation{ + {ID: "a"}, + {ID: "b"}, + {ID: "c"}, + {ID: "d"}, + }, + Edges: []query.Edge{ + {Parent: "a", Child: "c"}, + {Parent: "b", Child: "c"}, + {Parent: "c", Child: "d"}, + }, + }, + walkOrder: []query.OperationID{ + "b", "a", "c", "d", + }, + }, + { + query: &query.Spec{ + Operations: []*query.Operation{ + {ID: "a"}, + {ID: "b"}, + {ID: "c"}, + {ID: "d"}, + }, + Edges: []query.Edge{ + {Parent: "a", Child: "c"}, + {Parent: "b", Child: "d"}, + }, + }, + walkOrder: []query.OperationID{ + "b", "d", "a", "c", + }, + }, + } + for i, tc := range testCases { + tc := tc + t.Run(strconv.Itoa(i), func(t *testing.T) { + var gotOrder []query.OperationID + err := tc.query.Walk(func(o *query.Operation) error { + gotOrder = append(gotOrder, o.ID) + return nil + }) + if tc.err == nil { + if err != nil { + t.Fatal(err) + } + } else { + if err == nil { + 
t.Fatalf("expected error: %q", tc.err) + } else if got, exp := err.Error(), tc.err.Error(); got != exp { + t.Fatalf("unexpected errors: got %q exp %q", got, exp) + } + } + + if !cmp.Equal(gotOrder, tc.walkOrder) { + t.Fatalf("unexpected walk order -want/+got %s", cmp.Diff(tc.walkOrder, gotOrder)) + } + }) + } +} diff --git a/vendor/github.com/influxdata/ifql/query/resource_management.go b/vendor/github.com/influxdata/ifql/query/resource_management.go new file mode 100644 index 000000000..2660aac8a --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/resource_management.go @@ -0,0 +1,62 @@ +package query + +import ( + "math" + "strconv" + + "github.com/pkg/errors" +) + +// ResourceManagement defines how the query should consume avaliable resources. +type ResourceManagement struct { + // Priority or the query. + // Queries with a lower value will move to the front of the priority queue. + // A zero value indicates the highest priority. + Priority Priority `json:"priority"` + // ConcurrencyQuota is the number of concurrency workers allowed to process this query. + // A zero value indicates the planner can pick the optimal concurrency. + ConcurrencyQuota int `json:"concurrency_quota"` + // MemoryBytesQuota is the number of bytes of RAM this query may consume. + // There is a small amount of overhead memory being consumed by a query that will not be counted towards this limit. + // A zero value indicates unlimited. + MemoryBytesQuota int64 `json:"memory_bytes_quota"` +} + +// Priority is an integer that represents the query priority. +// Any positive 32bit integer value may be used. +// Special constants are provided to represent the extreme high and low priorities. 
+type Priority int32 + +const ( + // High is the highest possible priority = 0 + High Priority = 0 + // Low is the lowest possible priority = MaxInt32 + Low Priority = math.MaxInt32 +) + +func (p Priority) MarshalText() ([]byte, error) { + switch p { + case Low: + return []byte("low"), nil + case High: + return []byte("high"), nil + default: + return []byte(strconv.FormatInt(int64(p), 10)), nil + } +} + +func (p *Priority) UnmarshalText(txt []byte) error { + switch s := string(txt); s { + case "low": + *p = Low + case "high": + *p = High + default: + i, err := strconv.ParseInt(s, 10, 32) + if err != nil { + return errors.Wrap(err, "invalid priority, must be an integer or 'low','high'") + } + *p = Priority(i) + } + return nil +} diff --git a/vendor/github.com/influxdata/ifql/query/time.go b/vendor/github.com/influxdata/ifql/query/time.go new file mode 100644 index 000000000..a83a8d30d --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/time.go @@ -0,0 +1,98 @@ +package query + +import ( + "math" + "time" +) + +var ( + MinTime = Time{ + Absolute: time.Unix(0, math.MinInt64), + } + MaxTime = Time{ + Absolute: time.Unix(0, math.MaxInt64), + } + Now = Time{ + IsRelative: true, + } +) + +// Time represents either a relavite or absolute time. +// If Time is its zero value then it represents a time.Time{}. +// To represent the now time you must set IsRelative to true. +type Time struct { + IsRelative bool + Relative time.Duration + Absolute time.Time +} + +// Time returns the time specified relative to now. 
+func (t Time) Time(now time.Time) time.Time { + if t.IsRelative { + return now.Add(t.Relative) + } + return t.Absolute +} + +func (t Time) IsZero() bool { + return !t.IsRelative && t.Absolute.IsZero() +} + +func (t *Time) UnmarshalText(data []byte) error { + if len(data) == 0 { + t.Absolute = time.Time{} + t.Relative = 0 + t.IsRelative = false + return nil + } + + str := string(data) + if str == "now" { + t.Relative = 0 + t.Absolute = time.Time{} + t.IsRelative = true + return nil + } + d, err := time.ParseDuration(str) + if err == nil { + t.Relative = d + t.Absolute = time.Time{} + t.IsRelative = true + return nil + } + t.IsRelative = false + t.Relative = 0 + t.Absolute, err = time.Parse(time.RFC3339Nano, str) + if err != nil { + return err + } + t.Absolute = t.Absolute.UTC() + return nil +} + +func (t Time) MarshalText() ([]byte, error) { + if t.IsRelative { + if t.Relative == 0 { + return []byte("now"), nil + } + return []byte(t.Relative.String()), nil + } + return []byte(t.Absolute.Format(time.RFC3339Nano)), nil +} + +// Duration is a marshalable duration type. 
+//TODO make this the real duration parsing not just time.ParseDuration +type Duration time.Duration + +func (d *Duration) UnmarshalText(data []byte) error { + dur, err := time.ParseDuration(string(data)) + if err != nil { + return err + } + *d = Duration(dur) + return nil +} + +func (d Duration) MarshalText() ([]byte, error) { + return []byte(time.Duration(d).String()), nil +} diff --git a/vendor/github.com/influxdata/ifql/query/trigger.go b/vendor/github.com/influxdata/ifql/query/trigger.go new file mode 100644 index 000000000..778452108 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/query/trigger.go @@ -0,0 +1,57 @@ +package query + +type TriggerSpec interface { + Kind() TriggerKind +} +type TriggerKind int + +const ( + AfterWatermark TriggerKind = iota + Repeated + AfterProcessingTime + AfterAtLeastCount + OrFinally +) + +var DefaultTrigger = AfterWatermarkTriggerSpec{} + +type AfterWatermarkTriggerSpec struct { + AllowedLateness Duration +} + +func (AfterWatermarkTriggerSpec) Kind() TriggerKind { + return AfterWatermark +} + +type RepeatedTriggerSpec struct { + Trigger TriggerSpec +} + +func (RepeatedTriggerSpec) Kind() TriggerKind { + return Repeated +} + +type AfterProcessingTimeTriggerSpec struct { + Duration Duration +} + +func (AfterProcessingTimeTriggerSpec) Kind() TriggerKind { + return AfterProcessingTime +} + +type AfterAtLeastCountTriggerSpec struct { + Count int +} + +func (AfterAtLeastCountTriggerSpec) Kind() TriggerKind { + return AfterAtLeastCount +} + +type OrFinallyTriggerSpec struct { + Main TriggerSpec + Finally TriggerSpec +} + +func (OrFinallyTriggerSpec) Kind() TriggerKind { + return OrFinally +} diff --git a/vendor/github.com/influxdata/ifql/release.sh b/vendor/github.com/influxdata/ifql/release.sh new file mode 100755 index 000000000..f76286a45 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/release.sh @@ -0,0 +1,37 @@ +#!/bin/bash + +# Exit if any command fails +set -e + +# Get dir of script and make it is our working 
directory. +DIR=$(cd $(dirname "${BASH_SOURCE[0]}") && pwd) +cd $DIR + +rm -f ./bin/* + +# Build image +imagename="ifql-img" +dataname="ifql-data" + +docker build -f Dockerfile_build -t $imagename . + +# Create docker volume of repo +docker rm $dataname 2>/dev/null >/dev/null || true +docker create \ + --name $dataname \ + -v "/root/go/src/github.com/influxdata/ifqld" \ + $imagename /bin/true +docker cp "$DIR/" "$dataname:/root/go/src/github.com/influxdata/" + +# Run tests in docker +docker run \ + --rm \ + --volumes-from $dataname \ + -e "GITHUB_TOKEN=${GITHUB_TOKEN}" \ + "$imagename" \ + make dist + +docker cp "$dataname:/root/go/src/github.com/influxdata/ifql/dist" . +docker rm $dataname + +make release-docker diff --git a/vendor/github.com/influxdata/ifql/semantic/binary_types.go b/vendor/github.com/influxdata/ifql/semantic/binary_types.go new file mode 100644 index 000000000..30b4c6536 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/semantic/binary_types.go @@ -0,0 +1,118 @@ +package semantic + +import "github.com/influxdata/ifql/ast" + +type binarySignature struct { + operator ast.OperatorKind + left, right Kind +} + +var binaryTypesLookup = map[binarySignature]Kind{ + //--------------- + // Math Operators + //--------------- + {operator: ast.AdditionOperator, left: Int, right: Int}: Int, + {operator: ast.AdditionOperator, left: UInt, right: UInt}: UInt, + {operator: ast.AdditionOperator, left: Float, right: Float}: Float, + {operator: ast.SubtractionOperator, left: Int, right: Int}: Int, + {operator: ast.SubtractionOperator, left: UInt, right: UInt}: UInt, + {operator: ast.SubtractionOperator, left: Float, right: Float}: Float, + {operator: ast.MultiplicationOperator, left: Int, right: Int}: Int, + {operator: ast.MultiplicationOperator, left: UInt, right: UInt}: UInt, + {operator: ast.MultiplicationOperator, left: Float, right: Float}: Float, + {operator: ast.DivisionOperator, left: Int, right: Int}: Int, + {operator: ast.DivisionOperator, left: UInt, 
right: UInt}: UInt, + {operator: ast.DivisionOperator, left: Float, right: Float}: Float, + + //--------------------- + // Comparison Operators + //--------------------- + + // LessThanEqualOperator + + {operator: ast.LessThanEqualOperator, left: Int, right: Int}: Bool, + {operator: ast.LessThanEqualOperator, left: Int, right: UInt}: Bool, + {operator: ast.LessThanEqualOperator, left: Int, right: Float}: Bool, + {operator: ast.LessThanEqualOperator, left: UInt, right: Int}: Bool, + {operator: ast.LessThanEqualOperator, left: UInt, right: UInt}: Bool, + {operator: ast.LessThanEqualOperator, left: UInt, right: Float}: Bool, + {operator: ast.LessThanEqualOperator, left: Float, right: Int}: Bool, + {operator: ast.LessThanEqualOperator, left: Float, right: UInt}: Bool, + {operator: ast.LessThanEqualOperator, left: Float, right: Float}: Bool, + + // LessThanOperator + + {operator: ast.LessThanOperator, left: Int, right: Int}: Bool, + {operator: ast.LessThanOperator, left: Int, right: UInt}: Bool, + {operator: ast.LessThanOperator, left: Int, right: Float}: Bool, + {operator: ast.LessThanOperator, left: UInt, right: Int}: Bool, + {operator: ast.LessThanOperator, left: UInt, right: UInt}: Bool, + {operator: ast.LessThanOperator, left: UInt, right: Float}: Bool, + {operator: ast.LessThanOperator, left: Float, right: Int}: Bool, + {operator: ast.LessThanOperator, left: Float, right: UInt}: Bool, + {operator: ast.LessThanOperator, left: Float, right: Float}: Bool, + + // GreaterThanEqualOperator + + {operator: ast.GreaterThanEqualOperator, left: Int, right: Int}: Bool, + {operator: ast.GreaterThanEqualOperator, left: Int, right: UInt}: Bool, + {operator: ast.GreaterThanEqualOperator, left: Int, right: Float}: Bool, + {operator: ast.GreaterThanEqualOperator, left: UInt, right: Int}: Bool, + {operator: ast.GreaterThanEqualOperator, left: UInt, right: UInt}: Bool, + {operator: ast.GreaterThanEqualOperator, left: UInt, right: Float}: Bool, + {operator: 
ast.GreaterThanEqualOperator, left: Float, right: Int}: Bool, + {operator: ast.GreaterThanEqualOperator, left: Float, right: UInt}: Bool, + {operator: ast.GreaterThanEqualOperator, left: Float, right: Float}: Bool, + + // GreaterThanOperator + + {operator: ast.GreaterThanOperator, left: Int, right: Int}: Bool, + {operator: ast.GreaterThanOperator, left: Int, right: UInt}: Bool, + {operator: ast.GreaterThanOperator, left: Int, right: Float}: Bool, + {operator: ast.GreaterThanOperator, left: UInt, right: Int}: Bool, + {operator: ast.GreaterThanOperator, left: UInt, right: UInt}: Bool, + {operator: ast.GreaterThanOperator, left: UInt, right: Float}: Bool, + {operator: ast.GreaterThanOperator, left: Float, right: Int}: Bool, + {operator: ast.GreaterThanOperator, left: Float, right: UInt}: Bool, + {operator: ast.GreaterThanOperator, left: Float, right: Float}: Bool, + + // EqualOperator + + {operator: ast.EqualOperator, left: Int, right: Int}: Bool, + {operator: ast.EqualOperator, left: Int, right: UInt}: Bool, + {operator: ast.EqualOperator, left: Int, right: Float}: Bool, + {operator: ast.EqualOperator, left: UInt, right: Int}: Bool, + {operator: ast.EqualOperator, left: UInt, right: UInt}: Bool, + {operator: ast.EqualOperator, left: UInt, right: Float}: Bool, + {operator: ast.EqualOperator, left: Float, right: Int}: Bool, + {operator: ast.EqualOperator, left: Float, right: UInt}: Bool, + {operator: ast.EqualOperator, left: Float, right: Float}: Bool, + {operator: ast.EqualOperator, left: String, right: String}: Bool, + + // NotEqualOperator + + {operator: ast.NotEqualOperator, left: Int, right: Int}: Bool, + {operator: ast.NotEqualOperator, left: Int, right: UInt}: Bool, + {operator: ast.NotEqualOperator, left: Int, right: Float}: Bool, + {operator: ast.NotEqualOperator, left: UInt, right: Int}: Bool, + {operator: ast.NotEqualOperator, left: UInt, right: UInt}: Bool, + {operator: ast.NotEqualOperator, left: UInt, right: Float}: Bool, + {operator: 
ast.NotEqualOperator, left: Float, right: Int}: Bool, + {operator: ast.NotEqualOperator, left: Float, right: UInt}: Bool, + {operator: ast.NotEqualOperator, left: Float, right: Float}: Bool, + {operator: ast.NotEqualOperator, left: String, right: String}: Bool, + + //--------------- + // Regexp Operators + //--------------- + + // RegexpMatchOperator + + {operator: ast.RegexpMatchOperator, left: String, right: Regexp}: Bool, + {operator: ast.RegexpMatchOperator, left: Regexp, right: String}: Bool, + + // NotRegexpMatchOperator + + {operator: ast.NotRegexpMatchOperator, left: String, right: Regexp}: Bool, + {operator: ast.NotRegexpMatchOperator, left: Regexp, right: String}: Bool, +} diff --git a/vendor/github.com/influxdata/ifql/semantic/doc.go b/vendor/github.com/influxdata/ifql/semantic/doc.go new file mode 100644 index 000000000..b29ef8da2 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/semantic/doc.go @@ -0,0 +1,12 @@ +/* +The semantic package provides a graph structure that represents the meaning of an IFQL script. +An AST is converted into a semantic graph for use with other systems. +Using a semantic graph representation of the IFQL, enables highlevel meaning to be specified programatically. + +The semantic structures are to be designed to facilitate the interpretation and compilation of IFQL. + +For example since IFQL uses the javascript AST structures, arguments to a function are represented as a single positional argument that is always an object expression. +The semantic graph validates that the AST correctly follows these semantics, and use structures that are strongly typed for this expectation. 
+ +*/ +package semantic diff --git a/vendor/github.com/influxdata/ifql/semantic/graph.go b/vendor/github.com/influxdata/ifql/semantic/graph.go new file mode 100644 index 000000000..793c496bc --- /dev/null +++ b/vendor/github.com/influxdata/ifql/semantic/graph.go @@ -0,0 +1,1275 @@ +package semantic + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + "strconv" + "time" + + "github.com/influxdata/ifql/ast" +) + +type Node interface { + node() + NodeType() string + Copy() Node + + json.Marshaler +} + +func (*Program) node() {} + +func (*BlockStatement) node() {} +func (*ExpressionStatement) node() {} +func (*ReturnStatement) node() {} +func (*NativeVariableDeclaration) node() {} +func (*ExternalVariableDeclaration) node() {} + +func (*ArrayExpression) node() {} +func (*FunctionExpression) node() {} +func (*BinaryExpression) node() {} +func (*CallExpression) node() {} +func (*ConditionalExpression) node() {} +func (*IdentifierExpression) node() {} +func (*LogicalExpression) node() {} +func (*MemberExpression) node() {} +func (*ObjectExpression) node() {} +func (*UnaryExpression) node() {} + +func (*Identifier) node() {} +func (*Property) node() {} +func (*FunctionParam) node() {} + +func (*BooleanLiteral) node() {} +func (*DateTimeLiteral) node() {} +func (*DurationLiteral) node() {} +func (*FloatLiteral) node() {} +func (*IntegerLiteral) node() {} +func (*StringLiteral) node() {} +func (*RegexpLiteral) node() {} +func (*UnsignedIntegerLiteral) node() {} + +type Statement interface { + Node + stmt() +} + +func (*BlockStatement) stmt() {} +func (*ExpressionStatement) stmt() {} +func (*ReturnStatement) stmt() {} +func (*NativeVariableDeclaration) stmt() {} +func (*ExternalVariableDeclaration) stmt() {} + +type Expression interface { + Node + Type() Type + expression() +} + +func (*ArrayExpression) expression() {} +func (*BinaryExpression) expression() {} +func (*BooleanLiteral) expression() {} +func (*CallExpression) expression() {} +func 
(*ConditionalExpression) expression() {} +func (*DateTimeLiteral) expression() {} +func (*DurationLiteral) expression() {} +func (*FloatLiteral) expression() {} +func (*FunctionExpression) expression() {} +func (*IdentifierExpression) expression() {} +func (*IntegerLiteral) expression() {} +func (*LogicalExpression) expression() {} +func (*MemberExpression) expression() {} +func (*ObjectExpression) expression() {} +func (*RegexpLiteral) expression() {} +func (*StringLiteral) expression() {} +func (*UnaryExpression) expression() {} +func (*UnsignedIntegerLiteral) expression() {} + +type Literal interface { + Expression + literal() +} + +func (*BooleanLiteral) literal() {} +func (*DateTimeLiteral) literal() {} +func (*DurationLiteral) literal() {} +func (*FloatLiteral) literal() {} +func (*IntegerLiteral) literal() {} +func (*RegexpLiteral) literal() {} +func (*StringLiteral) literal() {} +func (*UnsignedIntegerLiteral) literal() {} + +type Program struct { + Body []Statement `json:"body"` +} + +func (*Program) NodeType() string { return "Program" } + +func (p *Program) Copy() Node { + if p == nil { + return p + } + np := new(Program) + *np = *p + + if len(p.Body) > 0 { + np.Body = make([]Statement, len(p.Body)) + for i, s := range p.Body { + np.Body[i] = s.Copy().(Statement) + } + } + + return np +} + +type BlockStatement struct { + Body []Statement `json:"body"` +} + +func (*BlockStatement) NodeType() string { return "BlockStatement" } + +func (s *BlockStatement) ReturnStatement() *ReturnStatement { + return s.Body[len(s.Body)-1].(*ReturnStatement) +} + +func (s *BlockStatement) Copy() Node { + if s == nil { + return s + } + ns := new(BlockStatement) + *ns = *s + + if len(s.Body) > 0 { + ns.Body = make([]Statement, len(s.Body)) + for i, stmt := range s.Body { + ns.Body[i] = stmt.Copy().(Statement) + } + } + + return ns +} + +type ExpressionStatement struct { + Expression Expression `json:"expression"` +} + +func (*ExpressionStatement) NodeType() string { return 
"ExpressionStatement" } + +func (s *ExpressionStatement) Copy() Node { + if s == nil { + return s + } + ns := new(ExpressionStatement) + *ns = *s + + ns.Expression = s.Expression.Copy().(Expression) + + return ns +} + +type ReturnStatement struct { + Argument Expression `json:"argument"` +} + +func (*ReturnStatement) NodeType() string { return "ReturnStatement" } + +func (s *ReturnStatement) Copy() Node { + if s == nil { + return s + } + ns := new(ReturnStatement) + *ns = *s + + ns.Argument = s.Argument.Copy().(Expression) + + return ns +} + +type VariableDeclaration interface { + Node + ID() *Identifier + InitType() Type +} + +type NativeVariableDeclaration struct { + Identifier *Identifier `json:"identifier"` + Init Expression `json:"init"` +} + +func (d *NativeVariableDeclaration) ID() *Identifier { + return d.Identifier +} +func (d *NativeVariableDeclaration) InitType() Type { + return d.Init.Type() +} + +func (*NativeVariableDeclaration) NodeType() string { return "NativeVariableDeclaration" } + +func (s *NativeVariableDeclaration) Copy() Node { + if s == nil { + return s + } + ns := new(NativeVariableDeclaration) + *ns = *s + + ns.Identifier = s.Identifier.Copy().(*Identifier) + + if s.Init != nil { + ns.Init = s.Init.Copy().(Expression) + } + + return ns +} + +type ExternalVariableDeclaration struct { + Identifier *Identifier `json:"identifier"` + Type Type `json:"type"` +} + +func NewExternalVariableDeclaration(name string, typ Type) *ExternalVariableDeclaration { + return &ExternalVariableDeclaration{ + Identifier: &Identifier{Name: name}, + Type: typ, + } +} + +func (d *ExternalVariableDeclaration) ID() *Identifier { + return d.Identifier +} +func (d *ExternalVariableDeclaration) InitType() Type { + return d.Type +} + +func (*ExternalVariableDeclaration) NodeType() string { return "ExternalVariableDeclaration" } + +func (s *ExternalVariableDeclaration) Copy() Node { + if s == nil { + return s + } + ns := new(ExternalVariableDeclaration) + *ns = *s + + 
ns.Identifier = s.Identifier.Copy().(*Identifier) + + return ns +} + +type ArrayExpression struct { + Elements []Expression `json:"elements"` + typ Type +} + +func (*ArrayExpression) NodeType() string { return "ArrayExpression" } +func (e *ArrayExpression) Type() Type { + if e.typ == nil { + e.typ = arrayTypeOf(e) + } + return e.typ +} + +func (e *ArrayExpression) Copy() Node { + if e == nil { + return e + } + ne := new(ArrayExpression) + *ne = *e + + if len(e.Elements) > 0 { + ne.Elements = make([]Expression, len(e.Elements)) + for i, elem := range e.Elements { + ne.Elements[i] = elem.Copy().(Expression) + } + } + + return ne +} + +type FunctionExpression struct { + Params []*FunctionParam `json:"params"` + Body Node `json:"body"` + typ Type +} + +func (*FunctionExpression) NodeType() string { return "ArrowFunctionExpression" } +func (e *FunctionExpression) Type() Type { + if e.typ == nil { + e.typ = functionTypeOf(e) + } + return e.typ + +} + +func (e *FunctionExpression) Copy() Node { + if e == nil { + return e + } + ne := new(FunctionExpression) + *ne = *e + + if len(e.Params) > 0 { + ne.Params = make([]*FunctionParam, len(e.Params)) + for i, p := range e.Params { + ne.Params[i] = p.Copy().(*FunctionParam) + } + } + ne.Body = e.Body.Copy() + + return ne +} + +type FunctionParam struct { + Key *Identifier `json:"key"` + Default Expression `json:"default"` + Piped bool `json:"piped,omitempty"` + declaration VariableDeclaration +} + +func (*FunctionParam) NodeType() string { return "FunctionParam" } + +func (f *FunctionParam) Type() Type { + if f.declaration == nil { + if f.Default != nil { + f.declaration = &NativeVariableDeclaration{ + Identifier: f.Key, + Init: f.Default, + } + } else { + return Invalid + } + } + return f.declaration.InitType() +} + +func (p *FunctionParam) Copy() Node { + if p == nil { + return p + } + np := new(FunctionParam) + *np = *p + + np.Key = p.Key.Copy().(*Identifier) + if np.Default != nil { + np.Default = 
p.Default.Copy().(Expression) + } + + return np +} + +type BinaryExpression struct { + Operator ast.OperatorKind `json:"operator"` + Left Expression `json:"left"` + Right Expression `json:"right"` +} + +func (*BinaryExpression) NodeType() string { return "BinaryExpression" } +func (e *BinaryExpression) Type() Type { + return binaryTypesLookup[binarySignature{ + operator: e.Operator, + left: e.Left.Type().Kind(), + right: e.Right.Type().Kind(), + }] +} + +func (e *BinaryExpression) Copy() Node { + if e == nil { + return e + } + ne := new(BinaryExpression) + *ne = *e + + ne.Left = e.Left.Copy().(Expression) + ne.Right = e.Right.Copy().(Expression) + + return ne +} + +type CallExpression struct { + Callee Expression `json:"callee"` + Arguments *ObjectExpression `json:"arguments"` +} + +func (*CallExpression) NodeType() string { return "CallExpression" } +func (e *CallExpression) Type() Type { + return e.Callee.Type() +} + +func (e *CallExpression) Copy() Node { + if e == nil { + return e + } + ne := new(CallExpression) + *ne = *e + + ne.Callee = e.Callee.Copy().(Expression) + ne.Arguments = e.Arguments.Copy().(*ObjectExpression) + + return ne +} + +type ConditionalExpression struct { + Test Expression `json:"test"` + Alternate Expression `json:"alternate"` + Consequent Expression `json:"consequent"` +} + +func (*ConditionalExpression) NodeType() string { return "ConditionalExpression" } + +func (e *ConditionalExpression) Copy() Node { + if e == nil { + return e + } + ne := new(ConditionalExpression) + *ne = *e + + ne.Test = e.Test.Copy().(Expression) + ne.Alternate = e.Alternate.Copy().(Expression) + ne.Consequent = e.Consequent.Copy().(Expression) + + return ne +} + +type LogicalExpression struct { + Operator ast.LogicalOperatorKind `json:"operator"` + Left Expression `json:"left"` + Right Expression `json:"right"` +} + +func (*LogicalExpression) NodeType() string { return "LogicalExpression" } +func (*LogicalExpression) Type() Type { return Bool } + +func (e 
*LogicalExpression) Copy() Node { + if e == nil { + return e + } + ne := new(LogicalExpression) + *ne = *e + + ne.Left = e.Left.Copy().(Expression) + ne.Right = e.Right.Copy().(Expression) + + return ne +} + +type MemberExpression struct { + Object Expression `json:"object"` + Property string `json:"property"` +} + +func (*MemberExpression) NodeType() string { return "MemberExpression" } + +func (e *MemberExpression) Type() Type { + t := e.Object.Type() + if t.Kind() != Object { + return Invalid + } + return e.Object.Type().PropertyType(e.Property) +} + +func (e *MemberExpression) Copy() Node { + if e == nil { + return e + } + ne := new(MemberExpression) + *ne = *e + + ne.Object = e.Object.Copy().(Expression) + + return ne +} + +type ObjectExpression struct { + Properties []*Property `json:"properties"` + typ Type +} + +func (*ObjectExpression) NodeType() string { return "ObjectExpression" } +func (e *ObjectExpression) Type() Type { + if e.typ == nil { + e.typ = objectTypeOf(e) + } + return e.typ +} + +func (e *ObjectExpression) Copy() Node { + if e == nil { + return e + } + ne := new(ObjectExpression) + *ne = *e + + if len(e.Properties) > 0 { + ne.Properties = make([]*Property, len(e.Properties)) + for i, prop := range e.Properties { + ne.Properties[i] = prop.Copy().(*Property) + } + } + + return ne +} + +type UnaryExpression struct { + Operator ast.OperatorKind `json:"operator"` + Argument Expression `json:"argument"` +} + +func (*UnaryExpression) NodeType() string { return "UnaryExpression" } +func (e *UnaryExpression) Type() Type { + return e.Argument.Type() +} + +func (e *UnaryExpression) Copy() Node { + if e == nil { + return e + } + ne := new(UnaryExpression) + *ne = *e + + ne.Argument = e.Argument.Copy().(Expression) + + return ne +} + +type Property struct { + Key *Identifier `json:"key"` + Value Expression `json:"value"` +} + +func (*Property) NodeType() string { return "Property" } + +func (p *Property) Copy() Node { + if p == nil { + return p + } + np 
:= new(Property) + *np = *p + + np.Value = p.Value.Copy().(Expression) + + return np +} + +type IdentifierExpression struct { + Name string `json:"name"` + // declaration is the node that declares this identifier + declaration VariableDeclaration +} + +func (*IdentifierExpression) NodeType() string { return "IdentifierExpression" } + +func (e *IdentifierExpression) Type() Type { + if e.declaration == nil { + return Invalid + } + return e.declaration.InitType() +} + +func (e *IdentifierExpression) Copy() Node { + if e == nil { + return e + } + ne := new(IdentifierExpression) + *ne = *e + + if ne.declaration != nil { + ne.declaration = e.declaration.Copy().(VariableDeclaration) + } + + return ne +} + +type Identifier struct { + Name string `json:"name"` +} + +func (*Identifier) NodeType() string { return "Identifier" } + +func (i *Identifier) Copy() Node { + if i == nil { + return i + } + ni := new(Identifier) + *ni = *i + + return ni +} + +type BooleanLiteral struct { + Value bool `json:"value"` +} + +func (*BooleanLiteral) NodeType() string { return "BooleanLiteral" } +func (*BooleanLiteral) Type() Type { return Bool } + +func (l *BooleanLiteral) Copy() Node { + if l == nil { + return l + } + nl := new(BooleanLiteral) + *nl = *l + + return nl +} + +type DateTimeLiteral struct { + Value time.Time `json:"value"` +} + +func (*DateTimeLiteral) NodeType() string { return "DateTimeLiteral" } +func (*DateTimeLiteral) Type() Type { return Time } + +func (l *DateTimeLiteral) Copy() Node { + if l == nil { + return l + } + nl := new(DateTimeLiteral) + *nl = *l + + return nl +} + +type DurationLiteral struct { + Value time.Duration `json:"value"` +} + +func (*DurationLiteral) NodeType() string { return "DurationLiteral" } +func (*DurationLiteral) Type() Type { return Duration } + +func (l *DurationLiteral) Copy() Node { + if l == nil { + return l + } + nl := new(DurationLiteral) + *nl = *l + + return nl +} + +type IntegerLiteral struct { + Value int64 `json:"value"` +} + +func 
(*IntegerLiteral) NodeType() string { return "IntegerLiteral" } +func (*IntegerLiteral) Type() Type { return Int } + +func (l *IntegerLiteral) Copy() Node { + if l == nil { + return l + } + nl := new(IntegerLiteral) + *nl = *l + + return nl +} + +type FloatLiteral struct { + Value float64 `json:"value"` +} + +func (*FloatLiteral) NodeType() string { return "FloatLiteral" } +func (*FloatLiteral) Type() Type { return Float } + +func (l *FloatLiteral) Copy() Node { + if l == nil { + return l + } + nl := new(FloatLiteral) + *nl = *l + + return nl +} + +type RegexpLiteral struct { + Value *regexp.Regexp `json:"value"` +} + +func (*RegexpLiteral) NodeType() string { return "RegexpLiteral" } +func (*RegexpLiteral) Type() Type { return Regexp } + +func (l *RegexpLiteral) Copy() Node { + if l == nil { + return l + } + nl := new(RegexpLiteral) + *nl = *l + + nl.Value = l.Value.Copy() + + return nl +} + +type StringLiteral struct { + Value string `json:"value"` +} + +func (*StringLiteral) NodeType() string { return "StringLiteral" } +func (*StringLiteral) Type() Type { return String } + +func (l *StringLiteral) Copy() Node { + if l == nil { + return l + } + nl := new(StringLiteral) + *nl = *l + + return nl +} + +type UnsignedIntegerLiteral struct { + Value uint64 `json:"value"` +} + +func (*UnsignedIntegerLiteral) NodeType() string { return "UnsignedIntegerLiteral" } +func (*UnsignedIntegerLiteral) Type() Type { return UInt } + +func (l *UnsignedIntegerLiteral) Copy() Node { + if l == nil { + return l + } + nl := new(UnsignedIntegerLiteral) + *nl = *l + + return nl +} + +// New creates a semantic graph from the provided AST and builtin declarations +// The declarations will be modified for any variable declaration found in the program. +func New(prog *ast.Program, declarations map[string]VariableDeclaration) (*Program, error) { + if declarations == nil { + // NOTE: Calls to New may expect modifications to declarations to persist outside the function. 
+ // The check is against nil instead of len(declarations) == 0 for this reason. + declarations = make(map[string]VariableDeclaration) + } + return analyzeProgram(prog, DeclarationScope(declarations)) +} + +type DeclarationScope map[string]VariableDeclaration + +func (s DeclarationScope) Copy() DeclarationScope { + cpy := make(DeclarationScope, len(s)) + for k, v := range s { + cpy[k] = v + } + return cpy +} + +func analyzeProgram(prog *ast.Program, declarations DeclarationScope) (*Program, error) { + p := &Program{ + Body: make([]Statement, len(prog.Body)), + } + for i, s := range prog.Body { + n, err := analyzeStatment(s, declarations) + if err != nil { + return nil, err + } + p.Body[i] = n + } + return p, nil +} + +func analyzeNode(n ast.Node, declarations DeclarationScope) (Node, error) { + switch n := n.(type) { + case ast.Statement: + return analyzeStatment(n, declarations) + case ast.Expression: + return analyzeExpression(n, declarations) + default: + return nil, fmt.Errorf("unsupported node %T", n) + } +} + +func analyzeStatment(s ast.Statement, declarations DeclarationScope) (Statement, error) { + switch s := s.(type) { + case *ast.BlockStatement: + return analyzeBlockStatement(s, declarations) + case *ast.ExpressionStatement: + return analyzeExpressionStatement(s, declarations) + case *ast.ReturnStatement: + return analyzeReturnStatement(s, declarations) + case *ast.VariableDeclaration: + // Expect a single declaration + if len(s.Declarations) != 1 { + return nil, fmt.Errorf("only single variable declarations are supported, found %d declarations", len(s.Declarations)) + } + return analyzeVariableDeclaration(s.Declarations[0], declarations) + default: + return nil, fmt.Errorf("unsupported statement %T", s) + } +} + +func analyzeBlockStatement(block *ast.BlockStatement, declarations DeclarationScope) (*BlockStatement, error) { + declarations = declarations.Copy() + b := &BlockStatement{ + Body: make([]Statement, len(block.Body)), + } + for i, s := range 
block.Body { + n, err := analyzeStatment(s, declarations) + if err != nil { + return nil, err + } + b.Body[i] = n + } + last := len(b.Body) - 1 + if _, ok := b.Body[last].(*ReturnStatement); !ok { + return nil, errors.New("missing return statement in block") + } + return b, nil +} + +func analyzeExpressionStatement(expr *ast.ExpressionStatement, declarations DeclarationScope) (*ExpressionStatement, error) { + e, err := analyzeExpression(expr.Expression, declarations) + if err != nil { + return nil, err + } + return &ExpressionStatement{ + Expression: e, + }, nil +} + +func analyzeReturnStatement(ret *ast.ReturnStatement, declarations DeclarationScope) (*ReturnStatement, error) { + arg, err := analyzeExpression(ret.Argument, declarations) + if err != nil { + return nil, err + } + return &ReturnStatement{ + Argument: arg, + }, nil +} + +func analyzeVariableDeclaration(decl *ast.VariableDeclarator, declarations DeclarationScope) (*NativeVariableDeclaration, error) { + id, err := analyzeIdentifier(decl.ID, declarations) + if err != nil { + return nil, err + } + init, err := analyzeExpression(decl.Init, declarations) + if err != nil { + return nil, err + } + vd := &NativeVariableDeclaration{ + Identifier: id, + Init: init, + } + declarations[vd.Identifier.Name] = vd + return vd, nil +} + +func analyzeExpression(expr ast.Expression, declarations DeclarationScope) (Expression, error) { + switch expr := expr.(type) { + case *ast.ArrowFunctionExpression: + return analyzeArrowFunctionExpression(expr, declarations) + case *ast.CallExpression: + return analyzeCallExpression(expr, declarations) + case *ast.MemberExpression: + return analyzeMemberExpression(expr, declarations) + case *ast.PipeExpression: + return analyzePipeExpression(expr, declarations) + case *ast.BinaryExpression: + return analyzeBinaryExpression(expr, declarations) + case *ast.UnaryExpression: + return analyzeUnaryExpression(expr, declarations) + case *ast.LogicalExpression: + return 
analyzeLogicalExpression(expr, declarations) + case *ast.ObjectExpression: + return analyzeObjectExpression(expr, declarations) + case *ast.ArrayExpression: + return analyzeArrayExpression(expr, declarations) + case *ast.Identifier: + return analyzeIdentifierExpression(expr, declarations) + case ast.Literal: + return analyzeLiteral(expr, declarations) + default: + return nil, fmt.Errorf("unsupported expression %T", expr) + } +} + +func analyzeLiteral(lit ast.Literal, declarations DeclarationScope) (Literal, error) { + switch lit := lit.(type) { + case *ast.StringLiteral: + return analyzeStringLiteral(lit, declarations) + case *ast.BooleanLiteral: + return analyzeBooleanLiteral(lit, declarations) + case *ast.FloatLiteral: + return analyzeFloatLiteral(lit, declarations) + case *ast.IntegerLiteral: + return analyzeIntegerLiteral(lit, declarations) + case *ast.UnsignedIntegerLiteral: + return analyzeUnsignedIntegerLiteral(lit, declarations) + case *ast.RegexpLiteral: + return analyzeRegexpLiteral(lit, declarations) + case *ast.DurationLiteral: + return analyzeDurationLiteral(lit, declarations) + case *ast.DateTimeLiteral: + return analyzeDateTimeLiteral(lit, declarations) + case *ast.PipeLiteral: + return nil, errors.New("a pipe literal may only be used as a default value for an argument in a function definition") + default: + return nil, fmt.Errorf("unsupported literal %T", lit) + } +} + +func analyzeArrowFunctionExpression(arrow *ast.ArrowFunctionExpression, declarations DeclarationScope) (*FunctionExpression, error) { + declarations = declarations.Copy() + f := &FunctionExpression{ + Params: make([]*FunctionParam, len(arrow.Params)), + } + pipedCount := 0 + for i, p := range arrow.Params { + key, err := analyzeIdentifier(p.Key, declarations) + if err != nil { + return nil, err + } + + var ( + def Expression + declaration VariableDeclaration + piped bool + ) + if p.Value != nil { + if _, ok := p.Value.(*ast.PipeLiteral); ok { + // Special case the PipeLiteral + piped 
= true + pipedCount++ + if pipedCount > 1 { + return nil, errors.New("only a single argument may be piped") + } + } else { + d, err := analyzeExpression(p.Value, declarations) + if err != nil { + return nil, err + } + def = d + declaration = &NativeVariableDeclaration{ + Identifier: key, + Init: def, + } + declarations[key.Name] = declaration + } + } + + f.Params[i] = &FunctionParam{ + Key: key, + Default: def, + Piped: piped, + declaration: declaration, + } + + } + + b, err := analyzeNode(arrow.Body, declarations) + if err != nil { + return nil, err + } + f.Body = b + + return f, nil +} + +func analyzeCallExpression(call *ast.CallExpression, declarations DeclarationScope) (*CallExpression, error) { + callee, err := analyzeExpression(call.Callee, declarations) + if err != nil { + return nil, err + } + var args *ObjectExpression + if l := len(call.Arguments); l > 1 { + return nil, fmt.Errorf("arguments are not a single object expression %v", args) + } else if l == 1 { + obj, ok := call.Arguments[0].(*ast.ObjectExpression) + if !ok { + return nil, fmt.Errorf("arguments not an object expression") + } + var err error + args, err = analyzeObjectExpression(obj, declarations) + if err != nil { + return nil, err + } + } else { + args = new(ObjectExpression) + } + + expr := &CallExpression{ + Callee: callee, + Arguments: args, + } + + declarations = declarations.Copy() + for _, arg := range args.Properties { + declarations[arg.Key.Name] = &NativeVariableDeclaration{ + Identifier: arg.Key, + Init: arg.Value, + } + } + + ApplyNewDeclarations(expr.Callee, declarations) + return expr, nil +} + +func ApplyNewDeclarations(n Node, declarations map[string]VariableDeclaration) { + v := &applyDeclarationsVisitor{ + declarations: declarations, + } + Walk(v, n) +} + +type applyDeclarationsVisitor struct { + declarations DeclarationScope +} + +func (v *applyDeclarationsVisitor) Visit(n Node) Visitor { + switch n := n.(type) { + case *IdentifierExpression: + if n.declaration == nil { + 
n.declaration = v.declarations[n.Name] + } + // No need to walk further down this branch + return nil + case *FunctionExpression: + // Remove type information since we may have changed it. + n.typ = nil + case *FunctionParam: + if n.declaration == nil { + n.declaration = v.declarations[n.Key.Name] + } + // No need to walk further down this branch + return nil + } + return v +} +func (v *applyDeclarationsVisitor) Done() {} + +func analyzeMemberExpression(member *ast.MemberExpression, declarations DeclarationScope) (*MemberExpression, error) { + obj, err := analyzeExpression(member.Object, declarations) + if err != nil { + return nil, err + } + + var propertyName string + switch p := member.Property.(type) { + case *ast.Identifier: + propertyName = p.Name + case *ast.StringLiteral: + propertyName = p.Value + case *ast.IntegerLiteral: + propertyName = strconv.FormatInt(p.Value, 10) + default: + return nil, fmt.Errorf("unsupported member property expression of type %T", member.Property) + } + + return &MemberExpression{ + Object: obj, + Property: propertyName, + }, nil +} + +func analyzePipeExpression(pipe *ast.PipeExpression, declarations DeclarationScope) (*CallExpression, error) { + call, err := analyzeCallExpression(pipe.Call, declarations) + if err != nil { + return nil, err + } + + decl, err := resolveDeclaration(call.Callee) + if err != nil { + return nil, err + } + fnTyp := decl.InitType() + if fnTyp.Kind() != Function { + return nil, fmt.Errorf("cannot pipe into non function %q", fnTyp.Kind()) + } + key := fnTyp.PipeArgument() + if key == "" { + return nil, fmt.Errorf("function %q does not have a pipe argument", decl.ID().Name) + } + + value, err := analyzeExpression(pipe.Argument, declarations) + if err != nil { + return nil, err + } + property := &Property{ + Key: &Identifier{Name: key}, + Value: value, + } + + found := false + for i, p := range call.Arguments.Properties { + if key == p.Key.Name { + found = true + call.Arguments.Properties[i] = property + 
break + } + } + if !found { + call.Arguments.Properties = append(call.Arguments.Properties, property) + } + return call, nil +} + +// resolveDeclaration traverse the expression until a variable declaration is found for the expression. +func resolveDeclaration(n Node) (VariableDeclaration, error) { + switch n := n.(type) { + case *IdentifierExpression: + if n.declaration == nil { + return nil, fmt.Errorf("identifier expression %q has no declaration", n.Name) + } + return resolveDeclaration(n.declaration) + case *ExternalVariableDeclaration: + return n, nil + case *NativeVariableDeclaration: + if n.Init == nil { + return nil, fmt.Errorf("variable declaration %v has no init", n.Identifier) + } + if i, ok := n.Init.(*IdentifierExpression); ok { + return resolveDeclaration(i) + } + return n, nil + } + return nil, errors.New("no declaration found") +} + +func analyzeBinaryExpression(binary *ast.BinaryExpression, declarations DeclarationScope) (*BinaryExpression, error) { + left, err := analyzeExpression(binary.Left, declarations) + if err != nil { + return nil, err + } + right, err := analyzeExpression(binary.Right, declarations) + if err != nil { + return nil, err + } + return &BinaryExpression{ + Operator: binary.Operator, + Left: left, + Right: right, + }, nil +} + +func analyzeUnaryExpression(unary *ast.UnaryExpression, declarations DeclarationScope) (*UnaryExpression, error) { + arg, err := analyzeExpression(unary.Argument, declarations) + if err != nil { + return nil, err + } + // TODO(nathanielc): validate operand type once we have type inference working with functions. 
+ //k := arg.Type().Kind() + //if k != Bool && k != Int && k != Float && k != Duration { + // return nil, fmt.Errorf("invalid unary operator %v on type %v", unary.Operator, k) + //} + return &UnaryExpression{ + Operator: unary.Operator, + Argument: arg, + }, nil +} +func analyzeLogicalExpression(logical *ast.LogicalExpression, declarations DeclarationScope) (*LogicalExpression, error) { + left, err := analyzeExpression(logical.Left, declarations) + if err != nil { + return nil, err + } + // TODO(nathanielc): Validate operand types once we have type inference working with functions. + //if k := left.Type().Kind(); k != Bool { + // return nil, fmt.Errorf("left operand to logical expression is not a boolean, got kind %v", k) + //} + right, err := analyzeExpression(logical.Right, declarations) + if err != nil { + return nil, err + } + //if k := right.Type().Kind(); k != Bool { + // return nil, fmt.Errorf("right operand to logical expression is not a boolean, got kind %v", k) + //} + return &LogicalExpression{ + Operator: logical.Operator, + Left: left, + Right: right, + }, nil +} +func analyzeObjectExpression(obj *ast.ObjectExpression, declarations DeclarationScope) (*ObjectExpression, error) { + o := &ObjectExpression{ + Properties: make([]*Property, len(obj.Properties)), + } + for i, p := range obj.Properties { + n, err := analyzeProperty(p, declarations) + if err != nil { + return nil, err + } + o.Properties[i] = n + } + return o, nil +} +func analyzeArrayExpression(array *ast.ArrayExpression, declarations DeclarationScope) (*ArrayExpression, error) { + a := &ArrayExpression{ + Elements: make([]Expression, len(array.Elements)), + } + for i, e := range array.Elements { + n, err := analyzeExpression(e, declarations) + if err != nil { + return nil, err + } + a.Elements[i] = n + } + return a, nil +} + +func analyzeIdentifier(ident *ast.Identifier, declarations DeclarationScope) (*Identifier, error) { + return &Identifier{ + Name: ident.Name, + }, nil +} + +func 
analyzeIdentifierExpression(ident *ast.Identifier, declarations DeclarationScope) (*IdentifierExpression, error) { + return &IdentifierExpression{ + Name: ident.Name, + declaration: declarations[ident.Name], + }, nil +} + +func analyzeProperty(property *ast.Property, declarations DeclarationScope) (*Property, error) { + key, err := analyzeIdentifier(property.Key, declarations) + if err != nil { + return nil, err + } + value, err := analyzeExpression(property.Value, declarations) + if err != nil { + return nil, err + } + return &Property{ + Key: key, + Value: value, + }, nil +} + +func analyzeDateTimeLiteral(lit *ast.DateTimeLiteral, declarations DeclarationScope) (*DateTimeLiteral, error) { + return &DateTimeLiteral{ + Value: lit.Value, + }, nil +} +func analyzeDurationLiteral(lit *ast.DurationLiteral, declarations DeclarationScope) (*DurationLiteral, error) { + return &DurationLiteral{ + Value: lit.Value, + }, nil +} +func analyzeFloatLiteral(lit *ast.FloatLiteral, declarations DeclarationScope) (*FloatLiteral, error) { + return &FloatLiteral{ + Value: lit.Value, + }, nil +} +func analyzeIntegerLiteral(lit *ast.IntegerLiteral, declarations DeclarationScope) (*IntegerLiteral, error) { + return &IntegerLiteral{ + Value: lit.Value, + }, nil +} +func analyzeUnsignedIntegerLiteral(lit *ast.UnsignedIntegerLiteral, declarations DeclarationScope) (*UnsignedIntegerLiteral, error) { + return &UnsignedIntegerLiteral{ + Value: lit.Value, + }, nil +} +func analyzeStringLiteral(lit *ast.StringLiteral, declarations DeclarationScope) (*StringLiteral, error) { + return &StringLiteral{ + Value: lit.Value, + }, nil +} +func analyzeBooleanLiteral(lit *ast.BooleanLiteral, declarations DeclarationScope) (*BooleanLiteral, error) { + return &BooleanLiteral{ + Value: lit.Value, + }, nil +} +func analyzeRegexpLiteral(lit *ast.RegexpLiteral, declarations DeclarationScope) (*RegexpLiteral, error) { + return &RegexpLiteral{ + Value: lit.Value, + }, nil +} diff --git 
a/vendor/github.com/influxdata/ifql/semantic/graph_test.go b/vendor/github.com/influxdata/ifql/semantic/graph_test.go new file mode 100644 index 000000000..aff7390da --- /dev/null +++ b/vendor/github.com/influxdata/ifql/semantic/graph_test.go @@ -0,0 +1,192 @@ +package semantic_test + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/influxdata/ifql/ast" + "github.com/influxdata/ifql/semantic" + "github.com/influxdata/ifql/semantic/semantictest" +) + +func TestNew(t *testing.T) { + testCases := []struct { + name string + program *ast.Program + want *semantic.Program + wantErr bool + }{ + { + name: "empty", + program: &ast.Program{}, + want: &semantic.Program{ + Body: []semantic.Statement{}, + }, + }, + { + name: "var declaration", + program: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{Name: "a"}, + Init: &ast.BooleanLiteral{Value: true}, + }}, + }, + &ast.ExpressionStatement{ + Expression: &ast.Identifier{Name: "a"}, + }, + }, + }, + want: &semantic.Program{ + Body: []semantic.Statement{ + &semantic.NativeVariableDeclaration{ + Identifier: &semantic.Identifier{Name: "a"}, + Init: &semantic.BooleanLiteral{Value: true}, + }, + &semantic.ExpressionStatement{ + Expression: &semantic.IdentifierExpression{Name: "a"}, + }, + }, + }, + }, + { + name: "function", + program: &ast.Program{ + Body: []ast.Statement{ + &ast.VariableDeclaration{ + Declarations: []*ast.VariableDeclarator{{ + ID: &ast.Identifier{Name: "f"}, + Init: &ast.ArrowFunctionExpression{ + Params: []*ast.Property{ + {Key: &ast.Identifier{Name: "a"}}, + {Key: &ast.Identifier{Name: "b"}}, + }, + Body: &ast.BinaryExpression{ + Operator: ast.AdditionOperator, + Left: &ast.Identifier{Name: "a"}, + Right: &ast.Identifier{Name: "b"}, + }, + }, + }}, + }, + &ast.ExpressionStatement{ + Expression: &ast.CallExpression{ + Callee: &ast.Identifier{Name: "f"}, + Arguments: []ast.Expression{&ast.ObjectExpression{ + 
Properties: []*ast.Property{ + {Key: &ast.Identifier{Name: "a"}, Value: &ast.IntegerLiteral{Value: 2}}, + {Key: &ast.Identifier{Name: "b"}, Value: &ast.IntegerLiteral{Value: 3}}, + }, + }}, + }, + }, + }, + }, + want: &semantic.Program{ + Body: []semantic.Statement{ + &semantic.NativeVariableDeclaration{ + Identifier: &semantic.Identifier{Name: "f"}, + Init: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{ + {Key: &semantic.Identifier{Name: "a"}}, + {Key: &semantic.Identifier{Name: "b"}}, + }, + Body: &semantic.BinaryExpression{ + Operator: ast.AdditionOperator, + Left: &semantic.IdentifierExpression{ + Name: "a", + }, + Right: &semantic.IdentifierExpression{ + Name: "b", + }, + }, + }, + }, + &semantic.ExpressionStatement{ + Expression: &semantic.CallExpression{ + Callee: &semantic.IdentifierExpression{ + Name: "f", + }, + Arguments: &semantic.ObjectExpression{ + Properties: []*semantic.Property{ + {Key: &semantic.Identifier{Name: "a"}, Value: &semantic.IntegerLiteral{Value: 2}}, + {Key: &semantic.Identifier{Name: "b"}, Value: &semantic.IntegerLiteral{Value: 3}}, + }, + }, + }, + }, + }, + }, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + got, err := semantic.New(tc.program, nil) + if !tc.wantErr && err != nil { + t.Fatal(err) + } else if tc.wantErr && err == nil { + t.Fatal("expected error") + } + + if !cmp.Equal(tc.want, got, semantictest.CmpOptions...) 
{ + t.Errorf("unexpected semantic program: -want/+got:\n%s", cmp.Diff(tc.want, got, semantictest.CmpOptions...)) + } + }) + } +} + +func TestExpression_Kind(t *testing.T) { + testCases := []struct { + name string + expr semantic.Expression + want semantic.Kind + }{ + { + name: "string", + expr: &semantic.StringLiteral{}, + want: semantic.String, + }, + { + name: "int", + expr: &semantic.IntegerLiteral{}, + want: semantic.Int, + }, + { + name: "uint", + expr: &semantic.UnsignedIntegerLiteral{}, + want: semantic.UInt, + }, + { + name: "float", + expr: &semantic.FloatLiteral{}, + want: semantic.Float, + }, + { + name: "bool", + expr: &semantic.BooleanLiteral{}, + want: semantic.Bool, + }, + { + name: "time", + expr: &semantic.DateTimeLiteral{}, + want: semantic.Time, + }, + { + name: "duration", + expr: &semantic.DurationLiteral{}, + want: semantic.Duration, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + got := tc.expr.Type().Kind() + + if !cmp.Equal(tc.want, got) { + t.Errorf("unexpected expression type: -want/+got:\n%s", cmp.Diff(tc.want, got)) + } + }) + } +} diff --git a/vendor/github.com/influxdata/ifql/semantic/json.go b/vendor/github.com/influxdata/ifql/semantic/json.go new file mode 100644 index 000000000..d8e374cfa --- /dev/null +++ b/vendor/github.com/influxdata/ifql/semantic/json.go @@ -0,0 +1,866 @@ +package semantic + +import ( + "encoding/json" + "errors" + "fmt" + "regexp" + "strconv" + "time" +) + +func (p *Program) MarshalJSON() ([]byte, error) { + type Alias Program + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: p.NodeType(), + Alias: (*Alias)(p), + } + return json.Marshal(raw) +} +func (p *Program) UnmarshalJSON(data []byte) error { + type Alias Program + raw := struct { + *Alias + Body []json.RawMessage `json:"body"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *p = *(*Program)(raw.Alias) + } + + p.Body = make([]Statement, 
len(raw.Body)) + for i, r := range raw.Body { + s, err := unmarshalStatement(r) + if err != nil { + return err + } + p.Body[i] = s + } + return nil +} +func (s *BlockStatement) MarshalJSON() ([]byte, error) { + type Alias BlockStatement + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: s.NodeType(), + Alias: (*Alias)(s), + } + return json.Marshal(raw) +} +func (s *BlockStatement) UnmarshalJSON(data []byte) error { + type Alias BlockStatement + raw := struct { + *Alias + Body []json.RawMessage `json:"body"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *s = *(*BlockStatement)(raw.Alias) + } + + s.Body = make([]Statement, len(raw.Body)) + for i, r := range raw.Body { + stmt, err := unmarshalStatement(r) + if err != nil { + return err + } + s.Body[i] = stmt + } + return nil +} +func (s *ExpressionStatement) MarshalJSON() ([]byte, error) { + type Alias ExpressionStatement + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: s.NodeType(), + Alias: (*Alias)(s), + } + return json.Marshal(raw) +} +func (s *ExpressionStatement) UnmarshalJSON(data []byte) error { + type Alias ExpressionStatement + raw := struct { + *Alias + Expression json.RawMessage `json:"expression"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *s = *(*ExpressionStatement)(raw.Alias) + } + + e, err := unmarshalExpression(raw.Expression) + if err != nil { + return err + } + s.Expression = e + return nil +} +func (s *ReturnStatement) MarshalJSON() ([]byte, error) { + type Alias ReturnStatement + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: s.NodeType(), + Alias: (*Alias)(s), + } + return json.Marshal(raw) +} +func (s *ReturnStatement) UnmarshalJSON(data []byte) error { + type Alias ReturnStatement + raw := struct { + *Alias + Argument json.RawMessage `json:"argument"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } 
+ if raw.Alias != nil { + *s = *(*ReturnStatement)(raw.Alias) + } + + e, err := unmarshalExpression(raw.Argument) + if err != nil { + return err + } + s.Argument = e + return nil +} +func (d *NativeVariableDeclaration) MarshalJSON() ([]byte, error) { + type Alias NativeVariableDeclaration + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: d.NodeType(), + Alias: (*Alias)(d), + } + return json.Marshal(raw) +} +func (d *NativeVariableDeclaration) UnmarshalJSON(data []byte) error { + type Alias NativeVariableDeclaration + raw := struct { + *Alias + Init json.RawMessage `json:"init"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *d = *(*NativeVariableDeclaration)(raw.Alias) + } + + e, err := unmarshalExpression(raw.Init) + if err != nil { + return err + } + d.Init = e + return nil +} +func (d *ExternalVariableDeclaration) MarshalJSON() ([]byte, error) { + return nil, errors.New("cannot marshal ExternalVariableDeclaration") +} +func (e *CallExpression) MarshalJSON() ([]byte, error) { + type Alias CallExpression + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: e.NodeType(), + Alias: (*Alias)(e), + } + return json.Marshal(raw) +} +func (e *CallExpression) UnmarshalJSON(data []byte) error { + type Alias CallExpression + raw := struct { + *Alias + Callee json.RawMessage `json:"callee"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *e = *(*CallExpression)(raw.Alias) + } + + callee, err := unmarshalExpression(raw.Callee) + if err != nil { + return err + } + e.Callee = callee + + return nil +} +func (e *MemberExpression) MarshalJSON() ([]byte, error) { + type Alias MemberExpression + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: e.NodeType(), + Alias: (*Alias)(e), + } + return json.Marshal(raw) +} +func (e *MemberExpression) UnmarshalJSON(data []byte) error { + type Alias MemberExpression + raw := struct { + 
*Alias + Object json.RawMessage `json:"object"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *e = *(*MemberExpression)(raw.Alias) + } + + object, err := unmarshalExpression(raw.Object) + if err != nil { + return err + } + e.Object = object + + return nil +} +func (e *FunctionExpression) MarshalJSON() ([]byte, error) { + type Alias FunctionExpression + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: e.NodeType(), + Alias: (*Alias)(e), + } + return json.Marshal(raw) +} +func (e *FunctionExpression) UnmarshalJSON(data []byte) error { + type Alias FunctionExpression + raw := struct { + *Alias + Body json.RawMessage `json:"body"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *e = *(*FunctionExpression)(raw.Alias) + } + + body, err := unmarshalNode(raw.Body) + if err != nil { + return err + } + e.Body = body + return nil +} +func (e *FunctionParam) MarshalJSON() ([]byte, error) { + type Alias FunctionParam + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: e.NodeType(), + Alias: (*Alias)(e), + } + return json.Marshal(raw) +} +func (e *FunctionParam) UnmarshalJSON(data []byte) error { + type Alias FunctionParam + raw := struct { + *Alias + Default json.RawMessage `json:"default"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *e = *(*FunctionParam)(raw.Alias) + } + + def, err := unmarshalLiteral(raw.Default) + if err != nil { + return err + } + e.Default = def + return nil +} +func (e *BinaryExpression) MarshalJSON() ([]byte, error) { + type Alias BinaryExpression + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: e.NodeType(), + Alias: (*Alias)(e), + } + return json.Marshal(raw) +} +func (e *BinaryExpression) UnmarshalJSON(data []byte) error { + type Alias BinaryExpression + raw := struct { + *Alias + Left json.RawMessage `json:"left"` + Right 
json.RawMessage `json:"right"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *e = *(*BinaryExpression)(raw.Alias) + } + + l, err := unmarshalExpression(raw.Left) + if err != nil { + return err + } + e.Left = l + + r, err := unmarshalExpression(raw.Right) + if err != nil { + return err + } + e.Right = r + return nil +} +func (e *UnaryExpression) MarshalJSON() ([]byte, error) { + type Alias UnaryExpression + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: e.NodeType(), + Alias: (*Alias)(e), + } + return json.Marshal(raw) +} +func (e *UnaryExpression) UnmarshalJSON(data []byte) error { + type Alias UnaryExpression + raw := struct { + *Alias + Argument json.RawMessage `json:"argument"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *e = *(*UnaryExpression)(raw.Alias) + } + + argument, err := unmarshalExpression(raw.Argument) + if err != nil { + return err + } + e.Argument = argument + + return nil +} +func (e *LogicalExpression) MarshalJSON() ([]byte, error) { + type Alias LogicalExpression + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: e.NodeType(), + Alias: (*Alias)(e), + } + return json.Marshal(raw) +} +func (e *LogicalExpression) UnmarshalJSON(data []byte) error { + type Alias LogicalExpression + raw := struct { + *Alias + Left json.RawMessage `json:"left"` + Right json.RawMessage `json:"right"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *e = *(*LogicalExpression)(raw.Alias) + } + + l, err := unmarshalExpression(raw.Left) + if err != nil { + return err + } + e.Left = l + + r, err := unmarshalExpression(raw.Right) + if err != nil { + return err + } + e.Right = r + return nil +} +func (e *ArrayExpression) MarshalJSON() ([]byte, error) { + type Alias ArrayExpression + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: e.NodeType(), + Alias: 
(*Alias)(e), + } + return json.Marshal(raw) +} +func (e *ArrayExpression) UnmarshalJSON(data []byte) error { + type Alias ArrayExpression + raw := struct { + *Alias + Elements []json.RawMessage `json:"elements"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *e = *(*ArrayExpression)(raw.Alias) + } + + e.Elements = make([]Expression, len(raw.Elements)) + for i, r := range raw.Elements { + expr, err := unmarshalExpression(r) + if err != nil { + return err + } + e.Elements[i] = expr + } + return nil +} +func (e *ObjectExpression) MarshalJSON() ([]byte, error) { + type Alias ObjectExpression + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: e.NodeType(), + Alias: (*Alias)(e), + } + return json.Marshal(raw) +} +func (e *ConditionalExpression) MarshalJSON() ([]byte, error) { + type Alias ConditionalExpression + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: e.NodeType(), + Alias: (*Alias)(e), + } + return json.Marshal(raw) +} +func (e *ConditionalExpression) UnmarshalJSON(data []byte) error { + type Alias ConditionalExpression + raw := struct { + *Alias + Test json.RawMessage `json:"test"` + Alternate json.RawMessage `json:"alternate"` + Consequent json.RawMessage `json:"consequent"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *e = *(*ConditionalExpression)(raw.Alias) + } + + test, err := unmarshalExpression(raw.Test) + if err != nil { + return err + } + e.Test = test + + alternate, err := unmarshalExpression(raw.Alternate) + if err != nil { + return err + } + e.Alternate = alternate + + consequent, err := unmarshalExpression(raw.Consequent) + if err != nil { + return err + } + e.Consequent = consequent + return nil +} +func (p *Property) MarshalJSON() ([]byte, error) { + type Alias Property + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: p.NodeType(), + Alias: (*Alias)(p), + } + return json.Marshal(raw) 
+} +func (p *Property) UnmarshalJSON(data []byte) error { + type Alias Property + raw := struct { + *Alias + Value json.RawMessage `json:"value"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + if raw.Alias != nil { + *p = *(*Property)(raw.Alias) + } + + if raw.Value != nil { + value, err := unmarshalExpression(raw.Value) + if err != nil { + return err + } + p.Value = value + } + return nil +} +func (e *IdentifierExpression) MarshalJSON() ([]byte, error) { + type Alias IdentifierExpression + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: e.NodeType(), + Alias: (*Alias)(e), + } + return json.Marshal(raw) +} +func (i *Identifier) MarshalJSON() ([]byte, error) { + type Alias Identifier + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: i.NodeType(), + Alias: (*Alias)(i), + } + return json.Marshal(raw) +} +func (l *StringLiteral) MarshalJSON() ([]byte, error) { + type Alias StringLiteral + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: l.NodeType(), + Alias: (*Alias)(l), + } + return json.Marshal(raw) +} +func (l *BooleanLiteral) MarshalJSON() ([]byte, error) { + type Alias BooleanLiteral + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: l.NodeType(), + Alias: (*Alias)(l), + } + return json.Marshal(raw) +} +func (l *FloatLiteral) MarshalJSON() ([]byte, error) { + type Alias FloatLiteral + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: l.NodeType(), + Alias: (*Alias)(l), + } + return json.Marshal(raw) +} +func (l *IntegerLiteral) MarshalJSON() ([]byte, error) { + type Alias IntegerLiteral + raw := struct { + Type string `json:"type"` + *Alias + Value string `json:"value"` + }{ + Type: l.NodeType(), + Alias: (*Alias)(l), + Value: strconv.FormatInt(l.Value, 10), + } + return json.Marshal(raw) +} +func (l *IntegerLiteral) UnmarshalJSON(data []byte) error { + type Alias IntegerLiteral + raw := struct { + *Alias + Value string `json:"value"` + }{} + if 
err := json.Unmarshal(data, &raw); err != nil { + return err + } + + if raw.Alias != nil { + *l = *(*IntegerLiteral)(raw.Alias) + } + + value, err := strconv.ParseInt(raw.Value, 10, 64) + if err != nil { + return err + } + l.Value = value + return nil +} +func (l *UnsignedIntegerLiteral) MarshalJSON() ([]byte, error) { + type Alias UnsignedIntegerLiteral + raw := struct { + Type string `json:"type"` + *Alias + Value string `json:"value"` + }{ + Type: l.NodeType(), + Alias: (*Alias)(l), + Value: strconv.FormatUint(l.Value, 10), + } + return json.Marshal(raw) +} +func (l *UnsignedIntegerLiteral) UnmarshalJSON(data []byte) error { + type Alias UnsignedIntegerLiteral + raw := struct { + *Alias + Value string `json:"value"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + + if raw.Alias != nil { + *l = *(*UnsignedIntegerLiteral)(raw.Alias) + } + + value, err := strconv.ParseUint(raw.Value, 10, 64) + if err != nil { + return err + } + l.Value = value + return nil +} +func (l *RegexpLiteral) MarshalJSON() ([]byte, error) { + type Alias RegexpLiteral + raw := struct { + Type string `json:"type"` + *Alias + Value string `json:"value"` + }{ + Type: l.NodeType(), + Alias: (*Alias)(l), + Value: l.Value.String(), + } + return json.Marshal(raw) +} +func (l *RegexpLiteral) UnmarshalJSON(data []byte) error { + type Alias RegexpLiteral + raw := struct { + *Alias + Value string `json:"value"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + + if raw.Alias != nil { + *l = *(*RegexpLiteral)(raw.Alias) + } + + value, err := regexp.Compile(raw.Value) + if err != nil { + return err + } + l.Value = value + return nil +} +func (l *DurationLiteral) MarshalJSON() ([]byte, error) { + type Alias DurationLiteral + raw := struct { + Type string `json:"type"` + *Alias + Value string `json:"value"` + }{ + Type: l.NodeType(), + Alias: (*Alias)(l), + Value: l.Value.String(), + } + return json.Marshal(raw) +} +func (l *DurationLiteral) 
UnmarshalJSON(data []byte) error { + type Alias DurationLiteral + raw := struct { + *Alias + Value string `json:"value"` + }{} + if err := json.Unmarshal(data, &raw); err != nil { + return err + } + + if raw.Alias != nil { + *l = *(*DurationLiteral)(raw.Alias) + } + + value, err := time.ParseDuration(raw.Value) + if err != nil { + return err + } + l.Value = value + return nil +} + +func (l *DateTimeLiteral) MarshalJSON() ([]byte, error) { + type Alias DateTimeLiteral + raw := struct { + Type string `json:"type"` + *Alias + }{ + Type: l.NodeType(), + Alias: (*Alias)(l), + } + return json.Marshal(raw) +} + +func checkNullMsg(msg json.RawMessage) bool { + switch len(msg) { + case 0: + return true + case 4: + return string(msg) == "null" + default: + return false + } +} +func unmarshalStatement(msg json.RawMessage) (Statement, error) { + if checkNullMsg(msg) { + return nil, nil + } + n, err := unmarshalNode(msg) + if err != nil { + return nil, err + } + s, ok := n.(Statement) + if !ok { + return nil, fmt.Errorf("node %q is not a statement", n.NodeType()) + } + return s, nil +} +func unmarshalExpression(msg json.RawMessage) (Expression, error) { + if checkNullMsg(msg) { + return nil, nil + } + n, err := unmarshalNode(msg) + if err != nil { + return nil, err + } + e, ok := n.(Expression) + if !ok { + return nil, fmt.Errorf("node %q is not an expression", n.NodeType()) + } + return e, nil +} +func unmarshalLiteral(msg json.RawMessage) (Literal, error) { + if checkNullMsg(msg) { + return nil, nil + } + n, err := unmarshalNode(msg) + if err != nil { + return nil, err + } + e, ok := n.(Literal) + if !ok { + return nil, fmt.Errorf("node %q is not a literal", n.NodeType()) + } + return e, nil +} +func unmarshalNode(msg json.RawMessage) (Node, error) { + if checkNullMsg(msg) { + return nil, nil + } + + type typeRawMessage struct { + Type string `json:"type"` + } + + typ := typeRawMessage{} + if err := json.Unmarshal(msg, &typ); err != nil { + return nil, err + } + + var node 
Node + switch typ.Type { + case "Program": + node = new(Program) + case "BlockStatement": + node = new(BlockStatement) + case "ExpressionStatement": + node = new(ExpressionStatement) + case "ReturnStatement": + node = new(ReturnStatement) + case "NativeVariableDeclaration": + node = new(NativeVariableDeclaration) + case "CallExpression": + node = new(CallExpression) + case "MemberExpression": + node = new(MemberExpression) + case "BinaryExpression": + node = new(BinaryExpression) + case "UnaryExpression": + node = new(UnaryExpression) + case "LogicalExpression": + node = new(LogicalExpression) + case "ObjectExpression": + node = new(ObjectExpression) + case "ConditionalExpression": + node = new(ConditionalExpression) + case "ArrayExpression": + node = new(ArrayExpression) + case "Identifier": + node = new(Identifier) + case "IdentifierExpression": + node = new(IdentifierExpression) + case "StringLiteral": + node = new(StringLiteral) + case "BooleanLiteral": + node = new(BooleanLiteral) + case "FloatLiteral": + node = new(FloatLiteral) + case "IntegerLiteral": + node = new(IntegerLiteral) + case "UnsignedIntegerLiteral": + node = new(UnsignedIntegerLiteral) + case "RegexpLiteral": + node = new(RegexpLiteral) + case "DurationLiteral": + node = new(DurationLiteral) + case "DateTimeLiteral": + node = new(DateTimeLiteral) + case "ArrowFunctionExpression": + node = new(FunctionExpression) + case "Property": + node = new(Property) + default: + return nil, fmt.Errorf("unknown type %q", typ.Type) + } + + if err := json.Unmarshal(msg, node); err != nil { + return nil, err + } + return node, nil +} +func UnmarshalNode(data []byte) (Node, error) { + return unmarshalNode((json.RawMessage)(data)) +} diff --git a/vendor/github.com/influxdata/ifql/semantic/json_test.go b/vendor/github.com/influxdata/ifql/semantic/json_test.go new file mode 100644 index 000000000..706d81b19 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/semantic/json_test.go @@ -0,0 +1,233 @@ +package 
semantic_test + +import ( + "encoding/json" + "math" + "regexp" + "testing" + "time" + + "github.com/google/go-cmp/cmp" + "github.com/influxdata/ifql/ast" + "github.com/influxdata/ifql/semantic" + "github.com/influxdata/ifql/semantic/semantictest" +) + +func TestJSONMarshal(t *testing.T) { + testCases := []struct { + name string + node semantic.Node + want string + }{ + { + name: "simple program", + node: &semantic.Program{ + Body: []semantic.Statement{ + &semantic.ExpressionStatement{ + Expression: &semantic.StringLiteral{Value: "hello"}, + }, + }, + }, + want: `{"type":"Program","body":[{"type":"ExpressionStatement","expression":{"type":"StringLiteral","value":"hello"}}]}`, + }, + { + name: "block statement", + node: &semantic.BlockStatement{ + Body: []semantic.Statement{ + &semantic.ExpressionStatement{ + Expression: &semantic.StringLiteral{Value: "hello"}, + }, + }, + }, + want: `{"type":"BlockStatement","body":[{"type":"ExpressionStatement","expression":{"type":"StringLiteral","value":"hello"}}]}`, + }, + { + name: "expression statement", + node: &semantic.ExpressionStatement{ + Expression: &semantic.StringLiteral{Value: "hello"}, + }, + want: `{"type":"ExpressionStatement","expression":{"type":"StringLiteral","value":"hello"}}`, + }, + { + name: "return statement", + node: &semantic.ReturnStatement{ + Argument: &semantic.StringLiteral{Value: "hello"}, + }, + want: `{"type":"ReturnStatement","argument":{"type":"StringLiteral","value":"hello"}}`, + }, + { + name: "variable declaration", + node: &semantic.NativeVariableDeclaration{ + Identifier: &semantic.Identifier{Name: "a"}, + Init: &semantic.StringLiteral{Value: "hello"}, + }, + want: `{"type":"NativeVariableDeclaration","identifier":{"type":"Identifier","name":"a"},"init":{"type":"StringLiteral","value":"hello"}}`, + }, + { + name: "call expression", + node: &semantic.CallExpression{ + Callee: &semantic.IdentifierExpression{Name: "a"}, + Arguments: &semantic.ObjectExpression{Properties: 
[]*semantic.Property{{Key: &semantic.Identifier{Name: "s"}, Value: &semantic.StringLiteral{Value: "hello"}}}}, + }, + want: `{"type":"CallExpression","callee":{"type":"IdentifierExpression","name":"a"},"arguments":{"type":"ObjectExpression","properties":[{"type":"Property","key":{"type":"Identifier","name":"s"},"value":{"type":"StringLiteral","value":"hello"}}]}}`, + }, + { + name: "member expression", + node: &semantic.MemberExpression{ + Object: &semantic.IdentifierExpression{Name: "a"}, + Property: "hello", + }, + want: `{"type":"MemberExpression","object":{"type":"IdentifierExpression","name":"a"},"property":"hello"}`, + }, + { + name: "arrow function expression", + node: &semantic.FunctionExpression{ + Params: []*semantic.FunctionParam{{Key: &semantic.Identifier{Name: "a"}}}, + Body: &semantic.StringLiteral{Value: "hello"}, + }, + want: `{"type":"ArrowFunctionExpression","params":[{"type":"FunctionParam","key":{"type":"Identifier","name":"a"},"default":null}],"body":{"type":"StringLiteral","value":"hello"}}`, + }, + { + name: "binary expression", + node: &semantic.BinaryExpression{ + Operator: ast.AdditionOperator, + Left: &semantic.StringLiteral{Value: "hello"}, + Right: &semantic.StringLiteral{Value: "world"}, + }, + want: `{"type":"BinaryExpression","operator":"+","left":{"type":"StringLiteral","value":"hello"},"right":{"type":"StringLiteral","value":"world"}}`, + }, + { + name: "unary expression", + node: &semantic.UnaryExpression{ + Operator: ast.NotOperator, + Argument: &semantic.BooleanLiteral{Value: true}, + }, + want: `{"type":"UnaryExpression","operator":"not","argument":{"type":"BooleanLiteral","value":true}}`, + }, + { + name: "logical expression", + node: &semantic.LogicalExpression{ + Operator: ast.OrOperator, + Left: &semantic.BooleanLiteral{Value: false}, + Right: &semantic.BooleanLiteral{Value: true}, + }, + want: 
`{"type":"LogicalExpression","operator":"or","left":{"type":"BooleanLiteral","value":false},"right":{"type":"BooleanLiteral","value":true}}`, + }, + { + name: "array expression", + node: &semantic.ArrayExpression{ + Elements: []semantic.Expression{&semantic.StringLiteral{Value: "hello"}}, + }, + want: `{"type":"ArrayExpression","elements":[{"type":"StringLiteral","value":"hello"}]}`, + }, + { + name: "object expression", + node: &semantic.ObjectExpression{ + Properties: []*semantic.Property{{ + Key: &semantic.Identifier{Name: "a"}, + Value: &semantic.StringLiteral{Value: "hello"}, + }}, + }, + want: `{"type":"ObjectExpression","properties":[{"type":"Property","key":{"type":"Identifier","name":"a"},"value":{"type":"StringLiteral","value":"hello"}}]}`, + }, + { + name: "conditional expression", + node: &semantic.ConditionalExpression{ + Test: &semantic.BooleanLiteral{Value: true}, + Alternate: &semantic.StringLiteral{Value: "false"}, + Consequent: &semantic.StringLiteral{Value: "true"}, + }, + want: `{"type":"ConditionalExpression","test":{"type":"BooleanLiteral","value":true},"alternate":{"type":"StringLiteral","value":"false"},"consequent":{"type":"StringLiteral","value":"true"}}`, + }, + { + name: "property", + node: &semantic.Property{ + Key: &semantic.Identifier{Name: "a"}, + Value: &semantic.StringLiteral{Value: "hello"}, + }, + want: `{"type":"Property","key":{"type":"Identifier","name":"a"},"value":{"type":"StringLiteral","value":"hello"}}`, + }, + { + name: "identifier", + node: &semantic.Identifier{ + Name: "a", + }, + want: `{"type":"Identifier","name":"a"}`, + }, + { + name: "string literal", + node: &semantic.StringLiteral{ + Value: "hello", + }, + want: `{"type":"StringLiteral","value":"hello"}`, + }, + { + name: "boolean literal", + node: &semantic.BooleanLiteral{ + Value: true, + }, + want: `{"type":"BooleanLiteral","value":true}`, + }, + { + name: "float literal", + node: &semantic.FloatLiteral{ + Value: 42.1, + }, + want: 
`{"type":"FloatLiteral","value":42.1}`, + }, + { + name: "integer literal", + node: &semantic.IntegerLiteral{ + Value: math.MaxInt64, + }, + want: `{"type":"IntegerLiteral","value":"9223372036854775807"}`, + }, + { + name: "unsigned integer literal", + node: &semantic.UnsignedIntegerLiteral{ + Value: math.MaxUint64, + }, + want: `{"type":"UnsignedIntegerLiteral","value":"18446744073709551615"}`, + }, + { + name: "regexp literal", + node: &semantic.RegexpLiteral{ + Value: regexp.MustCompile(`.*`), + }, + want: `{"type":"RegexpLiteral","value":".*"}`, + }, + { + name: "duration literal", + node: &semantic.DurationLiteral{ + Value: time.Hour + time.Minute, + }, + want: `{"type":"DurationLiteral","value":"1h1m0s"}`, + }, + { + name: "datetime literal", + node: &semantic.DateTimeLiteral{ + Value: time.Date(2017, 8, 8, 8, 8, 8, 8, time.UTC), + }, + want: `{"type":"DateTimeLiteral","value":"2017-08-08T08:08:08.000000008Z"}`, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + data, err := json.Marshal(tc.node) + if err != nil { + t.Fatal(err) + } + if got := string(data); got != tc.want { + t.Errorf("unexpected json data:\nwant:%s\ngot: %s\n", tc.want, got) + } + node, err := semantic.UnmarshalNode(data) + if err != nil { + t.Fatal(err) + } + if !cmp.Equal(tc.node, node, semantictest.CmpOptions...) { + t.Errorf("unexpected node after unmarshalling: -want/+got:\n%s", cmp.Diff(tc.node, node, semantictest.CmpOptions...)) + } + }) + } +} diff --git a/vendor/github.com/influxdata/ifql/semantic/types.go b/vendor/github.com/influxdata/ifql/semantic/types.go new file mode 100644 index 000000000..5c8a90cba --- /dev/null +++ b/vendor/github.com/influxdata/ifql/semantic/types.go @@ -0,0 +1,492 @@ +package semantic + +import ( + "bytes" + "encoding/binary" + "fmt" + "hash/fnv" + "sort" + "strconv" + "sync" +) + +// Type is the representation of an IFQL type. 
+// +// Type values are comparable and as such can be used as map keys and directly comparison using the == operator. +// Two types are equal if they represent identical types. +// +// DO NOT embed this type into other interfaces or structs as that will invalidate the comparison properties of the interface. +type Type interface { + // Kind returns the specific kind of this type. + Kind() Kind + + // PropertyType returns the type of a given property. + // It panics if the type's Kind is not Object + PropertyType(name string) Type + + // Properties returns a map of all property types. + // It panics if the type's Kind is not Object + Properties() map[string]Type + + // ElementType return the type of elements in the array. + // It panics if the type's Kind is not Array. + ElementType() Type + + // PipeArgument reports the name of the argument that can be pipe into. + // It panics if the type's Kind is not Function. + PipeArgument() string + + // ReturnType reports the return type of the function + // It panics if the type's Kind is not Function. + ReturnType() Type + + // Types cannot be created outside of the semantic package + // This is needed so that we can cache type definitions. 
+ typ() +} + +type Kind int + +const ( + Invalid Kind = iota + Nil + String + Int + UInt + Float + Bool + Time + Duration + Regexp + Array + Object + Function +) + +var kindNames = []string{ + Invalid: "invalid", + Nil: "nil", + String: "string", + Int: "int", + UInt: "uint", + Float: "float", + Bool: "bool", + Time: "time", + Duration: "duration", + Regexp: "regexp", + Array: "array", + Object: "object", + Function: "function", +} + +func (k Kind) String() string { + if int(k) < len(kindNames) { + return kindNames[k] + } + return "kind" + strconv.Itoa(int(k)) +} + +func (k Kind) Kind() Kind { + return k +} +func (k Kind) PropertyType(name string) Type { + panic(fmt.Errorf("cannot get type of property %q, from kind %q", name, k)) +} +func (k Kind) Properties() map[string]Type { + panic(fmt.Errorf("cannot get properties from kind %s", k)) +} +func (k Kind) ElementType() Type { + panic(fmt.Errorf("cannot get element type from kind %s", k)) +} +func (k Kind) PipeArgument() string { + panic(fmt.Errorf("cannot get pipe argument name from kind %s", k)) +} +func (k Kind) ReturnType() Type { + panic(fmt.Errorf("cannot get return type from kind %s", k)) +} +func (k Kind) typ() {} + +type arrayType struct { + elementType Type +} + +func (t *arrayType) String() string { + return fmt.Sprintf("[%v]", t.elementType) +} + +func (t *arrayType) Kind() Kind { + return Array +} +func (t *arrayType) PropertyType(name string) Type { + panic(fmt.Errorf("cannot get property type of kind %s", t.Kind())) +} +func (t *arrayType) Properties() map[string]Type { + panic(fmt.Errorf("cannot get properties type of kind %s", t.Kind())) +} +func (t *arrayType) ElementType() Type { + return t.elementType +} +func (t *arrayType) PipeArgument() string { + panic(fmt.Errorf("cannot get pipe argument name from kind %s", t.Kind())) +} +func (t *arrayType) ReturnType() Type { + panic(fmt.Errorf("cannot get return type of kind %s", t.Kind())) +} + +func (t *arrayType) typ() {} + +// arrayTypeCache caches 
*arrayType values. +// +// Since arrayTypes only have a single field elementType we can key +// all arrayTypes by their elementType. +var arrayTypeCache struct { + sync.Mutex // Guards stores (but not loads) on m. + + // m is a map[Type]*arrayType keyed by the elementType of the array. + // Elements in m are append-only and thus safe for concurrent reading. + m sync.Map +} + +// arrayTypeOf returns the Type for the given ArrayExpression. +func arrayTypeOf(e *ArrayExpression) Type { + if len(e.Elements) == 0 { + return EmptyArrayType + } + et := e.Elements[0].Type() + return NewArrayType(et) +} + +var EmptyArrayType = NewArrayType(Nil) + +func NewArrayType(elementType Type) Type { + // Lookup arrayType in cache by elementType + if t, ok := arrayTypeCache.m.Load(elementType); ok { + return t.(*arrayType) + } + + // Type not found in cache, lock and retry. + arrayTypeCache.Lock() + defer arrayTypeCache.Unlock() + + // First read again while holding the lock. + if t, ok := arrayTypeCache.m.Load(elementType); ok { + return t.(*arrayType) + } + + // Still no cache entry, add it. 
+ at := &arrayType{ + elementType: elementType, + } + arrayTypeCache.m.Store(elementType, at) + + return at +} + +type objectType struct { + properties map[string]Type +} + +func (t *objectType) String() string { + var buf bytes.Buffer + buf.Write([]byte("{")) + for k, prop := range t.properties { + fmt.Fprintf(&buf, "%s:%v,", k, prop) + } + buf.WriteRune('}') + + return buf.String() +} + +func (t *objectType) Kind() Kind { + return Object +} +func (t *objectType) PropertyType(name string) Type { + return t.properties[name] +} +func (t *objectType) Properties() map[string]Type { + return t.properties +} +func (t *objectType) ElementType() Type { + panic(fmt.Errorf("cannot get element type of kind %s", t.Kind())) +} +func (t *objectType) PipeArgument() string { + panic(fmt.Errorf("cannot get pipe argument name from kind %s", t.Kind())) +} +func (t *objectType) ReturnType() Type { + panic(fmt.Errorf("cannot get return type of kind %s", t.Kind())) +} +func (t *objectType) typ() {} + +func (t *objectType) equal(o *objectType) bool { + if t == o { + return true + } + + if len(t.properties) != len(o.properties) { + return false + } + + for k, vtyp := range t.properties { + ovtyp, ok := o.properties[k] + if !ok { + return false + } + if ovtyp != vtyp { + return false + } + } + return true +} + +// objectTypeCache caches all *objectTypes. +// +// Since objectTypes are identified by their properties, +// a hash is computed of the property names and kinds to reduce the search space. +var objectTypeCache struct { + sync.Mutex // Guards stores (but not loads) on m. + + // m is a map[uint32][]*objectType keyed by the hash calculated of the object's properties' name and kind. + // Elements in m are append-only and thus safe for concurrent reading. + m sync.Map +} + +// objectTypeOf returns the Type for the given ObjectExpression. 
+func objectTypeOf(e *ObjectExpression) Type { + propertyTypes := make(map[string]Type, len(e.Properties)) + for _, p := range e.Properties { + propertyTypes[p.Key.Name] = p.Value.Type() + } + + return NewObjectType(propertyTypes) +} + +var EmptyObject = NewObjectType(nil) + +func NewObjectType(propertyTypes map[string]Type) Type { + propertyNames := make([]string, 0, len(propertyTypes)) + for name := range propertyTypes { + propertyNames = append(propertyNames, name) + } + sort.Strings(propertyNames) + + sum := fnv.New32a() + for _, p := range propertyNames { + t := propertyTypes[p] + + // track hash of property names and kinds + sum.Write([]byte(p)) + binary.Write(sum, binary.LittleEndian, t.Kind()) + } + + // Create new object type + ot := &objectType{ + properties: propertyTypes, + } + + // Simple linear search after hash lookup + h := sum.Sum32() + if ts, ok := objectTypeCache.m.Load(h); ok { + for _, t := range ts.([]*objectType) { + if t.equal(ot) { + return t + } + } + } + + // Type not found in cache, lock and retry. + objectTypeCache.Lock() + defer objectTypeCache.Unlock() + + // First read again while holding the lock. + var types []*objectType + if ts, ok := objectTypeCache.m.Load(h); ok { + types = ts.([]*objectType) + for _, t := range types { + if t.equal(ot) { + return t + } + } + } + + // Make copy of properties since we can't trust that the source will not be modified + properties := make(map[string]Type) + for k, v := range ot.properties { + properties[k] = v + } + ot.properties = properties + + // Still no cache entry, add it. 
+ objectTypeCache.m.Store(h, append(types, ot)) + + return ot +} + +type functionType struct { + params map[string]Type + returnType Type + pipeArgument string +} + +func (t *functionType) String() string { + var buf bytes.Buffer + buf.Write([]byte("function(")) + for k, param := range t.params { + fmt.Fprintf(&buf, "%s:%v,", k, param) + } + fmt.Fprintf(&buf, ") %v", t.returnType) + + return buf.String() +} + +func (t *functionType) Kind() Kind { + return Function +} +func (t *functionType) PropertyType(name string) Type { + panic(fmt.Errorf("cannot get property type of kind %s", t.Kind())) +} +func (t *functionType) Properties() map[string]Type { + panic(fmt.Errorf("cannot get properties type of kind %s", t.Kind())) +} +func (t *functionType) ElementType() Type { + panic(fmt.Errorf("cannot get element type of kind %s", t.Kind())) +} +func (t *functionType) PipeArgument() string { + return t.pipeArgument +} +func (t *functionType) ReturnType() Type { + return t.returnType +} +func (t *functionType) typ() {} + +func (t *functionType) Params() map[string]Type { + return t.params +} + +func (t *functionType) equal(o *functionType) bool { + if t == o { + return true + } + + if t.returnType != o.returnType { + return false + } + + if len(t.params) != len(o.params) { + return false + } + + for k, pt := range t.params { + opt, ok := o.params[k] + if !ok { + return false + } + if opt != pt { + return false + } + } + + return true +} + +// functionTypeCache caches all *functionTypes. +// +// Since functionTypes are identified by their parameters and returnType, +// a hash is computed of the param names and kinds to reduce the search space. +var functionTypeCache struct { + sync.Mutex // Guards stores (but not loads) on m. + + // m is a map[uint32][]*functionType keyed by the hash calculated. + // Elements in m are append-only and thus safe for concurrent reading. + m sync.Map +} + +// functionTypeOf returns the Type for the given ObjectExpression. 
+func functionTypeOf(e *FunctionExpression) Type { + sig := FunctionSignature{} + sig.Params = make(map[string]Type, len(e.Params)) + for _, p := range e.Params { + sig.Params[p.Key.Name] = p.Type() + } + // Determine returnType + switch b := e.Body.(type) { + case Expression: + sig.ReturnType = b.Type() + case *BlockStatement: + rs := b.ReturnStatement() + sig.ReturnType = rs.Argument.Type() + } + for _, p := range e.Params { + if p.Piped { + sig.PipeArgument = p.Key.Name + break + } + } + return NewFunctionType(sig) +} + +type FunctionSignature struct { + Params map[string]Type + ReturnType Type + PipeArgument string +} + +func NewFunctionType(sig FunctionSignature) Type { + paramNames := make([]string, 0, len(sig.Params)) + for k := range sig.Params { + paramNames = append(paramNames, k) + } + sort.Strings(paramNames) + + sum := fnv.New32a() + sum.Write([]byte(sig.PipeArgument)) + for _, p := range paramNames { + // track hash of parameter names and kinds + sum.Write([]byte(p)) + // TODO(nathanielc): Include parameter type information + binary.Write(sum, binary.LittleEndian, sig.Params[p].Kind()) + } + + // Create new object type + ft := &functionType{ + params: sig.Params, + returnType: sig.ReturnType, + pipeArgument: sig.PipeArgument, + } + + // Simple linear search after hash lookup + h := sum.Sum32() + if ts, ok := functionTypeCache.m.Load(h); ok { + for _, t := range ts.([]*functionType) { + if t.equal(ft) { + return t + } + } + } + + // Type not found in cache, lock and retry. + functionTypeCache.Lock() + defer functionTypeCache.Unlock() + + // First read again while holding the lock. 
+ var types []*functionType + if ts, ok := functionTypeCache.m.Load(h); ok { + types = ts.([]*functionType) + for _, t := range types { + if t.equal(ft) { + return t + } + } + } + + // Make copy of Params since we can't trust the source is not modified + params := make(map[string]Type, len(ft.params)) + for k, v := range ft.params { + params[k] = v + } + ft.params = params + + // Still no cache entry, add it. + functionTypeCache.m.Store(h, append(types, ft)) + + return ft +} diff --git a/vendor/github.com/influxdata/ifql/semantic/types_test.go b/vendor/github.com/influxdata/ifql/semantic/types_test.go new file mode 100644 index 000000000..ff2470a53 --- /dev/null +++ b/vendor/github.com/influxdata/ifql/semantic/types_test.go @@ -0,0 +1,218 @@ +package semantic_test + +import ( + "testing" + + "github.com/influxdata/ifql/semantic" +) + +func TestTypes_Comparable(t *testing.T) { + testCases := []struct { + name string + a, b semantic.Type + want bool + }{ + { + name: "equal int", + a: semantic.Int, + b: semantic.Int, + want: true, + }, + { + name: "not equal int bool", + a: semantic.Int, + b: semantic.Bool, + want: false, + }, + { + name: "equal array", + a: (&semantic.ArrayExpression{ + Elements: []semantic.Expression{ + &semantic.IntegerLiteral{Value: 1}, + }, + }).Type(), + b: (&semantic.ArrayExpression{ + Elements: []semantic.Expression{ + &semantic.IntegerLiteral{Value: 2}, + }, + }).Type(), + want: true, + }, + { + name: "not equal arrays", + a: (&semantic.ArrayExpression{ + Elements: []semantic.Expression{ + &semantic.IntegerLiteral{Value: 1}, + }, + }).Type(), + b: (&semantic.ArrayExpression{ + Elements: []semantic.Expression{ + &semantic.BooleanLiteral{Value: true}, + }, + }).Type(), + want: false, + }, + { + name: "not equal arrays primitive", + a: (&semantic.ArrayExpression{ + Elements: []semantic.Expression{ + &semantic.IntegerLiteral{Value: 1}, + }, + }).Type(), + b: semantic.Int, + want: false, + }, + { + name: "not equal empty array primitive", + a: 
(&semantic.ArrayExpression{}).Type(), + b: semantic.Nil, + want: false, + }, + { + name: "equal empty arrays", + a: (&semantic.ArrayExpression{}).Type(), + b: (&semantic.ArrayExpression{}).Type(), + want: true, + }, + { + name: "equal arrays of arrays", + a: (&semantic.ArrayExpression{ + Elements: []semantic.Expression{ + &semantic.ArrayExpression{ + Elements: []semantic.Expression{ + &semantic.IntegerLiteral{Value: 1}, + }, + }, + }, + }).Type(), + b: (&semantic.ArrayExpression{ + Elements: []semantic.Expression{ + &semantic.ArrayExpression{ + Elements: []semantic.Expression{ + &semantic.IntegerLiteral{Value: 2}, + }, + }, + }, + }).Type(), + want: true, + }, + { + name: "equal objects", + a: (&semantic.ObjectExpression{ + Properties: []*semantic.Property{ + {Key: &semantic.Identifier{Name: "x"}, Value: &semantic.IntegerLiteral{Value: 1}}, + {Key: &semantic.Identifier{Name: "y"}, Value: &semantic.FloatLiteral{Value: 1}}, + }, + }).Type(), + b: (&semantic.ObjectExpression{ + Properties: []*semantic.Property{ + {Key: &semantic.Identifier{Name: "x"}, Value: &semantic.IntegerLiteral{Value: -1}}, + {Key: &semantic.Identifier{Name: "y"}, Value: &semantic.FloatLiteral{Value: -1}}, + }, + }).Type(), + want: true, + }, + { + name: "equal objects of objects", + a: (&semantic.ObjectExpression{ + Properties: []*semantic.Property{ + { + Key: &semantic.Identifier{Name: "x"}, + Value: &semantic.ObjectExpression{ + Properties: []*semantic.Property{ + {Key: &semantic.Identifier{Name: "m"}, Value: &semantic.IntegerLiteral{Value: 1}}, + {Key: &semantic.Identifier{Name: "n"}, Value: &semantic.FloatLiteral{Value: 1}}, + }, + }, + }, + { + Key: &semantic.Identifier{Name: "y"}, + Value: &semantic.ObjectExpression{ + Properties: []*semantic.Property{ + {Key: &semantic.Identifier{Name: "j"}, Value: &semantic.IntegerLiteral{Value: 1}}, + {Key: &semantic.Identifier{Name: "k"}, Value: &semantic.FloatLiteral{Value: 1}}, + }, + }, + }, + }, + }).Type(), + b: (&semantic.ObjectExpression{ + 
Properties: []*semantic.Property{ + { + Key: &semantic.Identifier{Name: "x"}, + Value: &semantic.ObjectExpression{ + Properties: []*semantic.Property{ + {Key: &semantic.Identifier{Name: "m"}, Value: &semantic.IntegerLiteral{Value: 3}}, + {Key: &semantic.Identifier{Name: "n"}, Value: &semantic.FloatLiteral{Value: 3}}, + }, + }, + }, + { + Key: &semantic.Identifier{Name: "y"}, + Value: &semantic.ObjectExpression{ + Properties: []*semantic.Property{ + {Key: &semantic.Identifier{Name: "j"}, Value: &semantic.IntegerLiteral{Value: 4}}, + {Key: &semantic.Identifier{Name: "k"}, Value: &semantic.FloatLiteral{Value: 4}}, + }, + }, + }, + }, + }).Type(), + want: true, + }, + { + name: "equal array of objects", + a: (&semantic.ArrayExpression{ + Elements: []semantic.Expression{&semantic.ObjectExpression{ + Properties: []*semantic.Property{ + {Key: &semantic.Identifier{Name: "x"}, Value: &semantic.IntegerLiteral{Value: 1}}, + {Key: &semantic.Identifier{Name: "y"}, Value: &semantic.FloatLiteral{Value: 1}}, + }, + }}, + }).Type(), + b: (&semantic.ArrayExpression{ + Elements: []semantic.Expression{&semantic.ObjectExpression{ + Properties: []*semantic.Property{ + {Key: &semantic.Identifier{Name: "x"}, Value: &semantic.IntegerLiteral{Value: 2}}, + {Key: &semantic.Identifier{Name: "y"}, Value: &semantic.FloatLiteral{Value: 2}}, + }, + }}, + }).Type(), + want: true, + }, + { + name: "not equal array of objects", + a: (&semantic.ArrayExpression{ + Elements: []semantic.Expression{&semantic.ObjectExpression{ + Properties: []*semantic.Property{ + {Key: &semantic.Identifier{Name: "x"}, Value: &semantic.IntegerLiteral{Value: 1}}, + {Key: &semantic.Identifier{Name: "y"}, Value: &semantic.FloatLiteral{Value: 1}}, + }, + }}, + }).Type(), + b: (&semantic.ArrayExpression{ + Elements: []semantic.Expression{&semantic.ObjectExpression{ + Properties: []*semantic.Property{ + {Key: &semantic.Identifier{Name: "w"}, Value: &semantic.IntegerLiteral{Value: 2}}, + {Key: &semantic.Identifier{Name: "x"}, 
Value: &semantic.FloatLiteral{Value: 2}}, + }, + }}, + }).Type(), + want: false, + }, + } + for _, tc := range testCases { + tc := tc + t.Run(tc.name, func(t *testing.T) { + if tc.want { + if tc.a != tc.b { + t.Errorf("expected types to be equal: pointers %p %p a: %#v b: %#v", tc.a, tc.b, tc.a, tc.b) + } + } else { + if tc.a == tc.b { + t.Errorf("expected types to not be equal: pointers %p %p a: %#v b: %#v", tc.a, tc.b, tc.a, tc.b) + } + } + }) + } +} diff --git a/vendor/github.com/influxdata/ifql/semantic/walk.go b/vendor/github.com/influxdata/ifql/semantic/walk.go new file mode 100644 index 000000000..683f3d4dd --- /dev/null +++ b/vendor/github.com/influxdata/ifql/semantic/walk.go @@ -0,0 +1,150 @@ +package semantic + +func Walk(v Visitor, node Node) { + walk(v, node) +} + +type Visitor interface { + Visit(node Node) Visitor + Done() +} + +func walk(v Visitor, n Node) { + defer v.Done() + switch n := n.(type) { + case *Program: + w := v.Visit(n) + if w != nil { + for _, s := range n.Body { + walk(w, s) + } + } + case *BlockStatement: + w := v.Visit(n) + if w != nil { + for _, s := range n.Body { + walk(w, s) + } + } + case *ExpressionStatement: + w := v.Visit(n) + if w != nil { + walk(w, n.Expression) + } + case *ReturnStatement: + w := v.Visit(n) + if w != nil { + walk(w, n.Argument) + } + case *NativeVariableDeclaration: + if n != nil { + w := v.Visit(n) + if w != nil { + walk(w, n.Identifier) + walk(w, n.Init) + } + } + case *ExternalVariableDeclaration: + if n != nil { + w := v.Visit(n) + if w != nil { + walk(w, n.Identifier) + } + } + case *ArrayExpression: + w := v.Visit(n) + if w != nil { + for _, e := range n.Elements { + walk(w, e) + } + } + case *FunctionExpression: + w := v.Visit(n) + if w != nil { + for _, p := range n.Params { + walk(w, p) + } + walk(w, n.Body) + } + case *BinaryExpression: + w := v.Visit(n) + if w != nil { + walk(w, n.Left) + walk(w, n.Right) + } + case *CallExpression: + w := v.Visit(n) + if w != nil { + walk(w, n.Callee) + walk(w, 
n.Arguments) + } + case *ConditionalExpression: + w := v.Visit(n) + if w != nil { + walk(w, n.Test) + walk(w, n.Alternate) + walk(w, n.Consequent) + } + case *IdentifierExpression: + w := v.Visit(n) + if w != nil { + walk(w, n.declaration) + } + case *LogicalExpression: + w := v.Visit(n) + if w != nil { + walk(w, n.Left) + walk(w, n.Right) + } + case *MemberExpression: + w := v.Visit(n) + if w != nil { + walk(w, n.Object) + } + case *ObjectExpression: + w := v.Visit(n) + if w != nil { + for _, p := range n.Properties { + walk(w, p) + } + } + case *UnaryExpression: + w := v.Visit(n) + if w != nil { + walk(w, n.Argument) + } + case *Identifier: + v.Visit(n) + case *Property: + w := v.Visit(n) + if w != nil { + walk(w, n.Key) + walk(w, n.Value) + } + case *FunctionParam: + w := v.Visit(n) + if w != nil { + walk(w, n.Key) + walk(w, n.Default) + if n.declaration != nil { + walk(w, n.declaration) + } + } + case *BooleanLiteral: + v.Visit(n) + case *DateTimeLiteral: + v.Visit(n) + case *DurationLiteral: + v.Visit(n) + case *FloatLiteral: + v.Visit(n) + case *IntegerLiteral: + v.Visit(n) + case *RegexpLiteral: + v.Visit(n) + case *StringLiteral: + v.Visit(n) + case *UnsignedIntegerLiteral: + v.Visit(n) + } +} diff --git a/vendor/github.com/influxdata/tdigest/.gitignore b/vendor/github.com/influxdata/tdigest/.gitignore new file mode 100644 index 000000000..098cc7e37 --- /dev/null +++ b/vendor/github.com/influxdata/tdigest/.gitignore @@ -0,0 +1 @@ +/test/*.dat* diff --git a/vendor/github.com/influxdata/tdigest/LICENSE b/vendor/github.com/influxdata/tdigest/LICENSE new file mode 100644 index 000000000..ebb2bfb1a --- /dev/null +++ b/vendor/github.com/influxdata/tdigest/LICENSE @@ -0,0 +1,202 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. 
+ + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. 
+ + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of 
the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright 2018 InfluxData Inc. + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. + diff --git a/vendor/github.com/influxdata/tdigest/README.md b/vendor/github.com/influxdata/tdigest/README.md new file mode 100644 index 000000000..ea1fab0c2 --- /dev/null +++ b/vendor/github.com/influxdata/tdigest/README.md @@ -0,0 +1,42 @@ +# tdigest + +This is an implementation of Ted Dunning's [t-digest](https://github.com/tdunning/t-digest/) in Go. + +The implementaion is based off [Derrick Burns' C++ implementation](https://github.com/derrickburns/tdigest). 
+ +## Example + +```go +package main + +import ( + "log" + + "github.com/influxdata/tdigest" +) + +func main() { + td := tdigest.NewWithCompression(1000) + for _, x := range []float64{1, 2, 3, 4, 5, 5, 4, 3, 2, 1} { + td.Add(x, 1) + } + + // Compute Quantiles + log.Println("50th", td.Quantile(0.5)) + log.Println("75th", td.Quantile(0.75)) + log.Println("90th", td.Quantile(0.9)) + log.Println("99th", td.Quantile(0.99)) + + // Compute CDFs + log.Println("CDF(1) = ", td.CDF(1)) + log.Println("CDF(2) = ", td.CDF(2)) + log.Println("CDF(3) = ", td.CDF(3)) + log.Println("CDF(4) = ", td.CDF(4)) + log.Println("CDF(5) = ", td.CDF(5)) +} +``` + +## TODO + +Only the methods for a single TDigest have been implemented. +The methods to merge two or more existing t-digests into a single t-digest have yet to be implemented. diff --git a/vendor/github.com/influxdata/tdigest/centroid.go b/vendor/github.com/influxdata/tdigest/centroid.go new file mode 100644 index 000000000..b79cada72 --- /dev/null +++ b/vendor/github.com/influxdata/tdigest/centroid.go @@ -0,0 +1,59 @@ +package tdigest + +import ( + "fmt" + "sort" +) + +// ErrWeightLessThanZero is used when the weight is not able to be processed. 
+const ErrWeightLessThanZero = Error("centroid weight cannot be less than zero") + +// Error is a domain error encountered while processing tdigests +type Error string + +func (e Error) Error() string { + return string(e) +} + +// Centroid average position of all points in a shape +type Centroid struct { + Mean float64 + Weight float64 +} + +func (c *Centroid) String() string { + return fmt.Sprintf("{mean: %f weight: %f}", c.Mean, c.Weight) +} + +// Add averages the two centroids together and update this centroid +func (c *Centroid) Add(r Centroid) error { + if r.Weight < 0 { + return ErrWeightLessThanZero + } + if c.Weight != 0 { + c.Weight += r.Weight + c.Mean += r.Weight * (r.Mean - c.Mean) / c.Weight + } else { + c.Weight = r.Weight + c.Mean = r.Mean + } + return nil +} + +// CentroidList is sorted by the Mean of the centroid, ascending. +type CentroidList []Centroid + +func (l *CentroidList) Clear() { + *l = (*l)[0:0] +} + +func (l CentroidList) Len() int { return len(l) } +func (l CentroidList) Less(i, j int) bool { return l[i].Mean < l[j].Mean } +func (l CentroidList) Swap(i, j int) { l[i], l[j] = l[j], l[i] } + +// NewCentroidList creates a priority queue for the centroids +func NewCentroidList(centroids []Centroid) CentroidList { + l := CentroidList(centroids) + sort.Sort(l) + return l +} diff --git a/vendor/github.com/influxdata/tdigest/centroid_test.go b/vendor/github.com/influxdata/tdigest/centroid_test.go new file mode 100644 index 000000000..5a5866788 --- /dev/null +++ b/vendor/github.com/influxdata/tdigest/centroid_test.go @@ -0,0 +1,122 @@ +package tdigest_test + +import ( + "testing" + + "github.com/google/go-cmp/cmp" + "github.com/influxdata/tdigest" +) + +func TestCentroid_Add(t *testing.T) { + tests := []struct { + name string + c tdigest.Centroid + r tdigest.Centroid + want tdigest.Centroid + wantErr bool + errStr string + }{ + { + name: "error when weight is zero", + r: tdigest.Centroid{ + Weight: -1.0, + }, + wantErr: true, + errStr: 
"centroid weight cannot be less than zero", + }, + { + name: "zero weight", + c: tdigest.Centroid{ + Weight: 0.0, + Mean: 1.0, + }, + r: tdigest.Centroid{ + Weight: 1.0, + Mean: 2.0, + }, + want: tdigest.Centroid{ + Weight: 1.0, + Mean: 2.0, + }, + }, + { + name: "weight order of magnitude", + c: tdigest.Centroid{ + Weight: 1, + Mean: 1, + }, + r: tdigest.Centroid{ + Weight: 10, + Mean: 10, + }, + want: tdigest.Centroid{ + Weight: 11, + Mean: 9.181818181818182, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + c := &tt.c + if err := c.Add(tt.r); (err != nil) != tt.wantErr { + t.Errorf("Centroid.Add() error = %v, wantErr %v", err, tt.wantErr) + } else if tt.wantErr && err.Error() != tt.errStr { + t.Errorf("Centroid.Add() error.Error() = %s, errStr %v", err.Error(), tt.errStr) + } + if !cmp.Equal(tt.c, tt.want) { + t.Errorf("unexprected centroid -want/+got\n%s", cmp.Diff(tt.want, tt.c)) + } + }) + } +} + +func TestNewCentroidList(t *testing.T) { + tests := []struct { + name string + centroids []tdigest.Centroid + want tdigest.CentroidList + }{ + { + name: "empty list", + }, + { + name: "priority should be by mean ascending", + centroids: []tdigest.Centroid{ + { + Mean: 2.0, + }, + { + Mean: 1.0, + }, + }, + want: tdigest.CentroidList{ + { + Mean: 1.0, + }, + { + Mean: 2.0, + }, + }, + }, + { + name: "single element should be identity", + centroids: []tdigest.Centroid{ + { + Mean: 1.0, + }, + }, + want: tdigest.CentroidList{ + { + Mean: 1.0, + }, + }, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + if got := tdigest.NewCentroidList(tt.centroids); !cmp.Equal(tt.want, got) { + t.Errorf("NewCentroidList() = -want/+got %s", cmp.Diff(tt.want, got)) + } + }) + } +} diff --git a/vendor/github.com/influxdata/tdigest/tdigest.go b/vendor/github.com/influxdata/tdigest/tdigest.go new file mode 100644 index 000000000..6064e4864 --- /dev/null +++ b/vendor/github.com/influxdata/tdigest/tdigest.go @@ -0,0 +1,229 @@ 
+package tdigest + +import ( + "math" + "sort" +) + +type TDigest struct { + Compression float64 + + maxProcessed int + maxUnprocessed int + processed CentroidList + unprocessed CentroidList + cumulative []float64 + processedWeight float64 + unprocessedWeight float64 + min float64 + max float64 +} + +func New() *TDigest { + return NewWithCompression(1000) +} +func NewWithCompression(c float64) *TDigest { + t := &TDigest{ + Compression: c, + } + t.maxProcessed = processedSize(0, t.Compression) + t.maxUnprocessed = unprocessedSize(0, t.Compression) + t.processed = make([]Centroid, 0, t.maxProcessed) + t.unprocessed = make([]Centroid, 0, t.maxUnprocessed+1) + t.min = math.MaxFloat64 + t.max = -math.MaxFloat64 + return t +} + +func (t *TDigest) Add(x, w float64) { + if math.IsNaN(x) { + return + } + t.AddCentroid(Centroid{Mean: x, Weight: w}) +} + +func (t *TDigest) AddCentroidList(c CentroidList) { + l := c.Len() + for i := 0; i < l; i++ { + diff := l - i + room := t.maxUnprocessed - t.unprocessed.Len() + mid := i + diff + if room < diff { + mid = i + room + } + for i < mid { + t.AddCentroid(c[i]) + i++ + } + } +} + +func (t *TDigest) AddCentroid(c Centroid) { + t.unprocessed = append(t.unprocessed, c) + t.unprocessedWeight += c.Weight + + if t.processed.Len() > t.maxProcessed || + t.unprocessed.Len() > t.maxUnprocessed { + t.process() + } +} + +func (t *TDigest) process() { + if t.unprocessed.Len() > 0 || + t.processed.Len() > t.maxProcessed { + + // Append all processed centroids to the unprocessed list and sort + t.unprocessed = append(t.unprocessed, t.processed...) 
+ sort.Sort(&t.unprocessed) + + // Reset processed list with first centroid + t.processed.Clear() + t.processed = append(t.processed, t.unprocessed[0]) + + t.processedWeight += t.unprocessedWeight + t.unprocessedWeight = 0 + soFar := t.unprocessed[0].Weight + limit := t.processedWeight * t.integratedQ(1.0) + for _, centroid := range t.unprocessed[1:] { + projected := soFar + centroid.Weight + if projected <= limit { + soFar = projected + (&t.processed[t.processed.Len()-1]).Add(centroid) + } else { + k1 := t.integratedLocation(soFar / t.processedWeight) + limit = t.processedWeight * t.integratedQ(k1+1.0) + soFar += centroid.Weight + t.processed = append(t.processed, centroid) + } + } + t.min = math.Min(t.min, t.processed[0].Mean) + t.max = math.Max(t.max, t.processed[t.processed.Len()-1].Mean) + t.updateCumulative() + t.unprocessed.Clear() + } +} + +func (t *TDigest) updateCumulative() { + t.cumulative = make([]float64, t.processed.Len()+1) + prev := 0.0 + for i, centroid := range t.processed { + cur := centroid.Weight + t.cumulative[i] = prev + cur/2.0 + prev = prev + cur + } + t.cumulative[t.processed.Len()] = prev +} + +func (t *TDigest) Quantile(q float64) float64 { + t.process() + if q < 0 || q > 1 || t.processed.Len() == 0 { + return math.NaN() + } + if t.processed.Len() == 1 { + return t.processed[0].Mean + } + index := q * t.processedWeight + if index < t.processed[0].Weight/2.0 { + return t.min + 2.0*index/t.processed[0].Weight*(t.processed[0].Mean-t.min) + } + + lower := sort.Search(len(t.cumulative), func(i int) bool { + return t.cumulative[i] >= index + }) + + if lower+1 != len(t.cumulative) { + z1 := index - t.cumulative[lower-1] + z2 := t.cumulative[lower] - index + return weightedAverage(t.processed[lower-1].Mean, z2, t.processed[lower].Mean, z1) + } + + z1 := index - t.processedWeight - t.processed[lower-1].Weight/2.0 + z2 := (t.processed[lower-1].Weight / 2.0) - z1 + return weightedAverage(t.processed[t.processed.Len()-1].Mean, z1, t.max, z2) +} + 
+func (t *TDigest) CDF(x float64) float64 { + t.process() + switch t.processed.Len() { + case 0: + return 0.0 + case 1: + width := t.max - t.min + if x <= t.min { + return 0.0 + } + if x >= t.max { + return 1.0 + } + if (x - t.min) <= width { + // min and max are too close together to do any viable interpolation + return 0.5 + } + return (x - t.min) / width + } + + if x <= t.min { + return 0.0 + } + if x >= t.max { + return 1.0 + } + m0 := t.processed[0].Mean + // Left Tail + if x <= m0 { + if m0-t.min > 0 { + return (x - t.min) / (m0 - t.min) * t.processed[0].Weight / t.processedWeight / 2.0 + } + return 0.0 + } + // Right Tail + mn := t.processed[t.processed.Len()-1].Mean + if x >= mn { + if t.max-mn > 0.0 { + return 1.0 - (t.max-x)/(t.max-mn)*t.processed[t.processed.Len()-1].Weight/t.processedWeight/2.0 + } + return 1.0 + } + + upper := sort.Search(t.processed.Len(), func(i int) bool { + return t.processed[i].Mean > x + }) + + z1 := x - t.processed[upper-1].Mean + z2 := t.processed[upper].Mean - x + return weightedAverage(t.cumulative[upper-1], z2, t.cumulative[upper], z1) / t.processedWeight +} + +func (t *TDigest) integratedQ(k float64) float64 { + return (math.Sin(math.Min(k, t.Compression)*math.Pi/t.Compression-math.Pi/2.0) + 1.0) / 2.0 +} + +func (t *TDigest) integratedLocation(q float64) float64 { + return t.Compression * (math.Asin(2.0*q-1.0) + math.Pi/2.0) / math.Pi +} + +func weightedAverage(x1, w1, x2, w2 float64) float64 { + if x1 <= x2 { + return weightedAverageSorted(x1, w1, x2, w2) + } + return weightedAverageSorted(x2, w2, x1, w1) +} + +func weightedAverageSorted(x1, w1, x2, w2 float64) float64 { + x := (x1*w1 + x2*w2) / (w1 + w2) + return math.Max(x1, math.Min(x, x2)) +} + +func processedSize(size int, compression float64) int { + if size == 0 { + return int(2 * math.Ceil(compression)) + } + return size +} + +func unprocessedSize(size int, compression float64) int { + if size == 0 { + return int(8 * math.Ceil(compression)) + } + return size +} 
diff --git a/vendor/github.com/influxdata/tdigest/tdigest_test.go b/vendor/github.com/influxdata/tdigest/tdigest_test.go new file mode 100644 index 000000000..46ead7259 --- /dev/null +++ b/vendor/github.com/influxdata/tdigest/tdigest_test.go @@ -0,0 +1,243 @@ +package tdigest_test + +import ( + "math/rand" + "testing" + + "github.com/gonum/stat/distuv" + "github.com/influxdata/tdigest" +) + +const ( + N = 1e6 + Mu = 10 + Sigma = 3 + + seed = 42 +) + +// NormalData is a slice of N random values that are normaly distributed with mean Mu and standard deviation Sigma. +var NormalData []float64 +var UniformData []float64 + +var NormalDigest *tdigest.TDigest +var UniformDigest *tdigest.TDigest + +func init() { + dist := distuv.Normal{ + Mu: Mu, + Sigma: Sigma, + Source: rand.New(rand.NewSource(seed)), + } + uniform := rand.New(rand.NewSource(seed)) + + UniformData = make([]float64, N) + UniformDigest = tdigest.NewWithCompression(1000) + + NormalData = make([]float64, N) + NormalDigest = tdigest.NewWithCompression(1000) + + for i := range NormalData { + NormalData[i] = dist.Rand() + NormalDigest.Add(NormalData[i], 1) + + UniformData[i] = uniform.Float64() * 100 + UniformDigest.Add(UniformData[i], 1) + } +} + +func TestTdigest_Quantile(t *testing.T) { + tests := []struct { + name string + data []float64 + digest *tdigest.TDigest + quantile float64 + want float64 + }{ + { + name: "increasing", + quantile: 0.5, + data: []float64{1, 2, 3, 4, 5}, + want: 3, + }, + { + name: "small", + quantile: 0.5, + data: []float64{1, 2, 3, 4, 5, 5, 4, 3, 2, 1}, + want: 3, + }, + { + name: "small 99 (max)", + quantile: 0.99, + data: []float64{1, 2, 3, 4, 5, 5, 4, 3, 2, 1}, + want: 5, + }, + { + name: "normal 50", + quantile: 0.5, + digest: NormalDigest, + want: 9.997821231634168, + }, + { + name: "normal 90", + quantile: 0.9, + digest: NormalDigest, + want: 13.843815760607427, + }, + { + name: "uniform 50", + quantile: 0.5, + digest: UniformDigest, + want: 50.02682856274754, + }, + { + name: 
"uniform 90", + quantile: 0.9, + digest: UniformDigest, + want: 90.02117754660424, + }, + { + name: "uniform 99", + quantile: 0.99, + digest: UniformDigest, + want: 99.00246731511771, + }, + { + name: "uniform 99.9", + quantile: 0.999, + digest: UniformDigest, + want: 99.90178495422307, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + td := tt.digest + if td == nil { + td = tdigest.NewWithCompression(1000) + for _, x := range tt.data { + td.Add(x, 1) + } + } + got := td.Quantile(tt.quantile) + if got != tt.want { + t.Errorf("unexpected quantile %f, got %g want %g", tt.quantile, got, tt.want) + } + }) + } +} + +func TestTdigest_CDFs(t *testing.T) { + tests := []struct { + name string + data []float64 + digest *tdigest.TDigest + cdf float64 + want float64 + }{ + { + name: "increasing", + cdf: 3, + data: []float64{1, 2, 3, 4, 5}, + want: 0.5, + }, + { + name: "small", + cdf: 4, + data: []float64{1, 2, 3, 4, 5, 5, 4, 3, 2, 1}, + want: 0.75, + }, + { + name: "small max", + cdf: 5, + data: []float64{1, 2, 3, 4, 5, 5, 4, 3, 2, 1}, + want: 1, + }, + { + name: "normal mean", + cdf: 10, + data: NormalData, + want: 0.500298235578106, + }, + { + name: "normal high", + cdf: -100, + data: NormalData, + want: 0, + }, + { + name: "normal low", + cdf: 110, + data: NormalData, + want: 1, + }, + { + name: "uniform 50", + cdf: 50, + data: UniformData, + want: 0.49972989818712815, + }, + { + name: "uniform min", + cdf: 0, + data: UniformData, + want: 0, + }, + { + name: "uniform max", + cdf: 100, + data: UniformData, + want: 1, + }, + { + name: "uniform 10", + cdf: 10, + data: UniformData, + want: 0.099715527526992, + }, + { + name: "uniform 90", + cdf: 90, + data: UniformData, + want: 0.8997838903965611, + }, + } + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + td := tt.digest + if td == nil { + td = tdigest.NewWithCompression(1000) + for _, x := range tt.data { + td.Add(x, 1) + } + } + got := td.CDF(tt.cdf) + if got != tt.want { + 
t.Errorf("unexpected CDF %f, got %g want %g", tt.cdf, got, tt.want) + } + }) + } +} + +var quantiles = []float64{0.1, 0.5, 0.9, 0.99, 0.999} + +func BenchmarkTDigest_Add(b *testing.B) { + for n := 0; n < b.N; n++ { + td := tdigest.NewWithCompression(1000) + for _, x := range NormalData { + td.Add(x, 1) + } + } +} +func BenchmarkTDigest_Quantile(b *testing.B) { + td := tdigest.NewWithCompression(1000) + for _, x := range NormalData { + td.Add(x, 1) + } + b.ResetTimer() + var x float64 + for n := 0; n < b.N; n++ { + for _, q := range quantiles { + x += td.Quantile(q) + } + } +} diff --git a/vendor/github.com/influxdata/yamux/.gitignore b/vendor/github.com/influxdata/yamux/.gitignore new file mode 100644 index 000000000..836562412 --- /dev/null +++ b/vendor/github.com/influxdata/yamux/.gitignore @@ -0,0 +1,23 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe +*.test diff --git a/vendor/github.com/influxdata/yamux/LICENSE b/vendor/github.com/influxdata/yamux/LICENSE new file mode 100644 index 000000000..f0e5c79e1 --- /dev/null +++ b/vendor/github.com/influxdata/yamux/LICENSE @@ -0,0 +1,362 @@ +Mozilla Public License, version 2.0 + +1. Definitions + +1.1. "Contributor" + + means each individual or legal entity that creates, contributes to the + creation of, or owns Covered Software. + +1.2. "Contributor Version" + + means the combination of the Contributions of others (if any) used by a + Contributor and that particular Contributor's Contribution. + +1.3. "Contribution" + + means Covered Software of a particular Contributor. + +1.4. 
"Covered Software" + + means Source Code Form to which the initial Contributor has attached the + notice in Exhibit A, the Executable Form of such Source Code Form, and + Modifications of such Source Code Form, in each case including portions + thereof. + +1.5. "Incompatible With Secondary Licenses" + means + + a. that the initial Contributor has attached the notice described in + Exhibit B to the Covered Software; or + + b. that the Covered Software was made available under the terms of + version 1.1 or earlier of the License, but not also under the terms of + a Secondary License. + +1.6. "Executable Form" + + means any form of the work other than Source Code Form. + +1.7. "Larger Work" + + means a work that combines Covered Software with other material, in a + separate file or files, that is not Covered Software. + +1.8. "License" + + means this document. + +1.9. "Licensable" + + means having the right to grant, to the maximum extent possible, whether + at the time of the initial grant or subsequently, any and all of the + rights conveyed by this License. + +1.10. "Modifications" + + means any of the following: + + a. any file in Source Code Form that results from an addition to, + deletion from, or modification of the contents of Covered Software; or + + b. any new file in Source Code Form that contains any Covered Software. + +1.11. "Patent Claims" of a Contributor + + means any patent claim(s), including without limitation, method, + process, and apparatus claims, in any patent Licensable by such + Contributor that would be infringed, but for the grant of the License, + by the making, using, selling, offering for sale, having made, import, + or transfer of either its Contributions or its Contributor Version. + +1.12. "Secondary License" + + means either the GNU General Public License, Version 2.0, the GNU Lesser + General Public License, Version 2.1, the GNU Affero General Public + License, Version 3.0, or any later versions of those licenses. + +1.13. 
"Source Code Form" + + means the form of the work preferred for making modifications. + +1.14. "You" (or "Your") + + means an individual or a legal entity exercising rights under this + License. For legal entities, "You" includes any entity that controls, is + controlled by, or is under common control with You. For purposes of this + definition, "control" means (a) the power, direct or indirect, to cause + the direction or management of such entity, whether by contract or + otherwise, or (b) ownership of more than fifty percent (50%) of the + outstanding shares or beneficial ownership of such entity. + + +2. License Grants and Conditions + +2.1. Grants + + Each Contributor hereby grants You a world-wide, royalty-free, + non-exclusive license: + + a. under intellectual property rights (other than patent or trademark) + Licensable by such Contributor to use, reproduce, make available, + modify, display, perform, distribute, and otherwise exploit its + Contributions, either on an unmodified basis, with Modifications, or + as part of a Larger Work; and + + b. under Patent Claims of such Contributor to make, use, sell, offer for + sale, have made, import, and otherwise transfer either its + Contributions or its Contributor Version. + +2.2. Effective Date + + The licenses granted in Section 2.1 with respect to any Contribution + become effective for each Contribution on the date the Contributor first + distributes such Contribution. + +2.3. Limitations on Grant Scope + + The licenses granted in this Section 2 are the only rights granted under + this License. No additional rights or licenses will be implied from the + distribution or licensing of Covered Software under this License. + Notwithstanding Section 2.1(b) above, no patent license is granted by a + Contributor: + + a. for any code that a Contributor has removed from Covered Software; or + + b. 
for infringements caused by: (i) Your and any other third party's + modifications of Covered Software, or (ii) the combination of its + Contributions with other software (except as part of its Contributor + Version); or + + c. under Patent Claims infringed by Covered Software in the absence of + its Contributions. + + This License does not grant any rights in the trademarks, service marks, + or logos of any Contributor (except as may be necessary to comply with + the notice requirements in Section 3.4). + +2.4. Subsequent Licenses + + No Contributor makes additional grants as a result of Your choice to + distribute the Covered Software under a subsequent version of this + License (see Section 10.2) or under the terms of a Secondary License (if + permitted under the terms of Section 3.3). + +2.5. Representation + + Each Contributor represents that the Contributor believes its + Contributions are its original creation(s) or it has sufficient rights to + grant the rights to its Contributions conveyed by this License. + +2.6. Fair Use + + This License is not intended to limit any rights You have under + applicable copyright doctrines of fair use, fair dealing, or other + equivalents. + +2.7. Conditions + + Sections 3.1, 3.2, 3.3, and 3.4 are conditions of the licenses granted in + Section 2.1. + + +3. Responsibilities + +3.1. Distribution of Source Form + + All distribution of Covered Software in Source Code Form, including any + Modifications that You create or to which You contribute, must be under + the terms of this License. You must inform recipients that the Source + Code Form of the Covered Software is governed by the terms of this + License, and how they can obtain a copy of this License. You may not + attempt to alter or restrict the recipients' rights in the Source Code + Form. + +3.2. Distribution of Executable Form + + If You distribute Covered Software in Executable Form then: + + a. 
such Covered Software must also be made available in Source Code Form, + as described in Section 3.1, and You must inform recipients of the + Executable Form how they can obtain a copy of such Source Code Form by + reasonable means in a timely manner, at a charge no more than the cost + of distribution to the recipient; and + + b. You may distribute such Executable Form under the terms of this + License, or sublicense it under different terms, provided that the + license for the Executable Form does not attempt to limit or alter the + recipients' rights in the Source Code Form under this License. + +3.3. Distribution of a Larger Work + + You may create and distribute a Larger Work under terms of Your choice, + provided that You also comply with the requirements of this License for + the Covered Software. If the Larger Work is a combination of Covered + Software with a work governed by one or more Secondary Licenses, and the + Covered Software is not Incompatible With Secondary Licenses, this + License permits You to additionally distribute such Covered Software + under the terms of such Secondary License(s), so that the recipient of + the Larger Work may, at their option, further distribute the Covered + Software under the terms of either this License or such Secondary + License(s). + +3.4. Notices + + You may not remove or alter the substance of any license notices + (including copyright notices, patent notices, disclaimers of warranty, or + limitations of liability) contained within the Source Code Form of the + Covered Software, except that You may alter any license notices to the + extent required to remedy known factual inaccuracies. + +3.5. Application of Additional Terms + + You may choose to offer, and to charge a fee for, warranty, support, + indemnity or liability obligations to one or more recipients of Covered + Software. However, You may do so only on Your own behalf, and not on + behalf of any Contributor. 
You must make it absolutely clear that any + such warranty, support, indemnity, or liability obligation is offered by + You alone, and You hereby agree to indemnify every Contributor for any + liability incurred by such Contributor as a result of warranty, support, + indemnity or liability terms You offer. You may include additional + disclaimers of warranty and limitations of liability specific to any + jurisdiction. + +4. Inability to Comply Due to Statute or Regulation + + If it is impossible for You to comply with any of the terms of this License + with respect to some or all of the Covered Software due to statute, + judicial order, or regulation then You must: (a) comply with the terms of + this License to the maximum extent possible; and (b) describe the + limitations and the code they affect. Such description must be placed in a + text file included with all distributions of the Covered Software under + this License. Except to the extent prohibited by statute or regulation, + such description must be sufficiently detailed for a recipient of ordinary + skill to be able to understand it. + +5. Termination + +5.1. The rights granted under this License will terminate automatically if You + fail to comply with any of its terms. However, if You become compliant, + then the rights granted under this License from a particular Contributor + are reinstated (a) provisionally, unless and until such Contributor + explicitly and finally terminates Your grants, and (b) on an ongoing + basis, if such Contributor fails to notify You of the non-compliance by + some reasonable means prior to 60 days after You have come back into + compliance. 
Moreover, Your grants from a particular Contributor are + reinstated on an ongoing basis if such Contributor notifies You of the + non-compliance by some reasonable means, this is the first time You have + received notice of non-compliance with this License from such + Contributor, and You become compliant prior to 30 days after Your receipt + of the notice. + +5.2. If You initiate litigation against any entity by asserting a patent + infringement claim (excluding declaratory judgment actions, + counter-claims, and cross-claims) alleging that a Contributor Version + directly or indirectly infringes any patent, then the rights granted to + You by any and all Contributors for the Covered Software under Section + 2.1 of this License shall terminate. + +5.3. In the event of termination under Sections 5.1 or 5.2 above, all end user + license agreements (excluding distributors and resellers) which have been + validly granted by You or Your distributors under this License prior to + termination shall survive termination. + +6. Disclaimer of Warranty + + Covered Software is provided under this License on an "as is" basis, + without warranty of any kind, either expressed, implied, or statutory, + including, without limitation, warranties that the Covered Software is free + of defects, merchantable, fit for a particular purpose or non-infringing. + The entire risk as to the quality and performance of the Covered Software + is with You. Should any Covered Software prove defective in any respect, + You (not any Contributor) assume the cost of any necessary servicing, + repair, or correction. This disclaimer of warranty constitutes an essential + part of this License. No use of any Covered Software is authorized under + this License except under this disclaimer. + +7. 
Limitation of Liability + + Under no circumstances and under no legal theory, whether tort (including + negligence), contract, or otherwise, shall any Contributor, or anyone who + distributes Covered Software as permitted above, be liable to You for any + direct, indirect, special, incidental, or consequential damages of any + character including, without limitation, damages for lost profits, loss of + goodwill, work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses, even if such party shall have been + informed of the possibility of such damages. This limitation of liability + shall not apply to liability for death or personal injury resulting from + such party's negligence to the extent applicable law prohibits such + limitation. Some jurisdictions do not allow the exclusion or limitation of + incidental or consequential damages, so this exclusion and limitation may + not apply to You. + +8. Litigation + + Any litigation relating to this License may be brought only in the courts + of a jurisdiction where the defendant maintains its principal place of + business and such litigation shall be governed by laws of that + jurisdiction, without reference to its conflict-of-law provisions. Nothing + in this Section shall prevent a party's ability to bring cross-claims or + counter-claims. + +9. Miscellaneous + + This License represents the complete agreement concerning the subject + matter hereof. If any provision of this License is held to be + unenforceable, such provision shall be reformed only to the extent + necessary to make it enforceable. Any law or regulation which provides that + the language of a contract shall be construed against the drafter shall not + be used to construe this License against a Contributor. + + +10. Versions of the License + +10.1. New Versions + + Mozilla Foundation is the license steward. 
Except as provided in Section + 10.3, no one other than the license steward has the right to modify or + publish new versions of this License. Each version will be given a + distinguishing version number. + +10.2. Effect of New Versions + + You may distribute the Covered Software under the terms of the version + of the License under which You originally received the Covered Software, + or under the terms of any subsequent version published by the license + steward. + +10.3. Modified Versions + + If you create software not governed by this License, and you want to + create a new license for such software, you may create and use a + modified version of this License if you rename the license and remove + any references to the name of the license steward (except to note that + such modified license differs from this License). + +10.4. Distributing Source Code Form that is Incompatible With Secondary + Licenses If You choose to distribute Source Code Form that is + Incompatible With Secondary Licenses under the terms of this version of + the License, the notice described in Exhibit B of this License must be + attached. + +Exhibit A - Source Code Form License Notice + + This Source Code Form is subject to the + terms of the Mozilla Public License, v. + 2.0. If a copy of the MPL was not + distributed with this file, You can + obtain one at + http://mozilla.org/MPL/2.0/. + +If it is not possible or desirable to put the notice in a particular file, +then You may include the notice in a location (such as a LICENSE file in a +relevant directory) where a recipient would be likely to look for such a +notice. + +You may add additional accurate notices of copyright ownership. + +Exhibit B - "Incompatible With Secondary Licenses" Notice + + This Source Code Form is "Incompatible + With Secondary Licenses", as defined by + the Mozilla Public License, v. 2.0. 
\ No newline at end of file diff --git a/vendor/github.com/influxdata/yamux/README.md b/vendor/github.com/influxdata/yamux/README.md new file mode 100644 index 000000000..d4db7fc99 --- /dev/null +++ b/vendor/github.com/influxdata/yamux/README.md @@ -0,0 +1,86 @@ +# Yamux + +Yamux (Yet another Multiplexer) is a multiplexing library for Golang. +It relies on an underlying connection to provide reliability +and ordering, such as TCP or Unix domain sockets, and provides +stream-oriented multiplexing. It is inspired by SPDY but is not +interoperable with it. + +Yamux features include: + +* Bi-directional streams + * Streams can be opened by either client or server + * Useful for NAT traversal + * Server-side push support +* Flow control + * Avoid starvation + * Back-pressure to prevent overwhelming a receiver +* Keep Alives + * Enables persistent connections over a load balancer +* Efficient + * Enables thousands of logical streams with low overhead + +## Documentation + +For complete documentation, see the associated [Godoc](http://godoc.org/github.com/hashicorp/yamux). + +## Specification + +The full specification for Yamux is provided in the `spec.md` file. +It can be used as a guide to implementors of interoperable libraries. + +## Usage + +Using Yamux is remarkably simple: + +```go + +func client() { + // Get a TCP connection + conn, err := net.Dial(...) 
+ if err != nil { + panic(err) + } + + // Setup client side of yamux + session, err := yamux.Client(conn, nil) + if err != nil { + panic(err) + } + + // Open a new stream + stream, err := session.Open() + if err != nil { + panic(err) + } + + // Stream implements net.Conn + stream.Write([]byte("ping")) +} + +func server() { + // Accept a TCP connection + conn, err := listener.Accept() + if err != nil { + panic(err) + } + + // Setup server side of yamux + session, err := yamux.Server(conn, nil) + if err != nil { + panic(err) + } + + // Accept a stream + stream, err := session.Accept() + if err != nil { + panic(err) + } + + // Listen for a message + buf := make([]byte, 4) + stream.Read(buf) +} + +``` + diff --git a/vendor/github.com/influxdata/yamux/addr.go b/vendor/github.com/influxdata/yamux/addr.go new file mode 100644 index 000000000..be6ebca9c --- /dev/null +++ b/vendor/github.com/influxdata/yamux/addr.go @@ -0,0 +1,60 @@ +package yamux + +import ( + "fmt" + "net" +) + +// hasAddr is used to get the address from the underlying connection +type hasAddr interface { + LocalAddr() net.Addr + RemoteAddr() net.Addr +} + +// yamuxAddr is used when we cannot get the underlying address +type yamuxAddr struct { + Addr string +} + +func (*yamuxAddr) Network() string { + return "yamux" +} + +func (y *yamuxAddr) String() string { + return fmt.Sprintf("yamux:%s", y.Addr) +} + +// Addr is used to get the address of the listener. +func (s *Session) Addr() net.Addr { + return s.LocalAddr() +} + +// LocalAddr is used to get the local address of the +// underlying connection. 
+func (s *Session) LocalAddr() net.Addr { + addr, ok := s.conn.(hasAddr) + if !ok { + return &yamuxAddr{"local"} + } + return addr.LocalAddr() +} + +// RemoteAddr is used to get the address of remote end +// of the underlying connection +func (s *Session) RemoteAddr() net.Addr { + addr, ok := s.conn.(hasAddr) + if !ok { + return &yamuxAddr{"remote"} + } + return addr.RemoteAddr() +} + +// LocalAddr returns the local address +func (s *Stream) LocalAddr() net.Addr { + return s.session.LocalAddr() +} + +// LocalAddr returns the remote address +func (s *Stream) RemoteAddr() net.Addr { + return s.session.RemoteAddr() +} diff --git a/vendor/github.com/influxdata/yamux/bench_test.go b/vendor/github.com/influxdata/yamux/bench_test.go new file mode 100644 index 000000000..9d3b2a1f0 --- /dev/null +++ b/vendor/github.com/influxdata/yamux/bench_test.go @@ -0,0 +1,124 @@ +package yamux + +import ( + "testing" +) + +func BenchmarkPing(b *testing.B) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + for i := 0; i < b.N; i++ { + rtt, err := client.Ping() + if err != nil { + b.Fatalf("err: %v", err) + } + if rtt == 0 { + b.Fatalf("bad: %v", rtt) + } + } +} + +func BenchmarkAccept(b *testing.B) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + go func() { + for i := 0; i < b.N; i++ { + stream, err := server.AcceptStream() + if err != nil { + return + } + stream.Close() + } + }() + + for i := 0; i < b.N; i++ { + stream, err := client.Open() + if err != nil { + b.Fatalf("err: %v", err) + } + stream.Close() + } +} + +func BenchmarkSendRecv(b *testing.B) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + sendBuf := make([]byte, 512) + recvBuf := make([]byte, 512) + + doneCh := make(chan struct{}) + go func() { + stream, err := server.AcceptStream() + if err != nil { + return + } + defer stream.Close() + for i := 0; i < b.N; i++ { + if _, err := stream.Read(recvBuf); 
err != nil { + b.Fatalf("err: %v", err) + } + } + close(doneCh) + }() + + stream, err := client.Open() + if err != nil { + b.Fatalf("err: %v", err) + } + defer stream.Close() + for i := 0; i < b.N; i++ { + if _, err := stream.Write(sendBuf); err != nil { + b.Fatalf("err: %v", err) + } + } + <-doneCh +} + +func BenchmarkSendRecvLarge(b *testing.B) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + const sendSize = 100 * 1024 * 1024 + const recvSize = 4 * 1024 + + sendBuf := make([]byte, sendSize) + recvBuf := make([]byte, recvSize) + + b.ResetTimer() + recvDone := make(chan struct{}) + + go func() { + stream, err := server.AcceptStream() + if err != nil { + return + } + defer stream.Close() + for i := 0; i < b.N; i++ { + for j := 0; j < sendSize/recvSize; j++ { + if _, err := stream.Read(recvBuf); err != nil { + b.Fatalf("err: %v", err) + } + } + } + close(recvDone) + }() + + stream, err := client.Open() + if err != nil { + b.Fatalf("err: %v", err) + } + defer stream.Close() + for i := 0; i < b.N; i++ { + if _, err := stream.Write(sendBuf); err != nil { + b.Fatalf("err: %v", err) + } + } + <-recvDone +} diff --git a/vendor/github.com/influxdata/yamux/const.go b/vendor/github.com/influxdata/yamux/const.go new file mode 100644 index 000000000..fb5bb2144 --- /dev/null +++ b/vendor/github.com/influxdata/yamux/const.go @@ -0,0 +1,161 @@ +package yamux + +import ( + "encoding/binary" + "fmt" +) + +var ( + // ErrInvalidVersion means we received a frame with an + // invalid version + ErrInvalidVersion = fmt.Errorf("invalid protocol version") + + // ErrInvalidMsgType means we received a frame with an + // invalid message type + ErrInvalidMsgType = fmt.Errorf("invalid msg type") + + // ErrSessionShutdown is used if there is a shutdown during + // an operation + ErrSessionShutdown = fmt.Errorf("session shutdown") + + // ErrStreamsExhausted is returned if we have no more + // stream ids to issue + ErrStreamsExhausted = fmt.Errorf("streams 
exhausted") + + // ErrDuplicateStream is used if a duplicate stream is + // opened inbound + ErrDuplicateStream = fmt.Errorf("duplicate stream initiated") + + // ErrReceiveWindowExceeded indicates the window was exceeded + ErrRecvWindowExceeded = fmt.Errorf("recv window exceeded") + + // ErrTimeout is used when we reach an IO deadline + ErrTimeout = fmt.Errorf("i/o deadline reached") + + // ErrStreamClosed is returned when using a closed stream + ErrStreamClosed = fmt.Errorf("stream closed") + + // ErrUnexpectedFlag is set when we get an unexpected flag + ErrUnexpectedFlag = fmt.Errorf("unexpected flag") + + // ErrRemoteGoAway is used when we get a go away from the other side + ErrRemoteGoAway = fmt.Errorf("remote end is not accepting connections") + + // ErrConnectionReset is sent if a stream is reset. This can happen + // if the backlog is exceeded, or if there was a remote GoAway. + ErrConnectionReset = fmt.Errorf("connection reset") + + // ErrConnectionWriteTimeout indicates that we hit the "safety valve" + // timeout writing to the underlying stream connection. + ErrConnectionWriteTimeout = fmt.Errorf("connection write timeout") + + // ErrKeepAliveTimeout is sent if a missed keepalive caused the stream close + ErrKeepAliveTimeout = fmt.Errorf("keepalive timeout") +) + +const ( + // protoVersion is the only version we support + protoVersion uint8 = 0 +) + +const ( + // Data is used for data frames. They are followed + // by length bytes worth of payload. + typeData uint8 = iota + + // WindowUpdate is used to change the window of + // a given stream. The length indicates the delta + // update to the window. + typeWindowUpdate + + // Ping is sent as a keep-alive or to measure + // the RTT. The StreamID and Length value are echoed + // back in the response. + typePing + + // GoAway is sent to terminate a session. The StreamID + // should be 0 and the length is an error code. 
+ typeGoAway + + // typeMax defines the upper bound of valid message types + // and should always be the last constant. + typeMax +) + +const ( + // SYN is sent to signal a new stream. May + // be sent with a data payload + flagSYN uint16 = 1 << iota + + // ACK is sent to acknowledge a new stream. May + // be sent with a data payload + flagACK + + // FIN is sent to half-close the given stream. + // May be sent with a data payload. + flagFIN + + // RST is used to hard close a given stream. + flagRST +) + +const ( + // initialStreamWindow is the initial stream window size + initialStreamWindow uint32 = 256 * 1024 +) + +const ( + // goAwayNormal is sent on a normal termination + goAwayNormal uint32 = iota + + // goAwayProtoErr sent on a protocol error + goAwayProtoErr + + // goAwayInternalErr sent on an internal error + goAwayInternalErr +) + +const ( + sizeOfVersion = 1 + sizeOfType = 1 + sizeOfFlags = 2 + sizeOfStreamID = 4 + sizeOfLength = 4 + headerSize = sizeOfVersion + sizeOfType + sizeOfFlags + + sizeOfStreamID + sizeOfLength +) + +type header []byte + +func (h header) Version() uint8 { + return h[0] +} + +func (h header) MsgType() uint8 { + return h[1] +} + +func (h header) Flags() uint16 { + return binary.BigEndian.Uint16(h[2:4]) +} + +func (h header) StreamID() uint32 { + return binary.BigEndian.Uint32(h[4:8]) +} + +func (h header) Length() uint32 { + return binary.BigEndian.Uint32(h[8:12]) +} + +func (h header) String() string { + return fmt.Sprintf("Vsn:%d Type:%d Flags:%d StreamID:%d Length:%d", + h.Version(), h.MsgType(), h.Flags(), h.StreamID(), h.Length()) +} + +func (h header) encode(msgType uint8, flags uint16, streamID uint32, length uint32) { + h[0] = protoVersion + h[1] = msgType + binary.BigEndian.PutUint16(h[2:4], flags) + binary.BigEndian.PutUint32(h[4:8], streamID) + binary.BigEndian.PutUint32(h[8:12], length) +} diff --git a/vendor/github.com/influxdata/yamux/const_test.go b/vendor/github.com/influxdata/yamux/const_test.go new file mode 
100644 index 000000000..153da18b9 --- /dev/null +++ b/vendor/github.com/influxdata/yamux/const_test.go @@ -0,0 +1,72 @@ +package yamux + +import ( + "testing" +) + +func TestConst(t *testing.T) { + if protoVersion != 0 { + t.Fatalf("bad: %v", protoVersion) + } + + if typeData != 0 { + t.Fatalf("bad: %v", typeData) + } + if typeWindowUpdate != 1 { + t.Fatalf("bad: %v", typeWindowUpdate) + } + if typePing != 2 { + t.Fatalf("bad: %v", typePing) + } + if typeGoAway != 3 { + t.Fatalf("bad: %v", typeGoAway) + } + + if flagSYN != 1 { + t.Fatalf("bad: %v", flagSYN) + } + if flagACK != 2 { + t.Fatalf("bad: %v", flagACK) + } + if flagFIN != 4 { + t.Fatalf("bad: %v", flagFIN) + } + if flagRST != 8 { + t.Fatalf("bad: %v", flagRST) + } + + if goAwayNormal != 0 { + t.Fatalf("bad: %v", goAwayNormal) + } + if goAwayProtoErr != 1 { + t.Fatalf("bad: %v", goAwayProtoErr) + } + if goAwayInternalErr != 2 { + t.Fatalf("bad: %v", goAwayInternalErr) + } + + if headerSize != 12 { + t.Fatalf("bad header size") + } +} + +func TestEncodeDecode(t *testing.T) { + hdr := header(make([]byte, headerSize)) + hdr.encode(typeWindowUpdate, flagACK|flagRST, 1234, 4321) + + if hdr.Version() != protoVersion { + t.Fatalf("bad: %v", hdr) + } + if hdr.MsgType() != typeWindowUpdate { + t.Fatalf("bad: %v", hdr) + } + if hdr.Flags() != flagACK|flagRST { + t.Fatalf("bad: %v", hdr) + } + if hdr.StreamID() != 1234 { + t.Fatalf("bad: %v", hdr) + } + if hdr.Length() != 4321 { + t.Fatalf("bad: %v", hdr) + } +} diff --git a/vendor/github.com/influxdata/yamux/mux.go b/vendor/github.com/influxdata/yamux/mux.go new file mode 100644 index 000000000..7abc7c744 --- /dev/null +++ b/vendor/github.com/influxdata/yamux/mux.go @@ -0,0 +1,87 @@ +package yamux + +import ( + "fmt" + "io" + "os" + "time" +) + +// Config is used to tune the Yamux session +type Config struct { + // AcceptBacklog is used to limit how many streams may be + // waiting an accept. 
+ AcceptBacklog int + + // EnableKeepalive is used to do a period keep alive + // messages using a ping. + EnableKeepAlive bool + + // KeepAliveInterval is how often to perform the keep alive + KeepAliveInterval time.Duration + + // ConnectionWriteTimeout is meant to be a "safety valve" timeout after + // we which will suspect a problem with the underlying connection and + // close it. This is only applied to writes, where's there's generally + // an expectation that things will move along quickly. + ConnectionWriteTimeout time.Duration + + // MaxStreamWindowSize is used to control the maximum + // window size that we allow for a stream. + MaxStreamWindowSize uint32 + + // LogOutput is used to control the log destination + LogOutput io.Writer +} + +// DefaultConfig is used to return a default configuration +func DefaultConfig() *Config { + return &Config{ + AcceptBacklog: 256, + EnableKeepAlive: true, + KeepAliveInterval: 30 * time.Second, + ConnectionWriteTimeout: 10 * time.Second, + MaxStreamWindowSize: initialStreamWindow, + LogOutput: os.Stderr, + } +} + +// VerifyConfig is used to verify the sanity of configuration +func VerifyConfig(config *Config) error { + if config.AcceptBacklog <= 0 { + return fmt.Errorf("backlog must be positive") + } + if config.KeepAliveInterval == 0 { + return fmt.Errorf("keep-alive interval must be positive") + } + if config.MaxStreamWindowSize < initialStreamWindow { + return fmt.Errorf("MaxStreamWindowSize must be larger than %d", initialStreamWindow) + } + return nil +} + +// Server is used to initialize a new server-side connection. +// There must be at most one server-side connection. If a nil config is +// provided, the DefaultConfiguration will be used. 
+func Server(conn io.ReadWriteCloser, config *Config) (*Session, error) { + if config == nil { + config = DefaultConfig() + } + if err := VerifyConfig(config); err != nil { + return nil, err + } + return newSession(config, conn, false), nil +} + +// Client is used to initialize a new client-side connection. +// There must be at most one client-side connection. +func Client(conn io.ReadWriteCloser, config *Config) (*Session, error) { + if config == nil { + config = DefaultConfig() + } + + if err := VerifyConfig(config); err != nil { + return nil, err + } + return newSession(config, conn, true), nil +} diff --git a/vendor/github.com/influxdata/yamux/session.go b/vendor/github.com/influxdata/yamux/session.go new file mode 100644 index 000000000..bf1971583 --- /dev/null +++ b/vendor/github.com/influxdata/yamux/session.go @@ -0,0 +1,623 @@ +package yamux + +import ( + "bufio" + "fmt" + "io" + "io/ioutil" + "log" + "math" + "net" + "strings" + "sync" + "sync/atomic" + "time" +) + +// Session is used to wrap a reliable ordered connection and to +// multiplex it into multiple streams. +type Session struct { + // remoteGoAway indicates the remote side does + // not want futher connections. Must be first for alignment. + remoteGoAway int32 + + // localGoAway indicates that we should stop + // accepting futher connections. Must be first for alignment. + localGoAway int32 + + // nextStreamID is the next stream we should + // send. This depends if we are a client/server. + nextStreamID uint32 + + // config holds our configuration + config *Config + + // logger is used for our logs + logger *log.Logger + + // conn is the underlying connection + conn io.ReadWriteCloser + + // bufRead is a buffered reader + bufRead *bufio.Reader + + // pings is used to track inflight pings + pings map[uint32]chan struct{} + pingID uint32 + pingLock sync.Mutex + + // streams maps a stream id to a stream, and inflight has an entry + // for any outgoing stream that has not yet been established. 
Both are + // protected by streamLock. + streams map[uint32]*Stream + inflight map[uint32]struct{} + streamLock sync.Mutex + + // synCh acts like a semaphore. It is sized to the AcceptBacklog which + // is assumed to be symmetric between the client and server. This allows + // the client to avoid exceeding the backlog and instead blocks the open. + synCh chan struct{} + + // acceptCh is used to pass ready streams to the client + acceptCh chan *Stream + + // sendCh is used to mark a stream as ready to send, + // or to send a header out directly. + sendCh chan sendReady + + // recvDoneCh is closed when recv() exits to avoid a race + // between stream registration and stream shutdown + recvDoneCh chan struct{} + + // shutdown is used to safely close a session + shutdown bool + shutdownErr error + shutdownCh chan struct{} + shutdownLock sync.Mutex +} + +// sendReady is used to either mark a stream as ready +// or to directly send a header +type sendReady struct { + Hdr []byte + Body io.Reader + Err chan error +} + +// newSession is used to construct a new session +func newSession(config *Config, conn io.ReadWriteCloser, client bool) *Session { + s := &Session{ + config: config, + logger: log.New(config.LogOutput, "", log.LstdFlags), + conn: conn, + bufRead: bufio.NewReader(conn), + pings: make(map[uint32]chan struct{}), + streams: make(map[uint32]*Stream), + inflight: make(map[uint32]struct{}), + synCh: make(chan struct{}, config.AcceptBacklog), + acceptCh: make(chan *Stream, config.AcceptBacklog), + sendCh: make(chan sendReady, 64), + recvDoneCh: make(chan struct{}), + shutdownCh: make(chan struct{}), + } + if client { + s.nextStreamID = 1 + } else { + s.nextStreamID = 2 + } + go s.recv() + go s.send() + if config.EnableKeepAlive { + go s.keepalive() + } + return s +} + +// IsClosed does a safe check to see if we have shutdown +func (s *Session) IsClosed() bool { + select { + case <-s.shutdownCh: + return true + default: + return false + } +} + +// NumStreams returns 
the number of currently open streams +func (s *Session) NumStreams() int { + s.streamLock.Lock() + num := len(s.streams) + s.streamLock.Unlock() + return num +} + +// Open is used to create a new stream as a net.Conn +func (s *Session) Open() (net.Conn, error) { + conn, err := s.OpenStream() + if err != nil { + return nil, err + } + return conn, nil +} + +// OpenStream is used to create a new stream +func (s *Session) OpenStream() (*Stream, error) { + if s.IsClosed() { + return nil, ErrSessionShutdown + } + if atomic.LoadInt32(&s.remoteGoAway) == 1 { + return nil, ErrRemoteGoAway + } + + // Block if we have too many inflight SYNs + select { + case s.synCh <- struct{}{}: + case <-s.shutdownCh: + return nil, ErrSessionShutdown + } + +GET_ID: + // Get an ID, and check for stream exhaustion + id := atomic.LoadUint32(&s.nextStreamID) + if id >= math.MaxUint32-1 { + return nil, ErrStreamsExhausted + } + if !atomic.CompareAndSwapUint32(&s.nextStreamID, id, id+2) { + goto GET_ID + } + + // Register the stream + stream := newStream(s, id, streamInit) + s.streamLock.Lock() + s.streams[id] = stream + s.inflight[id] = struct{}{} + s.streamLock.Unlock() + + // Send the window update to create + if err := stream.sendWindowUpdate(); err != nil { + s.closeStream(id) + return nil, err + } + return stream, nil +} + +// Accept is used to block until the next available stream +// is ready to be accepted. +func (s *Session) Accept() (net.Conn, error) { + conn, err := s.AcceptStream() + if err != nil { + return nil, err + } + return conn, err +} + +// AcceptStream is used to block until the next available stream +// is ready to be accepted. +func (s *Session) AcceptStream() (*Stream, error) { + select { + case stream := <-s.acceptCh: + if err := stream.sendWindowUpdate(); err != nil { + return nil, err + } + return stream, nil + case <-s.shutdownCh: + return nil, s.shutdownErr + } +} + +// Close is used to close the session and all streams. 
+// Attempts to send a GoAway before closing the connection. +func (s *Session) Close() error { + s.shutdownLock.Lock() + defer s.shutdownLock.Unlock() + + if s.shutdown { + return nil + } + s.shutdown = true + if s.shutdownErr == nil { + s.shutdownErr = ErrSessionShutdown + } + close(s.shutdownCh) + s.conn.Close() + <-s.recvDoneCh + + s.streamLock.Lock() + defer s.streamLock.Unlock() + for _, stream := range s.streams { + stream.forceClose() + } + return nil +} + +// exitErr is used to handle an error that is causing the +// session to terminate. +func (s *Session) exitErr(err error) { + s.shutdownLock.Lock() + if s.shutdownErr == nil { + s.shutdownErr = err + } + s.shutdownLock.Unlock() + s.Close() +} + +// GoAway can be used to prevent accepting further +// connections. It does not close the underlying conn. +func (s *Session) GoAway() error { + return s.waitForSend(s.goAway(goAwayNormal), nil) +} + +// goAway is used to send a goAway message +func (s *Session) goAway(reason uint32) header { + atomic.SwapInt32(&s.localGoAway, 1) + hdr := header(make([]byte, headerSize)) + hdr.encode(typeGoAway, 0, 0, reason) + return hdr +} + +// Ping is used to measure the RTT response time +func (s *Session) Ping() (time.Duration, error) { + // Get a channel for the ping + ch := make(chan struct{}) + + // Get a new ping id, mark as pending + s.pingLock.Lock() + id := s.pingID + s.pingID++ + s.pings[id] = ch + s.pingLock.Unlock() + + // Send the ping request + hdr := header(make([]byte, headerSize)) + hdr.encode(typePing, flagSYN, 0, id) + if err := s.waitForSend(hdr, nil); err != nil { + return 0, err + } + + // Wait for a response + start := time.Now() + select { + case <-ch: + case <-time.After(s.config.ConnectionWriteTimeout): + s.pingLock.Lock() + delete(s.pings, id) // Ignore it if a response comes later. 
+ s.pingLock.Unlock() + return 0, ErrTimeout + case <-s.shutdownCh: + return 0, ErrSessionShutdown + } + + // Compute the RTT + return time.Now().Sub(start), nil +} + +// keepalive is a long running goroutine that periodically does +// a ping to keep the connection alive. +func (s *Session) keepalive() { + for { + select { + case <-time.After(s.config.KeepAliveInterval): + _, err := s.Ping() + if err != nil { + s.logger.Printf("[ERR] yamux: keepalive failed: %v", err) + s.exitErr(ErrKeepAliveTimeout) + return + } + case <-s.shutdownCh: + return + } + } +} + +// waitForSendErr waits to send a header, checking for a potential shutdown +func (s *Session) waitForSend(hdr header, body io.Reader) error { + errCh := make(chan error, 1) + return s.waitForSendErr(hdr, body, errCh) +} + +// waitForSendErr waits to send a header with optional data, checking for a +// potential shutdown. Since there's the expectation that sends can happen +// in a timely manner, we enforce the connection write timeout here. +func (s *Session) waitForSendErr(hdr header, body io.Reader, errCh chan error) error { + t := timerPool.Get() + timer := t.(*time.Timer) + timer.Reset(s.config.ConnectionWriteTimeout) + defer func() { + timer.Stop() + timerPool.Put(t) + }() + + ready := sendReady{Hdr: hdr, Body: body, Err: errCh} + select { + case s.sendCh <- ready: + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } + + select { + case err := <-errCh: + return err + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } +} + +// sendNoWait does a send without waiting. Since there's the expectation that +// the send happens right here, we enforce the connection write timeout if we +// can't queue the header to be sent. 
+func (s *Session) sendNoWait(hdr header) error { + timer := time.NewTimer(s.config.ConnectionWriteTimeout) + defer timer.Stop() + + select { + case s.sendCh <- sendReady{Hdr: hdr}: + return nil + case <-s.shutdownCh: + return ErrSessionShutdown + case <-timer.C: + return ErrConnectionWriteTimeout + } +} + +// send is a long running goroutine that sends data +func (s *Session) send() { + for { + select { + case ready := <-s.sendCh: + // Send a header if ready + if ready.Hdr != nil { + sent := 0 + for sent < len(ready.Hdr) { + n, err := s.conn.Write(ready.Hdr[sent:]) + if err != nil { + s.logger.Printf("[WARN] yamux: Failed to write header: %v", err) + asyncSendErr(ready.Err, err) + s.exitErr(err) + return + } + sent += n + } + } + + // Send data from a body if given + if ready.Body != nil { + _, err := io.Copy(s.conn, ready.Body) + if err != nil { + s.logger.Printf("[WARN] yamux: Failed to write body: %v", err) + asyncSendErr(ready.Err, err) + s.exitErr(err) + return + } + } + + // No error, successful send + asyncSendErr(ready.Err, nil) + case <-s.shutdownCh: + return + } + } +} + +// recv is a long running goroutine that accepts new data +func (s *Session) recv() { + if err := s.recvLoop(); err != nil { + s.exitErr(err) + } +} + +var ( + handlers = []func(*Session, header) error{ + typeData: (*Session).handleStreamMessage, + typeWindowUpdate: (*Session).handleStreamMessage, + typePing: (*Session).handlePing, + typeGoAway: (*Session).handleGoAway, + } +) + +// recvLoop continues to receive data until a fatal error is encountered +func (s *Session) recvLoop() error { + defer close(s.recvDoneCh) + hdr := header(make([]byte, headerSize)) + for { + // Read the header + if _, err := io.ReadFull(s.bufRead, hdr); err != nil { + if err != io.EOF && !strings.Contains(err.Error(), "closed") && !strings.Contains(err.Error(), "reset by peer") { + s.logger.Printf("[ERR] yamux: Failed to read header: %v", err) + } + return err + } + + // Verify the version + if hdr.Version() != 
protoVersion { + s.logger.Printf("[ERR] yamux: Invalid protocol version: %d", hdr.Version()) + return ErrInvalidVersion + } + + mt := hdr.MsgType() + if mt < typeData || mt >= typeMax { + return ErrInvalidMsgType + } + + if err := handlers[mt](s, hdr); err != nil { + return err + } + } +} + +// handleStreamMessage handles either a data or window update frame +func (s *Session) handleStreamMessage(hdr header) error { + // Check for a new stream creation + id := hdr.StreamID() + flags := hdr.Flags() + if flags&flagSYN == flagSYN { + if err := s.incomingStream(id); err != nil { + return err + } + } + + // Get the stream + s.streamLock.Lock() + stream := s.streams[id] + s.streamLock.Unlock() + + // If we do not have a stream, likely we sent a RST or an error occurred sending a SYN + if stream == nil { + // Drain any data on the wire + if hdr.MsgType() == typeData && hdr.Length() > 0 { + s.logger.Printf("[WARN] yamux: Discarding data for stream: %d", id) + if _, err := io.CopyN(ioutil.Discard, s.bufRead, int64(hdr.Length())); err != nil { + s.logger.Printf("[ERR] yamux: Failed to discard data: %v", err) + return nil + } + } else { + s.logger.Printf("[WARN] yamux: frame for missing stream: %v", hdr) + } + return nil + } + + // Check if this is a window update + if hdr.MsgType() == typeWindowUpdate { + if err := stream.incrSendWindow(hdr, flags); err != nil { + if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { + s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) + } + return err + } + return nil + } + + // Read the new data + if err := stream.readData(hdr, flags, s.bufRead); err != nil { + if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { + s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) + } + return err + } + return nil +} + +// handlePing is invokde for a typePing frame +func (s *Session) handlePing(hdr header) error { + flags := hdr.Flags() + pingID := hdr.Length() + + // Check if this is 
a query, respond back in a separate context so we + // don't interfere with the receiving thread blocking for the write. + if flags&flagSYN == flagSYN { + go func() { + hdr := header(make([]byte, headerSize)) + hdr.encode(typePing, flagACK, 0, pingID) + if err := s.sendNoWait(hdr); err != nil { + s.logger.Printf("[WARN] yamux: failed to send ping reply: %v", err) + } + }() + return nil + } + + // Handle a response + s.pingLock.Lock() + ch := s.pings[pingID] + if ch != nil { + delete(s.pings, pingID) + close(ch) + } + s.pingLock.Unlock() + return nil +} + +// handleGoAway is invokde for a typeGoAway frame +func (s *Session) handleGoAway(hdr header) error { + code := hdr.Length() + switch code { + case goAwayNormal: + atomic.SwapInt32(&s.remoteGoAway, 1) + case goAwayProtoErr: + s.logger.Printf("[ERR] yamux: received protocol error go away") + return fmt.Errorf("yamux protocol error") + case goAwayInternalErr: + s.logger.Printf("[ERR] yamux: received internal error go away") + return fmt.Errorf("remote yamux internal error") + default: + s.logger.Printf("[ERR] yamux: received unexpected go away") + return fmt.Errorf("unexpected go away received") + } + return nil +} + +// incomingStream is used to create a new incoming stream +func (s *Session) incomingStream(id uint32) error { + // Reject immediately if we are doing a go away + if atomic.LoadInt32(&s.localGoAway) == 1 { + hdr := header(make([]byte, headerSize)) + hdr.encode(typeWindowUpdate, flagRST, id, 0) + return s.sendNoWait(hdr) + } + + // Allocate a new stream + stream := newStream(s, id, streamSYNReceived) + + s.streamLock.Lock() + defer s.streamLock.Unlock() + + // Check if stream already exists + if _, ok := s.streams[id]; ok { + s.logger.Printf("[ERR] yamux: duplicate stream declared") + if sendErr := s.sendNoWait(s.goAway(goAwayProtoErr)); sendErr != nil { + s.logger.Printf("[WARN] yamux: failed to send go away: %v", sendErr) + } + return ErrDuplicateStream + } + + // Register the stream + s.streams[id] = 
stream + + // Check if we've exceeded the backlog + select { + case s.acceptCh <- stream: + return nil + default: + // Backlog exceeded! RST the stream + s.logger.Printf("[WARN] yamux: backlog exceeded, forcing connection reset") + delete(s.streams, id) + stream.sendHdr.encode(typeWindowUpdate, flagRST, id, 0) + return s.sendNoWait(stream.sendHdr) + } +} + +// closeStream is used to close a stream once both sides have +// issued a close. If there was an in-flight SYN and the stream +// was not yet established, then this will give the credit back. +func (s *Session) closeStream(id uint32) { + s.streamLock.Lock() + if _, ok := s.inflight[id]; ok { + delete(s.inflight, id) + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: SYN tracking out of sync") + } + } + delete(s.streams, id) + s.streamLock.Unlock() +} + +// establishStream is used to mark a stream that was in the +// SYN Sent state as established. +func (s *Session) establishStream(id uint32) { + s.streamLock.Lock() + if _, ok := s.inflight[id]; ok { + delete(s.inflight, id) + } else { + s.logger.Printf("[ERR] yamux: established stream without inflight SYN (no tracking entry)") + } + select { + case <-s.synCh: + default: + s.logger.Printf("[ERR] yamux: established stream without inflight SYN (didn't have semaphore)") + } + s.streamLock.Unlock() +} diff --git a/vendor/github.com/influxdata/yamux/session_test.go b/vendor/github.com/influxdata/yamux/session_test.go new file mode 100644 index 000000000..17304a307 --- /dev/null +++ b/vendor/github.com/influxdata/yamux/session_test.go @@ -0,0 +1,1298 @@ +package yamux + +import ( + "bytes" + "fmt" + "io" + "io/ioutil" + "log" + "reflect" + "runtime" + "strings" + "sync" + "testing" + "time" +) + +type logCapture struct{ bytes.Buffer } + +func (l *logCapture) logs() []string { + return strings.Split(strings.TrimSpace(l.String()), "\n") +} + +func (l *logCapture) match(expect []string) bool { + return reflect.DeepEqual(l.logs(), expect) +} + +func 
captureLogs(s *Session) *logCapture { + buf := new(logCapture) + s.logger = log.New(buf, "", 0) + return buf +} + +type pipeConn struct { + reader *io.PipeReader + writer *io.PipeWriter + writeBlocker sync.Mutex +} + +func (p *pipeConn) Read(b []byte) (int, error) { + return p.reader.Read(b) +} + +func (p *pipeConn) Write(b []byte) (int, error) { + p.writeBlocker.Lock() + defer p.writeBlocker.Unlock() + return p.writer.Write(b) +} + +func (p *pipeConn) Close() error { + p.reader.Close() + return p.writer.Close() +} + +func testConn() (io.ReadWriteCloser, io.ReadWriteCloser) { + read1, write1 := io.Pipe() + read2, write2 := io.Pipe() + conn1 := &pipeConn{reader: read1, writer: write2} + conn2 := &pipeConn{reader: read2, writer: write1} + return conn1, conn2 +} + +func testConf() *Config { + conf := DefaultConfig() + conf.AcceptBacklog = 64 + conf.KeepAliveInterval = 100 * time.Millisecond + conf.ConnectionWriteTimeout = 250 * time.Millisecond + return conf +} + +func testConfNoKeepAlive() *Config { + conf := testConf() + conf.EnableKeepAlive = false + return conf +} + +func testClientServer() (*Session, *Session) { + return testClientServerConfig(testConf()) +} + +func testClientServerConfig(conf *Config) (*Session, *Session) { + conn1, conn2 := testConn() + client, _ := Client(conn1, conf) + server, _ := Server(conn2, conf) + return client, server +} + +func TestPing(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + rtt, err := client.Ping() + if err != nil { + t.Fatalf("err: %v", err) + } + if rtt == 0 { + t.Fatalf("bad: %v", rtt) + } + + rtt, err = server.Ping() + if err != nil { + t.Fatalf("err: %v", err) + } + if rtt == 0 { + t.Fatalf("bad: %v", rtt) + } +} + +func TestPing_Timeout(t *testing.T) { + client, server := testClientServerConfig(testConfNoKeepAlive()) + defer client.Close() + defer server.Close() + + // Prevent the client from responding + clientConn := client.conn.(*pipeConn) + 
clientConn.writeBlocker.Lock() + + errCh := make(chan error, 1) + go func() { + _, err := server.Ping() // Ping via the server session + errCh <- err + }() + + select { + case err := <-errCh: + if err != ErrTimeout { + t.Fatalf("err: %v", err) + } + case <-time.After(client.config.ConnectionWriteTimeout * 2): + t.Fatalf("failed to timeout within expected %v", client.config.ConnectionWriteTimeout) + } + + // Verify that we recover, even if we gave up + clientConn.writeBlocker.Unlock() + + go func() { + _, err := server.Ping() // Ping via the server session + errCh <- err + }() + + select { + case err := <-errCh: + if err != nil { + t.Fatalf("err: %v", err) + } + case <-time.After(client.config.ConnectionWriteTimeout): + t.Fatalf("timeout") + } +} + +func TestCloseBeforeAck(t *testing.T) { + cfg := testConf() + cfg.AcceptBacklog = 8 + client, server := testClientServerConfig(cfg) + + defer client.Close() + defer server.Close() + + for i := 0; i < 8; i++ { + s, err := client.OpenStream() + if err != nil { + t.Fatal(err) + } + s.Close() + } + + for i := 0; i < 8; i++ { + s, err := server.AcceptStream() + if err != nil { + t.Fatal(err) + } + s.Close() + } + + done := make(chan struct{}) + go func() { + defer close(done) + s, err := client.OpenStream() + if err != nil { + t.Fatal(err) + } + s.Close() + }() + + select { + case <-done: + case <-time.After(time.Second * 5): + t.Fatal("timed out trying to open stream") + } +} + +func TestAccept(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + if client.NumStreams() != 0 { + t.Fatalf("bad") + } + if server.NumStreams() != 0 { + t.Fatalf("bad") + } + + wg := &sync.WaitGroup{} + wg.Add(4) + + go func() { + defer wg.Done() + stream, err := server.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + if id := stream.StreamID(); id != 1 { + t.Fatalf("bad: %v", id) + } + if err := stream.Close(); err != nil { + t.Fatalf("err: %v", err) + } + }() + + go func() { + 
defer wg.Done() + stream, err := client.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + if id := stream.StreamID(); id != 2 { + t.Fatalf("bad: %v", id) + } + if err := stream.Close(); err != nil { + t.Fatalf("err: %v", err) + } + }() + + go func() { + defer wg.Done() + stream, err := server.OpenStream() + if err != nil { + t.Fatalf("err: %v", err) + } + if id := stream.StreamID(); id != 2 { + t.Fatalf("bad: %v", id) + } + if err := stream.Close(); err != nil { + t.Fatalf("err: %v", err) + } + }() + + go func() { + defer wg.Done() + stream, err := client.OpenStream() + if err != nil { + t.Fatalf("err: %v", err) + } + if id := stream.StreamID(); id != 1 { + t.Fatalf("bad: %v", id) + } + if err := stream.Close(); err != nil { + t.Fatalf("err: %v", err) + } + }() + + doneCh := make(chan struct{}) + go func() { + wg.Wait() + close(doneCh) + }() + + select { + case <-doneCh: + case <-time.After(time.Second): + panic("timeout") + } +} + +func TestNonNilInterface(t *testing.T) { + _, server := testClientServer() + server.Close() + + conn, err := server.Accept() + if err != nil && conn != nil { + t.Error("bad: accept should return a connection of nil value") + } + + conn, err = server.Open() + if err != nil && conn != nil { + t.Error("bad: open should return a connection of nil value") + } +} + +func TestSendData_Small(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + wg := &sync.WaitGroup{} + wg.Add(2) + + go func() { + defer wg.Done() + stream, err := server.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + + if server.NumStreams() != 1 { + t.Fatalf("bad") + } + + buf := make([]byte, 4) + for i := 0; i < 1000; i++ { + n, err := stream.Read(buf) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != 4 { + t.Fatalf("short read: %d", n) + } + if string(buf) != "test" { + t.Fatalf("bad: %s", buf) + } + } + + if err := stream.Close(); err != nil { + t.Fatalf("err: %v", err) + } + }() + 
+ go func() { + defer wg.Done() + stream, err := client.Open() + if err != nil { + t.Fatalf("err: %v", err) + } + + if client.NumStreams() != 1 { + t.Fatalf("bad") + } + + for i := 0; i < 1000; i++ { + n, err := stream.Write([]byte("test")) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != 4 { + t.Fatalf("short write %d", n) + } + } + + if err := stream.Close(); err != nil { + t.Fatalf("err: %v", err) + } + }() + + doneCh := make(chan struct{}) + go func() { + wg.Wait() + close(doneCh) + }() + select { + case <-doneCh: + case <-time.After(time.Second): + panic("timeout") + } + + if client.NumStreams() != 0 { + t.Fatalf("bad") + } + if server.NumStreams() != 0 { + t.Fatalf("bad") + } +} + +func TestSendData_Large(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + const ( + sendSize = 250 * 1024 * 1024 + recvSize = 4 * 1024 + ) + + data := make([]byte, sendSize) + for idx := range data { + data[idx] = byte(idx % 256) + } + + wg := &sync.WaitGroup{} + wg.Add(2) + + go func() { + defer wg.Done() + stream, err := server.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + var sz int + buf := make([]byte, recvSize) + for i := 0; i < sendSize/recvSize; i++ { + n, err := stream.Read(buf) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != recvSize { + t.Fatalf("short read: %d", n) + } + sz += n + for idx := range buf { + if buf[idx] != byte(idx%256) { + t.Fatalf("bad: %v %v %v", i, idx, buf[idx]) + } + } + } + + if err := stream.Close(); err != nil { + t.Fatalf("err: %v", err) + } + + t.Logf("cap=%d, n=%d\n", stream.recvBuf.Cap(), sz) + }() + + go func() { + defer wg.Done() + stream, err := client.Open() + if err != nil { + t.Fatalf("err: %v", err) + } + + n, err := stream.Write(data) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != len(data) { + t.Fatalf("short write %d", n) + } + + if err := stream.Close(); err != nil { + t.Fatalf("err: %v", err) + } + }() + + doneCh := make(chan 
struct{}) + go func() { + wg.Wait() + close(doneCh) + }() + select { + case <-doneCh: + case <-time.After(5 * time.Second): + panic("timeout") + } +} + +func TestGoAway(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + if err := server.GoAway(); err != nil { + t.Fatalf("err: %v", err) + } + + _, err := client.Open() + if err != ErrRemoteGoAway { + t.Fatalf("err: %v", err) + } +} + +func TestManyStreams(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + wg := &sync.WaitGroup{} + + acceptor := func(i int) { + defer wg.Done() + stream, err := server.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + buf := make([]byte, 512) + for { + n, err := stream.Read(buf) + if err == io.EOF { + return + } + if err != nil { + t.Fatalf("err: %v", err) + } + if n == 0 { + t.Fatalf("err: %v", err) + } + } + } + sender := func(i int) { + defer wg.Done() + stream, err := client.Open() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + msg := fmt.Sprintf("%08d", i) + for i := 0; i < 1000; i++ { + n, err := stream.Write([]byte(msg)) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != len(msg) { + t.Fatalf("short write %d", n) + } + } + } + + for i := 0; i < 50; i++ { + wg.Add(2) + go acceptor(i) + go sender(i) + } + + wg.Wait() +} + +func TestManyStreams_PingPong(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + wg := &sync.WaitGroup{} + + ping := []byte("ping") + pong := []byte("pong") + + acceptor := func(i int) { + defer wg.Done() + stream, err := server.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + buf := make([]byte, 4) + for { + // Read the 'ping' + n, err := stream.Read(buf) + if err == io.EOF { + return + } + if err != nil { + t.Fatalf("err: %v", err) + } + if n != 4 { + t.Fatalf("err: %v", err) + } + if 
!bytes.Equal(buf, ping) { + t.Fatalf("bad: %s", buf) + } + + // Shrink the internal buffer! + stream.Shrink() + + // Write out the 'pong' + n, err = stream.Write(pong) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != 4 { + t.Fatalf("err: %v", err) + } + } + } + sender := func(i int) { + defer wg.Done() + stream, err := client.OpenStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + buf := make([]byte, 4) + for i := 0; i < 1000; i++ { + // Send the 'ping' + n, err := stream.Write(ping) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != 4 { + t.Fatalf("short write %d", n) + } + + // Read the 'pong' + n, err = stream.Read(buf) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != 4 { + t.Fatalf("err: %v", err) + } + if !bytes.Equal(buf, pong) { + t.Fatalf("bad: %s", buf) + } + + // Shrink the buffer + stream.Shrink() + } + } + + for i := 0; i < 50; i++ { + wg.Add(2) + go acceptor(i) + go sender(i) + } + + wg.Wait() +} + +func TestHalfClose(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + stream, err := client.Open() + if err != nil { + t.Fatalf("err: %v", err) + } + if _, err := stream.Write([]byte("a")); err != nil { + t.Fatalf("err: %v", err) + } + + stream2, err := server.Accept() + if err != nil { + t.Fatalf("err: %v", err) + } + stream2.Close() // Half close + + buf := make([]byte, 4) + n, err := stream2.Read(buf) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != 1 { + t.Fatalf("bad: %v", n) + } + + // Send more + if _, err := stream.Write([]byte("bcd")); err != nil { + t.Fatalf("err: %v", err) + } + stream.Close() + + // Read after close + n, err = stream2.Read(buf) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != 3 { + t.Fatalf("bad: %v", n) + } + + // EOF after close + n, err = stream2.Read(buf) + if err != io.EOF { + t.Fatalf("err: %v", err) + } + if n != 0 { + t.Fatalf("bad: %v", n) + } +} + +func TestReadDeadline(t *testing.T) { 
+ client, server := testClientServer() + defer client.Close() + defer server.Close() + + stream, err := client.Open() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + stream2, err := server.Accept() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream2.Close() + + if err := stream.SetReadDeadline(time.Now().Add(5 * time.Millisecond)); err != nil { + t.Fatalf("err: %v", err) + } + + buf := make([]byte, 4) + if _, err := stream.Read(buf); err != ErrTimeout { + t.Fatalf("err: %v", err) + } +} + +func TestWriteDeadline(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + stream, err := client.Open() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + stream2, err := server.Accept() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream2.Close() + + if err := stream.SetWriteDeadline(time.Now().Add(50 * time.Millisecond)); err != nil { + t.Fatalf("err: %v", err) + } + + buf := make([]byte, 512) + for i := 0; i < int(initialStreamWindow); i++ { + _, err := stream.Write(buf) + if err != nil && err == ErrTimeout { + return + } else if err != nil { + t.Fatalf("err: %v", err) + } + } + t.Fatalf("Expected timeout") +} + +func TestBacklogExceeded(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + // Fill the backlog + max := client.config.AcceptBacklog + for i := 0; i < max; i++ { + stream, err := client.Open() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + if _, err := stream.Write([]byte("foo")); err != nil { + t.Fatalf("err: %v", err) + } + } + + // Attempt to open a new stream + errCh := make(chan error, 1) + go func() { + _, err := client.Open() + errCh <- err + }() + + // Shutdown the server + go func() { + time.Sleep(10 * time.Millisecond) + server.Close() + }() + + select { + case err := <-errCh: + if err == nil { + t.Fatalf("open should fail") + } + case 
<-time.After(time.Second): + t.Fatalf("timeout") + } +} + +func TestKeepAlive(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + time.Sleep(200 * time.Millisecond) + + // Ping value should increase + client.pingLock.Lock() + defer client.pingLock.Unlock() + if client.pingID == 0 { + t.Fatalf("should ping") + } + + server.pingLock.Lock() + defer server.pingLock.Unlock() + if server.pingID == 0 { + t.Fatalf("should ping") + } +} + +func TestKeepAlive_Timeout(t *testing.T) { + conn1, conn2 := testConn() + + clientConf := testConf() + clientConf.ConnectionWriteTimeout = time.Hour // We're testing keep alives, not connection writes + clientConf.EnableKeepAlive = false // Just test one direction, so it's deterministic who hangs up on whom + client, _ := Client(conn1, clientConf) + defer client.Close() + + server, _ := Server(conn2, testConf()) + defer server.Close() + + _ = captureLogs(client) // Client logs aren't part of the test + serverLogs := captureLogs(server) + + errCh := make(chan error, 1) + go func() { + _, err := server.Accept() // Wait until server closes + errCh <- err + }() + + // Prevent the client from responding + clientConn := client.conn.(*pipeConn) + clientConn.writeBlocker.Lock() + + select { + case err := <-errCh: + if err != ErrKeepAliveTimeout { + t.Fatalf("unexpected error: %v", err) + } + case <-time.After(1 * time.Second): + t.Fatalf("timeout waiting for timeout") + } + + if !server.IsClosed() { + t.Fatalf("server should have closed") + } + + if !serverLogs.match([]string{"[ERR] yamux: keepalive failed: i/o deadline reached"}) { + t.Fatalf("server log incorect: %v", serverLogs.logs()) + } +} + +func TestLargeWindow(t *testing.T) { + conf := DefaultConfig() + conf.MaxStreamWindowSize *= 2 + + client, server := testClientServerConfig(conf) + defer client.Close() + defer server.Close() + + stream, err := client.Open() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + 
stream2, err := server.Accept() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream2.Close() + + stream.SetWriteDeadline(time.Now().Add(10 * time.Millisecond)) + buf := make([]byte, conf.MaxStreamWindowSize) + n, err := stream.Write(buf) + if err != nil { + t.Fatalf("err: %v", err) + } + if n != len(buf) { + t.Fatalf("short write: %d", n) + } +} + +type UnlimitedReader struct{} + +func (u *UnlimitedReader) Read(p []byte) (int, error) { + runtime.Gosched() + return len(p), nil +} + +func TestSendData_VeryLarge(t *testing.T) { + client, server := testClientServer() + defer client.Close() + defer server.Close() + + var n int64 = 1 * 1024 * 1024 * 1024 + var workers int = 16 + + wg := &sync.WaitGroup{} + wg.Add(workers * 2) + + for i := 0; i < workers; i++ { + go func() { + defer wg.Done() + stream, err := server.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + buf := make([]byte, 4) + _, err = stream.Read(buf) + if err != nil { + t.Fatalf("err: %v", err) + } + if !bytes.Equal(buf, []byte{0, 1, 2, 3}) { + t.Fatalf("bad header") + } + + recv, err := io.Copy(ioutil.Discard, stream) + if err != nil { + t.Fatalf("err: %v", err) + } + if recv != n { + t.Fatalf("bad: %v", recv) + } + }() + } + for i := 0; i < workers; i++ { + go func() { + defer wg.Done() + stream, err := client.Open() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + _, err = stream.Write([]byte{0, 1, 2, 3}) + if err != nil { + t.Fatalf("err: %v", err) + } + + unlimited := &UnlimitedReader{} + sent, err := io.Copy(stream, io.LimitReader(unlimited, n)) + if err != nil { + t.Fatalf("err: %v", err) + } + if sent != n { + t.Fatalf("bad: %v", sent) + } + }() + } + + doneCh := make(chan struct{}) + go func() { + wg.Wait() + close(doneCh) + }() + select { + case <-doneCh: + case <-time.After(20 * time.Second): + panic("timeout") + } +} + +func TestBacklogExceeded_Accept(t *testing.T) { + client, server := testClientServer() + defer 
client.Close() + defer server.Close() + + max := 5 * client.config.AcceptBacklog + go func() { + for i := 0; i < max; i++ { + stream, err := server.Accept() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + } + }() + + // Fill the backlog + for i := 0; i < max; i++ { + stream, err := client.Open() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + if _, err := stream.Write([]byte("foo")); err != nil { + t.Fatalf("err: %v", err) + } + } +} + +func TestSessionOpenStream_WindowUpdateSYNTimeout(t *testing.T) { + client, server := testClientServerConfig(testConfNoKeepAlive()) + defer client.Close() + defer server.Close() + + // Prevent the client from initially writing SYN + clientConn := client.conn.(*pipeConn) + clientConn.writeBlocker.Lock() + + var wg sync.WaitGroup + wg.Add(1) + + // server + go func() { + defer wg.Done() + + stream, err := server.Accept() + if err != nil { + t.Fatalf("err: %v", err) + } + stream.Close() + }() + + stream, err := client.OpenStream() + if err == nil { + t.Fatal("expected err: connection write timeout") + } + + // release lock + clientConn.writeBlocker.Unlock() + + if stream != nil { + t.Fatal("expected stream to be nil") + } + + wg.Wait() + + if exp, got := 0, len(client.streams); got != exp { + t.Errorf("invalid streams length; exp=%d, got=%d", exp, got) + } +} + +func TestSession_WindowUpdateWriteDuringRead(t *testing.T) { + client, server := testClientServerConfig(testConfNoKeepAlive()) + defer client.Close() + defer server.Close() + + var wg sync.WaitGroup + wg.Add(2) + + // Choose a huge flood size that we know will result in a window update. + flood := int64(client.config.MaxStreamWindowSize) - 1 + + // The server will accept a new stream and then flood data to it. 
+ go func() { + defer wg.Done() + + stream, err := server.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + n, err := stream.Write(make([]byte, flood)) + if err != nil { + t.Fatalf("err: %v", err) + } + if int64(n) != flood { + t.Fatalf("short write: %d", n) + } + }() + + // The client will open a stream, block outbound writes, and then + // listen to the flood from the server, which should time out since + // it won't be able to send the window update. + go func() { + defer wg.Done() + + stream, err := client.OpenStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + conn := client.conn.(*pipeConn) + conn.writeBlocker.Lock() + + _, err = stream.Read(make([]byte, flood)) + if err != ErrConnectionWriteTimeout { + t.Fatalf("err: %v", err) + } + }() + + wg.Wait() +} + +func TestSession_PartialReadWindowUpdate(t *testing.T) { + client, server := testClientServerConfig(testConfNoKeepAlive()) + defer client.Close() + defer server.Close() + + var wg sync.WaitGroup + wg.Add(1) + + // Choose a huge flood size that we know will result in a window update. + flood := int64(client.config.MaxStreamWindowSize) + var wr *Stream + + // The server will accept a new stream and then flood data to it. 
+ go func() { + defer wg.Done() + + var err error + wr, err = server.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer wr.Close() + + if wr.sendWindow != client.config.MaxStreamWindowSize { + t.Fatalf("sendWindow: exp=%d, got=%d", client.config.MaxStreamWindowSize, wr.sendWindow) + } + + n, err := wr.Write(make([]byte, flood)) + if err != nil { + t.Fatalf("err: %v", err) + } + if int64(n) != flood { + t.Fatalf("short write: %d", n) + } + if wr.sendWindow != 0 { + t.Fatalf("sendWindow: exp=%d, got=%d", 0, wr.sendWindow) + } + }() + + stream, err := client.OpenStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + wg.Wait() + + _, err = stream.Read(make([]byte, flood/2+1)) + + if exp := uint32(flood/2 + 1); wr.sendWindow != exp { + t.Errorf("sendWindow: exp=%d, got=%d", exp, wr.sendWindow) + } +} + +func TestSession_sendNoWait_Timeout(t *testing.T) { + client, server := testClientServerConfig(testConfNoKeepAlive()) + defer client.Close() + defer server.Close() + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + defer wg.Done() + + stream, err := server.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + }() + + // The client will open the stream and then block outbound writes, we'll + // probe sendNoWait once it gets into that state. 
+ go func() { + defer wg.Done() + + stream, err := client.OpenStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + conn := client.conn.(*pipeConn) + conn.writeBlocker.Lock() + + hdr := header(make([]byte, headerSize)) + hdr.encode(typePing, flagACK, 0, 0) + for { + err = client.sendNoWait(hdr) + if err == nil { + continue + } else if err == ErrConnectionWriteTimeout { + break + } else { + t.Fatalf("err: %v", err) + } + } + }() + + wg.Wait() +} + +func TestSession_PingOfDeath(t *testing.T) { + client, server := testClientServerConfig(testConfNoKeepAlive()) + defer client.Close() + defer server.Close() + + var wg sync.WaitGroup + wg.Add(2) + + var doPingOfDeath sync.Mutex + doPingOfDeath.Lock() + + // This is used later to block outbound writes. + conn := server.conn.(*pipeConn) + + // The server will accept a stream, block outbound writes, and then + // flood its send channel so that no more headers can be queued. + go func() { + defer wg.Done() + + stream, err := server.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + conn.writeBlocker.Lock() + for { + hdr := header(make([]byte, headerSize)) + hdr.encode(typePing, 0, 0, 0) + err = server.sendNoWait(hdr) + if err == nil { + continue + } else if err == ErrConnectionWriteTimeout { + break + } else { + t.Fatalf("err: %v", err) + } + } + + doPingOfDeath.Unlock() + }() + + // The client will open a stream and then send the server a ping once it + // can no longer write. This makes sure the server doesn't deadlock reads + // while trying to reply to the ping with no ability to write. + go func() { + defer wg.Done() + + stream, err := client.OpenStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + // This ping will never unblock because the ping id will never + // show up in a response. 
+ doPingOfDeath.Lock() + go func() { client.Ping() }() + + // Wait for a while to make sure the previous ping times out, + // then turn writes back on and make sure a ping works again. + time.Sleep(2 * server.config.ConnectionWriteTimeout) + conn.writeBlocker.Unlock() + if _, err = client.Ping(); err != nil { + t.Fatalf("err: %v", err) + } + }() + + wg.Wait() +} + +func TestSession_ConnectionWriteTimeout(t *testing.T) { + client, server := testClientServerConfig(testConfNoKeepAlive()) + defer client.Close() + defer server.Close() + + var wg sync.WaitGroup + wg.Add(2) + + go func() { + defer wg.Done() + + stream, err := server.AcceptStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + }() + + // The client will open the stream and then block outbound writes, we'll + // tee up a write and make sure it eventually times out. + go func() { + defer wg.Done() + + stream, err := client.OpenStream() + if err != nil { + t.Fatalf("err: %v", err) + } + defer stream.Close() + + conn := client.conn.(*pipeConn) + conn.writeBlocker.Lock() + + // Since the write goroutine is blocked then this will return a + // timeout since it can't get feedback about whether the write + // worked. + n, err := stream.Write([]byte("hello")) + if err != ErrConnectionWriteTimeout { + t.Fatalf("err: %v", err) + } + if n != 0 { + t.Fatalf("lied about writes: %d", n) + } + }() + + wg.Wait() +} diff --git a/vendor/github.com/influxdata/yamux/spec.md b/vendor/github.com/influxdata/yamux/spec.md new file mode 100644 index 000000000..183d797bd --- /dev/null +++ b/vendor/github.com/influxdata/yamux/spec.md @@ -0,0 +1,140 @@ +# Specification + +We use this document to detail the internal specification of Yamux. +This is used both as a guide for implementing Yamux, but also for +alternative interoperable libraries to be built. + +# Framing + +Yamux uses a streaming connection underneath, but imposes a message +framing so that it can be shared between many logical streams. 
Each +frame contains a header like: + +* Version (8 bits) +* Type (8 bits) +* Flags (16 bits) +* StreamID (32 bits) +* Length (32 bits) + +This means that each header has a 12 byte overhead. +All fields are encoded in network order (big endian). +Each field is described below: + +## Version Field + +The version field is used for future backward compatibility. At the +current time, the field is always set to 0, to indicate the initial +version. + +## Type Field + +The type field is used to switch the frame message type. The following +message types are supported: + +* 0x0 Data - Used to transmit data. May transmit zero length payloads + depending on the flags. + +* 0x1 Window Update - Used to update the sender's receive window size. + This is used to implement per-session flow control. + +* 0x2 Ping - Used to measure RTT. It can also be used to heart-beat + and do keep-alives over TCP. + +* 0x3 Go Away - Used to close a session. + +## Flag Field + +The flags field is used to provide additional information related +to the message type. The following flags are supported: + +* 0x1 SYN - Signals the start of a new stream. May be sent with a data or + window update message. Also sent with a ping to indicate outbound. + +* 0x2 ACK - Acknowledges the start of a new stream. May be sent with a data + or window update message. Also sent with a ping to indicate response. + +* 0x4 FIN - Performs a half-close of a stream. May be sent with a data + message or window update. + +* 0x8 RST - Reset a stream immediately. May be sent with a data or + window update message. + +## StreamID Field + +The StreamID field is used to identify the logical stream the frame +is addressing. The client side should use odd ID's, and the server even. +This prevents any collisions. Additionally, the 0 ID is reserved to represent +the session. + +Both Ping and Go Away messages should always use the 0 StreamID. 
+ +## Length Field + +The meaning of the length field depends on the message type: + +* Data - provides the length of bytes following the header +* Window update - provides a delta update to the window size +* Ping - Contains an opaque value, echoed back +* Go Away - Contains an error code + +# Message Flow + +There is no explicit connection setup, as Yamux relies on an underlying +transport to be provided. However, there is a distinction between client +and server side of the connection. + +## Opening a stream + +To open a stream, an initial data or window update frame is sent +with a new StreamID. The SYN flag should be set to signal a new stream. + +The receiver must then reply with either a data or window update frame +with the StreamID along with the ACK flag to accept the stream or with +the RST flag to reject the stream. + +Because we are relying on the reliable stream underneath, a connection +can begin sending data once the SYN flag is sent. The corresponding +ACK does not need to be received. This is particularly well suited +for an RPC system where a client wants to open a stream and immediately +fire a request without waiting for the RTT of the ACK. + +This does introduce the possibility of a connection being rejected +after data has been sent already. This is a slight semantic difference +from TCP, where the connection cannot be refused after it is opened. +Clients should be prepared to handle this by checking for an error +that indicates a RST was received. + +## Closing a stream + +To close a stream, either side sends a data or window update frame +along with the FIN flag. This does a half-close indicating the sender +will send no further data. + +Once both sides have closed the connection, the stream is closed. + +Alternatively, if an error occurs, the RST flag can be used to +hard close a stream immediately. + +## Flow Control + +Yamux initially starts each stream with a 256KB window size. +There is no window size for the session. 
+ +To prevent the streams from stalling, window update frames should be +sent regularly. Yamux can be configured to provide a larger limit for +window sizes. Both sides assume the initial 256KB window, but can +immediately send a window update as part of the SYN/ACK indicating a +larger window. + +Both sides should track the number of bytes sent in Data frames +only, as only they are tracked as part of the window size. + +## Session termination + +When a session is being terminated, the Go Away message should +be sent. The Length should be set to one of the following to +provide an error code: + +* 0x0 Normal termination +* 0x1 Protocol error +* 0x2 Internal error diff --git a/vendor/github.com/influxdata/yamux/stream.go b/vendor/github.com/influxdata/yamux/stream.go new file mode 100644 index 000000000..c3255a644 --- /dev/null +++ b/vendor/github.com/influxdata/yamux/stream.go @@ -0,0 +1,466 @@ +package yamux + +import ( + "bytes" + "io" + "sync" + "sync/atomic" + "time" +) + +type streamState int + +const ( + streamInit streamState = iota + streamSYNSent + streamSYNReceived + streamEstablished + streamLocalClose + streamRemoteClose + streamClosed + streamReset +) + +// Stream is used to represent a logical stream +// within a session. 
+type Stream struct { + recvWindow uint32 + sendWindow uint32 + + id uint32 + session *Session + + state streamState + stateLock sync.Mutex + + recvBuf *bytes.Buffer + recvLock sync.Mutex + + controlHdr header + controlErr chan error + controlHdrLock sync.Mutex + + sendHdr header + sendErr chan error + sendLock sync.Mutex + + recvNotifyCh chan struct{} + sendNotifyCh chan struct{} + + readDeadline time.Time + writeDeadline time.Time +} + +// newStream is used to construct a new stream within +// a given session for an ID +func newStream(session *Session, id uint32, state streamState) *Stream { + s := &Stream{ + id: id, + session: session, + state: state, + controlHdr: header(make([]byte, headerSize)), + controlErr: make(chan error, 1), + sendHdr: header(make([]byte, headerSize)), + sendErr: make(chan error, 1), + recvWindow: initialStreamWindow, + sendWindow: initialStreamWindow, + recvNotifyCh: make(chan struct{}, 1), + sendNotifyCh: make(chan struct{}, 1), + } + return s +} + +// Session returns the associated stream session +func (s *Stream) Session() *Session { + return s.session +} + +// StreamID returns the ID of this stream +func (s *Stream) StreamID() uint32 { + return s.id +} + +// Read is used to read from the stream +func (s *Stream) Read(b []byte) (n int, err error) { + defer asyncNotify(s.recvNotifyCh) +START: + s.stateLock.Lock() + switch s.state { + case streamLocalClose: + fallthrough + case streamRemoteClose: + fallthrough + case streamClosed: + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + s.stateLock.Unlock() + return 0, io.EOF + } + s.recvLock.Unlock() + case streamReset: + s.stateLock.Unlock() + return 0, ErrConnectionReset + } + s.stateLock.Unlock() + + // If there is no data available, block + s.recvLock.Lock() + if s.recvBuf == nil || s.recvBuf.Len() == 0 { + s.recvLock.Unlock() + goto WAIT + } + + // Read any bytes + n, _ = s.recvBuf.Read(b) + s.recvLock.Unlock() + + // Send a window update 
potentially + err = s.sendWindowUpdate() + return n, err + +WAIT: + var timeout <-chan time.Time + var timer *time.Timer + if !s.readDeadline.IsZero() { + delay := s.readDeadline.Sub(time.Now()) + timer = time.NewTimer(delay) + timeout = timer.C + } + select { + case <-s.recvNotifyCh: + if timer != nil { + timer.Stop() + } + goto START + case <-timeout: + return 0, ErrTimeout + } +} + +// Write is used to write to the stream +func (s *Stream) Write(b []byte) (n int, err error) { + s.sendLock.Lock() + defer s.sendLock.Unlock() + total := 0 + for total < len(b) { + n, err := s.write(b[total:]) + total += n + if err != nil { + return total, err + } + } + return total, nil +} + +// write is used to write to the stream, may return on +// a short write. +func (s *Stream) write(b []byte) (n int, err error) { + var flags uint16 + var max uint32 + var body io.Reader +START: + s.stateLock.Lock() + switch s.state { + case streamLocalClose: + fallthrough + case streamClosed: + s.stateLock.Unlock() + return 0, ErrStreamClosed + case streamReset: + s.stateLock.Unlock() + return 0, ErrConnectionReset + } + s.stateLock.Unlock() + + // If there is no data available, block + window := atomic.LoadUint32(&s.sendWindow) + if window == 0 { + goto WAIT + } + + // Determine the flags if any + flags = s.sendFlags() + + // Send up to our send window + max = min(window, uint32(len(b))) + body = bytes.NewReader(b[:max]) + + // Send the header + s.sendHdr.encode(typeData, flags, s.id, max) + if err := s.session.waitForSendErr(s.sendHdr, body, s.sendErr); err != nil { + return 0, err + } + + // Reduce our send window + atomic.AddUint32(&s.sendWindow, ^uint32(max-1)) + + // Unlock + return int(max), err + +WAIT: + var timeout <-chan time.Time + if !s.writeDeadline.IsZero() { + delay := s.writeDeadline.Sub(time.Now()) + timeout = time.After(delay) + } + select { + case <-s.sendNotifyCh: + goto START + case <-timeout: + return 0, ErrTimeout + } + return 0, nil +} + +// sendFlags determines any 
flags that are appropriate +// based on the current stream state +func (s *Stream) sendFlags() uint16 { + s.stateLock.Lock() + defer s.stateLock.Unlock() + var flags uint16 + switch s.state { + case streamInit: + flags |= flagSYN + s.state = streamSYNSent + case streamSYNReceived: + flags |= flagACK + s.state = streamEstablished + } + return flags +} + +// sendWindowUpdate potentially sends a window update enabling +// further writes to take place. Must be invoked with the lock. +func (s *Stream) sendWindowUpdate() error { + s.controlHdrLock.Lock() + defer s.controlHdrLock.Unlock() + + // Determine the delta update + max := s.session.config.MaxStreamWindowSize + var bufLen uint32 + s.recvLock.Lock() + if s.recvBuf != nil { + bufLen = uint32(s.recvBuf.Len()) + } + delta := (max - bufLen) - s.recvWindow + + // Determine the flags if any + flags := s.sendFlags() + + // Check if we can omit the update + if delta < (max/2) && flags == 0 { + s.recvLock.Unlock() + return nil + } + + // Update our window + s.recvWindow += delta + s.recvLock.Unlock() + + // Send the header + s.controlHdr.encode(typeWindowUpdate, flags, s.id, delta) + if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + return err + } + return nil +} + +// sendClose is used to send a FIN +func (s *Stream) sendClose() error { + s.controlHdrLock.Lock() + defer s.controlHdrLock.Unlock() + + flags := s.sendFlags() + flags |= flagFIN + s.controlHdr.encode(typeWindowUpdate, flags, s.id, 0) + if err := s.session.waitForSendErr(s.controlHdr, nil, s.controlErr); err != nil { + return err + } + return nil +} + +// Close is used to close the stream +func (s *Stream) Close() error { + closeStream := false + s.stateLock.Lock() + switch s.state { + // Opened means we need to signal a close + case streamSYNSent: + fallthrough + case streamSYNReceived: + fallthrough + case streamEstablished: + s.state = streamLocalClose + goto SEND_CLOSE + + case streamLocalClose: + case streamRemoteClose: + 
s.state = streamClosed + closeStream = true + goto SEND_CLOSE + + case streamClosed: + case streamReset: + default: + panic("unhandled state") + } + s.stateLock.Unlock() + return nil +SEND_CLOSE: + s.stateLock.Unlock() + s.sendClose() + s.notifyWaiting() + if closeStream { + s.session.closeStream(s.id) + } + return nil +} + +// forceClose is used for when the session is exiting +func (s *Stream) forceClose() { + s.stateLock.Lock() + s.state = streamClosed + s.stateLock.Unlock() + s.notifyWaiting() +} + +// processFlags is used to update the state of the stream +// based on set flags, if any. Lock must be held +func (s *Stream) processFlags(flags uint16) error { + // Close the stream without holding the state lock + closeStream := false + defer func() { + if closeStream { + s.session.closeStream(s.id) + } + }() + + s.stateLock.Lock() + defer s.stateLock.Unlock() + if flags&flagACK == flagACK { + if s.state == streamSYNSent { + s.state = streamEstablished + } + s.session.establishStream(s.id) + } + if flags&flagFIN == flagFIN { + switch s.state { + case streamSYNSent: + fallthrough + case streamSYNReceived: + fallthrough + case streamEstablished: + s.state = streamRemoteClose + s.notifyWaiting() + case streamLocalClose: + s.state = streamClosed + closeStream = true + s.notifyWaiting() + default: + s.session.logger.Printf("[ERR] yamux: unexpected FIN flag in state %d", s.state) + return ErrUnexpectedFlag + } + } + if flags&flagRST == flagRST { + s.state = streamReset + closeStream = true + s.notifyWaiting() + } + return nil +} + +// notifyWaiting notifies all the waiting channels +func (s *Stream) notifyWaiting() { + asyncNotify(s.recvNotifyCh) + asyncNotify(s.sendNotifyCh) +} + +// incrSendWindow updates the size of our send window +func (s *Stream) incrSendWindow(hdr header, flags uint16) error { + if err := s.processFlags(flags); err != nil { + return err + } + + // Increase window, unblock a sender + atomic.AddUint32(&s.sendWindow, hdr.Length()) + 
asyncNotify(s.sendNotifyCh) + return nil +} + +// readData is used to handle a data frame +func (s *Stream) readData(hdr header, flags uint16, conn io.Reader) error { + if err := s.processFlags(flags); err != nil { + return err + } + + // Check that our recv window is not exceeded + length := hdr.Length() + if length == 0 { + return nil + } + + // Wrap in a limited reader + conn = &io.LimitedReader{R: conn, N: int64(length)} + + // Copy into buffer + s.recvLock.Lock() + + if length > s.recvWindow { + s.session.logger.Printf("[ERR] yamux: receive window exceeded (stream: %d, remain: %d, recv: %d)", s.id, s.recvWindow, length) + return ErrRecvWindowExceeded + } + + if s.recvBuf == nil { + // Allocate the receive buffer just-in-time to fit the full data frame. + // This way we can read in the whole packet without further allocations. + s.recvBuf = bytes.NewBuffer(make([]byte, 0, length)) + } + if _, err := io.Copy(s.recvBuf, conn); err != nil { + s.session.logger.Printf("[ERR] yamux: Failed to read stream data: %v", err) + s.recvLock.Unlock() + return err + } + + // Decrement the receive window + s.recvWindow += ^uint32(length - 1) + s.recvLock.Unlock() + + // Unblock any readers + asyncNotify(s.recvNotifyCh) + return nil +} + +// SetDeadline sets the read and write deadlines +func (s *Stream) SetDeadline(t time.Time) error { + if err := s.SetReadDeadline(t); err != nil { + return err + } + if err := s.SetWriteDeadline(t); err != nil { + return err + } + return nil +} + +// SetReadDeadline sets the deadline for future Read calls. +func (s *Stream) SetReadDeadline(t time.Time) error { + s.readDeadline = t + return nil +} + +// SetWriteDeadline sets the deadline for future Write calls +func (s *Stream) SetWriteDeadline(t time.Time) error { + s.writeDeadline = t + return nil +} + +// Shrink is used to compact the amount of buffers utilized +// This is useful when using Yamux in a connection pool to reduce +// the idle memory utilization. 
+func (s *Stream) Shrink() { + s.recvLock.Lock() + if s.recvBuf != nil && s.recvBuf.Len() == 0 { + s.recvBuf = nil + } + s.recvLock.Unlock() +} diff --git a/vendor/github.com/influxdata/yamux/util.go b/vendor/github.com/influxdata/yamux/util.go new file mode 100644 index 000000000..8a73e9249 --- /dev/null +++ b/vendor/github.com/influxdata/yamux/util.go @@ -0,0 +1,43 @@ +package yamux + +import ( + "sync" + "time" +) + +var ( + timerPool = &sync.Pool{ + New: func() interface{} { + timer := time.NewTimer(time.Hour * 1e6) + timer.Stop() + return timer + }, + } +) + +// asyncSendErr is used to try an async send of an error +func asyncSendErr(ch chan error, err error) { + if ch == nil { + return + } + select { + case ch <- err: + default: + } +} + +// asyncNotify is used to signal a waiting goroutine +func asyncNotify(ch chan struct{}) { + select { + case ch <- struct{}{}: + default: + } +} + +// min computes the minimum of two values +func min(a, b uint32) uint32 { + if a < b { + return a + } + return b +} diff --git a/vendor/github.com/influxdata/yamux/util_test.go b/vendor/github.com/influxdata/yamux/util_test.go new file mode 100644 index 000000000..dd14623af --- /dev/null +++ b/vendor/github.com/influxdata/yamux/util_test.go @@ -0,0 +1,50 @@ +package yamux + +import ( + "testing" +) + +func TestAsyncSendErr(t *testing.T) { + ch := make(chan error) + asyncSendErr(ch, ErrTimeout) + select { + case <-ch: + t.Fatalf("should not get") + default: + } + + ch = make(chan error, 1) + asyncSendErr(ch, ErrTimeout) + select { + case <-ch: + default: + t.Fatalf("should get") + } +} + +func TestAsyncNotify(t *testing.T) { + ch := make(chan struct{}) + asyncNotify(ch) + select { + case <-ch: + t.Fatalf("should not get") + default: + } + + ch = make(chan struct{}, 1) + asyncNotify(ch) + select { + case <-ch: + default: + t.Fatalf("should get") + } +} + +func TestMin(t *testing.T) { + if min(1, 2) != 1 { + t.Fatalf("bad") + } + if min(2, 1) != 1 { + t.Fatalf("bad") + } +} diff 
--git a/vendor/github.com/influxdata/yarpc/.gitignore b/vendor/github.com/influxdata/yarpc/.gitignore new file mode 100644 index 000000000..26f88111c --- /dev/null +++ b/vendor/github.com/influxdata/yarpc/.gitignore @@ -0,0 +1 @@ +/protoc-gen-yarpc \ No newline at end of file diff --git a/vendor/github.com/influxdata/yarpc/Godeps b/vendor/github.com/influxdata/yarpc/Godeps new file mode 100644 index 000000000..da3be638e --- /dev/null +++ b/vendor/github.com/influxdata/yarpc/Godeps @@ -0,0 +1,2 @@ +github.com/gogo/protobuf 1c2b16bc280d6635de6c52fc1471ab962dc36ec9 +github.com/influxdata/yamux e7f91523e648eeb91537e420aebbd96aa64ab6ae diff --git a/vendor/github.com/influxdata/yarpc/LICENSE b/vendor/github.com/influxdata/yarpc/LICENSE new file mode 100644 index 000000000..b8eef2c35 --- /dev/null +++ b/vendor/github.com/influxdata/yarpc/LICENSE @@ -0,0 +1,20 @@ +The MIT License (MIT) + +Copyright (c) 2017-2018 InfluxData Inc. + +Permission is hereby granted, free of charge, to any person obtaining a copy of +this software and associated documentation files (the "Software"), to deal in +the Software without restriction, including without limitation the rights to +use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of +the Software, and to permit persons to whom the Software is furnished to do so, +subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS +FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR +COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER +IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN +CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. 
\ No newline at end of file diff --git a/vendor/github.com/influxdata/yarpc/README.md b/vendor/github.com/influxdata/yarpc/README.md new file mode 100644 index 000000000..83725bb0d --- /dev/null +++ b/vendor/github.com/influxdata/yarpc/README.md @@ -0,0 +1,8 @@ +yarpc +===== + +yarpc is Yet Another RPC package for Go. + +In a barely working state right now, little error handling and lots of details still to resolve. + +* How shall header and trailer data be handled (for open tracing, status) \ No newline at end of file diff --git a/vendor/github.com/influxdata/yarpc/call.go b/vendor/github.com/influxdata/yarpc/call.go new file mode 100644 index 000000000..575e0a122 --- /dev/null +++ b/vendor/github.com/influxdata/yarpc/call.go @@ -0,0 +1,54 @@ +package yarpc + +import ( + "context" + "encoding/binary" + + "github.com/influxdata/yamux" +) + +func Invoke(ctx context.Context, api uint16, args interface{}, reply interface{}, cc *ClientConn) error { + stream, err := cc.NewStream() + if err != nil { + // TODO(sgc): convert to RPC error + return err + } + defer stream.Close() + + var tmp [2]byte + binary.BigEndian.PutUint16(tmp[:], api) + _, err = stream.Write(tmp[:]) + if err != nil { + return err + } + + err = sendRequest(ctx, cc.dopts, stream, args) + if err != nil { + return err + } + + err = recvResponse(ctx, cc.dopts, stream, reply) + if err != nil { + return err + } + + return nil +} + +func sendRequest(ctx context.Context, dopts dialOptions, stream *yamux.Stream, args interface{}) error { + outBuf, err := encode(dopts.codec, args) + if err != nil { + return err + } + _, err = stream.Write(outBuf) + return err +} + +func recvResponse(ctx context.Context, dopts dialOptions, stream *yamux.Stream, reply interface{}) error { + p := &parser{r: stream} + err := decode(p, dopts.codec, stream, reply) + if err != nil { + return err + } + return nil +} diff --git a/vendor/github.com/influxdata/yarpc/call_test.go b/vendor/github.com/influxdata/yarpc/call_test.go new file mode 
100644 index 000000000..e5f098461 --- /dev/null +++ b/vendor/github.com/influxdata/yarpc/call_test.go @@ -0,0 +1 @@ +package yarpc diff --git a/vendor/github.com/influxdata/yarpc/clientconn.go b/vendor/github.com/influxdata/yarpc/clientconn.go new file mode 100644 index 000000000..63e422956 --- /dev/null +++ b/vendor/github.com/influxdata/yarpc/clientconn.go @@ -0,0 +1,65 @@ +package yarpc + +import ( + "net" + + "context" + + "github.com/influxdata/yamux" +) + +type dialOptions struct { + codec Codec +} + +type DialOption func(*dialOptions) + +func WithCodec(c Codec) DialOption { + return func(o *dialOptions) { + o.codec = c + } +} + +func Dial(addr string, opt ...DialOption) (*ClientConn, error) { + return DialContext(context.Background(), addr, opt...) +} + +func DialContext(ctx context.Context, addr string, opts ...DialOption) (*ClientConn, error) { + cn, err := net.Dial("tcp", addr) + if err != nil { + return nil, err + } + + s, err := yamux.Client(cn, nil) + if err != nil { + return nil, err + } + cc := &ClientConn{s: s} + cc.ctx, cc.cancel = context.WithCancel(ctx) + + for _, opt := range opts { + opt(&cc.dopts) + } + + if cc.dopts.codec == nil { + cc.dopts.codec = NewCodec() + } + + return cc, nil +} + +type ClientConn struct { + ctx context.Context + cancel context.CancelFunc + s *yamux.Session + dopts dialOptions +} + +func (cc *ClientConn) NewStream() (*yamux.Stream, error) { + return cc.s.OpenStream() +} + +func (cc *ClientConn) Close() error { + cc.cancel() + return cc.s.Close() +} diff --git a/vendor/github.com/influxdata/yarpc/codec.go b/vendor/github.com/influxdata/yarpc/codec.go new file mode 100644 index 000000000..e838b15f2 --- /dev/null +++ b/vendor/github.com/influxdata/yarpc/codec.go @@ -0,0 +1,116 @@ +package yarpc + +import ( + "encoding/binary" + "io" + "sync" + + "github.com/gogo/protobuf/codec" + "github.com/influxdata/yamux" + "github.com/influxdata/yarpc/codes" + "github.com/influxdata/yarpc/status" +) + +var ( + codecPool = &sync.Pool{ 
+ New: func() interface{} { + return codec.New(1024) + }, + } +) + +type pooledCodec struct{} + +var ( + cd = &pooledCodec{} +) + +func NewCodec() Codec { + return cd +} + +func (*pooledCodec) Marshal(v interface{}) ([]byte, error) { + ci := codecPool.Get() + c := ci.(codec.Codec) + data, err := c.Marshal(v) + codecPool.Put(ci) + return data, err +} + +func (*pooledCodec) Unmarshal(data []byte, v interface{}) error { + ci := codecPool.Get() + c := ci.(codec.Codec) + err := c.Unmarshal(data, v) + codecPool.Put(ci) + return err +} + +type Codec interface { + Marshal(v interface{}) ([]byte, error) + Unmarshal(data []byte, v interface{}) error +} + +type parser struct { + r io.Reader + header [4]byte +} + +func (p *parser) recvMsg() (msg []byte, err error) { + if _, err := io.ReadFull(p.r, p.header[:]); err != nil { + return nil, err + } + + length := binary.BigEndian.Uint32(p.header[:]) + if length == 0 { + return nil, nil + } + + msg = make([]byte, int(length)) + if _, err := io.ReadFull(p.r, msg); err != nil { + if err == io.EOF { + err = io.ErrUnexpectedEOF + } + return nil, err + } + return msg, nil +} + +func encode(c Codec, msg interface{}) ([]byte, error) { + var ( + b []byte + length uint + ) + + if msg != nil { + var err error + b, err = c.Marshal(msg) + if err != nil { + // TODO(sgc): should return error with status code "internal" + return nil, status.Errorf(codes.Internal, "rpc: error while marshaling %v", err) + } + length = uint(len(b)) + } + + const ( + sizeLen = 4 + ) + + var buf = make([]byte, sizeLen+length) + binary.BigEndian.PutUint32(buf, uint32(length)) + copy(buf[4:], b) + + return buf, nil +} + +func decode(p *parser, c Codec, s *yamux.Stream, m interface{}) error { + d, err := p.recvMsg() + if err != nil { + return err + } + + if err := c.Unmarshal(d, m); err != nil { + return status.Errorf(codes.Internal, "rpc: failed to unmarshal received message %v", err) + } + + return nil +} diff --git 
a/vendor/github.com/influxdata/yarpc/codes/codes.pb.go b/vendor/github.com/influxdata/yarpc/codes/codes.pb.go new file mode 100644 index 000000000..6a3571a99 --- /dev/null +++ b/vendor/github.com/influxdata/yarpc/codes/codes.pb.go @@ -0,0 +1,86 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: codes/codes.proto + +/* +Package codes is a generated protocol buffer package. + +It is generated from these files: + codes/codes.proto + +It has these top-level messages: +*/ +package codes + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Code int32 + +const ( + // OK is returned on success. + OK Code = 0 + // Unknown error. + Unknown Code = 1 + // Unimplemented indicates operation is not implemented or not + // supported/enabled in this service. + Unimplemented Code = 2 + // Internal errors. Means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. 
+ Internal Code = 3 +) + +var Code_name = map[int32]string{ + 0: "OK", + 1: "UNKNOWN", + 2: "UNIMPLEMETED", + 3: "INTERNAL", +} +var Code_value = map[string]int32{ + "OK": 0, + "UNKNOWN": 1, + "UNIMPLEMETED": 2, + "INTERNAL": 3, +} + +func (x Code) String() string { + return proto.EnumName(Code_name, int32(x)) +} +func (Code) EnumDescriptor() ([]byte, []int) { return fileDescriptorCodes, []int{0} } + +func init() { + proto.RegisterEnum("codes.Code", Code_name, Code_value) +} + +func init() { proto.RegisterFile("codes/codes.proto", fileDescriptorCodes) } + +var fileDescriptorCodes = []byte{ + // 219 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x4c, 0xce, 0x4f, 0x49, + 0x2d, 0xd6, 0x07, 0x93, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0xac, 0x60, 0x8e, 0x94, 0x6e, + 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x7a, 0xbe, 0x3e, + 0x58, 0x36, 0xa9, 0x34, 0x0d, 0xcc, 0x03, 0x73, 0xc0, 0x2c, 0x88, 0x2e, 0xad, 0x72, 0x2e, 0x16, + 0xe7, 0xfc, 0x94, 0x54, 0x21, 0x3e, 0x2e, 0x26, 0x7f, 0x6f, 0x01, 0x06, 0x29, 0xb6, 0xae, 0xb9, + 0x0a, 0x4c, 0xfe, 0xde, 0x42, 0x12, 0x5c, 0xec, 0xa1, 0x7e, 0xde, 0x7e, 0xfe, 0xe1, 0x7e, 0x02, + 0x8c, 0x52, 0xdc, 0x5d, 0x73, 0x15, 0xd8, 0x43, 0xf3, 0xb2, 0xf3, 0xf2, 0xcb, 0xf3, 0x84, 0x94, + 0xb9, 0x78, 0x42, 0xfd, 0x3c, 0x7d, 0x03, 0x7c, 0x5c, 0x7d, 0x5d, 0x43, 0x5c, 0x5d, 0x04, 0x98, + 0xa4, 0x04, 0xbb, 0xe6, 0x2a, 0xf0, 0x86, 0xe6, 0x65, 0xe6, 0x16, 0xe4, 0xa4, 0xe6, 0xa6, 0xe6, + 0x95, 0xa4, 0xa6, 0x08, 0x49, 0x71, 0x71, 0x78, 0xfa, 0x85, 0xb8, 0x06, 0xf9, 0x39, 0xfa, 0x08, + 0x30, 0x4b, 0xf1, 0x74, 0xcd, 0x55, 0xe0, 0xf0, 0xcc, 0x2b, 0x49, 0x2d, 0xca, 0x4b, 0xcc, 0x91, + 0x62, 0xe9, 0x58, 0x2c, 0xc7, 0xe0, 0x24, 0x70, 0xe2, 0x91, 0x1c, 0xe3, 0x85, 0x47, 0x72, 0x8c, + 0x0f, 0x1e, 0xc9, 0x31, 0x4e, 0x78, 0x2c, 0xc7, 0x90, 0xc4, 0x06, 0x76, 0x91, 0x31, 0x20, 0x00, + 0x00, 0xff, 0xff, 0xd4, 0x54, 0x67, 0x0c, 0xdc, 0x00, 0x00, 0x00, +} diff 
--git a/vendor/github.com/influxdata/yarpc/codes/codes.proto b/vendor/github.com/influxdata/yarpc/codes/codes.proto new file mode 100644 index 000000000..e00dfe110 --- /dev/null +++ b/vendor/github.com/influxdata/yarpc/codes/codes.proto @@ -0,0 +1,23 @@ +syntax = "proto3"; +package codes; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; + +enum Code { + option (gogoproto.goproto_enum_prefix) = false; + + // OK is returned on success. + OK = 0 [(gogoproto.enumvalue_customname) = "OK"]; + + // Unknown error. + UNKNOWN = 1 [(gogoproto.enumvalue_customname) = "Unknown"]; + + // Unimplemented indicates operation is not implemented or not + // supported/enabled in this service. + UNIMPLEMETED = 2 [(gogoproto.enumvalue_customname) = "Unimplemented"]; + + // Internal errors. Means some invariants expected by underlying + // system has been broken. If you see one of these errors, + // something is very broken. + INTERNAL = 3 [(gogoproto.enumvalue_customname) = "Internal"]; +} diff --git a/vendor/github.com/influxdata/yarpc/rpc.go b/vendor/github.com/influxdata/yarpc/rpc.go new file mode 100644 index 000000000..c3b3eb633 --- /dev/null +++ b/vendor/github.com/influxdata/yarpc/rpc.go @@ -0,0 +1,9 @@ +package yarpc + +//go:generate protoc -I$GOPATH/src -I. --gogofaster_out=. codes/codes.proto +//go:generate protoc -I$GOPATH/src -I. --gogofaster_out=. status/status.proto +//go:generate protoc -I$GOPATH/src -I. --gogofaster_out=Mgoogle/protobuf/descriptor.proto=github.com/gogo/protobuf/protoc-gen-gogo/descriptor:. 
yarpcproto/yarpc.proto + +const ( + SupportPackageIsVersion1 = true +) diff --git a/vendor/github.com/influxdata/yarpc/server.go b/vendor/github.com/influxdata/yarpc/server.go new file mode 100644 index 000000000..4be56f1a4 --- /dev/null +++ b/vendor/github.com/influxdata/yarpc/server.go @@ -0,0 +1,288 @@ +package yarpc + +import ( + "net" + + "encoding/binary" + "io" + + "context" + + "reflect" + + "log" + + "github.com/influxdata/yamux" + "github.com/influxdata/yarpc/codes" + "github.com/influxdata/yarpc/status" +) + +type methodHandler func(srv interface{}, ctx context.Context, dec func(interface{}) error) (interface{}, error) + +type MethodDesc struct { + Index uint8 + MethodName string + Handler methodHandler +} + +// ServiceDesc represents an RPC service's specification. +type ServiceDesc struct { + Index uint8 + ServiceName string + // The pointer to the service interface. Used to check whether the user + // provided implementation satisfies the interface requirements. + HandlerType interface{} + Methods []MethodDesc + Streams []StreamDesc + Metadata interface{} +} + +type service struct { + server interface{} + md map[uint8]*MethodDesc + sd map[uint8]*StreamDesc +} + +type Server struct { + opts options + m map[uint8]*service + serve bool + lis net.Listener +} + +type options struct { + codec Codec +} + +type ServerOption func(*options) + +func CustomCodec(c Codec) ServerOption { + return func(o *options) { + o.codec = c + } +} + +func NewServer(opts ...ServerOption) *Server { + s := &Server{ + m: make(map[uint8]*service), + } + + for _, opt := range opts { + opt(&s.opts) + } + + // defaults + if s.opts.codec == nil { + s.opts.codec = NewCodec() + } + + return s +} + +// RegisterService registers a service and its implementation to the gRPC +// server. It is called from the IDL generated code. This must be called before +// invoking Serve. 
+func (s *Server) RegisterService(sd *ServiceDesc, ss interface{}) { + ht := reflect.TypeOf(sd.HandlerType).Elem() + st := reflect.TypeOf(ss) + if !st.Implements(ht) { + log.Fatalf("rpc: Server.RegisterService found the handler of type %v that does not satisfy %v", st, ht) + } + s.register(sd, ss) +} + +func (s *Server) register(sd *ServiceDesc, ss interface{}) { + // s.opts.log.Info("register service", zap.String("name", sd.ServiceName), zap.Uint("index", uint(sd.Index))) + if s.serve { + log.Fatalf("rpc: Server.RegisterService after Server.Serve for %q", sd.ServiceName) + } + if _, ok := s.m[sd.Index]; ok { + log.Fatalf("rpc: Server.RegisterService found duplicate service registration for %q", sd.ServiceName) + } + + srv := &service{ + server: ss, + md: make(map[uint8]*MethodDesc), + sd: make(map[uint8]*StreamDesc), + } + for i := range sd.Methods { + d := &sd.Methods[i] + srv.md[d.Index] = d + } + for i := range sd.Streams { + d := &sd.Streams[i] + srv.sd[d.Index] = d + } + s.m[sd.Index] = srv +} + +func (s *Server) Serve(lis net.Listener) error { + s.lis = lis + for { + rawConn, err := lis.Accept() + if err != nil { + if ne, ok := err.(interface { + Temporary() bool + }); ok && ne.Temporary() { + // TODO(sgc): add logic to handle temporary errors + } + return err + } + + go s.handleRawConn(rawConn) + } +} + +func (s *Server) Stop() { + if s.lis != nil { + s.lis.Close() + s.lis = nil + } +} + +func (s *Server) handleRawConn(rawConn net.Conn) { + session, err := yamux.Server(rawConn, nil) + if err != nil { + log.Printf("ERR yamux.Server failed: error=%v", err) + rawConn.Close() + return + } + + s.serveSession(session) +} + +func (s *Server) serveSession(session *yamux.Session) { + for { + stream, err := session.AcceptStream() + if err != nil { + if err != io.EOF { + // TODO(sgc): handle session errors + log.Printf("ERR session.AcceptStream failed: error=%v", err) + session.Close() + } + return + } + + go s.handleStream(stream) + } +} + +func decodeServiceMethod(v 
uint16) (svc, mth uint8) { + //┌────────────────────────┬────────────────────────┐ + //│ SERVICE (8) │ METHOD (8) │ + //└────────────────────────┴────────────────────────┘ + + return uint8(v >> 8), uint8(v) +} + +func (s *Server) handleStream(st *yamux.Stream) { + defer st.Close() + + var tmp [2]byte + io.ReadAtLeast(st, tmp[:], 2) + service, method := decodeServiceMethod(binary.BigEndian.Uint16(tmp[:])) + srv, ok := s.m[service] + if !ok { + // TODO(sgc): handle unknown service + log.Printf("invalid service identifier: service=%d", service) + return + } + + if md, ok := srv.md[method]; ok { + // handle unary + s.handleUnaryRPC(st, srv, md) + return + } + + if sd, ok := srv.sd[method]; ok { + // handle unary + s.handleStreamingRPC(st, srv, sd) + return + } + + // TODO(sgc): handle unknown method + log.Printf("ERR invalid method identifier: service=%d method=%d", service, method) +} + +func (s *Server) handleStreamingRPC(st *yamux.Stream, srv *service, sd *StreamDesc) { + ss := &serverStream{ + cn: st, + codec: s.opts.codec, + p: &parser{r: st}, + } + + var appErr error + var server interface{} + if srv != nil { + server = srv.server + } + + appErr = sd.Handler(server, ss) + if appErr != nil { + // TODO(sgc): handle app error using similar code style to gRPC + log.Printf("ERR sd.Handler failed: error=%v", appErr) + // appStatus, ok := status.FromError(appErr) + return + } + + // TODO(sgc): write OK status? 
+} + +func (s *Server) handleUnaryRPC(st *yamux.Stream, srv *service, md *MethodDesc) error { + p := &parser{r: st} + req, err := p.recvMsg() + if err == io.EOF { + return err + } + + if err == io.ErrUnexpectedEOF { + return status.Errorf(codes.Internal, err.Error()) + } + + df := func(v interface{}) error { + if err := s.opts.codec.Unmarshal(req, v); err != nil { + return status.Errorf(codes.Internal, "rpc: error unmarshalling request: %v", err) + } + return nil + } + + reply, appErr := md.Handler(srv.server, context.Background(), df) + if appErr != nil { + appStatus, ok := status.FromError(appErr) + if !ok { + // convert to app error + appStatus = &status.Status{Code: codes.Unknown, Message: appErr.Error()} + appErr = appStatus + } + + // TODO(sgc): write error status + return appErr + } + + if err := s.sendResponse(st, reply); err != nil { + if err == io.EOF { + return err + } + + if s, ok := status.FromError(err); ok { + // TODO(sgc): write error status + _ = s + } + + return err + } + + // TODO(sgc): write OK status + return nil +} + +func (s *Server) sendResponse(stream *yamux.Stream, msg interface{}) error { + buf, err := encode(s.opts.codec, msg) + if err != nil { + // s.opts.log.Error("rpc: server failed to encode reply", zap.Error(err)) + return err + } + + _, err = stream.Write(buf) + return err +} diff --git a/vendor/github.com/influxdata/yarpc/status/status.go b/vendor/github.com/influxdata/yarpc/status/status.go new file mode 100644 index 000000000..a7cb6a64d --- /dev/null +++ b/vendor/github.com/influxdata/yarpc/status/status.go @@ -0,0 +1,28 @@ +package status + +import ( + "fmt" + + "github.com/influxdata/yarpc/codes" +) + +func (m *Status) Error() string { + return fmt.Sprintf("rpc error: code = %s desc = %s", m.Code, m.Message) +} + +// FromError returns a Status representing err if it was produced from this +// package, otherwise it returns nil, false. 
+func FromError(err error) (s *Status, ok bool) { + if err == nil { + return &Status{Code: codes.OK}, true + } + if s, ok := err.(*Status); ok { + return s, true + } + return nil, false +} + +// Errorf returns Error(c, fmt.Sprintf(format, a...)). +func Errorf(c codes.Code, format string, a ...interface{}) error { + return &Status{Code: c, Message: fmt.Sprintf(format, a...)} +} diff --git a/vendor/github.com/influxdata/yarpc/status/status.pb.go b/vendor/github.com/influxdata/yarpc/status/status.pb.go new file mode 100644 index 000000000..0fca31348 --- /dev/null +++ b/vendor/github.com/influxdata/yarpc/status/status.pb.go @@ -0,0 +1,362 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: status/status.proto + +/* + Package status is a generated protocol buffer package. + + It is generated from these files: + status/status.proto + + It has these top-level messages: + Status +*/ +package status + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "github.com/gogo/protobuf/gogoproto" +import codes "github.com/influxdata/yarpc/codes" + +import io "io" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. 
+const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +type Status struct { + Code codes.Code `protobuf:"varint,1,opt,name=code,proto3,enum=codes.Code" json:"code,omitempty"` + Message string `protobuf:"bytes,2,opt,name=message,proto3" json:"message,omitempty"` +} + +func (m *Status) Reset() { *m = Status{} } +func (m *Status) String() string { return proto.CompactTextString(m) } +func (*Status) ProtoMessage() {} +func (*Status) Descriptor() ([]byte, []int) { return fileDescriptorStatus, []int{0} } + +func (m *Status) GetCode() codes.Code { + if m != nil { + return m.Code + } + return codes.OK +} + +func (m *Status) GetMessage() string { + if m != nil { + return m.Message + } + return "" +} + +func init() { + proto.RegisterType((*Status)(nil), "status.Status") +} +func (m *Status) Marshal() (dAtA []byte, err error) { + size := m.Size() + dAtA = make([]byte, size) + n, err := m.MarshalTo(dAtA) + if err != nil { + return nil, err + } + return dAtA[:n], nil +} + +func (m *Status) MarshalTo(dAtA []byte) (int, error) { + var i int + _ = i + var l int + _ = l + if m.Code != 0 { + dAtA[i] = 0x8 + i++ + i = encodeVarintStatus(dAtA, i, uint64(m.Code)) + } + if len(m.Message) > 0 { + dAtA[i] = 0x12 + i++ + i = encodeVarintStatus(dAtA, i, uint64(len(m.Message))) + i += copy(dAtA[i:], m.Message) + } + return i, nil +} + +func encodeFixed64Status(dAtA []byte, offset int, v uint64) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + dAtA[offset+4] = uint8(v >> 32) + dAtA[offset+5] = uint8(v >> 40) + dAtA[offset+6] = uint8(v >> 48) + dAtA[offset+7] = uint8(v >> 56) + return offset + 8 +} +func encodeFixed32Status(dAtA []byte, offset int, v uint32) int { + dAtA[offset] = uint8(v) + dAtA[offset+1] = uint8(v >> 8) + dAtA[offset+2] = uint8(v >> 16) + dAtA[offset+3] = uint8(v >> 24) + return offset + 4 +} +func encodeVarintStatus(dAtA []byte, offset int, v uint64) int { + for 
v >= 1<<7 { + dAtA[offset] = uint8(v&0x7f | 0x80) + v >>= 7 + offset++ + } + dAtA[offset] = uint8(v) + return offset + 1 +} +func (m *Status) Size() (n int) { + var l int + _ = l + if m.Code != 0 { + n += 1 + sovStatus(uint64(m.Code)) + } + l = len(m.Message) + if l > 0 { + n += 1 + l + sovStatus(uint64(l)) + } + return n +} + +func sovStatus(x uint64) (n int) { + for { + n++ + x >>= 7 + if x == 0 { + break + } + } + return n +} +func sozStatus(x uint64) (n int) { + return sovStatus(uint64((x << 1) ^ uint64((int64(x) >> 63)))) +} +func (m *Status) Unmarshal(dAtA []byte) error { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + preIndex := iNdEx + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStatus + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + fieldNum := int32(wire >> 3) + wireType := int(wire & 0x7) + if wireType == 4 { + return fmt.Errorf("proto: Status: wiretype end group for non-group") + } + if fieldNum <= 0 { + return fmt.Errorf("proto: Status: illegal tag %d (wire type %d)", fieldNum, wire) + } + switch fieldNum { + case 1: + if wireType != 0 { + return fmt.Errorf("proto: wrong wireType = %d for field Code", wireType) + } + m.Code = 0 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStatus + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + m.Code |= (codes.Code(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + case 2: + if wireType != 2 { + return fmt.Errorf("proto: wrong wireType = %d for field Message", wireType) + } + var stringLen uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return ErrIntOverflowStatus + } + if iNdEx >= l { + return io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + stringLen |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + intStringLen := int(stringLen) + if 
intStringLen < 0 { + return ErrInvalidLengthStatus + } + postIndex := iNdEx + intStringLen + if postIndex > l { + return io.ErrUnexpectedEOF + } + m.Message = string(dAtA[iNdEx:postIndex]) + iNdEx = postIndex + default: + iNdEx = preIndex + skippy, err := skipStatus(dAtA[iNdEx:]) + if err != nil { + return err + } + if skippy < 0 { + return ErrInvalidLengthStatus + } + if (iNdEx + skippy) > l { + return io.ErrUnexpectedEOF + } + iNdEx += skippy + } + } + + if iNdEx > l { + return io.ErrUnexpectedEOF + } + return nil +} +func skipStatus(dAtA []byte) (n int, err error) { + l := len(dAtA) + iNdEx := 0 + for iNdEx < l { + var wire uint64 + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStatus + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + wire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + wireType := int(wire & 0x7) + switch wireType { + case 0: + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStatus + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + iNdEx++ + if dAtA[iNdEx-1] < 0x80 { + break + } + } + return iNdEx, nil + case 1: + iNdEx += 8 + return iNdEx, nil + case 2: + var length int + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStatus + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + length |= (int(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + iNdEx += length + if length < 0 { + return 0, ErrInvalidLengthStatus + } + return iNdEx, nil + case 3: + for { + var innerWire uint64 + var start int = iNdEx + for shift := uint(0); ; shift += 7 { + if shift >= 64 { + return 0, ErrIntOverflowStatus + } + if iNdEx >= l { + return 0, io.ErrUnexpectedEOF + } + b := dAtA[iNdEx] + iNdEx++ + innerWire |= (uint64(b) & 0x7F) << shift + if b < 0x80 { + break + } + } + innerWireType := int(innerWire & 0x7) + if innerWireType == 4 { + break + } + next, err := 
skipStatus(dAtA[start:]) + if err != nil { + return 0, err + } + iNdEx = start + next + } + return iNdEx, nil + case 4: + return iNdEx, nil + case 5: + iNdEx += 4 + return iNdEx, nil + default: + return 0, fmt.Errorf("proto: illegal wireType %d", wireType) + } + } + panic("unreachable") +} + +var ( + ErrInvalidLengthStatus = fmt.Errorf("proto: negative length found during unmarshaling") + ErrIntOverflowStatus = fmt.Errorf("proto: integer overflow") +) + +func init() { proto.RegisterFile("status/status.proto", fileDescriptorStatus) } + +var fileDescriptorStatus = []byte{ + // 177 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xe2, 0x12, 0x2e, 0x2e, 0x49, 0x2c, + 0x29, 0x2d, 0xd6, 0x87, 0x50, 0x7a, 0x05, 0x45, 0xf9, 0x25, 0xf9, 0x42, 0x6c, 0x10, 0x9e, 0x94, + 0x6e, 0x7a, 0x66, 0x49, 0x46, 0x69, 0x92, 0x5e, 0x72, 0x7e, 0xae, 0x7e, 0x7a, 0x7e, 0x7a, 0xbe, + 0x3e, 0x58, 0x3a, 0xa9, 0x34, 0x0d, 0xcc, 0x03, 0x73, 0xc0, 0x2c, 0x88, 0x36, 0x14, 0xe5, 0x99, + 0x79, 0x69, 0x39, 0xa5, 0x15, 0x29, 0x89, 0x25, 0x89, 0xfa, 0x95, 0x89, 0x45, 0x05, 0xc9, 0xfa, + 0xc9, 0xf9, 0x29, 0xa9, 0xc5, 0x10, 0x12, 0xa2, 0x5c, 0xc9, 0x99, 0x8b, 0x2d, 0x18, 0x6c, 0x8f, + 0x90, 0x3c, 0x17, 0x0b, 0x48, 0x42, 0x82, 0x51, 0x81, 0x51, 0x83, 0xcf, 0x88, 0x5b, 0x0f, 0xa2, + 0xca, 0x39, 0x3f, 0x25, 0x35, 0x08, 0x2c, 0x21, 0x24, 0xc1, 0xc5, 0x9e, 0x9b, 0x5a, 0x5c, 0x9c, + 0x98, 0x9e, 0x2a, 0xc1, 0xa4, 0xc0, 0xa8, 0xc1, 0x19, 0x04, 0xe3, 0x3a, 0x09, 0x9c, 0x78, 0x24, + 0xc7, 0x78, 0xe1, 0x91, 0x1c, 0xe3, 0x83, 0x47, 0x72, 0x8c, 0x13, 0x1e, 0xcb, 0x31, 0x24, 0xb1, + 0x81, 0x4d, 0x37, 0x06, 0x04, 0x00, 0x00, 0xff, 0xff, 0x8c, 0xec, 0x25, 0x86, 0xda, 0x00, 0x00, + 0x00, +} diff --git a/vendor/github.com/influxdata/yarpc/status/status.proto b/vendor/github.com/influxdata/yarpc/status/status.proto new file mode 100644 index 000000000..e53b11ff9 --- /dev/null +++ b/vendor/github.com/influxdata/yarpc/status/status.proto @@ -0,0 +1,10 @@ +syntax = "proto3"; 
+package status; + +import "github.com/gogo/protobuf/gogoproto/gogo.proto"; +import "github.com/influxdata/yarpc/codes/codes.proto"; + +message Status { + codes.Code code = 1; + string message = 2; +} diff --git a/vendor/github.com/influxdata/yarpc/stream.go b/vendor/github.com/influxdata/yarpc/stream.go new file mode 100644 index 000000000..ae9ac5078 --- /dev/null +++ b/vendor/github.com/influxdata/yarpc/stream.go @@ -0,0 +1,183 @@ +package yarpc + +import ( + "context" + "encoding/binary" + "errors" + "io" + + "github.com/influxdata/yamux" + "github.com/influxdata/yarpc/codes" + "github.com/influxdata/yarpc/status" +) + +type StreamHandler func(srv interface{}, stream ServerStream) error + +type StreamDesc struct { + Index uint8 + StreamName string + Handler StreamHandler + + ServerStreams bool + ClientStreams bool +} + +// Stream defines the common interface a client or server stream has to satisfy. +type Stream interface { + // Context returns the context for this stream. + Context() context.Context + // SendMsg blocks until it sends m, the stream is done or the stream + // breaks. + // On error, it aborts the stream and returns an RPC status on client + // side. On server side, it simply returns the error to the caller. + // SendMsg is called by generated code. Also Users can call SendMsg + // directly when it is really needed in their use cases. + // It's safe to have a goroutine calling SendMsg and another goroutine calling + // recvMsg on the same stream at the same time. + // But it is not safe to call SendMsg on the same stream in different goroutines. + SendMsg(m interface{}) error + // RecvMsg blocks until it receives a message or the stream is + // done. On client side, it returns io.EOF when the stream is done. On + // any other error, it aborts the stream and returns an RPC status. On + // server side, it simply returns the error to the caller. 
+ // It's safe to have a goroutine calling SendMsg and another goroutine calling + // recvMsg on the same stream at the same time. + // But it is not safe to call RecvMsg on the same stream in different goroutines. + RecvMsg(m interface{}) error +} + +// ClientStream defines the interface a client stream has to satisfy. +type ClientStream interface { + // CloseSend closes the send direction of the stream. It closes the stream + // when non-nil error is met. + CloseSend() error + Stream +} + +func NewClientStream(ctx context.Context, desc *StreamDesc, cc *ClientConn, api uint16) (ClientStream, error) { + cn, err := cc.NewStream() + if err != nil { + return nil, err + } + + var tmp [2]byte + binary.BigEndian.PutUint16(tmp[:], api) + _, err = cn.Write(tmp[:]) + if err != nil { + return nil, err + } + + cs := &clientStream{ + cn: cn, + codec: cc.dopts.codec, + p: &parser{r: cn}, + desc: desc, + ctx: ctx, + closing: make(chan struct{}), + } + go func() { + select { + case <-ctx.Done(): + cs.CloseSend() + case <-cs.closing: + } + }() + + return cs, nil +} + +type clientStream struct { + cn *yamux.Stream + codec Codec + p *parser + desc *StreamDesc + + ctx context.Context + closing chan struct{} +} + +func (c *clientStream) CloseSend() error { + select { + case <-c.closing: + default: + close(c.closing) + } + return c.cn.Close() +} + +func (c *clientStream) Context() context.Context { + return c.ctx +} + +func (c *clientStream) SendMsg(m interface{}) error { + select { + case <-c.closing: + return errors.New("stream closed") + default: + } + out, err := encode(c.codec, m) + if err != nil { + return err + } + + _, err = c.cn.Write(out) + return err +} + +func (c *clientStream) RecvMsg(m interface{}) error { + select { + case <-c.closing: + return errors.New("stream closed") + default: + } + err := decode(c.p, c.codec, c.cn, m) + if err == nil { + if !c.desc.ClientStreams || c.desc.ServerStreams { + return nil + } + } + return err +} + +type ServerStream interface { + 
Stream +} + +type serverStream struct { + cn *yamux.Stream + codec Codec + p *parser + buf []byte +} + +func (s *serverStream) Context() context.Context { + panic("implement me") +} + +func (s *serverStream) SendMsg(m interface{}) error { + out, err := encode(s.codec, m) + if err != nil { + return err + } + + _, err = s.cn.Write(out) + if err != nil { + // TODO(sgc): wrap in status error + return err + } + return nil +} + +func (s *serverStream) RecvMsg(m interface{}) error { + if err := decode(s.p, s.codec, s.cn, m); err != nil { + if err == io.EOF { + return err + } + if err == io.ErrUnexpectedEOF { + err = status.Errorf(codes.Internal, io.ErrUnexpectedEOF.Error()) + } + // TODO(sgc): wrap in status error + return err + } + return nil +} diff --git a/vendor/github.com/influxdata/yarpc/yarpcproto/helper.go b/vendor/github.com/influxdata/yarpc/yarpcproto/helper.go new file mode 100644 index 000000000..6810be698 --- /dev/null +++ b/vendor/github.com/influxdata/yarpc/yarpcproto/helper.go @@ -0,0 +1,33 @@ +package yarpcproto + +import ( + "reflect" + + proto "github.com/gogo/protobuf/proto" + google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" +) + +func GetServiceIndex(service *google_protobuf.ServiceDescriptorProto) int { + return GetIntExtension(service.Options, E_YarpcServiceIndex, -1) +} + +func GetMethodIndex(service *google_protobuf.MethodDescriptorProto) int { + return GetIntExtension(service.Options, E_YarpcMethodIndex, -1) +} + +func GetIntExtension(pb proto.Message, extension *proto.ExtensionDesc, ifnotset int) int { + if reflect.ValueOf(pb).IsNil() { + return ifnotset + } + value, err := proto.GetExtension(pb, extension) + if err != nil { + return ifnotset + } + if value == nil { + return ifnotset + } + if value.(*uint32) == nil { + return ifnotset + } + return int(*(value.(*uint32))) +} diff --git a/vendor/github.com/influxdata/yarpc/yarpcproto/yarpc.pb.go b/vendor/github.com/influxdata/yarpc/yarpcproto/yarpc.pb.go new file mode 100644 
index 000000000..3db97c757 --- /dev/null +++ b/vendor/github.com/influxdata/yarpc/yarpcproto/yarpc.pb.go @@ -0,0 +1,69 @@ +// Code generated by protoc-gen-gogo. DO NOT EDIT. +// source: yarpcproto/yarpc.proto + +/* +Package yarpcproto is a generated protocol buffer package. + +It is generated from these files: + yarpcproto/yarpc.proto + +It has these top-level messages: +*/ +package yarpcproto + +import proto "github.com/gogo/protobuf/proto" +import fmt "fmt" +import math "math" +import google_protobuf "github.com/gogo/protobuf/protoc-gen-gogo/descriptor" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.GoGoProtoPackageIsVersion2 // please upgrade the proto package + +var E_YarpcServiceIndex = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.ServiceOptions)(nil), + ExtensionType: (*uint32)(nil), + Field: 50000, + Name: "yarpcproto.yarpc_service_index", + Tag: "varint,50000,opt,name=yarpc_service_index,json=yarpcServiceIndex", + Filename: "yarpcproto/yarpc.proto", +} + +var E_YarpcMethodIndex = &proto.ExtensionDesc{ + ExtendedType: (*google_protobuf.MethodOptions)(nil), + ExtensionType: (*uint32)(nil), + Field: 50000, + Name: "yarpcproto.yarpc_method_index", + Tag: "varint,50000,opt,name=yarpc_method_index,json=yarpcMethodIndex", + Filename: "yarpcproto/yarpc.proto", +} + +func init() { + proto.RegisterExtension(E_YarpcServiceIndex) + proto.RegisterExtension(E_YarpcMethodIndex) +} + +func init() { proto.RegisterFile("yarpcproto/yarpc.proto", fileDescriptorYarpc) } + +var fileDescriptorYarpc = []byte{ + // 177 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 
0xe2, 0x12, 0xab, 0x4c, 0x2c, 0x2a, + 0x48, 0x2e, 0x28, 0xca, 0x2f, 0xc9, 0xd7, 0x07, 0x33, 0xf5, 0xc0, 0x6c, 0x21, 0x2e, 0x84, 0xb8, + 0x94, 0x42, 0x7a, 0x7e, 0x7e, 0x7a, 0x4e, 0xaa, 0x3e, 0x98, 0x97, 0x54, 0x9a, 0xa6, 0x9f, 0x92, + 0x5a, 0x9c, 0x5c, 0x94, 0x59, 0x50, 0x92, 0x5f, 0x04, 0x51, 0x6d, 0x15, 0xc8, 0x25, 0x0c, 0x56, + 0x1f, 0x5f, 0x9c, 0x5a, 0x54, 0x96, 0x99, 0x9c, 0x1a, 0x9f, 0x99, 0x97, 0x92, 0x5a, 0x21, 0x24, + 0xaf, 0x07, 0xd1, 0xa9, 0x07, 0xd3, 0xa9, 0x17, 0x0c, 0x91, 0xf7, 0x2f, 0x28, 0xc9, 0xcc, 0xcf, + 0x2b, 0x96, 0xb8, 0xd0, 0xc6, 0xac, 0xc0, 0xa8, 0xc1, 0x1b, 0x24, 0x08, 0xd6, 0x0d, 0x95, 0xf4, + 0x04, 0xe9, 0xb5, 0xf2, 0xe3, 0x12, 0x82, 0x18, 0x99, 0x9b, 0x5a, 0x92, 0x91, 0x9f, 0x02, 0x35, + 0x51, 0x0e, 0xc3, 0x44, 0x5f, 0xb0, 0x34, 0xba, 0x81, 0x02, 0x60, 0xbd, 0x10, 0x39, 0xb0, 0x79, + 0x4e, 0x02, 0x27, 0x1e, 0xc9, 0x31, 0x5e, 0x78, 0x24, 0xc7, 0xf8, 0xe0, 0x91, 0x1c, 0xe3, 0x84, + 0xc7, 0x72, 0x0c, 0x80, 0x00, 0x00, 0x00, 0xff, 0xff, 0xfd, 0x27, 0x26, 0x95, 0xfb, 0x00, 0x00, + 0x00, +} diff --git a/vendor/github.com/influxdata/yarpc/yarpcproto/yarpc.proto b/vendor/github.com/influxdata/yarpc/yarpcproto/yarpc.proto new file mode 100644 index 000000000..e030ea470 --- /dev/null +++ b/vendor/github.com/influxdata/yarpc/yarpcproto/yarpc.proto @@ -0,0 +1,12 @@ +syntax = "proto2"; +package yarpcproto; + +import "google/protobuf/descriptor.proto"; + +extend google.protobuf.ServiceOptions { + optional uint32 yarpc_service_index = 50000; +} + +extend google.protobuf.MethodOptions { + optional uint32 yarpc_method_index = 50000; +} \ No newline at end of file diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/.travis.yml b/vendor/github.com/matttproud/golang_protobuf_extensions/.travis.yml new file mode 100644 index 000000000..f1309c9f8 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/.travis.yml @@ -0,0 +1,2 @@ +language: go + diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE 
b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE new file mode 100644 index 000000000..8dada3eda --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "{}" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright {yyyy} {name of copyright owner} + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE new file mode 100644 index 000000000..5d8cb5b72 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/NOTICE @@ -0,0 +1 @@ +Copyright 2012 Matt T. Proud (matt.proud@gmail.com) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/README.md b/vendor/github.com/matttproud/golang_protobuf_extensions/README.md new file mode 100644 index 000000000..751ee6967 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/README.md @@ -0,0 +1,20 @@ +# Overview +This repository provides various Protocol Buffer extensions for the Go +language (golang), namely support for record length-delimited message +streaming. 
+ +| Java | Go | +| ------------------------------ | --------------------- | +| MessageLite#parseDelimitedFrom | pbutil.ReadDelimited | +| MessageLite#writeDelimitedTo | pbutil.WriteDelimited | + +Because [Code Review 9102043](https://codereview.appspot.com/9102043/) is +destined to never be merged into mainline (i.e., never be promoted to formal +[goprotobuf features](https://github.com/golang/protobuf)), this repository +will live here in the wild. + +# Documentation +We have [generated Go Doc documentation](http://godoc.org/github.com/matttproud/golang_protobuf_extensions/pbutil) here. + +# Testing +[![Build Status](https://travis-ci.org/matttproud/golang_protobuf_extensions.png?branch=master)](https://travis-ci.org/matttproud/golang_protobuf_extensions) diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go new file mode 100644 index 000000000..5c463722d --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/all_test.go @@ -0,0 +1,177 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "bytes" + "testing" + + . "github.com/golang/protobuf/proto" + . 
"github.com/golang/protobuf/proto/testdata" +) + +func TestWriteDelimited(t *testing.T) { + t.Parallel() + for _, test := range []struct { + msg Message + buf []byte + n int + err error + }{ + { + msg: &Empty{}, + n: 1, + buf: []byte{0}, + }, + { + msg: &GoEnum{Foo: FOO_FOO1.Enum()}, + n: 3, + buf: []byte{2, 8, 1}, + }, + { + msg: &Strings{ + StringField: String(`This is my gigantic, unhappy string. It exceeds +the encoding size of a single byte varint. We are using it to fuzz test the +correctness of the header decoding mechanisms, which may prove problematic. +I expect it may. Let's hope you enjoy testing as much as we do.`), + }, + n: 271, + buf: []byte{141, 2, 10, 138, 2, 84, 104, 105, 115, 32, 105, 115, 32, 109, + 121, 32, 103, 105, 103, 97, 110, 116, 105, 99, 44, 32, 117, 110, 104, + 97, 112, 112, 121, 32, 115, 116, 114, 105, 110, 103, 46, 32, 32, 73, + 116, 32, 101, 120, 99, 101, 101, 100, 115, 10, 116, 104, 101, 32, 101, + 110, 99, 111, 100, 105, 110, 103, 32, 115, 105, 122, 101, 32, 111, 102, + 32, 97, 32, 115, 105, 110, 103, 108, 101, 32, 98, 121, 116, 101, 32, + 118, 97, 114, 105, 110, 116, 46, 32, 32, 87, 101, 32, 97, 114, 101, 32, + 117, 115, 105, 110, 103, 32, 105, 116, 32, 116, 111, 32, 102, 117, 122, + 122, 32, 116, 101, 115, 116, 32, 116, 104, 101, 10, 99, 111, 114, 114, + 101, 99, 116, 110, 101, 115, 115, 32, 111, 102, 32, 116, 104, 101, 32, + 104, 101, 97, 100, 101, 114, 32, 100, 101, 99, 111, 100, 105, 110, 103, + 32, 109, 101, 99, 104, 97, 110, 105, 115, 109, 115, 44, 32, 119, 104, + 105, 99, 104, 32, 109, 97, 121, 32, 112, 114, 111, 118, 101, 32, 112, + 114, 111, 98, 108, 101, 109, 97, 116, 105, 99, 46, 10, 73, 32, 101, 120, + 112, 101, 99, 116, 32, 105, 116, 32, 109, 97, 121, 46, 32, 32, 76, 101, + 116, 39, 115, 32, 104, 111, 112, 101, 32, 121, 111, 117, 32, 101, 110, + 106, 111, 121, 32, 116, 101, 115, 116, 105, 110, 103, 32, 97, 115, 32, + 109, 117, 99, 104, 32, 97, 115, 32, 119, 101, 32, 100, 111, 46}, + }, + } { + var buf bytes.Buffer + 
if n, err := WriteDelimited(&buf, test.msg); n != test.n || err != test.err { + t.Fatalf("WriteDelimited(buf, %#v) = %v, %v; want %v, %v", test.msg, n, err, test.n, test.err) + } + if out := buf.Bytes(); !bytes.Equal(out, test.buf) { + t.Fatalf("WriteDelimited(buf, %#v); buf = %v; want %v", test.msg, out, test.buf) + } + } +} + +func TestReadDelimited(t *testing.T) { + t.Parallel() + for _, test := range []struct { + buf []byte + msg Message + n int + err error + }{ + { + buf: []byte{0}, + msg: &Empty{}, + n: 1, + }, + { + n: 3, + buf: []byte{2, 8, 1}, + msg: &GoEnum{Foo: FOO_FOO1.Enum()}, + }, + { + buf: []byte{141, 2, 10, 138, 2, 84, 104, 105, 115, 32, 105, 115, 32, 109, + 121, 32, 103, 105, 103, 97, 110, 116, 105, 99, 44, 32, 117, 110, 104, + 97, 112, 112, 121, 32, 115, 116, 114, 105, 110, 103, 46, 32, 32, 73, + 116, 32, 101, 120, 99, 101, 101, 100, 115, 10, 116, 104, 101, 32, 101, + 110, 99, 111, 100, 105, 110, 103, 32, 115, 105, 122, 101, 32, 111, 102, + 32, 97, 32, 115, 105, 110, 103, 108, 101, 32, 98, 121, 116, 101, 32, + 118, 97, 114, 105, 110, 116, 46, 32, 32, 87, 101, 32, 97, 114, 101, 32, + 117, 115, 105, 110, 103, 32, 105, 116, 32, 116, 111, 32, 102, 117, 122, + 122, 32, 116, 101, 115, 116, 32, 116, 104, 101, 10, 99, 111, 114, 114, + 101, 99, 116, 110, 101, 115, 115, 32, 111, 102, 32, 116, 104, 101, 32, + 104, 101, 97, 100, 101, 114, 32, 100, 101, 99, 111, 100, 105, 110, 103, + 32, 109, 101, 99, 104, 97, 110, 105, 115, 109, 115, 44, 32, 119, 104, + 105, 99, 104, 32, 109, 97, 121, 32, 112, 114, 111, 118, 101, 32, 112, + 114, 111, 98, 108, 101, 109, 97, 116, 105, 99, 46, 10, 73, 32, 101, 120, + 112, 101, 99, 116, 32, 105, 116, 32, 109, 97, 121, 46, 32, 32, 76, 101, + 116, 39, 115, 32, 104, 111, 112, 101, 32, 121, 111, 117, 32, 101, 110, + 106, 111, 121, 32, 116, 101, 115, 116, 105, 110, 103, 32, 97, 115, 32, + 109, 117, 99, 104, 32, 97, 115, 32, 119, 101, 32, 100, 111, 46}, + msg: &Strings{ + StringField: String(`This is my gigantic, unhappy string. 
It exceeds +the encoding size of a single byte varint. We are using it to fuzz test the +correctness of the header decoding mechanisms, which may prove problematic. +I expect it may. Let's hope you enjoy testing as much as we do.`), + }, + n: 271, + }, + } { + msg := Clone(test.msg) + msg.Reset() + if n, err := ReadDelimited(bytes.NewBuffer(test.buf), msg); n != test.n || err != test.err { + t.Fatalf("ReadDelimited(%v, msg) = %v, %v; want %v, %v", test.buf, n, err, test.n, test.err) + } + if !Equal(msg, test.msg) { + t.Fatalf("ReadDelimited(%v, msg); msg = %v; want %v", test.buf, msg, test.msg) + } + } +} + +func TestEndToEndValid(t *testing.T) { + t.Parallel() + for _, test := range [][]Message{ + {&Empty{}}, + {&GoEnum{Foo: FOO_FOO1.Enum()}, &Empty{}, &GoEnum{Foo: FOO_FOO1.Enum()}}, + {&GoEnum{Foo: FOO_FOO1.Enum()}}, + {&Strings{ + StringField: String(`This is my gigantic, unhappy string. It exceeds +the encoding size of a single byte varint. We are using it to fuzz test the +correctness of the header decoding mechanisms, which may prove problematic. +I expect it may. Let's hope you enjoy testing as much as we do.`), + }}, + } { + var buf bytes.Buffer + var written int + for i, msg := range test { + n, err := WriteDelimited(&buf, msg) + if err != nil { + // Assumption: TestReadDelimited and TestWriteDelimited are sufficient + // and inputs for this test are explicitly exercised there. + t.Fatalf("WriteDelimited(buf, %v[%d]) = ?, %v; wanted ?, nil", test, i, err) + } + written += n + } + var read int + for i, msg := range test { + out := Clone(msg) + out.Reset() + n, _ := ReadDelimited(&buf, out) + // Decide to do EOF checking? 
+ read += n + if !Equal(out, msg) { + t.Fatalf("out = %v; want %v[%d] = %#v", out, test, i, msg) + } + } + if read != written { + t.Fatalf("%v read = %d; want %d", test, read, written) + } + } +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go new file mode 100644 index 000000000..258c0636a --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode.go @@ -0,0 +1,75 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "errors" + "io" + + "github.com/golang/protobuf/proto" +) + +var errInvalidVarint = errors.New("invalid varint32 encountered") + +// ReadDelimited decodes a message from the provided length-delimited stream, +// where the length is encoded as 32-bit varint prefix to the message body. +// It returns the total number of bytes read and any applicable error. This is +// roughly equivalent to the companion Java API's +// MessageLite#parseDelimitedFrom. As per the reader contract, this function +// calls r.Read repeatedly as required until exactly one message including its +// prefix is read and decoded (or an error has occurred). The function never +// reads more bytes from the stream than required. 
The function never returns +// an error if a message has been read and decoded correctly, even if the end +// of the stream has been reached in doing so. In that case, any subsequent +// calls return (0, io.EOF). +func ReadDelimited(r io.Reader, m proto.Message) (n int, err error) { + // Per AbstractParser#parsePartialDelimitedFrom with + // CodedInputStream#readRawVarint32. + var headerBuf [binary.MaxVarintLen32]byte + var bytesRead, varIntBytes int + var messageLength uint64 + for varIntBytes == 0 { // i.e. no varint has been decoded yet. + if bytesRead >= len(headerBuf) { + return bytesRead, errInvalidVarint + } + // We have to read byte by byte here to avoid reading more bytes + // than required. Each read byte is appended to what we have + // read before. + newBytesRead, err := r.Read(headerBuf[bytesRead : bytesRead+1]) + if newBytesRead == 0 { + if err != nil { + return bytesRead, err + } + // A Reader should not return (0, nil), but if it does, + // it should be treated as no-op (according to the + // Reader contract). So let's go on... + continue + } + bytesRead += newBytesRead + // Now present everything read so far to the varint decoder and + // see if a varint can be decoded already. + messageLength, varIntBytes = proto.DecodeVarint(headerBuf[:bytesRead]) + } + + messageBuf := make([]byte, messageLength) + newBytesRead, err := io.ReadFull(r, messageBuf) + bytesRead += newBytesRead + if err != nil { + return bytesRead, err + } + + return bytesRead, proto.Unmarshal(messageBuf, m) +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode_test.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode_test.go new file mode 100644 index 000000000..364a7b799 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/decode_test.go @@ -0,0 +1,99 @@ +// Copyright 2016 Matt T. 
Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "bytes" + "io" + "testing" + "testing/iotest" +) + +func TestReadDelimitedIllegalVarint(t *testing.T) { + t.Parallel() + var tests = []struct { + in []byte + n int + err error + }{ + { + in: []byte{255, 255, 255, 255, 255}, + n: 5, + err: errInvalidVarint, + }, + { + in: []byte{255, 255, 255, 255, 255, 255}, + n: 5, + err: errInvalidVarint, + }, + } + for _, test := range tests { + n, err := ReadDelimited(bytes.NewReader(test.in), nil) + if got, want := n, test.n; got != want { + t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", test.in, got, want) + } + if got, want := err, test.err; got != want { + t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", test.in, got, want) + } + } +} + +func TestReadDelimitedPrematureHeader(t *testing.T) { + t.Parallel() + var data = []byte{128, 5} // 256 + 256 + 128 + n, err := ReadDelimited(bytes.NewReader(data[0:1]), nil) + if got, want := n, 1; got != want { + t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data[0:1], got, want) + } + if got, want := err, io.EOF; got != want { + t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data[0:1], got, want) + } +} + +func TestReadDelimitedPrematureBody(t *testing.T) { + t.Parallel() + var data = []byte{128, 5, 0, 0, 0} // 256 + 256 + 128 + n, err := ReadDelimited(bytes.NewReader(data[:]), nil) + if got, want := n, 5; got != want { + 
t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data, got, want) + } + if got, want := err, io.ErrUnexpectedEOF; got != want { + t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data, got, want) + } +} + +func TestReadDelimitedPrematureHeaderIncremental(t *testing.T) { + t.Parallel() + var data = []byte{128, 5} // 256 + 256 + 128 + n, err := ReadDelimited(iotest.OneByteReader(bytes.NewReader(data[0:1])), nil) + if got, want := n, 1; got != want { + t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data[0:1], got, want) + } + if got, want := err, io.EOF; got != want { + t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data[0:1], got, want) + } +} + +func TestReadDelimitedPrematureBodyIncremental(t *testing.T) { + t.Parallel() + var data = []byte{128, 5, 0, 0, 0} // 256 + 256 + 128 + n, err := ReadDelimited(iotest.OneByteReader(bytes.NewReader(data[:])), nil) + if got, want := n, 5; got != want { + t.Errorf("ReadDelimited(%#v, nil) = %#v, ?; want = %v#, ?", data, got, want) + } + if got, want := err, io.ErrUnexpectedEOF; got != want { + t.Errorf("ReadDelimited(%#v, nil) = ?, %#v; want = ?, %#v", data, got, want) + } +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go new file mode 100644 index 000000000..c318385cb --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/doc.go @@ -0,0 +1,16 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +// Package pbutil provides record length-delimited Protocol Buffer streaming. +package pbutil diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go new file mode 100644 index 000000000..8fb59ad22 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode.go @@ -0,0 +1,46 @@ +// Copyright 2013 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package pbutil + +import ( + "encoding/binary" + "io" + + "github.com/golang/protobuf/proto" +) + +// WriteDelimited encodes and dumps a message to the provided writer prefixed +// with a 32-bit varint indicating the length of the encoded message, producing +// a length-delimited record stream, which can be used to chain together +// encoded messages of the same type together in a file. It returns the total +// number of bytes written and any applicable error. This is roughly +// equivalent to the companion Java API's MessageLite#writeDelimitedTo. 
+func WriteDelimited(w io.Writer, m proto.Message) (n int, err error) { + buffer, err := proto.Marshal(m) + if err != nil { + return 0, err + } + + var buf [binary.MaxVarintLen32]byte + encodedLength := binary.PutUvarint(buf[:], uint64(len(buffer))) + + sync, err := w.Write(buf[:encodedLength]) + if err != nil { + return sync, err + } + + n, err = w.Write(buffer) + return n + sync, err +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode_test.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode_test.go new file mode 100644 index 000000000..f92632b0b --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/encode_test.go @@ -0,0 +1,67 @@ +// Copyright 2016 Matt T. Proud +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package pbutil + +import ( + "bytes" + "errors" + "testing" + + "github.com/golang/protobuf/proto" +) + +var errMarshal = errors.New("pbutil: can't marshal") + +type cantMarshal struct{ proto.Message } + +func (cantMarshal) Marshal() ([]byte, error) { return nil, errMarshal } + +var _ proto.Message = cantMarshal{} + +func TestWriteDelimitedMarshalErr(t *testing.T) { + t.Parallel() + var data cantMarshal + var buf bytes.Buffer + n, err := WriteDelimited(&buf, data) + if got, want := n, 0; got != want { + t.Errorf("WriteDelimited(buf, %#v) = %#v, ?; want = %v#, ?", data, got, want) + } + if got, want := err, errMarshal; got != want { + t.Errorf("WriteDelimited(buf, %#v) = ?, %#v; want = ?, %#v", data, got, want) + } +} + +type canMarshal struct{ proto.Message } + +func (canMarshal) Marshal() ([]byte, error) { return []byte{0, 1, 2, 3, 4, 5}, nil } + +var errWrite = errors.New("pbutil: can't write") + +type cantWrite struct{} + +func (cantWrite) Write([]byte) (int, error) { return 0, errWrite } + +func TestWriteDelimitedWriteErr(t *testing.T) { + t.Parallel() + var data canMarshal + var buf cantWrite + n, err := WriteDelimited(buf, data) + if got, want := n, 0; got != want { + t.Errorf("WriteDelimited(buf, %#v) = %#v, ?; want = %v#, ?", data, got, want) + } + if got, want := err, errWrite; got != want { + t.Errorf("WriteDelimited(buf, %#v) = ?, %#v; want = ?, %#v", data, got, want) + } +} diff --git a/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/fixtures_test.go b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/fixtures_test.go new file mode 100644 index 000000000..d6d9b2559 --- /dev/null +++ b/vendor/github.com/matttproud/golang_protobuf_extensions/pbutil/fixtures_test.go @@ -0,0 +1,103 @@ +// Copyright 2010 The Go Authors. All rights reserved. 
+// http://github.com/golang/protobuf/ +// +// Redistribution and use in source and binary forms, with or without +// modification, are permitted provided that the following conditions are +// met: +// +// * Redistributions of source code must retain the above copyright +// notice, this list of conditions and the following disclaimer. +// * Redistributions in binary form must reproduce the above +// copyright notice, this list of conditions and the following disclaimer +// in the documentation and/or other materials provided with the +// distribution. +// * Neither the name of Google Inc. nor the names of its +// contributors may be used to endorse or promote products derived from +// this software without specific prior written permission. +// +// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + +package pbutil + +import ( + . "github.com/golang/protobuf/proto" + . "github.com/golang/protobuf/proto/testdata" +) + +// FROM https://github.com/golang/protobuf/blob/master/proto/all_test.go. + +func initGoTestField() *GoTestField { + f := new(GoTestField) + f.Label = String("label") + f.Type = String("type") + return f +} + +// These are all structurally equivalent but the tag numbers differ. 
+// (It's remarkable that required, optional, and repeated all have +// 8 letters.) +func initGoTest_RequiredGroup() *GoTest_RequiredGroup { + return &GoTest_RequiredGroup{ + RequiredField: String("required"), + } +} + +func initGoTest_OptionalGroup() *GoTest_OptionalGroup { + return &GoTest_OptionalGroup{ + RequiredField: String("optional"), + } +} + +func initGoTest_RepeatedGroup() *GoTest_RepeatedGroup { + return &GoTest_RepeatedGroup{ + RequiredField: String("repeated"), + } +} + +func initGoTest(setdefaults bool) *GoTest { + pb := new(GoTest) + if setdefaults { + pb.F_BoolDefaulted = Bool(Default_GoTest_F_BoolDefaulted) + pb.F_Int32Defaulted = Int32(Default_GoTest_F_Int32Defaulted) + pb.F_Int64Defaulted = Int64(Default_GoTest_F_Int64Defaulted) + pb.F_Fixed32Defaulted = Uint32(Default_GoTest_F_Fixed32Defaulted) + pb.F_Fixed64Defaulted = Uint64(Default_GoTest_F_Fixed64Defaulted) + pb.F_Uint32Defaulted = Uint32(Default_GoTest_F_Uint32Defaulted) + pb.F_Uint64Defaulted = Uint64(Default_GoTest_F_Uint64Defaulted) + pb.F_FloatDefaulted = Float32(Default_GoTest_F_FloatDefaulted) + pb.F_DoubleDefaulted = Float64(Default_GoTest_F_DoubleDefaulted) + pb.F_StringDefaulted = String(Default_GoTest_F_StringDefaulted) + pb.F_BytesDefaulted = Default_GoTest_F_BytesDefaulted + pb.F_Sint32Defaulted = Int32(Default_GoTest_F_Sint32Defaulted) + pb.F_Sint64Defaulted = Int64(Default_GoTest_F_Sint64Defaulted) + } + + pb.Kind = GoTest_TIME.Enum() + pb.RequiredField = initGoTestField() + pb.F_BoolRequired = Bool(true) + pb.F_Int32Required = Int32(3) + pb.F_Int64Required = Int64(6) + pb.F_Fixed32Required = Uint32(32) + pb.F_Fixed64Required = Uint64(64) + pb.F_Uint32Required = Uint32(3232) + pb.F_Uint64Required = Uint64(6464) + pb.F_FloatRequired = Float32(3232) + pb.F_DoubleRequired = Float64(6464) + pb.F_StringRequired = String("string") + pb.F_BytesRequired = []byte("bytes") + pb.F_Sint32Required = Int32(-32) + pb.F_Sint64Required = Int64(-64) + pb.Requiredgroup = 
initGoTest_RequiredGroup() + + return pb +} diff --git a/vendor/github.com/opentracing/opentracing-go/.gitignore b/vendor/github.com/opentracing/opentracing-go/.gitignore new file mode 100644 index 000000000..565f0f732 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/.gitignore @@ -0,0 +1,13 @@ +# IntelliJ project files +.idea/ +opentracing-go.iml +opentracing-go.ipr +opentracing-go.iws + +# Test results +*.cov +*.html +test.log + +# Build dir +build/ diff --git a/vendor/github.com/opentracing/opentracing-go/.travis.yml b/vendor/github.com/opentracing/opentracing-go/.travis.yml new file mode 100644 index 000000000..0538f1bfc --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/.travis.yml @@ -0,0 +1,14 @@ +language: go + +go: + - 1.6 + - 1.7 + - 1.8 + - tip + +install: + - go get -d -t github.com/opentracing/opentracing-go/... + - go get -u github.com/golang/lint/... +script: + - make test lint + - go build ./... diff --git a/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md new file mode 100644 index 000000000..1fc9fdf7f --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/CHANGELOG.md @@ -0,0 +1,14 @@ +Changes by Version +================== + +1.1.0 (unreleased) +------------------- + +- Deprecate InitGlobalTracer() in favor of SetGlobalTracer() + + +1.0.0 (2016-09-26) +------------------- + +- This release implements OpenTracing Specification 1.0 (http://opentracing.io/spec) + diff --git a/vendor/github.com/opentracing/opentracing-go/LICENSE b/vendor/github.com/opentracing/opentracing-go/LICENSE new file mode 100644 index 000000000..148509a40 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/LICENSE @@ -0,0 +1,21 @@ +The MIT License (MIT) + +Copyright (c) 2016 The OpenTracing Authors + +Permission is hereby granted, free of charge, to any person obtaining a copy +of this software and associated documentation files (the "Software"), to deal 
+in the Software without restriction, including without limitation the rights +to use, copy, modify, merge, publish, distribute, sublicense, and/or sell +copies of the Software, and to permit persons to whom the Software is +furnished to do so, subject to the following conditions: + +The above copyright notice and this permission notice shall be included in all +copies or substantial portions of the Software. + +THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR +IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, +FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE +AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER +LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, +OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE +SOFTWARE. diff --git a/vendor/github.com/opentracing/opentracing-go/Makefile b/vendor/github.com/opentracing/opentracing-go/Makefile new file mode 100644 index 000000000..2f491f157 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/Makefile @@ -0,0 +1,32 @@ +PACKAGES := . ./mocktracer/... ./ext/... + +.DEFAULT_GOAL := test-and-lint + +.PHONE: test-and-lint + +test-and-lint: test lint + +.PHONY: test +test: + go test -v -cover ./... + +cover: + @rm -rf cover-all.out + $(foreach pkg, $(PACKAGES), $(MAKE) cover-pkg PKG=$(pkg) || true;) + @grep mode: cover.out > coverage.out + @cat cover-all.out >> coverage.out + go tool cover -html=coverage.out -o cover.html + @rm -rf cover.out cover-all.out coverage.out + +cover-pkg: + go test -coverprofile cover.out $(PKG) + @grep -v mode: cover.out >> cover-all.out + +.PHONY: lint +lint: + go fmt ./... + golint ./... + @# Run again with magic to exit non-zero if golint outputs anything. + @! (golint ./... | read dummy) + go vet ./... 
+ diff --git a/vendor/github.com/opentracing/opentracing-go/README.md b/vendor/github.com/opentracing/opentracing-go/README.md new file mode 100644 index 000000000..1fb77d227 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/README.md @@ -0,0 +1,147 @@ +[![Gitter chat](http://img.shields.io/badge/gitter-join%20chat%20%E2%86%92-brightgreen.svg)](https://gitter.im/opentracing/public) [![Build Status](https://travis-ci.org/opentracing/opentracing-go.svg?branch=master)](https://travis-ci.org/opentracing/opentracing-go) [![GoDoc](https://godoc.org/github.com/opentracing/opentracing-go?status.svg)](http://godoc.org/github.com/opentracing/opentracing-go) + +# OpenTracing API for Go + +This package is a Go platform API for OpenTracing. + +## Required Reading + +In order to understand the Go platform API, one must first be familiar with the +[OpenTracing project](http://opentracing.io) and +[terminology](http://opentracing.io/documentation/pages/spec.html) more specifically. + +## API overview for those adding instrumentation + +Everyday consumers of this `opentracing` package really only need to worry +about a couple of key abstractions: the `StartSpan` function, the `Span` +interface, and binding a `Tracer` at `main()`-time. Here are code snippets +demonstrating some important use cases. + +#### Singleton initialization + +The simplest starting point is `./default_tracer.go`. As early as possible, call + +```go + import "github.com/opentracing/opentracing-go" + import ".../some_tracing_impl" + + func main() { + opentracing.InitGlobalTracer( + // tracing impl specific: + some_tracing_impl.New(...), + ) + ... + } +``` + +##### Non-Singleton initialization + +If you prefer direct control to singletons, manage ownership of the +`opentracing.Tracer` implementation explicitly. + +#### Creating a Span given an existing Go `context.Context` + +If you use `context.Context` in your application, OpenTracing's Go library will +happily rely on it for `Span` propagation. 
To start a new (blocking child) +`Span`, you can use `StartSpanFromContext`. + +```go + func xyz(ctx context.Context, ...) { + ... + span, ctx := opentracing.StartSpanFromContext(ctx, "operation_name") + defer span.Finish() + span.LogFields( + log.String("event", "soft error"), + log.String("type", "cache timeout"), + log.Int("waited.millis", 1500)) + ... + } +``` + +#### Starting an empty trace by creating a "root span" + +It's always possible to create a "root" `Span` with no parent or other causal +reference. + +```go + func xyz() { + ... + sp := opentracing.StartSpan("operation_name") + defer sp.Finish() + ... + } +``` + +#### Creating a (child) Span given an existing (parent) Span + +```go + func xyz(parentSpan opentracing.Span, ...) { + ... + sp := opentracing.StartSpan( + "operation_name", + opentracing.ChildOf(parentSpan.Context())) + defer sp.Finish() + ... + } +``` + +#### Serializing to the wire + +```go + func makeSomeRequest(ctx context.Context) ... { + if span := opentracing.SpanFromContext(ctx); span != nil { + httpClient := &http.Client{} + httpReq, _ := http.NewRequest("GET", "http://myservice/", nil) + + // Transmit the span's TraceContext as HTTP headers on our + // outbound request. + opentracing.GlobalTracer().Inject( + span.Context(), + opentracing.HTTPHeaders, + opentracing.HTTPHeadersCarrier(httpReq.Header)) + + resp, err := httpClient.Do(httpReq) + ... + } + ... + } +``` + +#### Deserializing from the wire + +```go + http.HandleFunc("/", func(w http.ResponseWriter, req *http.Request) { + var serverSpan opentracing.Span + appSpecificOperationName := ... + wireContext, err := opentracing.GlobalTracer().Extract( + opentracing.HTTPHeaders, + opentracing.HTTPHeadersCarrier(req.Header)) + if err != nil { + // Optionally record something about err here + } + + // Create the span referring to the RPC client if available. + // If wireContext == nil, a root span will be created. 
+ serverSpan = opentracing.StartSpan( + appSpecificOperationName, + ext.RPCServerOption(wireContext)) + + defer serverSpan.Finish() + + ctx := opentracing.ContextWithSpan(context.Background(), serverSpan) + ... + } +``` + +#### Goroutine-safety + +The entire public API is goroutine-safe and does not require external +synchronization. + +## API pointers for those implementing a tracing system + +Tracing system implementors may be able to reuse or copy-paste-modify the `basictracer` package, found [here](https://github.com/opentracing/basictracer-go). In particular, see `basictracer.New(...)`. + +## API compatibility + +For the time being, "mild" backwards-incompatible changes may be made without changing the major version number. As OpenTracing and `opentracing-go` mature, backwards compatibility will become more of a priority. diff --git a/vendor/github.com/opentracing/opentracing-go/globaltracer.go b/vendor/github.com/opentracing/opentracing-go/globaltracer.go new file mode 100644 index 000000000..8c8e793ff --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/globaltracer.go @@ -0,0 +1,32 @@ +package opentracing + +var ( + globalTracer Tracer = NoopTracer{} +) + +// SetGlobalTracer sets the [singleton] opentracing.Tracer returned by +// GlobalTracer(). Those who use GlobalTracer (rather than directly manage an +// opentracing.Tracer instance) should call SetGlobalTracer as early as +// possible in main(), prior to calling the `StartSpan` global func below. +// Prior to calling `SetGlobalTracer`, any Spans started via the `StartSpan` +// (etc) globals are noops. +func SetGlobalTracer(tracer Tracer) { + globalTracer = tracer +} + +// GlobalTracer returns the global singleton `Tracer` implementation. +// Before `SetGlobalTracer()` is called, the `GlobalTracer()` is a noop +// implementation that drops all data handed to it. +func GlobalTracer() Tracer { + return globalTracer +} + +// StartSpan defers to `Tracer.StartSpan`. See `GlobalTracer()`. 
+func StartSpan(operationName string, opts ...StartSpanOption) Span { + return globalTracer.StartSpan(operationName, opts...) +} + +// InitGlobalTracer is deprecated. Please use SetGlobalTracer. +func InitGlobalTracer(tracer Tracer) { + SetGlobalTracer(tracer) +} diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext.go b/vendor/github.com/opentracing/opentracing-go/gocontext.go new file mode 100644 index 000000000..222a65202 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/gocontext.go @@ -0,0 +1,57 @@ +package opentracing + +import "golang.org/x/net/context" + +type contextKey struct{} + +var activeSpanKey = contextKey{} + +// ContextWithSpan returns a new `context.Context` that holds a reference to +// `span`'s SpanContext. +func ContextWithSpan(ctx context.Context, span Span) context.Context { + return context.WithValue(ctx, activeSpanKey, span) +} + +// SpanFromContext returns the `Span` previously associated with `ctx`, or +// `nil` if no such `Span` could be found. +// +// NOTE: context.Context != SpanContext: the former is Go's intra-process +// context propagation mechanism, and the latter houses OpenTracing's per-Span +// identity and baggage information. +func SpanFromContext(ctx context.Context) Span { + val := ctx.Value(activeSpanKey) + if sp, ok := val.(Span); ok { + return sp + } + return nil +} + +// StartSpanFromContext starts and returns a Span with `operationName`, using +// any Span found within `ctx` as a ChildOfRef. If no such parent could be +// found, StartSpanFromContext creates a root (parentless) Span. +// +// The second return value is a context.Context object built around the +// returned Span. +// +// Example usage: +// +// SomeFunction(ctx context.Context, ...) { +// sp, ctx := opentracing.StartSpanFromContext(ctx, "SomeFunction") +// defer sp.Finish() +// ... 
+// } +func StartSpanFromContext(ctx context.Context, operationName string, opts ...StartSpanOption) (Span, context.Context) { + return startSpanFromContextWithTracer(ctx, GlobalTracer(), operationName, opts...) +} + +// startSpanFromContextWithTracer is factored out for testing purposes. +func startSpanFromContextWithTracer(ctx context.Context, tracer Tracer, operationName string, opts ...StartSpanOption) (Span, context.Context) { + var span Span + if parentSpan := SpanFromContext(ctx); parentSpan != nil { + opts = append(opts, ChildOf(parentSpan.Context())) + span = tracer.StartSpan(operationName, opts...) + } else { + span = tracer.StartSpan(operationName, opts...) + } + return span, ContextWithSpan(ctx, span) +} diff --git a/vendor/github.com/opentracing/opentracing-go/gocontext_test.go b/vendor/github.com/opentracing/opentracing-go/gocontext_test.go new file mode 100644 index 000000000..65c013086 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/gocontext_test.go @@ -0,0 +1,81 @@ +package opentracing + +import ( + "testing" + "time" + + "github.com/stretchr/testify/assert" + "golang.org/x/net/context" +) + +func TestContextWithSpan(t *testing.T) { + span := &noopSpan{} + ctx := ContextWithSpan(context.Background(), span) + span2 := SpanFromContext(ctx) + if span != span2 { + t.Errorf("Not the same span returned from context, expected=%+v, actual=%+v", span, span2) + } + + ctx = context.Background() + span2 = SpanFromContext(ctx) + if span2 != nil { + t.Errorf("Expected nil span, found %+v", span2) + } + + ctx = ContextWithSpan(ctx, span) + span2 = SpanFromContext(ctx) + if span != span2 { + t.Errorf("Not the same span returned from context, expected=%+v, actual=%+v", span, span2) + } +} + +func TestStartSpanFromContext(t *testing.T) { + testTracer := testTracer{} + + // Test the case where there *is* a Span in the Context. 
+ { + parentSpan := &testSpan{} + parentCtx := ContextWithSpan(context.Background(), parentSpan) + childSpan, childCtx := startSpanFromContextWithTracer(parentCtx, testTracer, "child") + if !childSpan.Context().(testSpanContext).HasParent { + t.Errorf("Failed to find parent: %v", childSpan) + } + if !childSpan.(testSpan).Equal(SpanFromContext(childCtx)) { + t.Errorf("Unable to find child span in context: %v", childCtx) + } + } + + // Test the case where there *is not* a Span in the Context. + { + emptyCtx := context.Background() + childSpan, childCtx := startSpanFromContextWithTracer(emptyCtx, testTracer, "child") + if childSpan.Context().(testSpanContext).HasParent { + t.Errorf("Should not have found parent: %v", childSpan) + } + if !childSpan.(testSpan).Equal(SpanFromContext(childCtx)) { + t.Errorf("Unable to find child span in context: %v", childCtx) + } + } +} + +func TestStartSpanFromContextOptions(t *testing.T) { + testTracer := testTracer{} + + // Test options are passed to tracer + + startTime := time.Now().Add(-10 * time.Second) // ten seconds ago + span, ctx := startSpanFromContextWithTracer( + context.Background(), testTracer, "parent", StartTime(startTime), Tag{"component", "test"}) + + assert.Equal(t, "test", span.(testSpan).Tags["component"]) + assert.Equal(t, startTime, span.(testSpan).StartTime) + + // Test it also works for a child span + + childStartTime := startTime.Add(3 * time.Second) + childSpan, _ := startSpanFromContextWithTracer( + ctx, testTracer, "child", StartTime(childStartTime)) + + assert.Equal(t, childSpan.(testSpan).Tags["component"], nil) + assert.Equal(t, childSpan.(testSpan).StartTime, childStartTime) +} diff --git a/vendor/github.com/opentracing/opentracing-go/log/field.go b/vendor/github.com/opentracing/opentracing-go/log/field.go new file mode 100644 index 000000000..d2cd39a16 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/log/field.go @@ -0,0 +1,245 @@ +package log + +import ( + "fmt" + "math" +) + +type 
fieldType int + +const ( + stringType fieldType = iota + boolType + intType + int32Type + uint32Type + int64Type + uint64Type + float32Type + float64Type + errorType + objectType + lazyLoggerType +) + +// Field instances are constructed via LogBool, LogString, and so on. +// Tracing implementations may then handle them via the Field.Marshal +// method. +// +// "heavily influenced by" (i.e., partially stolen from) +// https://github.com/uber-go/zap +type Field struct { + key string + fieldType fieldType + numericVal int64 + stringVal string + interfaceVal interface{} +} + +// String adds a string-valued key:value pair to a Span.LogFields() record +func String(key, val string) Field { + return Field{ + key: key, + fieldType: stringType, + stringVal: val, + } +} + +// Bool adds a bool-valued key:value pair to a Span.LogFields() record +func Bool(key string, val bool) Field { + var numericVal int64 + if val { + numericVal = 1 + } + return Field{ + key: key, + fieldType: boolType, + numericVal: numericVal, + } +} + +// Int adds an int-valued key:value pair to a Span.LogFields() record +func Int(key string, val int) Field { + return Field{ + key: key, + fieldType: intType, + numericVal: int64(val), + } +} + +// Int32 adds an int32-valued key:value pair to a Span.LogFields() record +func Int32(key string, val int32) Field { + return Field{ + key: key, + fieldType: int32Type, + numericVal: int64(val), + } +} + +// Int64 adds an int64-valued key:value pair to a Span.LogFields() record +func Int64(key string, val int64) Field { + return Field{ + key: key, + fieldType: int64Type, + numericVal: val, + } +} + +// Uint32 adds a uint32-valued key:value pair to a Span.LogFields() record +func Uint32(key string, val uint32) Field { + return Field{ + key: key, + fieldType: uint32Type, + numericVal: int64(val), + } +} + +// Uint64 adds a uint64-valued key:value pair to a Span.LogFields() record +func Uint64(key string, val uint64) Field { + return Field{ + key: key, + fieldType: 
uint64Type, + numericVal: int64(val), + } +} + +// Float32 adds a float32-valued key:value pair to a Span.LogFields() record +func Float32(key string, val float32) Field { + return Field{ + key: key, + fieldType: float32Type, + numericVal: int64(math.Float32bits(val)), + } +} + +// Float64 adds a float64-valued key:value pair to a Span.LogFields() record +func Float64(key string, val float64) Field { + return Field{ + key: key, + fieldType: float64Type, + numericVal: int64(math.Float64bits(val)), + } +} + +// Error adds an error with the key "error" to a Span.LogFields() record +func Error(err error) Field { + return Field{ + key: "error", + fieldType: errorType, + interfaceVal: err, + } +} + +// Object adds an object-valued key:value pair to a Span.LogFields() record +func Object(key string, obj interface{}) Field { + return Field{ + key: key, + fieldType: objectType, + interfaceVal: obj, + } +} + +// LazyLogger allows for user-defined, late-bound logging of arbitrary data +type LazyLogger func(fv Encoder) + +// Lazy adds a LazyLogger to a Span.LogFields() record; the tracing +// implementation will call the LazyLogger function at an indefinite time in +// the future (after Lazy() returns). +func Lazy(ll LazyLogger) Field { + return Field{ + fieldType: lazyLoggerType, + interfaceVal: ll, + } +} + +// Encoder allows access to the contents of a Field (via a call to +// Field.Marshal). +// +// Tracer implementations typically provide an implementation of Encoder; +// OpenTracing callers typically do not need to concern themselves with it. 
+type Encoder interface { + EmitString(key, value string) + EmitBool(key string, value bool) + EmitInt(key string, value int) + EmitInt32(key string, value int32) + EmitInt64(key string, value int64) + EmitUint32(key string, value uint32) + EmitUint64(key string, value uint64) + EmitFloat32(key string, value float32) + EmitFloat64(key string, value float64) + EmitObject(key string, value interface{}) + EmitLazyLogger(value LazyLogger) +} + +// Marshal passes a Field instance through to the appropriate +// field-type-specific method of an Encoder. +func (lf Field) Marshal(visitor Encoder) { + switch lf.fieldType { + case stringType: + visitor.EmitString(lf.key, lf.stringVal) + case boolType: + visitor.EmitBool(lf.key, lf.numericVal != 0) + case intType: + visitor.EmitInt(lf.key, int(lf.numericVal)) + case int32Type: + visitor.EmitInt32(lf.key, int32(lf.numericVal)) + case int64Type: + visitor.EmitInt64(lf.key, int64(lf.numericVal)) + case uint32Type: + visitor.EmitUint32(lf.key, uint32(lf.numericVal)) + case uint64Type: + visitor.EmitUint64(lf.key, uint64(lf.numericVal)) + case float32Type: + visitor.EmitFloat32(lf.key, math.Float32frombits(uint32(lf.numericVal))) + case float64Type: + visitor.EmitFloat64(lf.key, math.Float64frombits(uint64(lf.numericVal))) + case errorType: + if err, ok := lf.interfaceVal.(error); ok { + visitor.EmitString(lf.key, err.Error()) + } else { + visitor.EmitString(lf.key, "") + } + case objectType: + visitor.EmitObject(lf.key, lf.interfaceVal) + case lazyLoggerType: + visitor.EmitLazyLogger(lf.interfaceVal.(LazyLogger)) + } +} + +// Key returns the field's key. +func (lf Field) Key() string { + return lf.key +} + +// Value returns the field's value as interface{}. 
+func (lf Field) Value() interface{} { + switch lf.fieldType { + case stringType: + return lf.stringVal + case boolType: + return lf.numericVal != 0 + case intType: + return int(lf.numericVal) + case int32Type: + return int32(lf.numericVal) + case int64Type: + return int64(lf.numericVal) + case uint32Type: + return uint32(lf.numericVal) + case uint64Type: + return uint64(lf.numericVal) + case float32Type: + return math.Float32frombits(uint32(lf.numericVal)) + case float64Type: + return math.Float64frombits(uint64(lf.numericVal)) + case errorType, objectType, lazyLoggerType: + return lf.interfaceVal + default: + return nil + } +} + +// String returns a string representation of the key and value. +func (lf Field) String() string { + return fmt.Sprint(lf.key, ":", lf.Value()) +} diff --git a/vendor/github.com/opentracing/opentracing-go/log/field_test.go b/vendor/github.com/opentracing/opentracing-go/log/field_test.go new file mode 100644 index 000000000..8304f1820 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/log/field_test.go @@ -0,0 +1,39 @@ +package log + +import ( + "fmt" + "testing" +) + +func TestFieldString(t *testing.T) { + testCases := []struct { + field Field + expected string + }{ + { + field: String("key", "value"), + expected: "key:value", + }, + { + field: Bool("key", true), + expected: "key:true", + }, + { + field: Int("key", 5), + expected: "key:5", + }, + { + field: Error(fmt.Errorf("err msg")), + expected: "error:err msg", + }, + { + field: Error(nil), + expected: "error:", + }, + } + for i, tc := range testCases { + if str := tc.field.String(); str != tc.expected { + t.Errorf("%d: expected '%s', got '%s'", i, tc.expected, str) + } + } +} diff --git a/vendor/github.com/opentracing/opentracing-go/log/util.go b/vendor/github.com/opentracing/opentracing-go/log/util.go new file mode 100644 index 000000000..3832feb5c --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/log/util.go @@ -0,0 +1,54 @@ +package log + +import 
"fmt" + +// InterleavedKVToFields converts keyValues a la Span.LogKV() to a Field slice +// a la Span.LogFields(). +func InterleavedKVToFields(keyValues ...interface{}) ([]Field, error) { + if len(keyValues)%2 != 0 { + return nil, fmt.Errorf("non-even keyValues len: %d", len(keyValues)) + } + fields := make([]Field, len(keyValues)/2) + for i := 0; i*2 < len(keyValues); i++ { + key, ok := keyValues[i*2].(string) + if !ok { + return nil, fmt.Errorf( + "non-string key (pair #%d): %T", + i, keyValues[i*2]) + } + switch typedVal := keyValues[i*2+1].(type) { + case bool: + fields[i] = Bool(key, typedVal) + case string: + fields[i] = String(key, typedVal) + case int: + fields[i] = Int(key, typedVal) + case int8: + fields[i] = Int32(key, int32(typedVal)) + case int16: + fields[i] = Int32(key, int32(typedVal)) + case int32: + fields[i] = Int32(key, typedVal) + case int64: + fields[i] = Int64(key, typedVal) + case uint: + fields[i] = Uint64(key, uint64(typedVal)) + case uint64: + fields[i] = Uint64(key, typedVal) + case uint8: + fields[i] = Uint32(key, uint32(typedVal)) + case uint16: + fields[i] = Uint32(key, uint32(typedVal)) + case uint32: + fields[i] = Uint32(key, typedVal) + case float32: + fields[i] = Float32(key, typedVal) + case float64: + fields[i] = Float64(key, typedVal) + default: + // When in doubt, coerce to a string + fields[i] = String(key, fmt.Sprint(typedVal)) + } + } + return fields, nil +} diff --git a/vendor/github.com/opentracing/opentracing-go/noop.go b/vendor/github.com/opentracing/opentracing-go/noop.go new file mode 100644 index 000000000..0d32f692c --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/noop.go @@ -0,0 +1,64 @@ +package opentracing + +import "github.com/opentracing/opentracing-go/log" + +// A NoopTracer is a trivial, minimum overhead implementation of Tracer +// for which all operations are no-ops. 
+// +// The primary use of this implementation is in libraries, such as RPC +// frameworks, that make tracing an optional feature controlled by the +// end user. A no-op implementation allows said libraries to use it +// as the default Tracer and to write instrumentation that does +// not need to keep checking if the tracer instance is nil. +// +// For the same reason, the NoopTracer is the default "global" tracer +// (see GlobalTracer and SetGlobalTracer functions). +// +// WARNING: NoopTracer does not support baggage propagation. +type NoopTracer struct{} + +type noopSpan struct{} +type noopSpanContext struct{} + +var ( + defaultNoopSpanContext = noopSpanContext{} + defaultNoopSpan = noopSpan{} + defaultNoopTracer = NoopTracer{} +) + +const ( + emptyString = "" +) + +// noopSpanContext: +func (n noopSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} + +// noopSpan: +func (n noopSpan) Context() SpanContext { return defaultNoopSpanContext } +func (n noopSpan) SetBaggageItem(key, val string) Span { return defaultNoopSpan } +func (n noopSpan) BaggageItem(key string) string { return emptyString } +func (n noopSpan) SetTag(key string, value interface{}) Span { return n } +func (n noopSpan) LogFields(fields ...log.Field) {} +func (n noopSpan) LogKV(keyVals ...interface{}) {} +func (n noopSpan) Finish() {} +func (n noopSpan) FinishWithOptions(opts FinishOptions) {} +func (n noopSpan) SetOperationName(operationName string) Span { return n } +func (n noopSpan) Tracer() Tracer { return defaultNoopTracer } +func (n noopSpan) LogEvent(event string) {} +func (n noopSpan) LogEventWithPayload(event string, payload interface{}) {} +func (n noopSpan) Log(data LogData) {} + +// StartSpan belongs to the Tracer interface. +func (n NoopTracer) StartSpan(operationName string, opts ...StartSpanOption) Span { + return defaultNoopSpan +} + +// Inject belongs to the Tracer interface. 
+func (n NoopTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error { + return nil +} + +// Extract belongs to the Tracer interface. +func (n NoopTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) { + return nil, ErrSpanContextNotFound +} diff --git a/vendor/github.com/opentracing/opentracing-go/options_test.go b/vendor/github.com/opentracing/opentracing-go/options_test.go new file mode 100644 index 000000000..56a543bfe --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/options_test.go @@ -0,0 +1,31 @@ +package opentracing + +import ( + "testing" + + "github.com/stretchr/testify/require" +) + +func TestChildOfAndFollowsFrom(t *testing.T) { + tests := []struct { + newOpt func(SpanContext) SpanReference + refType SpanReferenceType + name string + }{ + {ChildOf, ChildOfRef, "ChildOf"}, + {FollowsFrom, FollowsFromRef, "FollowsFrom"}, + } + + for _, test := range tests { + opts := new(StartSpanOptions) + + test.newOpt(nil).Apply(opts) + require.Nil(t, opts.References, "%s(nil) must not append a reference", test.name) + + ctx := new(noopSpanContext) + test.newOpt(ctx).Apply(opts) + require.Equal(t, []SpanReference{ + SpanReference{ReferencedContext: ctx, Type: test.refType}, + }, opts.References, "%s(ctx) must append a reference", test.name) + } +} diff --git a/vendor/github.com/opentracing/opentracing-go/propagation.go b/vendor/github.com/opentracing/opentracing-go/propagation.go new file mode 100644 index 000000000..9583fc53a --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/propagation.go @@ -0,0 +1,176 @@ +package opentracing + +import ( + "errors" + "net/http" +) + +/////////////////////////////////////////////////////////////////////////////// +// CORE PROPAGATION INTERFACES: +/////////////////////////////////////////////////////////////////////////////// + +var ( + // ErrUnsupportedFormat occurs when the `format` passed to Tracer.Inject() or + // Tracer.Extract() is not recognized by 
the Tracer implementation. + ErrUnsupportedFormat = errors.New("opentracing: Unknown or unsupported Inject/Extract format") + + // ErrSpanContextNotFound occurs when the `carrier` passed to + // Tracer.Extract() is valid and uncorrupted but has insufficient + // information to extract a SpanContext. + ErrSpanContextNotFound = errors.New("opentracing: SpanContext not found in Extract carrier") + + // ErrInvalidSpanContext errors occur when Tracer.Inject() is asked to + // operate on a SpanContext which it is not prepared to handle (for + // example, since it was created by a different tracer implementation). + ErrInvalidSpanContext = errors.New("opentracing: SpanContext type incompatible with tracer") + + // ErrInvalidCarrier errors occur when Tracer.Inject() or Tracer.Extract() + // implementations expect a different type of `carrier` than they are + // given. + ErrInvalidCarrier = errors.New("opentracing: Invalid Inject/Extract carrier") + + // ErrSpanContextCorrupted occurs when the `carrier` passed to + // Tracer.Extract() is of the expected type but is corrupted. + ErrSpanContextCorrupted = errors.New("opentracing: SpanContext data corrupted in Extract carrier") +) + +/////////////////////////////////////////////////////////////////////////////// +// BUILTIN PROPAGATION FORMATS: +/////////////////////////////////////////////////////////////////////////////// + +// BuiltinFormat is used to demarcate the values within package `opentracing` +// that are intended for use with the Tracer.Inject() and Tracer.Extract() +// methods. +type BuiltinFormat byte + +const ( + // Binary represents SpanContexts as opaque binary data. + // + // For Tracer.Inject(): the carrier must be an `io.Writer`. + // + // For Tracer.Extract(): the carrier must be an `io.Reader`. + Binary BuiltinFormat = iota + + // TextMap represents SpanContexts as key:value string pairs. + // + // Unlike HTTPHeaders, the TextMap format does not restrict the key or + // value character sets in any way. 
+ // + // For Tracer.Inject(): the carrier must be a `TextMapWriter`. + // + // For Tracer.Extract(): the carrier must be a `TextMapReader`. + TextMap + + // HTTPHeaders represents SpanContexts as HTTP header string pairs. + // + // Unlike TextMap, the HTTPHeaders format requires that the keys and values + // be valid as HTTP headers as-is (i.e., character casing may be unstable + // and special characters are disallowed in keys, values should be + // URL-escaped, etc). + // + // For Tracer.Inject(): the carrier must be a `TextMapWriter`. + // + // For Tracer.Extract(): the carrier must be a `TextMapReader`. + // + // See HTTPHeaderCarrier for an implementation of both TextMapWriter + // and TextMapReader that defers to an http.Header instance for storage. + // For example, Inject(): + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // err := span.Tracer().Inject( + // span, opentracing.HTTPHeaders, carrier) + // + // Or Extract(): + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // span, err := tracer.Extract( + // opentracing.HTTPHeaders, carrier) + // + HTTPHeaders +) + +// TextMapWriter is the Inject() carrier for the TextMap builtin format. With +// it, the caller can encode a SpanContext for propagation as entries in a map +// of unicode strings. +type TextMapWriter interface { + // Set a key:value pair to the carrier. Multiple calls to Set() for the + // same key leads to undefined behavior. + // + // NOTE: The backing store for the TextMapWriter may contain data unrelated + // to SpanContext. As such, Inject() and Extract() implementations that + // call the TextMapWriter and TextMapReader interfaces must agree on a + // prefix or other convention to distinguish their own key:value pairs. + Set(key, val string) +} + +// TextMapReader is the Extract() carrier for the TextMap builtin format. With it, +// the caller can decode a propagated SpanContext as entries in a map of +// unicode strings. 
+type TextMapReader interface { + // ForeachKey returns TextMap contents via repeated calls to the `handler` + // function. If any call to `handler` returns a non-nil error, ForeachKey + // terminates and returns that error. + // + // NOTE: The backing store for the TextMapReader may contain data unrelated + // to SpanContext. As such, Inject() and Extract() implementations that + // call the TextMapWriter and TextMapReader interfaces must agree on a + // prefix or other convention to distinguish their own key:value pairs. + // + // The "foreach" callback pattern reduces unnecessary copying in some cases + // and also allows implementations to hold locks while the map is read. + ForeachKey(handler func(key, val string) error) error +} + +// TextMapCarrier allows the use of regular map[string]string +// as both TextMapWriter and TextMapReader. +type TextMapCarrier map[string]string + +// ForeachKey conforms to the TextMapReader interface. +func (c TextMapCarrier) ForeachKey(handler func(key, val string) error) error { + for k, v := range c { + if err := handler(k, v); err != nil { + return err + } + } + return nil +} + +// Set implements Set() of opentracing.TextMapWriter +func (c TextMapCarrier) Set(key, val string) { + c[key] = val +} + +// HTTPHeadersCarrier satisfies both TextMapWriter and TextMapReader. +// +// Example usage for server side: +// +// carrier := opentracing.HttpHeadersCarrier(httpReq.Header) +// spanContext, err := tracer.Extract(opentracing.HttpHeaders, carrier) +// +// Example usage for client side: +// +// carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) +// err := tracer.Inject( +// span.Context(), +// opentracing.HttpHeaders, +// carrier) +// +type HTTPHeadersCarrier http.Header + +// Set conforms to the TextMapWriter interface. +func (c HTTPHeadersCarrier) Set(key, val string) { + h := http.Header(c) + h.Add(key, val) +} + +// ForeachKey conforms to the TextMapReader interface. 
+func (c HTTPHeadersCarrier) ForeachKey(handler func(key, val string) error) error { + for k, vals := range c { + for _, v := range vals { + if err := handler(k, v); err != nil { + return err + } + } + } + return nil +} diff --git a/vendor/github.com/opentracing/opentracing-go/propagation_test.go b/vendor/github.com/opentracing/opentracing-go/propagation_test.go new file mode 100644 index 000000000..e3dad5597 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/propagation_test.go @@ -0,0 +1,93 @@ +package opentracing + +import ( + "net/http" + "strconv" + "testing" +) + +const testHeaderPrefix = "testprefix-" + +func TestTextMapCarrierInject(t *testing.T) { + m := make(map[string]string) + m["NotOT"] = "blah" + m["opname"] = "AlsoNotOT" + tracer := testTracer{} + span := tracer.StartSpan("someSpan") + fakeID := span.Context().(testSpanContext).FakeID + + carrier := TextMapCarrier(m) + if err := span.Tracer().Inject(span.Context(), TextMap, carrier); err != nil { + t.Fatal(err) + } + + if len(m) != 3 { + t.Errorf("Unexpected header length: %v", len(m)) + } + // The prefix comes from just above; the suffix comes from + // testTracer.Inject(). 
+ if m["testprefix-fakeid"] != strconv.Itoa(fakeID) { + t.Errorf("Could not find fakeid at expected key") + } +} + +func TestTextMapCarrierExtract(t *testing.T) { + m := make(map[string]string) + m["NotOT"] = "blah" + m["opname"] = "AlsoNotOT" + m["testprefix-fakeid"] = "42" + tracer := testTracer{} + + carrier := TextMapCarrier(m) + extractedContext, err := tracer.Extract(TextMap, carrier) + if err != nil { + t.Fatal(err) + } + + if extractedContext.(testSpanContext).FakeID != 42 { + t.Errorf("Failed to read testprefix-fakeid correctly") + } +} + +func TestHTTPHeaderInject(t *testing.T) { + h := http.Header{} + h.Add("NotOT", "blah") + h.Add("opname", "AlsoNotOT") + tracer := testTracer{} + span := tracer.StartSpan("someSpan") + fakeID := span.Context().(testSpanContext).FakeID + + // Use HTTPHeadersCarrier to wrap around `h`. + carrier := HTTPHeadersCarrier(h) + if err := span.Tracer().Inject(span.Context(), HTTPHeaders, carrier); err != nil { + t.Fatal(err) + } + + if len(h) != 3 { + t.Errorf("Unexpected header length: %v", len(h)) + } + // The prefix comes from just above; the suffix comes from + // testTracer.Inject(). + if h.Get("testprefix-fakeid") != strconv.Itoa(fakeID) { + t.Errorf("Could not find fakeid at expected key") + } +} + +func TestHTTPHeaderExtract(t *testing.T) { + h := http.Header{} + h.Add("NotOT", "blah") + h.Add("opname", "AlsoNotOT") + h.Add("testprefix-fakeid", "42") + tracer := testTracer{} + + // Use HTTPHeadersCarrier to wrap around `h`. 
+ carrier := HTTPHeadersCarrier(h) + spanContext, err := tracer.Extract(HTTPHeaders, carrier) + if err != nil { + t.Fatal(err) + } + + if spanContext.(testSpanContext).FakeID != 42 { + t.Errorf("Failed to read testprefix-fakeid correctly") + } +} diff --git a/vendor/github.com/opentracing/opentracing-go/span.go b/vendor/github.com/opentracing/opentracing-go/span.go new file mode 100644 index 000000000..f6c3234ac --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/span.go @@ -0,0 +1,185 @@ +package opentracing + +import ( + "time" + + "github.com/opentracing/opentracing-go/log" +) + +// SpanContext represents Span state that must propagate to descendant Spans and across process +// boundaries (e.g., a tuple). +type SpanContext interface { + // ForeachBaggageItem grants access to all baggage items stored in the + // SpanContext. + // The handler function will be called for each baggage key/value pair. + // The ordering of items is not guaranteed. + // + // The bool return value indicates if the handler wants to continue iterating + // through the rest of the baggage items; for example if the handler is trying to + // find some baggage item by pattern matching the name, it can return false + // as soon as the item is found to stop further iterations. + ForeachBaggageItem(handler func(k, v string) bool) +} + +// Span represents an active, un-finished span in the OpenTracing system. +// +// Spans are created by the Tracer interface. +type Span interface { + // Sets the end timestamp and finalizes Span state. + // + // With the exception of calls to Context() (which are always allowed), + // Finish() must be the last call made to any span instance, and to do + // otherwise leads to undefined behavior. + Finish() + // FinishWithOptions is like Finish() but with explicit control over + // timestamps and log data. + FinishWithOptions(opts FinishOptions) + + // Context() yields the SpanContext for this Span. 
Note that the return + // value of Context() is still valid after a call to Span.Finish(), as is + // a call to Span.Context() after a call to Span.Finish(). + Context() SpanContext + + // Sets or changes the operation name. + SetOperationName(operationName string) Span + + // Adds a tag to the span. + // + // If there is a pre-existing tag set for `key`, it is overwritten. + // + // Tag values can be numeric types, strings, or bools. The behavior of + // other tag value types is undefined at the OpenTracing level. If a + // tracing system does not know how to handle a particular value type, it + // may ignore the tag, but shall not panic. + SetTag(key string, value interface{}) Span + + // LogFields is an efficient and type-checked way to record key:value + // logging data about a Span, though the programming interface is a little + // more verbose than LogKV(). Here's an example: + // + // span.LogFields( + // log.String("event", "soft error"), + // log.String("type", "cache timeout"), + // log.Int("waited.millis", 1500)) + // + // Also see Span.FinishWithOptions() and FinishOptions.BulkLogData. + LogFields(fields ...log.Field) + + // LogKV is a concise, readable way to record key:value logging data about + // a Span, though unfortunately this also makes it less efficient and less + // type-safe than LogFields(). Here's an example: + // + // span.LogKV( + // "event", "soft error", + // "type", "cache timeout", + // "waited.millis", 1500) + // + // For LogKV (as opposed to LogFields()), the parameters must appear as + // key-value pairs, like + // + // span.LogKV(key1, val1, key2, val2, key3, val3, ...) + // + // The keys must all be strings. The values may be strings, numeric types, + // bools, Go error instances, or arbitrary structs. 
+ // + // (Note to implementors: consider the log.InterleavedKVToFields() helper) + LogKV(alternatingKeyValues ...interface{}) + + // SetBaggageItem sets a key:value pair on this Span and its SpanContext + // that also propagates to descendants of this Span. + // + // SetBaggageItem() enables powerful functionality given a full-stack + // opentracing integration (e.g., arbitrary application data from a mobile + // app can make it, transparently, all the way into the depths of a storage + // system), and with it some powerful costs: use this feature with care. + // + // IMPORTANT NOTE #1: SetBaggageItem() will only propagate baggage items to + // *future* causal descendants of the associated Span. + // + // IMPORTANT NOTE #2: Use this thoughtfully and with care. Every key and + // value is copied into every local *and remote* child of the associated + // Span, and that can add up to a lot of network and cpu overhead. + // + // Returns a reference to this Span for chaining. + SetBaggageItem(restrictedKey, value string) Span + + // Gets the value for a baggage item given its key. Returns the empty string + // if the value isn't found in this Span. + BaggageItem(restrictedKey string) string + + // Provides access to the Tracer that created this Span. + Tracer() Tracer + + // Deprecated: use LogFields or LogKV + LogEvent(event string) + // Deprecated: use LogFields or LogKV + LogEventWithPayload(event string, payload interface{}) + // Deprecated: use LogFields or LogKV + Log(data LogData) +} + +// LogRecord is data associated with a single Span log. Every LogRecord +// instance must specify at least one Field. +type LogRecord struct { + Timestamp time.Time + Fields []log.Field +} + +// FinishOptions allows Span.FinishWithOptions callers to override the finish +// timestamp and provide log data via a bulk interface. +type FinishOptions struct { + // FinishTime overrides the Span's finish time, or implicitly becomes + // time.Now() if FinishTime.IsZero(). 
+ // + // FinishTime must resolve to a timestamp that's >= the Span's StartTime + // (per StartSpanOptions). + FinishTime time.Time + + // LogRecords allows the caller to specify the contents of many LogFields() + // calls with a single slice. May be nil. + // + // None of the LogRecord.Timestamp values may be .IsZero() (i.e., they must + // be set explicitly). Also, they must be >= the Span's start timestamp and + // <= the FinishTime (or time.Now() if FinishTime.IsZero()). Otherwise the + // behavior of FinishWithOptions() is undefined. + // + // If specified, the caller hands off ownership of LogRecords at + // FinishWithOptions() invocation time. + // + // If specified, the (deprecated) BulkLogData must be nil or empty. + LogRecords []LogRecord + + // BulkLogData is DEPRECATED. + BulkLogData []LogData +} + +// LogData is DEPRECATED +type LogData struct { + Timestamp time.Time + Event string + Payload interface{} +} + +// ToLogRecord converts a deprecated LogData to a non-deprecated LogRecord +func (ld *LogData) ToLogRecord() LogRecord { + var literalTimestamp time.Time + if ld.Timestamp.IsZero() { + literalTimestamp = time.Now() + } else { + literalTimestamp = ld.Timestamp + } + rval := LogRecord{ + Timestamp: literalTimestamp, + } + if ld.Payload == nil { + rval.Fields = []log.Field{ + log.String("event", ld.Event), + } + } else { + rval.Fields = []log.Field{ + log.String("event", ld.Event), + log.Object("payload", ld.Payload), + } + } + return rval +} diff --git a/vendor/github.com/opentracing/opentracing-go/testtracer_test.go b/vendor/github.com/opentracing/opentracing-go/testtracer_test.go new file mode 100644 index 000000000..dd13788cf --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/testtracer_test.go @@ -0,0 +1,138 @@ +package opentracing + +import ( + "strconv" + "strings" + "time" + + "github.com/opentracing/opentracing-go/log" +) + +const testHTTPHeaderPrefix = "testprefix-" + +// testTracer is a most-noop Tracer implementation that 
makes it possible for +// unittests to verify whether certain methods were / were not called. +type testTracer struct{} + +var fakeIDSource = 1 + +func nextFakeID() int { + fakeIDSource++ + return fakeIDSource +} + +type testSpanContext struct { + HasParent bool + FakeID int +} + +func (n testSpanContext) ForeachBaggageItem(handler func(k, v string) bool) {} + +type testSpan struct { + spanContext testSpanContext + OperationName string + StartTime time.Time + Tags map[string]interface{} +} + +func (n testSpan) Equal(os Span) bool { + other, ok := os.(testSpan) + if !ok { + return false + } + if n.spanContext != other.spanContext { + return false + } + if n.OperationName != other.OperationName { + return false + } + if !n.StartTime.Equal(other.StartTime) { + return false + } + if len(n.Tags) != len(other.Tags) { + return false + } + + for k, v := range n.Tags { + if ov, ok := other.Tags[k]; !ok || ov != v { + return false + } + } + + return true +} + +// testSpan: +func (n testSpan) Context() SpanContext { return n.spanContext } +func (n testSpan) SetTag(key string, value interface{}) Span { return n } +func (n testSpan) Finish() {} +func (n testSpan) FinishWithOptions(opts FinishOptions) {} +func (n testSpan) LogFields(fields ...log.Field) {} +func (n testSpan) LogKV(kvs ...interface{}) {} +func (n testSpan) SetOperationName(operationName string) Span { return n } +func (n testSpan) Tracer() Tracer { return testTracer{} } +func (n testSpan) SetBaggageItem(key, val string) Span { return n } +func (n testSpan) BaggageItem(key string) string { return "" } +func (n testSpan) LogEvent(event string) {} +func (n testSpan) LogEventWithPayload(event string, payload interface{}) {} +func (n testSpan) Log(data LogData) {} + +// StartSpan belongs to the Tracer interface. 
+func (n testTracer) StartSpan(operationName string, opts ...StartSpanOption) Span { + sso := StartSpanOptions{} + for _, o := range opts { + o.Apply(&sso) + } + return n.startSpanWithOptions(operationName, sso) +} + +func (n testTracer) startSpanWithOptions(name string, opts StartSpanOptions) Span { + fakeID := nextFakeID() + if len(opts.References) > 0 { + fakeID = opts.References[0].ReferencedContext.(testSpanContext).FakeID + } + + return testSpan{ + OperationName: name, + StartTime: opts.StartTime, + Tags: opts.Tags, + spanContext: testSpanContext{ + HasParent: len(opts.References) > 0, + FakeID: fakeID, + }, + } +} + +// Inject belongs to the Tracer interface. +func (n testTracer) Inject(sp SpanContext, format interface{}, carrier interface{}) error { + spanContext := sp.(testSpanContext) + switch format { + case HTTPHeaders, TextMap: + carrier.(TextMapWriter).Set(testHTTPHeaderPrefix+"fakeid", strconv.Itoa(spanContext.FakeID)) + return nil + } + return ErrUnsupportedFormat +} + +// Extract belongs to the Tracer interface. +func (n testTracer) Extract(format interface{}, carrier interface{}) (SpanContext, error) { + switch format { + case HTTPHeaders, TextMap: + // Just for testing purposes... generally not a worthwhile thing to + // propagate. 
+ sm := testSpanContext{} + err := carrier.(TextMapReader).ForeachKey(func(key, val string) error { + switch strings.ToLower(key) { + case testHTTPHeaderPrefix + "fakeid": + i, err := strconv.Atoi(val) + if err != nil { + return err + } + sm.FakeID = i + } + return nil + }) + return sm, err + } + return nil, ErrSpanContextNotFound +} diff --git a/vendor/github.com/opentracing/opentracing-go/tracer.go b/vendor/github.com/opentracing/opentracing-go/tracer.go new file mode 100644 index 000000000..fd77c1df3 --- /dev/null +++ b/vendor/github.com/opentracing/opentracing-go/tracer.go @@ -0,0 +1,305 @@ +package opentracing + +import "time" + +// Tracer is a simple, thin interface for Span creation and SpanContext +// propagation. +type Tracer interface { + + // Create, start, and return a new Span with the given `operationName` and + // incorporate the given StartSpanOption `opts`. (Note that `opts` borrows + // from the "functional options" pattern, per + // http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis) + // + // A Span with no SpanReference options (e.g., opentracing.ChildOf() or + // opentracing.FollowsFrom()) becomes the root of its own trace. + // + // Examples: + // + // var tracer opentracing.Tracer = ... + // + // // The root-span case: + // sp := tracer.StartSpan("GetFeed") + // + // // The vanilla child span case: + // sp := tracer.StartSpan( + // "GetFeed", + // opentracing.ChildOf(parentSpan.Context())) + // + // // All the bells and whistles: + // sp := tracer.StartSpan( + // "GetFeed", + // opentracing.ChildOf(parentSpan.Context()), + // opentracing.Tag("user_agent", loggedReq.UserAgent), + // opentracing.StartTime(loggedReq.Timestamp), + // ) + // + StartSpan(operationName string, opts ...StartSpanOption) Span + + // Inject() takes the `sm` SpanContext instance and injects it for + // propagation within `carrier`. The actual type of `carrier` depends on + // the value of `format`. 
+ // + // OpenTracing defines a common set of `format` values (see BuiltinFormat), + // and each has an expected carrier type. + // + // Other packages may declare their own `format` values, much like the keys + // used by `context.Context` (see + // https://godoc.org/golang.org/x/net/context#WithValue). + // + // Example usage (sans error handling): + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // err := tracer.Inject( + // span.Context(), + // opentracing.HTTPHeaders, + // carrier) + // + // NOTE: All opentracing.Tracer implementations MUST support all + // BuiltinFormats. + // + // Implementations may return opentracing.ErrUnsupportedFormat if `format` + // is not supported by (or not known by) the implementation. + // + // Implementations may return opentracing.ErrInvalidCarrier or any other + // implementation-specific error if the format is supported but injection + // fails anyway. + // + // See Tracer.Extract(). + Inject(sm SpanContext, format interface{}, carrier interface{}) error + + // Extract() returns a SpanContext instance given `format` and `carrier`. + // + // OpenTracing defines a common set of `format` values (see BuiltinFormat), + // and each has an expected carrier type. + // + // Other packages may declare their own `format` values, much like the keys + // used by `context.Context` (see + // https://godoc.org/golang.org/x/net/context#WithValue). + // + // Example usage (with StartSpan): + // + // + // carrier := opentracing.HTTPHeadersCarrier(httpReq.Header) + // clientContext, err := tracer.Extract(opentracing.HTTPHeaders, carrier) + // + // // ... 
assuming the ultimate goal here is to resume the trace with a + // // server-side Span: + // var serverSpan opentracing.Span + // if err == nil { + // span = tracer.StartSpan( + // rpcMethodName, ext.RPCServerOption(clientContext)) + // } else { + // span = tracer.StartSpan(rpcMethodName) + // } + // + // + // NOTE: All opentracing.Tracer implementations MUST support all + // BuiltinFormats. + // + // Return values: + // - A successful Extract returns a SpanContext instance and a nil error + // - If there was simply no SpanContext to extract in `carrier`, Extract() + // returns (nil, opentracing.ErrSpanContextNotFound) + // - If `format` is unsupported or unrecognized, Extract() returns (nil, + // opentracing.ErrUnsupportedFormat) + // - If there are more fundamental problems with the `carrier` object, + // Extract() may return opentracing.ErrInvalidCarrier, + // opentracing.ErrSpanContextCorrupted, or implementation-specific + // errors. + // + // See Tracer.Inject(). + Extract(format interface{}, carrier interface{}) (SpanContext, error) +} + +// StartSpanOptions allows Tracer.StartSpan() callers and implementors a +// mechanism to override the start timestamp, specify Span References, and make +// a single Tag or multiple Tags available at Span start time. +// +// StartSpan() callers should look at the StartSpanOption interface and +// implementations available in this package. +// +// Tracer implementations can convert a slice of `StartSpanOption` instances +// into a `StartSpanOptions` struct like so: +// +// func StartSpan(opName string, opts ...opentracing.StartSpanOption) { +// sso := opentracing.StartSpanOptions{} +// for _, o := range opts { +// o.Apply(&sso) +// } +// ... +// } +// +type StartSpanOptions struct { + // Zero or more causal references to other Spans (via their SpanContext). + // If empty, start a "root" Span (i.e., start a new trace). 
+ References []SpanReference + + // StartTime overrides the Span's start time, or implicitly becomes + // time.Now() if StartTime.IsZero(). + StartTime time.Time + + // Tags may have zero or more entries; the restrictions on map values are + // identical to those for Span.SetTag(). May be nil. + // + // If specified, the caller hands off ownership of Tags at + // StartSpan() invocation time. + Tags map[string]interface{} +} + +// StartSpanOption instances (zero or more) may be passed to Tracer.StartSpan. +// +// StartSpanOption borrows from the "functional options" pattern, per +// http://dave.cheney.net/2014/10/17/functional-options-for-friendly-apis +type StartSpanOption interface { + Apply(*StartSpanOptions) +} + +// SpanReferenceType is an enum type describing different categories of +// relationships between two Spans. If Span-2 refers to Span-1, the +// SpanReferenceType describes Span-1 from Span-2's perspective. For example, +// ChildOfRef means that Span-1 created Span-2. +// +// NOTE: Span-1 and Span-2 do *not* necessarily depend on each other for +// completion; e.g., Span-2 may be part of a background job enqueued by Span-1, +// or Span-2 may be sitting in a distributed queue behind Span-1. +type SpanReferenceType int + +const ( + // ChildOfRef refers to a parent Span that caused *and* somehow depends + // upon the new child Span. Often (but not always), the parent Span cannot + // finish until the child Span does. + // + // An timing diagram for a ChildOfRef that's blocked on the new Span: + // + // [-Parent Span---------] + // [-Child Span----] + // + // See http://opentracing.io/spec/ + // + // See opentracing.ChildOf() + ChildOfRef SpanReferenceType = iota + + // FollowsFromRef refers to a parent Span that does not depend in any way + // on the result of the new child Span. For instance, one might use + // FollowsFromRefs to describe pipeline stages separated by queues, + // or a fire-and-forget cache insert at the tail end of a web request. 
+ // + // A FollowsFromRef Span is part of the same logical trace as the new Span: + // i.e., the new Span is somehow caused by the work of its FollowsFromRef. + // + // All of the following could be valid timing diagrams for children that + // "FollowFrom" a parent. + // + // [-Parent Span-] [-Child Span-] + // + // + // [-Parent Span--] + // [-Child Span-] + // + // + // [-Parent Span-] + // [-Child Span-] + // + // See http://opentracing.io/spec/ + // + // See opentracing.FollowsFrom() + FollowsFromRef +) + +// SpanReference is a StartSpanOption that pairs a SpanReferenceType and a +// referenced SpanContext. See the SpanReferenceType documentation for +// supported relationships. If SpanReference is created with +// ReferencedContext==nil, it has no effect. Thus it allows for a more concise +// syntax for starting spans: +// +// sc, _ := tracer.Extract(someFormat, someCarrier) +// span := tracer.StartSpan("operation", opentracing.ChildOf(sc)) +// +// The `ChildOf(sc)` option above will not panic if sc == nil, it will just +// not add the parent span reference to the options. +type SpanReference struct { + Type SpanReferenceType + ReferencedContext SpanContext +} + +// Apply satisfies the StartSpanOption interface. +func (r SpanReference) Apply(o *StartSpanOptions) { + if r.ReferencedContext != nil { + o.References = append(o.References, r) + } +} + +// ChildOf returns a StartSpanOption pointing to a dependent parent span. +// If sc == nil, the option has no effect. +// +// See ChildOfRef, SpanReference +func ChildOf(sc SpanContext) SpanReference { + return SpanReference{ + Type: ChildOfRef, + ReferencedContext: sc, + } +} + +// FollowsFrom returns a StartSpanOption pointing to a parent Span that caused +// the child Span but does not directly depend on its result in any way. +// If sc == nil, the option has no effect. 
+// +// See FollowsFromRef, SpanReference +func FollowsFrom(sc SpanContext) SpanReference { + return SpanReference{ + Type: FollowsFromRef, + ReferencedContext: sc, + } +} + +// StartTime is a StartSpanOption that sets an explicit start timestamp for the +// new Span. +type StartTime time.Time + +// Apply satisfies the StartSpanOption interface. +func (t StartTime) Apply(o *StartSpanOptions) { + o.StartTime = time.Time(t) +} + +// Tags are a generic map from an arbitrary string key to an opaque value type. +// The underlying tracing system is responsible for interpreting and +// serializing the values. +type Tags map[string]interface{} + +// Apply satisfies the StartSpanOption interface. +func (t Tags) Apply(o *StartSpanOptions) { + if o.Tags == nil { + o.Tags = make(map[string]interface{}) + } + for k, v := range t { + o.Tags[k] = v + } +} + +// Tag may be passed as a StartSpanOption to add a tag to new spans, +// or its Set method may be used to apply the tag to an existing Span, +// for example: +// +// tracer.StartSpan("opName", Tag{"Key", value}) +// +// or +// +// Tag{"key", value}.Set(span) +type Tag struct { + Key string + Value interface{} +} + +// Apply satisfies the StartSpanOption interface. +func (t Tag) Apply(o *StartSpanOptions) { + if o.Tags == nil { + o.Tags = make(map[string]interface{}) + } + o.Tags[t.Key] = t.Value +} + +// Set applies the tag to an existing Span. 
+func (t Tag) Set(s Span) { + s.SetTag(t.Key, t.Value) +} diff --git a/vendor/github.com/prometheus/client_golang/.gitignore b/vendor/github.com/prometheus/client_golang/.gitignore new file mode 100644 index 000000000..f6fc2e8eb --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/.gitignore @@ -0,0 +1,26 @@ +# Compiled Object files, Static and Dynamic libs (Shared Objects) +*.o +*.a +*.so + +# Folders +_obj +_test + +# Architecture specific extensions/prefixes +*.[568vq] +[568vq].out + +*.cgo1.go +*.cgo2.c +_cgo_defun.c +_cgo_gotypes.go +_cgo_export.* + +_testmain.go + +*.exe + +*~ +*# +.build diff --git a/vendor/github.com/prometheus/client_golang/.travis.yml b/vendor/github.com/prometheus/client_golang/.travis.yml new file mode 100644 index 000000000..d83f31a59 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/.travis.yml @@ -0,0 +1,9 @@ +sudo: false +language: go + +go: + - 1.5.4 + - 1.6.2 + +script: + - go test -short ./... diff --git a/vendor/github.com/prometheus/client_golang/AUTHORS.md b/vendor/github.com/prometheus/client_golang/AUTHORS.md new file mode 100644 index 000000000..c5275d5ab --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/AUTHORS.md @@ -0,0 +1,18 @@ +The Prometheus project was started by Matt T. Proud (emeritus) and +Julius Volz in 2012. + +Maintainers of this repository: + +* Björn Rabenstein + +The following individuals have contributed code to this repository +(listed in alphabetical order): + +* Bernerd Schaefer +* Björn Rabenstein +* Daniel Bornkessel +* Jeff Younker +* Julius Volz +* Matt T. Proud +* Tobias Schmidt + diff --git a/vendor/github.com/prometheus/client_golang/CHANGELOG.md b/vendor/github.com/prometheus/client_golang/CHANGELOG.md new file mode 100644 index 000000000..330788a4e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/CHANGELOG.md @@ -0,0 +1,109 @@ +## 0.8.0 / 2016-08-17 +* [CHANGE] Registry is doing more consistency checks. 
This might break + existing setups that used to export inconsistent metrics. +* [CHANGE] Pushing to Pushgateway moved to package `push` and changed to allow + arbitrary grouping. +* [CHANGE] Removed `SelfCollector`. +* [CHANGE] Removed `PanicOnCollectError` and `EnableCollectChecks` methods. +* [CHANGE] Moved packages to the prometheus/common repo: `text`, `model`, + `extraction`. +* [CHANGE] Deprecated a number of functions. +* [FEATURE] Allow custom registries. Added `Registerer` and `Gatherer` + interfaces. +* [FEATURE] Separated HTTP exposition, allowing custom HTTP handlers (package + `promhttp`) and enabling the creation of other exposition mechanisms. +* [FEATURE] `MustRegister` is variadic now, allowing registration of many + collectors in one call. +* [FEATURE] Added HTTP API v1 package. +* [ENHANCEMENT] Numerous documentation improvements. +* [ENHANCEMENT] Improved metric sorting. +* [ENHANCEMENT] Inlined fnv64a hashing for improved performance. +* [ENHANCEMENT] Several test improvements. +* [BUGFIX] Handle collisions in MetricVec. + +## 0.7.0 / 2015-07-27 +* [CHANGE] Rename ExporterLabelPrefix to ExportedLabelPrefix. +* [BUGFIX] Closed gaps in metric consistency check. +* [BUGFIX] Validate LabelName/LabelSet on JSON unmarshaling. +* [ENHANCEMENT] Document the possibility to create "empty" metrics in + a metric vector. +* [ENHANCEMENT] Fix and clarify various doc comments and the README.md. +* [ENHANCEMENT] (Kind of) solve "The Proxy Problem" of http.InstrumentHandler. +* [ENHANCEMENT] Change responseWriterDelegator.written to int64. + +## 0.6.0 / 2015-06-01 +* [CHANGE] Rename process_goroutines to go_goroutines. +* [ENHANCEMENT] Validate label names during YAML decoding. +* [ENHANCEMENT] Add LabelName regular expression. +* [BUGFIX] Ensure alignment of struct members for 32-bit systems. + +## 0.5.0 / 2015-05-06 +* [BUGFIX] Removed a weakness in the fingerprinting aka signature code. 
+ This makes fingerprinting slower and more allocation-heavy, but the + weakness was too severe to be tolerated. +* [CHANGE] As a result of the above, Metric.Fingerprint is now returning + a different fingerprint. To keep the same fingerprint, the new method + Metric.FastFingerprint was introduced, which will be used by the + Prometheus server for storage purposes (implying that a collision + detection has to be added, too). +* [ENHANCEMENT] The Metric.Equal and Metric.Before do not depend on + fingerprinting anymore, removing the possibility of an undetected + fingerprint collision. +* [FEATURE] The Go collector in the exposition library includes garbage + collection stats. +* [FEATURE] The exposition library allows to create constant "throw-away" + summaries and histograms. +* [CHANGE] A number of new reserved labels and prefixes. + +## 0.4.0 / 2015-04-08 +* [CHANGE] Return NaN when Summaries have no observations yet. +* [BUGFIX] Properly handle Summary decay upon Write(). +* [BUGFIX] Fix the documentation link to the consumption library. +* [FEATURE] Allow the metric family injection hook to merge with existing + metric families. +* [ENHANCEMENT] Removed cgo dependency and conditional compilation of procfs. +* [MAINTENANCE] Adjusted to changes in matttproud/golang_protobuf_extensions. + +## 0.3.2 / 2015-03-11 +* [BUGFIX] Fixed the receiver type of COWMetric.Set(). This method is + only used by the Prometheus server internally. +* [CLEANUP] Added licenses of vendored code left out by godep. + +## 0.3.1 / 2015-03-04 +* [ENHANCEMENT] Switched fingerprinting functions from own free list to + sync.Pool. +* [CHANGE] Makefile uses Go 1.4.2 now (only relevant for examples and tests). + +## 0.3.0 / 2015-03-03 +* [CHANGE] Changed the fingerprinting for metrics. THIS WILL INVALIDATE ALL + PERSISTED FINGERPRINTS. IF YOU COMPILE THE PROMETHEUS SERVER WITH THIS + VERSION, YOU HAVE TO WIPE THE PREVIOUSLY CREATED STORAGE. +* [CHANGE] LabelValuesToSignature removed. 
(Nobody had used it, and it was + arguably broken.) +* [CHANGE] Vendored dependencies. Those are only used by the Makefile. If + client_golang is used as a library, the vendoring will stay out of your way. +* [BUGFIX] Remove a weakness in the fingerprinting for metrics. (This made + the fingerprinting change above necessary.) +* [FEATURE] Added new fingerprinting functions SignatureForLabels and + SignatureWithoutLabels to be used by the Prometheus server. These functions + require fewer allocations than the ones currently used by the server. + +## 0.2.0 / 2015-02-23 +* [FEATURE] Introduce new Histagram metric type. +* [CHANGE] Ignore process collector errors for now (better error handling + pending). +* [CHANGE] Use clear error interface for process pidFn. +* [BUGFIX] Fix Go download links for several archs and OSes. +* [ENHANCEMENT] Massively improve Gauge and Counter performance. +* [ENHANCEMENT] Catch illegal label names for summaries in histograms. +* [ENHANCEMENT] Reduce allocations during fingerprinting. +* [ENHANCEMENT] Remove cgo dependency. procfs package will only be included if + both cgo is available and the build is for an OS with procfs. +* [CLEANUP] Clean up code style issues. +* [CLEANUP] Mark slow test as such and exclude them from travis. +* [CLEANUP] Update protobuf library package name. +* [CLEANUP] Updated vendoring of beorn7/perks. + +## 0.1.0 / 2015-02-02 +* [CLEANUP] Introduced semantic versioning and changelog. From now on, + changes will be reported in this file. diff --git a/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md b/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md new file mode 100644 index 000000000..5705f0fbe --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/CONTRIBUTING.md @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. 
+ +* If you have a trivial fix or improvement, go ahead and create a pull + request, addressing (with `@...`) one or more of the maintainers + (see [AUTHORS.md](AUTHORS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/prometheus/client_golang/LICENSE b/vendor/github.com/prometheus/client_golang/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. 
+ + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. 
For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. 
If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. 
You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. 
Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/client_golang/NOTICE b/vendor/github.com/prometheus/client_golang/NOTICE new file mode 100644 index 000000000..dd878a30e --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/NOTICE @@ -0,0 +1,23 @@ +Prometheus instrumentation library for Go applications +Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). + + +The following components are included in this product: + +perks - a fork of https://github.com/bmizerany/perks +https://github.com/beorn7/perks +Copyright 2013-2015 Blake Mizerany, Björn Rabenstein +See https://github.com/beorn7/perks/blob/master/README.md for license details. + +Go support for Protocol Buffers - Google's data interchange format +http://github.com/golang/protobuf/ +Copyright 2010 The Go Authors +See source code for license details. + +Support for streaming Protocol Buffer messages for the Go language (golang). +https://github.com/matttproud/golang_protobuf_extensions +Copyright 2013 Matt T. 
Proud +Licensed under the Apache License, Version 2.0 diff --git a/vendor/github.com/prometheus/client_golang/README.md b/vendor/github.com/prometheus/client_golang/README.md new file mode 100644 index 000000000..557eacf5a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/README.md @@ -0,0 +1,45 @@ +# Prometheus Go client library + +[![Build Status](https://travis-ci.org/prometheus/client_golang.svg?branch=master)](https://travis-ci.org/prometheus/client_golang) + +This is the [Go](http://golang.org) client library for +[Prometheus](http://prometheus.io). It has two separate parts, one for +instrumenting application code, and one for creating clients that talk to the +Prometheus HTTP API. + +## Instrumenting applications + +[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/prometheus)](http://gocover.io/github.com/prometheus/client_golang/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus) + +The +[`prometheus` directory](https://github.com/prometheus/client_golang/tree/master/prometheus) +contains the instrumentation library. See the +[best practices section](http://prometheus.io/docs/practices/naming/) of the +Prometheus documentation to learn more about instrumenting applications. + +The +[`examples` directory](https://github.com/prometheus/client_golang/tree/master/examples) +contains simple examples of instrumented code. 
+ +## Client for the Prometheus HTTP API + +[![code-coverage](http://gocover.io/_badge/github.com/prometheus/client_golang/api/prometheus)](http://gocover.io/github.com/prometheus/client_golang/api/prometheus) [![go-doc](https://godoc.org/github.com/prometheus/client_golang/api/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/api/prometheus) + +The +[`api/prometheus` directory](https://github.com/prometheus/client_golang/tree/master/api/prometheus) +contains the client for the +[Prometheus HTTP API](http://prometheus.io/docs/querying/api/). It allows you +to write Go applications that query time series data from a Prometheus server. + +## Where is `model`, `extraction`, and `text`? + +The `model` packages has been moved to +[`prometheus/common/model`](https://github.com/prometheus/common/tree/master/model). + +The `extraction` and `text` packages are now contained in +[`prometheus/common/expfmt`](https://github.com/prometheus/common/tree/master/expfmt). + +## Contributing and community + +See the [contributing guidelines](CONTRIBUTING.md) and the +[Community section](http://prometheus.io/community/) of the homepage. 
diff --git a/vendor/github.com/prometheus/client_golang/VERSION b/vendor/github.com/prometheus/client_golang/VERSION new file mode 100644 index 000000000..a3df0a695 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/VERSION @@ -0,0 +1 @@ +0.8.0 diff --git a/vendor/github.com/prometheus/client_golang/prometheus/.gitignore b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore new file mode 100644 index 000000000..3460f0346 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/.gitignore @@ -0,0 +1 @@ +command-line-arguments.test diff --git a/vendor/github.com/prometheus/client_golang/prometheus/README.md b/vendor/github.com/prometheus/client_golang/prometheus/README.md new file mode 100644 index 000000000..44986bff0 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/README.md @@ -0,0 +1 @@ +See [![go-doc](https://godoc.org/github.com/prometheus/client_golang/prometheus?status.svg)](https://godoc.org/github.com/prometheus/client_golang/prometheus). diff --git a/vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go b/vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go new file mode 100644 index 000000000..a3d86698b --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/benchmark_test.go @@ -0,0 +1,183 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "sync" + "testing" +) + +func BenchmarkCounterWithLabelValues(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Inc() + } +} + +func BenchmarkCounterWithLabelValuesConcurrent(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + wg := sync.WaitGroup{} + for i := 0; i < 10; i++ { + wg.Add(1) + go func() { + for j := 0; j < b.N/10; j++ { + m.WithLabelValues("eins", "zwei", "drei").Inc() + } + wg.Done() + }() + } + wg.Wait() +} + +func BenchmarkCounterWithMappedLabels(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.With(Labels{"two": "zwei", "one": "eins", "three": "drei"}).Inc() + } +} + +func BenchmarkCounterWithPreparedMappedLabels(b *testing.B) { + m := NewCounterVec( + CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + labels := Labels{"two": "zwei", "one": "eins", "three": "drei"} + for i := 0; i < b.N; i++ { + m.With(labels).Inc() + } +} + +func BenchmarkCounterNoLabels(b *testing.B) { + m := NewCounter(CounterOpts{ + Name: "benchmark_counter", + Help: "A counter to benchmark it.", + }) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Inc() + } +} + +func BenchmarkGaugeWithLabelValues(b *testing.B) { + m := NewGaugeVec( + GaugeOpts{ + Name: "benchmark_gauge", + Help: "A gauge to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + 
b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Set(3.1415) + } +} + +func BenchmarkGaugeNoLabels(b *testing.B) { + m := NewGauge(GaugeOpts{ + Name: "benchmark_gauge", + Help: "A gauge to benchmark it.", + }) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Set(3.1415) + } +} + +func BenchmarkSummaryWithLabelValues(b *testing.B) { + m := NewSummaryVec( + SummaryOpts{ + Name: "benchmark_summary", + Help: "A summary to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415) + } +} + +func BenchmarkSummaryNoLabels(b *testing.B) { + m := NewSummary(SummaryOpts{ + Name: "benchmark_summary", + Help: "A summary to benchmark it.", + }, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Observe(3.1415) + } +} + +func BenchmarkHistogramWithLabelValues(b *testing.B) { + m := NewHistogramVec( + HistogramOpts{ + Name: "benchmark_histogram", + Help: "A histogram to benchmark it.", + }, + []string{"one", "two", "three"}, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.WithLabelValues("eins", "zwei", "drei").Observe(3.1415) + } +} + +func BenchmarkHistogramNoLabels(b *testing.B) { + m := NewHistogram(HistogramOpts{ + Name: "benchmark_histogram", + Help: "A histogram to benchmark it.", + }, + ) + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + m.Observe(3.1415) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/collector.go b/vendor/github.com/prometheus/client_golang/prometheus/collector.go new file mode 100644 index 000000000..623d3d83f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/collector.go @@ -0,0 +1,75 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in 
compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Collector is the interface implemented by anything that can be used by +// Prometheus to collect metrics. A Collector has to be registered for +// collection. See Registerer.Register. +// +// The stock metrics provided by this package (Gauge, Counter, Summary, +// Histogram, Untyped) are also Collectors (which only ever collect one metric, +// namely itself). An implementer of Collector may, however, collect multiple +// metrics in a coordinated fashion and/or create metrics on the fly. Examples +// for collectors already implemented in this library are the metric vectors +// (i.e. collection of multiple instances of the same Metric but with different +// label values) like GaugeVec or SummaryVec, and the ExpvarCollector. +type Collector interface { + // Describe sends the super-set of all possible descriptors of metrics + // collected by this Collector to the provided channel and returns once + // the last descriptor has been sent. The sent descriptors fulfill the + // consistency and uniqueness requirements described in the Desc + // documentation. (It is valid if one and the same Collector sends + // duplicate descriptors. Those duplicates are simply ignored. However, + // two different Collectors must not send duplicate descriptors.) This + // method idempotently sends the same descriptors throughout the + // lifetime of the Collector. 
If a Collector encounters an error while + // executing this method, it must send an invalid descriptor (created + // with NewInvalidDesc) to signal the error to the registry. + Describe(chan<- *Desc) + // Collect is called by the Prometheus registry when collecting + // metrics. The implementation sends each collected metric via the + // provided channel and returns once the last metric has been sent. The + // descriptor of each sent metric is one of those returned by + // Describe. Returned metrics that share the same descriptor must differ + // in their variable label values. This method may be called + // concurrently and must therefore be implemented in a concurrency safe + // way. Blocking occurs at the expense of total performance of rendering + // all registered metrics. Ideally, Collector implementations support + // concurrent readers. + Collect(chan<- Metric) +} + +// selfCollector implements Collector for a single Metric so that the Metric +// collects itself. Add it as an anonymous field to a struct that implements +// Metric, and call init with the Metric itself as an argument. +type selfCollector struct { + self Metric +} + +// init provides the selfCollector with a reference to the metric it is supposed +// to collect. It is usually called within the factory function to create a +// metric. See example. +func (c *selfCollector) init(self Metric) { + c.self = self +} + +// Describe implements Collector. +func (c *selfCollector) Describe(ch chan<- *Desc) { + ch <- c.self.Desc() +} + +// Collect implements Collector. 
+func (c *selfCollector) Collect(ch chan<- Metric) { + ch <- c.self +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter.go b/vendor/github.com/prometheus/client_golang/prometheus/counter.go new file mode 100644 index 000000000..ee37949ad --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter.go @@ -0,0 +1,172 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" +) + +// Counter is a Metric that represents a single numerical value that only ever +// goes up. That implies that it cannot be used to count items whose number can +// also go down, e.g. the number of currently running goroutines. Those +// "counters" are represented by Gauges. +// +// A Counter is typically used to count requests served, tasks completed, errors +// occurred, etc. +// +// To create Counter instances, use NewCounter. +type Counter interface { + Metric + Collector + + // Set is used to set the Counter to an arbitrary value. It is only used + // if you have to transfer a value from an external counter into this + // Prometheus metric. Do not use it for regular handling of a + // Prometheus counter (as it can be used to break the contract of + // monotonically increasing values). + // + // Deprecated: Use NewConstMetric to create a counter for an external + // value. A Counter should never be set. + Set(float64) + // Inc increments the counter by 1. 
+ Inc() + // Add adds the given value to the counter. It panics if the value is < + // 0. + Add(float64) +} + +// CounterOpts is an alias for Opts. See there for doc comments. +type CounterOpts Opts + +// NewCounter creates a new Counter based on the provided CounterOpts. +func NewCounter(opts CounterOpts) Counter { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ) + result := &counter{value: value{desc: desc, valType: CounterValue, labelPairs: desc.constLabelPairs}} + result.init(result) // Init self-collection. + return result +} + +type counter struct { + value +} + +func (c *counter) Add(v float64) { + if v < 0 { + panic(errors.New("counter cannot decrease in value")) + } + c.value.Add(v) +} + +// CounterVec is a Collector that bundles a set of Counters that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. number of HTTP requests, partitioned by response code and +// method). Create instances with NewCounterVec. +// +// CounterVec embeds MetricVec. See there for a full list of methods with +// detailed documentation. +type CounterVec struct { + *MetricVec +} + +// NewCounterVec creates a new CounterVec based on the provided CounterOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewCounterVec(opts CounterOpts, labelNames []string) *CounterVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &CounterVec{ + MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + result := &counter{value: value{ + desc: desc, + valType: CounterValue, + labelPairs: makeLabelPairs(desc, lvs), + }} + result.init(result) // Init self-collection. 
+ return result + }), + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Counter and not a +// Metric so that no type conversion is required. +func (m *CounterVec) GetMetricWithLabelValues(lvs ...string) (Counter, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Counter and not a Metric so that no +// type conversion is required. +func (m *CounterVec) GetMetricWith(labels Labels) (Counter, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Counter), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (m *CounterVec) WithLabelValues(lvs ...string) Counter { + return m.MetricVec.WithLabelValues(lvs...).(Counter) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *CounterVec) With(labels Labels) Counter { + return m.MetricVec.With(labels).(Counter) +} + +// CounterFunc is a Counter whose value is determined at collect time by calling a +// provided function. +// +// To create CounterFunc instances, use NewCounterFunc. +type CounterFunc interface { + Metric + Collector +} + +// NewCounterFunc creates a new CounterFunc based on the provided +// CounterOpts. The value reported is determined by calling the given function +// from within the Write method. 
Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a CounterFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. The function should also honor +// the contract for a Counter (values only go up, not down), but compliance will +// not be checked. +func NewCounterFunc(opts CounterOpts, function func() float64) CounterFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), CounterValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/counter_test.go b/vendor/github.com/prometheus/client_golang/prometheus/counter_test.go new file mode 100644 index 000000000..67391a23a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/counter_test.go @@ -0,0 +1,58 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "math" + "testing" + + dto "github.com/prometheus/client_model/go" +) + +func TestCounterAdd(t *testing.T) { + counter := NewCounter(CounterOpts{ + Name: "test", + Help: "test help", + ConstLabels: Labels{"a": "1", "b": "2"}, + }).(*counter) + counter.Inc() + if expected, got := 1., math.Float64frombits(counter.valBits); expected != got { + t.Errorf("Expected %f, got %f.", expected, got) + } + counter.Add(42) + if expected, got := 43., math.Float64frombits(counter.valBits); expected != got { + t.Errorf("Expected %f, got %f.", expected, got) + } + + if expected, got := "counter cannot decrease in value", decreaseCounter(counter).Error(); expected != got { + t.Errorf("Expected error %q, got %q.", expected, got) + } + + m := &dto.Metric{} + counter.Write(m) + + if expected, got := `label: label: counter: `, m.String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } +} + +func decreaseCounter(c *counter) (err error) { + defer func() { + if e := recover(); e != nil { + err = e.(error) + } + }() + c.Add(-1) + return nil +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/desc.go b/vendor/github.com/prometheus/client_golang/prometheus/desc.go new file mode 100644 index 000000000..77f4b30e8 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/desc.go @@ -0,0 +1,205 @@ +// Copyright 2016 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "errors" + "fmt" + "regexp" + "sort" + "strings" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +var ( + metricNameRE = regexp.MustCompile(`^[a-zA-Z_][a-zA-Z0-9_:]*$`) + labelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") +) + +// reservedLabelPrefix is a prefix which is not legal in user-supplied +// label names. +const reservedLabelPrefix = "__" + +// Labels represents a collection of label name -> value mappings. This type is +// commonly used with the With(Labels) and GetMetricWith(Labels) methods of +// metric vector Collectors, e.g.: +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +// +// The other use-case is the specification of constant label pairs in Opts or to +// create a Desc. +type Labels map[string]string + +// Desc is the descriptor used by every Prometheus Metric. It is essentially +// the immutable meta-data of a Metric. The normal Metric implementations +// included in this package manage their Desc under the hood. Users only have to +// deal with Desc if they use advanced features like the ExpvarCollector or +// custom Collectors and Metrics. +// +// Descriptors registered with the same registry have to fulfill certain +// consistency and uniqueness criteria if they share the same fully-qualified +// name: They must have the same help string and the same label names (aka label +// dimensions) in each, constLabels and variableLabels, but they must differ in +// the values of the constLabels. +// +// Descriptors that share the same fully-qualified names and the same label +// values of their constLabels are considered equal. +// +// Use NewDesc to create new Desc instances. +type Desc struct { + // fqName has been built from Namespace, Subsystem, and Name. + fqName string + // help provides some helpful information about this metric. + help string + // constLabelPairs contains precalculated DTO label pairs based on + // the constant labels. 
+ constLabelPairs []*dto.LabelPair + // VariableLabels contains names of labels for which the metric + // maintains variable values. + variableLabels []string + // id is a hash of the values of the ConstLabels and fqName. This + // must be unique among all registered descriptors and can therefore be + // used as an identifier of the descriptor. + id uint64 + // dimHash is a hash of the label names (preset and variable) and the + // Help string. Each Desc with the same fqName must have the same + // dimHash. + dimHash uint64 + // err is an error that occured during construction. It is reported on + // registration time. + err error +} + +// NewDesc allocates and initializes a new Desc. Errors are recorded in the Desc +// and will be reported on registration time. variableLabels and constLabels can +// be nil if no such labels should be set. fqName and help must not be empty. +// +// variableLabels only contain the label names. Their label values are variable +// and therefore not part of the Desc. (They are managed within the Metric.) +// +// For constLabels, the label values are constant. Therefore, they are fully +// specified in the Desc. See the Opts documentation for the implications of +// constant labels. +func NewDesc(fqName, help string, variableLabels []string, constLabels Labels) *Desc { + d := &Desc{ + fqName: fqName, + help: help, + variableLabels: variableLabels, + } + if help == "" { + d.err = errors.New("empty help string") + return d + } + if !metricNameRE.MatchString(fqName) { + d.err = fmt.Errorf("%q is not a valid metric name", fqName) + return d + } + // labelValues contains the label values of const labels (in order of + // their sorted label names) plus the fqName (at position 0). + labelValues := make([]string, 1, len(constLabels)+1) + labelValues[0] = fqName + labelNames := make([]string, 0, len(constLabels)+len(variableLabels)) + labelNameSet := map[string]struct{}{} + // First add only the const label names and sort them... 
+ for labelName := range constLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name", labelName) + return d + } + labelNames = append(labelNames, labelName) + labelNameSet[labelName] = struct{}{} + } + sort.Strings(labelNames) + // ... so that we can now add const label values in the order of their names. + for _, labelName := range labelNames { + labelValues = append(labelValues, constLabels[labelName]) + } + // Now add the variable label names, but prefix them with something that + // cannot be in a regular label name. That prevents matching the label + // dimension with a different mix between preset and variable labels. + for _, labelName := range variableLabels { + if !checkLabelName(labelName) { + d.err = fmt.Errorf("%q is not a valid label name", labelName) + return d + } + labelNames = append(labelNames, "$"+labelName) + labelNameSet[labelName] = struct{}{} + } + if len(labelNames) != len(labelNameSet) { + d.err = errors.New("duplicate label names") + return d + } + vh := hashNew() + for _, val := range labelValues { + vh = hashAdd(vh, val) + vh = hashAddByte(vh, separatorByte) + } + d.id = vh + // Sort labelNames so that order doesn't matter for the hash. + sort.Strings(labelNames) + // Now hash together (in this order) the help string and the sorted + // label names. + lh := hashNew() + lh = hashAdd(lh, help) + lh = hashAddByte(lh, separatorByte) + for _, labelName := range labelNames { + lh = hashAdd(lh, labelName) + lh = hashAddByte(lh, separatorByte) + } + d.dimHash = lh + + d.constLabelPairs = make([]*dto.LabelPair, 0, len(constLabels)) + for n, v := range constLabels { + d.constLabelPairs = append(d.constLabelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(v), + }) + } + sort.Sort(LabelPairSorter(d.constLabelPairs)) + return d +} + +// NewInvalidDesc returns an invalid descriptor, i.e. a descriptor with the +// provided error set. 
If a collector returning such a descriptor is registered, +// registration will fail with the provided error. NewInvalidDesc can be used by +// a Collector to signal inability to describe itself. +func NewInvalidDesc(err error) *Desc { + return &Desc{ + err: err, + } +} + +func (d *Desc) String() string { + lpStrings := make([]string, 0, len(d.constLabelPairs)) + for _, lp := range d.constLabelPairs { + lpStrings = append( + lpStrings, + fmt.Sprintf("%s=%q", lp.GetName(), lp.GetValue()), + ) + } + return fmt.Sprintf( + "Desc{fqName: %q, help: %q, constLabels: {%s}, variableLabels: %v}", + d.fqName, + d.help, + strings.Join(lpStrings, ","), + d.variableLabels, + ) +} + +func checkLabelName(l string) bool { + return labelNameRE.MatchString(l) && + !strings.HasPrefix(l, reservedLabelPrefix) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/doc.go b/vendor/github.com/prometheus/client_golang/prometheus/doc.go new file mode 100644 index 000000000..b15a2d3b9 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/doc.go @@ -0,0 +1,181 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package prometheus provides metrics primitives to instrument code for +// monitoring. It also offers a registry for metrics. Sub-packages allow to +// expose the registered metrics via HTTP (package promhttp) or push them to a +// Pushgateway (package push). 
+// +// All exported functions and methods are safe to be used concurrently unless +//specified otherwise. +// +// A Basic Example +// +// As a starting point, a very basic usage example: +// +// package main +// +// import ( +// "net/http" +// +// "github.com/prometheus/client_golang/prometheus" +// "github.com/prometheus/client_golang/prometheus/promhttp" +// ) +// +// var ( +// cpuTemp = prometheus.NewGauge(prometheus.GaugeOpts{ +// Name: "cpu_temperature_celsius", +// Help: "Current temperature of the CPU.", +// }) +// hdFailures = prometheus.NewCounterVec( +// prometheus.CounterOpts{ +// Name: "hd_errors_total", +// Help: "Number of hard-disk errors.", +// }, +// []string{"device"}, +// ) +// ) +// +// func init() { +// // Metrics have to be registered to be exposed: +// prometheus.MustRegister(cpuTemp) +// prometheus.MustRegister(hdFailures) +// } +// +// func main() { +// cpuTemp.Set(65.3) +// hdFailures.With(prometheus.Labels{"device":"/dev/sda"}).Inc() +// +// // The Handler function provides a default handler to expose metrics +// // via an HTTP server. "/metrics" is the usual endpoint for that. +// http.Handle("/metrics", promhttp.Handler()) +// http.ListenAndServe(":8080", nil) +// } +// +// +// This is a complete program that exports two metrics, a Gauge and a Counter, +// the latter with a label attached to turn it into a (one-dimensional) vector. +// +// Metrics +// +// The number of exported identifiers in this package might appear a bit +// overwhelming. Hovever, in addition to the basic plumbing shown in the example +// above, you only need to understand the different metric types and their +// vector versions for basic usage. +// +// Above, you have already touched the Counter and the Gauge. There are two more +// advanced metric types: the Summary and Histogram. 
A more thorough description +// of those four metric types can be found in the Prometheus docs: +// https://prometheus.io/docs/concepts/metric_types/ +// +// A fifth "type" of metric is Untyped. It behaves like a Gauge, but signals the +// Prometheus server not to assume anything about its type. +// +// In addition to the fundamental metric types Gauge, Counter, Summary, +// Histogram, and Untyped, a very important part of the Prometheus data model is +// the partitioning of samples along dimensions called labels, which results in +// metric vectors. The fundamental types are GaugeVec, CounterVec, SummaryVec, +// HistogramVec, and UntypedVec. +// +// While only the fundamental metric types implement the Metric interface, both +// the metrics and their vector versions implement the Collector interface. A +// Collector manages the collection of a number of Metrics, but for convenience, +// a Metric can also “collect itself”. Note that Gauge, Counter, Summary, +// Histogram, and Untyped are interfaces themselves while GaugeVec, CounterVec, +// SummaryVec, HistogramVec, and UntypedVec are not. +// +// To create instances of Metrics and their vector versions, you need a suitable +// …Opts struct, i.e. GaugeOpts, CounterOpts, SummaryOpts, +// HistogramOpts, or UntypedOpts. +// +// Custom Collectors and constant Metrics +// +// While you could create your own implementations of Metric, most likely you +// will only ever implement the Collector interface on your own. At a first +// glance, a custom Collector seems handy to bundle Metrics for common +// registration (with the prime example of the different metric vectors above, +// which bundle all the metrics of the same name but with different labels). +// +// There is a more involved use case, too: If you already have metrics +// available, created outside of the Prometheus context, you don't need the +// interface of the various Metric types. 
You essentially want to mirror the +// existing numbers into Prometheus Metrics during collection. An own +// implementation of the Collector interface is perfect for that. You can create +// Metric instances “on the fly” using NewConstMetric, NewConstHistogram, and +// NewConstSummary (and their respective Must… versions). That will happen in +// the Collect method. The Describe method has to return separate Desc +// instances, representative of the “throw-away” metrics to be created +// later. NewDesc comes in handy to create those Desc instances. +// +// The Collector example illustrates the use case. You can also look at the +// source code of the processCollector (mirroring process metrics), the +// goCollector (mirroring Go metrics), or the expvarCollector (mirroring expvar +// metrics) as examples that are used in this package itself. +// +// If you just need to call a function to get a single float value to collect as +// a metric, GaugeFunc, CounterFunc, or UntypedFunc might be interesting +// shortcuts. +// +// Advanced Uses of the Registry +// +// While MustRegister is the by far most common way of registering a Collector, +// sometimes you might want to handle the errors the registration might +// cause. As suggested by the name, MustRegister panics if an error occurs. With +// the Register function, the error is returned and can be handled. +// +// An error is returned if the registered Collector is incompatible or +// inconsistent with already registered metrics. The registry aims for +// consistency of the collected metrics according to the Prometheus data +// model. Inconsistencies are ideally detected at registration time, not at +// collect time. The former will usually be detected at start-up time of a +// program, while the latter will only happen at scrape time, possibly not even +// on the first scrape if the inconsistency only becomes relevant later. 
That is +// the main reason why a Collector and a Metric have to describe themselves to +// the registry. +// +// So far, everything we did operated on the so-called default registry, as it +// can be found in the global DefaultRegistry variable. With NewRegistry, you +// can create a custom registry, or you can even implement the Registerer or +// Gatherer interfaces yourself. The methods Register and Unregister work in +// the same way on a custom registry as the global functions Register and +// Unregister on the default registry. +// +// There are a number of uses for custom registries: You can use registries +// with special properties, see NewPedanticRegistry. You can avoid global state, +// as it is imposed by the DefaultRegistry. You can use multiple registries at +// the same time to expose different metrics in different ways. You can use +// separate registries for testing purposes. +// +// Also note that the DefaultRegistry comes registered with a Collector for Go +// runtime metrics (via NewGoCollector) and a Collector for process metrics (via +// NewProcessCollector). With a custom registry, you are in control and decide +// yourself about the Collectors to register. +// +// HTTP Exposition +// +// The Registry implements the Gatherer interface. The caller of the Gather +// method can then expose the gathered metrics in some way. Usually, the metrics +// are served via HTTP on the /metrics endpoint. That's happening in the example +// above. The tools to expose metrics via HTTP are in the promhttp +// sub-package. (The top-level functions in the prometheus package are +// deprecated.) +// +// Pushing to the Pushgateway +// +// Function for pushing to the Pushgateway can be found in the push sub-package. +// +// Other Means of Exposition +// +// More ways of exposing metrics can easily be added. Sending metrics to +// Graphite would be an example that will soon be implemented. 
+package prometheus diff --git a/vendor/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go b/vendor/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go new file mode 100644 index 000000000..260c1b52d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/example_clustermanager_test.go @@ -0,0 +1,118 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import "github.com/prometheus/client_golang/prometheus" + +// ClusterManager is an example for a system that might have been built without +// Prometheus in mind. It models a central manager of jobs running in a +// cluster. To turn it into something that collects Prometheus metrics, we +// simply add the two methods required for the Collector interface. +// +// An additional challenge is that multiple instances of the ClusterManager are +// run within the same binary, each in charge of a different zone. We need to +// make use of ConstLabels to be able to register each ClusterManager instance +// with Prometheus. +type ClusterManager struct { + Zone string + OOMCountDesc *prometheus.Desc + RAMUsageDesc *prometheus.Desc + // ... many more fields +} + +// ReallyExpensiveAssessmentOfTheSystemState is a mock for the data gathering a +// real cluster manager would have to do. Since it may actually be really +// expensive, it must only be called once per collection. 
This implementation, +// obviously, only returns some made-up data. +func (c *ClusterManager) ReallyExpensiveAssessmentOfTheSystemState() ( + oomCountByHost map[string]int, ramUsageByHost map[string]float64, +) { + // Just example fake data. + oomCountByHost = map[string]int{ + "foo.example.org": 42, + "bar.example.org": 2001, + } + ramUsageByHost = map[string]float64{ + "foo.example.org": 6.023e23, + "bar.example.org": 3.14, + } + return +} + +// Describe simply sends the two Descs in the struct to the channel. +func (c *ClusterManager) Describe(ch chan<- *prometheus.Desc) { + ch <- c.OOMCountDesc + ch <- c.RAMUsageDesc +} + +// Collect first triggers the ReallyExpensiveAssessmentOfTheSystemState. Then it +// creates constant metrics for each host on the fly based on the returned data. +// +// Note that Collect could be called concurrently, so we depend on +// ReallyExpensiveAssessmentOfTheSystemState to be concurrency-safe. +func (c *ClusterManager) Collect(ch chan<- prometheus.Metric) { + oomCountByHost, ramUsageByHost := c.ReallyExpensiveAssessmentOfTheSystemState() + for host, oomCount := range oomCountByHost { + ch <- prometheus.MustNewConstMetric( + c.OOMCountDesc, + prometheus.CounterValue, + float64(oomCount), + host, + ) + } + for host, ramUsage := range ramUsageByHost { + ch <- prometheus.MustNewConstMetric( + c.RAMUsageDesc, + prometheus.GaugeValue, + ramUsage, + host, + ) + } +} + +// NewClusterManager creates the two Descs OOMCountDesc and RAMUsageDesc. Note +// that the zone is set as a ConstLabel. (It's different in each instance of the +// ClusterManager, but constant over the lifetime of an instance.) Then there is +// a variable label "host", since we want to partition the collected metrics by +// host. Since all Descs created in this way are consistent across instances, +// with a guaranteed distinction by the "zone" label, we can register different +// ClusterManager instances with the same registry. 
+func NewClusterManager(zone string) *ClusterManager { + return &ClusterManager{ + Zone: zone, + OOMCountDesc: prometheus.NewDesc( + "clustermanager_oom_crashes_total", + "Number of OOM crashes.", + []string{"host"}, + prometheus.Labels{"zone": zone}, + ), + RAMUsageDesc: prometheus.NewDesc( + "clustermanager_ram_usage_bytes", + "RAM usage as reported to the cluster manager.", + []string{"host"}, + prometheus.Labels{"zone": zone}, + ), + } +} + +func ExampleCollector() { + workerDB := NewClusterManager("db") + workerCA := NewClusterManager("ca") + + // Since we are dealing with custom Collector implementations, it might + // be a good idea to try it out with a pedantic registry. + reg := prometheus.NewPedanticRegistry() + reg.MustRegister(workerDB) + reg.MustRegister(workerCA) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/examples_test.go b/vendor/github.com/prometheus/client_golang/prometheus/examples_test.go new file mode 100644 index 000000000..f87f21a8f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/examples_test.go @@ -0,0 +1,751 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus_test + +import ( + "bytes" + "fmt" + "math" + "net/http" + "runtime" + "sort" + "strings" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/expfmt" + + "github.com/golang/protobuf/proto" + + "github.com/prometheus/client_golang/prometheus" +) + +func ExampleGauge() { + opsQueued := prometheus.NewGauge(prometheus.GaugeOpts{ + Namespace: "our_company", + Subsystem: "blob_storage", + Name: "ops_queued", + Help: "Number of blob storage operations waiting to be processed.", + }) + prometheus.MustRegister(opsQueued) + + // 10 operations queued by the goroutine managing incoming requests. + opsQueued.Add(10) + // A worker goroutine has picked up a waiting operation. + opsQueued.Dec() + // And once more... + opsQueued.Dec() +} + +func ExampleGaugeVec() { + opsQueued := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Namespace: "our_company", + Subsystem: "blob_storage", + Name: "ops_queued", + Help: "Number of blob storage operations waiting to be processed, partitioned by user and type.", + }, + []string{ + // Which user has requested the operation? + "user", + // Of what type is the operation? + "type", + }, + ) + prometheus.MustRegister(opsQueued) + + // Increase a value using compact (but order-sensitive!) WithLabelValues(). + opsQueued.WithLabelValues("bob", "put").Add(4) + // Increase a value with a map using WithLabels. More verbose, but order + // doesn't matter anymore. + opsQueued.With(prometheus.Labels{"type": "delete", "user": "alice"}).Inc() +} + +func ExampleGaugeFunc() { + if err := prometheus.Register(prometheus.NewGaugeFunc( + prometheus.GaugeOpts{ + Subsystem: "runtime", + Name: "goroutines_count", + Help: "Number of goroutines that currently exist.", + }, + func() float64 { return float64(runtime.NumGoroutine()) }, + )); err == nil { + fmt.Println("GaugeFunc 'goroutines_count' registered.") + } + // Note that the count of goroutines is a gauge (and not a counter) as + // it can go up and down. 
+ + // Output: + // GaugeFunc 'goroutines_count' registered. +} + +func ExampleCounter() { + pushCounter := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "repository_pushes", // Note: No help string... + }) + err := prometheus.Register(pushCounter) // ... so this will return an error. + if err != nil { + fmt.Println("Push counter couldn't be registered, no counting will happen:", err) + return + } + + // Try it once more, this time with a help string. + pushCounter = prometheus.NewCounter(prometheus.CounterOpts{ + Name: "repository_pushes", + Help: "Number of pushes to external repository.", + }) + err = prometheus.Register(pushCounter) + if err != nil { + fmt.Println("Push counter couldn't be registered AGAIN, no counting will happen:", err) + return + } + + pushComplete := make(chan struct{}) + // TODO: Start a goroutine that performs repository pushes and reports + // each completion via the channel. + for _ = range pushComplete { + pushCounter.Inc() + } + // Output: + // Push counter couldn't be registered, no counting will happen: descriptor Desc{fqName: "repository_pushes", help: "", constLabels: {}, variableLabels: []} is invalid: empty help string +} + +func ExampleCounterVec() { + httpReqs := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "http_requests_total", + Help: "How many HTTP requests processed, partitioned by status code and HTTP method.", + }, + []string{"code", "method"}, + ) + prometheus.MustRegister(httpReqs) + + httpReqs.WithLabelValues("404", "POST").Add(42) + + // If you have to access the same set of labels very frequently, it + // might be good to retrieve the metric only once and keep a handle to + // it. But beware of deletion of that metric, see below! + m := httpReqs.WithLabelValues("200", "GET") + for i := 0; i < 1000000; i++ { + m.Inc() + } + // Delete a metric from the vector. 
If you have previously kept a handle + // to that metric (as above), future updates via that handle will go + // unseen (even if you re-create a metric with the same label set + // later). + httpReqs.DeleteLabelValues("200", "GET") + // Same thing with the more verbose Labels syntax. + httpReqs.Delete(prometheus.Labels{"method": "GET", "code": "200"}) +} + +func ExampleInstrumentHandler() { + // Handle the "/doc" endpoint with the standard http.FileServer handler. + // By wrapping the handler with InstrumentHandler, request count, + // request and response sizes, and request latency are automatically + // exported to Prometheus, partitioned by HTTP status code and method + // and by the handler name (here "fileserver"). + http.Handle("/doc", prometheus.InstrumentHandler( + "fileserver", http.FileServer(http.Dir("/usr/share/doc")), + )) + // The Prometheus handler still has to be registered to handle the + // "/metrics" endpoint. The handler returned by prometheus.Handler() is + // already instrumented - with "prometheus" as the handler name. In this + // example, we want the handler name to be "metrics", so we instrument + // the uninstrumented Prometheus handler ourselves. + http.Handle("/metrics", prometheus.InstrumentHandler( + "metrics", prometheus.UninstrumentedHandler(), + )) +} + +func ExampleLabelPairSorter() { + labelPairs := []*dto.LabelPair{ + &dto.LabelPair{Name: proto.String("status"), Value: proto.String("404")}, + &dto.LabelPair{Name: proto.String("method"), Value: proto.String("get")}, + } + + sort.Sort(prometheus.LabelPairSorter(labelPairs)) + + fmt.Println(labelPairs) + // Output: + // [name:"method" value:"get" name:"status" value:"404" ] +} + +func ExampleRegister() { + // Imagine you have a worker pool and want to count the tasks completed. + taskCounter := prometheus.NewCounter(prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks_total", + Help: "Total number of tasks completed.", + }) + // This will register fine. 
+ if err := prometheus.Register(taskCounter); err != nil { + fmt.Println(err) + } else { + fmt.Println("taskCounter registered.") + } + // Don't forget to tell the HTTP server about the Prometheus handler. + // (In a real program, you still need to start the HTTP server...) + http.Handle("/metrics", prometheus.Handler()) + + // Now you can start workers and give every one of them a pointer to + // taskCounter and let it increment it whenever it completes a task. + taskCounter.Inc() // This has to happen somewhere in the worker code. + + // But wait, you want to see how individual workers perform. So you need + // a vector of counters, with one element for each worker. + taskCounterVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks_total", + Help: "Total number of tasks completed.", + }, + []string{"worker_id"}, + ) + + // Registering will fail because we already have a metric of that name. + if err := prometheus.Register(taskCounterVec); err != nil { + fmt.Println("taskCounterVec not registered:", err) + } else { + fmt.Println("taskCounterVec registered.") + } + + // To fix, first unregister the old taskCounter. + if prometheus.Unregister(taskCounter) { + fmt.Println("taskCounter unregistered.") + } + + // Try registering taskCounterVec again. + if err := prometheus.Register(taskCounterVec); err != nil { + fmt.Println("taskCounterVec not registered:", err) + } else { + fmt.Println("taskCounterVec registered.") + } + // Bummer! Still doesn't work. + + // Prometheus will not allow you to ever export metrics with + // inconsistent help strings or label names. After unregistering, the + // unregistered metrics will cease to show up in the /metrics HTTP + // response, but the registry still remembers that those metrics had + // been exported before. For this example, we will now choose a + // different name. (In a real program, you would obviously not export + // the obsolete metric in the first place.) 
+ taskCounterVec = prometheus.NewCounterVec( + prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks_by_id", + Help: "Total number of tasks completed.", + }, + []string{"worker_id"}, + ) + if err := prometheus.Register(taskCounterVec); err != nil { + fmt.Println("taskCounterVec not registered:", err) + } else { + fmt.Println("taskCounterVec registered.") + } + // Finally it worked! + + // The workers have to tell taskCounterVec their id to increment the + // right element in the metric vector. + taskCounterVec.WithLabelValues("42").Inc() // Code from worker 42. + + // Each worker could also keep a reference to their own counter element + // around. Pick the counter at initialization time of the worker. + myCounter := taskCounterVec.WithLabelValues("42") // From worker 42 initialization code. + myCounter.Inc() // Somewhere in the code of that worker. + + // Note that something like WithLabelValues("42", "spurious arg") would + // panic (because you have provided too many label values). If you want + // to get an error instead, use GetMetricWithLabelValues(...) instead. + notMyCounter, err := taskCounterVec.GetMetricWithLabelValues("42", "spurious arg") + if err != nil { + fmt.Println("Worker initialization failed:", err) + } + if notMyCounter == nil { + fmt.Println("notMyCounter is nil.") + } + + // A different (and somewhat tricky) approach is to use + // ConstLabels. ConstLabels are pairs of label names and label values + // that never change. You might ask what those labels are good for (and + // rightfully so - if they never change, they could as well be part of + // the metric name). There are essentially two use-cases: The first is + // if labels are constant throughout the lifetime of a binary execution, + // but they vary over time or between different instances of a running + // binary. 
The second is what we have here: Each worker creates and + // registers an own Counter instance where the only difference is in the + // value of the ConstLabels. Those Counters can all be registered + // because the different ConstLabel values guarantee that each worker + // will increment a different Counter metric. + counterOpts := prometheus.CounterOpts{ + Subsystem: "worker_pool", + Name: "completed_tasks", + Help: "Total number of tasks completed.", + ConstLabels: prometheus.Labels{"worker_id": "42"}, + } + taskCounterForWorker42 := prometheus.NewCounter(counterOpts) + if err := prometheus.Register(taskCounterForWorker42); err != nil { + fmt.Println("taskCounterVForWorker42 not registered:", err) + } else { + fmt.Println("taskCounterForWorker42 registered.") + } + // Obviously, in real code, taskCounterForWorker42 would be a member + // variable of a worker struct, and the "42" would be retrieved with a + // GetId() method or something. The Counter would be created and + // registered in the initialization code of the worker. + + // For the creation of the next Counter, we can recycle + // counterOpts. Just change the ConstLabels. + counterOpts.ConstLabels = prometheus.Labels{"worker_id": "2001"} + taskCounterForWorker2001 := prometheus.NewCounter(counterOpts) + if err := prometheus.Register(taskCounterForWorker2001); err != nil { + fmt.Println("taskCounterVForWorker2001 not registered:", err) + } else { + fmt.Println("taskCounterForWorker2001 registered.") + } + + taskCounterForWorker2001.Inc() + taskCounterForWorker42.Inc() + taskCounterForWorker2001.Inc() + + // Yet another approach would be to turn the workers themselves into + // Collectors and register them. See the Collector example for details. + + // Output: + // taskCounter registered. 
+ // taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string + // taskCounter unregistered. + // taskCounterVec not registered: a previously registered descriptor with the same fully-qualified name as Desc{fqName: "worker_pool_completed_tasks_total", help: "Total number of tasks completed.", constLabels: {}, variableLabels: [worker_id]} has different label names or a different help string + // taskCounterVec registered. + // Worker initialization failed: inconsistent label cardinality + // notMyCounter is nil. + // taskCounterForWorker42 registered. + // taskCounterForWorker2001 registered. +} + +func ExampleSummary() { + temps := prometheus.NewSummary(prometheus.SummaryOpts{ + Name: "pond_temperature_celsius", + Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells. + }) + + // Simulate some observations. + for i := 0; i < 1000; i++ { + temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) + } + + // Just for demonstration, let's check the state of the summary by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). + metric := &dto.Metric{} + temps.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // summary: < + // sample_count: 1000 + // sample_sum: 29969.50000000001 + // quantile: < + // quantile: 0.5 + // value: 31.1 + // > + // quantile: < + // quantile: 0.9 + // value: 41.3 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 + // > + // > +} + +func ExampleSummaryVec() { + temps := prometheus.NewSummaryVec( + prometheus.SummaryOpts{ + Name: "pond_temperature_celsius", + Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells. 
+ }, + []string{"species"}, + ) + + // Simulate some observations. + for i := 0; i < 1000; i++ { + temps.WithLabelValues("litoria-caerulea").Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) + temps.WithLabelValues("lithobates-catesbeianus").Observe(32 + math.Floor(100*math.Cos(float64(i)*0.11))/10) + } + + // Create a Summary without any observations. + temps.WithLabelValues("leiopelma-hochstetteri") + + // Just for demonstration, let's check the state of the summary vector + // by registering it with a custom registry and then let it collect the + // metrics. + reg := prometheus.NewRegistry() + reg.MustRegister(temps) + + metricFamilies, err := reg.Gather() + if err != nil || len(metricFamilies) != 1 { + panic("unexpected behavior of custom test registry") + } + fmt.Println(proto.MarshalTextString(metricFamilies[0])) + + // Output: + // name: "pond_temperature_celsius" + // help: "The temperature of the frog pond." + // type: SUMMARY + // metric: < + // label: < + // name: "species" + // value: "leiopelma-hochstetteri" + // > + // summary: < + // sample_count: 0 + // sample_sum: 0 + // quantile: < + // quantile: 0.5 + // value: nan + // > + // quantile: < + // quantile: 0.9 + // value: nan + // > + // quantile: < + // quantile: 0.99 + // value: nan + // > + // > + // > + // metric: < + // label: < + // name: "species" + // value: "lithobates-catesbeianus" + // > + // summary: < + // sample_count: 1000 + // sample_sum: 31956.100000000017 + // quantile: < + // quantile: 0.5 + // value: 32.4 + // > + // quantile: < + // quantile: 0.9 + // value: 41.4 + // > + // quantile: < + // quantile: 0.99 + // value: 41.9 + // > + // > + // > + // metric: < + // label: < + // name: "species" + // value: "litoria-caerulea" + // > + // summary: < + // sample_count: 1000 + // sample_sum: 29969.50000000001 + // quantile: < + // quantile: 0.5 + // value: 31.1 + // > + // quantile: < + // quantile: 0.9 + // value: 41.3 + // > + // quantile: < + // quantile: 0.99 + // value: 
41.9 + // > + // > + // > +} + +func ExampleNewConstSummary() { + desc := prometheus.NewDesc( + "http_request_duration_seconds", + "A summary of the HTTP request durations.", + []string{"code", "method"}, + prometheus.Labels{"owner": "example"}, + ) + + // Create a constant summary from values we got from a 3rd party telemetry system. + s := prometheus.MustNewConstSummary( + desc, + 4711, 403.34, + map[float64]float64{0.5: 42.3, 0.9: 323.3}, + "200", "get", + ) + + // Just for demonstration, let's check the state of the summary by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). + metric := &dto.Metric{} + s.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // label: < + // name: "code" + // value: "200" + // > + // label: < + // name: "method" + // value: "get" + // > + // label: < + // name: "owner" + // value: "example" + // > + // summary: < + // sample_count: 4711 + // sample_sum: 403.34 + // quantile: < + // quantile: 0.5 + // value: 42.3 + // > + // quantile: < + // quantile: 0.9 + // value: 323.3 + // > + // > +} + +func ExampleHistogram() { + temps := prometheus.NewHistogram(prometheus.HistogramOpts{ + Name: "pond_temperature_celsius", + Help: "The temperature of the frog pond.", // Sorry, we can't measure how badly it smells. + Buckets: prometheus.LinearBuckets(20, 5, 5), // 5 buckets, each 5 centigrade wide. + }) + + // Simulate some observations. + for i := 0; i < 1000; i++ { + temps.Observe(30 + math.Floor(120*math.Sin(float64(i)*0.1))/10) + } + + // Just for demonstration, let's check the state of the histogram by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). 
+ metric := &dto.Metric{} + temps.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // histogram: < + // sample_count: 1000 + // sample_sum: 29969.50000000001 + // bucket: < + // cumulative_count: 192 + // upper_bound: 20 + // > + // bucket: < + // cumulative_count: 366 + // upper_bound: 25 + // > + // bucket: < + // cumulative_count: 501 + // upper_bound: 30 + // > + // bucket: < + // cumulative_count: 638 + // upper_bound: 35 + // > + // bucket: < + // cumulative_count: 816 + // upper_bound: 40 + // > + // > +} + +func ExampleNewConstHistogram() { + desc := prometheus.NewDesc( + "http_request_duration_seconds", + "A histogram of the HTTP request durations.", + []string{"code", "method"}, + prometheus.Labels{"owner": "example"}, + ) + + // Create a constant histogram from values we got from a 3rd party telemetry system. + h := prometheus.MustNewConstHistogram( + desc, + 4711, 403.34, + map[float64]uint64{25: 121, 50: 2403, 100: 3221, 200: 4233}, + "200", "get", + ) + + // Just for demonstration, let's check the state of the histogram by + // (ab)using its Write method (which is usually only used by Prometheus + // internally). 
+ metric := &dto.Metric{} + h.Write(metric) + fmt.Println(proto.MarshalTextString(metric)) + + // Output: + // label: < + // name: "code" + // value: "200" + // > + // label: < + // name: "method" + // value: "get" + // > + // label: < + // name: "owner" + // value: "example" + // > + // histogram: < + // sample_count: 4711 + // sample_sum: 403.34 + // bucket: < + // cumulative_count: 121 + // upper_bound: 25 + // > + // bucket: < + // cumulative_count: 2403 + // upper_bound: 50 + // > + // bucket: < + // cumulative_count: 3221 + // upper_bound: 100 + // > + // bucket: < + // cumulative_count: 4233 + // upper_bound: 200 + // > + // > +} + +func ExampleAlreadyRegisteredError() { + reqCounter := prometheus.NewCounter(prometheus.CounterOpts{ + Name: "requests_total", + Help: "The total number of requests served.", + }) + if err := prometheus.Register(reqCounter); err != nil { + if are, ok := err.(prometheus.AlreadyRegisteredError); ok { + // A counter for that metric has been registered before. + // Use the old counter from now on. + reqCounter = are.ExistingCollector.(prometheus.Counter) + } else { + // Something else went wrong! + panic(err) + } + } +} + +func ExampleGatherers() { + reg := prometheus.NewRegistry() + temp := prometheus.NewGaugeVec( + prometheus.GaugeOpts{ + Name: "temperature_kelvin", + Help: "Temperature in Kelvin.", + }, + []string{"location"}, + ) + reg.MustRegister(temp) + temp.WithLabelValues("outside").Set(273.14) + temp.WithLabelValues("inside").Set(298.44) + + var parser expfmt.TextParser + + text := ` +# TYPE humidity_percent gauge +# HELP humidity_percent Humidity in %. +humidity_percent{location="outside"} 45.4 +humidity_percent{location="inside"} 33.2 +# TYPE temperature_kelvin gauge +# HELP temperature_kelvin Temperature in Kelvin. 
+temperature_kelvin{location="somewhere else"} 4.5 +` + + parseText := func() ([]*dto.MetricFamily, error) { + parsed, err := parser.TextToMetricFamilies(strings.NewReader(text)) + if err != nil { + return nil, err + } + var result []*dto.MetricFamily + for _, mf := range parsed { + result = append(result, mf) + } + return result, nil + } + + gatherers := prometheus.Gatherers{ + reg, + prometheus.GathererFunc(parseText), + } + + gathering, err := gatherers.Gather() + if err != nil { + fmt.Println(err) + } + + out := &bytes.Buffer{} + for _, mf := range gathering { + if _, err := expfmt.MetricFamilyToText(out, mf); err != nil { + panic(err) + } + } + fmt.Print(out.String()) + fmt.Println("----------") + + // Note how the temperature_kelvin metric family has been merged from + // different sources. Now try + text = ` +# TYPE humidity_percent gauge +# HELP humidity_percent Humidity in %. +humidity_percent{location="outside"} 45.4 +humidity_percent{location="inside"} 33.2 +# TYPE temperature_kelvin gauge +# HELP temperature_kelvin Temperature in Kelvin. +# Duplicate metric: +temperature_kelvin{location="outside"} 265.3 + # Wrong labels: +temperature_kelvin 4.5 +` + + gathering, err = gatherers.Gather() + if err != nil { + fmt.Println(err) + } + // Note that still as many metrics as possible are returned: + out.Reset() + for _, mf := range gathering { + if _, err := expfmt.MetricFamilyToText(out, mf); err != nil { + panic(err) + } + } + fmt.Print(out.String()) + + // Output: + // # HELP humidity_percent Humidity in %. + // # TYPE humidity_percent gauge + // humidity_percent{location="inside"} 33.2 + // humidity_percent{location="outside"} 45.4 + // # HELP temperature_kelvin Temperature in Kelvin. 
+ // # TYPE temperature_kelvin gauge + // temperature_kelvin{location="inside"} 298.44 + // temperature_kelvin{location="outside"} 273.14 + // temperature_kelvin{location="somewhere else"} 4.5 + // ---------- + // 2 error(s) occurred: + // * collected metric temperature_kelvin label: gauge: was collected before with the same name and label values + // * collected metric temperature_kelvin gauge: has label dimensions inconsistent with previously collected metrics in the same metric family + // # HELP humidity_percent Humidity in %. + // # TYPE humidity_percent gauge + // humidity_percent{location="inside"} 33.2 + // humidity_percent{location="outside"} 45.4 + // # HELP temperature_kelvin Temperature in Kelvin. + // # TYPE temperature_kelvin gauge + // temperature_kelvin{location="inside"} 298.44 + // temperature_kelvin{location="outside"} 273.14 +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go new file mode 100644 index 000000000..18a99d5fa --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector.go @@ -0,0 +1,119 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "encoding/json" + "expvar" +) + +type expvarCollector struct { + exports map[string]*Desc +} + +// NewExpvarCollector returns a newly allocated expvar Collector that still has +// to be registered with a Prometheus registry. +// +// An expvar Collector collects metrics from the expvar interface. It provides a +// quick way to expose numeric values that are already exported via expvar as +// Prometheus metrics. Note that the data models of expvar and Prometheus are +// fundamentally different, and that the expvar Collector is inherently slower +// than native Prometheus metrics. Thus, the expvar Collector is probably great +// for experiments and prototying, but you should seriously consider a more +// direct implementation of Prometheus metrics for monitoring production +// systems. +// +// The exports map has the following meaning: +// +// The keys in the map correspond to expvar keys, i.e. for every expvar key you +// want to export as Prometheus metric, you need an entry in the exports +// map. The descriptor mapped to each key describes how to export the expvar +// value. It defines the name and the help string of the Prometheus metric +// proxying the expvar value. The type will always be Untyped. +// +// For descriptors without variable labels, the expvar value must be a number or +// a bool. The number is then directly exported as the Prometheus sample +// value. (For a bool, 'false' translates to 0 and 'true' to 1). Expvar values +// that are not numbers or bools are silently ignored. +// +// If the descriptor has one variable label, the expvar value must be an expvar +// map. The keys in the expvar map become the various values of the one +// Prometheus label. The values in the expvar map must be numbers or bools again +// as above. +// +// For descriptors with more than one variable label, the expvar must be a +// nested expvar map, i.e. where the values of the topmost map are maps again +// etc. 
until a depth is reached that corresponds to the number of labels. The +// leaves of that structure must be numbers or bools as above to serve as the +// sample values. +// +// Anything that does not fit into the scheme above is silently ignored. +func NewExpvarCollector(exports map[string]*Desc) Collector { + return &expvarCollector{ + exports: exports, + } +} + +// Describe implements Collector. +func (e *expvarCollector) Describe(ch chan<- *Desc) { + for _, desc := range e.exports { + ch <- desc + } +} + +// Collect implements Collector. +func (e *expvarCollector) Collect(ch chan<- Metric) { + for name, desc := range e.exports { + var m Metric + expVar := expvar.Get(name) + if expVar == nil { + continue + } + var v interface{} + labels := make([]string, len(desc.variableLabels)) + if err := json.Unmarshal([]byte(expVar.String()), &v); err != nil { + ch <- NewInvalidMetric(desc, err) + continue + } + var processValue func(v interface{}, i int) + processValue = func(v interface{}, i int) { + if i >= len(labels) { + copiedLabels := append(make([]string, 0, len(labels)), labels...) + switch v := v.(type) { + case float64: + m = MustNewConstMetric(desc, UntypedValue, v, copiedLabels...) + case bool: + if v { + m = MustNewConstMetric(desc, UntypedValue, 1, copiedLabels...) + } else { + m = MustNewConstMetric(desc, UntypedValue, 0, copiedLabels...) 
+ } + default: + return + } + ch <- m + return + } + vm, ok := v.(map[string]interface{}) + if !ok { + return + } + for lv, val := range vm { + labels[i] = lv + processValue(val, i+1) + } + } + processValue(v, 0) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go new file mode 100644 index 000000000..5d3128fae --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/expvar_collector_test.go @@ -0,0 +1,97 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus_test + +import ( + "expvar" + "fmt" + "sort" + "strings" + + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/client_golang/prometheus" +) + +func ExampleExpvarCollector() { + expvarCollector := prometheus.NewExpvarCollector(map[string]*prometheus.Desc{ + "memstats": prometheus.NewDesc( + "expvar_memstats", + "All numeric memstats as one metric family. Not a good role-model, actually... 
;-)", + []string{"type"}, nil, + ), + "lone-int": prometheus.NewDesc( + "expvar_lone_int", + "Just an expvar int as an example.", + nil, nil, + ), + "http-request-map": prometheus.NewDesc( + "expvar_http_request_total", + "How many http requests processed, partitioned by status code and http method.", + []string{"code", "method"}, nil, + ), + }) + prometheus.MustRegister(expvarCollector) + + // The Prometheus part is done here. But to show that this example is + // doing anything, we have to manually export something via expvar. In + // real-life use-cases, some library would already have exported via + // expvar what we want to re-export as Prometheus metrics. + expvar.NewInt("lone-int").Set(42) + expvarMap := expvar.NewMap("http-request-map") + var ( + expvarMap1, expvarMap2 expvar.Map + expvarInt11, expvarInt12, expvarInt21, expvarInt22 expvar.Int + ) + expvarMap1.Init() + expvarMap2.Init() + expvarInt11.Set(3) + expvarInt12.Set(13) + expvarInt21.Set(11) + expvarInt22.Set(212) + expvarMap1.Set("POST", &expvarInt11) + expvarMap1.Set("GET", &expvarInt12) + expvarMap2.Set("POST", &expvarInt21) + expvarMap2.Set("GET", &expvarInt22) + expvarMap.Set("404", &expvarMap1) + expvarMap.Set("200", &expvarMap2) + // Results in the following expvar map: + // "http-request-count": {"200": {"POST": 11, "GET": 212}, "404": {"POST": 3, "GET": 13}} + + // Let's see what the scrape would yield, but exclude the memstats metrics. 
+ metricStrings := []string{} + metric := dto.Metric{} + metricChan := make(chan prometheus.Metric) + go func() { + expvarCollector.Collect(metricChan) + close(metricChan) + }() + for m := range metricChan { + if strings.Index(m.Desc().String(), "expvar_memstats") == -1 { + metric.Reset() + m.Write(&metric) + metricStrings = append(metricStrings, metric.String()) + } + } + sort.Strings(metricStrings) + for _, s := range metricStrings { + fmt.Println(strings.TrimRight(s, " ")) + } + // Output: + // label: label: untyped: + // label: label: untyped: + // label: label: untyped: + // label: label: untyped: + // untyped: +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/fnv.go b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go new file mode 100644 index 000000000..e3b67df8a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/fnv.go @@ -0,0 +1,29 @@ +package prometheus + +// Inline and byte-free variant of hash/fnv's fnv64a. + +const ( + offset64 = 14695981039346656037 + prime64 = 1099511628211 +) + +// hashNew initializies a new fnv64a hash value. +func hashNew() uint64 { + return offset64 +} + +// hashAdd adds a string to a fnv64a hash value, returning the updated hash. +func hashAdd(h uint64, s string) uint64 { + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= prime64 + } + return h +} + +// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. 
+func hashAddByte(h uint64, b byte) uint64 { + h ^= uint64(b) + h *= prime64 + return h +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go new file mode 100644 index 000000000..8b70e5141 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge.go @@ -0,0 +1,140 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Gauge is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// A Gauge is typically used for measured values like temperatures or current +// memory usage, but also "counts" that can go up and down, like the number of +// running goroutines. +// +// To create Gauge instances, use NewGauge. +type Gauge interface { + Metric + Collector + + // Set sets the Gauge to an arbitrary value. + Set(float64) + // Inc increments the Gauge by 1. + Inc() + // Dec decrements the Gauge by 1. + Dec() + // Add adds the given value to the Gauge. (The value can be + // negative, resulting in a decrease of the Gauge.) + Add(float64) + // Sub subtracts the given value from the Gauge. (The value can be + // negative, resulting in an increase of the Gauge.) + Sub(float64) +} + +// GaugeOpts is an alias for Opts. See there for doc comments. +type GaugeOpts Opts + +// NewGauge creates a new Gauge based on the provided GaugeOpts. 
+func NewGauge(opts GaugeOpts) Gauge { + return newValue(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, 0) +} + +// GaugeVec is a Collector that bundles a set of Gauges that all share the same +// Desc, but have different values for their variable labels. This is used if +// you want to count the same thing partitioned by various dimensions +// (e.g. number of operations queued, partitioned by user and operation +// type). Create instances with NewGaugeVec. +type GaugeVec struct { + *MetricVec +} + +// NewGaugeVec creates a new GaugeVec based on the provided GaugeOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewGaugeVec(opts GaugeOpts, labelNames []string) *GaugeVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &GaugeVec{ + MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newValue(desc, GaugeValue, 0, lvs...) + }), + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Gauge and not a +// Metric so that no type conversion is required. +func (m *GaugeVec) GetMetricWithLabelValues(lvs ...string) (Gauge, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Gauge and not a Metric so that no +// type conversion is required. 
+func (m *GaugeVec) GetMetricWith(labels Labels) (Gauge, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Gauge), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (m *GaugeVec) WithLabelValues(lvs ...string) Gauge { + return m.MetricVec.WithLabelValues(lvs...).(Gauge) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *GaugeVec) With(labels Labels) Gauge { + return m.MetricVec.With(labels).(Gauge) +} + +// GaugeFunc is a Gauge whose value is determined at collect time by calling a +// provided function. +// +// To create GaugeFunc instances, use NewGaugeFunc. +type GaugeFunc interface { + Metric + Collector +} + +// NewGaugeFunc creates a new GaugeFunc based on the provided GaugeOpts. The +// value reported is determined by calling the given function from within the +// Write method. Take into account that metric collection may happen +// concurrently. If that results in concurrent calls to Write, like in the case +// where a GaugeFunc is directly registered with Prometheus, the provided +// function must be concurrency-safe. 
+func NewGaugeFunc(opts GaugeOpts, function func() float64) GaugeFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), GaugeValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go b/vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go new file mode 100644 index 000000000..48cab4636 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/gauge_test.go @@ -0,0 +1,182 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "math" + "math/rand" + "sync" + "testing" + "testing/quick" + + dto "github.com/prometheus/client_model/go" +) + +func listenGaugeStream(vals, result chan float64, done chan struct{}) { + var sum float64 +outer: + for { + select { + case <-done: + close(vals) + for v := range vals { + sum += v + } + break outer + case v := <-vals: + sum += v + } + } + result <- sum + close(result) +} + +func TestGaugeConcurrency(t *testing.T) { + it := func(n uint32) bool { + mutations := int(n % 10000) + concLevel := int(n%15 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sStream := make(chan float64, mutations*concLevel) + result := make(chan float64) + done := make(chan struct{}) + + go listenGaugeStream(sStream, result, done) + go func() { + end.Wait() + close(done) + }() + + gge := NewGauge(GaugeOpts{ + Name: "test_gauge", + Help: "no help can be found here", + }) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + for j := 0; j < mutations; j++ { + vals[j] = rand.Float64() - 0.5 + } + + go func(vals []float64) { + start.Wait() + for _, v := range vals { + sStream <- v + gge.Add(v) + } + end.Done() + }(vals) + } + start.Done() + + if expected, got := <-result, math.Float64frombits(gge.(*value).valBits); math.Abs(expected-got) > 0.000001 { + t.Fatalf("expected approx. 
%f, got %f", expected, got) + return false + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Fatal(err) + } +} + +func TestGaugeVecConcurrency(t *testing.T) { + it := func(n uint32) bool { + mutations := int(n % 10000) + concLevel := int(n%15 + 1) + vecLength := int(n%5 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sStreams := make([]chan float64, vecLength) + results := make([]chan float64, vecLength) + done := make(chan struct{}) + + for i := 0; i < vecLength; i++ { + sStreams[i] = make(chan float64, mutations*concLevel) + results[i] = make(chan float64) + go listenGaugeStream(sStreams[i], results[i], done) + } + + go func() { + end.Wait() + close(done) + }() + + gge := NewGaugeVec( + GaugeOpts{ + Name: "test_gauge", + Help: "no help can be found here", + }, + []string{"label"}, + ) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + pick := make([]int, mutations) + for j := 0; j < mutations; j++ { + vals[j] = rand.Float64() - 0.5 + pick[j] = rand.Intn(vecLength) + } + + go func(vals []float64) { + start.Wait() + for i, v := range vals { + sStreams[pick[i]] <- v + gge.WithLabelValues(string('A' + pick[i])).Add(v) + } + end.Done() + }(vals) + } + start.Done() + + for i := range sStreams { + if expected, got := <-results[i], math.Float64frombits(gge.WithLabelValues(string('A'+i)).(*value).valBits); math.Abs(expected-got) > 0.000001 { + t.Fatalf("expected approx. 
%f, got %f", expected, got) + return false + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Fatal(err) + } +} + +func TestGaugeFunc(t *testing.T) { + gf := NewGaugeFunc( + GaugeOpts{ + Name: "test_name", + Help: "test help", + ConstLabels: Labels{"a": "1", "b": "2"}, + }, + func() float64 { return 3.1415 }, + ) + + if expected, got := `Desc{fqName: "test_name", help: "test help", constLabels: {a="1",b="2"}, variableLabels: []}`, gf.Desc().String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } + + m := &dto.Metric{} + gf.Write(m) + + if expected, got := `label: label: gauge: `, m.String(); expected != got { + t.Errorf("expected %q, got %q", expected, got) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go new file mode 100644 index 000000000..abc9d4ec4 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector.go @@ -0,0 +1,263 @@ +package prometheus + +import ( + "fmt" + "runtime" + "runtime/debug" + "time" +) + +type goCollector struct { + goroutines Gauge + gcDesc *Desc + + // metrics to describe and collect + metrics memStatsMetrics +} + +// NewGoCollector returns a collector which exports metrics about the current +// go process. 
+func NewGoCollector() Collector { + return &goCollector{ + goroutines: NewGauge(GaugeOpts{ + Namespace: "go", + Name: "goroutines", + Help: "Number of goroutines that currently exist.", + }), + gcDesc: NewDesc( + "go_gc_duration_seconds", + "A summary of the GC invocation durations.", + nil, nil), + metrics: memStatsMetrics{ + { + desc: NewDesc( + memstatNamespace("alloc_bytes"), + "Number of bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Alloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("alloc_bytes_total"), + "Total number of bytes allocated, even if freed.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.TotalAlloc) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("sys_bytes"), + "Number of bytes obtained by system. Sum of all system allocations.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Sys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("lookups_total"), + "Total number of pointer lookups.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Lookups) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("mallocs_total"), + "Total number of mallocs.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Mallocs) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("frees_total"), + "Total number of frees.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.Frees) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("heap_alloc_bytes"), + "Number of heap bytes allocated and still in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapAlloc) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_sys_bytes"), + "Number of heap bytes obtained from system.", + 
nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_idle_bytes"), + "Number of heap bytes waiting to be used.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapIdle) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_inuse_bytes"), + "Number of heap bytes that are in use.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("heap_released_bytes_total"), + "Total number of heap bytes released to OS.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapReleased) }, + valType: CounterValue, + }, { + desc: NewDesc( + memstatNamespace("heap_objects"), + "Number of allocated objects.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.HeapObjects) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_inuse_bytes"), + "Number of bytes in use by the stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("stack_sys_bytes"), + "Number of bytes obtained from system for stack allocator.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.StackSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_inuse_bytes"), + "Number of bytes in use by mspan structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mspan_sys_bytes"), + "Number of bytes used for mspan structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MSpanSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( 
+ memstatNamespace("mcache_inuse_bytes"), + "Number of bytes in use by mcache structures.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheInuse) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("mcache_sys_bytes"), + "Number of bytes used for mcache structures obtained from system.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.MCacheSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("buck_hash_sys_bytes"), + "Number of bytes used by the profiling bucket hash table.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.BuckHashSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("gc_sys_bytes"), + "Number of bytes used for garbage collection system metadata.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.GCSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("other_sys_bytes"), + "Number of bytes used for other system allocations.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.OtherSys) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("next_gc_bytes"), + "Number of heap bytes when next garbage collection will take place.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.NextGC) }, + valType: GaugeValue, + }, { + desc: NewDesc( + memstatNamespace("last_gc_time_seconds"), + "Number of seconds since 1970 of last garbage collection.", + nil, nil, + ), + eval: func(ms *runtime.MemStats) float64 { return float64(ms.LastGC) / 1e9 }, + valType: GaugeValue, + }, + }, + } +} + +func memstatNamespace(s string) string { + return fmt.Sprintf("go_memstats_%s", s) +} + +// Describe returns all descriptions of the collector. 
+func (c *goCollector) Describe(ch chan<- *Desc) { + ch <- c.goroutines.Desc() + ch <- c.gcDesc + + for _, i := range c.metrics { + ch <- i.desc + } +} + +// Collect returns the current state of all metrics of the collector. +func (c *goCollector) Collect(ch chan<- Metric) { + c.goroutines.Set(float64(runtime.NumGoroutine())) + ch <- c.goroutines + + var stats debug.GCStats + stats.PauseQuantiles = make([]time.Duration, 5) + debug.ReadGCStats(&stats) + + quantiles := make(map[float64]float64) + for idx, pq := range stats.PauseQuantiles[1:] { + quantiles[float64(idx+1)/float64(len(stats.PauseQuantiles)-1)] = pq.Seconds() + } + quantiles[0.0] = stats.PauseQuantiles[0].Seconds() + ch <- MustNewConstSummary(c.gcDesc, uint64(stats.NumGC), float64(stats.PauseTotal.Seconds()), quantiles) + + ms := &runtime.MemStats{} + runtime.ReadMemStats(ms) + for _, i := range c.metrics { + ch <- MustNewConstMetric(i.desc, i.valType, i.eval(ms)) + } +} + +// memStatsMetrics provide description, value, and value type for memstat metrics. 
+type memStatsMetrics []struct { + desc *Desc + eval func(*runtime.MemStats) float64 + valType ValueType +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go new file mode 100644 index 000000000..9a8858cbd --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/go_collector_test.go @@ -0,0 +1,123 @@ +package prometheus + +import ( + "runtime" + "testing" + "time" + + dto "github.com/prometheus/client_model/go" +) + +func TestGoCollector(t *testing.T) { + var ( + c = NewGoCollector() + ch = make(chan Metric) + waitc = make(chan struct{}) + closec = make(chan struct{}) + old = -1 + ) + defer close(closec) + + go func() { + c.Collect(ch) + go func(c <-chan struct{}) { + <-c + }(closec) + <-waitc + c.Collect(ch) + }() + + for { + select { + case metric := <-ch: + switch m := metric.(type) { + // Attention, this also catches Counter... + case Gauge: + pb := &dto.Metric{} + m.Write(pb) + if pb.GetGauge() == nil { + continue + } + + if old == -1 { + old = int(pb.GetGauge().GetValue()) + close(waitc) + continue + } + + if diff := int(pb.GetGauge().GetValue()) - old; diff != 1 { + // TODO: This is flaky in highly concurrent situations. + t.Errorf("want 1 new goroutine, got %d", diff) + } + + // GoCollector performs two sends per call. + // On line 27 we need to receive the second send + // to shut down cleanly. 
+ <-ch + return + } + case <-time.After(1 * time.Second): + t.Fatalf("expected collect timed out") + } + } +} + +func TestGCCollector(t *testing.T) { + var ( + c = NewGoCollector() + ch = make(chan Metric) + waitc = make(chan struct{}) + closec = make(chan struct{}) + oldGC uint64 + oldPause float64 + ) + defer close(closec) + + go func() { + c.Collect(ch) + // force GC + runtime.GC() + <-waitc + c.Collect(ch) + }() + + first := true + for { + select { + case metric := <-ch: + switch m := metric.(type) { + case *constSummary, *value: + pb := &dto.Metric{} + m.Write(pb) + if pb.GetSummary() == nil { + continue + } + + if len(pb.GetSummary().Quantile) != 5 { + t.Errorf("expected 4 buckets, got %d", len(pb.GetSummary().Quantile)) + } + for idx, want := range []float64{0.0, 0.25, 0.5, 0.75, 1.0} { + if *pb.GetSummary().Quantile[idx].Quantile != want { + t.Errorf("bucket #%d is off, got %f, want %f", idx, *pb.GetSummary().Quantile[idx].Quantile, want) + } + } + if first { + first = false + oldGC = *pb.GetSummary().SampleCount + oldPause = *pb.GetSummary().SampleSum + close(waitc) + continue + } + if diff := *pb.GetSummary().SampleCount - oldGC; diff != 1 { + t.Errorf("want 1 new garbage collection run, got %d", diff) + } + if diff := *pb.GetSummary().SampleSum - oldPause; diff <= 0 { + t.Errorf("want moar pause, got %f", diff) + } + return + } + case <-time.After(1 * time.Second): + t.Fatalf("expected collect timed out") + } + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go new file mode 100644 index 000000000..9719e8fac --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram.go @@ -0,0 +1,444 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "sort" + "sync/atomic" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// A Histogram counts individual observations from an event or sample stream in +// configurable buckets. Similar to a summary, it also provides a sum of +// observations and an observation count. +// +// On the Prometheus server, quantiles can be calculated from a Histogram using +// the histogram_quantile function in the query language. +// +// Note that Histograms, in contrast to Summaries, can be aggregated with the +// Prometheus query language (see the documentation for detailed +// procedures). However, Histograms require the user to pre-define suitable +// buckets, and they are in general less accurate. The Observe method of a +// Histogram has a very low performance overhead in comparison with the Observe +// method of a Summary. +// +// To create Histogram instances, use NewHistogram. +type Histogram interface { + Metric + Collector + + // Observe adds a single observation to the histogram. + Observe(float64) +} + +// bucketLabel is used for the label that defines the upper bound of a +// bucket of a histogram ("le" -> "less or equal"). +const bucketLabel = "le" + +// DefBuckets are the default Histogram buckets. The default buckets are +// tailored to broadly measure the response time (in seconds) of a network +// service. Most likely, however, you will be required to define buckets +// customized to your use case. 
+var ( + DefBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10} + + errBucketLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in histograms", bucketLabel, + ) +) + +// LinearBuckets creates 'count' buckets, each 'width' wide, where the lowest +// bucket has an upper bound of 'start'. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is zero or negative. +func LinearBuckets(start, width float64, count int) []float64 { + if count < 1 { + panic("LinearBuckets needs a positive count") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start += width + } + return buckets +} + +// ExponentialBuckets creates 'count' buckets, where the lowest bucket has an +// upper bound of 'start' and each following bucket's upper bound is 'factor' +// times the previous bucket's upper bound. The final +Inf bucket is not counted +// and not included in the returned slice. The returned slice is meant to be +// used for the Buckets field of HistogramOpts. +// +// The function panics if 'count' is 0 or negative, if 'start' is 0 or negative, +// or if 'factor' is less than or equal 1. +func ExponentialBuckets(start, factor float64, count int) []float64 { + if count < 1 { + panic("ExponentialBuckets needs a positive count") + } + if start <= 0 { + panic("ExponentialBuckets needs a positive start value") + } + if factor <= 1 { + panic("ExponentialBuckets needs a factor greater than 1") + } + buckets := make([]float64, count) + for i := range buckets { + buckets[i] = start + start *= factor + } + return buckets +} + +// HistogramOpts bundles the options for creating a Histogram metric. It is +// mandatory to set Name and Help to a non-empty string. All other fields are +// optional and can safely be left at their zero value. 
+type HistogramOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Histogram (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Histogram must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Histogram. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this + // Histogram. Histograms with the same fully-qualified name must have the + // same label names in their ConstLabels. + // + // Note that in most cases, labels have a value that varies during the + // lifetime of a process. Those labels are usually managed with a + // HistogramVec. ConstLabels serve only special purposes. One is for the + // special case where the value of a label does not change during the + // lifetime of a process, e.g. if the revision of the running binary is + // put into a label. Another, more advanced purpose is if more than one + // Collector needs to collect Histograms with the same fully-qualified + // name. In that case, those Summaries must differ in the values of + // their ConstLabels. See the Collector examples. + // + // If the value of a label never changes (not even between binaries), + // that label most likely should not be a label at all (but part of the + // metric name). + ConstLabels Labels + + // Buckets defines the buckets into which observations are counted. Each + // element in the slice is the upper inclusive bound of a bucket. The + // values must be sorted in strictly increasing order. There is no need + // to add a highest bucket with +Inf bound, it will be added + // implicitly. The default value is DefBuckets. 
+ Buckets []float64 +} + +// NewHistogram creates a new Histogram based on the provided HistogramOpts. It +// panics if the buckets in HistogramOpts are not in strictly increasing order. +func NewHistogram(opts HistogramOpts) Histogram { + return newHistogram( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newHistogram(desc *Desc, opts HistogramOpts, labelValues ...string) Histogram { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + for _, n := range desc.variableLabels { + if n == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == bucketLabel { + panic(errBucketLabelNotAllowed) + } + } + + if len(opts.Buckets) == 0 { + opts.Buckets = DefBuckets + } + + h := &histogram{ + desc: desc, + upperBounds: opts.Buckets, + labelPairs: makeLabelPairs(desc, labelValues), + } + for i, upperBound := range h.upperBounds { + if i < len(h.upperBounds)-1 { + if upperBound >= h.upperBounds[i+1] { + panic(fmt.Errorf( + "histogram buckets must be in increasing order: %f >= %f", + upperBound, h.upperBounds[i+1], + )) + } + } else { + if math.IsInf(upperBound, +1) { + // The +Inf bucket is implicit. Remove it here. + h.upperBounds = h.upperBounds[:i] + } + } + } + // Finally we know the final length of h.upperBounds and can make counts. + h.counts = make([]uint64, len(h.upperBounds)) + + h.init(h) // Init self-collection. + return h +} + +type histogram struct { + // sumBits contains the bits of the float64 representing the sum of all + // observations. sumBits and count have to go first in the struct to + // guarantee alignment for atomic operations. + // http://golang.org/pkg/sync/atomic/#pkg-note-BUG + sumBits uint64 + count uint64 + + selfCollector + // Note that there is no mutex required. 
+ + desc *Desc + + upperBounds []float64 + counts []uint64 + + labelPairs []*dto.LabelPair +} + +func (h *histogram) Desc() *Desc { + return h.desc +} + +func (h *histogram) Observe(v float64) { + // TODO(beorn7): For small numbers of buckets (<30), a linear search is + // slightly faster than the binary search. If we really care, we could + // switch from one search strategy to the other depending on the number + // of buckets. + // + // Microbenchmarks (BenchmarkHistogramNoLabels): + // 11 buckets: 38.3 ns/op linear - binary 48.7 ns/op + // 100 buckets: 78.1 ns/op linear - binary 54.9 ns/op + // 300 buckets: 154 ns/op linear - binary 61.6 ns/op + i := sort.SearchFloat64s(h.upperBounds, v) + if i < len(h.counts) { + atomic.AddUint64(&h.counts[i], 1) + } + atomic.AddUint64(&h.count, 1) + for { + oldBits := atomic.LoadUint64(&h.sumBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + v) + if atomic.CompareAndSwapUint64(&h.sumBits, oldBits, newBits) { + break + } + } +} + +func (h *histogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, len(h.upperBounds)) + + his.SampleSum = proto.Float64(math.Float64frombits(atomic.LoadUint64(&h.sumBits))) + his.SampleCount = proto.Uint64(atomic.LoadUint64(&h.count)) + var count uint64 + for i, upperBound := range h.upperBounds { + count += atomic.LoadUint64(&h.counts[i]) + buckets[i] = &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + } + } + his.Bucket = buckets + out.Histogram = his + out.Label = h.labelPairs + return nil +} + +// HistogramVec is a Collector that bundles a set of Histograms that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewHistogramVec. 
+type HistogramVec struct { + *MetricVec +} + +// NewHistogramVec creates a new HistogramVec based on the provided HistogramOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewHistogramVec(opts HistogramOpts, labelNames []string) *HistogramVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &HistogramVec{ + MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newHistogram(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Histogram and not a +// Metric so that no type conversion is required. +func (m *HistogramVec) GetMetricWithLabelValues(lvs ...string) (Histogram, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Histogram), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Histogram and not a Metric so that no +// type conversion is required. +func (m *HistogramVec) GetMetricWith(labels Labels) (Histogram, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Histogram), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (m *HistogramVec) WithLabelValues(lvs ...string) Histogram { + return m.MetricVec.WithLabelValues(lvs...).(Histogram) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. 
By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (m *HistogramVec) With(labels Labels) Histogram { + return m.MetricVec.With(labels).(Histogram) +} + +type constHistogram struct { + desc *Desc + count uint64 + sum float64 + buckets map[float64]uint64 + labelPairs []*dto.LabelPair +} + +func (h *constHistogram) Desc() *Desc { + return h.desc +} + +func (h *constHistogram) Write(out *dto.Metric) error { + his := &dto.Histogram{} + buckets := make([]*dto.Bucket, 0, len(h.buckets)) + + his.SampleCount = proto.Uint64(h.count) + his.SampleSum = proto.Float64(h.sum) + + for upperBound, count := range h.buckets { + buckets = append(buckets, &dto.Bucket{ + CumulativeCount: proto.Uint64(count), + UpperBound: proto.Float64(upperBound), + }) + } + + if len(buckets) > 0 { + sort.Sort(buckSort(buckets)) + } + his.Bucket = buckets + + out.Histogram = his + out.Label = h.labelPairs + + return nil +} + +// NewConstHistogram returns a metric representing a Prometheus histogram with +// fixed values for the count, sum, and bucket counts. As those parameters +// cannot be changed, the returned value does not implement the Histogram +// interface (but only the Metric interface). Users of this package will not +// have much use for it in regular operations. However, when implementing custom +// Collectors, it is useful as a throw-away metric that is generated on the fly +// to send it to Prometheus in the Collect method. +// +// buckets is a map of upper bounds to cumulative counts, excluding the +Inf +// bucket. +// +// NewConstHistogram returns an error if the length of labelValues is not +// consistent with the variable labels in Desc. 
+func NewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) (Metric, error) { + if len(desc.variableLabels) != len(labelValues) { + return nil, errInconsistentCardinality + } + return &constHistogram{ + desc: desc, + count: count, + sum: sum, + buckets: buckets, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstHistogram is a version of NewConstHistogram that panics where +// NewConstMetric would have returned an error. +func MustNewConstHistogram( + desc *Desc, + count uint64, + sum float64, + buckets map[float64]uint64, + labelValues ...string, +) Metric { + m, err := NewConstHistogram(desc, count, sum, buckets, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type buckSort []*dto.Bucket + +func (s buckSort) Len() int { + return len(s) +} + +func (s buckSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s buckSort) Less(i, j int) bool { + return s[i].GetUpperBound() < s[j].GetUpperBound() +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go b/vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go new file mode 100644 index 000000000..d1242e08d --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/histogram_test.go @@ -0,0 +1,326 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "math" + "math/rand" + "reflect" + "sort" + "sync" + "testing" + "testing/quick" + + dto "github.com/prometheus/client_model/go" +) + +func benchmarkHistogramObserve(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewHistogram(HistogramOpts{}) + + for i := 0; i < w; i++ { + go func() { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Observe(float64(i)) + } + + wg.Done() + }() + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkHistogramObserve1(b *testing.B) { + benchmarkHistogramObserve(1, b) +} + +func BenchmarkHistogramObserve2(b *testing.B) { + benchmarkHistogramObserve(2, b) +} + +func BenchmarkHistogramObserve4(b *testing.B) { + benchmarkHistogramObserve(4, b) +} + +func BenchmarkHistogramObserve8(b *testing.B) { + benchmarkHistogramObserve(8, b) +} + +func benchmarkHistogramWrite(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewHistogram(HistogramOpts{}) + + for i := 0; i < 1000000; i++ { + s.Observe(float64(i)) + } + + for j := 0; j < w; j++ { + outs := make([]dto.Metric, b.N) + + go func(o []dto.Metric) { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Write(&o[i]) + } + + wg.Done() + }(outs) + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkHistogramWrite1(b *testing.B) { + benchmarkHistogramWrite(1, b) +} + +func BenchmarkHistogramWrite2(b *testing.B) { + benchmarkHistogramWrite(2, b) +} + +func BenchmarkHistogramWrite4(b *testing.B) { + benchmarkHistogramWrite(4, b) +} + +func BenchmarkHistogramWrite8(b *testing.B) { + benchmarkHistogramWrite(8, b) +} + +// Intentionally adding +Inf here to test if that case is handled correctly. +// Also, getCumulativeCounts depends on it. 
+var testBuckets = []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)} + +func TestHistogramConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%5 + 1) + total := mutations * concLevel + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sum := NewHistogram(HistogramOpts{ + Name: "test_histogram", + Help: "helpless", + Buckets: testBuckets, + }) + + allVars := make([]float64, total) + var sampleSum float64 + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + allVars[i*mutations+j] = v + sampleSum += v + } + + go func(vals []float64) { + start.Wait() + for _, v := range vals { + sum.Observe(v) + } + end.Done() + }(vals) + } + sort.Float64s(allVars) + start.Done() + end.Wait() + + m := &dto.Metric{} + sum.Write(m) + if got, want := int(*m.Histogram.SampleCount), total; got != want { + t.Errorf("got sample count %d, want %d", got, want) + } + if got, want := *m.Histogram.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f, want %f", got, want) + } + + wantCounts := getCumulativeCounts(allVars) + + if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want { + t.Errorf("got %d buckets in protobuf, want %d", got, want) + } + for i, wantBound := range testBuckets { + if i == len(testBuckets)-1 { + break // No +Inf bucket in protobuf. 
+ } + if gotBound := *m.Histogram.Bucket[i].UpperBound; gotBound != wantBound { + t.Errorf("got bound %f, want %f", gotBound, wantBound) + } + if gotCount, wantCount := *m.Histogram.Bucket[i].CumulativeCount, wantCounts[i]; gotCount != wantCount { + t.Errorf("got count %d, want %d", gotCount, wantCount) + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func TestHistogramVecConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + objectives := make([]float64, 0, len(DefObjectives)) + for qu := range DefObjectives { + + objectives = append(objectives, qu) + } + sort.Float64s(objectives) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%7 + 1) + vecLength := int(n%3 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + his := NewHistogramVec( + HistogramOpts{ + Name: "test_histogram", + Help: "helpless", + Buckets: []float64{-2, -1, -0.5, 0, 0.5, 1, 2, math.Inf(+1)}, + }, + []string{"label"}, + ) + + allVars := make([][]float64, vecLength) + sampleSums := make([]float64, vecLength) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + picks := make([]int, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + pick := rand.Intn(vecLength) + picks[j] = pick + allVars[pick] = append(allVars[pick], v) + sampleSums[pick] += v + } + + go func(vals []float64) { + start.Wait() + for i, v := range vals { + his.WithLabelValues(string('A' + picks[i])).Observe(v) + } + end.Done() + }(vals) + } + for _, vars := range allVars { + sort.Float64s(vars) + } + start.Done() + end.Wait() + + for i := 0; i < vecLength; i++ { + m := &dto.Metric{} + s := his.WithLabelValues(string('A' + i)) + s.Write(m) + + if got, want := len(m.Histogram.Bucket), len(testBuckets)-1; got != want { + t.Errorf("got %d buckets in protobuf, want %d", got, want) + } + if got, want := 
int(*m.Histogram.SampleCount), len(allVars[i]); got != want { + t.Errorf("got sample count %d, want %d", got, want) + } + if got, want := *m.Histogram.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f, want %f", got, want) + } + + wantCounts := getCumulativeCounts(allVars[i]) + + for j, wantBound := range testBuckets { + if j == len(testBuckets)-1 { + break // No +Inf bucket in protobuf. + } + if gotBound := *m.Histogram.Bucket[j].UpperBound; gotBound != wantBound { + t.Errorf("got bound %f, want %f", gotBound, wantBound) + } + if gotCount, wantCount := *m.Histogram.Bucket[j].CumulativeCount, wantCounts[j]; gotCount != wantCount { + t.Errorf("got count %d, want %d", gotCount, wantCount) + } + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func getCumulativeCounts(vars []float64) []uint64 { + counts := make([]uint64, len(testBuckets)) + for _, v := range vars { + for i := len(testBuckets) - 1; i >= 0; i-- { + if v > testBuckets[i] { + break + } + counts[i]++ + } + } + return counts +} + +func TestBuckets(t *testing.T) { + got := LinearBuckets(-15, 5, 6) + want := []float64{-15, -10, -5, 0, 5, 10} + if !reflect.DeepEqual(got, want) { + t.Errorf("linear buckets: got %v, want %v", got, want) + } + + got = ExponentialBuckets(100, 1.2, 3) + want = []float64{100, 120, 144} + if !reflect.DeepEqual(got, want) { + t.Errorf("linear buckets: got %v, want %v", got, want) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http.go b/vendor/github.com/prometheus/client_golang/prometheus/http.go new file mode 100644 index 000000000..67ee5ac79 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/http.go @@ -0,0 +1,490 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bufio" + "bytes" + "compress/gzip" + "fmt" + "io" + "net" + "net/http" + "strconv" + "strings" + "sync" + "time" + + "github.com/prometheus/common/expfmt" +) + +// TODO(beorn7): Remove this whole file. It is a partial mirror of +// promhttp/http.go (to avoid circular import chains) where everything HTTP +// related should live. The functions here are just for avoiding +// breakage. Everything is deprecated. + +const ( + contentTypeHeader = "Content-Type" + contentLengthHeader = "Content-Length" + contentEncodingHeader = "Content-Encoding" + acceptEncodingHeader = "Accept-Encoding" +) + +var bufPool sync.Pool + +func getBuf() *bytes.Buffer { + buf := bufPool.Get() + if buf == nil { + return &bytes.Buffer{} + } + return buf.(*bytes.Buffer) +} + +func giveBuf(buf *bytes.Buffer) { + buf.Reset() + bufPool.Put(buf) +} + +// Handler returns an HTTP handler for the DefaultGatherer. It is +// already instrumented with InstrumentHandler (using "prometheus" as handler +// name). +// +// Deprecated: Please note the issues described in the doc comment of +// InstrumentHandler. You might want to consider using promhttp.Handler instead +// (which is non instrumented). +func Handler() http.Handler { + return InstrumentHandler("prometheus", UninstrumentedHandler()) +} + +// UninstrumentedHandler returns an HTTP handler for the DefaultGatherer. +// +// Deprecated: Use promhttp.Handler instead. See there for further documentation. 
+func UninstrumentedHandler() http.Handler { + return http.HandlerFunc(func(w http.ResponseWriter, req *http.Request) { + mfs, err := DefaultGatherer.Gather() + if err != nil { + http.Error(w, "An error has occurred during metrics collection:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + + contentType := expfmt.Negotiate(req.Header) + buf := getBuf() + defer giveBuf(buf) + writer, encoding := decorateWriter(req, buf) + enc := expfmt.NewEncoder(writer, contentType) + var lastErr error + for _, mf := range mfs { + if err := enc.Encode(mf); err != nil { + lastErr = err + http.Error(w, "An error has occurred during metrics encoding:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + } + if closer, ok := writer.(io.Closer); ok { + closer.Close() + } + if lastErr != nil && buf.Len() == 0 { + http.Error(w, "No metrics encoded, last error:\n\n"+err.Error(), http.StatusInternalServerError) + return + } + header := w.Header() + header.Set(contentTypeHeader, string(contentType)) + header.Set(contentLengthHeader, fmt.Sprint(buf.Len())) + if encoding != "" { + header.Set(contentEncodingHeader, encoding) + } + w.Write(buf.Bytes()) + }) +} + +// decorateWriter wraps a writer to handle gzip compression if requested. It +// returns the decorated writer and the appropriate "Content-Encoding" header +// (which is empty if no compression is enabled). 
+func decorateWriter(request *http.Request, writer io.Writer) (io.Writer, string) { + header := request.Header.Get(acceptEncodingHeader) + parts := strings.Split(header, ",") + for _, part := range parts { + part := strings.TrimSpace(part) + if part == "gzip" || strings.HasPrefix(part, "gzip;") { + return gzip.NewWriter(writer), "gzip" + } + } + return writer, "" +} + +var instLabels = []string{"method", "code"} + +type nower interface { + Now() time.Time +} + +type nowFunc func() time.Time + +func (n nowFunc) Now() time.Time { + return n() +} + +var now nower = nowFunc(func() time.Time { + return time.Now() +}) + +func nowSeries(t ...time.Time) nower { + return nowFunc(func() time.Time { + defer func() { + t = t[1:] + }() + + return t[0] + }) +} + +// InstrumentHandler wraps the given HTTP handler for instrumentation. It +// registers four metric collectors (if not already done) and reports HTTP +// metrics to the (newly or already) registered collectors: http_requests_total +// (CounterVec), http_request_duration_microseconds (Summary), +// http_request_size_bytes (Summary), http_response_size_bytes (Summary). Each +// has a constant label named "handler" with the provided handlerName as +// value. http_requests_total is a metric vector partitioned by HTTP method +// (label name "method") and HTTP status code (label name "code"). +// +// Deprecated: InstrumentHandler has several issues: +// +// - It uses Summaries rather than Histograms. Summaries are not useful if +// aggregation across multiple instances is required. +// +// - It uses microseconds as unit, which is deprecated and should be replaced by +// seconds. +// +// - The size of the request is calculated in a separate goroutine. Since this +// calculator requires access to the request header, it creates a race with +// any writes to the header performed during request handling. +// httputil.ReverseProxy is a prominent example for a handler +// performing such writes. 
+// +// Upcoming versions of this package will provide ways of instrumenting HTTP +// handlers that are more flexible and have fewer issues. Please prefer direct +// instrumentation in the meantime. +func InstrumentHandler(handlerName string, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFunc(handlerName, handler.ServeHTTP) +} + +// InstrumentHandlerFunc wraps the given function for instrumentation. It +// otherwise works in the same way as InstrumentHandler (and shares the same +// issues). +// +// Deprecated: InstrumentHandlerFunc is deprecated for the same reasons as +// InstrumentHandler is. +func InstrumentHandlerFunc(handlerName string, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts( + SummaryOpts{ + Subsystem: "http", + ConstLabels: Labels{"handler": handlerName}, + }, + handlerFunc, + ) +} + +// InstrumentHandlerWithOpts works like InstrumentHandler (and shares the same +// issues) but provides more flexibility (at the cost of a more complex call +// syntax). As InstrumentHandler, this function registers four metric +// collectors, but it uses the provided SummaryOpts to create them. However, the +// fields "Name" and "Help" in the SummaryOpts are ignored. "Name" is replaced +// by "requests_total", "request_duration_microseconds", "request_size_bytes", +// and "response_size_bytes", respectively. "Help" is replaced by an appropriate +// help string. The names of the variable labels of the http_requests_total +// CounterVec are "method" (get, post, etc.), and "code" (HTTP status code). 
+// +// If InstrumentHandlerWithOpts is called as follows, it mimics exactly the +// behavior of InstrumentHandler: +// +// prometheus.InstrumentHandlerWithOpts( +// prometheus.SummaryOpts{ +// Subsystem: "http", +// ConstLabels: prometheus.Labels{"handler": handlerName}, +// }, +// handler, +// ) +// +// Technical detail: "requests_total" is a CounterVec, not a SummaryVec, so it +// cannot use SummaryOpts. Instead, a CounterOpts struct is created internally, +// and all its fields are set to the equally named fields in the provided +// SummaryOpts. +// +// Deprecated: InstrumentHandlerWithOpts is deprecated for the same reasons as +// InstrumentHandler is. +func InstrumentHandlerWithOpts(opts SummaryOpts, handler http.Handler) http.HandlerFunc { + return InstrumentHandlerFuncWithOpts(opts, handler.ServeHTTP) +} + +// InstrumentHandlerFuncWithOpts works like InstrumentHandlerFunc (and shares +// the same issues) but provides more flexibility (at the cost of a more complex +// call syntax). See InstrumentHandlerWithOpts for details how the provided +// SummaryOpts are used. +// +// Deprecated: InstrumentHandlerFuncWithOpts is deprecated for the same reasons +// as InstrumentHandler is. +func InstrumentHandlerFuncWithOpts(opts SummaryOpts, handlerFunc func(http.ResponseWriter, *http.Request)) http.HandlerFunc { + reqCnt := NewCounterVec( + CounterOpts{ + Namespace: opts.Namespace, + Subsystem: opts.Subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: opts.ConstLabels, + }, + instLabels, + ) + + opts.Name = "request_duration_microseconds" + opts.Help = "The HTTP request latencies in microseconds." + reqDur := NewSummary(opts) + + opts.Name = "request_size_bytes" + opts.Help = "The HTTP request sizes in bytes." + reqSz := NewSummary(opts) + + opts.Name = "response_size_bytes" + opts.Help = "The HTTP response sizes in bytes." 
+ resSz := NewSummary(opts) + + regReqCnt := MustRegisterOrGet(reqCnt).(*CounterVec) + regReqDur := MustRegisterOrGet(reqDur).(Summary) + regReqSz := MustRegisterOrGet(reqSz).(Summary) + regResSz := MustRegisterOrGet(resSz).(Summary) + + return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { + now := time.Now() + + delegate := &responseWriterDelegator{ResponseWriter: w} + out := make(chan int) + urlLen := 0 + if r.URL != nil { + urlLen = len(r.URL.String()) + } + go computeApproximateRequestSize(r, out, urlLen) + + _, cn := w.(http.CloseNotifier) + _, fl := w.(http.Flusher) + _, hj := w.(http.Hijacker) + _, rf := w.(io.ReaderFrom) + var rw http.ResponseWriter + if cn && fl && hj && rf { + rw = &fancyResponseWriterDelegator{delegate} + } else { + rw = delegate + } + handlerFunc(rw, r) + + elapsed := float64(time.Since(now)) / float64(time.Microsecond) + + method := sanitizeMethod(r.Method) + code := sanitizeCode(delegate.status) + regReqCnt.WithLabelValues(method, code).Inc() + regReqDur.Observe(elapsed) + regResSz.Observe(float64(delegate.written)) + regReqSz.Observe(float64(<-out)) + }) +} + +func computeApproximateRequestSize(r *http.Request, out chan int, s int) { + s += len(r.Method) + s += len(r.Proto) + for name, values := range r.Header { + s += len(name) + for _, value := range values { + s += len(value) + } + } + s += len(r.Host) + + // N.B. r.Form and r.MultipartForm are assumed to be included in r.URL. 
+ + if r.ContentLength != -1 { + s += int(r.ContentLength) + } + out <- s +} + +type responseWriterDelegator struct { + http.ResponseWriter + + handler, method string + status int + written int64 + wroteHeader bool +} + +func (r *responseWriterDelegator) WriteHeader(code int) { + r.status = code + r.wroteHeader = true + r.ResponseWriter.WriteHeader(code) +} + +func (r *responseWriterDelegator) Write(b []byte) (int, error) { + if !r.wroteHeader { + r.WriteHeader(http.StatusOK) + } + n, err := r.ResponseWriter.Write(b) + r.written += int64(n) + return n, err +} + +type fancyResponseWriterDelegator struct { + *responseWriterDelegator +} + +func (f *fancyResponseWriterDelegator) CloseNotify() <-chan bool { + return f.ResponseWriter.(http.CloseNotifier).CloseNotify() +} + +func (f *fancyResponseWriterDelegator) Flush() { + f.ResponseWriter.(http.Flusher).Flush() +} + +func (f *fancyResponseWriterDelegator) Hijack() (net.Conn, *bufio.ReadWriter, error) { + return f.ResponseWriter.(http.Hijacker).Hijack() +} + +func (f *fancyResponseWriterDelegator) ReadFrom(r io.Reader) (int64, error) { + if !f.wroteHeader { + f.WriteHeader(http.StatusOK) + } + n, err := f.ResponseWriter.(io.ReaderFrom).ReadFrom(r) + f.written += n + return n, err +} + +func sanitizeMethod(m string) string { + switch m { + case "GET", "get": + return "get" + case "PUT", "put": + return "put" + case "HEAD", "head": + return "head" + case "POST", "post": + return "post" + case "DELETE", "delete": + return "delete" + case "CONNECT", "connect": + return "connect" + case "OPTIONS", "options": + return "options" + case "NOTIFY", "notify": + return "notify" + default: + return strings.ToLower(m) + } +} + +func sanitizeCode(s int) string { + switch s { + case 100: + return "100" + case 101: + return "101" + + case 200: + return "200" + case 201: + return "201" + case 202: + return "202" + case 203: + return "203" + case 204: + return "204" + case 205: + return "205" + case 206: + return "206" + + case 300: + 
return "300" + case 301: + return "301" + case 302: + return "302" + case 304: + return "304" + case 305: + return "305" + case 307: + return "307" + + case 400: + return "400" + case 401: + return "401" + case 402: + return "402" + case 403: + return "403" + case 404: + return "404" + case 405: + return "405" + case 406: + return "406" + case 407: + return "407" + case 408: + return "408" + case 409: + return "409" + case 410: + return "410" + case 411: + return "411" + case 412: + return "412" + case 413: + return "413" + case 414: + return "414" + case 415: + return "415" + case 416: + return "416" + case 417: + return "417" + case 418: + return "418" + + case 500: + return "500" + case 501: + return "501" + case 502: + return "502" + case 503: + return "503" + case 504: + return "504" + case 505: + return "505" + + case 428: + return "428" + case 429: + return "429" + case 431: + return "431" + case 511: + return "511" + + default: + return strconv.Itoa(s) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/http_test.go b/vendor/github.com/prometheus/client_golang/prometheus/http_test.go new file mode 100644 index 000000000..ffe0418cf --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/http_test.go @@ -0,0 +1,121 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "net/http" + "net/http/httptest" + "testing" + "time" + + dto "github.com/prometheus/client_model/go" +) + +type respBody string + +func (b respBody) ServeHTTP(w http.ResponseWriter, r *http.Request) { + w.WriteHeader(http.StatusTeapot) + w.Write([]byte(b)) +} + +func TestInstrumentHandler(t *testing.T) { + defer func(n nower) { + now = n.(nower) + }(now) + + instant := time.Now() + end := instant.Add(30 * time.Second) + now = nowSeries(instant, end) + respBody := respBody("Howdy there!") + + hndlr := InstrumentHandler("test-handler", respBody) + + opts := SummaryOpts{ + Subsystem: "http", + ConstLabels: Labels{"handler": "test-handler"}, + } + + reqCnt := MustRegisterOrGet(NewCounterVec( + CounterOpts{ + Namespace: opts.Namespace, + Subsystem: opts.Subsystem, + Name: "requests_total", + Help: "Total number of HTTP requests made.", + ConstLabels: opts.ConstLabels, + }, + instLabels, + )).(*CounterVec) + + opts.Name = "request_duration_microseconds" + opts.Help = "The HTTP request latencies in microseconds." + reqDur := MustRegisterOrGet(NewSummary(opts)).(Summary) + + opts.Name = "request_size_bytes" + opts.Help = "The HTTP request sizes in bytes." + MustRegisterOrGet(NewSummary(opts)) + + opts.Name = "response_size_bytes" + opts.Help = "The HTTP response sizes in bytes." + MustRegisterOrGet(NewSummary(opts)) + + reqCnt.Reset() + + resp := httptest.NewRecorder() + req := &http.Request{ + Method: "GET", + } + + hndlr.ServeHTTP(resp, req) + + if resp.Code != http.StatusTeapot { + t.Fatalf("expected status %d, got %d", http.StatusTeapot, resp.Code) + } + if string(resp.Body.Bytes()) != "Howdy there!" 
{ + t.Fatalf("expected body %s, got %s", "Howdy there!", string(resp.Body.Bytes())) + } + + out := &dto.Metric{} + reqDur.Write(out) + if want, got := "test-handler", out.Label[0].GetValue(); want != got { + t.Errorf("want label value %q in reqDur, got %q", want, got) + } + if want, got := uint64(1), out.Summary.GetSampleCount(); want != got { + t.Errorf("want sample count %d in reqDur, got %d", want, got) + } + + out.Reset() + if want, got := 1, len(reqCnt.children); want != got { + t.Errorf("want %d children in reqCnt, got %d", want, got) + } + cnt, err := reqCnt.GetMetricWithLabelValues("get", "418") + if err != nil { + t.Fatal(err) + } + cnt.Write(out) + if want, got := "418", out.Label[0].GetValue(); want != got { + t.Errorf("want label value %q in reqCnt, got %q", want, got) + } + if want, got := "test-handler", out.Label[1].GetValue(); want != got { + t.Errorf("want label value %q in reqCnt, got %q", want, got) + } + if want, got := "get", out.Label[2].GetValue(); want != got { + t.Errorf("want label value %q in reqCnt, got %q", want, got) + } + if out.Counter == nil { + t.Fatal("expected non-nil counter in reqCnt") + } + if want, got := 1., out.Counter.GetValue(); want != got { + t.Errorf("want reqCnt of %f, got %f", want, got) + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric.go b/vendor/github.com/prometheus/client_golang/prometheus/metric.go new file mode 100644 index 000000000..d4063d98f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric.go @@ -0,0 +1,166 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "strings" + + dto "github.com/prometheus/client_model/go" +) + +const separatorByte byte = 255 + +// A Metric models a single sample value with its meta data being exported to +// Prometheus. Implementations of Metric in this package are Gauge, Counter, +// Histogram, Summary, and Untyped. +type Metric interface { + // Desc returns the descriptor for the Metric. This method idempotently + // returns the same descriptor throughout the lifetime of the + // Metric. The returned descriptor is immutable by contract. A Metric + // unable to describe itself must return an invalid descriptor (created + // with NewInvalidDesc). + Desc() *Desc + // Write encodes the Metric into a "Metric" Protocol Buffer data + // transmission object. + // + // Metric implementations must observe concurrency safety as reads of + // this metric may occur at any time, and any blocking occurs at the + // expense of total performance of rendering all registered + // metrics. Ideally, Metric implementations should support concurrent + // readers. + // + // While populating dto.Metric, it is the responsibility of the + // implementation to ensure validity of the Metric protobuf (like valid + // UTF-8 strings or syntactically valid metric and label names). It is + // recommended to sort labels lexicographically. (Implementers may find + // LabelPairSorter useful for that.) Callers of Write should still make + // sure of sorting if they depend on it. 
+ Write(*dto.Metric) error + // TODO(beorn7): The original rationale of passing in a pre-allocated + // dto.Metric protobuf to save allocations has disappeared. The + // signature of this method should be changed to "Write() (*dto.Metric, + // error)". +} + +// Opts bundles the options for creating most Metric types. Each metric +// implementation XXX has its own XXXOpts type, but in most cases, it is just be +// an alias of this type (which might change when the requirement arises.) +// +// It is mandatory to set Name and Help to a non-empty string. All other fields +// are optional and can safely be left at their zero value. +type Opts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Metric (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the metric must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this metric. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this metric. Metrics + // with the same fully-qualified name must have the same label names in + // their ConstLabels. + // + // Note that in most cases, labels have a value that varies during the + // lifetime of a process. Those labels are usually managed with a metric + // vector collector (like CounterVec, GaugeVec, UntypedVec). ConstLabels + // serve only special purposes. One is for the special case where the + // value of a label does not change during the lifetime of a process, + // e.g. if the revision of the running binary is put into a + // label. Another, more advanced purpose is if more than one Collector + // needs to collect Metrics with the same fully-qualified name. 
In that + // case, those Metrics must differ in the values of their + // ConstLabels. See the Collector examples. + // + // If the value of a label never changes (not even between binaries), + // that label most likely should not be a label at all (but part of the + // metric name). + ConstLabels Labels +} + +// BuildFQName joins the given three name components by "_". Empty name +// components are ignored. If the name parameter itself is empty, an empty +// string is returned, no matter what. Metric implementations included in this +// library use this function internally to generate the fully-qualified metric +// name from the name component in their Opts. Users of the library will only +// need this function if they implement their own Metric or instantiate a Desc +// (with NewDesc) directly. +func BuildFQName(namespace, subsystem, name string) string { + if name == "" { + return "" + } + switch { + case namespace != "" && subsystem != "": + return strings.Join([]string{namespace, subsystem, name}, "_") + case namespace != "": + return strings.Join([]string{namespace, name}, "_") + case subsystem != "": + return strings.Join([]string{subsystem, name}, "_") + } + return name +} + +// LabelPairSorter implements sort.Interface. It is used to sort a slice of +// dto.LabelPair pointers. This is useful for implementing the Write method of +// custom metrics. 
+type LabelPairSorter []*dto.LabelPair + +func (s LabelPairSorter) Len() int { + return len(s) +} + +func (s LabelPairSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s LabelPairSorter) Less(i, j int) bool { + return s[i].GetName() < s[j].GetName() +} + +type hashSorter []uint64 + +func (s hashSorter) Len() int { + return len(s) +} + +func (s hashSorter) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s hashSorter) Less(i, j int) bool { + return s[i] < s[j] +} + +type invalidMetric struct { + desc *Desc + err error +} + +// NewInvalidMetric returns a metric whose Write method always returns the +// provided error. It is useful if a Collector finds itself unable to collect +// a metric and wishes to report an error to the registry. +func NewInvalidMetric(desc *Desc, err error) Metric { + return &invalidMetric{desc, err} +} + +func (m *invalidMetric) Desc() *Desc { return m.desc } + +func (m *invalidMetric) Write(*dto.Metric) error { return m.err } diff --git a/vendor/github.com/prometheus/client_golang/prometheus/metric_test.go b/vendor/github.com/prometheus/client_golang/prometheus/metric_test.go new file mode 100644 index 000000000..7145f5e53 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/metric_test.go @@ -0,0 +1,35 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import "testing" + +func TestBuildFQName(t *testing.T) { + scenarios := []struct{ namespace, subsystem, name, result string }{ + {"a", "b", "c", "a_b_c"}, + {"", "b", "c", "b_c"}, + {"a", "", "c", "a_c"}, + {"", "", "c", "c"}, + {"a", "b", "", ""}, + {"a", "", "", ""}, + {"", "b", "", ""}, + {" ", "", "", ""}, + } + + for i, s := range scenarios { + if want, got := s.result, BuildFQName(s.namespace, s.subsystem, s.name); want != got { + t.Errorf("%d. want %s, got %s", i, want, got) + } + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go new file mode 100644 index 000000000..e31e62e78 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector.go @@ -0,0 +1,142 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import "github.com/prometheus/procfs" + +type processCollector struct { + pid int + collectFn func(chan<- Metric) + pidFn func() (int, error) + cpuTotal Counter + openFDs, maxFDs Gauge + vsize, rss Gauge + startTime Gauge +} + +// NewProcessCollector returns a collector which exports the current state of +// process metrics including cpu, memory and file descriptor usage as well as +// the process start time for the given process id under the given namespace. 
+func NewProcessCollector(pid int, namespace string) Collector { + return NewProcessCollectorPIDFn( + func() (int, error) { return pid, nil }, + namespace, + ) +} + +// NewProcessCollectorPIDFn returns a collector which exports the current state +// of process metrics including cpu, memory and file descriptor usage as well +// as the process start time under the given namespace. The given pidFn is +// called on each collect and is used to determine the process to export +// metrics for. +func NewProcessCollectorPIDFn( + pidFn func() (int, error), + namespace string, +) Collector { + c := processCollector{ + pidFn: pidFn, + collectFn: func(chan<- Metric) {}, + + cpuTotal: NewCounter(CounterOpts{ + Namespace: namespace, + Name: "process_cpu_seconds_total", + Help: "Total user and system CPU time spent in seconds.", + }), + openFDs: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_open_fds", + Help: "Number of open file descriptors.", + }), + maxFDs: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_max_fds", + Help: "Maximum number of open file descriptors.", + }), + vsize: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_virtual_memory_bytes", + Help: "Virtual memory size in bytes.", + }), + rss: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_resident_memory_bytes", + Help: "Resident memory size in bytes.", + }), + startTime: NewGauge(GaugeOpts{ + Namespace: namespace, + Name: "process_start_time_seconds", + Help: "Start time of the process since unix epoch in seconds.", + }), + } + + // Set up process metric collection if supported by the runtime. + if _, err := procfs.NewStat(); err == nil { + c.collectFn = c.processCollect + } + + return &c +} + +// Describe returns all descriptions of the collector. 
+func (c *processCollector) Describe(ch chan<- *Desc) { + ch <- c.cpuTotal.Desc() + ch <- c.openFDs.Desc() + ch <- c.maxFDs.Desc() + ch <- c.vsize.Desc() + ch <- c.rss.Desc() + ch <- c.startTime.Desc() +} + +// Collect returns the current state of all metrics of the collector. +func (c *processCollector) Collect(ch chan<- Metric) { + c.collectFn(ch) +} + +// TODO(ts): Bring back error reporting by reverting 7faf9e7 as soon as the +// client allows users to configure the error behavior. +func (c *processCollector) processCollect(ch chan<- Metric) { + pid, err := c.pidFn() + if err != nil { + return + } + + p, err := procfs.NewProc(pid) + if err != nil { + return + } + + if stat, err := p.NewStat(); err == nil { + c.cpuTotal.Set(stat.CPUTime()) + ch <- c.cpuTotal + c.vsize.Set(float64(stat.VirtualMemory())) + ch <- c.vsize + c.rss.Set(float64(stat.ResidentMemory())) + ch <- c.rss + + if startTime, err := stat.StartTime(); err == nil { + c.startTime.Set(startTime) + ch <- c.startTime + } + } + + if fds, err := p.FileDescriptorsLen(); err == nil { + c.openFDs.Set(float64(fds)) + ch <- c.openFDs + } + + if limits, err := p.NewLimits(); err == nil { + c.maxFDs.Set(float64(limits.OpenFiles)) + ch <- c.maxFDs + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go new file mode 100644 index 000000000..d3362dae7 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/process_collector_test.go @@ -0,0 +1,58 @@ +package prometheus + +import ( + "bytes" + "os" + "regexp" + "testing" + + "github.com/prometheus/common/expfmt" + "github.com/prometheus/procfs" +) + +func TestProcessCollector(t *testing.T) { + if _, err := procfs.Self(); err != nil { + t.Skipf("skipping TestProcessCollector, procfs not available: %s", err) + } + + registry := NewRegistry() + if err := registry.Register(NewProcessCollector(os.Getpid(), "")); err != nil { + 
t.Fatal(err) + } + if err := registry.Register(NewProcessCollectorPIDFn( + func() (int, error) { return os.Getpid(), nil }, "foobar"), + ); err != nil { + t.Fatal(err) + } + + mfs, err := registry.Gather() + if err != nil { + t.Fatal(err) + } + + var buf bytes.Buffer + for _, mf := range mfs { + if _, err := expfmt.MetricFamilyToText(&buf, mf); err != nil { + t.Fatal(err) + } + } + + for _, re := range []*regexp.Regexp{ + regexp.MustCompile("process_cpu_seconds_total [0-9]"), + regexp.MustCompile("process_max_fds [1-9]"), + regexp.MustCompile("process_open_fds [1-9]"), + regexp.MustCompile("process_virtual_memory_bytes [1-9]"), + regexp.MustCompile("process_resident_memory_bytes [1-9]"), + regexp.MustCompile("process_start_time_seconds [0-9.]{10,}"), + regexp.MustCompile("foobar_process_cpu_seconds_total [0-9]"), + regexp.MustCompile("foobar_process_max_fds [1-9]"), + regexp.MustCompile("foobar_process_open_fds [1-9]"), + regexp.MustCompile("foobar_process_virtual_memory_bytes [1-9]"), + regexp.MustCompile("foobar_process_resident_memory_bytes [1-9]"), + regexp.MustCompile("foobar_process_start_time_seconds [0-9.]{10,}"), + } { + if !re.Match(buf.Bytes()) { + t.Errorf("want body to match %s\n%s", re, buf.String()) + } + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry.go b/vendor/github.com/prometheus/client_golang/prometheus/registry.go new file mode 100644 index 000000000..32a3986b0 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry.go @@ -0,0 +1,806 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "bytes" + "errors" + "fmt" + "os" + "sort" + "sync" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +const ( + // Capacity for the channel to collect metrics and descriptors. + capMetricChan = 1000 + capDescChan = 10 +) + +// DefaultRegisterer and DefaultGatherer are the implementations of the +// Registerer and Gatherer interface a number of convenience functions in this +// package act on. Initially, both variables point to the same Registry, which +// has a process collector (see NewProcessCollector) and a Go collector (see +// NewGoCollector) already registered. This approach to keep default instances +// as global state mirrors the approach of other packages in the Go standard +// library. Note that there are caveats. Change the variables with caution and +// only if you understand the consequences. Users who want to avoid global state +// altogether should not use the convenience function and act on custom +// instances instead. +var ( + defaultRegistry = NewRegistry() + DefaultRegisterer Registerer = defaultRegistry + DefaultGatherer Gatherer = defaultRegistry +) + +func init() { + MustRegister(NewProcessCollector(os.Getpid(), "")) + MustRegister(NewGoCollector()) +} + +// NewRegistry creates a new vanilla Registry without any Collectors +// pre-registered. 
func NewRegistry() *Registry {
	return &Registry{
		collectorsByID:  map[uint64]Collector{},
		descIDs:         map[uint64]struct{}{},
		dimHashesByName: map[string]uint64{},
	}
}

// NewPedanticRegistry returns a registry that checks during collection if each
// collected Metric is consistent with its reported Desc, and if the Desc has
// actually been registered with the registry.
//
// Usually, a Registry will be happy as long as the union of all collected
// Metrics is consistent and valid even if some metrics are not consistent with
// their own Desc or a Desc provided by their registered Collector. Well-behaved
// Collectors and Metrics will only provide consistent Descs. This Registry is
// useful to test the implementation of Collectors and Metrics.
func NewPedanticRegistry() *Registry {
	r := NewRegistry()
	r.pedanticChecksEnabled = true
	return r
}

// Registerer is the interface for the part of a registry in charge of
// registering and unregistering. Users of custom registries should use
// Registerer as type for registration purposes (rather than the Registry type
// directly). In that way, they are free to use custom Registerer implementation
// (e.g. for testing purposes).
type Registerer interface {
	// Register registers a new Collector to be included in metrics
	// collection. It returns an error if the descriptors provided by the
	// Collector are invalid or if they — in combination with descriptors of
	// already registered Collectors — do not fulfill the consistency and
	// uniqueness criteria described in the documentation of metric.Desc.
	//
	// If the provided Collector is equal to a Collector already registered
	// (which includes the case of re-registering the same Collector), the
	// returned error is an instance of AlreadyRegisteredError, which
	// contains the previously registered Collector.
	//
	// It is in general not safe to register the same Collector multiple
	// times concurrently.
	Register(Collector) error
	// MustRegister works like Register but registers any number of
	// Collectors and panics upon the first registration that causes an
	// error.
	MustRegister(...Collector)
	// Unregister unregisters the Collector that equals the Collector passed
	// in as an argument. (Two Collectors are considered equal if their
	// Describe method yields the same set of descriptors.) The function
	// returns whether a Collector was unregistered.
	//
	// Note that even after unregistering, it will not be possible to
	// register a new Collector that is inconsistent with the unregistered
	// Collector, e.g. a Collector collecting metrics with the same name but
	// a different help string. The rationale here is that the same registry
	// instance must only collect consistent metrics throughout its
	// lifetime.
	Unregister(Collector) bool
}

// Gatherer is the interface for the part of a registry in charge of gathering
// the collected metrics into a number of MetricFamilies. The Gatherer interface
// comes with the same general implication as described for the Registerer
// interface.
type Gatherer interface {
	// Gather calls the Collect method of the registered Collectors and then
	// gathers the collected metrics into a lexicographically sorted slice
	// of MetricFamily protobufs. Even if an error occurs, Gather attempts
	// to gather as many metrics as possible. Hence, if a non-nil error is
	// returned, the returned MetricFamily slice could be nil (in case of a
	// fatal error that prevented any meaningful metric collection) or
	// contain a number of MetricFamily protobufs, some of which might be
	// incomplete, and some might be missing altogether. The returned error
	// (which might be a MultiError) explains the details. In scenarios
	// where complete collection is critical, the returned MetricFamily
	// protobufs should be disregarded if the returned error is non-nil.
	Gather() ([]*dto.MetricFamily, error)
}

// Register registers the provided Collector with the DefaultRegisterer.
//
// Register is a shortcut for DefaultRegisterer.Register(c). See there for more
// details.
func Register(c Collector) error {
	return DefaultRegisterer.Register(c)
}

// MustRegister registers the provided Collectors with the DefaultRegisterer and
// panics if any error occurs.
//
// MustRegister is a shortcut for DefaultRegisterer.MustRegister(cs...). See
// there for more details.
func MustRegister(cs ...Collector) {
	DefaultRegisterer.MustRegister(cs...)
}

// RegisterOrGet registers the provided Collector with the DefaultRegisterer and
// returns the Collector, unless an equal Collector was registered before, in
// which case that Collector is returned.
//
// Deprecated: RegisterOrGet is merely a convenience function for the
// implementation as described in the documentation for
// AlreadyRegisteredError. As the use case is relatively rare, this function
// will be removed in a future version of this package to clean up the
// namespace.
func RegisterOrGet(c Collector) (Collector, error) {
	if err := Register(c); err != nil {
		if are, ok := err.(AlreadyRegisteredError); ok {
			return are.ExistingCollector, nil
		}
		return nil, err
	}
	return c, nil
}

// MustRegisterOrGet behaves like RegisterOrGet but panics instead of returning
// an error.
//
// Deprecated: This is deprecated for the same reason RegisterOrGet is. See
// there for details.
func MustRegisterOrGet(c Collector) Collector {
	c, err := RegisterOrGet(c)
	if err != nil {
		panic(err)
	}
	return c
}

// Unregister removes the registration of the provided Collector from the
// DefaultRegisterer.
//
// Unregister is a shortcut for DefaultRegisterer.Unregister(c). See there for
// more details.
func Unregister(c Collector) bool {
	return DefaultRegisterer.Unregister(c)
}

// GathererFunc turns a function into a Gatherer.
type GathererFunc func() ([]*dto.MetricFamily, error)

// Gather implements Gatherer.
func (gf GathererFunc) Gather() ([]*dto.MetricFamily, error) {
	return gf()
}

// SetMetricFamilyInjectionHook replaces the DefaultGatherer with one that
// gathers from the previous DefaultGatherers but then merges the MetricFamily
// protobufs returned from the provided hook function with the MetricFamily
// protobufs returned from the original DefaultGatherer.
//
// Deprecated: This function manipulates the DefaultGatherer variable. Consider
// the implications, i.e. don't do this concurrently with any uses of the
// DefaultGatherer. In the rare cases where you need to inject MetricFamily
// protobufs directly, it is recommended to use a custom Registry and combine it
// with a custom Gatherer using the Gatherers type (see
// there). SetMetricFamilyInjectionHook only exists for compatibility reasons
// with previous versions of this package.
func SetMetricFamilyInjectionHook(hook func() []*dto.MetricFamily) {
	// The previous DefaultGatherer is kept as the first element so its
	// metrics win on name collisions (first occurrence in slice order).
	DefaultGatherer = Gatherers{
		DefaultGatherer,
		GathererFunc(func() ([]*dto.MetricFamily, error) { return hook(), nil }),
	}
}

// AlreadyRegisteredError is returned by the Register method if the Collector to
// be registered has already been registered before, or a different Collector
// that collects the same metrics has been registered before. Registration fails
// in that case, but you can detect from the kind of error what has
// happened. The error contains fields for the existing Collector and the
// (rejected) new Collector that equals the existing one. This can be used to
// find out if an equal Collector has been registered before and switch over to
// using the old one, as demonstrated in the example.
+type AlreadyRegisteredError struct { + ExistingCollector, NewCollector Collector +} + +func (err AlreadyRegisteredError) Error() string { + return "duplicate metrics collector registration attempted" +} + +// MultiError is a slice of errors implementing the error interface. It is used +// by a Gatherer to report multiple errors during MetricFamily gathering. +type MultiError []error + +func (errs MultiError) Error() string { + if len(errs) == 0 { + return "" + } + buf := &bytes.Buffer{} + fmt.Fprintf(buf, "%d error(s) occurred:", len(errs)) + for _, err := range errs { + fmt.Fprintf(buf, "\n* %s", err) + } + return buf.String() +} + +// MaybeUnwrap returns nil if len(errs) is 0. It returns the first and only +// contained error as error if len(errs is 1). In all other cases, it returns +// the MultiError directly. This is helpful for returning a MultiError in a way +// that only uses the MultiError if needed. +func (errs MultiError) MaybeUnwrap() error { + switch len(errs) { + case 0: + return nil + case 1: + return errs[0] + default: + return errs + } +} + +// Registry registers Prometheus collectors, collects their metrics, and gathers +// them into MetricFamilies for exposition. It implements both Registerer and +// Gatherer. The zero value is not usable. Create instances with NewRegistry or +// NewPedanticRegistry. +type Registry struct { + mtx sync.RWMutex + collectorsByID map[uint64]Collector // ID is a hash of the descIDs. + descIDs map[uint64]struct{} + dimHashesByName map[string]uint64 + pedanticChecksEnabled bool +} + +// Register implements Registerer. +func (r *Registry) Register(c Collector) error { + var ( + descChan = make(chan *Desc, capDescChan) + newDescIDs = map[uint64]struct{}{} + newDimHashesByName = map[string]uint64{} + collectorID uint64 // Just a sum of all desc IDs. + duplicateDescErr error + ) + go func() { + c.Describe(descChan) + close(descChan) + }() + r.mtx.Lock() + defer r.mtx.Unlock() + // Coduct various tests... 
+ for desc := range descChan { + + // Is the descriptor valid at all? + if desc.err != nil { + return fmt.Errorf("descriptor %s is invalid: %s", desc, desc.err) + } + + // Is the descID unique? + // (In other words: Is the fqName + constLabel combination unique?) + if _, exists := r.descIDs[desc.id]; exists { + duplicateDescErr = fmt.Errorf("descriptor %s already exists with the same fully-qualified name and const label values", desc) + } + // If it is not a duplicate desc in this collector, add it to + // the collectorID. (We allow duplicate descs within the same + // collector, but their existence must be a no-op.) + if _, exists := newDescIDs[desc.id]; !exists { + newDescIDs[desc.id] = struct{}{} + collectorID += desc.id + } + + // Are all the label names and the help string consistent with + // previous descriptors of the same name? + // First check existing descriptors... + if dimHash, exists := r.dimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("a previously registered descriptor with the same fully-qualified name as %s has different label names or a different help string", desc) + } + } else { + // ...then check the new descriptors already seen. + if dimHash, exists := newDimHashesByName[desc.fqName]; exists { + if dimHash != desc.dimHash { + return fmt.Errorf("descriptors reported by collector have inconsistent label names or help strings for the same fully-qualified name, offender is %s", desc) + } + } else { + newDimHashesByName[desc.fqName] = desc.dimHash + } + } + } + // Did anything happen at all? + if len(newDescIDs) == 0 { + return errors.New("collector has no descriptors") + } + if existing, exists := r.collectorsByID[collectorID]; exists { + return AlreadyRegisteredError{ + ExistingCollector: existing, + NewCollector: c, + } + } + // If the collectorID is new, but at least one of the descs existed + // before, we are in trouble. 
+ if duplicateDescErr != nil { + return duplicateDescErr + } + + // Only after all tests have passed, actually register. + r.collectorsByID[collectorID] = c + for hash := range newDescIDs { + r.descIDs[hash] = struct{}{} + } + for name, dimHash := range newDimHashesByName { + r.dimHashesByName[name] = dimHash + } + return nil +} + +// Unregister implements Registerer. +func (r *Registry) Unregister(c Collector) bool { + var ( + descChan = make(chan *Desc, capDescChan) + descIDs = map[uint64]struct{}{} + collectorID uint64 // Just a sum of the desc IDs. + ) + go func() { + c.Describe(descChan) + close(descChan) + }() + for desc := range descChan { + if _, exists := descIDs[desc.id]; !exists { + collectorID += desc.id + descIDs[desc.id] = struct{}{} + } + } + + r.mtx.RLock() + if _, exists := r.collectorsByID[collectorID]; !exists { + r.mtx.RUnlock() + return false + } + r.mtx.RUnlock() + + r.mtx.Lock() + defer r.mtx.Unlock() + + delete(r.collectorsByID, collectorID) + for id := range descIDs { + delete(r.descIDs, id) + } + // dimHashesByName is left untouched as those must be consistent + // throughout the lifetime of a program. + return true +} + +// MustRegister implements Registerer. +func (r *Registry) MustRegister(cs ...Collector) { + for _, c := range cs { + if err := r.Register(c); err != nil { + panic(err) + } + } +} + +// Gather implements Gatherer. +func (r *Registry) Gather() ([]*dto.MetricFamily, error) { + var ( + metricChan = make(chan Metric, capMetricChan) + metricHashes = map[uint64]struct{}{} + dimHashes = map[string]uint64{} + wg sync.WaitGroup + errs MultiError // The collected errors to return in the end. + registeredDescIDs map[uint64]struct{} // Only used for pedantic checks + ) + + r.mtx.RLock() + metricFamiliesByName := make(map[string]*dto.MetricFamily, len(r.dimHashesByName)) + + // Scatter. + // (Collectors could be complex and slow, so we call them all at once.) 
+ wg.Add(len(r.collectorsByID)) + go func() { + wg.Wait() + close(metricChan) + }() + for _, collector := range r.collectorsByID { + go func(collector Collector) { + defer wg.Done() + collector.Collect(metricChan) + }(collector) + } + + // In case pedantic checks are enabled, we have to copy the map before + // giving up the RLock. + if r.pedanticChecksEnabled { + registeredDescIDs = make(map[uint64]struct{}, len(r.descIDs)) + for id := range r.descIDs { + registeredDescIDs[id] = struct{}{} + } + } + + r.mtx.RUnlock() + + // Drain metricChan in case of premature return. + defer func() { + for _ = range metricChan { + } + }() + + // Gather. + for metric := range metricChan { + // This could be done concurrently, too, but it required locking + // of metricFamiliesByName (and of metricHashes if checks are + // enabled). Most likely not worth it. + desc := metric.Desc() + dtoMetric := &dto.Metric{} + if err := metric.Write(dtoMetric); err != nil { + errs = append(errs, fmt.Errorf( + "error collecting metric %v: %s", desc, err, + )) + continue + } + metricFamily, ok := metricFamiliesByName[desc.fqName] + if ok { + if metricFamily.GetHelp() != desc.help { + errs = append(errs, fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + desc.fqName, dtoMetric, desc.help, metricFamily.GetHelp(), + )) + continue + } + // TODO(beorn7): Simplify switch once Desc has type. 
+ switch metricFamily.GetType() { + case dto.MetricType_COUNTER: + if dtoMetric.Counter == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Counter", + desc.fqName, dtoMetric, + )) + continue + } + case dto.MetricType_GAUGE: + if dtoMetric.Gauge == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Gauge", + desc.fqName, dtoMetric, + )) + continue + } + case dto.MetricType_SUMMARY: + if dtoMetric.Summary == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Summary", + desc.fqName, dtoMetric, + )) + continue + } + case dto.MetricType_UNTYPED: + if dtoMetric.Untyped == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be Untyped", + desc.fqName, dtoMetric, + )) + continue + } + case dto.MetricType_HISTOGRAM: + if dtoMetric.Histogram == nil { + errs = append(errs, fmt.Errorf( + "collected metric %s %s should be a Histogram", + desc.fqName, dtoMetric, + )) + continue + } + default: + panic("encountered MetricFamily with invalid type") + } + } else { + metricFamily = &dto.MetricFamily{} + metricFamily.Name = proto.String(desc.fqName) + metricFamily.Help = proto.String(desc.help) + // TODO(beorn7): Simplify switch once Desc has type. 
+ switch { + case dtoMetric.Gauge != nil: + metricFamily.Type = dto.MetricType_GAUGE.Enum() + case dtoMetric.Counter != nil: + metricFamily.Type = dto.MetricType_COUNTER.Enum() + case dtoMetric.Summary != nil: + metricFamily.Type = dto.MetricType_SUMMARY.Enum() + case dtoMetric.Untyped != nil: + metricFamily.Type = dto.MetricType_UNTYPED.Enum() + case dtoMetric.Histogram != nil: + metricFamily.Type = dto.MetricType_HISTOGRAM.Enum() + default: + errs = append(errs, fmt.Errorf( + "empty metric collected: %s", dtoMetric, + )) + continue + } + metricFamiliesByName[desc.fqName] = metricFamily + } + if err := checkMetricConsistency(metricFamily, dtoMetric, metricHashes, dimHashes); err != nil { + errs = append(errs, err) + continue + } + if r.pedanticChecksEnabled { + // Is the desc registered at all? + if _, exist := registeredDescIDs[desc.id]; !exist { + errs = append(errs, fmt.Errorf( + "collected metric %s %s with unregistered descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + )) + continue + } + if err := checkDescConsistency(metricFamily, dtoMetric, desc); err != nil { + errs = append(errs, err) + continue + } + } + metricFamily.Metric = append(metricFamily.Metric, dtoMetric) + } + return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap() +} + +// Gatherers is a slice of Gatherer instances that implements the Gatherer +// interface itself. Its Gather method calls Gather on all Gatherers in the +// slice in order and returns the merged results. Errors returned from the +// Gather calles are all returned in a flattened MultiError. Duplicate and +// inconsistent Metrics are skipped (first occurrence in slice order wins) and +// reported in the returned error. +// +// Gatherers can be used to merge the Gather results from multiple +// Registries. 
// It also provides a way to directly inject existing MetricFamily
// protobufs into the gathering by creating a custom Gatherer with a Gather
// method that simply returns the existing MetricFamily protobufs. Note that no
// registration is involved (in contrast to Collector registration), so
// obviously registration-time checks cannot happen. Any inconsistencies between
// the gathered MetricFamilies are reported as errors by the Gather method, and
// inconsistent Metrics are dropped. Invalid parts of the MetricFamilies
// (e.g. syntactically invalid metric or label names) will go undetected.
type Gatherers []Gatherer

// Gather implements Gatherer.
func (gs Gatherers) Gather() ([]*dto.MetricFamily, error) {
	var (
		metricFamiliesByName = map[string]*dto.MetricFamily{}
		metricHashes         = map[uint64]struct{}{}
		dimHashes            = map[string]uint64{}
		errs                 MultiError // The collected errors to return in the end.
	)

	for i, g := range gs {
		mfs, err := g.Gather()
		if err != nil {
			// Flatten a nested MultiError so the caller sees one
			// flat list, each entry tagged with its Gatherer index.
			if multiErr, ok := err.(MultiError); ok {
				for _, err := range multiErr {
					errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
				}
			} else {
				errs = append(errs, fmt.Errorf("[from Gatherer #%d] %s", i+1, err))
			}
		}
		for _, mf := range mfs {
			existingMF, exists := metricFamiliesByName[mf.GetName()]
			if exists {
				// Name collision: help and type must agree with
				// the family gathered first.
				if existingMF.GetHelp() != mf.GetHelp() {
					errs = append(errs, fmt.Errorf(
						"gathered metric family %s has help %q but should have %q",
						mf.GetName(), mf.GetHelp(), existingMF.GetHelp(),
					))
					continue
				}
				if existingMF.GetType() != mf.GetType() {
					errs = append(errs, fmt.Errorf(
						"gathered metric family %s has type %s but should have %s",
						mf.GetName(), mf.GetType(), existingMF.GetType(),
					))
					continue
				}
			} else {
				existingMF = &dto.MetricFamily{}
				existingMF.Name = mf.Name
				existingMF.Help = mf.Help
				existingMF.Type = mf.Type
				metricFamiliesByName[mf.GetName()] = existingMF
			}
			for _, m := range mf.Metric {
				if err := checkMetricConsistency(existingMF, m, metricHashes, dimHashes); err != nil {
					errs = append(errs, err)
					continue
				}
				existingMF.Metric = append(existingMF.Metric, m)
			}
		}
	}
	return normalizeMetricFamilies(metricFamiliesByName), errs.MaybeUnwrap()
}

// metricSorter is a sortable slice of *dto.Metric.
type metricSorter []*dto.Metric

func (s metricSorter) Len() int {
	return len(s)
}

func (s metricSorter) Swap(i, j int) {
	s[i], s[j] = s[j], s[i]
}

func (s metricSorter) Less(i, j int) bool {
	if len(s[i].Label) != len(s[j].Label) {
		// This should not happen. The metrics are
		// inconsistent. However, we have to deal with the fact, as
		// people might use custom collectors or metric family injection
		// to create inconsistent metrics. So let's simply compare the
		// number of labels in this case. That will still yield
		// reproducible sorting.
		return len(s[i].Label) < len(s[j].Label)
	}
	// Compare label values pairwise; label pairs are assumed to be sorted
	// by name already (see checkMetricConsistency) — TODO confirm for
	// metrics that bypass the consistency check.
	for n, lp := range s[i].Label {
		vi := lp.GetValue()
		vj := s[j].Label[n].GetValue()
		if vi != vj {
			return vi < vj
		}
	}

	// We should never arrive here. Multiple metrics with the same
	// label set in the same scrape will lead to undefined ingestion
	// behavior. However, as above, we have to provide stable sorting
	// here, even for inconsistent metrics. So sort equal metrics
	// by their timestamp, with missing timestamps (implying "now")
	// coming last.
	if s[i].TimestampMs == nil {
		return false
	}
	if s[j].TimestampMs == nil {
		return true
	}
	return s[i].GetTimestampMs() < s[j].GetTimestampMs()
}

// normalizeMetricFamilies returns a MetricFamily slice with empty
// MetricFamilies pruned and the remaining MetricFamilies sorted by name within
// the slice, with the contained Metrics sorted within each MetricFamily.
+func normalizeMetricFamilies(metricFamiliesByName map[string]*dto.MetricFamily) []*dto.MetricFamily { + for _, mf := range metricFamiliesByName { + sort.Sort(metricSorter(mf.Metric)) + } + names := make([]string, 0, len(metricFamiliesByName)) + for name, mf := range metricFamiliesByName { + if len(mf.Metric) > 0 { + names = append(names, name) + } + } + sort.Strings(names) + result := make([]*dto.MetricFamily, 0, len(names)) + for _, name := range names { + result = append(result, metricFamiliesByName[name]) + } + return result +} + +// checkMetricConsistency checks if the provided Metric is consistent with the +// provided MetricFamily. It also hashed the Metric labels and the MetricFamily +// name. If the resulting hash is alread in the provided metricHashes, an error +// is returned. If not, it is added to metricHashes. The provided dimHashes maps +// MetricFamily names to their dimHash (hashed sorted label names). If dimHashes +// doesn't yet contain a hash for the provided MetricFamily, it is +// added. Otherwise, an error is returned if the existing dimHashes in not equal +// the calculated dimHash. +func checkMetricConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + metricHashes map[uint64]struct{}, + dimHashes map[string]uint64, +) error { + // Type consistency with metric family. + if metricFamily.GetType() == dto.MetricType_GAUGE && dtoMetric.Gauge == nil || + metricFamily.GetType() == dto.MetricType_COUNTER && dtoMetric.Counter == nil || + metricFamily.GetType() == dto.MetricType_SUMMARY && dtoMetric.Summary == nil || + metricFamily.GetType() == dto.MetricType_HISTOGRAM && dtoMetric.Histogram == nil || + metricFamily.GetType() == dto.MetricType_UNTYPED && dtoMetric.Untyped == nil { + return fmt.Errorf( + "collected metric %s %s is not a %s", + metricFamily.GetName(), dtoMetric, metricFamily.GetType(), + ) + } + + // Is the metric unique (i.e. no other metric with the same name and the same label values)? 
+ h := hashNew() + h = hashAdd(h, metricFamily.GetName()) + h = hashAddByte(h, separatorByte) + dh := hashNew() + // Make sure label pairs are sorted. We depend on it for the consistency + // check. + sort.Sort(LabelPairSorter(dtoMetric.Label)) + for _, lp := range dtoMetric.Label { + h = hashAdd(h, lp.GetValue()) + h = hashAddByte(h, separatorByte) + dh = hashAdd(dh, lp.GetName()) + dh = hashAddByte(dh, separatorByte) + } + if _, exists := metricHashes[h]; exists { + return fmt.Errorf( + "collected metric %s %s was collected before with the same name and label values", + metricFamily.GetName(), dtoMetric, + ) + } + if dimHash, ok := dimHashes[metricFamily.GetName()]; ok { + if dimHash != dh { + return fmt.Errorf( + "collected metric %s %s has label dimensions inconsistent with previously collected metrics in the same metric family", + metricFamily.GetName(), dtoMetric, + ) + } + } else { + dimHashes[metricFamily.GetName()] = dh + } + metricHashes[h] = struct{}{} + return nil +} + +func checkDescConsistency( + metricFamily *dto.MetricFamily, + dtoMetric *dto.Metric, + desc *Desc, +) error { + // Desc help consistency with metric family help. + if metricFamily.GetHelp() != desc.help { + return fmt.Errorf( + "collected metric %s %s has help %q but should have %q", + metricFamily.GetName(), dtoMetric, metricFamily.GetHelp(), desc.help, + ) + } + + // Is the desc consistent with the content of the metric? + lpsFromDesc := make([]*dto.LabelPair, 0, len(dtoMetric.Label)) + lpsFromDesc = append(lpsFromDesc, desc.constLabelPairs...) 
+ for _, l := range desc.variableLabels { + lpsFromDesc = append(lpsFromDesc, &dto.LabelPair{ + Name: proto.String(l), + }) + } + if len(lpsFromDesc) != len(dtoMetric.Label) { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + sort.Sort(LabelPairSorter(lpsFromDesc)) + for i, lpFromDesc := range lpsFromDesc { + lpFromMetric := dtoMetric.Label[i] + if lpFromDesc.GetName() != lpFromMetric.GetName() || + lpFromDesc.Value != nil && lpFromDesc.GetValue() != lpFromMetric.GetValue() { + return fmt.Errorf( + "labels in collected metric %s %s are inconsistent with descriptor %s", + metricFamily.GetName(), dtoMetric, desc, + ) + } + } + return nil +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go b/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go new file mode 100644 index 000000000..9dacb6256 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/registry_test.go @@ -0,0 +1,545 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Copyright (c) 2013, The Prometheus Authors +// All rights reserved. +// +// Use of this source code is governed by a BSD-style license that can be found +// in the LICENSE file. 
+ +package prometheus_test + +import ( + "bytes" + "net/http" + "net/http/httptest" + "testing" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/expfmt" + + "github.com/prometheus/client_golang/prometheus" + "github.com/prometheus/client_golang/prometheus/promhttp" +) + +func testHandler(t testing.TB) { + + metricVec := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "name", + Help: "docstring", + ConstLabels: prometheus.Labels{"constname": "constvalue"}, + }, + []string{"labelname"}, + ) + + metricVec.WithLabelValues("val1").Inc() + metricVec.WithLabelValues("val2").Inc() + + externalMetricFamily := &dto.MetricFamily{ + Name: proto.String("externalname"), + Help: proto.String("externaldocstring"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("externalconstname"), + Value: proto.String("externalconstvalue"), + }, + { + Name: proto.String("externallabelname"), + Value: proto.String("externalval1"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + }, + } + externalBuf := &bytes.Buffer{} + enc := expfmt.NewEncoder(externalBuf, expfmt.FmtProtoDelim) + if err := enc.Encode(externalMetricFamily); err != nil { + t.Fatal(err) + } + externalMetricFamilyAsBytes := externalBuf.Bytes() + externalMetricFamilyAsText := []byte(`# HELP externalname externaldocstring +# TYPE externalname counter +externalname{externalconstname="externalconstvalue",externallabelname="externalval1"} 1 +`) + externalMetricFamilyAsProtoText := []byte(`name: "externalname" +help: "externaldocstring" +type: COUNTER +metric: < + label: < + name: "externalconstname" + value: "externalconstvalue" + > + label: < + name: "externallabelname" + value: "externalval1" + > + counter: < + value: 1 + > +> + +`) + externalMetricFamilyAsProtoCompactText := []byte(`name:"externalname" help:"externaldocstring" type:COUNTER metric: label: 
counter: > +`) + + expectedMetricFamily := &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("docstring"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("constvalue"), + }, + { + Name: proto.String("labelname"), + Value: proto.String("val1"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("constvalue"), + }, + { + Name: proto.String("labelname"), + Value: proto.String("val2"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(1), + }, + }, + }, + } + buf := &bytes.Buffer{} + enc = expfmt.NewEncoder(buf, expfmt.FmtProtoDelim) + if err := enc.Encode(expectedMetricFamily); err != nil { + t.Fatal(err) + } + expectedMetricFamilyAsBytes := buf.Bytes() + expectedMetricFamilyAsText := []byte(`# HELP name docstring +# TYPE name counter +name{constname="constvalue",labelname="val1"} 1 +name{constname="constvalue",labelname="val2"} 1 +`) + expectedMetricFamilyAsProtoText := []byte(`name: "name" +help: "docstring" +type: COUNTER +metric: < + label: < + name: "constname" + value: "constvalue" + > + label: < + name: "labelname" + value: "val1" + > + counter: < + value: 1 + > +> +metric: < + label: < + name: "constname" + value: "constvalue" + > + label: < + name: "labelname" + value: "val2" + > + counter: < + value: 1 + > +> + +`) + expectedMetricFamilyAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric: label: counter: > metric: label: counter: > +`) + + externalMetricFamilyWithSameName := &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("docstring"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + { + Label: []*dto.LabelPair{ + { + Name: proto.String("constname"), + Value: proto.String("constvalue"), + }, + { + Name: proto.String("labelname"), + Value: 
proto.String("different_val"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(42), + }, + }, + }, + } + + expectedMetricFamilyMergedWithExternalAsProtoCompactText := []byte(`name:"name" help:"docstring" type:COUNTER metric: label: counter: > metric: label: counter: > metric: label: counter: > +`) + + type output struct { + headers map[string]string + body []byte + } + + var scenarios = []struct { + headers map[string]string + out output + collector prometheus.Collector + externalMF []*dto.MetricFamily + }{ + { // 0 + headers: map[string]string{ + "Accept": "foo/bar;q=0.2, dings/bums;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: []byte{}, + }, + }, + { // 1 + headers: map[string]string{ + "Accept": "foo/bar;q=0.2, application/quark;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: []byte{}, + }, + }, + { // 2 + headers: map[string]string{ + "Accept": "foo/bar;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: []byte{}, + }, + }, + { // 3 + headers: map[string]string{ + "Accept": "text/plain;q=0.2, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.8", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: []byte{}, + }, + }, + { // 4 + headers: map[string]string{ + "Accept": "application/json", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: expectedMetricFamilyAsText, + }, + collector: metricVec, + }, + { // 5 + headers: map[string]string{ + "Accept": 
"application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: expectedMetricFamilyAsBytes, + }, + collector: metricVec, + }, + { // 6 + headers: map[string]string{ + "Accept": "application/json", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: externalMetricFamilyAsText, + }, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 7 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: externalMetricFamilyAsBytes, + }, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 8 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsBytes, + expectedMetricFamilyAsBytes, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 9 + headers: map[string]string{ + "Accept": "text/plain", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: []byte{}, + }, + }, + { // 10 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; 
version=0.0.4`, + }, + body: expectedMetricFamilyAsText, + }, + collector: metricVec, + }, + { // 11 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=bla;q=0.2, text/plain;q=0.5;version=0.0.4", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `text/plain; version=0.0.4`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsText, + expectedMetricFamilyAsText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 12 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.2, text/plain;q=0.5;version=0.0.2", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=delimited`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsBytes, + expectedMetricFamilyAsBytes, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 13 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=text;q=0.5, application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=delimited;q=0.4", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=text`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsProtoText, + expectedMetricFamilyAsProtoText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 14 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", + }, + out: output{ + headers: map[string]string{ + "Content-Type": 
`application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsProtoCompactText, + expectedMetricFamilyAsProtoCompactText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{externalMetricFamily}, + }, + { // 15 + headers: map[string]string{ + "Accept": "application/vnd.google.protobuf;proto=io.prometheus.client.MetricFamily;encoding=compact-text", + }, + out: output{ + headers: map[string]string{ + "Content-Type": `application/vnd.google.protobuf; proto=io.prometheus.client.MetricFamily; encoding=compact-text`, + }, + body: bytes.Join( + [][]byte{ + externalMetricFamilyAsProtoCompactText, + expectedMetricFamilyMergedWithExternalAsProtoCompactText, + }, + []byte{}, + ), + }, + collector: metricVec, + externalMF: []*dto.MetricFamily{ + externalMetricFamily, + externalMetricFamilyWithSameName, + }, + }, + } + for i, scenario := range scenarios { + registry := prometheus.NewPedanticRegistry() + gatherer := prometheus.Gatherer(registry) + if scenario.externalMF != nil { + gatherer = prometheus.Gatherers{ + registry, + prometheus.GathererFunc(func() ([]*dto.MetricFamily, error) { + return scenario.externalMF, nil + }), + } + } + + if scenario.collector != nil { + registry.Register(scenario.collector) + } + writer := httptest.NewRecorder() + handler := prometheus.InstrumentHandler("prometheus", promhttp.HandlerFor(gatherer, promhttp.HandlerOpts{})) + request, _ := http.NewRequest("GET", "/", nil) + for key, value := range scenario.headers { + request.Header.Add(key, value) + } + handler(writer, request) + + for key, value := range scenario.out.headers { + if writer.HeaderMap.Get(key) != value { + t.Errorf( + "%d. expected %q for header %q, got %q", + i, value, key, writer.Header().Get(key), + ) + } + } + + if !bytes.Equal(scenario.out.body, writer.Body.Bytes()) { + t.Errorf( + "%d. 
expected body:\n%s\ngot body:\n%s\n", + i, scenario.out.body, writer.Body.Bytes(), + ) + } + } +} + +func TestHandler(t *testing.T) { + testHandler(t) +} + +func BenchmarkHandler(b *testing.B) { + for i := 0; i < b.N; i++ { + testHandler(b) + } +} + +func TestRegisterWithOrGet(t *testing.T) { + // Replace the default registerer just to be sure. This is bad, but this + // whole test will go away once RegisterOrGet is removed. + oldRegisterer := prometheus.DefaultRegisterer + defer func() { + prometheus.DefaultRegisterer = oldRegisterer + }() + prometheus.DefaultRegisterer = prometheus.NewRegistry() + original := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "test", + Help: "help", + }, + []string{"foo", "bar"}, + ) + equalButNotSame := prometheus.NewCounterVec( + prometheus.CounterOpts{ + Name: "test", + Help: "help", + }, + []string{"foo", "bar"}, + ) + if err := prometheus.Register(original); err != nil { + t.Fatal(err) + } + if err := prometheus.Register(equalButNotSame); err == nil { + t.Fatal("expected error when registringe equal collector") + } + existing, err := prometheus.RegisterOrGet(equalButNotSame) + if err != nil { + t.Fatal(err) + } + if existing != original { + t.Error("expected original collector but got something else") + } + if existing == equalButNotSame { + t.Error("expected original callector but got new one") + } +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary.go b/vendor/github.com/prometheus/client_golang/prometheus/summary.go new file mode 100644 index 000000000..bce05bf9a --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary.go @@ -0,0 +1,534 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "math" + "sort" + "sync" + "time" + + "github.com/beorn7/perks/quantile" + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +// quantileLabel is used for the label that defines the quantile in a +// summary. +const quantileLabel = "quantile" + +// A Summary captures individual observations from an event or sample stream and +// summarizes them in a manner similar to traditional summary statistics: 1. sum +// of observations, 2. observation count, 3. rank estimations. +// +// A typical use-case is the observation of request latencies. By default, a +// Summary provides the median, the 90th and the 99th percentile of the latency +// as rank estimations. +// +// Note that the rank estimations cannot be aggregated in a meaningful way with +// the Prometheus query language (i.e. you cannot average or add them). If you +// need aggregatable quantiles (e.g. you want the 99th percentile latency of all +// queries served across all instances of a service), consider the Histogram +// metric type. See the Prometheus documentation for more details. +// +// To create Summary instances, use NewSummary. +type Summary interface { + Metric + Collector + + // Observe adds a single observation to the summary. + Observe(float64) +} + +// DefObjectives are the default Summary quantile values. 
+var ( + DefObjectives = map[float64]float64{0.5: 0.05, 0.9: 0.01, 0.99: 0.001} + + errQuantileLabelNotAllowed = fmt.Errorf( + "%q is not allowed as label name in summaries", quantileLabel, + ) +) + +// Default values for SummaryOpts. +const ( + // DefMaxAge is the default duration for which observations stay + // relevant. + DefMaxAge time.Duration = 10 * time.Minute + // DefAgeBuckets is the default number of buckets used to calculate the + // age of observations. + DefAgeBuckets = 5 + // DefBufCap is the standard buffer size for collecting Summary observations. + DefBufCap = 500 +) + +// SummaryOpts bundles the options for creating a Summary metric. It is +// mandatory to set Name and Help to a non-empty string. All other fields are +// optional and can safely be left at their zero value. +type SummaryOpts struct { + // Namespace, Subsystem, and Name are components of the fully-qualified + // name of the Summary (created by joining these components with + // "_"). Only Name is mandatory, the others merely help structuring the + // name. Note that the fully-qualified name of the Summary must be a + // valid Prometheus metric name. + Namespace string + Subsystem string + Name string + + // Help provides information about this Summary. Mandatory! + // + // Metrics with the same fully-qualified name must have the same Help + // string. + Help string + + // ConstLabels are used to attach fixed labels to this + // Summary. Summaries with the same fully-qualified name must have the + // same label names in their ConstLabels. + // + // Note that in most cases, labels have a value that varies during the + // lifetime of a process. Those labels are usually managed with a + // SummaryVec. ConstLabels serve only special purposes. One is for the + // special case where the value of a label does not change during the + // lifetime of a process, e.g. if the revision of the running binary is + // put into a label. 
Another, more advanced purpose is if more than one + // Collector needs to collect Summaries with the same fully-qualified + // name. In that case, those Summaries must differ in the values of + // their ConstLabels. See the Collector examples. + // + // If the value of a label never changes (not even between binaries), + // that label most likely should not be a label at all (but part of the + // metric name). + ConstLabels Labels + + // Objectives defines the quantile rank estimates with their respective + // absolute error. If Objectives[q] = e, then the value reported + // for q will be the φ-quantile value for some φ between q-e and q+e. + // The default value is DefObjectives. + Objectives map[float64]float64 + + // MaxAge defines the duration for which an observation stays relevant + // for the summary. Must be positive. The default value is DefMaxAge. + MaxAge time.Duration + + // AgeBuckets is the number of buckets used to exclude observations that + // are older than MaxAge from the summary. A higher number has a + // resource penalty, so only increase it if the higher resolution is + // really required. For very high observation rates, you might want to + // reduce the number of age buckets. With only one age bucket, you will + // effectively see a complete reset of the summary each time MaxAge has + // passed. The default value is DefAgeBuckets. + AgeBuckets uint32 + + // BufCap defines the default sample stream buffer size. The default + // value of DefBufCap should suffice for most uses. If there is a need + // to increase the value, a multiple of 500 is recommended (because that + // is the internal buffer size of the underlying package + // "github.com/bmizerany/perks/quantile"). + BufCap uint32 +} + +// Great fuck-up with the sliding-window decay algorithm... 
The Merge method of +// perk/quantile is actually not working as advertised - and it might be +// unfixable, as the underlying algorithm is apparently not capable of merging +// summaries in the first place. To avoid using Merge, we are currently adding +// observations to _each_ age bucket, i.e. the effort to add a sample is +// essentially multiplied by the number of age buckets. When rotating age +// buckets, we empty the previous head stream. On scrape time, we simply take +// the quantiles from the head stream (no merging required). Result: More effort +// on observation time, less effort on scrape time, which is exactly the +// opposite of what we try to accomplish, but at least the results are correct. +// +// The quite elegant previous contraption to merge the age buckets efficiently +// on scrape time (see code up commit 6b9530d72ea715f0ba612c0120e6e09fbf1d49d0) +// can't be used anymore. + +// NewSummary creates a new Summary based on the provided SummaryOpts. +func NewSummary(opts SummaryOpts) Summary { + return newSummary( + NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), + opts, + ) +} + +func newSummary(desc *Desc, opts SummaryOpts, labelValues ...string) Summary { + if len(desc.variableLabels) != len(labelValues) { + panic(errInconsistentCardinality) + } + + for _, n := range desc.variableLabels { + if n == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + for _, lp := range desc.constLabelPairs { + if lp.GetName() == quantileLabel { + panic(errQuantileLabelNotAllowed) + } + } + + if len(opts.Objectives) == 0 { + opts.Objectives = DefObjectives + } + + if opts.MaxAge < 0 { + panic(fmt.Errorf("illegal max age MaxAge=%v", opts.MaxAge)) + } + if opts.MaxAge == 0 { + opts.MaxAge = DefMaxAge + } + + if opts.AgeBuckets == 0 { + opts.AgeBuckets = DefAgeBuckets + } + + if opts.BufCap == 0 { + opts.BufCap = DefBufCap + } + + s := &summary{ + desc: desc, + + objectives: 
opts.Objectives, + sortedObjectives: make([]float64, 0, len(opts.Objectives)), + + labelPairs: makeLabelPairs(desc, labelValues), + + hotBuf: make([]float64, 0, opts.BufCap), + coldBuf: make([]float64, 0, opts.BufCap), + streamDuration: opts.MaxAge / time.Duration(opts.AgeBuckets), + } + s.headStreamExpTime = time.Now().Add(s.streamDuration) + s.hotBufExpTime = s.headStreamExpTime + + for i := uint32(0); i < opts.AgeBuckets; i++ { + s.streams = append(s.streams, s.newStream()) + } + s.headStream = s.streams[0] + + for qu := range s.objectives { + s.sortedObjectives = append(s.sortedObjectives, qu) + } + sort.Float64s(s.sortedObjectives) + + s.init(s) // Init self-collection. + return s +} + +type summary struct { + selfCollector + + bufMtx sync.Mutex // Protects hotBuf and hotBufExpTime. + mtx sync.Mutex // Protects every other moving part. + // Lock bufMtx before mtx if both are needed. + + desc *Desc + + objectives map[float64]float64 + sortedObjectives []float64 + + labelPairs []*dto.LabelPair + + sum float64 + cnt uint64 + + hotBuf, coldBuf []float64 + + streams []*quantile.Stream + streamDuration time.Duration + headStream *quantile.Stream + headStreamIdx int + headStreamExpTime, hotBufExpTime time.Time +} + +func (s *summary) Desc() *Desc { + return s.desc +} + +func (s *summary) Observe(v float64) { + s.bufMtx.Lock() + defer s.bufMtx.Unlock() + + now := time.Now() + if now.After(s.hotBufExpTime) { + s.asyncFlush(now) + } + s.hotBuf = append(s.hotBuf, v) + if len(s.hotBuf) == cap(s.hotBuf) { + s.asyncFlush(now) + } +} + +func (s *summary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.objectives)) + + s.bufMtx.Lock() + s.mtx.Lock() + // Swap bufs even if hotBuf is empty to set new hotBufExpTime. 
+ s.swapBufs(time.Now()) + s.bufMtx.Unlock() + + s.flushColdBuf() + sum.SampleCount = proto.Uint64(s.cnt) + sum.SampleSum = proto.Float64(s.sum) + + for _, rank := range s.sortedObjectives { + var q float64 + if s.headStream.Count() == 0 { + q = math.NaN() + } else { + q = s.headStream.Query(rank) + } + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + s.mtx.Unlock() + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + return nil +} + +func (s *summary) newStream() *quantile.Stream { + return quantile.NewTargeted(s.objectives) +} + +// asyncFlush needs bufMtx locked. +func (s *summary) asyncFlush(now time.Time) { + s.mtx.Lock() + s.swapBufs(now) + + // Unblock the original goroutine that was responsible for the mutation + // that triggered the compaction. But hold onto the global non-buffer + // state mutex until the operation finishes. + go func() { + s.flushColdBuf() + s.mtx.Unlock() + }() +} + +// rotateStreams needs mtx AND bufMtx locked. +func (s *summary) maybeRotateStreams() { + for !s.hotBufExpTime.Equal(s.headStreamExpTime) { + s.headStream.Reset() + s.headStreamIdx++ + if s.headStreamIdx >= len(s.streams) { + s.headStreamIdx = 0 + } + s.headStream = s.streams[s.headStreamIdx] + s.headStreamExpTime = s.headStreamExpTime.Add(s.streamDuration) + } +} + +// flushColdBuf needs mtx locked. +func (s *summary) flushColdBuf() { + for _, v := range s.coldBuf { + for _, stream := range s.streams { + stream.Insert(v) + } + s.cnt++ + s.sum += v + } + s.coldBuf = s.coldBuf[0:0] + s.maybeRotateStreams() +} + +// swapBufs needs mtx AND bufMtx locked, coldBuf must be empty. +func (s *summary) swapBufs(now time.Time) { + if len(s.coldBuf) != 0 { + panic("coldBuf is not empty") + } + s.hotBuf, s.coldBuf = s.coldBuf, s.hotBuf + // hotBuf is now empty and gets new expiration set. 
+ for now.After(s.hotBufExpTime) { + s.hotBufExpTime = s.hotBufExpTime.Add(s.streamDuration) + } +} + +type quantSort []*dto.Quantile + +func (s quantSort) Len() int { + return len(s) +} + +func (s quantSort) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +func (s quantSort) Less(i, j int) bool { + return s[i].GetQuantile() < s[j].GetQuantile() +} + +// SummaryVec is a Collector that bundles a set of Summaries that all share the +// same Desc, but have different values for their variable labels. This is used +// if you want to count the same thing partitioned by various dimensions +// (e.g. HTTP request latencies, partitioned by status code and method). Create +// instances with NewSummaryVec. +type SummaryVec struct { + *MetricVec +} + +// NewSummaryVec creates a new SummaryVec based on the provided SummaryOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewSummaryVec(opts SummaryOpts, labelNames []string) *SummaryVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &SummaryVec{ + MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newSummary(desc, opts, lvs...) + }), + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns a Summary and not a +// Metric so that no type conversion is required. +func (m *SummaryVec) GetMetricWithLabelValues(lvs ...string) (Summary, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Summary), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns a Summary and not a Metric so that no +// type conversion is required. 
+func (m *SummaryVec) GetMetricWith(labels Labels) (Summary, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Summary), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Observe(42.21) +func (m *SummaryVec) WithLabelValues(lvs ...string) Summary { + return m.MetricVec.WithLabelValues(lvs...).(Summary) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Observe(42.21) +func (m *SummaryVec) With(labels Labels) Summary { + return m.MetricVec.With(labels).(Summary) +} + +type constSummary struct { + desc *Desc + count uint64 + sum float64 + quantiles map[float64]float64 + labelPairs []*dto.LabelPair +} + +func (s *constSummary) Desc() *Desc { + return s.desc +} + +func (s *constSummary) Write(out *dto.Metric) error { + sum := &dto.Summary{} + qs := make([]*dto.Quantile, 0, len(s.quantiles)) + + sum.SampleCount = proto.Uint64(s.count) + sum.SampleSum = proto.Float64(s.sum) + + for rank, q := range s.quantiles { + qs = append(qs, &dto.Quantile{ + Quantile: proto.Float64(rank), + Value: proto.Float64(q), + }) + } + + if len(qs) > 0 { + sort.Sort(quantSort(qs)) + } + sum.Quantile = qs + + out.Summary = sum + out.Label = s.labelPairs + + return nil +} + +// NewConstSummary returns a metric representing a Prometheus summary with fixed +// values for the count, sum, and quantiles. As those parameters cannot be +// changed, the returned value does not implement the Summary interface (but +// only the Metric interface). Users of this package will not have much use for +// it in regular operations. 
However, when implementing custom Collectors, it is +// useful as a throw-away metric that is generated on the fly to send it to +// Prometheus in the Collect method. +// +// quantiles maps ranks to quantile values. For example, a median latency of +// 0.23s and a 99th percentile latency of 0.56s would be expressed as: +// map[float64]float64{0.5: 0.23, 0.99: 0.56} +// +// NewConstSummary returns an error if the length of labelValues is not +// consistent with the variable labels in Desc. +func NewConstSummary( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + labelValues ...string, +) (Metric, error) { + if len(desc.variableLabels) != len(labelValues) { + return nil, errInconsistentCardinality + } + return &constSummary{ + desc: desc, + count: count, + sum: sum, + quantiles: quantiles, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstSummary is a version of NewConstSummary that panics where +// NewConstMetric would have returned an error. +func MustNewConstSummary( + desc *Desc, + count uint64, + sum float64, + quantiles map[float64]float64, + labelValues ...string, +) Metric { + m, err := NewConstSummary(desc, count, sum, quantiles, labelValues...) + if err != nil { + panic(err) + } + return m +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/summary_test.go b/vendor/github.com/prometheus/client_golang/prometheus/summary_test.go new file mode 100644 index 000000000..c4575ffbd --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/summary_test.go @@ -0,0 +1,347 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "math" + "math/rand" + "sort" + "sync" + "testing" + "testing/quick" + "time" + + dto "github.com/prometheus/client_model/go" +) + +func benchmarkSummaryObserve(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewSummary(SummaryOpts{}) + + for i := 0; i < w; i++ { + go func() { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Observe(float64(i)) + } + + wg.Done() + }() + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkSummaryObserve1(b *testing.B) { + benchmarkSummaryObserve(1, b) +} + +func BenchmarkSummaryObserve2(b *testing.B) { + benchmarkSummaryObserve(2, b) +} + +func BenchmarkSummaryObserve4(b *testing.B) { + benchmarkSummaryObserve(4, b) +} + +func BenchmarkSummaryObserve8(b *testing.B) { + benchmarkSummaryObserve(8, b) +} + +func benchmarkSummaryWrite(w int, b *testing.B) { + b.StopTimer() + + wg := new(sync.WaitGroup) + wg.Add(w) + + g := new(sync.WaitGroup) + g.Add(1) + + s := NewSummary(SummaryOpts{}) + + for i := 0; i < 1000000; i++ { + s.Observe(float64(i)) + } + + for j := 0; j < w; j++ { + outs := make([]dto.Metric, b.N) + + go func(o []dto.Metric) { + g.Wait() + + for i := 0; i < b.N; i++ { + s.Write(&o[i]) + } + + wg.Done() + }(outs) + } + + b.StartTimer() + g.Done() + wg.Wait() +} + +func BenchmarkSummaryWrite1(b *testing.B) { + benchmarkSummaryWrite(1, b) +} + +func BenchmarkSummaryWrite2(b *testing.B) { + benchmarkSummaryWrite(2, b) +} + +func BenchmarkSummaryWrite4(b *testing.B) { + 
benchmarkSummaryWrite(4, b) +} + +func BenchmarkSummaryWrite8(b *testing.B) { + benchmarkSummaryWrite(8, b) +} + +func TestSummaryConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + } + + rand.Seed(42) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%5 + 1) + total := mutations * concLevel + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sum := NewSummary(SummaryOpts{ + Name: "test_summary", + Help: "helpless", + }) + + allVars := make([]float64, total) + var sampleSum float64 + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + allVars[i*mutations+j] = v + sampleSum += v + } + + go func(vals []float64) { + start.Wait() + for _, v := range vals { + sum.Observe(v) + } + end.Done() + }(vals) + } + sort.Float64s(allVars) + start.Done() + end.Wait() + + m := &dto.Metric{} + sum.Write(m) + if got, want := int(*m.Summary.SampleCount), total; got != want { + t.Errorf("got sample count %d, want %d", got, want) + } + if got, want := *m.Summary.SampleSum, sampleSum; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f, want %f", got, want) + } + + objectives := make([]float64, 0, len(DefObjectives)) + for qu := range DefObjectives { + objectives = append(objectives, qu) + } + sort.Float64s(objectives) + + for i, wantQ := range objectives { + ε := DefObjectives[wantQ] + gotQ := *m.Summary.Quantile[i].Quantile + gotV := *m.Summary.Quantile[i].Value + min, max := getBounds(allVars, wantQ, ε) + if gotQ != wantQ { + t.Errorf("got quantile %f, want %f", gotQ, wantQ) + } + if gotV < min || gotV > max { + t.Errorf("got %f for quantile %f, want [%f,%f]", gotV, gotQ, min, max) + } + } + return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func TestSummaryVecConcurrency(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short 
mode.") + } + + rand.Seed(42) + + objectives := make([]float64, 0, len(DefObjectives)) + for qu := range DefObjectives { + + objectives = append(objectives, qu) + } + sort.Float64s(objectives) + + it := func(n uint32) bool { + mutations := int(n%1e4 + 1e4) + concLevel := int(n%7 + 1) + vecLength := int(n%3 + 1) + + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + sum := NewSummaryVec( + SummaryOpts{ + Name: "test_summary", + Help: "helpless", + }, + []string{"label"}, + ) + + allVars := make([][]float64, vecLength) + sampleSums := make([]float64, vecLength) + for i := 0; i < concLevel; i++ { + vals := make([]float64, mutations) + picks := make([]int, mutations) + for j := 0; j < mutations; j++ { + v := rand.NormFloat64() + vals[j] = v + pick := rand.Intn(vecLength) + picks[j] = pick + allVars[pick] = append(allVars[pick], v) + sampleSums[pick] += v + } + + go func(vals []float64) { + start.Wait() + for i, v := range vals { + sum.WithLabelValues(string('A' + picks[i])).Observe(v) + } + end.Done() + }(vals) + } + for _, vars := range allVars { + sort.Float64s(vars) + } + start.Done() + end.Wait() + + for i := 0; i < vecLength; i++ { + m := &dto.Metric{} + s := sum.WithLabelValues(string('A' + i)) + s.Write(m) + if got, want := int(*m.Summary.SampleCount), len(allVars[i]); got != want { + t.Errorf("got sample count %d for label %c, want %d", got, 'A'+i, want) + } + if got, want := *m.Summary.SampleSum, sampleSums[i]; math.Abs((got-want)/want) > 0.001 { + t.Errorf("got sample sum %f for label %c, want %f", got, 'A'+i, want) + } + for j, wantQ := range objectives { + ε := DefObjectives[wantQ] + gotQ := *m.Summary.Quantile[j].Quantile + gotV := *m.Summary.Quantile[j].Value + min, max := getBounds(allVars[i], wantQ, ε) + if gotQ != wantQ { + t.Errorf("got quantile %f for label %c, want %f", gotQ, 'A'+i, wantQ) + } + if gotV < min || gotV > max { + t.Errorf("got %f for quantile %f for label %c, want [%f,%f]", gotV, gotQ, 'A'+i, min, max) + } + } + } + 
return true + } + + if err := quick.Check(it, nil); err != nil { + t.Error(err) + } +} + +func TestSummaryDecay(t *testing.T) { + if testing.Short() { + t.Skip("Skipping test in short mode.") + // More because it depends on timing than because it is particularly long... + } + + sum := NewSummary(SummaryOpts{ + Name: "test_summary", + Help: "helpless", + MaxAge: 100 * time.Millisecond, + Objectives: map[float64]float64{0.1: 0.001}, + AgeBuckets: 10, + }) + + m := &dto.Metric{} + i := 0 + tick := time.NewTicker(time.Millisecond) + for _ = range tick.C { + i++ + sum.Observe(float64(i)) + if i%10 == 0 { + sum.Write(m) + if got, want := *m.Summary.Quantile[0].Value, math.Max(float64(i)/10, float64(i-90)); math.Abs(got-want) > 20 { + t.Errorf("%d. got %f, want %f", i, got, want) + } + m.Reset() + } + if i >= 1000 { + break + } + } + tick.Stop() + // Wait for MaxAge without observations and make sure quantiles are NaN. + time.Sleep(100 * time.Millisecond) + sum.Write(m) + if got := *m.Summary.Quantile[0].Value; !math.IsNaN(got) { + t.Errorf("got %f, want NaN after expiration", got) + } +} + +func getBounds(vars []float64, q, ε float64) (min, max float64) { + // TODO(beorn7): This currently tolerates an error of up to 2*ε. The + // error must be at most ε, but for some reason, it's sometimes slightly + // higher. That's a bug. 
+ n := float64(len(vars)) + lower := int((q - 2*ε) * n) + upper := int(math.Ceil((q + 2*ε) * n)) + min = vars[0] + if lower > 1 { + min = vars[lower-1] + } + max = vars[len(vars)-1] + if upper < len(vars) { + max = vars[upper-1] + } + return +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/untyped.go b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go new file mode 100644 index 000000000..5faf7e6e3 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/untyped.go @@ -0,0 +1,138 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +// Untyped is a Metric that represents a single numerical value that can +// arbitrarily go up and down. +// +// An Untyped metric works the same as a Gauge. The only difference is that to +// no type information is implied. +// +// To create Untyped instances, use NewUntyped. +type Untyped interface { + Metric + Collector + + // Set sets the Untyped metric to an arbitrary value. + Set(float64) + // Inc increments the Untyped metric by 1. + Inc() + // Dec decrements the Untyped metric by 1. + Dec() + // Add adds the given value to the Untyped metric. (The value can be + // negative, resulting in a decrease.) + Add(float64) + // Sub subtracts the given value from the Untyped metric. (The value can + // be negative, resulting in an increase.) + Sub(float64) +} + +// UntypedOpts is an alias for Opts. 
See there for doc comments. +type UntypedOpts Opts + +// NewUntyped creates a new Untyped metric from the provided UntypedOpts. +func NewUntyped(opts UntypedOpts) Untyped { + return newValue(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, 0) +} + +// UntypedVec is a Collector that bundles a set of Untyped metrics that all +// share the same Desc, but have different values for their variable +// labels. This is used if you want to count the same thing partitioned by +// various dimensions. Create instances with NewUntypedVec. +type UntypedVec struct { + *MetricVec +} + +// NewUntypedVec creates a new UntypedVec based on the provided UntypedOpts and +// partitioned by the given label names. At least one label name must be +// provided. +func NewUntypedVec(opts UntypedOpts, labelNames []string) *UntypedVec { + desc := NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + labelNames, + opts.ConstLabels, + ) + return &UntypedVec{ + MetricVec: newMetricVec(desc, func(lvs ...string) Metric { + return newValue(desc, UntypedValue, 0, lvs...) + }), + } +} + +// GetMetricWithLabelValues replaces the method of the same name in +// MetricVec. The difference is that this method returns an Untyped and not a +// Metric so that no type conversion is required. +func (m *UntypedVec) GetMetricWithLabelValues(lvs ...string) (Untyped, error) { + metric, err := m.MetricVec.GetMetricWithLabelValues(lvs...) + if metric != nil { + return metric.(Untyped), err + } + return nil, err +} + +// GetMetricWith replaces the method of the same name in MetricVec. The +// difference is that this method returns an Untyped and not a Metric so that no +// type conversion is required. 
+func (m *UntypedVec) GetMetricWith(labels Labels) (Untyped, error) { + metric, err := m.MetricVec.GetMetricWith(labels) + if metric != nil { + return metric.(Untyped), err + } + return nil, err +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics where +// GetMetricWithLabelValues would have returned an error. By not returning an +// error, WithLabelValues allows shortcuts like +// myVec.WithLabelValues("404", "GET").Add(42) +func (m *UntypedVec) WithLabelValues(lvs ...string) Untyped { + return m.MetricVec.WithLabelValues(lvs...).(Untyped) +} + +// With works as GetMetricWith, but panics where GetMetricWithLabels would have +// returned an error. By not returning an error, With allows shortcuts like +// myVec.With(Labels{"code": "404", "method": "GET"}).Add(42) +func (m *UntypedVec) With(labels Labels) Untyped { + return m.MetricVec.With(labels).(Untyped) +} + +// UntypedFunc is an Untyped whose value is determined at collect time by +// calling a provided function. +// +// To create UntypedFunc instances, use NewUntypedFunc. +type UntypedFunc interface { + Metric + Collector +} + +// NewUntypedFunc creates a new UntypedFunc based on the provided +// UntypedOpts. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where an UntypedFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. 
+func NewUntypedFunc(opts UntypedOpts, function func() float64) UntypedFunc { + return newValueFunc(NewDesc( + BuildFQName(opts.Namespace, opts.Subsystem, opts.Name), + opts.Help, + nil, + opts.ConstLabels, + ), UntypedValue, function) +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/value.go b/vendor/github.com/prometheus/client_golang/prometheus/value.go new file mode 100644 index 000000000..a944c3775 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/value.go @@ -0,0 +1,234 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "errors" + "fmt" + "math" + "sort" + "sync/atomic" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" +) + +// ValueType is an enumeration of metric types that represent a simple value. +type ValueType int + +// Possible values for the ValueType enum. +const ( + _ ValueType = iota + CounterValue + GaugeValue + UntypedValue +) + +var errInconsistentCardinality = errors.New("inconsistent label cardinality") + +// value is a generic metric for simple values. It implements Metric, Collector, +// Counter, Gauge, and Untyped. Its effective type is determined by +// ValueType. This is a low-level building block used by the library to back the +// implementations of Counter, Gauge, and Untyped. +type value struct { + // valBits containst the bits of the represented float64 value. 
It has + // to go first in the struct to guarantee alignment for atomic + // operations. http://golang.org/pkg/sync/atomic/#pkg-note-BUG + valBits uint64 + + selfCollector + + desc *Desc + valType ValueType + labelPairs []*dto.LabelPair +} + +// newValue returns a newly allocated value with the given Desc, ValueType, +// sample value and label values. It panics if the number of label +// values is different from the number of variable labels in Desc. +func newValue(desc *Desc, valueType ValueType, val float64, labelValues ...string) *value { + if len(labelValues) != len(desc.variableLabels) { + panic(errInconsistentCardinality) + } + result := &value{ + desc: desc, + valType: valueType, + valBits: math.Float64bits(val), + labelPairs: makeLabelPairs(desc, labelValues), + } + result.init(result) + return result +} + +func (v *value) Desc() *Desc { + return v.desc +} + +func (v *value) Set(val float64) { + atomic.StoreUint64(&v.valBits, math.Float64bits(val)) +} + +func (v *value) Inc() { + v.Add(1) +} + +func (v *value) Dec() { + v.Add(-1) +} + +func (v *value) Add(val float64) { + for { + oldBits := atomic.LoadUint64(&v.valBits) + newBits := math.Float64bits(math.Float64frombits(oldBits) + val) + if atomic.CompareAndSwapUint64(&v.valBits, oldBits, newBits) { + return + } + } +} + +func (v *value) Sub(val float64) { + v.Add(val * -1) +} + +func (v *value) Write(out *dto.Metric) error { + val := math.Float64frombits(atomic.LoadUint64(&v.valBits)) + return populateMetric(v.valType, val, v.labelPairs, out) +} + +// valueFunc is a generic metric for simple values retrieved on collect time +// from a function. It implements Metric and Collector. Its effective type is +// determined by ValueType. This is a low-level building block used by the +// library to back the implementations of CounterFunc, GaugeFunc, and +// UntypedFunc. 
+type valueFunc struct { + selfCollector + + desc *Desc + valType ValueType + function func() float64 + labelPairs []*dto.LabelPair +} + +// newValueFunc returns a newly allocated valueFunc with the given Desc and +// ValueType. The value reported is determined by calling the given function +// from within the Write method. Take into account that metric collection may +// happen concurrently. If that results in concurrent calls to Write, like in +// the case where a valueFunc is directly registered with Prometheus, the +// provided function must be concurrency-safe. +func newValueFunc(desc *Desc, valueType ValueType, function func() float64) *valueFunc { + result := &valueFunc{ + desc: desc, + valType: valueType, + function: function, + labelPairs: makeLabelPairs(desc, nil), + } + result.init(result) + return result +} + +func (v *valueFunc) Desc() *Desc { + return v.desc +} + +func (v *valueFunc) Write(out *dto.Metric) error { + return populateMetric(v.valType, v.function(), v.labelPairs, out) +} + +// NewConstMetric returns a metric with one fixed value that cannot be +// changed. Users of this package will not have much use for it in regular +// operations. However, when implementing custom Collectors, it is useful as a +// throw-away metric that is generated on the fly to send it to Prometheus in +// the Collect method. NewConstMetric returns an error if the length of +// labelValues is not consistent with the variable labels in Desc. +func NewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) (Metric, error) { + if len(desc.variableLabels) != len(labelValues) { + return nil, errInconsistentCardinality + } + return &constMetric{ + desc: desc, + valType: valueType, + val: value, + labelPairs: makeLabelPairs(desc, labelValues), + }, nil +} + +// MustNewConstMetric is a version of NewConstMetric that panics where +// NewConstMetric would have returned an error. 
+func MustNewConstMetric(desc *Desc, valueType ValueType, value float64, labelValues ...string) Metric { + m, err := NewConstMetric(desc, valueType, value, labelValues...) + if err != nil { + panic(err) + } + return m +} + +type constMetric struct { + desc *Desc + valType ValueType + val float64 + labelPairs []*dto.LabelPair +} + +func (m *constMetric) Desc() *Desc { + return m.desc +} + +func (m *constMetric) Write(out *dto.Metric) error { + return populateMetric(m.valType, m.val, m.labelPairs, out) +} + +func populateMetric( + t ValueType, + v float64, + labelPairs []*dto.LabelPair, + m *dto.Metric, +) error { + m.Label = labelPairs + switch t { + case CounterValue: + m.Counter = &dto.Counter{Value: proto.Float64(v)} + case GaugeValue: + m.Gauge = &dto.Gauge{Value: proto.Float64(v)} + case UntypedValue: + m.Untyped = &dto.Untyped{Value: proto.Float64(v)} + default: + return fmt.Errorf("encountered unknown type %v", t) + } + return nil +} + +func makeLabelPairs(desc *Desc, labelValues []string) []*dto.LabelPair { + totalLen := len(desc.variableLabels) + len(desc.constLabelPairs) + if totalLen == 0 { + // Super fast path. + return nil + } + if len(desc.variableLabels) == 0 { + // Moderately fast path. 
+ return desc.constLabelPairs + } + labelPairs := make([]*dto.LabelPair, 0, totalLen) + for i, n := range desc.variableLabels { + labelPairs = append(labelPairs, &dto.LabelPair{ + Name: proto.String(n), + Value: proto.String(labelValues[i]), + }) + } + for _, lp := range desc.constLabelPairs { + labelPairs = append(labelPairs, lp) + } + sort.Sort(LabelPairSorter(labelPairs)) + return labelPairs +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec.go b/vendor/github.com/prometheus/client_golang/prometheus/vec.go new file mode 100644 index 000000000..7f3eef9a4 --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec.go @@ -0,0 +1,404 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package prometheus + +import ( + "fmt" + "sync" + + "github.com/prometheus/common/model" +) + +// MetricVec is a Collector to bundle metrics of the same name that +// differ in their label values. MetricVec is usually not used directly but as a +// building block for implementations of vectors of a given metric +// type. GaugeVec, CounterVec, SummaryVec, and UntypedVec are examples already +// provided in this package. +type MetricVec struct { + mtx sync.RWMutex // Protects the children. 
+ children map[uint64][]metricWithLabelValues + desc *Desc + + newMetric func(labelValues ...string) Metric + hashAdd func(h uint64, s string) uint64 // replace hash function for testing collision handling + hashAddByte func(h uint64, b byte) uint64 +} + +// newMetricVec returns an initialized MetricVec. The concrete value is +// returned for embedding into another struct. +func newMetricVec(desc *Desc, newMetric func(lvs ...string) Metric) *MetricVec { + return &MetricVec{ + children: map[uint64][]metricWithLabelValues{}, + desc: desc, + newMetric: newMetric, + hashAdd: hashAdd, + hashAddByte: hashAddByte, + } +} + +// metricWithLabelValues provides the metric and its label values for +// disambiguation on hash collision. +type metricWithLabelValues struct { + values []string + metric Metric +} + +// Describe implements Collector. The length of the returned slice +// is always one. +func (m *MetricVec) Describe(ch chan<- *Desc) { + ch <- m.desc +} + +// Collect implements Collector. +func (m *MetricVec) Collect(ch chan<- Metric) { + m.mtx.RLock() + defer m.mtx.RUnlock() + + for _, metrics := range m.children { + for _, metric := range metrics { + ch <- metric.metric + } + } +} + +// GetMetricWithLabelValues returns the Metric for the given slice of label +// values (same order as the VariableLabels in Desc). If that combination of +// label values is accessed for the first time, a new Metric is created. +// +// It is possible to call this method without using the returned Metric to only +// create the new Metric but leave it at its start value (e.g. a Summary or +// Histogram without any observations). See also the SummaryVec example. +// +// Keeping the Metric for later use is possible (and should be considered if +// performance is critical), but keep in mind that Reset, DeleteLabelValues and +// Delete can be used to delete the Metric from the MetricVec. 
In that case, the +// Metric will still exist, but it will not be exported anymore, even if a +// Metric with the same label values is created later. See also the CounterVec +// example. +// +// An error is returned if the number of label values is not the same as the +// number of VariableLabels in Desc. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider GetMetricWith(Labels) as +// an alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the GaugeVec example. +func (m *MetricVec) GetMetricWithLabelValues(lvs ...string) (Metric, error) { + h, err := m.hashLabelValues(lvs) + if err != nil { + return nil, err + } + + return m.getOrCreateMetricWithLabelValues(h, lvs), nil +} + +// GetMetricWith returns the Metric for the given Labels map (the label names +// must match those of the VariableLabels in Desc). If that label map is +// accessed for the first time, a new Metric is created. Implications of +// creating a Metric without using it and keeping the Metric for later use are +// the same as for GetMetricWithLabelValues. +// +// An error is returned if the number and names of the Labels are inconsistent +// with those of the VariableLabels in Desc. +// +// This method is used for the same purpose as +// GetMetricWithLabelValues(...string). See there for pros and cons of the two +// methods. +func (m *MetricVec) GetMetricWith(labels Labels) (Metric, error) { + h, err := m.hashLabels(labels) + if err != nil { + return nil, err + } + + return m.getOrCreateMetricWithLabels(h, labels), nil +} + +// WithLabelValues works as GetMetricWithLabelValues, but panics if an error +// occurs. 
The method allows neat syntax like: +// httpReqs.WithLabelValues("404", "POST").Inc() +func (m *MetricVec) WithLabelValues(lvs ...string) Metric { + metric, err := m.GetMetricWithLabelValues(lvs...) + if err != nil { + panic(err) + } + return metric +} + +// With works as GetMetricWith, but panics if an error occurs. The method allows +// neat syntax like: +// httpReqs.With(Labels{"status":"404", "method":"POST"}).Inc() +func (m *MetricVec) With(labels Labels) Metric { + metric, err := m.GetMetricWith(labels) + if err != nil { + panic(err) + } + return metric +} + +// DeleteLabelValues removes the metric where the variable labels are the same +// as those passed in as labels (same order as the VariableLabels in Desc). It +// returns true if a metric was deleted. +// +// It is not an error if the number of label values is not the same as the +// number of VariableLabels in Desc. However, such inconsistent label count can +// never match an actual Metric, so the method will always return false in that +// case. +// +// Note that for more than one label value, this method is prone to mistakes +// caused by an incorrect order of arguments. Consider Delete(Labels) as an +// alternative to avoid that type of mistake. For higher label numbers, the +// latter has a much more readable (albeit more verbose) syntax, but it comes +// with a performance overhead (for creating and processing the Labels map). +// See also the CounterVec example. +func (m *MetricVec) DeleteLabelValues(lvs ...string) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + h, err := m.hashLabelValues(lvs) + if err != nil { + return false + } + return m.deleteByHashWithLabelValues(h, lvs) +} + +// Delete deletes the metric where the variable labels are the same as those +// passed in as labels. It returns true if a metric was deleted. +// +// It is not an error if the number and names of the Labels are inconsistent +// with those of the VariableLabels in the Desc of the MetricVec. 
However, such +// inconsistent Labels can never match an actual Metric, so the method will +// always return false in that case. +// +// This method is used for the same purpose as DeleteLabelValues(...string). See +// there for pros and cons of the two methods. +func (m *MetricVec) Delete(labels Labels) bool { + m.mtx.Lock() + defer m.mtx.Unlock() + + h, err := m.hashLabels(labels) + if err != nil { + return false + } + + return m.deleteByHashWithLabels(h, labels) +} + +// deleteByHashWithLabelValues removes the metric from the hash bucket h. If +// there are multiple matches in the bucket, use lvs to select a metric and +// remove only that metric. +func (m *MetricVec) deleteByHashWithLabelValues(h uint64, lvs []string) bool { + metrics, ok := m.children[h] + if !ok { + return false + } + + i := m.findMetricWithLabelValues(metrics, lvs) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + m.children[h] = append(metrics[:i], metrics[i+1:]...) + } else { + delete(m.children, h) + } + return true +} + +// deleteByHashWithLabels removes the metric from the hash bucket h. If there +// are multiple matches in the bucket, use lvs to select a metric and remove +// only that metric. +func (m *MetricVec) deleteByHashWithLabels(h uint64, labels Labels) bool { + metrics, ok := m.children[h] + if !ok { + return false + } + i := m.findMetricWithLabels(metrics, labels) + if i >= len(metrics) { + return false + } + + if len(metrics) > 1 { + m.children[h] = append(metrics[:i], metrics[i+1:]...) + } else { + delete(m.children, h) + } + return true +} + +// Reset deletes all metrics in this vector. 
+func (m *MetricVec) Reset() { + m.mtx.Lock() + defer m.mtx.Unlock() + + for h := range m.children { + delete(m.children, h) + } +} + +func (m *MetricVec) hashLabelValues(vals []string) (uint64, error) { + if len(vals) != len(m.desc.variableLabels) { + return 0, errInconsistentCardinality + } + h := hashNew() + for _, val := range vals { + h = m.hashAdd(h, val) + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +func (m *MetricVec) hashLabels(labels Labels) (uint64, error) { + if len(labels) != len(m.desc.variableLabels) { + return 0, errInconsistentCardinality + } + h := hashNew() + for _, label := range m.desc.variableLabels { + val, ok := labels[label] + if !ok { + return 0, fmt.Errorf("label name %q missing in label map", label) + } + h = m.hashAdd(h, val) + h = m.hashAddByte(h, model.SeparatorByte) + } + return h, nil +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. +func (m *MetricVec) getOrCreateMetricWithLabelValues(hash uint64, lvs []string) Metric { + m.mtx.RLock() + metric, ok := m.getMetricWithLabelValues(hash, lvs) + m.mtx.RUnlock() + if ok { + return metric + } + + m.mtx.Lock() + defer m.mtx.Unlock() + metric, ok = m.getMetricWithLabelValues(hash, lvs) + if !ok { + // Copy to avoid allocation in case wo don't go down this code path. + copiedLVs := make([]string, len(lvs)) + copy(copiedLVs, lvs) + metric = m.newMetric(copiedLVs...) + m.children[hash] = append(m.children[hash], metricWithLabelValues{values: copiedLVs, metric: metric}) + } + return metric +} + +// getOrCreateMetricWithLabelValues retrieves the metric by hash and label value +// or creates it and returns the new one. +// +// This function holds the mutex. 
+func (m *MetricVec) getOrCreateMetricWithLabels(hash uint64, labels Labels) Metric { + m.mtx.RLock() + metric, ok := m.getMetricWithLabels(hash, labels) + m.mtx.RUnlock() + if ok { + return metric + } + + m.mtx.Lock() + defer m.mtx.Unlock() + metric, ok = m.getMetricWithLabels(hash, labels) + if !ok { + lvs := m.extractLabelValues(labels) + metric = m.newMetric(lvs...) + m.children[hash] = append(m.children[hash], metricWithLabelValues{values: lvs, metric: metric}) + } + return metric +} + +// getMetricWithLabelValues gets a metric while handling possible collisions in +// the hash space. Must be called while holding read mutex. +func (m *MetricVec) getMetricWithLabelValues(h uint64, lvs []string) (Metric, bool) { + metrics, ok := m.children[h] + if ok { + if i := m.findMetricWithLabelValues(metrics, lvs); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// getMetricWithLabels gets a metric while handling possible collisions in +// the hash space. Must be called while holding read mutex. +func (m *MetricVec) getMetricWithLabels(h uint64, labels Labels) (Metric, bool) { + metrics, ok := m.children[h] + if ok { + if i := m.findMetricWithLabels(metrics, labels); i < len(metrics) { + return metrics[i].metric, true + } + } + return nil, false +} + +// findMetricWithLabelValues returns the index of the matching metric or +// len(metrics) if not found. +func (m *MetricVec) findMetricWithLabelValues(metrics []metricWithLabelValues, lvs []string) int { + for i, metric := range metrics { + if m.matchLabelValues(metric.values, lvs) { + return i + } + } + return len(metrics) +} + +// findMetricWithLabels returns the index of the matching metric or len(metrics) +// if not found. 
+func (m *MetricVec) findMetricWithLabels(metrics []metricWithLabelValues, labels Labels) int { + for i, metric := range metrics { + if m.matchLabels(metric.values, labels) { + return i + } + } + return len(metrics) +} + +func (m *MetricVec) matchLabelValues(values []string, lvs []string) bool { + if len(values) != len(lvs) { + return false + } + for i, v := range values { + if v != lvs[i] { + return false + } + } + return true +} + +func (m *MetricVec) matchLabels(values []string, labels Labels) bool { + if len(labels) != len(values) { + return false + } + for i, k := range m.desc.variableLabels { + if values[i] != labels[k] { + return false + } + } + return true +} + +func (m *MetricVec) extractLabelValues(labels Labels) []string { + labelValues := make([]string, len(labels)) + for i, k := range m.desc.variableLabels { + labelValues[i] = labels[k] + } + return labelValues +} diff --git a/vendor/github.com/prometheus/client_golang/prometheus/vec_test.go b/vendor/github.com/prometheus/client_golang/prometheus/vec_test.go new file mode 100644 index 000000000..445a6b39f --- /dev/null +++ b/vendor/github.com/prometheus/client_golang/prometheus/vec_test.go @@ -0,0 +1,312 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package prometheus + +import ( + "fmt" + "testing" + + dto "github.com/prometheus/client_model/go" +) + +func TestDelete(t *testing.T) { + vec := NewUntypedVec( + UntypedOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + testDelete(t, vec) +} + +func TestDeleteWithCollisions(t *testing.T) { + vec := NewUntypedVec( + UntypedOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + vec.hashAdd = func(h uint64, s string) uint64 { return 1 } + vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } + testDelete(t, vec) +} + +func testDelete(t *testing.T, vec *UntypedVec) { + if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) + if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.Delete(Labels{"l1": "v1", "l2": "v2"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) + if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.Delete(Labels{"l2": "v2", "l1": "v1"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) + if got, want := vec.Delete(Labels{"l2": "v1", "l1": "v2"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.Delete(Labels{"l1": "v1"}), false; got != want { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestDeleteLabelValues(t *testing.T) { + vec := NewUntypedVec( + UntypedOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + testDeleteLabelValues(t, vec) +} + +func TestDeleteLabelValuesWithCollisions(t *testing.T) { + vec := NewUntypedVec( + UntypedOpts{ + Name: 
"test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + vec.hashAdd = func(h uint64, s string) uint64 { return 1 } + vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } + testDeleteLabelValues(t, vec) +} + +func testDeleteLabelValues(t *testing.T, vec *UntypedVec) { + if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) + vec.With(Labels{"l1": "v1", "l2": "v3"}).(Untyped).Set(42) // Add junk data for collision. + if got, want := vec.DeleteLabelValues("v1", "v2"), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.DeleteLabelValues("v1", "v2"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.DeleteLabelValues("v1", "v3"), true; got != want { + t.Errorf("got %v, want %v", got, want) + } + + vec.With(Labels{"l1": "v1", "l2": "v2"}).(Untyped).Set(42) + // Delete out of order. + if got, want := vec.DeleteLabelValues("v2", "v1"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } + if got, want := vec.DeleteLabelValues("v1"), false; got != want { + t.Errorf("got %v, want %v", got, want) + } +} + +func TestMetricVec(t *testing.T) { + vec := NewUntypedVec( + UntypedOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + testMetricVec(t, vec) +} + +func TestMetricVecWithCollisions(t *testing.T) { + vec := NewUntypedVec( + UntypedOpts{ + Name: "test", + Help: "helpless", + }, + []string{"l1", "l2"}, + ) + vec.hashAdd = func(h uint64, s string) uint64 { return 1 } + vec.hashAddByte = func(h uint64, b byte) uint64 { return 1 } + testMetricVec(t, vec) +} + +func testMetricVec(t *testing.T, vec *UntypedVec) { + vec.Reset() // Actually test Reset now! + + var pair [2]string + // Keep track of metrics. 
+ expected := map[[2]string]int{} + + for i := 0; i < 1000; i++ { + pair[0], pair[1] = fmt.Sprint(i%4), fmt.Sprint(i%5) // Varying combinations multiples. + expected[pair]++ + vec.WithLabelValues(pair[0], pair[1]).Inc() + + expected[[2]string{"v1", "v2"}]++ + vec.WithLabelValues("v1", "v2").(Untyped).Inc() + } + + var total int + for _, metrics := range vec.children { + for _, metric := range metrics { + total++ + copy(pair[:], metric.values) + + var metricOut dto.Metric + if err := metric.metric.Write(&metricOut); err != nil { + t.Fatal(err) + } + actual := *metricOut.Untyped.Value + + var actualPair [2]string + for i, label := range metricOut.Label { + actualPair[i] = *label.Value + } + + // Test output pair against metric.values to ensure we've selected + // the right one. We check this to ensure the below check means + // anything at all. + if actualPair != pair { + t.Fatalf("unexpected pair association in metric map: %v != %v", actualPair, pair) + } + + if actual != float64(expected[pair]) { + t.Fatalf("incorrect counter value for %v: %v != %v", pair, actual, expected[pair]) + } + } + } + + if total != len(expected) { + t.Fatalf("unexpected number of metrics: %v != %v", total, len(expected)) + } + + vec.Reset() + + if len(vec.children) > 0 { + t.Fatalf("reset failed") + } +} + +func TestCounterVecEndToEndWithCollision(t *testing.T) { + vec := NewCounterVec( + CounterOpts{ + Name: "test", + Help: "helpless", + }, + []string{"labelname"}, + ) + vec.WithLabelValues("77kepQFQ8Kl").Inc() + vec.WithLabelValues("!0IC=VloaY").Add(2) + + m := &dto.Metric{} + if err := vec.WithLabelValues("77kepQFQ8Kl").Write(m); err != nil { + t.Fatal(err) + } + if got, want := m.GetLabel()[0].GetValue(), "77kepQFQ8Kl"; got != want { + t.Errorf("got label value %q, want %q", got, want) + } + if got, want := m.GetCounter().GetValue(), 1.; got != want { + t.Errorf("got value %f, want %f", got, want) + } + m.Reset() + if err := vec.WithLabelValues("!0IC=VloaY").Write(m); err != nil { + 
t.Fatal(err) + } + if got, want := m.GetLabel()[0].GetValue(), "!0IC=VloaY"; got != want { + t.Errorf("got label value %q, want %q", got, want) + } + if got, want := m.GetCounter().GetValue(), 2.; got != want { + t.Errorf("got value %f, want %f", got, want) + } +} + +func BenchmarkMetricVecWithLabelValuesBasic(b *testing.B) { + benchmarkMetricVecWithLabelValues(b, map[string][]string{ + "l1": []string{"onevalue"}, + "l2": []string{"twovalue"}, + }) +} + +func BenchmarkMetricVecWithLabelValues2Keys10ValueCardinality(b *testing.B) { + benchmarkMetricVecWithLabelValuesCardinality(b, 2, 10) +} + +func BenchmarkMetricVecWithLabelValues4Keys10ValueCardinality(b *testing.B) { + benchmarkMetricVecWithLabelValuesCardinality(b, 4, 10) +} + +func BenchmarkMetricVecWithLabelValues2Keys100ValueCardinality(b *testing.B) { + benchmarkMetricVecWithLabelValuesCardinality(b, 2, 100) +} + +func BenchmarkMetricVecWithLabelValues10Keys100ValueCardinality(b *testing.B) { + benchmarkMetricVecWithLabelValuesCardinality(b, 10, 100) +} + +func BenchmarkMetricVecWithLabelValues10Keys1000ValueCardinality(b *testing.B) { + benchmarkMetricVecWithLabelValuesCardinality(b, 10, 1000) +} + +func benchmarkMetricVecWithLabelValuesCardinality(b *testing.B, nkeys, nvalues int) { + labels := map[string][]string{} + + for i := 0; i < nkeys; i++ { + var ( + k = fmt.Sprintf("key-%v", i) + vs = make([]string, 0, nvalues) + ) + for j := 0; j < nvalues; j++ { + vs = append(vs, fmt.Sprintf("value-%v", j)) + } + labels[k] = vs + } + + benchmarkMetricVecWithLabelValues(b, labels) +} + +func benchmarkMetricVecWithLabelValues(b *testing.B, labels map[string][]string) { + var keys []string + for k := range labels { // Map order dependent, who cares though. + keys = append(keys, k) + } + + values := make([]string, len(labels)) // Value cache for permutations. 
+ vec := NewUntypedVec( + UntypedOpts{ + Name: "test", + Help: "helpless", + }, + keys, + ) + + b.ReportAllocs() + b.ResetTimer() + for i := 0; i < b.N; i++ { + // Varies input across provide map entries based on key size. + for j, k := range keys { + candidates := labels[k] + values[j] = candidates[i%len(candidates)] + } + + vec.WithLabelValues(values...) + } +} diff --git a/vendor/github.com/prometheus/client_model/.gitignore b/vendor/github.com/prometheus/client_model/.gitignore new file mode 100644 index 000000000..2f7896d1d --- /dev/null +++ b/vendor/github.com/prometheus/client_model/.gitignore @@ -0,0 +1 @@ +target/ diff --git a/vendor/github.com/prometheus/client_model/CONTRIBUTING.md b/vendor/github.com/prometheus/client_model/CONTRIBUTING.md new file mode 100644 index 000000000..40503edbf --- /dev/null +++ b/vendor/github.com/prometheus/client_model/CONTRIBUTING.md @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull request, + addressing (with `@...`) the maintainer of this repository (see + [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). 
diff --git a/vendor/github.com/prometheus/client_model/LICENSE b/vendor/github.com/prometheus/client_model/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/client_model/MAINTAINERS.md b/vendor/github.com/prometheus/client_model/MAINTAINERS.md new file mode 100644 index 000000000..3ede55fe1 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/MAINTAINERS.md @@ -0,0 +1 @@ +* Björn Rabenstein diff --git a/vendor/github.com/prometheus/client_model/Makefile b/vendor/github.com/prometheus/client_model/Makefile new file mode 100644 index 000000000..e147c69da --- /dev/null +++ b/vendor/github.com/prometheus/client_model/Makefile @@ -0,0 +1,62 @@ +# Copyright 2013 Prometheus Team +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at + +# http://www.apache.org/licenses/LICENSE-2.0 + +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +KEY_ID ?= _DEFINE_ME_ + +all: cpp go java python ruby + +SUFFIXES: + +cpp: cpp/metrics.pb.cc cpp/metrics.pb.h + +cpp/metrics.pb.cc: metrics.proto + protoc $< --cpp_out=cpp/ + +cpp/metrics.pb.h: metrics.proto + protoc $< --cpp_out=cpp/ + +go: go/metrics.pb.go + +go/metrics.pb.go: metrics.proto + protoc $< --go_out=go/ + +java: src/main/java/io/prometheus/client/Metrics.java pom.xml + mvn clean compile package + +src/main/java/io/prometheus/client/Metrics.java: metrics.proto + protoc $< --java_out=src/main/java + +python: python/prometheus/client/model/metrics_pb2.py + +python/prometheus/client/model/metrics_pb2.py: metrics.proto + mkdir -p python/prometheus/client/model + protoc $< --python_out=python/prometheus/client/model + +ruby: + $(MAKE) -C ruby build + +clean: + -rm -rf cpp/* + -rm -rf go/* + -rm -rf java/* + -rm -rf python/* + -$(MAKE) -C ruby clean + -mvn clean + +maven-deploy-snapshot: java + mvn clean deploy -Dgpg.keyname=$(KEY_ID) -DperformRelease=true + +maven-deploy-release: java + mvn clean release:clean release:prepare release:perform -Dgpg.keyname=$(KEY_ID) -DperformRelease=true + +.PHONY: all clean cpp go java maven-deploy-snapshot maven-deploy-release python ruby diff --git a/vendor/github.com/prometheus/client_model/NOTICE b/vendor/github.com/prometheus/client_model/NOTICE new file mode 100644 index 000000000..20110e410 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/NOTICE @@ -0,0 +1,5 @@ +Data model artifacts for Prometheus. 
+Copyright 2012-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/client_model/README.md b/vendor/github.com/prometheus/client_model/README.md new file mode 100644 index 000000000..a710042db --- /dev/null +++ b/vendor/github.com/prometheus/client_model/README.md @@ -0,0 +1,26 @@ +# Background +Under most circumstances, manually downloading this repository should never +be required. + +# Prerequisites +# Base +* [Google Protocol Buffers](https://developers.google.com/protocol-buffers) + +## Java +* [Apache Maven](http://maven.apache.org) +* [Prometheus Maven Repository](https://github.com/prometheus/io.prometheus-maven-repository) checked out into ../io.prometheus-maven-repository + +## Go +* [Go](http://golang.org) +* [goprotobuf](https://code.google.com/p/goprotobuf) + +## Ruby +* [Ruby](https://www.ruby-lang.org) +* [bundler](https://rubygems.org/gems/bundler) + +# Building + $ make + +# Getting Started + * The Go source code is periodically indexed: [Go Protocol Buffer Model](http://godoc.org/github.com/prometheus/client_model/go). + * All of the core developers are accessible via the [Prometheus Developers Mailinglist](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). diff --git a/vendor/github.com/prometheus/client_model/go/metrics.pb.go b/vendor/github.com/prometheus/client_model/go/metrics.pb.go new file mode 100644 index 000000000..b065f8683 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/go/metrics.pb.go @@ -0,0 +1,364 @@ +// Code generated by protoc-gen-go. +// source: metrics.proto +// DO NOT EDIT! + +/* +Package io_prometheus_client is a generated protocol buffer package. 
+ +It is generated from these files: + metrics.proto + +It has these top-level messages: + LabelPair + Gauge + Counter + Quantile + Summary + Untyped + Histogram + Bucket + Metric + MetricFamily +*/ +package io_prometheus_client + +import proto "github.com/golang/protobuf/proto" +import math "math" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = math.Inf + +type MetricType int32 + +const ( + MetricType_COUNTER MetricType = 0 + MetricType_GAUGE MetricType = 1 + MetricType_SUMMARY MetricType = 2 + MetricType_UNTYPED MetricType = 3 + MetricType_HISTOGRAM MetricType = 4 +) + +var MetricType_name = map[int32]string{ + 0: "COUNTER", + 1: "GAUGE", + 2: "SUMMARY", + 3: "UNTYPED", + 4: "HISTOGRAM", +} +var MetricType_value = map[string]int32{ + "COUNTER": 0, + "GAUGE": 1, + "SUMMARY": 2, + "UNTYPED": 3, + "HISTOGRAM": 4, +} + +func (x MetricType) Enum() *MetricType { + p := new(MetricType) + *p = x + return p +} +func (x MetricType) String() string { + return proto.EnumName(MetricType_name, int32(x)) +} +func (x *MetricType) UnmarshalJSON(data []byte) error { + value, err := proto.UnmarshalJSONEnum(MetricType_value, data, "MetricType") + if err != nil { + return err + } + *x = MetricType(value) + return nil +} + +type LabelPair struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Value *string `protobuf:"bytes,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *LabelPair) Reset() { *m = LabelPair{} } +func (m *LabelPair) String() string { return proto.CompactTextString(m) } +func (*LabelPair) ProtoMessage() {} + +func (m *LabelPair) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *LabelPair) GetValue() string { + if m != nil && m.Value != nil { + return *m.Value + } + return "" +} + +type Gauge struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + 
XXX_unrecognized []byte `json:"-"` +} + +func (m *Gauge) Reset() { *m = Gauge{} } +func (m *Gauge) String() string { return proto.CompactTextString(m) } +func (*Gauge) ProtoMessage() {} + +func (m *Gauge) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Counter struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Counter) Reset() { *m = Counter{} } +func (m *Counter) String() string { return proto.CompactTextString(m) } +func (*Counter) ProtoMessage() {} + +func (m *Counter) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Quantile struct { + Quantile *float64 `protobuf:"fixed64,1,opt,name=quantile" json:"quantile,omitempty"` + Value *float64 `protobuf:"fixed64,2,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Quantile) Reset() { *m = Quantile{} } +func (m *Quantile) String() string { return proto.CompactTextString(m) } +func (*Quantile) ProtoMessage() {} + +func (m *Quantile) GetQuantile() float64 { + if m != nil && m.Quantile != nil { + return *m.Quantile + } + return 0 +} + +func (m *Quantile) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Summary struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` + Quantile []*Quantile `protobuf:"bytes,3,rep,name=quantile" json:"quantile,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Summary) Reset() { *m = Summary{} } +func (m *Summary) String() string { return proto.CompactTextString(m) } +func (*Summary) ProtoMessage() {} + +func (m *Summary) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Summary) GetSampleSum() 
float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Summary) GetQuantile() []*Quantile { + if m != nil { + return m.Quantile + } + return nil +} + +type Untyped struct { + Value *float64 `protobuf:"fixed64,1,opt,name=value" json:"value,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Untyped) Reset() { *m = Untyped{} } +func (m *Untyped) String() string { return proto.CompactTextString(m) } +func (*Untyped) ProtoMessage() {} + +func (m *Untyped) GetValue() float64 { + if m != nil && m.Value != nil { + return *m.Value + } + return 0 +} + +type Histogram struct { + SampleCount *uint64 `protobuf:"varint,1,opt,name=sample_count" json:"sample_count,omitempty"` + SampleSum *float64 `protobuf:"fixed64,2,opt,name=sample_sum" json:"sample_sum,omitempty"` + Bucket []*Bucket `protobuf:"bytes,3,rep,name=bucket" json:"bucket,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Histogram) Reset() { *m = Histogram{} } +func (m *Histogram) String() string { return proto.CompactTextString(m) } +func (*Histogram) ProtoMessage() {} + +func (m *Histogram) GetSampleCount() uint64 { + if m != nil && m.SampleCount != nil { + return *m.SampleCount + } + return 0 +} + +func (m *Histogram) GetSampleSum() float64 { + if m != nil && m.SampleSum != nil { + return *m.SampleSum + } + return 0 +} + +func (m *Histogram) GetBucket() []*Bucket { + if m != nil { + return m.Bucket + } + return nil +} + +type Bucket struct { + CumulativeCount *uint64 `protobuf:"varint,1,opt,name=cumulative_count" json:"cumulative_count,omitempty"` + UpperBound *float64 `protobuf:"fixed64,2,opt,name=upper_bound" json:"upper_bound,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Bucket) Reset() { *m = Bucket{} } +func (m *Bucket) String() string { return proto.CompactTextString(m) } +func (*Bucket) ProtoMessage() {} + +func (m *Bucket) GetCumulativeCount() uint64 { + if m != nil && m.CumulativeCount != nil { + return 
*m.CumulativeCount + } + return 0 +} + +func (m *Bucket) GetUpperBound() float64 { + if m != nil && m.UpperBound != nil { + return *m.UpperBound + } + return 0 +} + +type Metric struct { + Label []*LabelPair `protobuf:"bytes,1,rep,name=label" json:"label,omitempty"` + Gauge *Gauge `protobuf:"bytes,2,opt,name=gauge" json:"gauge,omitempty"` + Counter *Counter `protobuf:"bytes,3,opt,name=counter" json:"counter,omitempty"` + Summary *Summary `protobuf:"bytes,4,opt,name=summary" json:"summary,omitempty"` + Untyped *Untyped `protobuf:"bytes,5,opt,name=untyped" json:"untyped,omitempty"` + Histogram *Histogram `protobuf:"bytes,7,opt,name=histogram" json:"histogram,omitempty"` + TimestampMs *int64 `protobuf:"varint,6,opt,name=timestamp_ms" json:"timestamp_ms,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *Metric) Reset() { *m = Metric{} } +func (m *Metric) String() string { return proto.CompactTextString(m) } +func (*Metric) ProtoMessage() {} + +func (m *Metric) GetLabel() []*LabelPair { + if m != nil { + return m.Label + } + return nil +} + +func (m *Metric) GetGauge() *Gauge { + if m != nil { + return m.Gauge + } + return nil +} + +func (m *Metric) GetCounter() *Counter { + if m != nil { + return m.Counter + } + return nil +} + +func (m *Metric) GetSummary() *Summary { + if m != nil { + return m.Summary + } + return nil +} + +func (m *Metric) GetUntyped() *Untyped { + if m != nil { + return m.Untyped + } + return nil +} + +func (m *Metric) GetHistogram() *Histogram { + if m != nil { + return m.Histogram + } + return nil +} + +func (m *Metric) GetTimestampMs() int64 { + if m != nil && m.TimestampMs != nil { + return *m.TimestampMs + } + return 0 +} + +type MetricFamily struct { + Name *string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + Help *string `protobuf:"bytes,2,opt,name=help" json:"help,omitempty"` + Type *MetricType `protobuf:"varint,3,opt,name=type,enum=io.prometheus.client.MetricType" json:"type,omitempty"` + Metric []*Metric 
`protobuf:"bytes,4,rep,name=metric" json:"metric,omitempty"` + XXX_unrecognized []byte `json:"-"` +} + +func (m *MetricFamily) Reset() { *m = MetricFamily{} } +func (m *MetricFamily) String() string { return proto.CompactTextString(m) } +func (*MetricFamily) ProtoMessage() {} + +func (m *MetricFamily) GetName() string { + if m != nil && m.Name != nil { + return *m.Name + } + return "" +} + +func (m *MetricFamily) GetHelp() string { + if m != nil && m.Help != nil { + return *m.Help + } + return "" +} + +func (m *MetricFamily) GetType() MetricType { + if m != nil && m.Type != nil { + return *m.Type + } + return MetricType_COUNTER +} + +func (m *MetricFamily) GetMetric() []*Metric { + if m != nil { + return m.Metric + } + return nil +} + +func init() { + proto.RegisterEnum("io.prometheus.client.MetricType", MetricType_name, MetricType_value) +} diff --git a/vendor/github.com/prometheus/client_model/metrics.proto b/vendor/github.com/prometheus/client_model/metrics.proto new file mode 100644 index 000000000..0b84af920 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/metrics.proto @@ -0,0 +1,81 @@ +// Copyright 2013 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +syntax = "proto2"; + +package io.prometheus.client; +option java_package = "io.prometheus.client"; + +message LabelPair { + optional string name = 1; + optional string value = 2; +} + +enum MetricType { + COUNTER = 0; + GAUGE = 1; + SUMMARY = 2; + UNTYPED = 3; + HISTOGRAM = 4; +} + +message Gauge { + optional double value = 1; +} + +message Counter { + optional double value = 1; +} + +message Quantile { + optional double quantile = 1; + optional double value = 2; +} + +message Summary { + optional uint64 sample_count = 1; + optional double sample_sum = 2; + repeated Quantile quantile = 3; +} + +message Untyped { + optional double value = 1; +} + +message Histogram { + optional uint64 sample_count = 1; + optional double sample_sum = 2; + repeated Bucket bucket = 3; // Ordered in increasing order of upper_bound, +Inf bucket is optional. +} + +message Bucket { + optional uint64 cumulative_count = 1; // Cumulative in increasing order. + optional double upper_bound = 2; // Inclusive. +} + +message Metric { + repeated LabelPair label = 1; + optional Gauge gauge = 2; + optional Counter counter = 3; + optional Summary summary = 4; + optional Untyped untyped = 5; + optional Histogram histogram = 7; + optional int64 timestamp_ms = 6; +} + +message MetricFamily { + optional string name = 1; + optional string help = 2; + optional MetricType type = 3; + repeated Metric metric = 4; +} diff --git a/vendor/github.com/prometheus/client_model/pom.xml b/vendor/github.com/prometheus/client_model/pom.xml new file mode 100644 index 000000000..4d34c9015 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/pom.xml @@ -0,0 +1,130 @@ + + + 4.0.0 + + io.prometheus.client + model + 0.0.3-SNAPSHOT + + + org.sonatype.oss + oss-parent + 7 + + + Prometheus Client Data Model + http://github.com/prometheus/client_model + + Prometheus Client Data Model: Generated Protocol Buffer Assets + + + + + The Apache Software License, Version 2.0 + http://www.apache.org/licenses/LICENSE-2.0.txt + 
repo + + + + + scm:git:git@github.com:prometheus/client_model.git + scm:git:git@github.com:prometheus/client_model.git + git@github.com:prometheus/client_model.git + + + + + mtp + Matt T. Proud + matt.proud@gmail.com + + + + + + com.google.protobuf + protobuf-java + 2.5.0 + + + + + + + org.apache.maven.plugins + maven-javadoc-plugin + 2.8 + + UTF-8 + UTF-8 + true + + + + generate-javadoc-site-report + site + + javadoc + + + + attach-javadocs + + jar + + + + + + maven-compiler-plugin + + 1.6 + 1.6 + + 3.1 + + + org.apache.maven.plugins + maven-source-plugin + 2.2.1 + + + attach-sources + + jar + + + + + + + + + release-sign-artifacts + + + performRelease + true + + + + + + org.apache.maven.plugins + maven-gpg-plugin + 1.4 + + + sign-artifacts + verify + + sign + + + + + + + + + diff --git a/vendor/github.com/prometheus/client_model/setup.py b/vendor/github.com/prometheus/client_model/setup.py new file mode 100644 index 000000000..67b9f20e3 --- /dev/null +++ b/vendor/github.com/prometheus/client_model/setup.py @@ -0,0 +1,23 @@ +#!/usr/bin/python + +from setuptools import setup + +setup( + name = 'prometheus_client_model', + version = '0.0.1', + author = 'Matt T. 
Proud', + author_email = 'matt.proud@gmail.com', + description = 'Data model artifacts for the Prometheus client.', + license = 'Apache License 2.0', + url = 'http://github.com/prometheus/client_model', + packages = ['prometheus', 'prometheus/client', 'prometheus/client/model'], + package_dir = {'': 'python'}, + requires = ['protobuf(==2.4.1)'], + platforms = 'Platform Independent', + classifiers = ['Development Status :: 3 - Alpha', + 'Intended Audience :: Developers', + 'Intended Audience :: System Administrators', + 'License :: OSI Approved :: Apache Software License', + 'Operating System :: OS Independent', + 'Topic :: Software Development :: Testing', + 'Topic :: System :: Monitoring']) diff --git a/vendor/github.com/prometheus/common/.travis.yml b/vendor/github.com/prometheus/common/.travis.yml new file mode 100644 index 000000000..2fe8e9ad7 --- /dev/null +++ b/vendor/github.com/prometheus/common/.travis.yml @@ -0,0 +1,6 @@ +sudo: false + +language: go +go: + - 1.7.5 + - tip diff --git a/vendor/github.com/prometheus/common/CONTRIBUTING.md b/vendor/github.com/prometheus/common/CONTRIBUTING.md new file mode 100644 index 000000000..40503edbf --- /dev/null +++ b/vendor/github.com/prometheus/common/CONTRIBUTING.md @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull request, + addressing (with `@...`) the maintainer of this repository (see + [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. 
+ +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). diff --git a/vendor/github.com/prometheus/common/LICENSE b/vendor/github.com/prometheus/common/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/prometheus/common/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. 
+ + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). + + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. 
Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. 
You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. 
+ + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. 
In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. + + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. 
+ + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/common/MAINTAINERS.md b/vendor/github.com/prometheus/common/MAINTAINERS.md new file mode 100644 index 000000000..1b3152161 --- /dev/null +++ b/vendor/github.com/prometheus/common/MAINTAINERS.md @@ -0,0 +1 @@ +* Fabian Reinartz diff --git a/vendor/github.com/prometheus/common/NOTICE b/vendor/github.com/prometheus/common/NOTICE new file mode 100644 index 000000000..636a2c1a5 --- /dev/null +++ b/vendor/github.com/prometheus/common/NOTICE @@ -0,0 +1,5 @@ +Common libraries shared by Prometheus Go components. +Copyright 2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/common/README.md b/vendor/github.com/prometheus/common/README.md new file mode 100644 index 000000000..11a584945 --- /dev/null +++ b/vendor/github.com/prometheus/common/README.md @@ -0,0 +1,12 @@ +# Common +[![Build Status](https://travis-ci.org/prometheus/common.svg)](https://travis-ci.org/prometheus/common) + +This repository contains Go libraries that are shared across Prometheus +components and libraries. 
+ +* **config**: Common configuration structures +* **expfmt**: Decoding and encoding for the exposition format +* **log**: A logging wrapper around [logrus](https://github.com/sirupsen/logrus) +* **model**: Shared data structures +* **route**: A routing wrapper around [httprouter](https://github.com/julienschmidt/httprouter) using `context.Context` +* **version**: Version informations and metric diff --git a/vendor/github.com/prometheus/common/expfmt/bench_test.go b/vendor/github.com/prometheus/common/expfmt/bench_test.go new file mode 100644 index 000000000..e539bfc13 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/bench_test.go @@ -0,0 +1,167 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bytes" + "compress/gzip" + "io" + "io/ioutil" + "testing" + + "github.com/matttproud/golang_protobuf_extensions/pbutil" + + dto "github.com/prometheus/client_model/go" +) + +var parser TextParser + +// Benchmarks to show how much penalty text format parsing actually inflicts. +// +// Example results on Linux 3.13.0, Intel(R) Core(TM) i7-4700MQ CPU @ 2.40GHz, go1.4. 
+// +// BenchmarkParseText 1000 1188535 ns/op 205085 B/op 6135 allocs/op +// BenchmarkParseTextGzip 1000 1376567 ns/op 246224 B/op 6151 allocs/op +// BenchmarkParseProto 10000 172790 ns/op 52258 B/op 1160 allocs/op +// BenchmarkParseProtoGzip 5000 324021 ns/op 94931 B/op 1211 allocs/op +// BenchmarkParseProtoMap 10000 187946 ns/op 58714 B/op 1203 allocs/op +// +// CONCLUSION: The overhead for the map is negligible. Text format needs ~5x more allocations. +// Without compression, it needs ~7x longer, but with compression (the more relevant scenario), +// the difference becomes less relevant, only ~4x. +// +// The test data contains 248 samples. + +// BenchmarkParseText benchmarks the parsing of a text-format scrape into metric +// family DTOs. +func BenchmarkParseText(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/text") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + if _, err := parser.TextToMetricFamilies(bytes.NewReader(data)); err != nil { + b.Fatal(err) + } + } +} + +// BenchmarkParseTextGzip benchmarks the parsing of a gzipped text-format scrape +// into metric family DTOs. +func BenchmarkParseTextGzip(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/text.gz") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + in, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + b.Fatal(err) + } + if _, err := parser.TextToMetricFamilies(in); err != nil { + b.Fatal(err) + } + } +} + +// BenchmarkParseProto benchmarks the parsing of a protobuf-format scrape into +// metric family DTOs. Note that this does not build a map of metric families +// (as the text version does), because it is not required for Prometheus +// ingestion either. (However, it is required for the text-format parsing, as +// the metric family might be sprinkled all over the text, while the +// protobuf-format guarantees bundling at one place.) 
+func BenchmarkParseProto(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/protobuf") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + family := &dto.MetricFamily{} + in := bytes.NewReader(data) + for { + family.Reset() + if _, err := pbutil.ReadDelimited(in, family); err != nil { + if err == io.EOF { + break + } + b.Fatal(err) + } + } + } +} + +// BenchmarkParseProtoGzip is like BenchmarkParseProto above, but parses gzipped +// protobuf format. +func BenchmarkParseProtoGzip(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/protobuf.gz") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + family := &dto.MetricFamily{} + in, err := gzip.NewReader(bytes.NewReader(data)) + if err != nil { + b.Fatal(err) + } + for { + family.Reset() + if _, err := pbutil.ReadDelimited(in, family); err != nil { + if err == io.EOF { + break + } + b.Fatal(err) + } + } + } +} + +// BenchmarkParseProtoMap is like BenchmarkParseProto but DOES put the parsed +// metric family DTOs into a map. This is not happening during Prometheus +// ingestion. It is just here to measure the overhead of that map creation and +// separate it from the overhead of the text format parsing. 
+func BenchmarkParseProtoMap(b *testing.B) { + b.StopTimer() + data, err := ioutil.ReadFile("testdata/protobuf") + if err != nil { + b.Fatal(err) + } + b.StartTimer() + + for i := 0; i < b.N; i++ { + families := map[string]*dto.MetricFamily{} + in := bytes.NewReader(data) + for { + family := &dto.MetricFamily{} + if _, err := pbutil.ReadDelimited(in, family); err != nil { + if err == io.EOF { + break + } + b.Fatal(err) + } + families[family.GetName()] = family + } + } +} diff --git a/vendor/github.com/prometheus/common/expfmt/decode.go b/vendor/github.com/prometheus/common/expfmt/decode.go new file mode 100644 index 000000000..a7a42d5ef --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/decode.go @@ -0,0 +1,429 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "math" + "mime" + "net/http" + + dto "github.com/prometheus/client_model/go" + + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/model" +) + +// Decoder types decode an input stream into metric families. +type Decoder interface { + Decode(*dto.MetricFamily) error +} + +// DecodeOptions contains options used by the Decoder and in sample extraction. +type DecodeOptions struct { + // Timestamp is added to each value from the stream that has no explicit timestamp set. 
+ Timestamp model.Time +} + +// ResponseFormat extracts the correct format from a HTTP response header. +// If no matching format can be found FormatUnknown is returned. +func ResponseFormat(h http.Header) Format { + ct := h.Get(hdrContentType) + + mediatype, params, err := mime.ParseMediaType(ct) + if err != nil { + return FmtUnknown + } + + const textType = "text/plain" + + switch mediatype { + case ProtoType: + if p, ok := params["proto"]; ok && p != ProtoProtocol { + return FmtUnknown + } + if e, ok := params["encoding"]; ok && e != "delimited" { + return FmtUnknown + } + return FmtProtoDelim + + case textType: + if v, ok := params["version"]; ok && v != TextVersion { + return FmtUnknown + } + return FmtText + } + + return FmtUnknown +} + +// NewDecoder returns a new decoder based on the given input format. +// If the input format does not imply otherwise, a text format decoder is returned. +func NewDecoder(r io.Reader, format Format) Decoder { + switch format { + case FmtProtoDelim: + return &protoDecoder{r: r} + } + return &textDecoder{r: r} +} + +// protoDecoder implements the Decoder interface for protocol buffers. +type protoDecoder struct { + r io.Reader +} + +// Decode implements the Decoder interface. +func (d *protoDecoder) Decode(v *dto.MetricFamily) error { + _, err := pbutil.ReadDelimited(d.r, v) + if err != nil { + return err + } + if !model.IsValidMetricName(model.LabelValue(v.GetName())) { + return fmt.Errorf("invalid metric name %q", v.GetName()) + } + for _, m := range v.GetMetric() { + if m == nil { + continue + } + for _, l := range m.GetLabel() { + if l == nil { + continue + } + if !model.LabelValue(l.GetValue()).IsValid() { + return fmt.Errorf("invalid label value %q", l.GetValue()) + } + if !model.LabelName(l.GetName()).IsValid() { + return fmt.Errorf("invalid label name %q", l.GetName()) + } + } + } + return nil +} + +// textDecoder implements the Decoder interface for the text protocol. 
+type textDecoder struct { + r io.Reader + p TextParser + fams []*dto.MetricFamily +} + +// Decode implements the Decoder interface. +func (d *textDecoder) Decode(v *dto.MetricFamily) error { + // TODO(fabxc): Wrap this as a line reader to make streaming safer. + if len(d.fams) == 0 { + // No cached metric families, read everything and parse metrics. + fams, err := d.p.TextToMetricFamilies(d.r) + if err != nil { + return err + } + if len(fams) == 0 { + return io.EOF + } + d.fams = make([]*dto.MetricFamily, 0, len(fams)) + for _, f := range fams { + d.fams = append(d.fams, f) + } + } + + *v = *d.fams[0] + d.fams = d.fams[1:] + + return nil +} + +// SampleDecoder wraps a Decoder to extract samples from the metric families +// decoded by the wrapped Decoder. +type SampleDecoder struct { + Dec Decoder + Opts *DecodeOptions + + f dto.MetricFamily +} + +// Decode calls the Decode method of the wrapped Decoder and then extracts the +// samples from the decoded MetricFamily into the provided model.Vector. +func (sd *SampleDecoder) Decode(s *model.Vector) error { + err := sd.Dec.Decode(&sd.f) + if err != nil { + return err + } + *s, err = extractSamples(&sd.f, sd.Opts) + return err +} + +// ExtractSamples builds a slice of samples from the provided metric +// families. If an error occurs during sample extraction, it continues to +// extract from the remaining metric families. The returned error is the last +// error that has occured. +func ExtractSamples(o *DecodeOptions, fams ...*dto.MetricFamily) (model.Vector, error) { + var ( + all model.Vector + lastErr error + ) + for _, f := range fams { + some, err := extractSamples(f, o) + if err != nil { + lastErr = err + continue + } + all = append(all, some...) 
+ } + return all, lastErr +} + +func extractSamples(f *dto.MetricFamily, o *DecodeOptions) (model.Vector, error) { + switch f.GetType() { + case dto.MetricType_COUNTER: + return extractCounter(o, f), nil + case dto.MetricType_GAUGE: + return extractGauge(o, f), nil + case dto.MetricType_SUMMARY: + return extractSummary(o, f), nil + case dto.MetricType_UNTYPED: + return extractUntyped(o, f), nil + case dto.MetricType_HISTOGRAM: + return extractHistogram(o, f), nil + } + return nil, fmt.Errorf("expfmt.extractSamples: unknown metric family type %v", f.GetType()) +} + +func extractCounter(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Counter == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Counter.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractGauge(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Gauge == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Gauge.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) 
+ } + + return samples +} + +func extractUntyped(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Untyped == nil { + continue + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + smpl := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Untyped.GetValue()), + } + + if m.TimestampMs != nil { + smpl.Timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } else { + smpl.Timestamp = o.Timestamp + } + + samples = append(samples, smpl) + } + + return samples +} + +func extractSummary(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Summary == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + for _, q := range m.Summary.Quantile { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + // BUG(matt): Update other names to "quantile". 
+ lset[model.LabelName(model.QuantileLabel)] = model.LabelValue(fmt.Sprint(q.GetQuantile())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName()) + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetValue()), + Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Summary.GetSampleCount()), + Timestamp: timestamp, + }) + } + + return samples +} + +func extractHistogram(o *DecodeOptions, f *dto.MetricFamily) model.Vector { + samples := make(model.Vector, 0, len(f.Metric)) + + for _, m := range f.Metric { + if m.Histogram == nil { + continue + } + + timestamp := o.Timestamp + if m.TimestampMs != nil { + timestamp = model.TimeFromUnixNano(*m.TimestampMs * 1000000) + } + + infSeen := false + + for _, q := range m.Histogram.Bucket { + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue(fmt.Sprint(q.GetUpperBound())) + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(q.GetCumulativeCount()), + 
Timestamp: timestamp, + }) + } + + lset := make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_sum") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleSum()), + Timestamp: timestamp, + }) + + lset = make(model.LabelSet, len(m.Label)+1) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_count") + + count := &model.Sample{ + Metric: model.Metric(lset), + Value: model.SampleValue(m.Histogram.GetSampleCount()), + Timestamp: timestamp, + } + samples = append(samples, count) + + if !infSeen { + // Append an infinity bucket sample. + lset := make(model.LabelSet, len(m.Label)+2) + for _, p := range m.Label { + lset[model.LabelName(p.GetName())] = model.LabelValue(p.GetValue()) + } + lset[model.LabelName(model.BucketLabel)] = model.LabelValue("+Inf") + lset[model.MetricNameLabel] = model.LabelValue(f.GetName() + "_bucket") + + samples = append(samples, &model.Sample{ + Metric: model.Metric(lset), + Value: count.Value, + Timestamp: timestamp, + }) + } + } + + return samples +} diff --git a/vendor/github.com/prometheus/common/expfmt/decode_test.go b/vendor/github.com/prometheus/common/expfmt/decode_test.go new file mode 100644 index 000000000..82c1130c9 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/decode_test.go @@ -0,0 +1,435 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "io" + "net/http" + "reflect" + "sort" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + dto "github.com/prometheus/client_model/go" + + "github.com/prometheus/common/model" +) + +func TestTextDecoder(t *testing.T) { + var ( + ts = model.Now() + in = ` +# Only a quite simple scenario with two metric families. +# More complicated tests of the parser itself can be found in the text package. +# TYPE mf2 counter +mf2 3 +mf1{label="value1"} -3.14 123456 +mf1{label="value2"} 42 +mf2 4 +` + out = model.Vector{ + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "mf1", + "label": "value1", + }, + Value: -3.14, + Timestamp: 123456, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "mf1", + "label": "value2", + }, + Value: 42, + Timestamp: ts, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "mf2", + }, + Value: 3, + Timestamp: ts, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "mf2", + }, + Value: 4, + Timestamp: ts, + }, + } + ) + + dec := &SampleDecoder{ + Dec: &textDecoder{r: strings.NewReader(in)}, + Opts: &DecodeOptions{ + Timestamp: ts, + }, + } + var all model.Vector + for { + var smpls model.Vector + err := dec.Decode(&smpls) + if err == io.EOF { + break + } + if err != nil { + t.Fatal(err) + } + all = append(all, smpls...) 
+ } + sort.Sort(all) + sort.Sort(out) + if !reflect.DeepEqual(all, out) { + t.Fatalf("output does not match") + } +} + +func TestProtoDecoder(t *testing.T) { + + var testTime = model.Now() + + scenarios := []struct { + in string + expected model.Vector + fail bool + }{ + { + in: "", + }, + { + in: "\x8f\x01\n\rrequest_count\x12\x12Number of requests\x18\x00\"0\n#\n\x0fsome_!abel_name\x12\x10some_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00E\xc0\"6\n)\n\x12another_label_name\x12\x13another_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00U@", + fail: true, + }, + { + in: "\x8f\x01\n\rrequest_count\x12\x12Number of requests\x18\x00\"0\n#\n\x0fsome_label_name\x12\x10some_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00E\xc0\"6\n)\n\x12another_label_name\x12\x13another_label_value\x1a\t\t\x00\x00\x00\x00\x00\x00U@", + expected: model.Vector{ + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count", + "some_label_name": "some_label_value", + }, + Value: -42, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count", + "another_label_name": "another_label_value", + }, + Value: 84, + Timestamp: testTime, + }, + }, + }, + { + in: "\xb9\x01\n\rrequest_count\x12\x12Number of requests\x18\x02\"O\n#\n\x0fsome_label_name\x12\x10some_label_value\"(\x1a\x12\t\xaeG\xe1z\x14\xae\xef?\x11\x00\x00\x00\x00\x00\x00E\xc0\x1a\x12\t+\x87\x16\xd9\xce\xf7\xef?\x11\x00\x00\x00\x00\x00\x00U\xc0\"A\n)\n\x12another_label_name\x12\x13another_label_value\"\x14\x1a\x12\t\x00\x00\x00\x00\x00\x00\xe0?\x11\x00\x00\x00\x00\x00\x00$@", + expected: model.Vector{ + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count_count", + "some_label_name": "some_label_value", + }, + Value: 0, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count_sum", + "some_label_name": "some_label_value", + }, + Value: 0, + Timestamp: testTime, + }, + &model.Sample{ + Metric: 
model.Metric{ + model.MetricNameLabel: "request_count", + "some_label_name": "some_label_value", + "quantile": "0.99", + }, + Value: -42, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count", + "some_label_name": "some_label_value", + "quantile": "0.999", + }, + Value: -84, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count_count", + "another_label_name": "another_label_value", + }, + Value: 0, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count_sum", + "another_label_name": "another_label_value", + }, + Value: 0, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count", + "another_label_name": "another_label_value", + "quantile": "0.5", + }, + Value: 10, + Timestamp: testTime, + }, + }, + }, + { + in: "\x8d\x01\n\x1drequest_duration_microseconds\x12\x15The response latency.\x18\x04\"S:Q\b\x85\x15\x11\xcd\xcc\xccL\x8f\xcb:A\x1a\v\b{\x11\x00\x00\x00\x00\x00\x00Y@\x1a\f\b\x9c\x03\x11\x00\x00\x00\x00\x00\x00^@\x1a\f\b\xd0\x04\x11\x00\x00\x00\x00\x00\x00b@\x1a\f\b\xf4\v\x11\x9a\x99\x99\x99\x99\x99e@\x1a\f\b\x85\x15\x11\x00\x00\x00\x00\x00\x00\xf0\u007f", + expected: model.Vector{ + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_bucket", + "le": "100", + }, + Value: 123, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_bucket", + "le": "120", + }, + Value: 412, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_bucket", + "le": "144", + }, + Value: 592, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_bucket", + "le": "172.8", + }, + Value: 1524, + Timestamp: testTime, + }, + 
&model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_bucket", + "le": "+Inf", + }, + Value: 2693, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_sum", + }, + Value: 1756047.3, + Timestamp: testTime, + }, + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_duration_microseconds_count", + }, + Value: 2693, + Timestamp: testTime, + }, + }, + }, + { + // The metric type is unset in this protobuf, which needs to be handled + // correctly by the decoder. + in: "\x1c\n\rrequest_count\"\v\x1a\t\t\x00\x00\x00\x00\x00\x00\xf0?", + expected: model.Vector{ + &model.Sample{ + Metric: model.Metric{ + model.MetricNameLabel: "request_count", + }, + Value: 1, + Timestamp: testTime, + }, + }, + }, + } + + for i, scenario := range scenarios { + dec := &SampleDecoder{ + Dec: &protoDecoder{r: strings.NewReader(scenario.in)}, + Opts: &DecodeOptions{ + Timestamp: testTime, + }, + } + + var all model.Vector + for { + var smpls model.Vector + err := dec.Decode(&smpls) + if err == io.EOF { + break + } + if scenario.fail { + if err == nil { + t.Fatal("Expected error but got none") + } + break + } + if err != nil { + t.Fatal(err) + } + all = append(all, smpls...) + } + sort.Sort(all) + sort.Sort(scenario.expected) + if !reflect.DeepEqual(all, scenario.expected) { + t.Fatalf("%d. 
output does not match, want: %#v, got %#v", i, scenario.expected, all) + } + } +} + +func testDiscriminatorHTTPHeader(t testing.TB) { + var scenarios = []struct { + input map[string]string + output Format + err error + }{ + { + input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="delimited"`}, + output: FmtProtoDelim, + }, + { + input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="illegal"; encoding="delimited"`}, + output: FmtUnknown, + }, + { + input: map[string]string{"Content-Type": `application/vnd.google.protobuf; proto="io.prometheus.client.MetricFamily"; encoding="illegal"`}, + output: FmtUnknown, + }, + { + input: map[string]string{"Content-Type": `text/plain; version=0.0.4`}, + output: FmtText, + }, + { + input: map[string]string{"Content-Type": `text/plain`}, + output: FmtText, + }, + { + input: map[string]string{"Content-Type": `text/plain; version=0.0.3`}, + output: FmtUnknown, + }, + } + + for i, scenario := range scenarios { + var header http.Header + + if len(scenario.input) > 0 { + header = http.Header{} + } + + for key, value := range scenario.input { + header.Add(key, value) + } + + actual := ResponseFormat(header) + + if scenario.output != actual { + t.Errorf("%d. 
expected %s, got %s", i, scenario.output, actual) + } + } +} + +func TestDiscriminatorHTTPHeader(t *testing.T) { + testDiscriminatorHTTPHeader(t) +} + +func BenchmarkDiscriminatorHTTPHeader(b *testing.B) { + for i := 0; i < b.N; i++ { + testDiscriminatorHTTPHeader(b) + } +} + +func TestExtractSamples(t *testing.T) { + var ( + goodMetricFamily1 = &dto.MetricFamily{ + Name: proto.String("foo"), + Help: proto.String("Help for foo."), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Counter: &dto.Counter{ + Value: proto.Float64(4711), + }, + }, + }, + } + goodMetricFamily2 = &dto.MetricFamily{ + Name: proto.String("bar"), + Help: proto.String("Help for bar."), + Type: dto.MetricType_GAUGE.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Gauge: &dto.Gauge{ + Value: proto.Float64(3.14), + }, + }, + }, + } + badMetricFamily = &dto.MetricFamily{ + Name: proto.String("bad"), + Help: proto.String("Help for bad."), + Type: dto.MetricType(42).Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Gauge: &dto.Gauge{ + Value: proto.Float64(2.7), + }, + }, + }, + } + + opts = &DecodeOptions{ + Timestamp: 42, + } + ) + + got, err := ExtractSamples(opts, goodMetricFamily1, goodMetricFamily2) + if err != nil { + t.Error("Unexpected error from ExtractSamples:", err) + } + want := model.Vector{ + &model.Sample{Metric: model.Metric{model.MetricNameLabel: "foo"}, Value: 4711, Timestamp: 42}, + &model.Sample{Metric: model.Metric{model.MetricNameLabel: "bar"}, Value: 3.14, Timestamp: 42}, + } + if !reflect.DeepEqual(got, want) { + t.Errorf("unexpected samples extracted, got: %v, want: %v", got, want) + } + + got, err = ExtractSamples(opts, goodMetricFamily1, badMetricFamily, goodMetricFamily2) + if err == nil { + t.Error("Expected error from ExtractSamples") + } + if !reflect.DeepEqual(got, want) { + t.Errorf("unexpected samples extracted, got: %v, want: %v", got, want) + } +} diff --git a/vendor/github.com/prometheus/common/expfmt/encode.go 
b/vendor/github.com/prometheus/common/expfmt/encode.go new file mode 100644 index 000000000..11839ed65 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/encode.go @@ -0,0 +1,88 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "net/http" + + "github.com/golang/protobuf/proto" + "github.com/matttproud/golang_protobuf_extensions/pbutil" + "github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg" + + dto "github.com/prometheus/client_model/go" +) + +// Encoder types encode metric families into an underlying wire protocol. +type Encoder interface { + Encode(*dto.MetricFamily) error +} + +type encoder func(*dto.MetricFamily) error + +func (e encoder) Encode(v *dto.MetricFamily) error { + return e(v) +} + +// Negotiate returns the Content-Type based on the given Accept header. +// If no appropriate accepted type is found, FmtText is returned. +func Negotiate(h http.Header) Format { + for _, ac := range goautoneg.ParseAccept(h.Get(hdrAccept)) { + // Check for protocol buffer + if ac.Type+"/"+ac.SubType == ProtoType && ac.Params["proto"] == ProtoProtocol { + switch ac.Params["encoding"] { + case "delimited": + return FmtProtoDelim + case "text": + return FmtProtoText + case "compact-text": + return FmtProtoCompact + } + } + // Check for text format. 
+ ver := ac.Params["version"] + if ac.Type == "text" && ac.SubType == "plain" && (ver == TextVersion || ver == "") { + return FmtText + } + } + return FmtText +} + +// NewEncoder returns a new encoder based on content type negotiation. +func NewEncoder(w io.Writer, format Format) Encoder { + switch format { + case FmtProtoDelim: + return encoder(func(v *dto.MetricFamily) error { + _, err := pbutil.WriteDelimited(w, v) + return err + }) + case FmtProtoCompact: + return encoder(func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, v.String()) + return err + }) + case FmtProtoText: + return encoder(func(v *dto.MetricFamily) error { + _, err := fmt.Fprintln(w, proto.MarshalTextString(v)) + return err + }) + case FmtText: + return encoder(func(v *dto.MetricFamily) error { + _, err := MetricFamilyToText(w, v) + return err + }) + } + panic("expfmt.NewEncoder: unknown format") +} diff --git a/vendor/github.com/prometheus/common/expfmt/expfmt.go b/vendor/github.com/prometheus/common/expfmt/expfmt.go new file mode 100644 index 000000000..c71bcb981 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/expfmt.go @@ -0,0 +1,38 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package expfmt contains tools for reading and writing Prometheus metrics. +package expfmt + +// Format specifies the HTTP content type of the different wire protocols. 
+type Format string + +// Constants to assemble the Content-Type values for the different wire protocols. +const ( + TextVersion = "0.0.4" + ProtoType = `application/vnd.google.protobuf` + ProtoProtocol = `io.prometheus.client.MetricFamily` + ProtoFmt = ProtoType + "; proto=" + ProtoProtocol + ";" + + // The Content-Type values for the different wire protocols. + FmtUnknown Format = `` + FmtText Format = `text/plain; version=` + TextVersion + `; charset=utf-8` + FmtProtoDelim Format = ProtoFmt + ` encoding=delimited` + FmtProtoText Format = ProtoFmt + ` encoding=text` + FmtProtoCompact Format = ProtoFmt + ` encoding=compact-text` +) + +const ( + hdrContentType = "Content-Type" + hdrAccept = "Accept" +) diff --git a/vendor/github.com/prometheus/common/expfmt/fuzz.go b/vendor/github.com/prometheus/common/expfmt/fuzz.go new file mode 100644 index 000000000..dc2eedeef --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/fuzz.go @@ -0,0 +1,36 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Build only when actually fuzzing +// +build gofuzz + +package expfmt + +import "bytes" + +// Fuzz text metric parser with with github.com/dvyukov/go-fuzz: +// +// go-fuzz-build github.com/prometheus/common/expfmt +// go-fuzz -bin expfmt-fuzz.zip -workdir fuzz +// +// Further input samples should go in the folder fuzz/corpus. 
+func Fuzz(in []byte) int { + parser := TextParser{} + _, err := parser.TextToMetricFamilies(bytes.NewReader(in)) + + if err != nil { + return 0 + } + + return 1 +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create.go b/vendor/github.com/prometheus/common/expfmt/text_create.go new file mode 100644 index 000000000..f11321cd0 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_create.go @@ -0,0 +1,303 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "fmt" + "io" + "math" + "strings" + + dto "github.com/prometheus/client_model/go" + "github.com/prometheus/common/model" +) + +// MetricFamilyToText converts a MetricFamily proto message into text format and +// writes the resulting lines to 'out'. It returns the number of bytes written +// and any error encountered. The output will have the same order as the input, +// no further sorting is performed. Furthermore, this function assumes the input +// is already sanitized and does not perform any sanity checks. If the input +// contains duplicate metrics or invalid metric or label names, the conversion +// will result in invalid text format output. +// +// This method fulfills the type 'prometheus.encoder'. +func MetricFamilyToText(out io.Writer, in *dto.MetricFamily) (int, error) { + var written int + + // Fail-fast checks. 
+ if len(in.Metric) == 0 { + return written, fmt.Errorf("MetricFamily has no metrics: %s", in) + } + name := in.GetName() + if name == "" { + return written, fmt.Errorf("MetricFamily has no name: %s", in) + } + + // Comments, first HELP, then TYPE. + if in.Help != nil { + n, err := fmt.Fprintf( + out, "# HELP %s %s\n", + name, escapeString(*in.Help, false), + ) + written += n + if err != nil { + return written, err + } + } + metricType := in.GetType() + n, err := fmt.Fprintf( + out, "# TYPE %s %s\n", + name, strings.ToLower(metricType.String()), + ) + written += n + if err != nil { + return written, err + } + + // Finally the samples, one line for each. + for _, metric := range in.Metric { + switch metricType { + case dto.MetricType_COUNTER: + if metric.Counter == nil { + return written, fmt.Errorf( + "expected counter in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Counter.GetValue(), + out, + ) + case dto.MetricType_GAUGE: + if metric.Gauge == nil { + return written, fmt.Errorf( + "expected gauge in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Gauge.GetValue(), + out, + ) + case dto.MetricType_UNTYPED: + if metric.Untyped == nil { + return written, fmt.Errorf( + "expected untyped in metric %s %s", name, metric, + ) + } + n, err = writeSample( + name, metric, "", "", + metric.Untyped.GetValue(), + out, + ) + case dto.MetricType_SUMMARY: + if metric.Summary == nil { + return written, fmt.Errorf( + "expected summary in metric %s %s", name, metric, + ) + } + for _, q := range metric.Summary.Quantile { + n, err = writeSample( + name, metric, + model.QuantileLabel, fmt.Sprint(q.GetQuantile()), + q.GetValue(), + out, + ) + written += n + if err != nil { + return written, err + } + } + n, err = writeSample( + name+"_sum", metric, "", "", + metric.Summary.GetSampleSum(), + out, + ) + if err != nil { + return written, err + } + written += n + n, err = writeSample( + 
name+"_count", metric, "", "", + float64(metric.Summary.GetSampleCount()), + out, + ) + case dto.MetricType_HISTOGRAM: + if metric.Histogram == nil { + return written, fmt.Errorf( + "expected histogram in metric %s %s", name, metric, + ) + } + infSeen := false + for _, q := range metric.Histogram.Bucket { + n, err = writeSample( + name+"_bucket", metric, + model.BucketLabel, fmt.Sprint(q.GetUpperBound()), + float64(q.GetCumulativeCount()), + out, + ) + written += n + if err != nil { + return written, err + } + if math.IsInf(q.GetUpperBound(), +1) { + infSeen = true + } + } + if !infSeen { + n, err = writeSample( + name+"_bucket", metric, + model.BucketLabel, "+Inf", + float64(metric.Histogram.GetSampleCount()), + out, + ) + if err != nil { + return written, err + } + written += n + } + n, err = writeSample( + name+"_sum", metric, "", "", + metric.Histogram.GetSampleSum(), + out, + ) + if err != nil { + return written, err + } + written += n + n, err = writeSample( + name+"_count", metric, "", "", + float64(metric.Histogram.GetSampleCount()), + out, + ) + default: + return written, fmt.Errorf( + "unexpected type in metric %s %s", name, metric, + ) + } + written += n + if err != nil { + return written, err + } + } + return written, nil +} + +// writeSample writes a single sample in text format to out, given the metric +// name, the metric proto message itself, optionally an additional label name +// and value (use empty strings if not required), and the value. The function +// returns the number of bytes written and any error encountered. 
+func writeSample( + name string, + metric *dto.Metric, + additionalLabelName, additionalLabelValue string, + value float64, + out io.Writer, +) (int, error) { + var written int + n, err := fmt.Fprint(out, name) + written += n + if err != nil { + return written, err + } + n, err = labelPairsToText( + metric.Label, + additionalLabelName, additionalLabelValue, + out, + ) + written += n + if err != nil { + return written, err + } + n, err = fmt.Fprintf(out, " %v", value) + written += n + if err != nil { + return written, err + } + if metric.TimestampMs != nil { + n, err = fmt.Fprintf(out, " %v", *metric.TimestampMs) + written += n + if err != nil { + return written, err + } + } + n, err = out.Write([]byte{'\n'}) + written += n + if err != nil { + return written, err + } + return written, nil +} + +// labelPairsToText converts a slice of LabelPair proto messages plus the +// explicitly given additional label pair into text formatted as required by the +// text format and writes it to 'out'. An empty slice in combination with an +// empty string 'additionalLabelName' results in nothing being +// written. Otherwise, the label pairs are written, escaped as required by the +// text format, and enclosed in '{...}'. The function returns the number of +// bytes written and any error encountered. 
+func labelPairsToText( + in []*dto.LabelPair, + additionalLabelName, additionalLabelValue string, + out io.Writer, +) (int, error) { + if len(in) == 0 && additionalLabelName == "" { + return 0, nil + } + var written int + separator := '{' + for _, lp := range in { + n, err := fmt.Fprintf( + out, `%c%s="%s"`, + separator, lp.GetName(), escapeString(lp.GetValue(), true), + ) + written += n + if err != nil { + return written, err + } + separator = ',' + } + if additionalLabelName != "" { + n, err := fmt.Fprintf( + out, `%c%s="%s"`, + separator, additionalLabelName, + escapeString(additionalLabelValue, true), + ) + written += n + if err != nil { + return written, err + } + } + n, err := out.Write([]byte{'}'}) + written += n + if err != nil { + return written, err + } + return written, nil +} + +var ( + escape = strings.NewReplacer("\\", `\\`, "\n", `\n`) + escapeWithDoubleQuote = strings.NewReplacer("\\", `\\`, "\n", `\n`, "\"", `\"`) +) + +// escapeString replaces '\' by '\\', new line character by '\n', and - if +// includeDoubleQuote is true - '"' by '\"'. +func escapeString(v string, includeDoubleQuote bool) string { + if includeDoubleQuote { + return escapeWithDoubleQuote.Replace(v) + } + + return escape.Replace(v) +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_create_test.go b/vendor/github.com/prometheus/common/expfmt/text_create_test.go new file mode 100644 index 000000000..e4cc5d803 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_create_test.go @@ -0,0 +1,443 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bytes" + "math" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + + dto "github.com/prometheus/client_model/go" +) + +func testCreate(t testing.TB) { + var scenarios = []struct { + in *dto.MetricFamily + out string + }{ + // 0: Counter, NaN as value, timestamp given. + { + in: &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("two-line\n doc str\\ing"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val1"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("basevalue"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(math.NaN()), + }, + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("basevalue"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(.23), + }, + TimestampMs: proto.Int64(1234567890), + }, + }, + }, + out: `# HELP name two-line\n doc str\\ing +# TYPE name counter +name{labelname="val1",basename="basevalue"} NaN +name{labelname="val2",basename="basevalue"} 0.23 1234567890 +`, + }, + // 1: Gauge, some escaping required, +Inf as value, multi-byte characters in label values. 
+ { + in: &dto.MetricFamily{ + Name: proto.String("gauge_name"), + Help: proto.String("gauge\ndoc\nstr\"ing"), + Type: dto.MetricType_GAUGE.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("name_1"), + Value: proto.String("val with\nnew line"), + }, + &dto.LabelPair{ + Name: proto.String("name_2"), + Value: proto.String("val with \\backslash and \"quotes\""), + }, + }, + Gauge: &dto.Gauge{ + Value: proto.Float64(math.Inf(+1)), + }, + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("name_1"), + Value: proto.String("Björn"), + }, + &dto.LabelPair{ + Name: proto.String("name_2"), + Value: proto.String("佖佥"), + }, + }, + Gauge: &dto.Gauge{ + Value: proto.Float64(3.14E42), + }, + }, + }, + }, + out: `# HELP gauge_name gauge\ndoc\nstr"ing +# TYPE gauge_name gauge +gauge_name{name_1="val with\nnew line",name_2="val with \\backslash and \"quotes\""} +Inf +gauge_name{name_1="Björn",name_2="佖佥"} 3.14e+42 +`, + }, + // 2: Untyped, no help, one sample with no labels and -Inf as value, another sample with one label. + { + in: &dto.MetricFamily{ + Name: proto.String("untyped_name"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(math.Inf(-1)), + }, + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("name_1"), + Value: proto.String("value 1"), + }, + }, + Untyped: &dto.Untyped{ + Value: proto.Float64(-1.23e-45), + }, + }, + }, + }, + out: `# TYPE untyped_name untyped +untyped_name -Inf +untyped_name{name_1="value 1"} -1.23e-45 +`, + }, + // 3: Summary. 
+ { + in: &dto.MetricFamily{ + Name: proto.String("summary_name"), + Help: proto.String("summary docstring"), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Summary: &dto.Summary{ + SampleCount: proto.Uint64(42), + SampleSum: proto.Float64(-3.4567), + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.5), + Value: proto.Float64(-1.23), + }, + &dto.Quantile{ + Quantile: proto.Float64(0.9), + Value: proto.Float64(.2342354), + }, + &dto.Quantile{ + Quantile: proto.Float64(0.99), + Value: proto.Float64(0), + }, + }, + }, + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("name_1"), + Value: proto.String("value 1"), + }, + &dto.LabelPair{ + Name: proto.String("name_2"), + Value: proto.String("value 2"), + }, + }, + Summary: &dto.Summary{ + SampleCount: proto.Uint64(4711), + SampleSum: proto.Float64(2010.1971), + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.5), + Value: proto.Float64(1), + }, + &dto.Quantile{ + Quantile: proto.Float64(0.9), + Value: proto.Float64(2), + }, + &dto.Quantile{ + Quantile: proto.Float64(0.99), + Value: proto.Float64(3), + }, + }, + }, + }, + }, + }, + out: `# HELP summary_name summary docstring +# TYPE summary_name summary +summary_name{quantile="0.5"} -1.23 +summary_name{quantile="0.9"} 0.2342354 +summary_name{quantile="0.99"} 0 +summary_name_sum -3.4567 +summary_name_count 42 +summary_name{name_1="value 1",name_2="value 2",quantile="0.5"} 1 +summary_name{name_1="value 1",name_2="value 2",quantile="0.9"} 2 +summary_name{name_1="value 1",name_2="value 2",quantile="0.99"} 3 +summary_name_sum{name_1="value 1",name_2="value 2"} 2010.1971 +summary_name_count{name_1="value 1",name_2="value 2"} 4711 +`, + }, + // 4: Histogram + { + in: &dto.MetricFamily{ + Name: proto.String("request_duration_microseconds"), + Help: proto.String("The response latency."), + Type: dto.MetricType_HISTOGRAM.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ 
+ Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(2693), + SampleSum: proto.Float64(1756047.3), + Bucket: []*dto.Bucket{ + &dto.Bucket{ + UpperBound: proto.Float64(100), + CumulativeCount: proto.Uint64(123), + }, + &dto.Bucket{ + UpperBound: proto.Float64(120), + CumulativeCount: proto.Uint64(412), + }, + &dto.Bucket{ + UpperBound: proto.Float64(144), + CumulativeCount: proto.Uint64(592), + }, + &dto.Bucket{ + UpperBound: proto.Float64(172.8), + CumulativeCount: proto.Uint64(1524), + }, + &dto.Bucket{ + UpperBound: proto.Float64(math.Inf(+1)), + CumulativeCount: proto.Uint64(2693), + }, + }, + }, + }, + }, + }, + out: `# HELP request_duration_microseconds The response latency. +# TYPE request_duration_microseconds histogram +request_duration_microseconds_bucket{le="100"} 123 +request_duration_microseconds_bucket{le="120"} 412 +request_duration_microseconds_bucket{le="144"} 592 +request_duration_microseconds_bucket{le="172.8"} 1524 +request_duration_microseconds_bucket{le="+Inf"} 2693 +request_duration_microseconds_sum 1.7560473e+06 +request_duration_microseconds_count 2693 +`, + }, + // 5: Histogram with missing +Inf bucket. + { + in: &dto.MetricFamily{ + Name: proto.String("request_duration_microseconds"), + Help: proto.String("The response latency."), + Type: dto.MetricType_HISTOGRAM.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(2693), + SampleSum: proto.Float64(1756047.3), + Bucket: []*dto.Bucket{ + &dto.Bucket{ + UpperBound: proto.Float64(100), + CumulativeCount: proto.Uint64(123), + }, + &dto.Bucket{ + UpperBound: proto.Float64(120), + CumulativeCount: proto.Uint64(412), + }, + &dto.Bucket{ + UpperBound: proto.Float64(144), + CumulativeCount: proto.Uint64(592), + }, + &dto.Bucket{ + UpperBound: proto.Float64(172.8), + CumulativeCount: proto.Uint64(1524), + }, + }, + }, + }, + }, + }, + out: `# HELP request_duration_microseconds The response latency. 
+# TYPE request_duration_microseconds histogram +request_duration_microseconds_bucket{le="100"} 123 +request_duration_microseconds_bucket{le="120"} 412 +request_duration_microseconds_bucket{le="144"} 592 +request_duration_microseconds_bucket{le="172.8"} 1524 +request_duration_microseconds_bucket{le="+Inf"} 2693 +request_duration_microseconds_sum 1.7560473e+06 +request_duration_microseconds_count 2693 +`, + }, + // 6: No metric type, should result in default type Counter. + { + in: &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("doc string"), + Metric: []*dto.Metric{ + &dto.Metric{ + Counter: &dto.Counter{ + Value: proto.Float64(math.Inf(-1)), + }, + }, + }, + }, + out: `# HELP name doc string +# TYPE name counter +name -Inf +`, + }, + } + + for i, scenario := range scenarios { + out := bytes.NewBuffer(make([]byte, 0, len(scenario.out))) + n, err := MetricFamilyToText(out, scenario.in) + if err != nil { + t.Errorf("%d. error: %s", i, err) + continue + } + if expected, got := len(scenario.out), n; expected != got { + t.Errorf( + "%d. expected %d bytes written, got %d", + i, expected, got, + ) + } + if expected, got := scenario.out, out.String(); expected != got { + t.Errorf( + "%d. expected out=%q, got %q", + i, expected, got, + ) + } + } + +} + +func TestCreate(t *testing.T) { + testCreate(t) +} + +func BenchmarkCreate(b *testing.B) { + for i := 0; i < b.N; i++ { + testCreate(b) + } +} + +func testCreateError(t testing.TB) { + var scenarios = []struct { + in *dto.MetricFamily + err string + }{ + // 0: No metric. + { + in: &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("doc string"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{}, + }, + err: "MetricFamily has no metrics", + }, + // 1: No metric name. 
+ { + in: &dto.MetricFamily{ + Help: proto.String("doc string"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(math.Inf(-1)), + }, + }, + }, + }, + err: "MetricFamily has no name", + }, + // 2: Wrong type. + { + in: &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("doc string"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(math.Inf(-1)), + }, + }, + }, + }, + err: "expected counter in metric", + }, + } + + for i, scenario := range scenarios { + var out bytes.Buffer + _, err := MetricFamilyToText(&out, scenario.in) + if err == nil { + t.Errorf("%d. expected error, got nil", i) + continue + } + if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 { + t.Errorf( + "%d. expected error starting with %q, got %q", + i, expected, got, + ) + } + } + +} + +func TestCreateError(t *testing.T) { + testCreateError(t) +} + +func BenchmarkCreateError(b *testing.B) { + for i := 0; i < b.N; i++ { + testCreateError(b) + } +} diff --git a/vendor/github.com/prometheus/common/expfmt/text_parse.go b/vendor/github.com/prometheus/common/expfmt/text_parse.go new file mode 100644 index 000000000..54bcfde29 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_parse.go @@ -0,0 +1,757 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "bufio" + "bytes" + "fmt" + "io" + "math" + "strconv" + "strings" + + dto "github.com/prometheus/client_model/go" + + "github.com/golang/protobuf/proto" + "github.com/prometheus/common/model" +) + +// A stateFn is a function that represents a state in a state machine. By +// executing it, the state is progressed to the next state. The stateFn returns +// another stateFn, which represents the new state. The end state is represented +// by nil. +type stateFn func() stateFn + +// ParseError signals errors while parsing the simple and flat text-based +// exchange format. +type ParseError struct { + Line int + Msg string +} + +// Error implements the error interface. +func (e ParseError) Error() string { + return fmt.Sprintf("text format parsing error in line %d: %s", e.Line, e.Msg) +} + +// TextParser is used to parse the simple and flat text-based exchange format. Its +// zero value is ready to use. +type TextParser struct { + metricFamiliesByName map[string]*dto.MetricFamily + buf *bufio.Reader // Where the parsed input is read through. + err error // Most recent error. + lineCount int // Tracks the line count for error messages. + currentByte byte // The most recent byte read. + currentToken bytes.Buffer // Re-used each time a token has to be gathered from multiple bytes. + currentMF *dto.MetricFamily + currentMetric *dto.Metric + currentLabelPair *dto.LabelPair + + // The remaining member variables are only used for summaries/histograms. + currentLabels map[string]string // All labels including '__name__' but excluding 'quantile'/'le' + // Summary specific. + summaries map[uint64]*dto.Metric // Key is created with LabelsToSignature. + currentQuantile float64 + // Histogram specific. + histograms map[uint64]*dto.Metric // Key is created with LabelsToSignature. 
+ currentBucket float64 + // These tell us if the currently processed line ends on '_count' or + // '_sum' respectively and belong to a summary/histogram, representing the sample + // count and sum of that summary/histogram. + currentIsSummaryCount, currentIsSummarySum bool + currentIsHistogramCount, currentIsHistogramSum bool +} + +// TextToMetricFamilies reads 'in' as the simple and flat text-based exchange +// format and creates MetricFamily proto messages. It returns the MetricFamily +// proto messages in a map where the metric names are the keys, along with any +// error encountered. +// +// If the input contains duplicate metrics (i.e. lines with the same metric name +// and exactly the same label set), the resulting MetricFamily will contain +// duplicate Metric proto messages. Similar is true for duplicate label +// names. Checks for duplicates have to be performed separately, if required. +// Also note that neither the metrics within each MetricFamily are sorted nor +// the label pairs within each Metric. Sorting is not required for the most +// frequent use of this method, which is sample ingestion in the Prometheus +// server. However, for presentation purposes, you might want to sort the +// metrics, and in some cases, you must sort the labels, e.g. for consumption by +// the metric family injection hook of the Prometheus registry. +// +// Summaries and histograms are rather special beasts. You would probably not +// use them in the simple text format anyway. This method can deal with +// summaries and histograms if they are presented in exactly the way the +// text.Create function creates them. +// +// This method must not be called concurrently. If you want to parse different +// input concurrently, instantiate a separate Parser for each goroutine. 
+func (p *TextParser) TextToMetricFamilies(in io.Reader) (map[string]*dto.MetricFamily, error) { + p.reset(in) + for nextState := p.startOfLine; nextState != nil; nextState = nextState() { + // Magic happens here... + } + // Get rid of empty metric families. + for k, mf := range p.metricFamiliesByName { + if len(mf.GetMetric()) == 0 { + delete(p.metricFamiliesByName, k) + } + } + // If p.err is io.EOF now, we have run into a premature end of the input + // stream. Turn this error into something nicer and more + // meaningful. (io.EOF is often used as a signal for the legitimate end + // of an input stream.) + if p.err == io.EOF { + p.parseError("unexpected end of input stream") + } + return p.metricFamiliesByName, p.err +} + +func (p *TextParser) reset(in io.Reader) { + p.metricFamiliesByName = map[string]*dto.MetricFamily{} + if p.buf == nil { + p.buf = bufio.NewReader(in) + } else { + p.buf.Reset(in) + } + p.err = nil + p.lineCount = 0 + if p.summaries == nil || len(p.summaries) > 0 { + p.summaries = map[uint64]*dto.Metric{} + } + if p.histograms == nil || len(p.histograms) > 0 { + p.histograms = map[uint64]*dto.Metric{} + } + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() +} + +// startOfLine represents the state where the next byte read from p.buf is the +// start of a line (or whitespace leading up to it). +func (p *TextParser) startOfLine() stateFn { + p.lineCount++ + if p.skipBlankTab(); p.err != nil { + // End of input reached. This is the only case where + // that is not an error but a signal that we are done. + p.err = nil + return nil + } + switch p.currentByte { + case '#': + return p.startComment + case '\n': + return p.startOfLine // Empty line, start the next one. + } + return p.readingMetricName +} + +// startComment represents the state where the next byte read from p.buf is the +// start of a comment (or whitespace leading up to it). 
+func (p *TextParser) startComment() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + return p.startOfLine + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + // If we have hit the end of line already, there is nothing left + // to do. This is not considered a syntax error. + if p.currentByte == '\n' { + return p.startOfLine + } + keyword := p.currentToken.String() + if keyword != "HELP" && keyword != "TYPE" { + // Generic comment, ignore by fast forwarding to end of line. + for p.currentByte != '\n' { + if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil { + return nil // Unexpected end of input. + } + } + return p.startOfLine + } + // There is something. Next has to be a metric name. + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenAsMetricName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + if !isBlankOrTab(p.currentByte) { + p.parseError("invalid metric name in comment") + return nil + } + p.setOrCreateCurrentMF() + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '\n' { + // At the end of the line already. + // Again, this is not considered a syntax error. + return p.startOfLine + } + switch keyword { + case "HELP": + return p.readingHelp + case "TYPE": + return p.readingType + } + panic(fmt.Sprintf("code error: unexpected keyword %q", keyword)) +} + +// readingMetricName represents the state where the last byte read (now in +// p.currentByte) is the first byte of a metric name. 
+func (p *TextParser) readingMetricName() stateFn { + if p.readTokenAsMetricName(); p.err != nil { + return nil + } + if p.currentToken.Len() == 0 { + p.parseError("invalid metric name") + return nil + } + p.setOrCreateCurrentMF() + // Now is the time to fix the type if it hasn't happened yet. + if p.currentMF.Type == nil { + p.currentMF.Type = dto.MetricType_UNTYPED.Enum() + } + p.currentMetric = &dto.Metric{} + // Do not append the newly created currentMetric to + // currentMF.Metric right now. First wait if this is a summary, + // and the metric exists already, which we can only know after + // having read all the labels. + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingLabels +} + +// readingLabels represents the state where the last byte read (now in +// p.currentByte) is either the first byte of the label set (i.e. a '{'), or the +// first byte of the value (otherwise). +func (p *TextParser) readingLabels() stateFn { + // Summaries/histograms are special. We have to reset the + // currentLabels map, currentQuantile and currentBucket before starting to + // read labels. + if p.currentMF.GetType() == dto.MetricType_SUMMARY || p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + p.currentLabels = map[string]string{} + p.currentLabels[string(model.MetricNameLabel)] = p.currentMF.GetName() + p.currentQuantile = math.NaN() + p.currentBucket = math.NaN() + } + if p.currentByte != '{' { + return p.readingValue + } + return p.startLabelName +} + +// startLabelName represents the state where the next byte read from p.buf is +// the start of a label name (or whitespace leading up to it). +func (p *TextParser) startLabelName() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte == '}' { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. 
+ } + return p.readingValue + } + if p.readTokenAsLabelName(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() == 0 { + p.parseError(fmt.Sprintf("invalid label name for metric %q", p.currentMF.GetName())) + return nil + } + p.currentLabelPair = &dto.LabelPair{Name: proto.String(p.currentToken.String())} + if p.currentLabelPair.GetName() == string(model.MetricNameLabel) { + p.parseError(fmt.Sprintf("label name %q is reserved", model.MetricNameLabel)) + return nil + } + // Special summary/histogram treatment. Don't add 'quantile' and 'le' + // labels to 'real' labels. + if !(p.currentMF.GetType() == dto.MetricType_SUMMARY && p.currentLabelPair.GetName() == model.QuantileLabel) && + !(p.currentMF.GetType() == dto.MetricType_HISTOGRAM && p.currentLabelPair.GetName() == model.BucketLabel) { + p.currentMetric.Label = append(p.currentMetric.Label, p.currentLabelPair) + } + if p.skipBlankTabIfCurrentBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '=' { + p.parseError(fmt.Sprintf("expected '=' after label name, found %q", p.currentByte)) + return nil + } + return p.startLabelValue +} + +// startLabelValue represents the state where the next byte read from p.buf is +// the start of a (quoted) label value (or whitespace leading up to it). +func (p *TextParser) startLabelValue() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentByte != '"' { + p.parseError(fmt.Sprintf("expected '\"' at start of label value, found %q", p.currentByte)) + return nil + } + if p.readTokenAsLabelValue(); p.err != nil { + return nil + } + if !model.LabelValue(p.currentToken.String()).IsValid() { + p.parseError(fmt.Sprintf("invalid label value %q", p.currentToken.String())) + return nil + } + p.currentLabelPair.Value = proto.String(p.currentToken.String()) + // Special treatment of summaries: + // - Quantile labels are special, will result in dto.Quantile later. 
+ // - Other labels have to be added to currentLabels for signature calculation. + if p.currentMF.GetType() == dto.MetricType_SUMMARY { + if p.currentLabelPair.GetName() == model.QuantileLabel { + if p.currentQuantile, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'quantile' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + // Similar special treatment of histograms. + if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + if p.currentLabelPair.GetName() == model.BucketLabel { + if p.currentBucket, p.err = strconv.ParseFloat(p.currentLabelPair.GetValue(), 64); p.err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value for 'le' label, got %q", p.currentLabelPair.GetValue())) + return nil + } + } else { + p.currentLabels[p.currentLabelPair.GetName()] = p.currentLabelPair.GetValue() + } + } + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + switch p.currentByte { + case ',': + return p.startLabelName + + case '}': + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + return p.readingValue + default: + p.parseError(fmt.Sprintf("unexpected end of label value %q", p.currentLabelPair.Value)) + return nil + } +} + +// readingValue represents the state where the last byte read (now in +// p.currentByte) is the first byte of the sample value (i.e. a float). +func (p *TextParser) readingValue() stateFn { + // When we are here, we have read all the labels, so for the + // special case of a summary/histogram, we can finally find out + // if the metric already exists. 
+ if p.currentMF.GetType() == dto.MetricType_SUMMARY { + signature := model.LabelsToSignature(p.currentLabels) + if summary := p.summaries[signature]; summary != nil { + p.currentMetric = summary + } else { + p.summaries[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else if p.currentMF.GetType() == dto.MetricType_HISTOGRAM { + signature := model.LabelsToSignature(p.currentLabels) + if histogram := p.histograms[signature]; histogram != nil { + p.currentMetric = histogram + } else { + p.histograms[signature] = p.currentMetric + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + } else { + p.currentMF.Metric = append(p.currentMF.Metric, p.currentMetric) + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + value, err := strconv.ParseFloat(p.currentToken.String(), 64) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected float as value, got %q", p.currentToken.String())) + return nil + } + switch p.currentMF.GetType() { + case dto.MetricType_COUNTER: + p.currentMetric.Counter = &dto.Counter{Value: proto.Float64(value)} + case dto.MetricType_GAUGE: + p.currentMetric.Gauge = &dto.Gauge{Value: proto.Float64(value)} + case dto.MetricType_UNTYPED: + p.currentMetric.Untyped = &dto.Untyped{Value: proto.Float64(value)} + case dto.MetricType_SUMMARY: + // *sigh* + if p.currentMetric.Summary == nil { + p.currentMetric.Summary = &dto.Summary{} + } + switch { + case p.currentIsSummaryCount: + p.currentMetric.Summary.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsSummarySum: + p.currentMetric.Summary.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentQuantile): + p.currentMetric.Summary.Quantile = append( + p.currentMetric.Summary.Quantile, + &dto.Quantile{ + Quantile: proto.Float64(p.currentQuantile), + Value: proto.Float64(value), + }, + ) + } + case dto.MetricType_HISTOGRAM: + // *sigh* + if 
p.currentMetric.Histogram == nil { + p.currentMetric.Histogram = &dto.Histogram{} + } + switch { + case p.currentIsHistogramCount: + p.currentMetric.Histogram.SampleCount = proto.Uint64(uint64(value)) + case p.currentIsHistogramSum: + p.currentMetric.Histogram.SampleSum = proto.Float64(value) + case !math.IsNaN(p.currentBucket): + p.currentMetric.Histogram.Bucket = append( + p.currentMetric.Histogram.Bucket, + &dto.Bucket{ + UpperBound: proto.Float64(p.currentBucket), + CumulativeCount: proto.Uint64(uint64(value)), + }, + ) + } + default: + p.err = fmt.Errorf("unexpected type for metric name %q", p.currentMF.GetName()) + } + if p.currentByte == '\n' { + return p.startOfLine + } + return p.startTimestamp +} + +// startTimestamp represents the state where the next byte read from p.buf is +// the start of the timestamp (or whitespace leading up to it). +func (p *TextParser) startTimestamp() stateFn { + if p.skipBlankTab(); p.err != nil { + return nil // Unexpected end of input. + } + if p.readTokenUntilWhitespace(); p.err != nil { + return nil // Unexpected end of input. + } + timestamp, err := strconv.ParseInt(p.currentToken.String(), 10, 64) + if err != nil { + // Create a more helpful error message. + p.parseError(fmt.Sprintf("expected integer as timestamp, got %q", p.currentToken.String())) + return nil + } + p.currentMetric.TimestampMs = proto.Int64(timestamp) + if p.readTokenUntilNewline(false); p.err != nil { + return nil // Unexpected end of input. + } + if p.currentToken.Len() > 0 { + p.parseError(fmt.Sprintf("spurious string after timestamp: %q", p.currentToken.String())) + return nil + } + return p.startOfLine +} + +// readingHelp represents the state where the last byte read (now in +// p.currentByte) is the first byte of the docstring after 'HELP'. 
func (p *TextParser) readingHelp() stateFn {
	if p.currentMF.Help != nil {
		p.parseError(fmt.Sprintf("second HELP line for metric name %q", p.currentMF.GetName()))
		return nil
	}
	// Rest of line is the docstring.
	if p.readTokenUntilNewline(true); p.err != nil {
		return nil // Unexpected end of input.
	}
	p.currentMF.Help = proto.String(p.currentToken.String())
	return p.startOfLine
}

// readingType represents the state where the last byte read (now in
// p.currentByte) is the first byte of the type hint after 'TYPE'.
func (p *TextParser) readingType() stateFn {
	if p.currentMF.Type != nil {
		p.parseError(fmt.Sprintf("second TYPE line for metric name %q, or TYPE reported after samples", p.currentMF.GetName()))
		return nil
	}
	// Rest of line is the type.
	if p.readTokenUntilNewline(false); p.err != nil {
		return nil // Unexpected end of input.
	}
	// Type names are matched case-insensitively against the MetricType enum.
	metricType, ok := dto.MetricType_value[strings.ToUpper(p.currentToken.String())]
	if !ok {
		p.parseError(fmt.Sprintf("unknown metric type %q", p.currentToken.String()))
		return nil
	}
	p.currentMF.Type = dto.MetricType(metricType).Enum()
	return p.startOfLine
}

// parseError sets p.err to a ParseError at the current line with the given
// message.
func (p *TextParser) parseError(msg string) {
	p.err = ParseError{
		Line: p.lineCount,
		Msg:  msg,
	}
}

// skipBlankTab reads (and discards) bytes from p.buf until it encounters a byte
// that is neither ' ' nor '\t'. That byte is left in p.currentByte.
func (p *TextParser) skipBlankTab() {
	for {
		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil || !isBlankOrTab(p.currentByte) {
			return
		}
	}
}

// skipBlankTabIfCurrentBlankTab works exactly as skipBlankTab but doesn't do
// anything if p.currentByte is neither ' ' nor '\t'.
func (p *TextParser) skipBlankTabIfCurrentBlankTab() {
	if isBlankOrTab(p.currentByte) {
		p.skipBlankTab()
	}
}

// readTokenUntilWhitespace copies bytes from p.buf into p.currentToken. The
// first byte considered is the byte already read (now in p.currentByte). The
// first whitespace byte encountered is still copied into p.currentByte, but not
// into p.currentToken.
func (p *TextParser) readTokenUntilWhitespace() {
	p.currentToken.Reset()
	for p.err == nil && !isBlankOrTab(p.currentByte) && p.currentByte != '\n' {
		p.currentToken.WriteByte(p.currentByte)
		p.currentByte, p.err = p.buf.ReadByte()
	}
}

// readTokenUntilNewline copies bytes from p.buf into p.currentToken. The first
// byte considered is the byte already read (now in p.currentByte). The first
// newline byte encountered is still copied into p.currentByte, but not into
// p.currentToken. If recognizeEscapeSequence is true, two escape sequences are
// recognized: '\\' translates into '\', and '\n' into a line-feed character. All
// other escape sequences are invalid and cause an error.
func (p *TextParser) readTokenUntilNewline(recognizeEscapeSequence bool) {
	p.currentToken.Reset()
	escaped := false
	for p.err == nil {
		if recognizeEscapeSequence && escaped {
			switch p.currentByte {
			case '\\':
				p.currentToken.WriteByte(p.currentByte)
			case 'n':
				p.currentToken.WriteByte('\n')
			default:
				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
				return
			}
			escaped = false
		} else {
			switch p.currentByte {
			case '\n':
				return
			case '\\':
				escaped = true
			default:
				p.currentToken.WriteByte(p.currentByte)
			}
		}
		p.currentByte, p.err = p.buf.ReadByte()
	}
}

// readTokenAsMetricName copies a metric name from p.buf into p.currentToken.
// The first byte considered is the byte already read (now in p.currentByte).
// The first byte not part of a metric name is still copied into p.currentByte,
// but not into p.currentToken.
+func (p *TextParser) readTokenAsMetricName() { + p.currentToken.Reset() + if !isValidMetricNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidMetricNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelName copies a label name from p.buf into p.currentToken. +// The first byte considered is the byte already read (now in p.currentByte). +// The first byte not part of a label name is still copied into p.currentByte, +// but not into p.currentToken. +func (p *TextParser) readTokenAsLabelName() { + p.currentToken.Reset() + if !isValidLabelNameStart(p.currentByte) { + return + } + for { + p.currentToken.WriteByte(p.currentByte) + p.currentByte, p.err = p.buf.ReadByte() + if p.err != nil || !isValidLabelNameContinuation(p.currentByte) { + return + } + } +} + +// readTokenAsLabelValue copies a label value from p.buf into p.currentToken. +// In contrast to the other 'readTokenAs...' functions, which start with the +// last read byte in p.currentByte, this method ignores p.currentByte and starts +// with reading a new byte from p.buf. The first byte not part of a label value +// is still copied into p.currentByte, but not into p.currentToken. 
func (p *TextParser) readTokenAsLabelValue() {
	p.currentToken.Reset()
	escaped := false
	for {
		if p.currentByte, p.err = p.buf.ReadByte(); p.err != nil {
			return
		}
		if escaped {
			// Only '\"', '\\', and '\n' are valid escape sequences
			// inside a label value.
			switch p.currentByte {
			case '"', '\\':
				p.currentToken.WriteByte(p.currentByte)
			case 'n':
				p.currentToken.WriteByte('\n')
			default:
				p.parseError(fmt.Sprintf("invalid escape sequence '\\%c'", p.currentByte))
				return
			}
			escaped = false
			continue
		}
		switch p.currentByte {
		case '"':
			// Closing quote terminates the value.
			return
		case '\n':
			p.parseError(fmt.Sprintf("label value %q contains unescaped new-line", p.currentToken.String()))
			return
		case '\\':
			escaped = true
		default:
			p.currentToken.WriteByte(p.currentByte)
		}
	}
}

// setOrCreateCurrentMF looks up (or creates) the MetricFamily for the metric
// name currently held in p.currentToken, storing it in p.currentMF. For names
// ending in '_count'/'_sum' that belong to an existing summary or histogram
// family, it resolves to that family and records which component the current
// line represents via the currentIs… flags.
func (p *TextParser) setOrCreateCurrentMF() {
	p.currentIsSummaryCount = false
	p.currentIsSummarySum = false
	p.currentIsHistogramCount = false
	p.currentIsHistogramSum = false
	name := p.currentToken.String()
	if p.currentMF = p.metricFamiliesByName[name]; p.currentMF != nil {
		return
	}
	// Try out if this is a _sum or _count for a summary/histogram.
	summaryName := summaryMetricName(name)
	if p.currentMF = p.metricFamiliesByName[summaryName]; p.currentMF != nil {
		if p.currentMF.GetType() == dto.MetricType_SUMMARY {
			if isCount(name) {
				p.currentIsSummaryCount = true
			}
			if isSum(name) {
				p.currentIsSummarySum = true
			}
			return
		}
	}
	histogramName := histogramMetricName(name)
	if p.currentMF = p.metricFamiliesByName[histogramName]; p.currentMF != nil {
		if p.currentMF.GetType() == dto.MetricType_HISTOGRAM {
			if isCount(name) {
				p.currentIsHistogramCount = true
			}
			if isSum(name) {
				p.currentIsHistogramSum = true
			}
			return
		}
	}
	// No match at all: start a brand-new family under the literal name.
	p.currentMF = &dto.MetricFamily{Name: proto.String(name)}
	p.metricFamiliesByName[name] = p.currentMF
}

// isValidLabelNameStart reports whether b may begin a label name
// (letter or underscore).
func isValidLabelNameStart(b byte) bool {
	return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_'
}

// isValidLabelNameContinuation additionally allows digits after the first byte.
func isValidLabelNameContinuation(b byte) bool {
	return isValidLabelNameStart(b) || (b >= '0' && b <= '9')
}

// isValidMetricNameStart is like isValidLabelNameStart but also allows ':'.
func isValidMetricNameStart(b byte) bool {
	return isValidLabelNameStart(b) || b == ':'
}

// isValidMetricNameContinuation is like isValidLabelNameContinuation but also
// allows ':'.
func isValidMetricNameContinuation(b byte) bool {
	return isValidLabelNameContinuation(b) || b == ':'
}

// isBlankOrTab reports whether b is a space or a tab.
func isBlankOrTab(b byte) bool {
	return b == ' ' || b == '\t'
}

// isCount reports whether name ends in '_count' preceded by a non-empty base
// name (hence the strict > comparison).
func isCount(name string) bool {
	return len(name) > 6 && name[len(name)-6:] == "_count"
}

// isSum reports whether name ends in '_sum' preceded by a non-empty base name.
func isSum(name string) bool {
	return len(name) > 4 && name[len(name)-4:] == "_sum"
}

// isBucket reports whether name ends in '_bucket' preceded by a non-empty base
// name.
func isBucket(name string) bool {
	return len(name) > 7 && name[len(name)-7:] == "_bucket"
}

// summaryMetricName strips a '_count' or '_sum' suffix to recover the summary
// family name; other names are returned unchanged.
func summaryMetricName(name string) string {
	switch {
	case isCount(name):
		return name[:len(name)-6]
	case isSum(name):
		return name[:len(name)-4]
	default:
		return name
	}
}

// histogramMetricName strips a '_count', '_sum', or '_bucket' suffix to
// recover the histogram family name; other names are returned unchanged.
func histogramMetricName(name string) string {
	switch {
	case isCount(name):
		return name[:len(name)-6]
	case isSum(name):
		return name[:len(name)-4]
	case isBucket(name):
		return name[:len(name)-7]
	default:
		return name
	}
}
diff --git 
a/vendor/github.com/prometheus/common/expfmt/text_parse_test.go b/vendor/github.com/prometheus/common/expfmt/text_parse_test.go new file mode 100644 index 000000000..76c951185 --- /dev/null +++ b/vendor/github.com/prometheus/common/expfmt/text_parse_test.go @@ -0,0 +1,593 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package expfmt + +import ( + "math" + "strings" + "testing" + + "github.com/golang/protobuf/proto" + dto "github.com/prometheus/client_model/go" +) + +func testTextParse(t testing.TB) { + var scenarios = []struct { + in string + out []*dto.MetricFamily + }{ + // 0: Empty lines as input. + { + in: ` + +`, + out: []*dto.MetricFamily{}, + }, + // 1: Minimal case. + { + in: ` +minimal_metric 1.234 +another_metric -3e3 103948 +# Even that: +no_labels{} 3 +# HELP line for non-existing metric will be ignored. 
+`, + out: []*dto.MetricFamily{ + &dto.MetricFamily{ + Name: proto.String("minimal_metric"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(1.234), + }, + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("another_metric"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(-3e3), + }, + TimestampMs: proto.Int64(103948), + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("no_labels"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(3), + }, + }, + }, + }, + }, + }, + // 2: Counters & gauges, docstrings, various whitespace, escape sequences. + { + in: ` +# A normal comment. +# +# TYPE name counter +name{labelname="val1",basename="basevalue"} NaN +name {labelname="val2",basename="base\"v\\al\nue"} 0.23 1234567890 +# HELP name two-line\n doc str\\ing + + # HELP name2 doc str"ing 2 + # TYPE name2 gauge +name2{labelname="val2" ,basename = "basevalue2" } +Inf 54321 +name2{ labelname = "val1" , }-Inf +`, + out: []*dto.MetricFamily{ + &dto.MetricFamily{ + Name: proto.String("name"), + Help: proto.String("two-line\n doc str\\ing"), + Type: dto.MetricType_COUNTER.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val1"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("basevalue"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(math.NaN()), + }, + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("base\"v\\al\nue"), + }, + }, + Counter: &dto.Counter{ + Value: proto.Float64(.23), + }, + TimestampMs: proto.Int64(1234567890), + }, + }, + }, + 
&dto.MetricFamily{ + Name: proto.String("name2"), + Help: proto.String("doc str\"ing 2"), + Type: dto.MetricType_GAUGE.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("basename"), + Value: proto.String("basevalue2"), + }, + }, + Gauge: &dto.Gauge{ + Value: proto.Float64(math.Inf(+1)), + }, + TimestampMs: proto.Int64(54321), + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("labelname"), + Value: proto.String("val1"), + }, + }, + Gauge: &dto.Gauge{ + Value: proto.Float64(math.Inf(-1)), + }, + }, + }, + }, + }, + }, + // 3: The evil summary, mixed with other types and funny comments. + { + in: ` +# TYPE my_summary summary +my_summary{n1="val1",quantile="0.5"} 110 +decoy -1 -2 +my_summary{n1="val1",quantile="0.9"} 140 1 +my_summary_count{n1="val1"} 42 +# Latest timestamp wins in case of a summary. +my_summary_sum{n1="val1"} 4711 2 +fake_sum{n1="val1"} 2001 +# TYPE another_summary summary +another_summary_count{n2="val2",n1="val1"} 20 +my_summary_count{n2="val2",n1="val1"} 5 5 +another_summary{n1="val1",n2="val2",quantile=".3"} -1.2 +my_summary_sum{n1="val2"} 08 15 +my_summary{n1="val3", quantile="0.2"} 4711 + my_summary{n1="val1",n2="val2",quantile="-12.34",} NaN +# some +# funny comments +# HELP +# HELP +# HELP my_summary +# HELP my_summary +`, + out: []*dto.MetricFamily{ + &dto.MetricFamily{ + Name: proto.String("fake_sum"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val1"), + }, + }, + Untyped: &dto.Untyped{ + Value: proto.Float64(2001), + }, + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("decoy"), + Type: dto.MetricType_UNTYPED.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Untyped: &dto.Untyped{ + Value: proto.Float64(-1), + }, + 
TimestampMs: proto.Int64(-2), + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("my_summary"), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val1"), + }, + }, + Summary: &dto.Summary{ + SampleCount: proto.Uint64(42), + SampleSum: proto.Float64(4711), + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.5), + Value: proto.Float64(110), + }, + &dto.Quantile{ + Quantile: proto.Float64(0.9), + Value: proto.Float64(140), + }, + }, + }, + TimestampMs: proto.Int64(2), + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n2"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val1"), + }, + }, + Summary: &dto.Summary{ + SampleCount: proto.Uint64(5), + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(-12.34), + Value: proto.Float64(math.NaN()), + }, + }, + }, + TimestampMs: proto.Int64(5), + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val2"), + }, + }, + Summary: &dto.Summary{ + SampleSum: proto.Float64(8), + }, + TimestampMs: proto.Int64(15), + }, + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val3"), + }, + }, + Summary: &dto.Summary{ + Quantile: []*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.2), + Value: proto.Float64(4711), + }, + }, + }, + }, + }, + }, + &dto.MetricFamily{ + Name: proto.String("another_summary"), + Type: dto.MetricType_SUMMARY.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Label: []*dto.LabelPair{ + &dto.LabelPair{ + Name: proto.String("n2"), + Value: proto.String("val2"), + }, + &dto.LabelPair{ + Name: proto.String("n1"), + Value: proto.String("val1"), + }, + }, + Summary: &dto.Summary{ + SampleCount: proto.Uint64(20), + Quantile: 
[]*dto.Quantile{ + &dto.Quantile{ + Quantile: proto.Float64(0.3), + Value: proto.Float64(-1.2), + }, + }, + }, + }, + }, + }, + }, + }, + // 4: The histogram. + { + in: ` +# HELP request_duration_microseconds The response latency. +# TYPE request_duration_microseconds histogram +request_duration_microseconds_bucket{le="100"} 123 +request_duration_microseconds_bucket{le="120"} 412 +request_duration_microseconds_bucket{le="144"} 592 +request_duration_microseconds_bucket{le="172.8"} 1524 +request_duration_microseconds_bucket{le="+Inf"} 2693 +request_duration_microseconds_sum 1.7560473e+06 +request_duration_microseconds_count 2693 +`, + out: []*dto.MetricFamily{ + { + Name: proto.String("request_duration_microseconds"), + Help: proto.String("The response latency."), + Type: dto.MetricType_HISTOGRAM.Enum(), + Metric: []*dto.Metric{ + &dto.Metric{ + Histogram: &dto.Histogram{ + SampleCount: proto.Uint64(2693), + SampleSum: proto.Float64(1756047.3), + Bucket: []*dto.Bucket{ + &dto.Bucket{ + UpperBound: proto.Float64(100), + CumulativeCount: proto.Uint64(123), + }, + &dto.Bucket{ + UpperBound: proto.Float64(120), + CumulativeCount: proto.Uint64(412), + }, + &dto.Bucket{ + UpperBound: proto.Float64(144), + CumulativeCount: proto.Uint64(592), + }, + &dto.Bucket{ + UpperBound: proto.Float64(172.8), + CumulativeCount: proto.Uint64(1524), + }, + &dto.Bucket{ + UpperBound: proto.Float64(math.Inf(+1)), + CumulativeCount: proto.Uint64(2693), + }, + }, + }, + }, + }, + }, + }, + }, + } + + for i, scenario := range scenarios { + out, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in)) + if err != nil { + t.Errorf("%d. error: %s", i, err) + continue + } + if expected, got := len(scenario.out), len(out); expected != got { + t.Errorf( + "%d. expected %d MetricFamilies, got %d", + i, expected, got, + ) + } + for _, expected := range scenario.out { + got, ok := out[expected.GetName()] + if !ok { + t.Errorf( + "%d. 
expected MetricFamily %q, found none", + i, expected.GetName(), + ) + continue + } + if expected.String() != got.String() { + t.Errorf( + "%d. expected MetricFamily %s, got %s", + i, expected, got, + ) + } + } + } +} + +func TestTextParse(t *testing.T) { + testTextParse(t) +} + +func BenchmarkTextParse(b *testing.B) { + for i := 0; i < b.N; i++ { + testTextParse(b) + } +} + +func testTextParseError(t testing.TB) { + var scenarios = []struct { + in string + err string + }{ + // 0: No new-line at end of input. + { + in: ` +bla 3.14 +blubber 42`, + err: "text format parsing error in line 3: unexpected end of input stream", + }, + // 1: Invalid escape sequence in label value. + { + in: `metric{label="\t"} 3.14`, + err: "text format parsing error in line 1: invalid escape sequence", + }, + // 2: Newline in label value. + { + in: ` +metric{label="new +line"} 3.14 +`, + err: `text format parsing error in line 2: label value "new" contains unescaped new-line`, + }, + // 3: + { + in: `metric{@="bla"} 3.14`, + err: "text format parsing error in line 1: invalid label name for metric", + }, + // 4: + { + in: `metric{__name__="bla"} 3.14`, + err: `text format parsing error in line 1: label name "__name__" is reserved`, + }, + // 5: + { + in: `metric{label+="bla"} 3.14`, + err: "text format parsing error in line 1: expected '=' after label name", + }, + // 6: + { + in: `metric{label=bla} 3.14`, + err: "text format parsing error in line 1: expected '\"' at start of label value", + }, + // 7: + { + in: ` +# TYPE metric summary +metric{quantile="bla"} 3.14 +`, + err: "text format parsing error in line 3: expected float as value for 'quantile' label", + }, + // 8: + { + in: `metric{label="bla"+} 3.14`, + err: "text format parsing error in line 1: unexpected end of label value", + }, + // 9: + { + in: `metric{label="bla"} 3.14 2.72 +`, + err: "text format parsing error in line 1: expected integer as timestamp", + }, + // 10: + { + in: `metric{label="bla"} 3.14 2 3 +`, + err: "text 
format parsing error in line 1: spurious string after timestamp", + }, + // 11: + { + in: `metric{label="bla"} blubb +`, + err: "text format parsing error in line 1: expected float as value", + }, + // 12: + { + in: ` +# HELP metric one +# HELP metric two +`, + err: "text format parsing error in line 3: second HELP line for metric name", + }, + // 13: + { + in: ` +# TYPE metric counter +# TYPE metric untyped +`, + err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`, + }, + // 14: + { + in: ` +metric 4.12 +# TYPE metric counter +`, + err: `text format parsing error in line 3: second TYPE line for metric name "metric", or TYPE reported after samples`, + }, + // 14: + { + in: ` +# TYPE metric bla +`, + err: "text format parsing error in line 2: unknown metric type", + }, + // 15: + { + in: ` +# TYPE met-ric +`, + err: "text format parsing error in line 2: invalid metric name in comment", + }, + // 16: + { + in: `@invalidmetric{label="bla"} 3.14 2`, + err: "text format parsing error in line 1: invalid metric name", + }, + // 17: + { + in: `{label="bla"} 3.14 2`, + err: "text format parsing error in line 1: invalid metric name", + }, + // 18: + { + in: ` +# TYPE metric histogram +metric_bucket{le="bla"} 3.14 +`, + err: "text format parsing error in line 3: expected float as value for 'le' label", + }, + // 19: Invalid UTF-8 in label value. + { + in: "metric{l=\"\xbd\"} 3.14\n", + err: "text format parsing error in line 1: invalid label value \"\\xbd\"", + }, + } + + for i, scenario := range scenarios { + _, err := parser.TextToMetricFamilies(strings.NewReader(scenario.in)) + if err == nil { + t.Errorf("%d. expected error, got nil", i) + continue + } + if expected, got := scenario.err, err.Error(); strings.Index(got, expected) != 0 { + t.Errorf( + "%d. 
expected error starting with %q, got %q", + i, expected, got, + ) + } + } + +} + +func TestTextParseError(t *testing.T) { + testTextParseError(t) +} + +func BenchmarkParseError(b *testing.B) { + for i := 0; i < b.N; i++ { + testTextParseError(b) + } +} diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt new file mode 100644 index 000000000..7723656d5 --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/README.txt @@ -0,0 +1,67 @@ +PACKAGE + +package goautoneg +import "bitbucket.org/ww/goautoneg" + +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. + + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. + + +FUNCTIONS + +func Negotiate(header string, alternatives []string) (content_type string) +Negotiate the most appropriate content_type given the accept header +and a list of alternatives. + +func ParseAccept(header string) (accept []Accept) +Parse an Accept Header string returning a sorted list +of clauses + + +TYPES + +type Accept struct { + Type, SubType string + Q float32 + Params map[string]string +} +Structure to represent a clause in an HTTP Accept Header + + +SUBDIRECTORIES + + .hg diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go new file mode 100644 index 000000000..648b38cb6 --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg.go @@ -0,0 +1,162 @@ +/* +HTTP Content-Type Autonegotiation. + +The functions in this package implement the behaviour specified in +http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html + +Copyright (c) 2011, Open Knowledge Foundation Ltd. +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright + notice, this list of conditions and the following disclaimer. 
+ + Redistributions in binary form must reproduce the above copyright + notice, this list of conditions and the following disclaimer in + the documentation and/or other materials provided with the + distribution. + + Neither the name of the Open Knowledge Foundation Ltd. nor the + names of its contributors may be used to endorse or promote + products derived from this software without specific prior written + permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS +"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT +LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR +A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT +LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, +DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY +THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT +(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
+ + +*/ +package goautoneg + +import ( + "sort" + "strconv" + "strings" +) + +// Structure to represent a clause in an HTTP Accept Header +type Accept struct { + Type, SubType string + Q float64 + Params map[string]string +} + +// For internal use, so that we can use the sort interface +type accept_slice []Accept + +func (accept accept_slice) Len() int { + slice := []Accept(accept) + return len(slice) +} + +func (accept accept_slice) Less(i, j int) bool { + slice := []Accept(accept) + ai, aj := slice[i], slice[j] + if ai.Q > aj.Q { + return true + } + if ai.Type != "*" && aj.Type == "*" { + return true + } + if ai.SubType != "*" && aj.SubType == "*" { + return true + } + return false +} + +func (accept accept_slice) Swap(i, j int) { + slice := []Accept(accept) + slice[i], slice[j] = slice[j], slice[i] +} + +// Parse an Accept Header string returning a sorted list +// of clauses +func ParseAccept(header string) (accept []Accept) { + parts := strings.Split(header, ",") + accept = make([]Accept, 0, len(parts)) + for _, part := range parts { + part := strings.Trim(part, " ") + + a := Accept{} + a.Params = make(map[string]string) + a.Q = 1.0 + + mrp := strings.Split(part, ";") + + media_range := mrp[0] + sp := strings.Split(media_range, "/") + a.Type = strings.Trim(sp[0], " ") + + switch { + case len(sp) == 1 && a.Type == "*": + a.SubType = "*" + case len(sp) == 2: + a.SubType = strings.Trim(sp[1], " ") + default: + continue + } + + if len(mrp) == 1 { + accept = append(accept, a) + continue + } + + for _, param := range mrp[1:] { + sp := strings.SplitN(param, "=", 2) + if len(sp) != 2 { + continue + } + token := strings.Trim(sp[0], " ") + if token == "q" { + a.Q, _ = strconv.ParseFloat(sp[1], 32) + } else { + a.Params[token] = strings.Trim(sp[1], " ") + } + } + + accept = append(accept, a) + } + + slice := accept_slice(accept) + sort.Sort(slice) + + return +} + +// Negotiate the most appropriate content_type given the accept header +// and a list of alternatives. 
+func Negotiate(header string, alternatives []string) (content_type string) { + asp := make([][]string, 0, len(alternatives)) + for _, ctype := range alternatives { + asp = append(asp, strings.SplitN(ctype, "/", 2)) + } + for _, clause := range ParseAccept(header) { + for i, ctsp := range asp { + if clause.Type == ctsp[0] && clause.SubType == ctsp[1] { + content_type = alternatives[i] + return + } + if clause.Type == ctsp[0] && clause.SubType == "*" { + content_type = alternatives[i] + return + } + if clause.Type == "*" && clause.SubType == "*" { + content_type = alternatives[i] + return + } + } + } + return +} diff --git a/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go new file mode 100644 index 000000000..41d328f1d --- /dev/null +++ b/vendor/github.com/prometheus/common/internal/bitbucket.org/ww/goautoneg/autoneg_test.go @@ -0,0 +1,33 @@ +package goautoneg + +import ( + "testing" +) + +var chrome = "application/xml,application/xhtml+xml,text/html;q=0.9,text/plain;q=0.8,image/png,*/*;q=0.5" + +func TestParseAccept(t *testing.T) { + alternatives := []string{"text/html", "image/png"} + content_type := Negotiate(chrome, alternatives) + if content_type != "image/png" { + t.Errorf("got %s expected image/png", content_type) + } + + alternatives = []string{"text/html", "text/plain", "text/n3"} + content_type = Negotiate(chrome, alternatives) + if content_type != "text/html" { + t.Errorf("got %s expected text/html", content_type) + } + + alternatives = []string{"text/n3", "text/plain"} + content_type = Negotiate(chrome, alternatives) + if content_type != "text/plain" { + t.Errorf("got %s expected text/plain", content_type) + } + + alternatives = []string{"text/n3", "application/rdf+xml"} + content_type = Negotiate(chrome, alternatives) + if content_type != "text/n3" { + t.Errorf("got %s expected text/n3", content_type) + } +} diff --git 
a/vendor/github.com/prometheus/common/model/alert.go b/vendor/github.com/prometheus/common/model/alert.go new file mode 100644 index 000000000..35e739c7a --- /dev/null +++ b/vendor/github.com/prometheus/common/model/alert.go @@ -0,0 +1,136 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "time" +) + +type AlertStatus string + +const ( + AlertFiring AlertStatus = "firing" + AlertResolved AlertStatus = "resolved" +) + +// Alert is a generic representation of an alert in the Prometheus eco-system. +type Alert struct { + // Label value pairs for purpose of aggregation, matching, and disposition + // dispatching. This must minimally include an "alertname" label. + Labels LabelSet `json:"labels"` + + // Extra key/value information which does not define alert identity. + Annotations LabelSet `json:"annotations"` + + // The known time range for this alert. Both ends are optional. + StartsAt time.Time `json:"startsAt,omitempty"` + EndsAt time.Time `json:"endsAt,omitempty"` + GeneratorURL string `json:"generatorURL"` +} + +// Name returns the name of the alert. It is equivalent to the "alertname" label. +func (a *Alert) Name() string { + return string(a.Labels[AlertNameLabel]) +} + +// Fingerprint returns a unique hash for the alert. It is equivalent to +// the fingerprint of the alert's label set. 
+func (a *Alert) Fingerprint() Fingerprint { + return a.Labels.Fingerprint() +} + +func (a *Alert) String() string { + s := fmt.Sprintf("%s[%s]", a.Name(), a.Fingerprint().String()[:7]) + if a.Resolved() { + return s + "[resolved]" + } + return s + "[active]" +} + +// Resolved returns true iff the activity interval ended in the past. +func (a *Alert) Resolved() bool { + return a.ResolvedAt(time.Now()) +} + +// ResolvedAt returns true off the activity interval ended before +// the given timestamp. +func (a *Alert) ResolvedAt(ts time.Time) bool { + if a.EndsAt.IsZero() { + return false + } + return !a.EndsAt.After(ts) +} + +// Status returns the status of the alert. +func (a *Alert) Status() AlertStatus { + if a.Resolved() { + return AlertResolved + } + return AlertFiring +} + +// Validate checks whether the alert data is inconsistent. +func (a *Alert) Validate() error { + if a.StartsAt.IsZero() { + return fmt.Errorf("start time missing") + } + if !a.EndsAt.IsZero() && a.EndsAt.Before(a.StartsAt) { + return fmt.Errorf("start time must be before end time") + } + if err := a.Labels.Validate(); err != nil { + return fmt.Errorf("invalid label set: %s", err) + } + if len(a.Labels) == 0 { + return fmt.Errorf("at least one label pair required") + } + if err := a.Annotations.Validate(); err != nil { + return fmt.Errorf("invalid annotations: %s", err) + } + return nil +} + +// Alert is a list of alerts that can be sorted in chronological order. +type Alerts []*Alert + +func (as Alerts) Len() int { return len(as) } +func (as Alerts) Swap(i, j int) { as[i], as[j] = as[j], as[i] } + +func (as Alerts) Less(i, j int) bool { + if as[i].StartsAt.Before(as[j].StartsAt) { + return true + } + if as[i].EndsAt.Before(as[j].EndsAt) { + return true + } + return as[i].Fingerprint() < as[j].Fingerprint() +} + +// HasFiring returns true iff one of the alerts is not resolved. 
+func (as Alerts) HasFiring() bool { + for _, a := range as { + if !a.Resolved() { + return true + } + } + return false +} + +// Status returns StatusFiring iff at least one of the alerts is firing. +func (as Alerts) Status() AlertStatus { + if as.HasFiring() { + return AlertFiring + } + return AlertResolved +} diff --git a/vendor/github.com/prometheus/common/model/alert_test.go b/vendor/github.com/prometheus/common/model/alert_test.go new file mode 100644 index 000000000..9692bca21 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/alert_test.go @@ -0,0 +1,118 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "strings" + "testing" + "time" +) + +func TestAlertValidate(t *testing.T) { + ts := time.Now() + + var cases = []struct { + alert *Alert + err string + }{ + { + alert: &Alert{ + Labels: LabelSet{"a": "b"}, + StartsAt: ts, + }, + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b"}, + }, + err: "start time missing", + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b"}, + StartsAt: ts, + EndsAt: ts, + }, + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b"}, + StartsAt: ts, + EndsAt: ts.Add(1 * time.Minute), + }, + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b"}, + StartsAt: ts, + EndsAt: ts.Add(-1 * time.Minute), + }, + err: "start time must be before end time", + }, + { + alert: &Alert{ + StartsAt: ts, + }, + err: "at least one label pair required", + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b", "!bad": "label"}, + StartsAt: ts, + }, + err: "invalid label set: invalid name", + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b", "bad": "\xfflabel"}, + StartsAt: ts, + }, + err: "invalid label set: invalid value", + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b"}, + Annotations: LabelSet{"!bad": "label"}, + StartsAt: ts, + }, + err: "invalid annotations: invalid name", + }, + { + alert: &Alert{ + Labels: LabelSet{"a": "b"}, + Annotations: LabelSet{"bad": "\xfflabel"}, + StartsAt: ts, + }, + err: "invalid annotations: invalid value", + }, + } + + for i, c := range cases { + err := c.alert.Validate() + if err == nil { + if c.err == "" { + continue + } + t.Errorf("%d. Expected error %q but got none", i, c.err) + continue + } + if c.err == "" && err != nil { + t.Errorf("%d. Expected no error but got %q", i, err) + continue + } + if !strings.Contains(err.Error(), c.err) { + t.Errorf("%d. 
Expected error to contain %q but got %q", i, c.err, err) + } + } +} diff --git a/vendor/github.com/prometheus/common/model/fingerprinting.go b/vendor/github.com/prometheus/common/model/fingerprinting.go new file mode 100644 index 000000000..fc4de4106 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/fingerprinting.go @@ -0,0 +1,105 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "strconv" +) + +// Fingerprint provides a hash-capable representation of a Metric. +// For our purposes, FNV-1A 64-bit is used. +type Fingerprint uint64 + +// FingerprintFromString transforms a string representation into a Fingerprint. +func FingerprintFromString(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + return Fingerprint(num), err +} + +// ParseFingerprint parses the input string into a fingerprint. +func ParseFingerprint(s string) (Fingerprint, error) { + num, err := strconv.ParseUint(s, 16, 64) + if err != nil { + return 0, err + } + return Fingerprint(num), nil +} + +func (f Fingerprint) String() string { + return fmt.Sprintf("%016x", uint64(f)) +} + +// Fingerprints represents a collection of Fingerprint subject to a given +// natural sorting scheme. It implements sort.Interface. +type Fingerprints []Fingerprint + +// Len implements sort.Interface. +func (f Fingerprints) Len() int { + return len(f) +} + +// Less implements sort.Interface. 
+func (f Fingerprints) Less(i, j int) bool { + return f[i] < f[j] +} + +// Swap implements sort.Interface. +func (f Fingerprints) Swap(i, j int) { + f[i], f[j] = f[j], f[i] +} + +// FingerprintSet is a set of Fingerprints. +type FingerprintSet map[Fingerprint]struct{} + +// Equal returns true if both sets contain the same elements (and not more). +func (s FingerprintSet) Equal(o FingerprintSet) bool { + if len(s) != len(o) { + return false + } + + for k := range s { + if _, ok := o[k]; !ok { + return false + } + } + + return true +} + +// Intersection returns the elements contained in both sets. +func (s FingerprintSet) Intersection(o FingerprintSet) FingerprintSet { + myLength, otherLength := len(s), len(o) + if myLength == 0 || otherLength == 0 { + return FingerprintSet{} + } + + subSet := s + superSet := o + + if otherLength < myLength { + subSet = o + superSet = s + } + + out := FingerprintSet{} + + for k := range subSet { + if _, ok := superSet[k]; ok { + out[k] = struct{}{} + } + } + + return out +} diff --git a/vendor/github.com/prometheus/common/model/fnv.go b/vendor/github.com/prometheus/common/model/fnv.go new file mode 100644 index 000000000..038fc1c90 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/fnv.go @@ -0,0 +1,42 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +// Inline and byte-free variant of hash/fnv's fnv64a. 
+ +const ( + offset64 = 14695981039346656037 + prime64 = 1099511628211 +) + +// hashNew initializies a new fnv64a hash value. +func hashNew() uint64 { + return offset64 +} + +// hashAdd adds a string to a fnv64a hash value, returning the updated hash. +func hashAdd(h uint64, s string) uint64 { + for i := 0; i < len(s); i++ { + h ^= uint64(s[i]) + h *= prime64 + } + return h +} + +// hashAddByte adds a byte to a fnv64a hash value, returning the updated hash. +func hashAddByte(h uint64, b byte) uint64 { + h ^= uint64(b) + h *= prime64 + return h +} diff --git a/vendor/github.com/prometheus/common/model/labels.go b/vendor/github.com/prometheus/common/model/labels.go new file mode 100644 index 000000000..41051a01a --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labels.go @@ -0,0 +1,210 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "regexp" + "strings" + "unicode/utf8" +) + +const ( + // AlertNameLabel is the name of the label containing the an alert's name. + AlertNameLabel = "alertname" + + // ExportedLabelPrefix is the prefix to prepend to the label names present in + // exported metrics if a label of the same name is added by the server. + ExportedLabelPrefix = "exported_" + + // MetricNameLabel is the label name indicating the metric name of a + // timeseries. 
+ MetricNameLabel = "__name__" + + // SchemeLabel is the name of the label that holds the scheme on which to + // scrape a target. + SchemeLabel = "__scheme__" + + // AddressLabel is the name of the label that holds the address of + // a scrape target. + AddressLabel = "__address__" + + // MetricsPathLabel is the name of the label that holds the path on which to + // scrape a target. + MetricsPathLabel = "__metrics_path__" + + // ReservedLabelPrefix is a prefix which is not legal in user-supplied + // label names. + ReservedLabelPrefix = "__" + + // MetaLabelPrefix is a prefix for labels that provide meta information. + // Labels with this prefix are used for intermediate label processing and + // will not be attached to time series. + MetaLabelPrefix = "__meta_" + + // TmpLabelPrefix is a prefix for temporary labels as part of relabelling. + // Labels with this prefix are used for intermediate label processing and + // will not be attached to time series. This is reserved for use in + // Prometheus configuration files by users. + TmpLabelPrefix = "__tmp_" + + // ParamLabelPrefix is a prefix for labels that provide URL parameters + // used to scrape a target. + ParamLabelPrefix = "__param_" + + // JobLabel is the label name indicating the job from which a timeseries + // was scraped. + JobLabel = "job" + + // InstanceLabel is the label name used for the instance label. + InstanceLabel = "instance" + + // BucketLabel is used for the label that defines the upper bound of a + // bucket of a histogram ("le" -> "less or equal"). + BucketLabel = "le" + + // QuantileLabel is used for the label that defines the quantile in a + // summary. + QuantileLabel = "quantile" +) + +// LabelNameRE is a regular expression matching valid label names. Note that the +// IsValid method of LabelName performs the same check but faster than a match +// with this regular expression. 
+var LabelNameRE = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]*$") + +// A LabelName is a key for a LabelSet or Metric. It has a value associated +// therewith. +type LabelName string + +// IsValid is true iff the label name matches the pattern of LabelNameRE. This +// method, however, does not use LabelNameRE for the check but a much faster +// hardcoded implementation. +func (ln LabelName) IsValid() bool { + if len(ln) == 0 { + return false + } + for i, b := range ln { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. +func (ln *LabelName) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + if !LabelName(s).IsValid() { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (ln *LabelName) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + if !LabelName(s).IsValid() { + return fmt.Errorf("%q is not a valid label name", s) + } + *ln = LabelName(s) + return nil +} + +// LabelNames is a sortable LabelName slice. In implements sort.Interface. +type LabelNames []LabelName + +func (l LabelNames) Len() int { + return len(l) +} + +func (l LabelNames) Less(i, j int) bool { + return l[i] < l[j] +} + +func (l LabelNames) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +func (l LabelNames) String() string { + labelStrings := make([]string, 0, len(l)) + for _, label := range l { + labelStrings = append(labelStrings, string(label)) + } + return strings.Join(labelStrings, ", ") +} + +// A LabelValue is an associated value for a LabelName. +type LabelValue string + +// IsValid returns true iff the string is a valid UTF8. 
+func (lv LabelValue) IsValid() bool { + return utf8.ValidString(string(lv)) +} + +// LabelValues is a sortable LabelValue slice. It implements sort.Interface. +type LabelValues []LabelValue + +func (l LabelValues) Len() int { + return len(l) +} + +func (l LabelValues) Less(i, j int) bool { + return string(l[i]) < string(l[j]) +} + +func (l LabelValues) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} + +// LabelPair pairs a name with a value. +type LabelPair struct { + Name LabelName + Value LabelValue +} + +// LabelPairs is a sortable slice of LabelPair pointers. It implements +// sort.Interface. +type LabelPairs []*LabelPair + +func (l LabelPairs) Len() int { + return len(l) +} + +func (l LabelPairs) Less(i, j int) bool { + switch { + case l[i].Name > l[j].Name: + return false + case l[i].Name < l[j].Name: + return true + case l[i].Value > l[j].Value: + return false + case l[i].Value < l[j].Value: + return true + default: + return false + } +} + +func (l LabelPairs) Swap(i, j int) { + l[i], l[j] = l[j], l[i] +} diff --git a/vendor/github.com/prometheus/common/model/labels_test.go b/vendor/github.com/prometheus/common/model/labels_test.go new file mode 100644 index 000000000..e8df28ffa --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labels_test.go @@ -0,0 +1,140 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "sort" + "testing" +) + +func testLabelNames(t testing.TB) { + var scenarios = []struct { + in LabelNames + out LabelNames + }{ + { + in: LabelNames{"ZZZ", "zzz"}, + out: LabelNames{"ZZZ", "zzz"}, + }, + { + in: LabelNames{"aaa", "AAA"}, + out: LabelNames{"AAA", "aaa"}, + }, + } + + for i, scenario := range scenarios { + sort.Sort(scenario.in) + + for j, expected := range scenario.out { + if expected != scenario.in[j] { + t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j]) + } + } + } +} + +func TestLabelNames(t *testing.T) { + testLabelNames(t) +} + +func BenchmarkLabelNames(b *testing.B) { + for i := 0; i < b.N; i++ { + testLabelNames(b) + } +} + +func testLabelValues(t testing.TB) { + var scenarios = []struct { + in LabelValues + out LabelValues + }{ + { + in: LabelValues{"ZZZ", "zzz"}, + out: LabelValues{"ZZZ", "zzz"}, + }, + { + in: LabelValues{"aaa", "AAA"}, + out: LabelValues{"AAA", "aaa"}, + }, + } + + for i, scenario := range scenarios { + sort.Sort(scenario.in) + + for j, expected := range scenario.out { + if expected != scenario.in[j] { + t.Errorf("%d.%d expected %s, got %s", i, j, expected, scenario.in[j]) + } + } + } +} + +func TestLabelValues(t *testing.T) { + testLabelValues(t) +} + +func BenchmarkLabelValues(b *testing.B) { + for i := 0; i < b.N; i++ { + testLabelValues(b) + } +} + +func TestLabelNameIsValid(t *testing.T) { + var scenarios = []struct { + ln LabelName + valid bool + }{ + { + ln: "Avalid_23name", + valid: true, + }, + { + ln: "_Avalid_23name", + valid: true, + }, + { + ln: "1valid_23name", + valid: false, + }, + { + ln: "avalid_23name", + valid: true, + }, + { + ln: "Ava:lid_23name", + valid: false, + }, + { + ln: "a lid_23name", + valid: false, + }, + { + ln: ":leading_colon", + valid: false, + }, + { + ln: "colon:in:the:middle", + valid: false, + }, + } + + for _, s := range scenarios { + if s.ln.IsValid() != s.valid { + t.Errorf("Expected %v for %q using IsValid method", s.valid, 
s.ln) + } + if LabelNameRE.MatchString(string(s.ln)) != s.valid { + t.Errorf("Expected %v for %q using regexp match", s.valid, s.ln) + } + } +} diff --git a/vendor/github.com/prometheus/common/model/labelset.go b/vendor/github.com/prometheus/common/model/labelset.go new file mode 100644 index 000000000..6eda08a73 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/labelset.go @@ -0,0 +1,169 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "sort" + "strings" +) + +// A LabelSet is a collection of LabelName and LabelValue pairs. The LabelSet +// may be fully-qualified down to the point where it may resolve to a single +// Metric in the data store or not. All operations that occur within the realm +// of a LabelSet can emit a vector of Metric entities to which the LabelSet may +// match. +type LabelSet map[LabelName]LabelValue + +// Validate checks whether all names and values in the label set +// are valid. +func (ls LabelSet) Validate() error { + for ln, lv := range ls { + if !ln.IsValid() { + return fmt.Errorf("invalid name %q", ln) + } + if !lv.IsValid() { + return fmt.Errorf("invalid value %q", lv) + } + } + return nil +} + +// Equal returns true iff both label sets have exactly the same key/value pairs. 
+func (ls LabelSet) Equal(o LabelSet) bool { + if len(ls) != len(o) { + return false + } + for ln, lv := range ls { + olv, ok := o[ln] + if !ok { + return false + } + if olv != lv { + return false + } + } + return true +} + +// Before compares the metrics, using the following criteria: +// +// If m has fewer labels than o, it is before o. If it has more, it is not. +// +// If the number of labels is the same, the superset of all label names is +// sorted alphanumerically. The first differing label pair found in that order +// determines the outcome: If the label does not exist at all in m, then m is +// before o, and vice versa. Otherwise the label value is compared +// alphanumerically. +// +// If m and o are equal, the method returns false. +func (ls LabelSet) Before(o LabelSet) bool { + if len(ls) < len(o) { + return true + } + if len(ls) > len(o) { + return false + } + + lns := make(LabelNames, 0, len(ls)+len(o)) + for ln := range ls { + lns = append(lns, ln) + } + for ln := range o { + lns = append(lns, ln) + } + // It's probably not worth it to de-dup lns. + sort.Sort(lns) + for _, ln := range lns { + mlv, ok := ls[ln] + if !ok { + return true + } + olv, ok := o[ln] + if !ok { + return false + } + if mlv < olv { + return true + } + if mlv > olv { + return false + } + } + return false +} + +// Clone returns a copy of the label set. +func (ls LabelSet) Clone() LabelSet { + lsn := make(LabelSet, len(ls)) + for ln, lv := range ls { + lsn[ln] = lv + } + return lsn +} + +// Merge is a helper function to non-destructively merge two label sets. 
+func (l LabelSet) Merge(other LabelSet) LabelSet { + result := make(LabelSet, len(l)) + + for k, v := range l { + result[k] = v + } + + for k, v := range other { + result[k] = v + } + + return result +} + +func (l LabelSet) String() string { + lstrs := make([]string, 0, len(l)) + for l, v := range l { + lstrs = append(lstrs, fmt.Sprintf("%s=%q", l, v)) + } + + sort.Strings(lstrs) + return fmt.Sprintf("{%s}", strings.Join(lstrs, ", ")) +} + +// Fingerprint returns the LabelSet's fingerprint. +func (ls LabelSet) Fingerprint() Fingerprint { + return labelSetToFingerprint(ls) +} + +// FastFingerprint returns the LabelSet's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (ls LabelSet) FastFingerprint() Fingerprint { + return labelSetToFastFingerprint(ls) +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (l *LabelSet) UnmarshalJSON(b []byte) error { + var m map[LabelName]LabelValue + if err := json.Unmarshal(b, &m); err != nil { + return err + } + // encoding/json only unmarshals maps of the form map[string]T. It treats + // LabelName as a string and does not call its UnmarshalJSON method. + // Thus, we have to replicate the behavior here. + for ln := range m { + if !ln.IsValid() { + return fmt.Errorf("%q is not a valid label name", ln) + } + } + *l = LabelSet(m) + return nil +} diff --git a/vendor/github.com/prometheus/common/model/metric.go b/vendor/github.com/prometheus/common/model/metric.go new file mode 100644 index 000000000..f7250909b --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metric.go @@ -0,0 +1,103 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "regexp" + "sort" + "strings" +) + +var ( + separator = []byte{0} + // MetricNameRE is a regular expression matching valid metric + // names. Note that the IsValidMetricName function performs the same + // check but faster than a match with this regular expression. + MetricNameRE = regexp.MustCompile(`^[a-zA-Z_:][a-zA-Z0-9_:]*$`) +) + +// A Metric is similar to a LabelSet, but the key difference is that a Metric is +// a singleton and refers to one and only one stream of samples. +type Metric LabelSet + +// Equal compares the metrics. +func (m Metric) Equal(o Metric) bool { + return LabelSet(m).Equal(LabelSet(o)) +} + +// Before compares the metrics' underlying label sets. +func (m Metric) Before(o Metric) bool { + return LabelSet(m).Before(LabelSet(o)) +} + +// Clone returns a copy of the Metric. 
+func (m Metric) Clone() Metric { + clone := make(Metric, len(m)) + for k, v := range m { + clone[k] = v + } + return clone +} + +func (m Metric) String() string { + metricName, hasName := m[MetricNameLabel] + numLabels := len(m) - 1 + if !hasName { + numLabels = len(m) + } + labelStrings := make([]string, 0, numLabels) + for label, value := range m { + if label != MetricNameLabel { + labelStrings = append(labelStrings, fmt.Sprintf("%s=%q", label, value)) + } + } + + switch numLabels { + case 0: + if hasName { + return string(metricName) + } + return "{}" + default: + sort.Strings(labelStrings) + return fmt.Sprintf("%s{%s}", metricName, strings.Join(labelStrings, ", ")) + } +} + +// Fingerprint returns a Metric's Fingerprint. +func (m Metric) Fingerprint() Fingerprint { + return LabelSet(m).Fingerprint() +} + +// FastFingerprint returns a Metric's Fingerprint calculated by a faster hashing +// algorithm, which is, however, more susceptible to hash collisions. +func (m Metric) FastFingerprint() Fingerprint { + return LabelSet(m).FastFingerprint() +} + +// IsValidMetricName returns true iff name matches the pattern of MetricNameRE. +// This function, however, does not use MetricNameRE for the check but a much +// faster hardcoded implementation. +func IsValidMetricName(n LabelValue) bool { + if len(n) == 0 { + return false + } + for i, b := range n { + if !((b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') || b == '_' || b == ':' || (b >= '0' && b <= '9' && i > 0)) { + return false + } + } + return true +} diff --git a/vendor/github.com/prometheus/common/model/metric_test.go b/vendor/github.com/prometheus/common/model/metric_test.go new file mode 100644 index 000000000..06f9de525 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/metric_test.go @@ -0,0 +1,132 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import "testing" + +func testMetric(t testing.TB) { + var scenarios = []struct { + input LabelSet + fingerprint Fingerprint + fastFingerprint Fingerprint + }{ + { + input: LabelSet{}, + fingerprint: 14695981039346656037, + fastFingerprint: 14695981039346656037, + }, + { + input: LabelSet{ + "first_name": "electro", + "occupation": "robot", + "manufacturer": "westinghouse", + }, + fingerprint: 5911716720268894962, + fastFingerprint: 11310079640881077873, + }, + { + input: LabelSet{ + "x": "y", + }, + fingerprint: 8241431561484471700, + fastFingerprint: 13948396922932177635, + }, + { + input: LabelSet{ + "a": "bb", + "b": "c", + }, + fingerprint: 3016285359649981711, + fastFingerprint: 3198632812309449502, + }, + { + input: LabelSet{ + "a": "b", + "bb": "c", + }, + fingerprint: 7122421792099404749, + fastFingerprint: 5774953389407657638, + }, + } + + for i, scenario := range scenarios { + input := Metric(scenario.input) + + if scenario.fingerprint != input.Fingerprint() { + t.Errorf("%d. expected %d, got %d", i, scenario.fingerprint, input.Fingerprint()) + } + if scenario.fastFingerprint != input.FastFingerprint() { + t.Errorf("%d. 
expected %d, got %d", i, scenario.fastFingerprint, input.FastFingerprint()) + } + } +} + +func TestMetric(t *testing.T) { + testMetric(t) +} + +func BenchmarkMetric(b *testing.B) { + for i := 0; i < b.N; i++ { + testMetric(b) + } +} + +func TestMetricNameIsValid(t *testing.T) { + var scenarios = []struct { + mn LabelValue + valid bool + }{ + { + mn: "Avalid_23name", + valid: true, + }, + { + mn: "_Avalid_23name", + valid: true, + }, + { + mn: "1valid_23name", + valid: false, + }, + { + mn: "avalid_23name", + valid: true, + }, + { + mn: "Ava:lid_23name", + valid: true, + }, + { + mn: "a lid_23name", + valid: false, + }, + { + mn: ":leading_colon", + valid: true, + }, + { + mn: "colon:in:the:middle", + valid: true, + }, + } + + for _, s := range scenarios { + if IsValidMetricName(s.mn) != s.valid { + t.Errorf("Expected %v for %q using IsValidMetricName function", s.valid, s.mn) + } + if MetricNameRE.MatchString(string(s.mn)) != s.valid { + t.Errorf("Expected %v for %q using regexp matching", s.valid, s.mn) + } + } +} diff --git a/vendor/github.com/prometheus/common/model/model.go b/vendor/github.com/prometheus/common/model/model.go new file mode 100644 index 000000000..a7b969170 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/model.go @@ -0,0 +1,16 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package model contains common data structures that are shared across +// Prometheus components and libraries. +package model diff --git a/vendor/github.com/prometheus/common/model/signature.go b/vendor/github.com/prometheus/common/model/signature.go new file mode 100644 index 000000000..8762b13c6 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/signature.go @@ -0,0 +1,144 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "sort" +) + +// SeparatorByte is a byte that cannot occur in valid UTF-8 sequences and is +// used to separate label names, label values, and other strings from each other +// when calculating their combined hash value (aka signature aka fingerprint). +const SeparatorByte byte = 255 + +var ( + // cache the signature of an empty label set. + emptyLabelSignature = hashNew() +) + +// LabelsToSignature returns a quasi-unique signature (i.e., fingerprint) for a +// given label set. (Collisions are possible but unlikely if the number of label +// sets the function is applied to is small.) 
+func LabelsToSignature(labels map[string]string) uint64 { + if len(labels) == 0 { + return emptyLabelSignature + } + + labelNames := make([]string, 0, len(labels)) + for labelName := range labels { + labelNames = append(labelNames, labelName) + } + sort.Strings(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, labelName) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, labels[labelName]) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} + +// labelSetToFingerprint works exactly as LabelsToSignature but takes a LabelSet as +// parameter (rather than a label map) and returns a Fingerprint. +func labelSetToFingerprint(ls LabelSet) Fingerprint { + if len(ls) == 0 { + return Fingerprint(emptyLabelSignature) + } + + labelNames := make(LabelNames, 0, len(ls)) + for labelName := range ls { + labelNames = append(labelNames, labelName) + } + sort.Sort(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(ls[labelName])) + sum = hashAddByte(sum, SeparatorByte) + } + return Fingerprint(sum) +} + +// labelSetToFastFingerprint works similar to labelSetToFingerprint but uses a +// faster and less allocation-heavy hash function, which is more susceptible to +// create hash collisions. Therefore, collision detection should be applied. 
+func labelSetToFastFingerprint(ls LabelSet) Fingerprint { + if len(ls) == 0 { + return Fingerprint(emptyLabelSignature) + } + + var result uint64 + for labelName, labelValue := range ls { + sum := hashNew() + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(labelValue)) + result ^= sum + } + return Fingerprint(result) +} + +// SignatureForLabels works like LabelsToSignature but takes a Metric as +// parameter (rather than a label map) and only includes the labels with the +// specified LabelNames into the signature calculation. The labels passed in +// will be sorted by this function. +func SignatureForLabels(m Metric, labels ...LabelName) uint64 { + if len(labels) == 0 { + return emptyLabelSignature + } + + sort.Sort(LabelNames(labels)) + + sum := hashNew() + for _, label := range labels { + sum = hashAdd(sum, string(label)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(m[label])) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} + +// SignatureWithoutLabels works like LabelsToSignature but takes a Metric as +// parameter (rather than a label map) and excludes the labels with any of the +// specified LabelNames from the signature calculation. 
+func SignatureWithoutLabels(m Metric, labels map[LabelName]struct{}) uint64 { + if len(m) == 0 { + return emptyLabelSignature + } + + labelNames := make(LabelNames, 0, len(m)) + for labelName := range m { + if _, exclude := labels[labelName]; !exclude { + labelNames = append(labelNames, labelName) + } + } + if len(labelNames) == 0 { + return emptyLabelSignature + } + sort.Sort(labelNames) + + sum := hashNew() + for _, labelName := range labelNames { + sum = hashAdd(sum, string(labelName)) + sum = hashAddByte(sum, SeparatorByte) + sum = hashAdd(sum, string(m[labelName])) + sum = hashAddByte(sum, SeparatorByte) + } + return sum +} diff --git a/vendor/github.com/prometheus/common/model/signature_test.go b/vendor/github.com/prometheus/common/model/signature_test.go new file mode 100644 index 000000000..d59c8a8c3 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/signature_test.go @@ -0,0 +1,314 @@ +// Copyright 2014 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "runtime" + "sync" + "testing" +) + +func TestLabelsToSignature(t *testing.T) { + var scenarios = []struct { + in map[string]string + out uint64 + }{ + { + in: map[string]string{}, + out: 14695981039346656037, + }, + { + in: map[string]string{"name": "garland, briggs", "fear": "love is not enough"}, + out: 5799056148416392346, + }, + } + + for i, scenario := range scenarios { + actual := LabelsToSignature(scenario.in) + + if actual != scenario.out { + t.Errorf("%d. expected %d, got %d", i, scenario.out, actual) + } + } +} + +func TestMetricToFingerprint(t *testing.T) { + var scenarios = []struct { + in LabelSet + out Fingerprint + }{ + { + in: LabelSet{}, + out: 14695981039346656037, + }, + { + in: LabelSet{"name": "garland, briggs", "fear": "love is not enough"}, + out: 5799056148416392346, + }, + } + + for i, scenario := range scenarios { + actual := labelSetToFingerprint(scenario.in) + + if actual != scenario.out { + t.Errorf("%d. expected %d, got %d", i, scenario.out, actual) + } + } +} + +func TestMetricToFastFingerprint(t *testing.T) { + var scenarios = []struct { + in LabelSet + out Fingerprint + }{ + { + in: LabelSet{}, + out: 14695981039346656037, + }, + { + in: LabelSet{"name": "garland, briggs", "fear": "love is not enough"}, + out: 12952432476264840823, + }, + } + + for i, scenario := range scenarios { + actual := labelSetToFastFingerprint(scenario.in) + + if actual != scenario.out { + t.Errorf("%d. 
expected %d, got %d", i, scenario.out, actual) + } + } +} + +func TestSignatureForLabels(t *testing.T) { + var scenarios = []struct { + in Metric + labels LabelNames + out uint64 + }{ + { + in: Metric{}, + labels: nil, + out: 14695981039346656037, + }, + { + in: Metric{}, + labels: LabelNames{"empty"}, + out: 7187873163539638612, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: LabelNames{"empty"}, + out: 7187873163539638612, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: LabelNames{"fear", "name"}, + out: 5799056148416392346, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"}, + labels: LabelNames{"fear", "name"}, + out: 5799056148416392346, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: LabelNames{}, + out: 14695981039346656037, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: nil, + out: 14695981039346656037, + }, + } + + for i, scenario := range scenarios { + actual := SignatureForLabels(scenario.in, scenario.labels...) + + if actual != scenario.out { + t.Errorf("%d. 
expected %d, got %d", i, scenario.out, actual) + } + } +} + +func TestSignatureWithoutLabels(t *testing.T) { + var scenarios = []struct { + in Metric + labels map[LabelName]struct{} + out uint64 + }{ + { + in: Metric{}, + labels: nil, + out: 14695981039346656037, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: map[LabelName]struct{}{"fear": struct{}{}, "name": struct{}{}}, + out: 14695981039346656037, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough", "foo": "bar"}, + labels: map[LabelName]struct{}{"foo": struct{}{}}, + out: 5799056148416392346, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: map[LabelName]struct{}{}, + out: 5799056148416392346, + }, + { + in: Metric{"name": "garland, briggs", "fear": "love is not enough"}, + labels: nil, + out: 5799056148416392346, + }, + } + + for i, scenario := range scenarios { + actual := SignatureWithoutLabels(scenario.in, scenario.labels) + + if actual != scenario.out { + t.Errorf("%d. 
expected %d, got %d", i, scenario.out, actual) + } + } +} + +func benchmarkLabelToSignature(b *testing.B, l map[string]string, e uint64) { + for i := 0; i < b.N; i++ { + if a := LabelsToSignature(l); a != e { + b.Fatalf("expected signature of %d for %s, got %d", e, l, a) + } + } +} + +func BenchmarkLabelToSignatureScalar(b *testing.B) { + benchmarkLabelToSignature(b, nil, 14695981039346656037) +} + +func BenchmarkLabelToSignatureSingle(b *testing.B) { + benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value"}, 5146282821936882169) +} + +func BenchmarkLabelToSignatureDouble(b *testing.B) { + benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717) +} + +func BenchmarkLabelToSignatureTriple(b *testing.B) { + benchmarkLabelToSignature(b, map[string]string{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121) +} + +func benchmarkMetricToFingerprint(b *testing.B, ls LabelSet, e Fingerprint) { + for i := 0; i < b.N; i++ { + if a := labelSetToFingerprint(ls); a != e { + b.Fatalf("expected signature of %d for %s, got %d", e, ls, a) + } + } +} + +func BenchmarkMetricToFingerprintScalar(b *testing.B) { + benchmarkMetricToFingerprint(b, nil, 14695981039346656037) +} + +func BenchmarkMetricToFingerprintSingle(b *testing.B) { + benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value"}, 5146282821936882169) +} + +func BenchmarkMetricToFingerprintDouble(b *testing.B) { + benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value"}, 3195800080984914717) +} + +func BenchmarkMetricToFingerprintTriple(b *testing.B) { + benchmarkMetricToFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 13843036195897128121) +} + +func 
benchmarkMetricToFastFingerprint(b *testing.B, ls LabelSet, e Fingerprint) { + for i := 0; i < b.N; i++ { + if a := labelSetToFastFingerprint(ls); a != e { + b.Fatalf("expected signature of %d for %s, got %d", e, ls, a) + } + } +} + +func BenchmarkMetricToFastFingerprintScalar(b *testing.B) { + benchmarkMetricToFastFingerprint(b, nil, 14695981039346656037) +} + +func BenchmarkMetricToFastFingerprintSingle(b *testing.B) { + benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value"}, 5147259542624943964) +} + +func BenchmarkMetricToFastFingerprintDouble(b *testing.B) { + benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value"}, 18269973311206963528) +} + +func BenchmarkMetricToFastFingerprintTriple(b *testing.B) { + benchmarkMetricToFastFingerprint(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676) +} + +func BenchmarkEmptyLabelSignature(b *testing.B) { + input := []map[string]string{nil, {}} + + var ms runtime.MemStats + runtime.ReadMemStats(&ms) + + alloc := ms.Alloc + + for _, labels := range input { + LabelsToSignature(labels) + } + + runtime.ReadMemStats(&ms) + + if got := ms.Alloc; alloc != got { + b.Fatal("expected LabelsToSignature with empty labels not to perform allocations") + } +} + +func benchmarkMetricToFastFingerprintConc(b *testing.B, ls LabelSet, e Fingerprint, concLevel int) { + var start, end sync.WaitGroup + start.Add(1) + end.Add(concLevel) + + for i := 0; i < concLevel; i++ { + go func() { + start.Wait() + for j := b.N / concLevel; j >= 0; j-- { + if a := labelSetToFastFingerprint(ls); a != e { + b.Fatalf("expected signature of %d for %s, got %d", e, ls, a) + } + } + end.Done() + }() + } + b.ResetTimer() + start.Done() + end.Wait() +} + +func BenchmarkMetricToFastFingerprintTripleConc1(b *testing.B) { + benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": 
"first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 1) +} + +func BenchmarkMetricToFastFingerprintTripleConc2(b *testing.B) { + benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 2) +} + +func BenchmarkMetricToFastFingerprintTripleConc4(b *testing.B) { + benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 4) +} + +func BenchmarkMetricToFastFingerprintTripleConc8(b *testing.B) { + benchmarkMetricToFastFingerprintConc(b, LabelSet{"first-label": "first-label-value", "second-label": "second-label-value", "third-label": "third-label-value"}, 15738406913934009676, 8) +} diff --git a/vendor/github.com/prometheus/common/model/silence.go b/vendor/github.com/prometheus/common/model/silence.go new file mode 100644 index 000000000..7538e2997 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/silence.go @@ -0,0 +1,106 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "regexp" + "time" +) + +// Matcher describes a matches the value of a given label. 
+type Matcher struct { + Name LabelName `json:"name"` + Value string `json:"value"` + IsRegex bool `json:"isRegex"` +} + +func (m *Matcher) UnmarshalJSON(b []byte) error { + type plain Matcher + if err := json.Unmarshal(b, (*plain)(m)); err != nil { + return err + } + + if len(m.Name) == 0 { + return fmt.Errorf("label name in matcher must not be empty") + } + if m.IsRegex { + if _, err := regexp.Compile(m.Value); err != nil { + return err + } + } + return nil +} + +// Validate returns true iff all fields of the matcher have valid values. +func (m *Matcher) Validate() error { + if !m.Name.IsValid() { + return fmt.Errorf("invalid name %q", m.Name) + } + if m.IsRegex { + if _, err := regexp.Compile(m.Value); err != nil { + return fmt.Errorf("invalid regular expression %q", m.Value) + } + } else if !LabelValue(m.Value).IsValid() || len(m.Value) == 0 { + return fmt.Errorf("invalid value %q", m.Value) + } + return nil +} + +// Silence defines the representation of a silence definiton +// in the Prometheus eco-system. +type Silence struct { + ID uint64 `json:"id,omitempty"` + + Matchers []*Matcher `json:"matchers"` + + StartsAt time.Time `json:"startsAt"` + EndsAt time.Time `json:"endsAt"` + + CreatedAt time.Time `json:"createdAt,omitempty"` + CreatedBy string `json:"createdBy"` + Comment string `json:"comment,omitempty"` +} + +// Validate returns true iff all fields of the silence have valid values. 
+func (s *Silence) Validate() error { + if len(s.Matchers) == 0 { + return fmt.Errorf("at least one matcher required") + } + for _, m := range s.Matchers { + if err := m.Validate(); err != nil { + return fmt.Errorf("invalid matcher: %s", err) + } + } + if s.StartsAt.IsZero() { + return fmt.Errorf("start time missing") + } + if s.EndsAt.IsZero() { + return fmt.Errorf("end time missing") + } + if s.EndsAt.Before(s.StartsAt) { + return fmt.Errorf("start time must be before end time") + } + if s.CreatedBy == "" { + return fmt.Errorf("creator information missing") + } + if s.Comment == "" { + return fmt.Errorf("comment missing") + } + if s.CreatedAt.IsZero() { + return fmt.Errorf("creation timestamp missing") + } + return nil +} diff --git a/vendor/github.com/prometheus/common/model/silence_test.go b/vendor/github.com/prometheus/common/model/silence_test.go new file mode 100644 index 000000000..8eaaf0744 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/silence_test.go @@ -0,0 +1,228 @@ +// Copyright 2015 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "strings" + "testing" + "time" +) + +func TestMatcherValidate(t *testing.T) { + var cases = []struct { + matcher *Matcher + err string + }{ + { + matcher: &Matcher{ + Name: "name", + Value: "value", + }, + }, + { + matcher: &Matcher{ + Name: "name", + Value: "value", + IsRegex: true, + }, + }, + { + matcher: &Matcher{ + Name: "name!", + Value: "value", + }, + err: "invalid name", + }, + { + matcher: &Matcher{ + Name: "", + Value: "value", + }, + err: "invalid name", + }, + { + matcher: &Matcher{ + Name: "name", + Value: "value\xff", + }, + err: "invalid value", + }, + { + matcher: &Matcher{ + Name: "name", + Value: "", + }, + err: "invalid value", + }, + } + + for i, c := range cases { + err := c.matcher.Validate() + if err == nil { + if c.err == "" { + continue + } + t.Errorf("%d. Expected error %q but got none", i, c.err) + continue + } + if c.err == "" && err != nil { + t.Errorf("%d. Expected no error but got %q", i, err) + continue + } + if !strings.Contains(err.Error(), c.err) { + t.Errorf("%d. 
Expected error to contain %q but got %q", i, c.err, err) + } + } +} + +func TestSilenceValidate(t *testing.T) { + ts := time.Now() + + var cases = []struct { + sil *Silence + err string + }{ + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: "value"}, + }, + StartsAt: ts, + EndsAt: ts, + CreatedAt: ts, + CreatedBy: "name", + Comment: "comment", + }, + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: "value"}, + {Name: "name", Value: "value"}, + {Name: "name", Value: "value"}, + {Name: "name", Value: "value", IsRegex: true}, + }, + StartsAt: ts, + EndsAt: ts, + CreatedAt: ts, + CreatedBy: "name", + Comment: "comment", + }, + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: "value"}, + }, + StartsAt: ts, + EndsAt: ts.Add(-1 * time.Minute), + CreatedAt: ts, + CreatedBy: "name", + Comment: "comment", + }, + err: "start time must be before end time", + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: "value"}, + }, + StartsAt: ts, + CreatedAt: ts, + CreatedBy: "name", + Comment: "comment", + }, + err: "end time missing", + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: "value"}, + }, + EndsAt: ts, + CreatedAt: ts, + CreatedBy: "name", + Comment: "comment", + }, + err: "start time missing", + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "!name", Value: "value"}, + }, + StartsAt: ts, + EndsAt: ts, + CreatedAt: ts, + CreatedBy: "name", + Comment: "comment", + }, + err: "invalid matcher", + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: "value"}, + }, + StartsAt: ts, + EndsAt: ts, + CreatedAt: ts, + CreatedBy: "name", + }, + err: "comment missing", + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: "value"}, + }, + StartsAt: ts, + EndsAt: ts, + CreatedBy: "name", + Comment: "comment", + }, + err: "creation timestamp missing", + }, + { + sil: &Silence{ + Matchers: []*Matcher{ + {Name: "name", Value: 
"value"}, + }, + StartsAt: ts, + EndsAt: ts, + CreatedAt: ts, + Comment: "comment", + }, + err: "creator information missing", + }, + } + + for i, c := range cases { + err := c.sil.Validate() + if err == nil { + if c.err == "" { + continue + } + t.Errorf("%d. Expected error %q but got none", i, c.err) + continue + } + if c.err == "" && err != nil { + t.Errorf("%d. Expected no error but got %q", i, err) + continue + } + if !strings.Contains(err.Error(), c.err) { + t.Errorf("%d. Expected error to contain %q but got %q", i, c.err, err) + } + } +} diff --git a/vendor/github.com/prometheus/common/model/time.go b/vendor/github.com/prometheus/common/model/time.go new file mode 100644 index 000000000..74ed5a9f7 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/time.go @@ -0,0 +1,264 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "fmt" + "math" + "regexp" + "strconv" + "strings" + "time" +) + +const ( + // MinimumTick is the minimum supported time resolution. This has to be + // at least time.Second in order for the code below to work. + minimumTick = time.Millisecond + // second is the Time duration equivalent to one second. + second = int64(time.Second / minimumTick) + // The number of nanoseconds per minimum tick. + nanosPerTick = int64(minimumTick / time.Nanosecond) + + // Earliest is the earliest Time representable. Handy for + // initializing a high watermark. 
+ Earliest = Time(math.MinInt64) + // Latest is the latest Time representable. Handy for initializing + // a low watermark. + Latest = Time(math.MaxInt64) +) + +// Time is the number of milliseconds since the epoch +// (1970-01-01 00:00 UTC) excluding leap seconds. +type Time int64 + +// Interval describes and interval between two timestamps. +type Interval struct { + Start, End Time +} + +// Now returns the current time as a Time. +func Now() Time { + return TimeFromUnixNano(time.Now().UnixNano()) +} + +// TimeFromUnix returns the Time equivalent to the Unix Time t +// provided in seconds. +func TimeFromUnix(t int64) Time { + return Time(t * second) +} + +// TimeFromUnixNano returns the Time equivalent to the Unix Time +// t provided in nanoseconds. +func TimeFromUnixNano(t int64) Time { + return Time(t / nanosPerTick) +} + +// Equal reports whether two Times represent the same instant. +func (t Time) Equal(o Time) bool { + return t == o +} + +// Before reports whether the Time t is before o. +func (t Time) Before(o Time) bool { + return t < o +} + +// After reports whether the Time t is after o. +func (t Time) After(o Time) bool { + return t > o +} + +// Add returns the Time t + d. +func (t Time) Add(d time.Duration) Time { + return t + Time(d/minimumTick) +} + +// Sub returns the Duration t - o. +func (t Time) Sub(o Time) time.Duration { + return time.Duration(t-o) * minimumTick +} + +// Time returns the time.Time representation of t. +func (t Time) Time() time.Time { + return time.Unix(int64(t)/second, (int64(t)%second)*nanosPerTick) +} + +// Unix returns t as a Unix time, the number of seconds elapsed +// since January 1, 1970 UTC. +func (t Time) Unix() int64 { + return int64(t) / second +} + +// UnixNano returns t as a Unix time, the number of nanoseconds elapsed +// since January 1, 1970 UTC. +func (t Time) UnixNano() int64 { + return int64(t) * nanosPerTick +} + +// The number of digits after the dot. 
+var dotPrecision = int(math.Log10(float64(second))) + +// String returns a string representation of the Time. +func (t Time) String() string { + return strconv.FormatFloat(float64(t)/float64(second), 'f', -1, 64) +} + +// MarshalJSON implements the json.Marshaler interface. +func (t Time) MarshalJSON() ([]byte, error) { + return []byte(t.String()), nil +} + +// UnmarshalJSON implements the json.Unmarshaler interface. +func (t *Time) UnmarshalJSON(b []byte) error { + p := strings.Split(string(b), ".") + switch len(p) { + case 1: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + *t = Time(v * second) + + case 2: + v, err := strconv.ParseInt(string(p[0]), 10, 64) + if err != nil { + return err + } + v *= second + + prec := dotPrecision - len(p[1]) + if prec < 0 { + p[1] = p[1][:dotPrecision] + } else if prec > 0 { + p[1] = p[1] + strings.Repeat("0", prec) + } + + va, err := strconv.ParseInt(p[1], 10, 32) + if err != nil { + return err + } + + *t = Time(v + va) + + default: + return fmt.Errorf("invalid time %q", string(b)) + } + return nil +} + +// Duration wraps time.Duration. It is used to parse the custom duration format +// from YAML. +// This type should not propagate beyond the scope of input/output processing. +type Duration time.Duration + +// Set implements pflag/flag.Value +func (d *Duration) Set(s string) error { + var err error + *d, err = ParseDuration(s) + return err +} + +// Type implements pflag.Value +func (d *Duration) Type() string { + return "duration" +} + +var durationRE = regexp.MustCompile("^([0-9]+)(y|w|d|h|m|s|ms)$") + +// ParseDuration parses a string into a time.Duration, assuming that a year +// always has 365d, a week always has 7d, and a day always has 24h. 
+func ParseDuration(durationStr string) (Duration, error) { + matches := durationRE.FindStringSubmatch(durationStr) + if len(matches) != 3 { + return 0, fmt.Errorf("not a valid duration string: %q", durationStr) + } + var ( + n, _ = strconv.Atoi(matches[1]) + dur = time.Duration(n) * time.Millisecond + ) + switch unit := matches[2]; unit { + case "y": + dur *= 1000 * 60 * 60 * 24 * 365 + case "w": + dur *= 1000 * 60 * 60 * 24 * 7 + case "d": + dur *= 1000 * 60 * 60 * 24 + case "h": + dur *= 1000 * 60 * 60 + case "m": + dur *= 1000 * 60 + case "s": + dur *= 1000 + case "ms": + // Value already correct + default: + return 0, fmt.Errorf("invalid time unit in duration string: %q", unit) + } + return Duration(dur), nil +} + +func (d Duration) String() string { + var ( + ms = int64(time.Duration(d) / time.Millisecond) + unit = "ms" + ) + if ms == 0 { + return "0s" + } + factors := map[string]int64{ + "y": 1000 * 60 * 60 * 24 * 365, + "w": 1000 * 60 * 60 * 24 * 7, + "d": 1000 * 60 * 60 * 24, + "h": 1000 * 60 * 60, + "m": 1000 * 60, + "s": 1000, + "ms": 1, + } + + switch int64(0) { + case ms % factors["y"]: + unit = "y" + case ms % factors["w"]: + unit = "w" + case ms % factors["d"]: + unit = "d" + case ms % factors["h"]: + unit = "h" + case ms % factors["m"]: + unit = "m" + case ms % factors["s"]: + unit = "s" + } + return fmt.Sprintf("%v%v", ms/factors[unit], unit) +} + +// MarshalYAML implements the yaml.Marshaler interface. +func (d Duration) MarshalYAML() (interface{}, error) { + return d.String(), nil +} + +// UnmarshalYAML implements the yaml.Unmarshaler interface. 
+func (d *Duration) UnmarshalYAML(unmarshal func(interface{}) error) error { + var s string + if err := unmarshal(&s); err != nil { + return err + } + dur, err := ParseDuration(s) + if err != nil { + return err + } + *d = dur + return nil +} diff --git a/vendor/github.com/prometheus/common/model/time_test.go b/vendor/github.com/prometheus/common/model/time_test.go new file mode 100644 index 000000000..3efdd65ff --- /dev/null +++ b/vendor/github.com/prometheus/common/model/time_test.go @@ -0,0 +1,132 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "testing" + "time" +) + +func TestComparators(t *testing.T) { + t1a := TimeFromUnix(0) + t1b := TimeFromUnix(0) + t2 := TimeFromUnix(2*second - 1) + + if !t1a.Equal(t1b) { + t.Fatalf("Expected %s to be equal to %s", t1a, t1b) + } + if t1a.Equal(t2) { + t.Fatalf("Expected %s to not be equal to %s", t1a, t2) + } + + if !t1a.Before(t2) { + t.Fatalf("Expected %s to be before %s", t1a, t2) + } + if t1a.Before(t1b) { + t.Fatalf("Expected %s to not be before %s", t1a, t1b) + } + + if !t2.After(t1a) { + t.Fatalf("Expected %s to be after %s", t2, t1a) + } + if t1b.After(t1a) { + t.Fatalf("Expected %s to not be after %s", t1b, t1a) + } +} + +func TestTimeConversions(t *testing.T) { + unixSecs := int64(1136239445) + unixNsecs := int64(123456789) + unixNano := unixSecs*1e9 + unixNsecs + + t1 := time.Unix(unixSecs, unixNsecs-unixNsecs%nanosPerTick) + t2 := time.Unix(unixSecs, unixNsecs) + + ts := TimeFromUnixNano(unixNano) + if !ts.Time().Equal(t1) { + t.Fatalf("Expected %s, got %s", t1, ts.Time()) + } + + // Test available precision. 
+ ts = TimeFromUnixNano(t2.UnixNano()) + if !ts.Time().Equal(t1) { + t.Fatalf("Expected %s, got %s", t1, ts.Time()) + } + + if ts.UnixNano() != unixNano-unixNano%nanosPerTick { + t.Fatalf("Expected %d, got %d", unixNano, ts.UnixNano()) + } +} + +func TestDuration(t *testing.T) { + duration := time.Second + time.Minute + time.Hour + goTime := time.Unix(1136239445, 0) + + ts := TimeFromUnix(goTime.Unix()) + if !goTime.Add(duration).Equal(ts.Add(duration).Time()) { + t.Fatalf("Expected %s to be equal to %s", goTime.Add(duration), ts.Add(duration)) + } + + earlier := ts.Add(-duration) + delta := ts.Sub(earlier) + if delta != duration { + t.Fatalf("Expected %s to be equal to %s", delta, duration) + } +} + +func TestParseDuration(t *testing.T) { + var cases = []struct { + in string + out time.Duration + }{ + { + in: "0s", + out: 0, + }, { + in: "324ms", + out: 324 * time.Millisecond, + }, { + in: "3s", + out: 3 * time.Second, + }, { + in: "5m", + out: 5 * time.Minute, + }, { + in: "1h", + out: time.Hour, + }, { + in: "4d", + out: 4 * 24 * time.Hour, + }, { + in: "3w", + out: 3 * 7 * 24 * time.Hour, + }, { + in: "10y", + out: 10 * 365 * 24 * time.Hour, + }, + } + + for _, c := range cases { + d, err := ParseDuration(c.in) + if err != nil { + t.Errorf("Unexpected error on input %q", c.in) + } + if time.Duration(d) != c.out { + t.Errorf("Expected %v but got %v", c.out, d) + } + if d.String() != c.in { + t.Errorf("Expected duration string %q but got %q", c.in, d.String()) + } + } +} diff --git a/vendor/github.com/prometheus/common/model/value.go b/vendor/github.com/prometheus/common/model/value.go new file mode 100644 index 000000000..c9ed3ffd8 --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value.go @@ -0,0 +1,416 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package model + +import ( + "encoding/json" + "fmt" + "math" + "sort" + "strconv" + "strings" +) + +var ( + // ZeroSamplePair is the pseudo zero-value of SamplePair used to signal a + // non-existing sample pair. It is a SamplePair with timestamp Earliest and + // value 0.0. Note that the natural zero value of SamplePair has a timestamp + // of 0, which is possible to appear in a real SamplePair and thus not + // suitable to signal a non-existing SamplePair. + ZeroSamplePair = SamplePair{Timestamp: Earliest} + + // ZeroSample is the pseudo zero-value of Sample used to signal a + // non-existing sample. It is a Sample with timestamp Earliest, value 0.0, + // and metric nil. Note that the natural zero value of Sample has a timestamp + // of 0, which is possible to appear in a real Sample and thus not suitable + // to signal a non-existing Sample. + ZeroSample = Sample{Timestamp: Earliest} +) + +// A SampleValue is a representation of a value for a given sample at a given +// time. +type SampleValue float64 + +// MarshalJSON implements json.Marshaler. +func (v SampleValue) MarshalJSON() ([]byte, error) { + return json.Marshal(v.String()) +} + +// UnmarshalJSON implements json.Unmarshaler. 
+func (v *SampleValue) UnmarshalJSON(b []byte) error { + if len(b) < 2 || b[0] != '"' || b[len(b)-1] != '"' { + return fmt.Errorf("sample value must be a quoted string") + } + f, err := strconv.ParseFloat(string(b[1:len(b)-1]), 64) + if err != nil { + return err + } + *v = SampleValue(f) + return nil +} + +// Equal returns true if the value of v and o is equal or if both are NaN. Note +// that v==o is false if both are NaN. If you want the conventional float +// behavior, use == to compare two SampleValues. +func (v SampleValue) Equal(o SampleValue) bool { + if v == o { + return true + } + return math.IsNaN(float64(v)) && math.IsNaN(float64(o)) +} + +func (v SampleValue) String() string { + return strconv.FormatFloat(float64(v), 'f', -1, 64) +} + +// SamplePair pairs a SampleValue with a Timestamp. +type SamplePair struct { + Timestamp Time + Value SampleValue +} + +// MarshalJSON implements json.Marshaler. +func (s SamplePair) MarshalJSON() ([]byte, error) { + t, err := json.Marshal(s.Timestamp) + if err != nil { + return nil, err + } + v, err := json.Marshal(s.Value) + if err != nil { + return nil, err + } + return []byte(fmt.Sprintf("[%s,%s]", t, v)), nil +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *SamplePair) UnmarshalJSON(b []byte) error { + v := [...]json.Unmarshaler{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Equal returns true if this SamplePair and o have equal Values and equal +// Timestamps. The sematics of Value equality is defined by SampleValue.Equal. +func (s *SamplePair) Equal(o *SamplePair) bool { + return s == o || (s.Value.Equal(o.Value) && s.Timestamp.Equal(o.Timestamp)) +} + +func (s SamplePair) String() string { + return fmt.Sprintf("%s @[%s]", s.Value, s.Timestamp) +} + +// Sample is a sample pair associated with a metric. 
+type Sample struct { + Metric Metric `json:"metric"` + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +// Equal compares first the metrics, then the timestamp, then the value. The +// sematics of value equality is defined by SampleValue.Equal. +func (s *Sample) Equal(o *Sample) bool { + if s == o { + return true + } + + if !s.Metric.Equal(o.Metric) { + return false + } + if !s.Timestamp.Equal(o.Timestamp) { + return false + } + + return s.Value.Equal(o.Value) +} + +func (s Sample) String() string { + return fmt.Sprintf("%s => %s", s.Metric, SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }) +} + +// MarshalJSON implements json.Marshaler. +func (s Sample) MarshalJSON() ([]byte, error) { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + return json.Marshal(&v) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *Sample) UnmarshalJSON(b []byte) error { + v := struct { + Metric Metric `json:"metric"` + Value SamplePair `json:"value"` + }{ + Metric: s.Metric, + Value: SamplePair{ + Timestamp: s.Timestamp, + Value: s.Value, + }, + } + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + s.Metric = v.Metric + s.Timestamp = v.Value.Timestamp + s.Value = v.Value.Value + + return nil +} + +// Samples is a sortable Sample slice. It implements sort.Interface. +type Samples []*Sample + +func (s Samples) Len() int { + return len(s) +} + +// Less compares first the metrics, then the timestamp. +func (s Samples) Less(i, j int) bool { + switch { + case s[i].Metric.Before(s[j].Metric): + return true + case s[j].Metric.Before(s[i].Metric): + return false + case s[i].Timestamp.Before(s[j].Timestamp): + return true + default: + return false + } +} + +func (s Samples) Swap(i, j int) { + s[i], s[j] = s[j], s[i] +} + +// Equal compares two sets of samples and returns true if they are equal. 
+func (s Samples) Equal(o Samples) bool { + if len(s) != len(o) { + return false + } + + for i, sample := range s { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// SampleStream is a stream of Values belonging to an attached COWMetric. +type SampleStream struct { + Metric Metric `json:"metric"` + Values []SamplePair `json:"values"` +} + +func (ss SampleStream) String() string { + vals := make([]string, len(ss.Values)) + for i, v := range ss.Values { + vals[i] = v.String() + } + return fmt.Sprintf("%s =>\n%s", ss.Metric, strings.Join(vals, "\n")) +} + +// Value is a generic interface for values resulting from a query evaluation. +type Value interface { + Type() ValueType + String() string +} + +func (Matrix) Type() ValueType { return ValMatrix } +func (Vector) Type() ValueType { return ValVector } +func (*Scalar) Type() ValueType { return ValScalar } +func (*String) Type() ValueType { return ValString } + +type ValueType int + +const ( + ValNone ValueType = iota + ValScalar + ValVector + ValMatrix + ValString +) + +// MarshalJSON implements json.Marshaler. +func (et ValueType) MarshalJSON() ([]byte, error) { + return json.Marshal(et.String()) +} + +func (et *ValueType) UnmarshalJSON(b []byte) error { + var s string + if err := json.Unmarshal(b, &s); err != nil { + return err + } + switch s { + case "": + *et = ValNone + case "scalar": + *et = ValScalar + case "vector": + *et = ValVector + case "matrix": + *et = ValMatrix + case "string": + *et = ValString + default: + return fmt.Errorf("unknown value type %q", s) + } + return nil +} + +func (e ValueType) String() string { + switch e { + case ValNone: + return "" + case ValScalar: + return "scalar" + case ValVector: + return "vector" + case ValMatrix: + return "matrix" + case ValString: + return "string" + } + panic("ValueType.String: unhandled value type") +} + +// Scalar is a scalar value evaluated at the set timestamp. 
+type Scalar struct { + Value SampleValue `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s Scalar) String() string { + return fmt.Sprintf("scalar: %v @[%v]", s.Value, s.Timestamp) +} + +// MarshalJSON implements json.Marshaler. +func (s Scalar) MarshalJSON() ([]byte, error) { + v := strconv.FormatFloat(float64(s.Value), 'f', -1, 64) + return json.Marshal([...]interface{}{s.Timestamp, string(v)}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *Scalar) UnmarshalJSON(b []byte) error { + var f string + v := [...]interface{}{&s.Timestamp, &f} + + if err := json.Unmarshal(b, &v); err != nil { + return err + } + + value, err := strconv.ParseFloat(f, 64) + if err != nil { + return fmt.Errorf("error parsing sample value: %s", err) + } + s.Value = SampleValue(value) + return nil +} + +// String is a string value evaluated at the set timestamp. +type String struct { + Value string `json:"value"` + Timestamp Time `json:"timestamp"` +} + +func (s *String) String() string { + return s.Value +} + +// MarshalJSON implements json.Marshaler. +func (s String) MarshalJSON() ([]byte, error) { + return json.Marshal([]interface{}{s.Timestamp, s.Value}) +} + +// UnmarshalJSON implements json.Unmarshaler. +func (s *String) UnmarshalJSON(b []byte) error { + v := [...]interface{}{&s.Timestamp, &s.Value} + return json.Unmarshal(b, &v) +} + +// Vector is basically only an alias for Samples, but the +// contract is that in a Vector, all Samples have the same timestamp. +type Vector []*Sample + +func (vec Vector) String() string { + entries := make([]string, len(vec)) + for i, s := range vec { + entries[i] = s.String() + } + return strings.Join(entries, "\n") +} + +func (vec Vector) Len() int { return len(vec) } +func (vec Vector) Swap(i, j int) { vec[i], vec[j] = vec[j], vec[i] } + +// Less compares first the metrics, then the timestamp. 
+func (vec Vector) Less(i, j int) bool { + switch { + case vec[i].Metric.Before(vec[j].Metric): + return true + case vec[j].Metric.Before(vec[i].Metric): + return false + case vec[i].Timestamp.Before(vec[j].Timestamp): + return true + default: + return false + } +} + +// Equal compares two sets of samples and returns true if they are equal. +func (vec Vector) Equal(o Vector) bool { + if len(vec) != len(o) { + return false + } + + for i, sample := range vec { + if !sample.Equal(o[i]) { + return false + } + } + return true +} + +// Matrix is a list of time series. +type Matrix []*SampleStream + +func (m Matrix) Len() int { return len(m) } +func (m Matrix) Less(i, j int) bool { return m[i].Metric.Before(m[j].Metric) } +func (m Matrix) Swap(i, j int) { m[i], m[j] = m[j], m[i] } + +func (mat Matrix) String() string { + matCp := make(Matrix, len(mat)) + copy(matCp, mat) + sort.Sort(matCp) + + strs := make([]string, len(matCp)) + + for i, ss := range matCp { + strs[i] = ss.String() + } + + return strings.Join(strs, "\n") +} diff --git a/vendor/github.com/prometheus/common/model/value_test.go b/vendor/github.com/prometheus/common/model/value_test.go new file mode 100644 index 000000000..b97dcf84c --- /dev/null +++ b/vendor/github.com/prometheus/common/model/value_test.go @@ -0,0 +1,468 @@ +// Copyright 2013 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package model + +import ( + "encoding/json" + "math" + "reflect" + "sort" + "testing" +) + +func TestEqualValues(t *testing.T) { + tests := map[string]struct { + in1, in2 SampleValue + want bool + }{ + "equal floats": { + in1: 3.14, + in2: 3.14, + want: true, + }, + "unequal floats": { + in1: 3.14, + in2: 3.1415, + want: false, + }, + "positive inifinities": { + in1: SampleValue(math.Inf(+1)), + in2: SampleValue(math.Inf(+1)), + want: true, + }, + "negative inifinities": { + in1: SampleValue(math.Inf(-1)), + in2: SampleValue(math.Inf(-1)), + want: true, + }, + "different inifinities": { + in1: SampleValue(math.Inf(+1)), + in2: SampleValue(math.Inf(-1)), + want: false, + }, + "number and infinity": { + in1: 42, + in2: SampleValue(math.Inf(+1)), + want: false, + }, + "number and NaN": { + in1: 42, + in2: SampleValue(math.NaN()), + want: false, + }, + "NaNs": { + in1: SampleValue(math.NaN()), + in2: SampleValue(math.NaN()), + want: true, // !!! + }, + } + + for name, test := range tests { + got := test.in1.Equal(test.in2) + if got != test.want { + t.Errorf("Comparing %s, %f and %f: got %t, want %t", name, test.in1, test.in2, got, test.want) + } + } +} + +func TestEqualSamples(t *testing.T) { + testSample := &Sample{} + + tests := map[string]struct { + in1, in2 *Sample + want bool + }{ + "equal pointers": { + in1: testSample, + in2: testSample, + want: true, + }, + "different metrics": { + in1: &Sample{Metric: Metric{"foo": "bar"}}, + in2: &Sample{Metric: Metric{"foo": "biz"}}, + want: false, + }, + "different timestamp": { + in1: &Sample{Timestamp: 0}, + in2: &Sample{Timestamp: 1}, + want: false, + }, + "different value": { + in1: &Sample{Value: 0}, + in2: &Sample{Value: 1}, + want: false, + }, + "equal samples": { + in1: &Sample{ + Metric: Metric{"foo": "bar"}, + Timestamp: 0, + Value: 1, + }, + in2: &Sample{ + Metric: Metric{"foo": "bar"}, + Timestamp: 0, + Value: 1, + }, + want: true, + }, + } + + for name, test := range tests { + got := test.in1.Equal(test.in2) 
+ if got != test.want { + t.Errorf("Comparing %s, %v and %v: got %t, want %t", name, test.in1, test.in2, got, test.want) + } + } + +} + +func TestSamplePairJSON(t *testing.T) { + input := []struct { + plain string + value SamplePair + }{ + { + plain: `[1234.567,"123.1"]`, + value: SamplePair{ + Value: 123.1, + Timestamp: 1234567, + }, + }, + } + + for _, test := range input { + b, err := json.Marshal(test.value) + if err != nil { + t.Error(err) + continue + } + + if string(b) != test.plain { + t.Errorf("encoding error: expected %q, got %q", test.plain, b) + continue + } + + var sp SamplePair + err = json.Unmarshal(b, &sp) + if err != nil { + t.Error(err) + continue + } + + if sp != test.value { + t.Errorf("decoding error: expected %v, got %v", test.value, sp) + } + } +} + +func TestSampleJSON(t *testing.T) { + input := []struct { + plain string + value Sample + }{ + { + plain: `{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]}`, + value: Sample{ + Metric: Metric{ + MetricNameLabel: "test_metric", + }, + Value: 123.1, + Timestamp: 1234567, + }, + }, + } + + for _, test := range input { + b, err := json.Marshal(test.value) + if err != nil { + t.Error(err) + continue + } + + if string(b) != test.plain { + t.Errorf("encoding error: expected %q, got %q", test.plain, b) + continue + } + + var sv Sample + err = json.Unmarshal(b, &sv) + if err != nil { + t.Error(err) + continue + } + + if !reflect.DeepEqual(sv, test.value) { + t.Errorf("decoding error: expected %v, got %v", test.value, sv) + } + } +} + +func TestVectorJSON(t *testing.T) { + input := []struct { + plain string + value Vector + }{ + { + plain: `[]`, + value: Vector{}, + }, + { + plain: `[{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]}]`, + value: Vector{&Sample{ + Metric: Metric{ + MetricNameLabel: "test_metric", + }, + Value: 123.1, + Timestamp: 1234567, + }}, + }, + { + plain: 
`[{"metric":{"__name__":"test_metric"},"value":[1234.567,"123.1"]},{"metric":{"foo":"bar"},"value":[1.234,"+Inf"]}]`, + value: Vector{ + &Sample{ + Metric: Metric{ + MetricNameLabel: "test_metric", + }, + Value: 123.1, + Timestamp: 1234567, + }, + &Sample{ + Metric: Metric{ + "foo": "bar", + }, + Value: SampleValue(math.Inf(1)), + Timestamp: 1234, + }, + }, + }, + } + + for _, test := range input { + b, err := json.Marshal(test.value) + if err != nil { + t.Error(err) + continue + } + + if string(b) != test.plain { + t.Errorf("encoding error: expected %q, got %q", test.plain, b) + continue + } + + var vec Vector + err = json.Unmarshal(b, &vec) + if err != nil { + t.Error(err) + continue + } + + if !reflect.DeepEqual(vec, test.value) { + t.Errorf("decoding error: expected %v, got %v", test.value, vec) + } + } +} + +func TestScalarJSON(t *testing.T) { + input := []struct { + plain string + value Scalar + }{ + { + plain: `[123.456,"456"]`, + value: Scalar{ + Timestamp: 123456, + Value: 456, + }, + }, + { + plain: `[123123.456,"+Inf"]`, + value: Scalar{ + Timestamp: 123123456, + Value: SampleValue(math.Inf(1)), + }, + }, + { + plain: `[123123.456,"-Inf"]`, + value: Scalar{ + Timestamp: 123123456, + Value: SampleValue(math.Inf(-1)), + }, + }, + } + + for _, test := range input { + b, err := json.Marshal(test.value) + if err != nil { + t.Error(err) + continue + } + + if string(b) != test.plain { + t.Errorf("encoding error: expected %q, got %q", test.plain, b) + continue + } + + var sv Scalar + err = json.Unmarshal(b, &sv) + if err != nil { + t.Error(err) + continue + } + + if sv != test.value { + t.Errorf("decoding error: expected %v, got %v", test.value, sv) + } + } +} + +func TestStringJSON(t *testing.T) { + input := []struct { + plain string + value String + }{ + { + plain: `[123.456,"test"]`, + value: String{ + Timestamp: 123456, + Value: "test", + }, + }, + { + plain: `[123123.456,"台北"]`, + value: String{ + Timestamp: 123123456, + Value: "台北", + }, + }, + } + + for 
_, test := range input { + b, err := json.Marshal(test.value) + if err != nil { + t.Error(err) + continue + } + + if string(b) != test.plain { + t.Errorf("encoding error: expected %q, got %q", test.plain, b) + continue + } + + var sv String + err = json.Unmarshal(b, &sv) + if err != nil { + t.Error(err) + continue + } + + if sv != test.value { + t.Errorf("decoding error: expected %v, got %v", test.value, sv) + } + } +} + +func TestVectorSort(t *testing.T) { + input := Vector{ + &Sample{ + Metric: Metric{ + MetricNameLabel: "A", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "A", + }, + Timestamp: 2, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "C", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "C", + }, + Timestamp: 2, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "B", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "B", + }, + Timestamp: 2, + }, + } + + expected := Vector{ + &Sample{ + Metric: Metric{ + MetricNameLabel: "A", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "A", + }, + Timestamp: 2, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "B", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "B", + }, + Timestamp: 2, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "C", + }, + Timestamp: 1, + }, + &Sample{ + Metric: Metric{ + MetricNameLabel: "C", + }, + Timestamp: 2, + }, + } + + sort.Sort(input) + + for i, actual := range input { + actualFp := actual.Metric.Fingerprint() + expectedFp := expected[i].Metric.Fingerprint() + + if actualFp != expectedFp { + t.Fatalf("%d. Incorrect fingerprint. Got %s; want %s", i, actualFp.String(), expectedFp.String()) + } + + if actual.Timestamp != expected[i].Timestamp { + t.Fatalf("%d. Incorrect timestamp. 
Got %s; want %s", i, actual.Timestamp, expected[i].Timestamp) + } + } +} diff --git a/vendor/github.com/prometheus/procfs/.gitignore b/vendor/github.com/prometheus/procfs/.gitignore new file mode 100644 index 000000000..25e3659ab --- /dev/null +++ b/vendor/github.com/prometheus/procfs/.gitignore @@ -0,0 +1 @@ +/fixtures/ diff --git a/vendor/github.com/prometheus/procfs/.travis.yml b/vendor/github.com/prometheus/procfs/.travis.yml new file mode 100644 index 000000000..5416cf8a2 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/.travis.yml @@ -0,0 +1,15 @@ +sudo: false + +language: go + +go: +- 1.7.x +- 1.8.x +- 1.9.x +- 1.10.x +- 1.x + +go_import_path: github.com/prometheus/procfs + +script: +- make style check_license vet test staticcheck diff --git a/vendor/github.com/prometheus/procfs/CONTRIBUTING.md b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md new file mode 100644 index 000000000..40503edbf --- /dev/null +++ b/vendor/github.com/prometheus/procfs/CONTRIBUTING.md @@ -0,0 +1,18 @@ +# Contributing + +Prometheus uses GitHub to manage reviews of pull requests. + +* If you have a trivial fix or improvement, go ahead and create a pull request, + addressing (with `@...`) the maintainer of this repository (see + [MAINTAINERS.md](MAINTAINERS.md)) in the description of the pull request. + +* If you plan to do something more involved, first discuss your ideas + on our [mailing list](https://groups.google.com/forum/?fromgroups#!forum/prometheus-developers). + This will avoid unnecessary work and surely give you and us a good deal + of inspiration. + +* Relevant coding style guidelines are the [Go Code Review + Comments](https://code.google.com/p/go-wiki/wiki/CodeReviewComments) + and the _Formatting and style_ section of Peter Bourgon's [Go: Best + Practices for Production + Environments](http://peter.bourgon.org/go-in-production/#formatting-and-style). 
diff --git a/vendor/github.com/prometheus/procfs/LICENSE b/vendor/github.com/prometheus/procfs/LICENSE new file mode 100644 index 000000000..261eeb9e9 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/LICENSE @@ -0,0 +1,201 @@ + Apache License + Version 2.0, January 2004 + http://www.apache.org/licenses/ + + TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION + + 1. Definitions. + + "License" shall mean the terms and conditions for use, reproduction, + and distribution as defined by Sections 1 through 9 of this document. + + "Licensor" shall mean the copyright owner or entity authorized by + the copyright owner that is granting the License. + + "Legal Entity" shall mean the union of the acting entity and all + other entities that control, are controlled by, or are under common + control with that entity. For the purposes of this definition, + "control" means (i) the power, direct or indirect, to cause the + direction or management of such entity, whether by contract or + otherwise, or (ii) ownership of fifty percent (50%) or more of the + outstanding shares, or (iii) beneficial ownership of such entity. + + "You" (or "Your") shall mean an individual or Legal Entity + exercising permissions granted by this License. + + "Source" form shall mean the preferred form for making modifications, + including but not limited to software source code, documentation + source, and configuration files. + + "Object" form shall mean any form resulting from mechanical + transformation or translation of a Source form, including but + not limited to compiled object code, generated documentation, + and conversions to other media types. + + "Work" shall mean the work of authorship, whether in Source or + Object form, made available under the License, as indicated by a + copyright notice that is included in or attached to the work + (an example is provided in the Appendix below). 
+ + "Derivative Works" shall mean any work, whether in Source or Object + form, that is based on (or derived from) the Work and for which the + editorial revisions, annotations, elaborations, or other modifications + represent, as a whole, an original work of authorship. For the purposes + of this License, Derivative Works shall not include works that remain + separable from, or merely link (or bind by name) to the interfaces of, + the Work and Derivative Works thereof. + + "Contribution" shall mean any work of authorship, including + the original version of the Work and any modifications or additions + to that Work or Derivative Works thereof, that is intentionally + submitted to Licensor for inclusion in the Work by the copyright owner + or by an individual or Legal Entity authorized to submit on behalf of + the copyright owner. For the purposes of this definition, "submitted" + means any form of electronic, verbal, or written communication sent + to the Licensor or its representatives, including but not limited to + communication on electronic mailing lists, source code control systems, + and issue tracking systems that are managed by, or on behalf of, the + Licensor for the purpose of discussing and improving the Work, but + excluding communication that is conspicuously marked or otherwise + designated in writing by the copyright owner as "Not a Contribution." + + "Contributor" shall mean Licensor and any individual or Legal Entity + on behalf of whom a Contribution has been received by Licensor and + subsequently incorporated within the Work. + + 2. Grant of Copyright License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + copyright license to reproduce, prepare Derivative Works of, + publicly display, publicly perform, sublicense, and distribute the + Work and such Derivative Works in Source or Object form. + + 3. 
Grant of Patent License. Subject to the terms and conditions of + this License, each Contributor hereby grants to You a perpetual, + worldwide, non-exclusive, no-charge, royalty-free, irrevocable + (except as stated in this section) patent license to make, have made, + use, offer to sell, sell, import, and otherwise transfer the Work, + where such license applies only to those patent claims licensable + by such Contributor that are necessarily infringed by their + Contribution(s) alone or by combination of their Contribution(s) + with the Work to which such Contribution(s) was submitted. If You + institute patent litigation against any entity (including a + cross-claim or counterclaim in a lawsuit) alleging that the Work + or a Contribution incorporated within the Work constitutes direct + or contributory patent infringement, then any patent licenses + granted to You under this License for that Work shall terminate + as of the date such litigation is filed. + + 4. Redistribution. You may reproduce and distribute copies of the + Work or Derivative Works thereof in any medium, with or without + modifications, and in Source or Object form, provided that You + meet the following conditions: + + (a) You must give any other recipients of the Work or + Derivative Works a copy of this License; and + + (b) You must cause any modified files to carry prominent notices + stating that You changed the files; and + + (c) You must retain, in the Source form of any Derivative Works + that You distribute, all copyright, patent, trademark, and + attribution notices from the Source form of the Work, + excluding those notices that do not pertain to any part of + the Derivative Works; and + + (d) If the Work includes a "NOTICE" text file as part of its + distribution, then any Derivative Works that You distribute must + include a readable copy of the attribution notices contained + within such NOTICE file, excluding those notices that do not + pertain to any part of the Derivative 
Works, in at least one + of the following places: within a NOTICE text file distributed + as part of the Derivative Works; within the Source form or + documentation, if provided along with the Derivative Works; or, + within a display generated by the Derivative Works, if and + wherever such third-party notices normally appear. The contents + of the NOTICE file are for informational purposes only and + do not modify the License. You may add Your own attribution + notices within Derivative Works that You distribute, alongside + or as an addendum to the NOTICE text from the Work, provided + that such additional attribution notices cannot be construed + as modifying the License. + + You may add Your own copyright statement to Your modifications and + may provide additional or different license terms and conditions + for use, reproduction, or distribution of Your modifications, or + for any such Derivative Works as a whole, provided Your use, + reproduction, and distribution of the Work otherwise complies with + the conditions stated in this License. + + 5. Submission of Contributions. Unless You explicitly state otherwise, + any Contribution intentionally submitted for inclusion in the Work + by You to the Licensor shall be under the terms and conditions of + this License, without any additional terms or conditions. + Notwithstanding the above, nothing herein shall supersede or modify + the terms of any separate license agreement you may have executed + with Licensor regarding such Contributions. + + 6. Trademarks. This License does not grant permission to use the trade + names, trademarks, service marks, or product names of the Licensor, + except as required for reasonable and customary use in describing the + origin of the Work and reproducing the content of the NOTICE file. + + 7. Disclaimer of Warranty. 
Unless required by applicable law or + agreed to in writing, Licensor provides the Work (and each + Contributor provides its Contributions) on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or + implied, including, without limitation, any warranties or conditions + of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A + PARTICULAR PURPOSE. You are solely responsible for determining the + appropriateness of using or redistributing the Work and assume any + risks associated with Your exercise of permissions under this License. + + 8. Limitation of Liability. In no event and under no legal theory, + whether in tort (including negligence), contract, or otherwise, + unless required by applicable law (such as deliberate and grossly + negligent acts) or agreed to in writing, shall any Contributor be + liable to You for damages, including any direct, indirect, special, + incidental, or consequential damages of any character arising as a + result of this License or out of the use or inability to use the + Work (including but not limited to damages for loss of goodwill, + work stoppage, computer failure or malfunction, or any and all + other commercial damages or losses), even if such Contributor + has been advised of the possibility of such damages. + + 9. Accepting Warranty or Additional Liability. While redistributing + the Work or Derivative Works thereof, You may choose to offer, + and charge a fee for, acceptance of support, warranty, indemnity, + or other liability obligations and/or rights consistent with this + License. However, in accepting such obligations, You may act only + on Your own behalf and on Your sole responsibility, not on behalf + of any other Contributor, and only if You agree to indemnify, + defend, and hold each Contributor harmless for any liability + incurred by, or claims asserted against, such Contributor by reason + of your accepting any such warranty or additional liability. 
+ + END OF TERMS AND CONDITIONS + + APPENDIX: How to apply the Apache License to your work. + + To apply the Apache License to your work, attach the following + boilerplate notice, with the fields enclosed by brackets "[]" + replaced with your own identifying information. (Don't include + the brackets!) The text should be enclosed in the appropriate + comment syntax for the file format. We also recommend that a + file or class name and description of purpose be included on the + same "printed page" as the copyright notice for easier + identification within third-party archives. + + Copyright [yyyy] [name of copyright owner] + + Licensed under the Apache License, Version 2.0 (the "License"); + you may not use this file except in compliance with the License. + You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, + WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. + See the License for the specific language governing permissions and + limitations under the License. diff --git a/vendor/github.com/prometheus/procfs/MAINTAINERS.md b/vendor/github.com/prometheus/procfs/MAINTAINERS.md new file mode 100644 index 000000000..35993c41c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/MAINTAINERS.md @@ -0,0 +1 @@ +* Tobias Schmidt diff --git a/vendor/github.com/prometheus/procfs/Makefile b/vendor/github.com/prometheus/procfs/Makefile new file mode 100644 index 000000000..5c8f72625 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/Makefile @@ -0,0 +1,71 @@ +# Copyright 2018 The Prometheus Authors +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +# Ensure GOBIN is not set during build so that promu is installed to the correct path +unexport GOBIN + +GO ?= go +GOFMT ?= $(GO)fmt +FIRST_GOPATH := $(firstword $(subst :, ,$(shell $(GO) env GOPATH))) +STATICCHECK := $(FIRST_GOPATH)/bin/staticcheck +pkgs = $(shell $(GO) list ./... | grep -v /vendor/) + +PREFIX ?= $(shell pwd) +BIN_DIR ?= $(shell pwd) + +ifdef DEBUG + bindata_flags = -debug +endif + +STATICCHECK_IGNORE = + +all: format staticcheck build test + +style: + @echo ">> checking code style" + @! $(GOFMT) -d $(shell find . -path ./vendor -prune -o -name '*.go' -print) | grep '^' + +check_license: + @echo ">> checking license header" + @./scripts/check_license.sh + +test: fixtures/.unpacked sysfs/fixtures/.unpacked + @echo ">> running all tests" + @$(GO) test -race $(shell $(GO) list ./... | grep -v /vendor/ | grep -v examples) + +format: + @echo ">> formatting code" + @$(GO) fmt $(pkgs) + +vet: + @echo ">> vetting code" + @$(GO) vet $(pkgs) + +staticcheck: $(STATICCHECK) + @echo ">> running staticcheck" + @$(STATICCHECK) -ignore "$(STATICCHECK_IGNORE)" $(pkgs) + +%/.unpacked: %.ttar + ./ttar -C $(dir $*) -x -f $*.ttar + touch $@ + +$(FIRST_GOPATH)/bin/staticcheck: + @GOOS= GOARCH= $(GO) get -u honnef.co/go/tools/cmd/staticcheck + +.PHONY: all style check_license format test vet staticcheck + +# Declaring the binaries at their default locations as PHONY targets is a hack +# to ensure the latest version is downloaded on every make execution. 
+# If this is not desired, copy/symlink these binaries to a different path and +# set the respective environment variables. +.PHONY: $(GOPATH)/bin/staticcheck diff --git a/vendor/github.com/prometheus/procfs/NOTICE b/vendor/github.com/prometheus/procfs/NOTICE new file mode 100644 index 000000000..53c5e9aa1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/NOTICE @@ -0,0 +1,7 @@ +procfs provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +Copyright 2014-2015 The Prometheus Authors + +This product includes software developed at +SoundCloud Ltd. (http://soundcloud.com/). diff --git a/vendor/github.com/prometheus/procfs/README.md b/vendor/github.com/prometheus/procfs/README.md new file mode 100644 index 000000000..209549471 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/README.md @@ -0,0 +1,11 @@ +# procfs + +This procfs package provides functions to retrieve system, kernel and process +metrics from the pseudo-filesystem proc. + +*WARNING*: This package is a work in progress. Its API may still break in +backwards-incompatible ways without warnings. Use it at your own risk. + +[![GoDoc](https://godoc.org/github.com/prometheus/procfs?status.png)](https://godoc.org/github.com/prometheus/procfs) +[![Build Status](https://travis-ci.org/prometheus/procfs.svg?branch=master)](https://travis-ci.org/prometheus/procfs) +[![Go Report Card](https://goreportcard.com/badge/github.com/prometheus/procfs)](https://goreportcard.com/report/github.com/prometheus/procfs) diff --git a/vendor/github.com/prometheus/procfs/buddyinfo.go b/vendor/github.com/prometheus/procfs/buddyinfo.go new file mode 100644 index 000000000..d3a826807 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/buddyinfo.go @@ -0,0 +1,95 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// A BuddyInfo is the details parsed from /proc/buddyinfo. +// The data is comprised of an array of free fragments of each size. +// The sizes are 2^n*PAGE_SIZE, where n is the array index. +type BuddyInfo struct { + Node string + Zone string + Sizes []float64 +} + +// NewBuddyInfo reads the buddyinfo statistics. +func NewBuddyInfo() ([]BuddyInfo, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return nil, err + } + + return fs.NewBuddyInfo() +} + +// NewBuddyInfo reads the buddyinfo statistics from the specified `proc` filesystem. 
+func (fs FS) NewBuddyInfo() ([]BuddyInfo, error) { + file, err := os.Open(fs.Path("buddyinfo")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseBuddyInfo(file) +} + +func parseBuddyInfo(r io.Reader) ([]BuddyInfo, error) { + var ( + buddyInfo = []BuddyInfo{} + scanner = bufio.NewScanner(r) + bucketCount = -1 + ) + + for scanner.Scan() { + var err error + line := scanner.Text() + parts := strings.Fields(line) + + if len(parts) < 4 { + return nil, fmt.Errorf("invalid number of fields when parsing buddyinfo") + } + + node := strings.TrimRight(parts[1], ",") + zone := strings.TrimRight(parts[3], ",") + arraySize := len(parts[4:]) + + if bucketCount == -1 { + bucketCount = arraySize + } else { + if bucketCount != arraySize { + return nil, fmt.Errorf("mismatch in number of buddyinfo buckets, previous count %d, new count %d", bucketCount, arraySize) + } + } + + sizes := make([]float64, arraySize) + for i := 0; i < arraySize; i++ { + sizes[i], err = strconv.ParseFloat(parts[i+4], 64) + if err != nil { + return nil, fmt.Errorf("invalid value in buddyinfo: %s", err) + } + } + + buddyInfo = append(buddyInfo, BuddyInfo{node, zone, sizes}) + } + + return buddyInfo, scanner.Err() +} diff --git a/vendor/github.com/prometheus/procfs/buddyinfo_test.go b/vendor/github.com/prometheus/procfs/buddyinfo_test.go new file mode 100644 index 000000000..bcf9355ca --- /dev/null +++ b/vendor/github.com/prometheus/procfs/buddyinfo_test.go @@ -0,0 +1,64 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "strings" + "testing" +) + +func TestBuddyInfo(t *testing.T) { + buddyInfo, err := FS("fixtures/buddyinfo/valid").NewBuddyInfo() + if err != nil { + t.Fatal(err) + } + + if want, got := "DMA", buddyInfo[0].Zone; want != got { + t.Errorf("want Node 0, Zone %s, got %s", want, got) + } + + if want, got := "Normal", buddyInfo[2].Zone; want != got { + t.Errorf("want Node 0, Zone %s, got %s", want, got) + } + + if want, got := 4381.0, buddyInfo[2].Sizes[0]; want != got { + t.Errorf("want Node 0, Zone Normal %f, got %f", want, got) + } + + if want, got := 572.0, buddyInfo[1].Sizes[1]; want != got { + t.Errorf("want Node 0, Zone DMA32 %f, got %f", want, got) + } +} + +func TestBuddyInfoShort(t *testing.T) { + _, err := FS("fixtures/buddyinfo/short").NewBuddyInfo() + if err == nil { + t.Errorf("expected error, but none occurred") + } + + if want, got := "invalid number of fields when parsing buddyinfo", err.Error(); want != got { + t.Errorf("wrong error returned, wanted %q, got %q", want, got) + } +} + +func TestBuddyInfoSizeMismatch(t *testing.T) { + _, err := FS("fixtures/buddyinfo/sizemismatch").NewBuddyInfo() + if err == nil { + t.Errorf("expected error, but none occurred") + } + + if want, got := "mismatch in number of buddyinfo buckets", err.Error(); !strings.HasPrefix(got, want) { + t.Errorf("wrong error returned, wanted prefix %q, got %q", want, got) + } +} diff --git a/vendor/github.com/prometheus/procfs/doc.go b/vendor/github.com/prometheus/procfs/doc.go new file mode 100644 index 000000000..e2acd6d40 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/doc.go @@ -0,0 +1,45 @@ +// Copyright 2014 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package procfs provides functions to retrieve system, kernel and process +// metrics from the pseudo-filesystem proc. +// +// Example: +// +// package main +// +// import ( +// "fmt" +// "log" +// +// "github.com/prometheus/procfs" +// ) +// +// func main() { +// p, err := procfs.Self() +// if err != nil { +// log.Fatalf("could not get process: %s", err) +// } +// +// stat, err := p.NewStat() +// if err != nil { +// log.Fatalf("could not get process stat: %s", err) +// } +// +// fmt.Printf("command: %s\n", stat.Comm) +// fmt.Printf("cpu time: %fs\n", stat.CPUTime()) +// fmt.Printf("vsize: %dB\n", stat.VirtualMemory()) +// fmt.Printf("rss: %dB\n", stat.ResidentMemory()) +// } +// +package procfs diff --git a/vendor/github.com/prometheus/procfs/fixtures.ttar b/vendor/github.com/prometheus/procfs/fixtures.ttar new file mode 100644 index 000000000..3ee8291e8 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fixtures.ttar @@ -0,0 +1,446 @@ +# Archive created by ttar -c -f fixtures.ttar fixtures/ +Directory: fixtures +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26231 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/cmdline +Lines: 1 +vimNULLBYTEtest.goNULLBYTE+10NULLBYTEEOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/comm +Lines: 1 +vim +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: 
fixtures/26231/exe +SymlinkTo: /usr/bin/vim +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26231/fd +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/0 +SymlinkTo: ../../symlinktargets/abc +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/1 +SymlinkTo: ../../symlinktargets/def +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/10 +SymlinkTo: ../../symlinktargets/xyz +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/2 +SymlinkTo: ../../symlinktargets/ghi +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/fd/3 +SymlinkTo: ../../symlinktargets/uvw +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/io +Lines: 7 +rchar: 750339 +wchar: 818609 +syscr: 7405 +syscw: 5245 +read_bytes: 1024 +write_bytes: 2048 +cancelled_write_bytes: -1024 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/limits +Lines: 17 +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size 0 unlimited bytes +Max resident set unlimited unlimited bytes +Max processes 62898 62898 processes +Max open files 2048 4096 files +Max locked memory 65536 65536 bytes +Max address space 8589934592 unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 62898 62898 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
+Path: fixtures/26231/mountstats +Lines: 19 +device rootfs mounted on / with fstype rootfs +device sysfs mounted on /sys with fstype sysfs +device proc mounted on /proc with fstype proc +device /dev/sda1 mounted on / with fstype ext4 +device 192.168.1.1:/srv/test mounted on /mnt/nfs/test with fstype nfs4 statvers=1.1 + opts: rw,vers=4.0,rsize=1048576,wsize=1048576,namlen=255,acregmin=3,acregmax=60,acdirmin=30,acdirmax=60,hard,proto=tcp,port=0,timeo=600,retrans=2,sec=sys,clientaddr=192.168.1.5,local_lock=none + age: 13968 + caps: caps=0xfff7,wtmult=512,dtsize=32768,bsize=0,namlen=255 + nfsv4: bm0=0xfdffafff,bm1=0xf9be3e,bm2=0x0,acl=0x0,pnfs=not configured + sec: flavor=1,pseudoflavor=1 + events: 52 226 0 0 1 13 398 0 0 331 0 47 0 0 77 0 0 77 0 0 0 0 0 0 0 0 0 + bytes: 1207640230 0 0 0 1210214218 0 295483 0 + RPC iostats version: 1.0 p/v: 100003/4 (nfs) + xprt: tcp 832 0 1 0 11 6428 6428 0 12154 0 24 26 5726 + per-op statistics + NULL: 0 0 0 0 0 0 0 0 + READ: 1298 1298 0 207680 1210292152 6 79386 79407 + WRITE: 0 0 0 0 0 0 0 0 + +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26231/net +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/net/dev +Lines: 4 +Inter-| Receive | Transmit + face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed + lo: 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 + eth0: 438 5 0 0 0 0 0 0 648 8 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26231/ns +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/ns/mnt +SymlinkTo: mnt:[4026531840] +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26231/ns/net +SymlinkTo: net:[4026531993] +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - 
- - - - - - +Path: fixtures/26231/stat +Lines: 1 +26231 (vim) R 5392 7446 5392 34835 7446 4218880 32533 309516 26 82 1677 44 158 99 20 0 1 0 82375 56274944 1981 18446744073709551615 4194304 6294284 140736914091744 140736914087944 139965136429984 0 0 12288 1870679807 0 0 0 17 0 0 0 31 0 0 8391624 8481048 16420864 140736914093252 140736914093279 140736914093279 140736914096107 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26232 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/cmdline +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/comm +Lines: 1 +ata_sff +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26232/fd +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/0 +SymlinkTo: ../../symlinktargets/abc +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/1 +SymlinkTo: ../../symlinktargets/def +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/2 +SymlinkTo: ../../symlinktargets/ghi +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/3 +SymlinkTo: ../../symlinktargets/uvw +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/fd/4 +SymlinkTo: ../../symlinktargets/xyz +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/limits +Lines: 17 +Limit Soft Limit Hard Limit Units +Max cpu time unlimited unlimited seconds +Max file size unlimited unlimited bytes +Max data size unlimited unlimited bytes +Max stack size 8388608 unlimited bytes +Max core file size 0 unlimited bytes +Max resident set 
unlimited unlimited bytes +Max processes 29436 29436 processes +Max open files 1024 4096 files +Max locked memory 65536 65536 bytes +Max address space unlimited unlimited bytes +Max file locks unlimited unlimited locks +Max pending signals 29436 29436 signals +Max msgqueue size 819200 819200 bytes +Max nice priority 0 0 +Max realtime priority 0 0 +Max realtime timeout unlimited unlimited us +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26232/stat +Lines: 1 +33 (ata_sff) S 2 0 0 0 -1 69238880 0 0 0 0 0 0 0 0 0 -20 1 0 5 0 0 18446744073709551615 0 0 0 0 0 0 0 2147483647 0 18446744073709551615 0 0 17 1 0 0 0 0 0 0 0 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/26233 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/26233/cmdline +Lines: 1 +com.github.uiautomatorNULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTENULLBYTEEOF +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/584 +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/584/stat +Lines: 2 +1020 ((a b ) ( c d) ) R 28378 1020 28378 34842 1020 4218880 286 0 0 0 0 0 0 0 20 0 1 0 10839175 10395648 155 18446744073709551615 4194304 4238788 140736466511168 
140736466511168 140609271124624 0 0 0 0 0 0 0 17 5 0 0 0 0 0 6336016 6337300 25579520 140736466515030 140736466515061 140736466515061 140736466518002 0 +#!/bin/cat /proc/self/stat +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/buddyinfo +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/buddyinfo/short +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/buddyinfo/short/buddyinfo +Lines: 3 +Node 0, zone +Node 0, zone +Node 0, zone +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/buddyinfo/sizemismatch +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/buddyinfo/sizemismatch/buddyinfo +Lines: 3 +Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 +Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 0 +Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/buddyinfo/valid +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/buddyinfo/valid/buddyinfo +Lines: 3 +Node 0, zone DMA 1 0 1 0 2 1 1 0 1 1 3 +Node 0, zone DMA32 759 572 791 475 194 45 12 0 0 0 0 +Node 0, zone Normal 4381 1093 185 1530 567 102 4 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/fs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/fs/xfs +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/fs/xfs/stat +Lines: 23 +extent_alloc 92447 97589 92448 93751 +abt 0 0 0 0 +blk_map 1767055 188820 184891 92447 92448 2140766 0 +bmbt 0 0 0 0 +dir 185039 92447 92444 136422 +trans 706 
944304 0 +ig 185045 58807 0 126238 0 33637 22 +log 2883 113448 9 17360 739 +push_ail 945014 0 134260 15483 0 3940 464 159985 0 40 +xstrat 92447 0 +rw 107739 94045 +attr 4 0 0 0 +icluster 8677 7849 135802 +vnodes 92601 0 0 0 92444 92444 92444 0 +buf 2666287 7122 2659202 3599 2 7085 0 10297 7085 +abtb2 184941 1277345 13257 13278 0 0 0 0 0 0 0 0 0 0 2746147 +abtc2 345295 2416764 172637 172658 0 0 0 0 0 0 0 0 0 0 21406023 +bmbt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ibt2 343004 1358467 0 0 0 0 0 0 0 0 0 0 0 0 0 +fibt2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +qm 0 0 0 0 0 0 0 0 +xpc 399724544 92823103 86219234 +debug 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/mdstat +Lines: 26 +Personalities : [linear] [multipath] [raid0] [raid1] [raid6] [raid5] [raid4] [raid10] +md3 : active raid6 sda1[8] sdh1[7] sdg1[6] sdf1[5] sde1[11] sdd1[3] sdc1[10] sdb1[9] + 5853468288 blocks super 1.2 level 6, 64k chunk, algorithm 2 [8/8] [UUUUUUUU] + +md127 : active raid1 sdi2[0] sdj2[1] + 312319552 blocks [2/2] [UU] + +md0 : active raid1 sdk[2](S) sdi1[0] sdj1[1] + 248896 blocks [2/2] [UU] + +md4 : inactive raid1 sda3[0] sdb3[1] + 4883648 blocks [2/2] [UU] + +md6 : active raid1 sdb2[2] sda2[0] + 195310144 blocks [2/1] [U_] + [=>...................] recovery = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec + +md8 : active raid1 sdb1[1] sda1[0] + 195310144 blocks [2/2] [UU] + [=>...................] 
resync = 8.5% (16775552/195310144) finish=17.0min speed=259783K/sec + +md7 : active raid6 sdb1[0] sde1[3] sdd1[2] sdc1[1] + 7813735424 blocks super 1.2 level 6, 512k chunk, algorithm 2 [4/3] [U_UU] + bitmap: 0/30 pages [0KB], 65536KB chunk + +unused devices: +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/net +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/dev +Lines: 6 +Inter-| Receive | Transmit + face |bytes packets errs drop fifo frame compressed multicast|bytes packets errs drop fifo colls carrier compressed +vethf345468: 648 8 0 0 0 0 0 0 438 5 0 0 0 0 0 0 + lo: 1664039048 1566805 0 0 0 0 0 0 1664039048 1566805 0 0 0 0 0 0 +docker0: 2568 38 0 0 0 0 0 0 438 5 0 0 0 0 0 0 + eth0: 874354587 1036395 0 0 0 0 0 0 563352563 732147 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/ip_vs +Lines: 21 +IP Virtual Server version 1.2.1 (size=4096) +Prot LocalAddress:Port Scheduler Flags + -> RemoteAddress:Port Forward Weight ActiveConn InActConn +TCP C0A80016:0CEA wlc + -> C0A85216:0CEA Tunnel 100 248 2 + -> C0A85318:0CEA Tunnel 100 248 2 + -> C0A85315:0CEA Tunnel 100 248 1 +TCP C0A80039:0CEA wlc + -> C0A85416:0CEA Tunnel 0 0 0 + -> C0A85215:0CEA Tunnel 100 1499 0 + -> C0A83215:0CEA Tunnel 100 1498 0 +TCP C0A80037:0CEA wlc + -> C0A8321A:0CEA Tunnel 0 0 0 + -> C0A83120:0CEA Tunnel 100 0 0 +TCP [2620:0000:0000:0000:0000:0000:0000:0001]:0050 sh + -> [2620:0000:0000:0000:0000:0000:0000:0002]:0050 Route 1 0 0 + -> [2620:0000:0000:0000:0000:0000:0000:0003]:0050 Route 1 0 0 + -> [2620:0000:0000:0000:0000:0000:0000:0004]:0050 Route 1 1 1 +FWM 10001000 wlc + -> C0A8321A:0CEA Route 0 0 1 + -> C0A83215:0CEA Route 0 0 2 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/ip_vs_stats +Lines: 6 + Total Incoming Outgoing Incoming 
Outgoing + Conns Packets Packets Bytes Bytes + 16AA370 E33656E5 0 51D8C8883AB3 0 + + Conns/s Pkts/s Pkts/s Bytes/s Bytes/s + 4 1FB3C 0 1282A8F 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/net/rpc +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/rpc/nfs +Lines: 5 +net 18628 0 18628 6 +rpc 4329785 0 4338291 +proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 +proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39 +proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/rpc/nfsd +Lines: 11 +rc 0 6 18622 +fh 0 0 0 0 0 +io 157286400 0 +th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 +ra 32 0 0 0 0 0 0 0 0 0 0 0 +net 18628 0 18628 6 +rpc 18628 0 0 0 0 +proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 +proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 +proc4 2 2 10853 +proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/net/xfrm_stat +Lines: 28 +XfrmInError 1 +XfrmInBufferError 2 +XfrmInHdrError 4 +XfrmInNoStates 3 +XfrmInStateProtoError 40 +XfrmInStateModeError 100 +XfrmInStateSeqError 6000 +XfrmInStateExpired 4 +XfrmInStateMismatch 23451 +XfrmInStateInvalid 55555 +XfrmInTmplMismatch 51 +XfrmInNoPols 65432 +XfrmInPolBlock 100 +XfrmInPolError 10000 +XfrmOutError 1000000 +XfrmOutBundleGenError 43321 +XfrmOutBundleCheckError 555 +XfrmOutNoStates 869 +XfrmOutStateProtoError 4542 +XfrmOutStateModeError 4 +XfrmOutStateSeqError 543 +XfrmOutStateExpired 565 
+XfrmOutPolBlock 43456 +XfrmOutPolDead 7656 +XfrmOutPolError 1454 +XfrmFwdHdrError 6654 +XfrmOutStateInvalid 28765 +XfrmAcquireError 24532 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/self +SymlinkTo: 26231 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/stat +Lines: 16 +cpu 301854 612 111922 8979004 3552 2 3944 0 0 0 +cpu0 44490 19 21045 1087069 220 1 3410 0 0 0 +cpu1 47869 23 16474 1110787 591 0 46 0 0 0 +cpu2 46504 36 15916 1112321 441 0 326 0 0 0 +cpu3 47054 102 15683 1113230 533 0 60 0 0 0 +cpu4 28413 25 10776 1140321 217 0 8 0 0 0 +cpu5 29271 101 11586 1136270 672 0 30 0 0 0 +cpu6 29152 36 10276 1139721 319 0 29 0 0 0 +cpu7 29098 268 10164 1139282 555 0 31 0 0 0 +intr 8885917 17 0 0 0 0 0 0 0 1 79281 0 0 0 0 0 0 0 231237 0 0 0 0 250586 103 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 223424 190745 13 906 1283803 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 
0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +ctxt 38014093 +btime 1418183276 +processes 26442 +procs_running 2 +procs_blocked 1 +softirq 5057579 250191 1481983 1647 211099 186066 0 1783454 622196 12499 508444 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Directory: fixtures/symlinktargets +Mode: 755 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/README +Lines: 2 +This directory contains some empty files that are the symlinks the files in the "fd" directory point to. +They are otherwise ignored by the tests +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/abc +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/def +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/ghi +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/uvw +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - +Path: fixtures/symlinktargets/xyz +Lines: 0 +Mode: 644 +# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - - diff --git a/vendor/github.com/prometheus/procfs/fs.go b/vendor/github.com/prometheus/procfs/fs.go new file mode 100644 index 000000000..b6c6b2ce1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs.go @@ -0,0 +1,82 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the 
"License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "os" + "path" + + "github.com/prometheus/procfs/nfs" + "github.com/prometheus/procfs/xfs" +) + +// FS represents the pseudo-filesystem proc, which provides an interface to +// kernel data structures. +type FS string + +// DefaultMountPoint is the common mount point of the proc filesystem. +const DefaultMountPoint = "/proc" + +// NewFS returns a new FS mounted under the given mountPoint. It will error +// if the mount point can't be read. +func NewFS(mountPoint string) (FS, error) { + info, err := os.Stat(mountPoint) + if err != nil { + return "", fmt.Errorf("could not read %s: %s", mountPoint, err) + } + if !info.IsDir() { + return "", fmt.Errorf("mount point %s is not a directory", mountPoint) + } + + return FS(mountPoint), nil +} + +// Path returns the path of the given subsystem relative to the procfs root. +func (fs FS) Path(p ...string) string { + return path.Join(append([]string{string(fs)}, p...)...) +} + +// XFSStats retrieves XFS filesystem runtime statistics. +func (fs FS) XFSStats() (*xfs.Stats, error) { + f, err := os.Open(fs.Path("fs/xfs/stat")) + if err != nil { + return nil, err + } + defer f.Close() + + return xfs.ParseStats(f) +} + +// NFSClientRPCStats retrieves NFS client RPC statistics. 
+func (fs FS) NFSClientRPCStats() (*nfs.ClientRPCStats, error) { + f, err := os.Open(fs.Path("net/rpc/nfs")) + if err != nil { + return nil, err + } + defer f.Close() + + return nfs.ParseClientRPCStats(f) +} + +// NFSdServerRPCStats retrieves NFS daemon RPC statistics. +func (fs FS) NFSdServerRPCStats() (*nfs.ServerRPCStats, error) { + f, err := os.Open(fs.Path("net/rpc/nfsd")) + if err != nil { + return nil, err + } + defer f.Close() + + return nfs.ParseServerRPCStats(f) +} diff --git a/vendor/github.com/prometheus/procfs/fs_test.go b/vendor/github.com/prometheus/procfs/fs_test.go new file mode 100644 index 000000000..a4e07f5c8 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/fs_test.go @@ -0,0 +1,39 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import "testing" + +func TestNewFS(t *testing.T) { + if _, err := NewFS("foobar"); err == nil { + t.Error("want NewFS to fail for non-existing mount point") + } + + if _, err := NewFS("procfs.go"); err == nil { + t.Error("want NewFS to fail if mount point is not a directory") + } +} + +func TestFSXFSStats(t *testing.T) { + stats, err := FS("fixtures").XFSStats() + if err != nil { + t.Fatalf("failed to parse XFS stats: %v", err) + } + + // Very lightweight test just to sanity check the path used + // to open XFS stats. Heavier tests in package xfs. 
+ if want, got := uint32(92447), stats.ExtentAllocation.ExtentsAllocated; want != got { + t.Errorf("unexpected extents allocated:\nwant: %d\nhave: %d", want, got) + } +} diff --git a/vendor/github.com/prometheus/procfs/internal/util/parse.go b/vendor/github.com/prometheus/procfs/internal/util/parse.go new file mode 100644 index 000000000..1ad21c91a --- /dev/null +++ b/vendor/github.com/prometheus/procfs/internal/util/parse.go @@ -0,0 +1,46 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package util + +import "strconv" + +// ParseUint32s parses a slice of strings into a slice of uint32s. +func ParseUint32s(ss []string) ([]uint32, error) { + us := make([]uint32, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 32) + if err != nil { + return nil, err + } + + us = append(us, uint32(u)) + } + + return us, nil +} + +// ParseUint64s parses a slice of strings into a slice of uint64s. 
+func ParseUint64s(ss []string) ([]uint64, error) { + us := make([]uint64, 0, len(ss)) + for _, s := range ss { + u, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + us = append(us, u) + } + + return us, nil +} diff --git a/vendor/github.com/prometheus/procfs/ipvs.go b/vendor/github.com/prometheus/procfs/ipvs.go new file mode 100644 index 000000000..e36d4a3bd --- /dev/null +++ b/vendor/github.com/prometheus/procfs/ipvs.go @@ -0,0 +1,259 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "encoding/hex" + "errors" + "fmt" + "io" + "io/ioutil" + "net" + "os" + "strconv" + "strings" +) + +// IPVSStats holds IPVS statistics, as exposed by the kernel in `/proc/net/ip_vs_stats`. +type IPVSStats struct { + // Total count of connections. + Connections uint64 + // Total incoming packages processed. + IncomingPackets uint64 + // Total outgoing packages processed. + OutgoingPackets uint64 + // Total incoming traffic. + IncomingBytes uint64 + // Total outgoing traffic. + OutgoingBytes uint64 +} + +// IPVSBackendStatus holds current metrics of one virtual / real address pair. +type IPVSBackendStatus struct { + // The local (virtual) IP address. + LocalAddress net.IP + // The remote (real) IP address. + RemoteAddress net.IP + // The local (virtual) port. + LocalPort uint16 + // The remote (real) port. 
+ RemotePort uint16 + // The local firewall mark + LocalMark string + // The transport protocol (TCP, UDP). + Proto string + // The current number of active connections for this virtual/real address pair. + ActiveConn uint64 + // The current number of inactive connections for this virtual/real address pair. + InactConn uint64 + // The current weight of this virtual/real address pair. + Weight uint64 +} + +// NewIPVSStats reads the IPVS statistics. +func NewIPVSStats() (IPVSStats, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return IPVSStats{}, err + } + + return fs.NewIPVSStats() +} + +// NewIPVSStats reads the IPVS statistics from the specified `proc` filesystem. +func (fs FS) NewIPVSStats() (IPVSStats, error) { + file, err := os.Open(fs.Path("net/ip_vs_stats")) + if err != nil { + return IPVSStats{}, err + } + defer file.Close() + + return parseIPVSStats(file) +} + +// parseIPVSStats performs the actual parsing of `ip_vs_stats`. +func parseIPVSStats(file io.Reader) (IPVSStats, error) { + var ( + statContent []byte + statLines []string + statFields []string + stats IPVSStats + ) + + statContent, err := ioutil.ReadAll(file) + if err != nil { + return IPVSStats{}, err + } + + statLines = strings.SplitN(string(statContent), "\n", 4) + if len(statLines) != 4 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: too short") + } + + statFields = strings.Fields(statLines[2]) + if len(statFields) != 5 { + return IPVSStats{}, errors.New("ip_vs_stats corrupt: unexpected number of fields") + } + + stats.Connections, err = strconv.ParseUint(statFields[0], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingPackets, err = strconv.ParseUint(statFields[1], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.OutgoingPackets, err = strconv.ParseUint(statFields[2], 16, 64) + if err != nil { + return IPVSStats{}, err + } + stats.IncomingBytes, err = strconv.ParseUint(statFields[3], 16, 64) + if err != nil { + return 
IPVSStats{}, err + } + stats.OutgoingBytes, err = strconv.ParseUint(statFields[4], 16, 64) + if err != nil { + return IPVSStats{}, err + } + + return stats, nil +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs. +func NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return []IPVSBackendStatus{}, err + } + + return fs.NewIPVSBackendStatus() +} + +// NewIPVSBackendStatus reads and returns the status of all (virtual,real) server pairs from the specified `proc` filesystem. +func (fs FS) NewIPVSBackendStatus() ([]IPVSBackendStatus, error) { + file, err := os.Open(fs.Path("net/ip_vs")) + if err != nil { + return nil, err + } + defer file.Close() + + return parseIPVSBackendStatus(file) +} + +func parseIPVSBackendStatus(file io.Reader) ([]IPVSBackendStatus, error) { + var ( + status []IPVSBackendStatus + scanner = bufio.NewScanner(file) + proto string + localMark string + localAddress net.IP + localPort uint16 + err error + ) + + for scanner.Scan() { + fields := strings.Fields(scanner.Text()) + if len(fields) == 0 { + continue + } + switch { + case fields[0] == "IP" || fields[0] == "Prot" || fields[1] == "RemoteAddress:Port": + continue + case fields[0] == "TCP" || fields[0] == "UDP": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = "" + localAddress, localPort, err = parseIPPort(fields[1]) + if err != nil { + return nil, err + } + case fields[0] == "FWM": + if len(fields) < 2 { + continue + } + proto = fields[0] + localMark = fields[1] + localAddress = nil + localPort = 0 + case fields[0] == "->": + if len(fields) < 6 { + continue + } + remoteAddress, remotePort, err := parseIPPort(fields[1]) + if err != nil { + return nil, err + } + weight, err := strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + activeConn, err := strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + inactConn, err := 
strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + status = append(status, IPVSBackendStatus{ + LocalAddress: localAddress, + LocalPort: localPort, + LocalMark: localMark, + RemoteAddress: remoteAddress, + RemotePort: remotePort, + Proto: proto, + Weight: weight, + ActiveConn: activeConn, + InactConn: inactConn, + }) + } + } + return status, nil +} + +func parseIPPort(s string) (net.IP, uint16, error) { + var ( + ip net.IP + err error + ) + + switch len(s) { + case 13: + ip, err = hex.DecodeString(s[0:8]) + if err != nil { + return nil, 0, err + } + case 46: + ip = net.ParseIP(s[1:40]) + if ip == nil { + return nil, 0, fmt.Errorf("invalid IPv6 address: %s", s[1:40]) + } + default: + return nil, 0, fmt.Errorf("unexpected IP:Port: %s", s) + } + + portString := s[len(s)-4:] + if len(portString) != 4 { + return nil, 0, fmt.Errorf("unexpected port string format: %s", portString) + } + port, err := strconv.ParseUint(portString, 16, 16) + if err != nil { + return nil, 0, err + } + + return ip, uint16(port), nil +} diff --git a/vendor/github.com/prometheus/procfs/ipvs_test.go b/vendor/github.com/prometheus/procfs/ipvs_test.go new file mode 100644 index 000000000..9c34e6d0d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/ipvs_test.go @@ -0,0 +1,250 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "net" + "testing" +) + +var ( + expectedIPVSStats = IPVSStats{ + Connections: 23765872, + IncomingPackets: 3811989221, + OutgoingPackets: 0, + IncomingBytes: 89991519156915, + OutgoingBytes: 0, + } + expectedIPVSBackendStatuses = []IPVSBackendStatus{ + { + LocalAddress: net.ParseIP("192.168.0.22"), + LocalPort: 3306, + RemoteAddress: net.ParseIP("192.168.82.22"), + RemotePort: 3306, + Proto: "TCP", + Weight: 100, + ActiveConn: 248, + InactConn: 2, + }, + { + LocalAddress: net.ParseIP("192.168.0.22"), + LocalPort: 3306, + RemoteAddress: net.ParseIP("192.168.83.24"), + RemotePort: 3306, + Proto: "TCP", + Weight: 100, + ActiveConn: 248, + InactConn: 2, + }, + { + LocalAddress: net.ParseIP("192.168.0.22"), + LocalPort: 3306, + RemoteAddress: net.ParseIP("192.168.83.21"), + RemotePort: 3306, + Proto: "TCP", + Weight: 100, + ActiveConn: 248, + InactConn: 1, + }, + { + LocalAddress: net.ParseIP("192.168.0.57"), + LocalPort: 3306, + RemoteAddress: net.ParseIP("192.168.84.22"), + RemotePort: 3306, + Proto: "TCP", + Weight: 0, + ActiveConn: 0, + InactConn: 0, + }, + { + LocalAddress: net.ParseIP("192.168.0.57"), + LocalPort: 3306, + RemoteAddress: net.ParseIP("192.168.82.21"), + RemotePort: 3306, + Proto: "TCP", + Weight: 100, + ActiveConn: 1499, + InactConn: 0, + }, + { + LocalAddress: net.ParseIP("192.168.0.57"), + LocalPort: 3306, + RemoteAddress: net.ParseIP("192.168.50.21"), + RemotePort: 3306, + Proto: "TCP", + Weight: 100, + ActiveConn: 1498, + InactConn: 0, + }, + { + LocalAddress: net.ParseIP("192.168.0.55"), + LocalPort: 3306, + RemoteAddress: net.ParseIP("192.168.50.26"), + RemotePort: 3306, + Proto: "TCP", + Weight: 0, + ActiveConn: 0, + InactConn: 0, + }, + { + LocalAddress: net.ParseIP("192.168.0.55"), + LocalPort: 3306, + RemoteAddress: net.ParseIP("192.168.49.32"), + RemotePort: 3306, + Proto: "TCP", + Weight: 100, + ActiveConn: 0, + InactConn: 0, + }, + { + LocalAddress: net.ParseIP("2620::1"), + LocalPort: 80, + 
RemoteAddress: net.ParseIP("2620::2"), + RemotePort: 80, + Proto: "TCP", + Weight: 1, + ActiveConn: 0, + InactConn: 0, + }, + { + LocalAddress: net.ParseIP("2620::1"), + LocalPort: 80, + RemoteAddress: net.ParseIP("2620::3"), + RemotePort: 80, + Proto: "TCP", + Weight: 1, + ActiveConn: 0, + InactConn: 0, + }, + { + LocalAddress: net.ParseIP("2620::1"), + LocalPort: 80, + RemoteAddress: net.ParseIP("2620::4"), + RemotePort: 80, + Proto: "TCP", + Weight: 1, + ActiveConn: 1, + InactConn: 1, + }, + { + LocalMark: "10001000", + RemoteAddress: net.ParseIP("192.168.50.26"), + RemotePort: 3306, + Proto: "FWM", + Weight: 0, + ActiveConn: 0, + InactConn: 1, + }, + { + LocalMark: "10001000", + RemoteAddress: net.ParseIP("192.168.50.21"), + RemotePort: 3306, + Proto: "FWM", + Weight: 0, + ActiveConn: 0, + InactConn: 2, + }, + } +) + +func TestIPVSStats(t *testing.T) { + stats, err := FS("fixtures").NewIPVSStats() + if err != nil { + t.Fatal(err) + } + + if stats != expectedIPVSStats { + t.Errorf("want %+v, have %+v", expectedIPVSStats, stats) + } +} + +func TestParseIPPort(t *testing.T) { + ip := net.ParseIP("192.168.0.22") + port := uint16(3306) + + gotIP, gotPort, err := parseIPPort("C0A80016:0CEA") + if err != nil { + t.Fatal(err) + } + if !(gotIP.Equal(ip) && port == gotPort) { + t.Errorf("want %s:%d, have %s:%d", ip, port, gotIP, gotPort) + } +} + +func TestParseIPPortInvalid(t *testing.T) { + testcases := []string{ + "", + "C0A80016", + "C0A800:1234", + "FOOBARBA:1234", + "C0A80016:0CEA:1234", + } + + for _, s := range testcases { + ip, port, err := parseIPPort(s) + if ip != nil || port != uint16(0) || err == nil { + t.Errorf("Expected error for input %s, have ip = %s, port = %v, err = %v", s, ip, port, err) + } + } +} + +func TestParseIPPortIPv6(t *testing.T) { + ip := net.ParseIP("dead:beef::1") + port := uint16(8080) + + gotIP, gotPort, err := parseIPPort("[DEAD:BEEF:0000:0000:0000:0000:0000:0001]:1F90") + if err != nil { + t.Fatal(err) + } + if !(gotIP.Equal(ip) && 
port == gotPort) { + t.Errorf("want %s:%d, have %s:%d", ip, port, gotIP, gotPort) + } +} + +func TestIPVSBackendStatus(t *testing.T) { + backendStats, err := FS("fixtures").NewIPVSBackendStatus() + if err != nil { + t.Fatal(err) + } + if want, have := len(expectedIPVSBackendStatuses), len(backendStats); want != have { + t.Fatalf("want %d backend statuses, have %d", want, have) + } + + for idx, expect := range expectedIPVSBackendStatuses { + if !backendStats[idx].LocalAddress.Equal(expect.LocalAddress) { + t.Errorf("want LocalAddress %s, have %s", expect.LocalAddress, backendStats[idx].LocalAddress) + } + if backendStats[idx].LocalPort != expect.LocalPort { + t.Errorf("want LocalPort %d, have %d", expect.LocalPort, backendStats[idx].LocalPort) + } + if !backendStats[idx].RemoteAddress.Equal(expect.RemoteAddress) { + t.Errorf("want RemoteAddress %s, have %s", expect.RemoteAddress, backendStats[idx].RemoteAddress) + } + if backendStats[idx].RemotePort != expect.RemotePort { + t.Errorf("want RemotePort %d, have %d", expect.RemotePort, backendStats[idx].RemotePort) + } + if backendStats[idx].Proto != expect.Proto { + t.Errorf("want Proto %s, have %s", expect.Proto, backendStats[idx].Proto) + } + if backendStats[idx].Weight != expect.Weight { + t.Errorf("want Weight %d, have %d", expect.Weight, backendStats[idx].Weight) + } + if backendStats[idx].ActiveConn != expect.ActiveConn { + t.Errorf("want ActiveConn %d, have %d", expect.ActiveConn, backendStats[idx].ActiveConn) + } + if backendStats[idx].InactConn != expect.InactConn { + t.Errorf("want InactConn %d, have %d", expect.InactConn, backendStats[idx].InactConn) + } + } +} diff --git a/vendor/github.com/prometheus/procfs/mdstat.go b/vendor/github.com/prometheus/procfs/mdstat.go new file mode 100644 index 000000000..9dc19583d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mdstat.go @@ -0,0 +1,151 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// 
you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "io/ioutil" + "regexp" + "strconv" + "strings" +) + +var ( + statuslineRE = regexp.MustCompile(`(\d+) blocks .*\[(\d+)/(\d+)\] \[[U_]+\]`) + buildlineRE = regexp.MustCompile(`\((\d+)/\d+\)`) +) + +// MDStat holds info parsed from /proc/mdstat. +type MDStat struct { + // Name of the device. + Name string + // activity-state of the device. + ActivityState string + // Number of active disks. + DisksActive int64 + // Total number of disks the device consists of. + DisksTotal int64 + // Number of blocks the device holds. + BlocksTotal int64 + // Number of blocks on the device that are in sync. + BlocksSynced int64 +} + +// ParseMDStat parses an mdstat-file and returns a struct with the relevant infos. 
+func (fs FS) ParseMDStat() (mdstates []MDStat, err error) { + mdStatusFilePath := fs.Path("mdstat") + content, err := ioutil.ReadFile(mdStatusFilePath) + if err != nil { + return []MDStat{}, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + + mdStates := []MDStat{} + lines := strings.Split(string(content), "\n") + for i, l := range lines { + if l == "" { + continue + } + if l[0] == ' ' { + continue + } + if strings.HasPrefix(l, "Personalities") || strings.HasPrefix(l, "unused") { + continue + } + + mainLine := strings.Split(l, " ") + if len(mainLine) < 3 { + return mdStates, fmt.Errorf("error parsing mdline: %s", l) + } + mdName := mainLine[0] + activityState := mainLine[2] + + if len(lines) <= i+3 { + return mdStates, fmt.Errorf( + "error parsing %s: too few lines for md device %s", + mdStatusFilePath, + mdName, + ) + } + + active, total, size, err := evalStatusline(lines[i+1]) + if err != nil { + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + + // j is the line number of the syncing-line. + j := i + 2 + if strings.Contains(lines[i+2], "bitmap") { // skip bitmap line + j = i + 3 + } + + // If device is syncing at the moment, get the number of currently + // synced bytes, otherwise that number equals the size of the device. 
+ syncedBlocks := size + if strings.Contains(lines[j], "recovery") || strings.Contains(lines[j], "resync") { + syncedBlocks, err = evalBuildline(lines[j]) + if err != nil { + return mdStates, fmt.Errorf("error parsing %s: %s", mdStatusFilePath, err) + } + } + + mdStates = append(mdStates, MDStat{ + Name: mdName, + ActivityState: activityState, + DisksActive: active, + DisksTotal: total, + BlocksTotal: size, + BlocksSynced: syncedBlocks, + }) + } + + return mdStates, nil +} + +func evalStatusline(statusline string) (active, total, size int64, err error) { + matches := statuslineRE.FindStringSubmatch(statusline) + if len(matches) != 4 { + return 0, 0, 0, fmt.Errorf("unexpected statusline: %s", statusline) + } + + size, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + total, err = strconv.ParseInt(matches[2], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + active, err = strconv.ParseInt(matches[3], 10, 64) + if err != nil { + return 0, 0, 0, fmt.Errorf("unexpected statusline %s: %s", statusline, err) + } + + return active, total, size, nil +} + +func evalBuildline(buildline string) (syncedBlocks int64, err error) { + matches := buildlineRE.FindStringSubmatch(buildline) + if len(matches) != 2 { + return 0, fmt.Errorf("unexpected buildline: %s", buildline) + } + + syncedBlocks, err = strconv.ParseInt(matches[1], 10, 64) + if err != nil { + return 0, fmt.Errorf("%s in buildline: %s", err, buildline) + } + + return syncedBlocks, nil +} diff --git a/vendor/github.com/prometheus/procfs/mdstat_test.go b/vendor/github.com/prometheus/procfs/mdstat_test.go new file mode 100644 index 000000000..8819228f1 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mdstat_test.go @@ -0,0 +1,44 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use 
this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "testing" +) + +func TestMDStat(t *testing.T) { + mdStates, err := FS("fixtures").ParseMDStat() + if err != nil { + t.Fatalf("parsing of reference-file failed entirely: %s", err) + } + + refs := map[string]MDStat{ + "md3": {"md3", "active", 8, 8, 5853468288, 5853468288}, + "md127": {"md127", "active", 2, 2, 312319552, 312319552}, + "md0": {"md0", "active", 2, 2, 248896, 248896}, + "md4": {"md4", "inactive", 2, 2, 4883648, 4883648}, + "md6": {"md6", "active", 1, 2, 195310144, 16775552}, + "md8": {"md8", "active", 2, 2, 195310144, 16775552}, + "md7": {"md7", "active", 3, 4, 7813735424, 7813735424}, + } + + if want, have := len(refs), len(mdStates); want != have { + t.Errorf("want %d parsed md-devices, have %d", want, have) + } + for _, md := range mdStates { + if want, have := refs[md.Name], md; want != have { + t.Errorf("%s: want %v, have %v", md.Name, want, have) + } + } +} diff --git a/vendor/github.com/prometheus/procfs/mountstats.go b/vendor/github.com/prometheus/procfs/mountstats.go new file mode 100644 index 000000000..e95ddbc67 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mountstats.go @@ -0,0 +1,569 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +// While implementing parsing of /proc/[pid]/mountstats, this blog was used +// heavily as a reference: +// https://utcc.utoronto.ca/~cks/space/blog/linux/NFSMountstatsIndex +// +// Special thanks to Chris Siebenmann for all of his posts explaining the +// various statistics available for NFS. + +import ( + "bufio" + "fmt" + "io" + "strconv" + "strings" + "time" +) + +// Constants shared between multiple functions. +const ( + deviceEntryLen = 8 + + fieldBytesLen = 8 + fieldEventsLen = 27 + + statVersion10 = "1.0" + statVersion11 = "1.1" + + fieldTransport10Len = 10 + fieldTransport11Len = 13 +) + +// A Mount is a device mount parsed from /proc/[pid]/mountstats. +type Mount struct { + // Name of the device. + Device string + // The mount point of the device. + Mount string + // The filesystem type used by the device. + Type string + // If available additional statistics related to this Mount. + // Use a type assertion to determine if additional statistics are available. + Stats MountStats +} + +// A MountStats is a type which contains detailed statistics for a specific +// type of Mount. +type MountStats interface { + mountStats() +} + +// A MountStatsNFS is a MountStats implementation for NFSv3 and v4 mounts. +type MountStatsNFS struct { + // The version of statistics provided. + StatVersion string + // The age of the NFS mount. + Age time.Duration + // Statistics related to byte counters for various operations. + Bytes NFSBytesStats + // Statistics related to various NFS event occurrences. 
+ Events NFSEventsStats + // Statistics broken down by filesystem operation. + Operations []NFSOperationStats + // Statistics about the NFS RPC transport. + Transport NFSTransportStats +} + +// mountStats implements MountStats. +func (m MountStatsNFS) mountStats() {} + +// A NFSBytesStats contains statistics about the number of bytes read and written +// by an NFS client to and from an NFS server. +type NFSBytesStats struct { + // Number of bytes read using the read() syscall. + Read uint64 + // Number of bytes written using the write() syscall. + Write uint64 + // Number of bytes read using the read() syscall in O_DIRECT mode. + DirectRead uint64 + // Number of bytes written using the write() syscall in O_DIRECT mode. + DirectWrite uint64 + // Number of bytes read from the NFS server, in total. + ReadTotal uint64 + // Number of bytes written to the NFS server, in total. + WriteTotal uint64 + // Number of pages read directly via mmap()'d files. + ReadPages uint64 + // Number of pages written directly via mmap()'d files. + WritePages uint64 +} + +// A NFSEventsStats contains statistics about NFS event occurrences. +type NFSEventsStats struct { + // Number of times cached inode attributes are re-validated from the server. + InodeRevalidate uint64 + // Number of times cached dentry nodes are re-validated from the server. + DnodeRevalidate uint64 + // Number of times an inode cache is cleared. + DataInvalidate uint64 + // Number of times cached inode attributes are invalidated. + AttributeInvalidate uint64 + // Number of times files or directories have been open()'d. + VFSOpen uint64 + // Number of times a directory lookup has occurred. + VFSLookup uint64 + // Number of times permissions have been checked. + VFSAccess uint64 + // Number of updates (and potential writes) to pages. + VFSUpdatePage uint64 + // Number of pages read directly via mmap()'d files. + VFSReadPage uint64 + // Number of times a group of pages have been read. 
+ VFSReadPages uint64 + // Number of pages written directly via mmap()'d files. + VFSWritePage uint64 + // Number of times a group of pages have been written. + VFSWritePages uint64 + // Number of times directory entries have been read with getdents(). + VFSGetdents uint64 + // Number of times attributes have been set on inodes. + VFSSetattr uint64 + // Number of pending writes that have been forcefully flushed to the server. + VFSFlush uint64 + // Number of times fsync() has been called on directories and files. + VFSFsync uint64 + // Number of times locking has been attempted on a file. + VFSLock uint64 + // Number of times files have been closed and released. + VFSFileRelease uint64 + // Unknown. Possibly unused. + CongestionWait uint64 + // Number of times files have been truncated. + Truncation uint64 + // Number of times a file has been grown due to writes beyond its existing end. + WriteExtension uint64 + // Number of times a file was removed while still open by another process. + SillyRename uint64 + // Number of times the NFS server gave less data than expected while reading. + ShortRead uint64 + // Number of times the NFS server wrote less data than expected while writing. + ShortWrite uint64 + // Number of times the NFS server indicated EJUKEBOX; retrieving data from + // offline storage. + JukeboxDelay uint64 + // Number of NFS v4.1+ pNFS reads. + PNFSRead uint64 + // Number of NFS v4.1+ pNFS writes. + PNFSWrite uint64 +} + +// A NFSOperationStats contains statistics for a single operation. +type NFSOperationStats struct { + // The name of the operation. + Operation string + // Number of requests performed for this operation. + Requests uint64 + // Number of times an actual RPC request has been transmitted for this operation. + Transmissions uint64 + // Number of times a request has had a major timeout. + MajorTimeouts uint64 + // Number of bytes sent for this operation, including RPC headers and payload. 
+ BytesSent uint64 + // Number of bytes received for this operation, including RPC headers and payload. + BytesReceived uint64 + // Duration all requests spent queued for transmission before they were sent. + CumulativeQueueTime time.Duration + // Duration it took to get a reply back after the request was transmitted. + CumulativeTotalResponseTime time.Duration + // Duration from when a request was enqueued to when it was completely handled. + CumulativeTotalRequestTime time.Duration +} + +// A NFSTransportStats contains statistics for the NFS mount RPC requests and +// responses. +type NFSTransportStats struct { + // The local port used for the NFS mount. + Port uint64 + // Number of times the client has had to establish a connection from scratch + // to the NFS server. + Bind uint64 + // Number of times the client has made a TCP connection to the NFS server. + Connect uint64 + // Duration (in jiffies, a kernel internal unit of time) the NFS mount has + // spent waiting for connections to the server to be established. + ConnectIdleTime uint64 + // Duration since the NFS mount last saw any RPC traffic. + IdleTime time.Duration + // Number of RPC requests for this mount sent to the NFS server. + Sends uint64 + // Number of RPC responses for this mount received from the NFS server. + Receives uint64 + // Number of times the NFS server sent a response with a transaction ID + // unknown to this client. + BadTransactionIDs uint64 + // A running counter, incremented on each request as the current difference + // ebetween sends and receives. + CumulativeActiveRequests uint64 + // A running counter, incremented on each request by the current backlog + // queue size. + CumulativeBacklog uint64 + + // Stats below only available with stat version 1.1. + + // Maximum number of simultaneously active RPC requests ever used. + MaximumRPCSlotsUsed uint64 + // A running counter, incremented on each request as the current size of the + // sending queue. 
+ CumulativeSendingQueue uint64 + // A running counter, incremented on each request as the current size of the + // pending queue. + CumulativePendingQueue uint64 +} + +// parseMountStats parses a /proc/[pid]/mountstats file and returns a slice +// of Mount structures containing detailed information about each mount. +// If available, statistics for each mount are parsed as well. +func parseMountStats(r io.Reader) ([]*Mount, error) { + const ( + device = "device" + statVersionPrefix = "statvers=" + + nfs3Type = "nfs" + nfs4Type = "nfs4" + ) + + var mounts []*Mount + + s := bufio.NewScanner(r) + for s.Scan() { + // Only look for device entries in this function + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 || ss[0] != device { + continue + } + + m, err := parseMount(ss) + if err != nil { + return nil, err + } + + // Does this mount also possess statistics information? + if len(ss) > deviceEntryLen { + // Only NFSv3 and v4 are supported for parsing statistics + if m.Type != nfs3Type && m.Type != nfs4Type { + return nil, fmt.Errorf("cannot parse MountStats for fstype %q", m.Type) + } + + statVersion := strings.TrimPrefix(ss[8], statVersionPrefix) + + stats, err := parseMountStatsNFS(s, statVersion) + if err != nil { + return nil, err + } + + m.Stats = stats + } + + mounts = append(mounts, m) + } + + return mounts, s.Err() +} + +// parseMount parses an entry in /proc/[pid]/mountstats in the format: +// device [device] mounted on [mount] with fstype [type] +func parseMount(ss []string) (*Mount, error) { + if len(ss) < deviceEntryLen { + return nil, fmt.Errorf("invalid device entry: %v", ss) + } + + // Check for specific words appearing at specific indices to ensure + // the format is consistent with what we expect + format := []struct { + i int + s string + }{ + {i: 0, s: "device"}, + {i: 2, s: "mounted"}, + {i: 3, s: "on"}, + {i: 5, s: "with"}, + {i: 6, s: "fstype"}, + } + + for _, f := range format { + if ss[f.i] != f.s { + return nil, fmt.Errorf("invalid 
device entry: %v", ss) + } + } + + return &Mount{ + Device: ss[1], + Mount: ss[4], + Type: ss[7], + }, nil +} + +// parseMountStatsNFS parses a MountStatsNFS by scanning additional information +// related to NFS statistics. +func parseMountStatsNFS(s *bufio.Scanner, statVersion string) (*MountStatsNFS, error) { + // Field indicators for parsing specific types of data + const ( + fieldAge = "age:" + fieldBytes = "bytes:" + fieldEvents = "events:" + fieldPerOpStats = "per-op" + fieldTransport = "xprt:" + ) + + stats := &MountStatsNFS{ + StatVersion: statVersion, + } + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + break + } + if len(ss) < 2 { + return nil, fmt.Errorf("not enough information for NFS stats: %v", ss) + } + + switch ss[0] { + case fieldAge: + // Age integer is in seconds + d, err := time.ParseDuration(ss[1] + "s") + if err != nil { + return nil, err + } + + stats.Age = d + case fieldBytes: + bstats, err := parseNFSBytesStats(ss[1:]) + if err != nil { + return nil, err + } + + stats.Bytes = *bstats + case fieldEvents: + estats, err := parseNFSEventsStats(ss[1:]) + if err != nil { + return nil, err + } + + stats.Events = *estats + case fieldTransport: + if len(ss) < 3 { + return nil, fmt.Errorf("not enough information for NFS transport stats: %v", ss) + } + + tstats, err := parseNFSTransportStats(ss[2:], statVersion) + if err != nil { + return nil, err + } + + stats.Transport = *tstats + } + + // When encountering "per-operation statistics", we must break this + // loop and parse them separately to ensure we can terminate parsing + // before reaching another device entry; hence why this 'if' statement + // is not just another switch case + if ss[0] == fieldPerOpStats { + break + } + } + + if err := s.Err(); err != nil { + return nil, err + } + + // NFS per-operation stats appear last before the next device entry + perOpStats, err := parseNFSOperationStats(s) + if err != nil { + return nil, err + } + + stats.Operations = 
perOpStats + + return stats, nil +} + +// parseNFSBytesStats parses a NFSBytesStats line using an input set of +// integer fields. +func parseNFSBytesStats(ss []string) (*NFSBytesStats, error) { + if len(ss) != fieldBytesLen { + return nil, fmt.Errorf("invalid NFS bytes stats: %v", ss) + } + + ns := make([]uint64, 0, fieldBytesLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSBytesStats{ + Read: ns[0], + Write: ns[1], + DirectRead: ns[2], + DirectWrite: ns[3], + ReadTotal: ns[4], + WriteTotal: ns[5], + ReadPages: ns[6], + WritePages: ns[7], + }, nil +} + +// parseNFSEventsStats parses a NFSEventsStats line using an input set of +// integer fields. +func parseNFSEventsStats(ss []string) (*NFSEventsStats, error) { + if len(ss) != fieldEventsLen { + return nil, fmt.Errorf("invalid NFS events stats: %v", ss) + } + + ns := make([]uint64, 0, fieldEventsLen) + for _, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + return &NFSEventsStats{ + InodeRevalidate: ns[0], + DnodeRevalidate: ns[1], + DataInvalidate: ns[2], + AttributeInvalidate: ns[3], + VFSOpen: ns[4], + VFSLookup: ns[5], + VFSAccess: ns[6], + VFSUpdatePage: ns[7], + VFSReadPage: ns[8], + VFSReadPages: ns[9], + VFSWritePage: ns[10], + VFSWritePages: ns[11], + VFSGetdents: ns[12], + VFSSetattr: ns[13], + VFSFlush: ns[14], + VFSFsync: ns[15], + VFSLock: ns[16], + VFSFileRelease: ns[17], + CongestionWait: ns[18], + Truncation: ns[19], + WriteExtension: ns[20], + SillyRename: ns[21], + ShortRead: ns[22], + ShortWrite: ns[23], + JukeboxDelay: ns[24], + PNFSRead: ns[25], + PNFSWrite: ns[26], + }, nil +} + +// parseNFSOperationStats parses a slice of NFSOperationStats by scanning +// additional information about per-operation statistics until an empty +// line is reached. 
+func parseNFSOperationStats(s *bufio.Scanner) ([]NFSOperationStats, error) { + const ( + // Number of expected fields in each per-operation statistics set + numFields = 9 + ) + + var ops []NFSOperationStats + + for s.Scan() { + ss := strings.Fields(string(s.Bytes())) + if len(ss) == 0 { + // Must break when reading a blank line after per-operation stats to + // enable top-level function to parse the next device entry + break + } + + if len(ss) != numFields { + return nil, fmt.Errorf("invalid NFS per-operations stats: %v", ss) + } + + // Skip string operation name for integers + ns := make([]uint64, 0, numFields-1) + for _, st := range ss[1:] { + n, err := strconv.ParseUint(st, 10, 64) + if err != nil { + return nil, err + } + + ns = append(ns, n) + } + + ops = append(ops, NFSOperationStats{ + Operation: strings.TrimSuffix(ss[0], ":"), + Requests: ns[0], + Transmissions: ns[1], + MajorTimeouts: ns[2], + BytesSent: ns[3], + BytesReceived: ns[4], + CumulativeQueueTime: time.Duration(ns[5]) * time.Millisecond, + CumulativeTotalResponseTime: time.Duration(ns[6]) * time.Millisecond, + CumulativeTotalRequestTime: time.Duration(ns[7]) * time.Millisecond, + }) + } + + return ops, s.Err() +} + +// parseNFSTransportStats parses a NFSTransportStats line using an input set of +// integer fields matched to a specific stats version. +func parseNFSTransportStats(ss []string, statVersion string) (*NFSTransportStats, error) { + switch statVersion { + case statVersion10: + if len(ss) != fieldTransport10Len { + return nil, fmt.Errorf("invalid NFS transport stats 1.0 statement: %v", ss) + } + case statVersion11: + if len(ss) != fieldTransport11Len { + return nil, fmt.Errorf("invalid NFS transport stats 1.1 statement: %v", ss) + } + default: + return nil, fmt.Errorf("unrecognized NFS transport stats version: %q", statVersion) + } + + // Allocate enough for v1.1 stats since zero value for v1.1 stats will be okay + // in a v1.0 response. 
+ // + // Note: slice length must be set to length of v1.1 stats to avoid a panic when + // only v1.0 stats are present. + // See: https://github.com/prometheus/node_exporter/issues/571. + ns := make([]uint64, fieldTransport11Len) + for i, s := range ss { + n, err := strconv.ParseUint(s, 10, 64) + if err != nil { + return nil, err + } + + ns[i] = n + } + + return &NFSTransportStats{ + Port: ns[0], + Bind: ns[1], + Connect: ns[2], + ConnectIdleTime: ns[3], + IdleTime: time.Duration(ns[4]) * time.Second, + Sends: ns[5], + Receives: ns[6], + BadTransactionIDs: ns[7], + CumulativeActiveRequests: ns[8], + CumulativeBacklog: ns[9], + MaximumRPCSlotsUsed: ns[10], + CumulativeSendingQueue: ns[11], + CumulativePendingQueue: ns[12], + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/mountstats_test.go b/vendor/github.com/prometheus/procfs/mountstats_test.go new file mode 100644 index 000000000..7df1d15f4 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/mountstats_test.go @@ -0,0 +1,286 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "fmt" + "reflect" + "strings" + "testing" + "time" +) + +func TestMountStats(t *testing.T) { + tests := []struct { + name string + s string + mounts []*Mount + invalid bool + }{ + { + name: "no devices", + s: `hello`, + }, + { + name: "device has too few fields", + s: `device foo`, + invalid: true, + }, + { + name: "device incorrect format", + s: `device rootfs BAD on / with fstype rootfs`, + invalid: true, + }, + { + name: "device incorrect format", + s: `device rootfs mounted BAD / with fstype rootfs`, + invalid: true, + }, + { + name: "device incorrect format", + s: `device rootfs mounted on / BAD fstype rootfs`, + invalid: true, + }, + { + name: "device incorrect format", + s: `device rootfs mounted on / with BAD rootfs`, + invalid: true, + }, + { + name: "device rootfs cannot have stats", + s: `device rootfs mounted on / with fstype rootfs stats`, + invalid: true, + }, + { + name: "NFSv4 device with too little info", + s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\nhello", + invalid: true, + }, + { + name: "NFSv4 device with bad bytes", + s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\nbytes: 0", + invalid: true, + }, + { + name: "NFSv4 device with bad events", + s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\nevents: 0", + invalid: true, + }, + { + name: "NFSv4 device with bad per-op stats", + s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\nper-op statistics\nFOO 0", + invalid: true, + }, + { + name: "NFSv4 device with bad transport stats", + s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\nxprt: tcp", + invalid: true, + }, + { + name: "NFSv4 device with bad transport version", + s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=foo\nxprt: tcp 0", + invalid: true, + }, + { + name: "NFSv4 device with bad transport stats version 1.0", + s: "device 
192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.0\nxprt: tcp 0 0 0 0 0 0 0 0 0 0 0 0 0", + invalid: true, + }, + { + name: "NFSv4 device with bad transport stats version 1.1", + s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs4 statvers=1.1\nxprt: tcp 0 0 0 0 0 0 0 0 0 0", + invalid: true, + }, + { + name: "NFSv3 device with transport stats version 1.0 OK", + s: "device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs statvers=1.0\nxprt: tcp 1 2 3 4 5 6 7 8 9 10", + mounts: []*Mount{{ + Device: "192.168.1.1:/srv", + Mount: "/mnt/nfs", + Type: "nfs", + Stats: &MountStatsNFS{ + StatVersion: "1.0", + Transport: NFSTransportStats{ + Port: 1, + Bind: 2, + Connect: 3, + ConnectIdleTime: 4, + IdleTime: 5 * time.Second, + Sends: 6, + Receives: 7, + BadTransactionIDs: 8, + CumulativeActiveRequests: 9, + CumulativeBacklog: 10, + }, + }, + }}, + }, + { + name: "device rootfs OK", + s: `device rootfs mounted on / with fstype rootfs`, + mounts: []*Mount{{ + Device: "rootfs", + Mount: "/", + Type: "rootfs", + }}, + }, + { + name: "NFSv3 device with minimal stats OK", + s: `device 192.168.1.1:/srv mounted on /mnt/nfs with fstype nfs statvers=1.1`, + mounts: []*Mount{{ + Device: "192.168.1.1:/srv", + Mount: "/mnt/nfs", + Type: "nfs", + Stats: &MountStatsNFS{ + StatVersion: "1.1", + }, + }}, + }, + { + name: "fixtures OK", + mounts: []*Mount{ + { + Device: "rootfs", + Mount: "/", + Type: "rootfs", + }, + { + Device: "sysfs", + Mount: "/sys", + Type: "sysfs", + }, + { + Device: "proc", + Mount: "/proc", + Type: "proc", + }, + { + Device: "/dev/sda1", + Mount: "/", + Type: "ext4", + }, + { + Device: "192.168.1.1:/srv/test", + Mount: "/mnt/nfs/test", + Type: "nfs4", + Stats: &MountStatsNFS{ + StatVersion: "1.1", + Age: 13968 * time.Second, + Bytes: NFSBytesStats{ + Read: 1207640230, + ReadTotal: 1210214218, + ReadPages: 295483, + }, + Events: NFSEventsStats{ + InodeRevalidate: 52, + DnodeRevalidate: 226, + VFSOpen: 1, + VFSLookup: 13, + VFSAccess: 
398, + VFSReadPages: 331, + VFSWritePages: 47, + VFSFlush: 77, + VFSFileRelease: 77, + }, + Operations: []NFSOperationStats{ + { + Operation: "NULL", + }, + { + Operation: "READ", + Requests: 1298, + Transmissions: 1298, + BytesSent: 207680, + BytesReceived: 1210292152, + CumulativeQueueTime: 6 * time.Millisecond, + CumulativeTotalResponseTime: 79386 * time.Millisecond, + CumulativeTotalRequestTime: 79407 * time.Millisecond, + }, + { + Operation: "WRITE", + }, + }, + Transport: NFSTransportStats{ + Port: 832, + Connect: 1, + IdleTime: 11 * time.Second, + Sends: 6428, + Receives: 6428, + CumulativeActiveRequests: 12154, + MaximumRPCSlotsUsed: 24, + CumulativeSendingQueue: 26, + CumulativePendingQueue: 5726, + }, + }, + }, + }, + }, + } + + for i, tt := range tests { + t.Logf("[%02d] test %q", i, tt.name) + + var mounts []*Mount + var err error + + if tt.s != "" { + mounts, err = parseMountStats(strings.NewReader(tt.s)) + } else { + proc, e := FS("fixtures").NewProc(26231) + if e != nil { + t.Fatalf("failed to create proc: %v", err) + } + + mounts, err = proc.MountStats() + } + + if tt.invalid && err == nil { + t.Error("expected an error, but none occurred") + } + if !tt.invalid && err != nil { + t.Errorf("unexpected error: %v", err) + } + + if want, have := tt.mounts, mounts; !reflect.DeepEqual(want, have) { + t.Errorf("mounts:\nwant:\n%v\nhave:\n%v", mountsStr(want), mountsStr(have)) + } + } +} + +func mountsStr(mounts []*Mount) string { + var out string + for i, m := range mounts { + out += fmt.Sprintf("[%d] %q on %q (%q)", i, m.Device, m.Mount, m.Type) + + stats, ok := m.Stats.(*MountStatsNFS) + if !ok { + out += "\n" + continue + } + + out += fmt.Sprintf("\n\t- v%s, age: %s", stats.StatVersion, stats.Age) + out += fmt.Sprintf("\n\t- bytes: %v", stats.Bytes) + out += fmt.Sprintf("\n\t- events: %v", stats.Events) + out += fmt.Sprintf("\n\t- transport: %v", stats.Transport) + out += fmt.Sprintf("\n\t- per-operation stats:") + + for _, o := range stats.Operations { 
+ out += fmt.Sprintf("\n\t\t- %v", o) + } + + out += "\n" + } + + return out +} diff --git a/vendor/github.com/prometheus/procfs/net_dev.go b/vendor/github.com/prometheus/procfs/net_dev.go new file mode 100644 index 000000000..6c17affe8 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_dev.go @@ -0,0 +1,216 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "errors" + "os" + "sort" + "strconv" + "strings" +) + +// NetDevLine is single line parsed from /proc/net/dev or /proc/[pid]/net/dev. +type NetDevLine struct { + Name string `json:"name"` // The name of the interface. + RxBytes uint64 `json:"rx_bytes"` // Cumulative count of bytes received. + RxPackets uint64 `json:"rx_packets"` // Cumulative count of packets received. + RxErrors uint64 `json:"rx_errors"` // Cumulative count of receive errors encountered. + RxDropped uint64 `json:"rx_dropped"` // Cumulative count of packets dropped while receiving. + RxFIFO uint64 `json:"rx_fifo"` // Cumulative count of FIFO buffer errors. + RxFrame uint64 `json:"rx_frame"` // Cumulative count of packet framing errors. + RxCompressed uint64 `json:"rx_compressed"` // Cumulative count of compressed packets received by the device driver. + RxMulticast uint64 `json:"rx_multicast"` // Cumulative count of multicast frames received by the device driver. + TxBytes uint64 `json:"tx_bytes"` // Cumulative count of bytes transmitted. 
+ TxPackets uint64 `json:"tx_packets"` // Cumulative count of packets transmitted. + TxErrors uint64 `json:"tx_errors"` // Cumulative count of transmit errors encountered. + TxDropped uint64 `json:"tx_dropped"` // Cumulative count of packets dropped while transmitting. + TxFIFO uint64 `json:"tx_fifo"` // Cumulative count of FIFO buffer errors. + TxCollisions uint64 `json:"tx_collisions"` // Cumulative count of collisions detected on the interface. + TxCarrier uint64 `json:"tx_carrier"` // Cumulative count of carrier losses detected by the device driver. + TxCompressed uint64 `json:"tx_compressed"` // Cumulative count of compressed packets transmitted by the device driver. +} + +// NetDev is parsed from /proc/net/dev or /proc/[pid]/net/dev. The map keys +// are interface names. +type NetDev map[string]NetDevLine + +// NewNetDev returns kernel/system statistics read from /proc/net/dev. +func NewNetDev() (NetDev, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return nil, err + } + + return fs.NewNetDev() +} + +// NewNetDev returns kernel/system statistics read from /proc/net/dev. +func (fs FS) NewNetDev() (NetDev, error) { + return newNetDev(fs.Path("net/dev")) +} + +// NewNetDev returns kernel/system statistics read from /proc/[pid]/net/dev. +func (p Proc) NewNetDev() (NetDev, error) { + return newNetDev(p.path("net/dev")) +} + +// newNetDev creates a new NetDev from the contents of the given file. +func newNetDev(file string) (NetDev, error) { + f, err := os.Open(file) + if err != nil { + return NetDev{}, err + } + defer f.Close() + + nd := NetDev{} + s := bufio.NewScanner(f) + for n := 0; s.Scan(); n++ { + // Skip the 2 header lines. + if n < 2 { + continue + } + + line, err := nd.parseLine(s.Text()) + if err != nil { + return nd, err + } + + nd[line.Name] = *line + } + + return nd, s.Err() +} + +// parseLine parses a single line from the /proc/net/dev file. Header lines +// must be filtered prior to calling this method. 
+func (nd NetDev) parseLine(rawLine string) (*NetDevLine, error) { + parts := strings.SplitN(rawLine, ":", 2) + if len(parts) != 2 { + return nil, errors.New("invalid net/dev line, missing colon") + } + fields := strings.Fields(strings.TrimSpace(parts[1])) + + var err error + line := &NetDevLine{} + + // Interface Name + line.Name = strings.TrimSpace(parts[0]) + if line.Name == "" { + return nil, errors.New("invalid net/dev line, empty interface name") + } + + // RX + line.RxBytes, err = strconv.ParseUint(fields[0], 10, 64) + if err != nil { + return nil, err + } + line.RxPackets, err = strconv.ParseUint(fields[1], 10, 64) + if err != nil { + return nil, err + } + line.RxErrors, err = strconv.ParseUint(fields[2], 10, 64) + if err != nil { + return nil, err + } + line.RxDropped, err = strconv.ParseUint(fields[3], 10, 64) + if err != nil { + return nil, err + } + line.RxFIFO, err = strconv.ParseUint(fields[4], 10, 64) + if err != nil { + return nil, err + } + line.RxFrame, err = strconv.ParseUint(fields[5], 10, 64) + if err != nil { + return nil, err + } + line.RxCompressed, err = strconv.ParseUint(fields[6], 10, 64) + if err != nil { + return nil, err + } + line.RxMulticast, err = strconv.ParseUint(fields[7], 10, 64) + if err != nil { + return nil, err + } + + // TX + line.TxBytes, err = strconv.ParseUint(fields[8], 10, 64) + if err != nil { + return nil, err + } + line.TxPackets, err = strconv.ParseUint(fields[9], 10, 64) + if err != nil { + return nil, err + } + line.TxErrors, err = strconv.ParseUint(fields[10], 10, 64) + if err != nil { + return nil, err + } + line.TxDropped, err = strconv.ParseUint(fields[11], 10, 64) + if err != nil { + return nil, err + } + line.TxFIFO, err = strconv.ParseUint(fields[12], 10, 64) + if err != nil { + return nil, err + } + line.TxCollisions, err = strconv.ParseUint(fields[13], 10, 64) + if err != nil { + return nil, err + } + line.TxCarrier, err = strconv.ParseUint(fields[14], 10, 64) + if err != nil { + return nil, err + } + 
line.TxCompressed, err = strconv.ParseUint(fields[15], 10, 64) + if err != nil { + return nil, err + } + + return line, nil +} + +// Total aggregates the values across interfaces and returns a new NetDevLine. +// The Name field will be a sorted comma seperated list of interface names. +func (nd NetDev) Total() NetDevLine { + total := NetDevLine{} + + names := make([]string, 0, len(nd)) + for _, ifc := range nd { + names = append(names, ifc.Name) + total.RxBytes += ifc.RxBytes + total.RxPackets += ifc.RxPackets + total.RxPackets += ifc.RxPackets + total.RxErrors += ifc.RxErrors + total.RxDropped += ifc.RxDropped + total.RxFIFO += ifc.RxFIFO + total.RxFrame += ifc.RxFrame + total.RxCompressed += ifc.RxCompressed + total.RxMulticast += ifc.RxMulticast + total.TxBytes += ifc.TxBytes + total.TxPackets += ifc.TxPackets + total.TxErrors += ifc.TxErrors + total.TxDropped += ifc.TxDropped + total.TxFIFO += ifc.TxFIFO + total.TxCollisions += ifc.TxCollisions + total.TxCarrier += ifc.TxCarrier + total.TxCompressed += ifc.TxCompressed + } + sort.Strings(names) + total.Name = strings.Join(names, ", ") + + return total +} diff --git a/vendor/github.com/prometheus/procfs/net_dev_test.go b/vendor/github.com/prometheus/procfs/net_dev_test.go new file mode 100644 index 000000000..b162e9c95 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/net_dev_test.go @@ -0,0 +1,86 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "testing" +) + +func TestNetDevParseLine(t *testing.T) { + const rawLine = ` eth0: 1 2 3 4 5 6 7 8 9 10 11 12 13 14 15 16` + + have, err := NetDev{}.parseLine(rawLine) + if err != nil { + t.Fatal(err) + } + + want := NetDevLine{"eth0", 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16} + if want != *have { + t.Errorf("want %v, have %v", want, have) + } +} + +func TestNewNetDev(t *testing.T) { + fs, err := NewFS("fixtures") + if err != nil { + t.Fatal(err) + } + + nd, err := fs.NewNetDev() + if err != nil { + t.Fatal(err) + } + + lines := map[string]NetDevLine{ + "vethf345468": {Name: "vethf345468", RxBytes: 648, RxPackets: 8, TxBytes: 438, TxPackets: 5}, + "lo": {Name: "lo", RxBytes: 1664039048, RxPackets: 1566805, TxBytes: 1664039048, TxPackets: 1566805}, + "docker0": {Name: "docker0", RxBytes: 2568, RxPackets: 38, TxBytes: 438, TxPackets: 5}, + "eth0": {Name: "eth0", RxBytes: 874354587, RxPackets: 1036395, TxBytes: 563352563, TxPackets: 732147}, + } + + if want, have := len(lines), len(nd); want != have { + t.Errorf("want %d parsed net/dev lines, have %d", want, have) + } + for _, line := range nd { + if want, have := lines[line.Name], line; want != have { + t.Errorf("%s: want %v, have %v", line.Name, want, have) + } + } +} + +func TestProcNewNetDev(t *testing.T) { + p, err := FS("fixtures").NewProc(26231) + if err != nil { + t.Fatal(err) + } + + nd, err := p.NewNetDev() + if err != nil { + t.Fatal(err) + } + + lines := map[string]NetDevLine{ + "lo": {Name: "lo"}, + "eth0": {Name: "eth0", RxBytes: 438, RxPackets: 5, TxBytes: 648, TxPackets: 8}, + } + + if want, have := len(lines), len(nd); want != have { + t.Errorf("want %d parsed net/dev lines, have %d", want, have) + } + for _, line := range nd { + if want, have := lines[line.Name], line; want != have { + t.Errorf("%s: want %v, have %v", line.Name, want, have) + } + } +} diff --git a/vendor/github.com/prometheus/procfs/nfs/nfs.go 
b/vendor/github.com/prometheus/procfs/nfs/nfs.go new file mode 100644 index 000000000..651bf6819 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/nfs.go @@ -0,0 +1,263 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package nfs implements parsing of /proc/net/rpc/nfsd. +// Fields are documented in https://www.svennd.be/nfsd-stats-explained-procnetrpcnfsd/ +package nfs + +// ReplyCache models the "rc" line. +type ReplyCache struct { + Hits uint64 + Misses uint64 + NoCache uint64 +} + +// FileHandles models the "fh" line. +type FileHandles struct { + Stale uint64 + TotalLookups uint64 + AnonLookups uint64 + DirNoCache uint64 + NoDirNoCache uint64 +} + +// InputOutput models the "io" line. +type InputOutput struct { + Read uint64 + Write uint64 +} + +// Threads models the "th" line. +type Threads struct { + Threads uint64 + FullCnt uint64 +} + +// ReadAheadCache models the "ra" line. +type ReadAheadCache struct { + CacheSize uint64 + CacheHistogram []uint64 + NotFound uint64 +} + +// Network models the "net" line. +type Network struct { + NetCount uint64 + UDPCount uint64 + TCPCount uint64 + TCPConnect uint64 +} + +// ClientRPC models the nfs "rpc" line. +type ClientRPC struct { + RPCCount uint64 + Retransmissions uint64 + AuthRefreshes uint64 +} + +// ServerRPC models the nfsd "rpc" line. 
+type ServerRPC struct { + RPCCount uint64 + BadCnt uint64 + BadFmt uint64 + BadAuth uint64 + BadcInt uint64 +} + +// V2Stats models the "proc2" line. +type V2Stats struct { + Null uint64 + GetAttr uint64 + SetAttr uint64 + Root uint64 + Lookup uint64 + ReadLink uint64 + Read uint64 + WrCache uint64 + Write uint64 + Create uint64 + Remove uint64 + Rename uint64 + Link uint64 + SymLink uint64 + MkDir uint64 + RmDir uint64 + ReadDir uint64 + FsStat uint64 +} + +// V3Stats models the "proc3" line. +type V3Stats struct { + Null uint64 + GetAttr uint64 + SetAttr uint64 + Lookup uint64 + Access uint64 + ReadLink uint64 + Read uint64 + Write uint64 + Create uint64 + MkDir uint64 + SymLink uint64 + MkNod uint64 + Remove uint64 + RmDir uint64 + Rename uint64 + Link uint64 + ReadDir uint64 + ReadDirPlus uint64 + FsStat uint64 + FsInfo uint64 + PathConf uint64 + Commit uint64 +} + +// ClientV4Stats models the nfs "proc4" line. +type ClientV4Stats struct { + Null uint64 + Read uint64 + Write uint64 + Commit uint64 + Open uint64 + OpenConfirm uint64 + OpenNoattr uint64 + OpenDowngrade uint64 + Close uint64 + Setattr uint64 + FsInfo uint64 + Renew uint64 + SetClientID uint64 + SetClientIDConfirm uint64 + Lock uint64 + Lockt uint64 + Locku uint64 + Access uint64 + Getattr uint64 + Lookup uint64 + LookupRoot uint64 + Remove uint64 + Rename uint64 + Link uint64 + Symlink uint64 + Create uint64 + Pathconf uint64 + StatFs uint64 + ReadLink uint64 + ReadDir uint64 + ServerCaps uint64 + DelegReturn uint64 + GetACL uint64 + SetACL uint64 + FsLocations uint64 + ReleaseLockowner uint64 + Secinfo uint64 + FsidPresent uint64 + ExchangeID uint64 + CreateSession uint64 + DestroySession uint64 + Sequence uint64 + GetLeaseTime uint64 + ReclaimComplete uint64 + LayoutGet uint64 + GetDeviceInfo uint64 + LayoutCommit uint64 + LayoutReturn uint64 + SecinfoNoName uint64 + TestStateID uint64 + FreeStateID uint64 + GetDeviceList uint64 + BindConnToSession uint64 + DestroyClientID uint64 + Seek uint64 
+ Allocate uint64 + DeAllocate uint64 + LayoutStats uint64 + Clone uint64 +} + +// ServerV4Stats models the nfsd "proc4" line. +type ServerV4Stats struct { + Null uint64 + Compound uint64 +} + +// V4Ops models the "proc4ops" line: NFSv4 operations +// Variable list, see: +// v4.0 https://tools.ietf.org/html/rfc3010 (38 operations) +// v4.1 https://tools.ietf.org/html/rfc5661 (58 operations) +// v4.2 https://tools.ietf.org/html/draft-ietf-nfsv4-minorversion2-41 (71 operations) +type V4Ops struct { + //Values uint64 // Variable depending on v4.x sub-version. TODO: Will this always at least include the fields in this struct? + Op0Unused uint64 + Op1Unused uint64 + Op2Future uint64 + Access uint64 + Close uint64 + Commit uint64 + Create uint64 + DelegPurge uint64 + DelegReturn uint64 + GetAttr uint64 + GetFH uint64 + Link uint64 + Lock uint64 + Lockt uint64 + Locku uint64 + Lookup uint64 + LookupRoot uint64 + Nverify uint64 + Open uint64 + OpenAttr uint64 + OpenConfirm uint64 + OpenDgrd uint64 + PutFH uint64 + PutPubFH uint64 + PutRootFH uint64 + Read uint64 + ReadDir uint64 + ReadLink uint64 + Remove uint64 + Rename uint64 + Renew uint64 + RestoreFH uint64 + SaveFH uint64 + SecInfo uint64 + SetAttr uint64 + Verify uint64 + Write uint64 + RelLockOwner uint64 +} + +// ClientRPCStats models all stats from /proc/net/rpc/nfs. +type ClientRPCStats struct { + Network Network + ClientRPC ClientRPC + V2Stats V2Stats + V3Stats V3Stats + ClientV4Stats ClientV4Stats +} + +// ServerRPCStats models all stats from /proc/net/rpc/nfsd. 
+type ServerRPCStats struct { + ReplyCache ReplyCache + FileHandles FileHandles + InputOutput InputOutput + Threads Threads + ReadAheadCache ReadAheadCache + Network Network + ServerRPC ServerRPC + V2Stats V2Stats + V3Stats V3Stats + ServerV4Stats ServerV4Stats + V4Ops V4Ops +} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse.go b/vendor/github.com/prometheus/procfs/nfs/parse.go new file mode 100644 index 000000000..95a83cc5b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/parse.go @@ -0,0 +1,317 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package nfs + +import ( + "fmt" +) + +func parseReplyCache(v []uint64) (ReplyCache, error) { + if len(v) != 3 { + return ReplyCache{}, fmt.Errorf("invalid ReplyCache line %q", v) + } + + return ReplyCache{ + Hits: v[0], + Misses: v[1], + NoCache: v[2], + }, nil +} + +func parseFileHandles(v []uint64) (FileHandles, error) { + if len(v) != 5 { + return FileHandles{}, fmt.Errorf("invalid FileHandles, line %q", v) + } + + return FileHandles{ + Stale: v[0], + TotalLookups: v[1], + AnonLookups: v[2], + DirNoCache: v[3], + NoDirNoCache: v[4], + }, nil +} + +func parseInputOutput(v []uint64) (InputOutput, error) { + if len(v) != 2 { + return InputOutput{}, fmt.Errorf("invalid InputOutput line %q", v) + } + + return InputOutput{ + Read: v[0], + Write: v[1], + }, nil +} + +func parseThreads(v []uint64) (Threads, error) { + if len(v) != 2 { + return Threads{}, fmt.Errorf("invalid Threads line %q", v) + } + + return Threads{ + Threads: v[0], + FullCnt: v[1], + }, nil +} + +func parseReadAheadCache(v []uint64) (ReadAheadCache, error) { + if len(v) != 12 { + return ReadAheadCache{}, fmt.Errorf("invalid ReadAheadCache line %q", v) + } + + return ReadAheadCache{ + CacheSize: v[0], + CacheHistogram: v[1:11], + NotFound: v[11], + }, nil +} + +func parseNetwork(v []uint64) (Network, error) { + if len(v) != 4 { + return Network{}, fmt.Errorf("invalid Network line %q", v) + } + + return Network{ + NetCount: v[0], + UDPCount: v[1], + TCPCount: v[2], + TCPConnect: v[3], + }, nil +} + +func parseServerRPC(v []uint64) (ServerRPC, error) { + if len(v) != 5 { + return ServerRPC{}, fmt.Errorf("invalid RPC line %q", v) + } + + return ServerRPC{ + RPCCount: v[0], + BadCnt: v[1], + BadFmt: v[2], + BadAuth: v[3], + BadcInt: v[4], + }, nil +} + +func parseClientRPC(v []uint64) (ClientRPC, error) { + if len(v) != 3 { + return ClientRPC{}, fmt.Errorf("invalid RPC line %q", v) + } + + return ClientRPC{ + RPCCount: v[0], + Retransmissions: v[1], + AuthRefreshes: v[2], + }, nil +} + +func 
parseV2Stats(v []uint64) (V2Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 18 { + return V2Stats{}, fmt.Errorf("invalid V2Stats line %q", v) + } + + return V2Stats{ + Null: v[1], + GetAttr: v[2], + SetAttr: v[3], + Root: v[4], + Lookup: v[5], + ReadLink: v[6], + Read: v[7], + WrCache: v[8], + Write: v[9], + Create: v[10], + Remove: v[11], + Rename: v[12], + Link: v[13], + SymLink: v[14], + MkDir: v[15], + RmDir: v[16], + ReadDir: v[17], + FsStat: v[18], + }, nil +} + +func parseV3Stats(v []uint64) (V3Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 22 { + return V3Stats{}, fmt.Errorf("invalid V3Stats line %q", v) + } + + return V3Stats{ + Null: v[1], + GetAttr: v[2], + SetAttr: v[3], + Lookup: v[4], + Access: v[5], + ReadLink: v[6], + Read: v[7], + Write: v[8], + Create: v[9], + MkDir: v[10], + SymLink: v[11], + MkNod: v[12], + Remove: v[13], + RmDir: v[14], + Rename: v[15], + Link: v[16], + ReadDir: v[17], + ReadDirPlus: v[18], + FsStat: v[19], + FsInfo: v[20], + PathConf: v[21], + Commit: v[22], + }, nil +} + +func parseClientV4Stats(v []uint64) (ClientV4Stats, error) { + values := int(v[0]) + if len(v[1:]) != values { + return ClientV4Stats{}, fmt.Errorf("invalid ClientV4Stats line %q", v) + } + + // This function currently supports mapping 59 NFS v4 client stats. Older + // kernels may emit fewer stats, so we must detect this and pad out the + // values to match the expected slice size. 
+ if values < 59 { + newValues := make([]uint64, 60) + copy(newValues, v) + v = newValues + } + + return ClientV4Stats{ + Null: v[1], + Read: v[2], + Write: v[3], + Commit: v[4], + Open: v[5], + OpenConfirm: v[6], + OpenNoattr: v[7], + OpenDowngrade: v[8], + Close: v[9], + Setattr: v[10], + FsInfo: v[11], + Renew: v[12], + SetClientID: v[13], + SetClientIDConfirm: v[14], + Lock: v[15], + Lockt: v[16], + Locku: v[17], + Access: v[18], + Getattr: v[19], + Lookup: v[20], + LookupRoot: v[21], + Remove: v[22], + Rename: v[23], + Link: v[24], + Symlink: v[25], + Create: v[26], + Pathconf: v[27], + StatFs: v[28], + ReadLink: v[29], + ReadDir: v[30], + ServerCaps: v[31], + DelegReturn: v[32], + GetACL: v[33], + SetACL: v[34], + FsLocations: v[35], + ReleaseLockowner: v[36], + Secinfo: v[37], + FsidPresent: v[38], + ExchangeID: v[39], + CreateSession: v[40], + DestroySession: v[41], + Sequence: v[42], + GetLeaseTime: v[43], + ReclaimComplete: v[44], + LayoutGet: v[45], + GetDeviceInfo: v[46], + LayoutCommit: v[47], + LayoutReturn: v[48], + SecinfoNoName: v[49], + TestStateID: v[50], + FreeStateID: v[51], + GetDeviceList: v[52], + BindConnToSession: v[53], + DestroyClientID: v[54], + Seek: v[55], + Allocate: v[56], + DeAllocate: v[57], + LayoutStats: v[58], + Clone: v[59], + }, nil +} + +func parseServerV4Stats(v []uint64) (ServerV4Stats, error) { + values := int(v[0]) + if len(v[1:]) != values || values != 2 { + return ServerV4Stats{}, fmt.Errorf("invalid V4Stats line %q", v) + } + + return ServerV4Stats{ + Null: v[1], + Compound: v[2], + }, nil +} + +func parseV4Ops(v []uint64) (V4Ops, error) { + values := int(v[0]) + if len(v[1:]) != values || values < 39 { + return V4Ops{}, fmt.Errorf("invalid V4Ops line %q", v) + } + + stats := V4Ops{ + Op0Unused: v[1], + Op1Unused: v[2], + Op2Future: v[3], + Access: v[4], + Close: v[5], + Commit: v[6], + Create: v[7], + DelegPurge: v[8], + DelegReturn: v[9], + GetAttr: v[10], + GetFH: v[11], + Link: v[12], + Lock: v[13], + Lockt: 
v[14], + Locku: v[15], + Lookup: v[16], + LookupRoot: v[17], + Nverify: v[18], + Open: v[19], + OpenAttr: v[20], + OpenConfirm: v[21], + OpenDgrd: v[22], + PutFH: v[23], + PutPubFH: v[24], + PutRootFH: v[25], + Read: v[26], + ReadDir: v[27], + ReadLink: v[28], + Remove: v[29], + Rename: v[30], + Renew: v[31], + RestoreFH: v[32], + SaveFH: v[33], + SecInfo: v[34], + SetAttr: v[35], + Verify: v[36], + Write: v[37], + RelLockOwner: v[38], + } + + return stats, nil +} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go new file mode 100644 index 000000000..c0d3a5ad9 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfs.go @@ -0,0 +1,67 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package nfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ParseClientRPCStats returns stats read from /proc/net/rpc/nfs +func ParseClientRPCStats(r io.Reader) (*ClientRPCStats, error) { + stats := &ClientRPCStats{} + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least + if len(parts) < 2 { + return nil, fmt.Errorf("invalid NFS metric line %q", line) + } + + values, err := util.ParseUint64s(parts[1:]) + if err != nil { + return nil, fmt.Errorf("error parsing NFS metric line: %s", err) + } + + switch metricLine := parts[0]; metricLine { + case "net": + stats.Network, err = parseNetwork(values) + case "rpc": + stats.ClientRPC, err = parseClientRPC(values) + case "proc2": + stats.V2Stats, err = parseV2Stats(values) + case "proc3": + stats.V3Stats, err = parseV3Stats(values) + case "proc4": + stats.ClientV4Stats, err = parseClientV4Stats(values) + default: + return nil, fmt.Errorf("unknown NFS metric line %q", metricLine) + } + if err != nil { + return nil, fmt.Errorf("errors parsing NFS metric line: %s", err) + } + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning NFS file: %s", err) + } + + return stats, nil +} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfs_test.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfs_test.go new file mode 100644 index 000000000..8ebcfd16e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfs_test.go @@ -0,0 +1,305 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nfs_test + +import ( + "reflect" + "strings" + "testing" + + "github.com/prometheus/procfs/nfs" +) + +func TestNewNFSClientRPCStats(t *testing.T) { + tests := []struct { + name string + content string + stats *nfs.ClientRPCStats + invalid bool + }{ + { + name: "invalid file", + content: "invalid", + invalid: true, + }, { + name: "good old kernel version file", + content: `net 70 70 69 45 +rpc 1218785755 374636 1218815394 +proc2 18 16 57 74 52 71 73 45 86 0 52 83 61 17 53 50 23 70 82 +proc3 22 0 1061909262 48906 4077635 117661341 5 29391916 2570425 2993289 590 0 0 7815 15 1130 0 3983 92385 13332 2 1 23729 +proc4 48 98 51 54 83 85 23 24 1 28 73 68 83 12 84 39 68 59 58 88 29 74 69 96 21 84 15 53 86 54 66 56 97 36 49 32 85 81 11 58 32 67 13 28 35 90 1 26 1337 +`, + stats: &nfs.ClientRPCStats{ + Network: nfs.Network{ + NetCount: 70, + UDPCount: 70, + TCPCount: 69, + TCPConnect: 45, + }, + ClientRPC: nfs.ClientRPC{ + RPCCount: 1218785755, + Retransmissions: 374636, + AuthRefreshes: 1218815394, + }, + V2Stats: nfs.V2Stats{ + Null: 16, + GetAttr: 57, + SetAttr: 74, + Root: 52, + Lookup: 71, + ReadLink: 73, + Read: 45, + WrCache: 86, + Write: 0, + Create: 52, + Remove: 83, + Rename: 61, + Link: 17, + SymLink: 53, + MkDir: 50, + RmDir: 23, + ReadDir: 70, + FsStat: 82, + }, + V3Stats: nfs.V3Stats{ + Null: 0, + GetAttr: 1061909262, + SetAttr: 48906, + Lookup: 4077635, + Access: 117661341, + ReadLink: 5, + Read: 29391916, + Write: 2570425, + Create: 2993289, + MkDir: 590, + SymLink: 0, + MkNod: 0, + Remove: 7815, + RmDir: 15, + Rename: 1130, + Link: 
0, + ReadDir: 3983, + ReadDirPlus: 92385, + FsStat: 13332, + FsInfo: 2, + PathConf: 1, + Commit: 23729}, + ClientV4Stats: nfs.ClientV4Stats{ + Null: 98, + Read: 51, + Write: 54, + Commit: 83, + Open: 85, + OpenConfirm: 23, + OpenNoattr: 24, + OpenDowngrade: 1, + Close: 28, + Setattr: 73, + FsInfo: 68, + Renew: 83, + SetClientID: 12, + SetClientIDConfirm: 84, + Lock: 39, + Lockt: 68, + Locku: 59, + Access: 58, + Getattr: 88, + Lookup: 29, + LookupRoot: 74, + Remove: 69, + Rename: 96, + Link: 21, + Symlink: 84, + Create: 15, + Pathconf: 53, + StatFs: 86, + ReadLink: 54, + ReadDir: 66, + ServerCaps: 56, + DelegReturn: 97, + GetACL: 36, + SetACL: 49, + FsLocations: 32, + ReleaseLockowner: 85, + Secinfo: 81, + FsidPresent: 11, + ExchangeID: 58, + CreateSession: 32, + DestroySession: 67, + Sequence: 13, + GetLeaseTime: 28, + ReclaimComplete: 35, + LayoutGet: 90, + GetDeviceInfo: 1, + LayoutCommit: 26, + LayoutReturn: 1337, + SecinfoNoName: 0, + TestStateID: 0, + FreeStateID: 0, + GetDeviceList: 0, + BindConnToSession: 0, + DestroyClientID: 0, + Seek: 0, + Allocate: 0, + DeAllocate: 0, + LayoutStats: 0, + Clone: 0, + }, + }, + }, { + name: "good file", + content: `net 18628 0 18628 6 +rpc 4329785 0 4338291 +proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 +proc3 22 1 4084749 29200 94754 32580 186 47747 7981 8639 0 6356 0 6962 0 7958 0 0 241 4 4 2 39 +proc4 61 1 0 0 0 0 0 0 0 0 0 0 0 1 1 0 0 0 0 0 0 0 2 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +`, + stats: &nfs.ClientRPCStats{ + Network: nfs.Network{ + NetCount: 18628, + UDPCount: 0, + TCPCount: 18628, + TCPConnect: 6, + }, + ClientRPC: nfs.ClientRPC{ + RPCCount: 4329785, + Retransmissions: 0, + AuthRefreshes: 4338291, + }, + V2Stats: nfs.V2Stats{ + Null: 2, + GetAttr: 69, + SetAttr: 0, + Root: 0, + Lookup: 4410, + ReadLink: 0, + Read: 0, + WrCache: 0, + Write: 0, + Create: 0, + Remove: 0, + Rename: 0, + Link: 0, + SymLink: 0, + MkDir: 0, + RmDir: 0, + ReadDir: 99, + FsStat: 2, + }, + 
V3Stats: nfs.V3Stats{ + Null: 1, + GetAttr: 4084749, + SetAttr: 29200, + Lookup: 94754, + Access: 32580, + ReadLink: 186, + Read: 47747, + Write: 7981, + Create: 8639, + MkDir: 0, + SymLink: 6356, + MkNod: 0, + Remove: 6962, + RmDir: 0, + Rename: 7958, + Link: 0, + ReadDir: 0, + ReadDirPlus: 241, + FsStat: 4, + FsInfo: 4, + PathConf: 2, + Commit: 39, + }, + ClientV4Stats: nfs.ClientV4Stats{ + Null: 1, + Read: 0, + Write: 0, + Commit: 0, + Open: 0, + OpenConfirm: 0, + OpenNoattr: 0, + OpenDowngrade: 0, + Close: 0, + Setattr: 0, + FsInfo: 0, + Renew: 0, + SetClientID: 1, + SetClientIDConfirm: 1, + Lock: 0, + Lockt: 0, + Locku: 0, + Access: 0, + Getattr: 0, + Lookup: 0, + LookupRoot: 0, + Remove: 2, + Rename: 0, + Link: 0, + Symlink: 0, + Create: 0, + Pathconf: 0, + StatFs: 0, + ReadLink: 0, + ReadDir: 0, + ServerCaps: 0, + DelegReturn: 0, + GetACL: 0, + SetACL: 0, + FsLocations: 0, + ReleaseLockowner: 0, + Secinfo: 0, + FsidPresent: 0, + ExchangeID: 0, + CreateSession: 0, + DestroySession: 0, + Sequence: 0, + GetLeaseTime: 0, + ReclaimComplete: 0, + LayoutGet: 0, + GetDeviceInfo: 0, + LayoutCommit: 0, + LayoutReturn: 0, + SecinfoNoName: 0, + TestStateID: 0, + FreeStateID: 0, + GetDeviceList: 0, + BindConnToSession: 0, + DestroyClientID: 0, + Seek: 0, + Allocate: 0, + DeAllocate: 0, + LayoutStats: 0, + Clone: 0, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + stats, err := nfs.ParseClientRPCStats(strings.NewReader(tt.content)) + + if tt.invalid && err == nil { + t.Fatal("expected an error, but none occurred") + } + if !tt.invalid && err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if want, have := tt.stats, stats; !reflect.DeepEqual(want, have) { + t.Fatalf("unexpected NFS stats:\nwant:\n%v\nhave:\n%v", want, have) + } + }) + } +} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go new file mode 100644 index 000000000..57bb4a358 --- /dev/null 
+++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd.go @@ -0,0 +1,89 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package nfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ParseServerRPCStats returns stats read from /proc/net/rpc/nfsd +func ParseServerRPCStats(r io.Reader) (*ServerRPCStats, error) { + stats := &ServerRPCStats{} + + scanner := bufio.NewScanner(r) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least + if len(parts) < 2 { + return nil, fmt.Errorf("invalid NFSd metric line %q", line) + } + label := parts[0] + + var values []uint64 + var err error + if label == "th" { + if len(parts) < 3 { + return nil, fmt.Errorf("invalid NFSd th metric line %q", line) + } + values, err = util.ParseUint64s(parts[1:3]) + } else { + values, err = util.ParseUint64s(parts[1:]) + } + if err != nil { + return nil, fmt.Errorf("error parsing NFSd metric line: %s", err) + } + + switch metricLine := parts[0]; metricLine { + case "rc": + stats.ReplyCache, err = parseReplyCache(values) + case "fh": + stats.FileHandles, err = parseFileHandles(values) + case "io": + stats.InputOutput, err = parseInputOutput(values) + case "th": + stats.Threads, err = parseThreads(values) + case "ra": + stats.ReadAheadCache, err = parseReadAheadCache(values) + case "net": + stats.Network, err = parseNetwork(values) + case 
"rpc": + stats.ServerRPC, err = parseServerRPC(values) + case "proc2": + stats.V2Stats, err = parseV2Stats(values) + case "proc3": + stats.V3Stats, err = parseV3Stats(values) + case "proc4": + stats.ServerV4Stats, err = parseServerV4Stats(values) + case "proc4ops": + stats.V4Ops, err = parseV4Ops(values) + default: + return nil, fmt.Errorf("unknown NFSd metric line %q", metricLine) + } + if err != nil { + return nil, fmt.Errorf("errors parsing NFSd metric line: %s", err) + } + } + + if err := scanner.Err(); err != nil { + return nil, fmt.Errorf("error scanning NFSd file: %s", err) + } + + return stats, nil +} diff --git a/vendor/github.com/prometheus/procfs/nfs/parse_nfsd_test.go b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd_test.go new file mode 100644 index 000000000..b09b3b580 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/nfs/parse_nfsd_test.go @@ -0,0 +1,196 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package nfs_test + +import ( + "reflect" + "strings" + "testing" + + "github.com/prometheus/procfs/nfs" +) + +func TestNewNFSdServerRPCStats(t *testing.T) { + tests := []struct { + name string + content string + stats *nfs.ServerRPCStats + invalid bool + }{ + { + name: "invalid file", + content: "invalid", + invalid: true, + }, { + name: "good file", + content: `rc 0 6 18622 +fh 0 0 0 0 0 +io 157286400 0 +th 8 0 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 0.000 +ra 32 0 0 0 0 0 0 0 0 0 0 0 +net 18628 0 18628 6 +rpc 18628 0 0 0 0 +proc2 18 2 69 0 0 4410 0 0 0 0 0 0 0 0 0 0 0 99 2 +proc3 22 2 112 0 2719 111 0 0 0 0 0 0 0 0 0 0 0 27 216 0 2 1 0 +proc4 2 2 10853 +proc4ops 72 0 0 0 1098 2 0 0 0 0 8179 5896 0 0 0 0 5900 0 0 2 0 2 0 9609 0 2 150 1272 0 0 0 1236 0 0 0 0 3 3 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 0 +`, + stats: &nfs.ServerRPCStats{ + ReplyCache: nfs.ReplyCache{ + Hits: 0, + Misses: 6, + NoCache: 18622, + }, + FileHandles: nfs.FileHandles{ + Stale: 0, + TotalLookups: 0, + AnonLookups: 0, + DirNoCache: 0, + NoDirNoCache: 0, + }, + InputOutput: nfs.InputOutput{ + Read: 157286400, + Write: 0, + }, + Threads: nfs.Threads{ + Threads: 8, + FullCnt: 0, + }, + ReadAheadCache: nfs.ReadAheadCache{ + CacheSize: 32, + CacheHistogram: []uint64{0, 0, 0, 0, 0, 0, 0, 0, 0, 0}, + NotFound: 0, + }, + Network: nfs.Network{ + NetCount: 18628, + UDPCount: 0, + TCPCount: 18628, + TCPConnect: 6, + }, + ServerRPC: nfs.ServerRPC{ + RPCCount: 18628, + BadCnt: 0, + BadFmt: 0, + BadAuth: 0, + BadcInt: 0, + }, + V2Stats: nfs.V2Stats{ + Null: 2, + GetAttr: 69, + SetAttr: 0, + Root: 0, + Lookup: 4410, + ReadLink: 0, + Read: 0, + WrCache: 0, + Write: 0, + Create: 0, + Remove: 0, + Rename: 0, + Link: 0, + SymLink: 0, + MkDir: 0, + RmDir: 0, + ReadDir: 99, + FsStat: 2, + }, + V3Stats: nfs.V3Stats{ + Null: 2, + GetAttr: 112, + SetAttr: 0, + Lookup: 2719, + Access: 111, + ReadLink: 0, + Read: 0, + Write: 0, + Create: 0, + MkDir: 0, + SymLink: 0, + 
MkNod: 0, + Remove: 0, + RmDir: 0, + Rename: 0, + Link: 0, + ReadDir: 27, + ReadDirPlus: 216, + FsStat: 0, + FsInfo: 2, + PathConf: 1, + Commit: 0, + }, + ServerV4Stats: nfs.ServerV4Stats{ + Null: 2, + Compound: 10853, + }, + V4Ops: nfs.V4Ops{ + Op0Unused: 0, + Op1Unused: 0, + Op2Future: 0, + Access: 1098, + Close: 2, + Commit: 0, + Create: 0, + DelegPurge: 0, + DelegReturn: 0, + GetAttr: 8179, + GetFH: 5896, + Link: 0, + Lock: 0, + Lockt: 0, + Locku: 0, + Lookup: 5900, + LookupRoot: 0, + Nverify: 0, + Open: 2, + OpenAttr: 0, + OpenConfirm: 2, + OpenDgrd: 0, + PutFH: 9609, + PutPubFH: 0, + PutRootFH: 2, + Read: 150, + ReadDir: 1272, + ReadLink: 0, + Remove: 0, + Rename: 0, + Renew: 1236, + RestoreFH: 0, + SaveFH: 0, + SecInfo: 0, + SetAttr: 0, + Verify: 3, + Write: 3, + RelLockOwner: 0, + }, + }, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + stats, err := nfs.ParseServerRPCStats(strings.NewReader(tt.content)) + + if tt.invalid && err == nil { + t.Fatal("expected an error, but none occurred") + } + if !tt.invalid && err != nil { + t.Fatalf("unexpected error: %v", err) + } + + if want, have := tt.stats, stats; !reflect.DeepEqual(want, have) { + t.Fatalf("unexpected NFS stats:\nwant:\n%v\nhave:\n%v", want, have) + } + }) + } +} diff --git a/vendor/github.com/prometheus/procfs/proc.go b/vendor/github.com/prometheus/procfs/proc.go new file mode 100644 index 000000000..7cf5b8acf --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc.go @@ -0,0 +1,238 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bytes" + "fmt" + "io/ioutil" + "os" + "strconv" + "strings" +) + +// Proc provides information about a running process. +type Proc struct { + // The process ID. + PID int + + fs FS +} + +// Procs represents a list of Proc structs. +type Procs []Proc + +func (p Procs) Len() int { return len(p) } +func (p Procs) Swap(i, j int) { p[i], p[j] = p[j], p[i] } +func (p Procs) Less(i, j int) bool { return p[i].PID < p[j].PID } + +// Self returns a process for the current process read via /proc/self. +func Self() (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.Self() +} + +// NewProc returns a process for the given pid under /proc. +func NewProc(pid int) (Proc, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// AllProcs returns a list of all currently available processes under /proc. +func AllProcs() (Procs, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Procs{}, err + } + return fs.AllProcs() +} + +// Self returns a process for the current process. +func (fs FS) Self() (Proc, error) { + p, err := os.Readlink(fs.Path("self")) + if err != nil { + return Proc{}, err + } + pid, err := strconv.Atoi(strings.Replace(p, string(fs), "", -1)) + if err != nil { + return Proc{}, err + } + return fs.NewProc(pid) +} + +// NewProc returns a process for the given pid. 
+func (fs FS) NewProc(pid int) (Proc, error) { + if _, err := os.Stat(fs.Path(strconv.Itoa(pid))); err != nil { + return Proc{}, err + } + return Proc{PID: pid, fs: fs}, nil +} + +// AllProcs returns a list of all currently available processes. +func (fs FS) AllProcs() (Procs, error) { + d, err := os.Open(fs.Path()) + if err != nil { + return Procs{}, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return Procs{}, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + p := Procs{} + for _, n := range names { + pid, err := strconv.ParseInt(n, 10, 64) + if err != nil { + continue + } + p = append(p, Proc{PID: int(pid), fs: fs}) + } + + return p, nil +} + +// CmdLine returns the command line of a process. +func (p Proc) CmdLine() ([]string, error) { + f, err := os.Open(p.path("cmdline")) + if err != nil { + return nil, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return nil, err + } + + if len(data) < 1 { + return []string{}, nil + } + + return strings.Split(string(bytes.TrimRight(data, string("\x00"))), string(byte(0))), nil +} + +// Comm returns the command name of a process. +func (p Proc) Comm() (string, error) { + f, err := os.Open(p.path("comm")) + if err != nil { + return "", err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return "", err + } + + return strings.TrimSpace(string(data)), nil +} + +// Executable returns the absolute path of the executable command of a process. +func (p Proc) Executable() (string, error) { + exe, err := os.Readlink(p.path("exe")) + if os.IsNotExist(err) { + return "", nil + } + + return exe, err +} + +// FileDescriptors returns the currently open file descriptors of a process. 
+func (p Proc) FileDescriptors() ([]uintptr, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + fds := make([]uintptr, len(names)) + for i, n := range names { + fd, err := strconv.ParseInt(n, 10, 32) + if err != nil { + return nil, fmt.Errorf("could not parse fd %s: %s", n, err) + } + fds[i] = uintptr(fd) + } + + return fds, nil +} + +// FileDescriptorTargets returns the targets of all file descriptors of a process. +// If a file descriptor is not a symlink to a file (like a socket), that value will be the empty string. +func (p Proc) FileDescriptorTargets() ([]string, error) { + names, err := p.fileDescriptors() + if err != nil { + return nil, err + } + + targets := make([]string, len(names)) + + for i, name := range names { + target, err := os.Readlink(p.path("fd", name)) + if err == nil { + targets[i] = target + } + } + + return targets, nil +} + +// FileDescriptorsLen returns the number of currently open file descriptors of +// a process. +func (p Proc) FileDescriptorsLen() (int, error) { + fds, err := p.fileDescriptors() + if err != nil { + return 0, err + } + + return len(fds), nil +} + +// MountStats retrieves statistics and configuration for mount points in a +// process's namespace. +func (p Proc) MountStats() ([]*Mount, error) { + f, err := os.Open(p.path("mountstats")) + if err != nil { + return nil, err + } + defer f.Close() + + return parseMountStats(f) +} + +func (p Proc) fileDescriptors() ([]string, error) { + d, err := os.Open(p.path("fd")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("could not read %s: %s", d.Name(), err) + } + + return names, nil +} + +func (p Proc) path(pa ...string) string { + return p.fs.Path(append([]string{strconv.Itoa(p.PID)}, pa...)...) 
+} diff --git a/vendor/github.com/prometheus/procfs/proc_io.go b/vendor/github.com/prometheus/procfs/proc_io.go new file mode 100644 index 000000000..0251c83bf --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_io.go @@ -0,0 +1,65 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "io/ioutil" + "os" +) + +// ProcIO models the content of /proc//io. +type ProcIO struct { + // Chars read. + RChar uint64 + // Chars written. + WChar uint64 + // Read syscalls. + SyscR uint64 + // Write syscalls. + SyscW uint64 + // Bytes read. + ReadBytes uint64 + // Bytes written. + WriteBytes uint64 + // Bytes written, but taking into account truncation. See + // Documentation/filesystems/proc.txt in the kernel sources for + // detailed explanation. + CancelledWriteBytes int64 +} + +// NewIO creates a new ProcIO instance from a given Proc instance. 
+func (p Proc) NewIO() (ProcIO, error) { + pio := ProcIO{} + + f, err := os.Open(p.path("io")) + if err != nil { + return pio, err + } + defer f.Close() + + data, err := ioutil.ReadAll(f) + if err != nil { + return pio, err + } + + ioFormat := "rchar: %d\nwchar: %d\nsyscr: %d\nsyscw: %d\n" + + "read_bytes: %d\nwrite_bytes: %d\n" + + "cancelled_write_bytes: %d\n" + + _, err = fmt.Sscanf(string(data), ioFormat, &pio.RChar, &pio.WChar, &pio.SyscR, + &pio.SyscW, &pio.ReadBytes, &pio.WriteBytes, &pio.CancelledWriteBytes) + + return pio, err +} diff --git a/vendor/github.com/prometheus/procfs/proc_io_test.go b/vendor/github.com/prometheus/procfs/proc_io_test.go new file mode 100644 index 000000000..1afdbd463 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_io_test.go @@ -0,0 +1,46 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import "testing" + +func TestProcIO(t *testing.T) { + p, err := FS("fixtures").NewProc(26231) + if err != nil { + t.Fatal(err) + } + + s, err := p.NewIO() + if err != nil { + t.Fatal(err) + } + + for _, test := range []struct { + name string + want int64 + have int64 + }{ + {name: "RChar", want: 750339, have: int64(s.RChar)}, + {name: "WChar", want: 818609, have: int64(s.WChar)}, + {name: "SyscR", want: 7405, have: int64(s.SyscR)}, + {name: "SyscW", want: 5245, have: int64(s.SyscW)}, + {name: "ReadBytes", want: 1024, have: int64(s.ReadBytes)}, + {name: "WriteBytes", want: 2048, have: int64(s.WriteBytes)}, + {name: "CancelledWriteBytes", want: -1024, have: s.CancelledWriteBytes}, + } { + if test.want != test.have { + t.Errorf("want %s %d, have %d", test.name, test.want, test.have) + } + } +} diff --git a/vendor/github.com/prometheus/procfs/proc_limits.go b/vendor/github.com/prometheus/procfs/proc_limits.go new file mode 100644 index 000000000..f04ba6fda --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_limits.go @@ -0,0 +1,150 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "regexp" + "strconv" +) + +// ProcLimits represents the soft limits for each of the process's resource +// limits. For more information see getrlimit(2): +// http://man7.org/linux/man-pages/man2/getrlimit.2.html. 
// ProcLimits represents the soft limits for each of the process's resource
// limits, as parsed from /proc/<pid>/limits. For more information see
// getrlimit(2): http://man7.org/linux/man-pages/man2/getrlimit.2.html.
// A value of -1 means "unlimited".
type ProcLimits struct {
	// CPU time limit in seconds.
	CPUTime int64
	// Maximum size of files that the process may create.
	FileSize int64
	// Maximum size of the process's data segment (initialized data,
	// uninitialized data, and heap).
	DataSize int64
	// Maximum size of the process stack in bytes.
	StackSize int64
	// Maximum size of a core file.
	CoreFileSize int64
	// Limit of the process's resident set in pages.
	ResidentSet int64
	// Maximum number of processes that can be created for the real user ID of
	// the calling process.
	Processes int64
	// Value one greater than the maximum file descriptor number that can be
	// opened by this process.
	OpenFiles int64
	// Maximum number of bytes of memory that may be locked into RAM.
	LockedMemory int64
	// Maximum size of the process's virtual memory address space in bytes.
	AddressSpace int64
	// Limit on the combined number of flock(2) locks and fcntl(2) leases that
	// this process may establish.
	FileLocks int64
	// Limit of signals that may be queued for the real user ID of the calling
	// process.
	PendingSignals int64
	// Limit on the number of bytes that can be allocated for POSIX message
	// queues for the real user ID of the calling process.
	MsqqueueSize int64
	// Limit of the nice priority set using setpriority(2) or nice(2).
	NicePriority int64
	// Limit of the real-time priority set using sched_setscheduler(2) or
	// sched_setparam(2).
	RealtimePriority int64
	// Limit (in microseconds) on the amount of CPU time that a process
	// scheduled under a real-time scheduling policy may consume without making
	// a blocking system call.
	RealtimeTimeout int64
}

const (
	// limitsFields caps how many pieces a limits line is split into:
	// the limit name, the soft limit, and the rest of the line.
	limitsFields = 3
	// limitsUnlimited is the literal the kernel prints for "no limit".
	limitsUnlimited = "unlimited"
)

var (
	// limitsDelimiter splits a limits line on runs of one or more spaces.
	limitsDelimiter = regexp.MustCompile(" +")
)
+func (p Proc) NewLimits() (ProcLimits, error) { + f, err := os.Open(p.path("limits")) + if err != nil { + return ProcLimits{}, err + } + defer f.Close() + + var ( + l = ProcLimits{} + s = bufio.NewScanner(f) + ) + for s.Scan() { + fields := limitsDelimiter.Split(s.Text(), limitsFields) + if len(fields) != limitsFields { + return ProcLimits{}, fmt.Errorf( + "couldn't parse %s line %s", f.Name(), s.Text()) + } + + switch fields[0] { + case "Max cpu time": + l.CPUTime, err = parseInt(fields[1]) + case "Max file size": + l.FileSize, err = parseInt(fields[1]) + case "Max data size": + l.DataSize, err = parseInt(fields[1]) + case "Max stack size": + l.StackSize, err = parseInt(fields[1]) + case "Max core file size": + l.CoreFileSize, err = parseInt(fields[1]) + case "Max resident set": + l.ResidentSet, err = parseInt(fields[1]) + case "Max processes": + l.Processes, err = parseInt(fields[1]) + case "Max open files": + l.OpenFiles, err = parseInt(fields[1]) + case "Max locked memory": + l.LockedMemory, err = parseInt(fields[1]) + case "Max address space": + l.AddressSpace, err = parseInt(fields[1]) + case "Max file locks": + l.FileLocks, err = parseInt(fields[1]) + case "Max pending signals": + l.PendingSignals, err = parseInt(fields[1]) + case "Max msgqueue size": + l.MsqqueueSize, err = parseInt(fields[1]) + case "Max nice priority": + l.NicePriority, err = parseInt(fields[1]) + case "Max realtime priority": + l.RealtimePriority, err = parseInt(fields[1]) + case "Max realtime timeout": + l.RealtimeTimeout, err = parseInt(fields[1]) + } + if err != nil { + return ProcLimits{}, err + } + } + + return l, s.Err() +} + +func parseInt(s string) (int64, error) { + if s == limitsUnlimited { + return -1, nil + } + i, err := strconv.ParseInt(s, 10, 64) + if err != nil { + return 0, fmt.Errorf("couldn't parse value %s: %s", s, err) + } + return i, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_limits_test.go b/vendor/github.com/prometheus/procfs/proc_limits_test.go 
new file mode 100644 index 000000000..ebb43ae74 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_limits_test.go @@ -0,0 +1,44 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import "testing" + +func TestNewLimits(t *testing.T) { + p, err := FS("fixtures").NewProc(26231) + if err != nil { + t.Fatal(err) + } + + l, err := p.NewLimits() + if err != nil { + t.Fatal(err) + } + + for _, test := range []struct { + name string + want int64 + have int64 + }{ + {name: "cpu time", want: -1, have: l.CPUTime}, + {name: "open files", want: 2048, have: l.OpenFiles}, + {name: "msgqueue size", want: 819200, have: l.MsqqueueSize}, + {name: "nice priority", want: 0, have: l.NicePriority}, + {name: "address space", want: 8589934592, have: l.AddressSpace}, + } { + if test.want != test.have { + t.Errorf("want %s %d, have %d", test.name, test.want, test.have) + } + } +} diff --git a/vendor/github.com/prometheus/procfs/proc_ns.go b/vendor/github.com/prometheus/procfs/proc_ns.go new file mode 100644 index 000000000..d06c26eba --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_ns.go @@ -0,0 +1,68 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "fmt" + "os" + "strconv" + "strings" +) + +// Namespace represents a single namespace of a process. +type Namespace struct { + Type string // Namespace type. + Inode uint32 // Inode number of the namespace. If two processes are in the same namespace their inodes will match. +} + +// Namespaces contains all of the namespaces that the process is contained in. +type Namespaces map[string]Namespace + +// NewNamespaces reads from /proc/[pid/ns/* to get the namespaces of which the +// process is a member. +func (p Proc) NewNamespaces() (Namespaces, error) { + d, err := os.Open(p.path("ns")) + if err != nil { + return nil, err + } + defer d.Close() + + names, err := d.Readdirnames(-1) + if err != nil { + return nil, fmt.Errorf("failed to read contents of ns dir: %v", err) + } + + ns := make(Namespaces, len(names)) + for _, name := range names { + target, err := os.Readlink(p.path("ns", name)) + if err != nil { + return nil, err + } + + fields := strings.SplitN(target, ":", 2) + if len(fields) != 2 { + return nil, fmt.Errorf("failed to parse namespace type and inode from '%v'", target) + } + + typ := fields[0] + inode, err := strconv.ParseUint(strings.Trim(fields[1], "[]"), 10, 32) + if err != nil { + return nil, fmt.Errorf("failed to parse inode from '%v': %v", fields[1], err) + } + + ns[name] = Namespace{typ, uint32(inode)} + } + + return ns, nil +} diff --git a/vendor/github.com/prometheus/procfs/proc_ns_test.go b/vendor/github.com/prometheus/procfs/proc_ns_test.go new file mode 100644 index 000000000..abfd63e5f --- 
/dev/null +++ b/vendor/github.com/prometheus/procfs/proc_ns_test.go @@ -0,0 +1,44 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "testing" +) + +func TestNewNamespaces(t *testing.T) { + p, err := FS("fixtures").NewProc(26231) + if err != nil { + t.Fatal(err) + } + + namespaces, err := p.NewNamespaces() + if err != nil { + t.Fatal(err) + } + + expectedNamespaces := map[string]Namespace{ + "mnt": {"mnt", 4026531840}, + "net": {"net", 4026531993}, + } + + if want, have := len(expectedNamespaces), len(namespaces); want != have { + t.Errorf("want %d parsed namespaces, have %d", want, have) + } + for _, ns := range namespaces { + if want, have := expectedNamespaces[ns.Type], ns; want != have { + t.Errorf("%s: want %v, have %v", ns.Type, want, have) + } + } +} diff --git a/vendor/github.com/prometheus/procfs/proc_stat.go b/vendor/github.com/prometheus/procfs/proc_stat.go new file mode 100644 index 000000000..3cf2a9f18 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_stat.go @@ -0,0 +1,188 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
// Originally, this USER_HZ value was dynamically retrieved via a sysconf call
// which required cgo. However, that caused a lot of problems regarding
// cross-compilation. Alternatives such as running a binary to determine the
// value, or trying to derive it in some other way were all problematic. After
// much research it was determined that USER_HZ is actually hardcoded to 100 on
// all Go-supported platforms as of the time of this writing. This is why we
// decided to hardcode it here as well. It is not impossible that there could
// be systems with exceptions, but they should be very exotic edge cases, and
// in that case, the worst outcome will be two misreported metrics.
//
// See also the following discussions:
//
// - https://github.com/prometheus/node_exporter/issues/52
// - https://github.com/prometheus/procfs/pull/2
// - http://stackoverflow.com/questions/17410841/how-does-user-hz-solve-the-jiffy-scaling-issue
const userHZ = 100

// ProcStat provides status information about the process,
// read from /proc/[pid]/stat. Field order mirrors the stat file itself;
// see proc(5) for the full field list.
type ProcStat struct {
	// The process ID.
	PID int
	// The filename of the executable.
	Comm string
	// The process state.
	State string
	// The PID of the parent of this process.
	PPID int
	// The process group ID of the process.
	PGRP int
	// The session ID of the process.
	Session int
	// The controlling terminal of the process.
	TTY int
	// The ID of the foreground process group of the controlling terminal of
	// the process.
	TPGID int
	// The kernel flags word of the process.
	Flags uint
	// The number of minor faults the process has made which have not required
	// loading a memory page from disk.
	MinFlt uint
	// The number of minor faults that the process's waited-for children have
	// made.
	CMinFlt uint
	// The number of major faults the process has made which have required
	// loading a memory page from disk.
	MajFlt uint
	// The number of major faults that the process's waited-for children have
	// made.
	CMajFlt uint
	// Amount of time that this process has been scheduled in user mode,
	// measured in clock ticks.
	UTime uint
	// Amount of time that this process has been scheduled in kernel mode,
	// measured in clock ticks.
	STime uint
	// Amount of time that this process's waited-for children have been
	// scheduled in user mode, measured in clock ticks.
	CUTime uint
	// Amount of time that this process's waited-for children have been
	// scheduled in kernel mode, measured in clock ticks.
	CSTime uint
	// For processes running a real-time scheduling policy, this is the negated
	// scheduling priority, minus one.
	Priority int
	// The nice value, a value in the range 19 (low priority) to -20 (high
	// priority).
	Nice int
	// Number of threads in this process.
	NumThreads int
	// The time the process started after system boot, the value is expressed
	// in clock ticks.
	Starttime uint64
	// Virtual memory size in bytes.
	VSize int
	// Resident set size in pages.
	RSS int

	// fs is retained so StartTime can read the system boot time later.
	fs FS
}

// NewStat returns the current status information of the process.
func (p Proc) NewStat() (ProcStat, error) {
	f, err := os.Open(p.path("stat"))
	if err != nil {
		return ProcStat{}, err
	}
	defer f.Close()

	data, err := ioutil.ReadAll(f)
	if err != nil {
		return ProcStat{}, err
	}

	var (
		// ignore absorbs the one field between NumThreads and Starttime
		// that is read but not stored (per proc(5), the obsolete
		// itrealvalue field).
		ignore int

		s = ProcStat{PID: p.PID, fs: p.fs}
		// The comm field is wrapped in parentheses and may itself contain
		// spaces and parentheses, so locate it via the first "(" and the
		// LAST ")" rather than scanning it with Fscan.
		l = bytes.Index(data, []byte("("))
		r = bytes.LastIndex(data, []byte(")"))
	)

	if l < 0 || r < 0 {
		return ProcStat{}, fmt.Errorf(
			"unexpected format, couldn't extract comm: %s",
			data,
		)
	}

	s.Comm = string(data[l+1 : r])
	// Scan the remaining whitespace-separated fields in file order,
	// starting just past ") ". The order below must match proc(5) exactly.
	_, err = fmt.Fscan(
		bytes.NewBuffer(data[r+2:]),
		&s.State,
		&s.PPID,
		&s.PGRP,
		&s.Session,
		&s.TTY,
		&s.TPGID,
		&s.Flags,
		&s.MinFlt,
		&s.CMinFlt,
		&s.MajFlt,
		&s.CMajFlt,
		&s.UTime,
		&s.STime,
		&s.CUTime,
		&s.CSTime,
		&s.Priority,
		&s.Nice,
		&s.NumThreads,
		&ignore,
		&s.Starttime,
		&s.VSize,
		&s.RSS,
	)
	if err != nil {
		return ProcStat{}, err
	}

	return s, nil
}

// VirtualMemory returns the virtual memory size in bytes.
func (s ProcStat) VirtualMemory() int {
	return s.VSize
}

// ResidentMemory returns the resident memory size in bytes.
func (s ProcStat) ResidentMemory() int {
	// RSS is recorded in pages; scale by the system page size.
	return s.RSS * os.Getpagesize()
}

// StartTime returns the unix timestamp of the process in seconds.
func (s ProcStat) StartTime() (float64, error) {
	// Starttime is measured in clock ticks since boot, so it is combined
	// with the system boot time read from /proc/stat.
	stat, err := s.fs.NewStat()
	if err != nil {
		return 0, err
	}
	return float64(stat.BootTime) + (float64(s.Starttime) / userHZ), nil
}

// CPUTime returns the total CPU user and system time in seconds.
+func (s ProcStat) CPUTime() float64 { + return float64(s.UTime+s.STime) / userHZ +} diff --git a/vendor/github.com/prometheus/procfs/proc_stat_test.go b/vendor/github.com/prometheus/procfs/proc_stat_test.go new file mode 100644 index 000000000..e2df8845f --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_stat_test.go @@ -0,0 +1,123 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "os" + "testing" +) + +func TestProcStat(t *testing.T) { + p, err := FS("fixtures").NewProc(26231) + if err != nil { + t.Fatal(err) + } + + s, err := p.NewStat() + if err != nil { + t.Fatal(err) + } + + for _, test := range []struct { + name string + want int + have int + }{ + {name: "pid", want: 26231, have: s.PID}, + {name: "user time", want: 1677, have: int(s.UTime)}, + {name: "system time", want: 44, have: int(s.STime)}, + {name: "start time", want: 82375, have: int(s.Starttime)}, + {name: "virtual memory size", want: 56274944, have: s.VSize}, + {name: "resident set size", want: 1981, have: s.RSS}, + } { + if test.want != test.have { + t.Errorf("want %s %d, have %d", test.name, test.want, test.have) + } + } +} + +func TestProcStatComm(t *testing.T) { + s1, err := testProcStat(26231) + if err != nil { + t.Fatal(err) + } + if want, have := "vim", s1.Comm; want != have { + t.Errorf("want comm %s, have %s", want, have) + } + + s2, err := testProcStat(584) + if err != nil { + t.Fatal(err) + } + if 
want, have := "(a b ) ( c d) ", s2.Comm; want != have { + t.Errorf("want comm %s, have %s", want, have) + } +} + +func TestProcStatVirtualMemory(t *testing.T) { + s, err := testProcStat(26231) + if err != nil { + t.Fatal(err) + } + + if want, have := 56274944, s.VirtualMemory(); want != have { + t.Errorf("want virtual memory %d, have %d", want, have) + } +} + +func TestProcStatResidentMemory(t *testing.T) { + s, err := testProcStat(26231) + if err != nil { + t.Fatal(err) + } + + if want, have := 1981*os.Getpagesize(), s.ResidentMemory(); want != have { + t.Errorf("want resident memory %d, have %d", want, have) + } +} + +func TestProcStatStartTime(t *testing.T) { + s, err := testProcStat(26231) + if err != nil { + t.Fatal(err) + } + + time, err := s.StartTime() + if err != nil { + t.Fatal(err) + } + if want, have := 1418184099.75, time; want != have { + t.Errorf("want start time %f, have %f", want, have) + } +} + +func TestProcStatCPUTime(t *testing.T) { + s, err := testProcStat(26231) + if err != nil { + t.Fatal(err) + } + + if want, have := 17.21, s.CPUTime(); want != have { + t.Errorf("want cpu time %f, have %f", want, have) + } +} + +func testProcStat(pid int) (ProcStat, error) { + p, err := FS("fixtures").NewProc(pid) + if err != nil { + return ProcStat{}, err + } + + return p.NewStat() +} diff --git a/vendor/github.com/prometheus/procfs/proc_test.go b/vendor/github.com/prometheus/procfs/proc_test.go new file mode 100644 index 000000000..ee7e69d6b --- /dev/null +++ b/vendor/github.com/prometheus/procfs/proc_test.go @@ -0,0 +1,174 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "reflect" + "sort" + "testing" +) + +func TestSelf(t *testing.T) { + fs := FS("fixtures") + + p1, err := fs.NewProc(26231) + if err != nil { + t.Fatal(err) + } + p2, err := fs.Self() + if err != nil { + t.Fatal(err) + } + + if !reflect.DeepEqual(p1, p2) { + t.Errorf("want process %v, have %v", p1, p2) + } +} + +func TestAllProcs(t *testing.T) { + procs, err := FS("fixtures").AllProcs() + if err != nil { + t.Fatal(err) + } + sort.Sort(procs) + for i, p := range []*Proc{{PID: 584}, {PID: 26231}} { + if want, have := p.PID, procs[i].PID; want != have { + t.Errorf("want processes %d, have %d", want, have) + } + } +} + +func TestCmdLine(t *testing.T) { + for _, tt := range []struct { + process int + want []string + }{ + {process: 26231, want: []string{"vim", "test.go", "+10"}}, + {process: 26232, want: []string{}}, + {process: 26233, want: []string{"com.github.uiautomator"}}, + } { + p1, err := FS("fixtures").NewProc(tt.process) + if err != nil { + t.Fatal(err) + } + c1, err := p1.CmdLine() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(tt.want, c1) { + t.Errorf("want cmdline %v, have %v", tt.want, c1) + } + } +} + +func TestComm(t *testing.T) { + for _, tt := range []struct { + process int + want string + }{ + {process: 26231, want: "vim"}, + {process: 26232, want: "ata_sff"}, + } { + p1, err := FS("fixtures").NewProc(tt.process) + if err != nil { + t.Fatal(err) + } + c1, err := p1.Comm() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(tt.want, c1) { + t.Errorf("want comm %v, have %v", tt.want, 
c1) + } + } +} + +func TestExecutable(t *testing.T) { + for _, tt := range []struct { + process int + want string + }{ + {process: 26231, want: "/usr/bin/vim"}, + {process: 26232, want: ""}, + } { + p, err := FS("fixtures").NewProc(tt.process) + if err != nil { + t.Fatal(err) + } + exe, err := p.Executable() + if err != nil { + t.Fatal(err) + } + if !reflect.DeepEqual(tt.want, exe) { + t.Errorf("want absolute path to cmdline %v, have %v", tt.want, exe) + } + } +} + +func TestFileDescriptors(t *testing.T) { + p1, err := FS("fixtures").NewProc(26231) + if err != nil { + t.Fatal(err) + } + fds, err := p1.FileDescriptors() + if err != nil { + t.Fatal(err) + } + sort.Sort(byUintptr(fds)) + if want := []uintptr{0, 1, 2, 3, 10}; !reflect.DeepEqual(want, fds) { + t.Errorf("want fds %v, have %v", want, fds) + } +} + +func TestFileDescriptorTargets(t *testing.T) { + p1, err := FS("fixtures").NewProc(26231) + if err != nil { + t.Fatal(err) + } + fds, err := p1.FileDescriptorTargets() + if err != nil { + t.Fatal(err) + } + sort.Strings(fds) + var want = []string{ + "../../symlinktargets/abc", + "../../symlinktargets/def", + "../../symlinktargets/ghi", + "../../symlinktargets/uvw", + "../../symlinktargets/xyz", + } + if !reflect.DeepEqual(want, fds) { + t.Errorf("want fds %v, have %v", want, fds) + } +} + +func TestFileDescriptorsLen(t *testing.T) { + p1, err := FS("fixtures").NewProc(26231) + if err != nil { + t.Fatal(err) + } + l, err := p1.FileDescriptorsLen() + if err != nil { + t.Fatal(err) + } + if want, have := 5, l; want != have { + t.Errorf("want fds %d, have %d", want, have) + } +} + +type byUintptr []uintptr + +func (a byUintptr) Len() int { return len(a) } +func (a byUintptr) Swap(i, j int) { a[i], a[j] = a[j], a[i] } +func (a byUintptr) Less(i, j int) bool { return a[i] < a[j] } diff --git a/vendor/github.com/prometheus/procfs/stat.go b/vendor/github.com/prometheus/procfs/stat.go new file mode 100644 index 000000000..61eb6b0e3 --- /dev/null +++ 
b/vendor/github.com/prometheus/procfs/stat.go @@ -0,0 +1,232 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "io" + "os" + "strconv" + "strings" +) + +// CPUStat shows how much time the cpu spend in various stages. +type CPUStat struct { + User float64 + Nice float64 + System float64 + Idle float64 + Iowait float64 + IRQ float64 + SoftIRQ float64 + Steal float64 + Guest float64 + GuestNice float64 +} + +// SoftIRQStat represent the softirq statistics as exported in the procfs stat file. +// A nice introduction can be found at https://0xax.gitbooks.io/linux-insides/content/interrupts/interrupts-9.html +// It is possible to get per-cpu stats by reading /proc/softirqs +type SoftIRQStat struct { + Hi uint64 + Timer uint64 + NetTx uint64 + NetRx uint64 + Block uint64 + BlockIoPoll uint64 + Tasklet uint64 + Sched uint64 + Hrtimer uint64 + Rcu uint64 +} + +// Stat represents kernel/system statistics. +type Stat struct { + // Boot time in seconds since the Epoch. + BootTime uint64 + // Summed up cpu statistics. + CPUTotal CPUStat + // Per-CPU statistics. + CPU []CPUStat + // Number of times interrupts were handled, which contains numbered and unnumbered IRQs. + IRQTotal uint64 + // Number of times a numbered IRQ was triggered. + IRQ []uint64 + // Number of times a context switch happened. + ContextSwitches uint64 + // Number of times a process was created. 
+ ProcessCreated uint64 + // Number of processes currently running. + ProcessesRunning uint64 + // Number of processes currently blocked (waiting for IO). + ProcessesBlocked uint64 + // Number of times a softirq was scheduled. + SoftIRQTotal uint64 + // Detailed softirq statistics. + SoftIRQ SoftIRQStat +} + +// NewStat returns kernel/system statistics read from /proc/stat. +func NewStat() (Stat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return Stat{}, err + } + + return fs.NewStat() +} + +// Parse a cpu statistics line and returns the CPUStat struct plus the cpu id (or -1 for the overall sum). +func parseCPUStat(line string) (CPUStat, int64, error) { + cpuStat := CPUStat{} + var cpu string + + count, err := fmt.Sscanf(line, "%s %f %f %f %f %f %f %f %f %f %f", + &cpu, + &cpuStat.User, &cpuStat.Nice, &cpuStat.System, &cpuStat.Idle, + &cpuStat.Iowait, &cpuStat.IRQ, &cpuStat.SoftIRQ, &cpuStat.Steal, + &cpuStat.Guest, &cpuStat.GuestNice) + + if err != nil && err != io.EOF { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): %s", line, err) + } + if count == 0 { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu): 0 elements parsed", line) + } + + cpuStat.User /= userHZ + cpuStat.Nice /= userHZ + cpuStat.System /= userHZ + cpuStat.Idle /= userHZ + cpuStat.Iowait /= userHZ + cpuStat.IRQ /= userHZ + cpuStat.SoftIRQ /= userHZ + cpuStat.Steal /= userHZ + cpuStat.Guest /= userHZ + cpuStat.GuestNice /= userHZ + + if cpu == "cpu" { + return cpuStat, -1, nil + } + + cpuID, err := strconv.ParseInt(cpu[3:], 10, 64) + if err != nil { + return CPUStat{}, -1, fmt.Errorf("couldn't parse %s (cpu/cpuid): %s", line, err) + } + + return cpuStat, cpuID, nil +} + +// Parse a softirq line. 
+func parseSoftIRQStat(line string) (SoftIRQStat, uint64, error) { + softIRQStat := SoftIRQStat{} + var total uint64 + var prefix string + + _, err := fmt.Sscanf(line, "%s %d %d %d %d %d %d %d %d %d %d %d", + &prefix, &total, + &softIRQStat.Hi, &softIRQStat.Timer, &softIRQStat.NetTx, &softIRQStat.NetRx, + &softIRQStat.Block, &softIRQStat.BlockIoPoll, + &softIRQStat.Tasklet, &softIRQStat.Sched, + &softIRQStat.Hrtimer, &softIRQStat.Rcu) + + if err != nil { + return SoftIRQStat{}, 0, fmt.Errorf("couldn't parse %s (softirq): %s", line, err) + } + + return softIRQStat, total, nil +} + +// NewStat returns an information about current kernel/system statistics. +func (fs FS) NewStat() (Stat, error) { + // See https://www.kernel.org/doc/Documentation/filesystems/proc.txt + + f, err := os.Open(fs.Path("stat")) + if err != nil { + return Stat{}, err + } + defer f.Close() + + stat := Stat{} + + scanner := bufio.NewScanner(f) + for scanner.Scan() { + line := scanner.Text() + parts := strings.Fields(scanner.Text()) + // require at least + if len(parts) < 2 { + continue + } + switch { + case parts[0] == "btime": + if stat.BootTime, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (btime): %s", parts[1], err) + } + case parts[0] == "intr": + if stat.IRQTotal, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (intr): %s", parts[1], err) + } + numberedIRQs := parts[2:] + stat.IRQ = make([]uint64, len(numberedIRQs)) + for i, count := range numberedIRQs { + if stat.IRQ[i], err = strconv.ParseUint(count, 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (intr%d): %s", count, i, err) + } + } + case parts[0] == "ctxt": + if stat.ContextSwitches, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (ctxt): %s", parts[1], err) + } + case parts[0] == "processes": + if stat.ProcessCreated, err = 
strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (processes): %s", parts[1], err) + } + case parts[0] == "procs_running": + if stat.ProcessesRunning, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (procs_running): %s", parts[1], err) + } + case parts[0] == "procs_blocked": + if stat.ProcessesBlocked, err = strconv.ParseUint(parts[1], 10, 64); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s (procs_blocked): %s", parts[1], err) + } + case parts[0] == "softirq": + softIRQStats, total, err := parseSoftIRQStat(line) + if err != nil { + return Stat{}, err + } + stat.SoftIRQTotal = total + stat.SoftIRQ = softIRQStats + case strings.HasPrefix(parts[0], "cpu"): + cpuStat, cpuID, err := parseCPUStat(line) + if err != nil { + return Stat{}, err + } + if cpuID == -1 { + stat.CPUTotal = cpuStat + } else { + for int64(len(stat.CPU)) <= cpuID { + stat.CPU = append(stat.CPU, CPUStat{}) + } + stat.CPU[cpuID] = cpuStat + } + } + } + + if err := scanner.Err(); err != nil { + return Stat{}, fmt.Errorf("couldn't parse %s: %s", f.Name(), err) + } + + return stat, nil +} diff --git a/vendor/github.com/prometheus/procfs/stat_test.go b/vendor/github.com/prometheus/procfs/stat_test.go new file mode 100644 index 000000000..2043b5e43 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/stat_test.go @@ -0,0 +1,74 @@ +// Copyright 2018 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import "testing" + +func TestStat(t *testing.T) { + s, err := FS("fixtures").NewStat() + if err != nil { + t.Fatal(err) + } + + // cpu + if want, have := float64(301854)/userHZ, s.CPUTotal.User; want != have { + t.Errorf("want cpu/user %v, have %v", want, have) + } + if want, have := float64(31)/userHZ, s.CPU[7].SoftIRQ; want != have { + t.Errorf("want cpu7/softirq %v, have %v", want, have) + } + + // intr + if want, have := uint64(8885917), s.IRQTotal; want != have { + t.Errorf("want irq/total %d, have %d", want, have) + } + if want, have := uint64(1), s.IRQ[8]; want != have { + t.Errorf("want irq8 %d, have %d", want, have) + } + + // ctxt + if want, have := uint64(38014093), s.ContextSwitches; want != have { + t.Errorf("want context switches (ctxt) %d, have %d", want, have) + } + + // btime + if want, have := uint64(1418183276), s.BootTime; want != have { + t.Errorf("want boot time (btime) %d, have %d", want, have) + } + + // processes + if want, have := uint64(26442), s.ProcessCreated; want != have { + t.Errorf("want process created (processes) %d, have %d", want, have) + } + + // procs_running + if want, have := uint64(2), s.ProcessesRunning; want != have { + t.Errorf("want processes running (procs_running) %d, have %d", want, have) + } + + // procs_blocked + if want, have := uint64(1), s.ProcessesBlocked; want != have { + t.Errorf("want processes blocked (procs_blocked) %d, have %d", want, have) + } + + // softirq + if want, have := uint64(5057579), s.SoftIRQTotal; want != have { + t.Errorf("want softirq total %d, have %d", want, have) + } + + if want, have := uint64(508444), s.SoftIRQ.Rcu; want != have { + t.Errorf("want softirq RCU %d, have %d", want, have) + } + +} diff --git a/vendor/github.com/prometheus/procfs/ttar b/vendor/github.com/prometheus/procfs/ttar new file mode 100755 index 000000000..b0171a12b --- /dev/null +++ 
b/vendor/github.com/prometheus/procfs/ttar @@ -0,0 +1,389 @@ +#!/usr/bin/env bash + +# Purpose: plain text tar format +# Limitations: - only suitable for text files, directories, and symlinks +# - stores only filename, content, and mode +# - not designed for untrusted input +# +# Note: must work with bash version 3.2 (macOS) + +# Copyright 2017 Roger Luethi +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +set -o errexit -o nounset + +# Sanitize environment (for instance, standard sorting of glob matches) +export LC_ALL=C + +path="" +CMD="" +ARG_STRING="$*" + +#------------------------------------------------------------------------------ +# Not all sed implementations can work on null bytes. In order to make ttar +# work out of the box on macOS, use Python as a stream editor. + +USE_PYTHON=0 + +PYTHON_CREATE_FILTER=$(cat << 'PCF' +#!/usr/bin/env python + +import re +import sys + +for line in sys.stdin: + line = re.sub(r'EOF', r'\EOF', line) + line = re.sub(r'NULLBYTE', r'\NULLBYTE', line) + line = re.sub('\x00', r'NULLBYTE', line) + sys.stdout.write(line) +PCF +) + +PYTHON_EXTRACT_FILTER=$(cat << 'PEF' +#!/usr/bin/env python + +import re +import sys + +for line in sys.stdin: + line = re.sub(r'(?/dev/null; then + echo "ERROR Python not found. Aborting." 
+ exit 2 + fi + USE_PYTHON=1 + fi +} + +#------------------------------------------------------------------------------ + +function usage { + bname=$(basename "$0") + cat << USAGE +Usage: $bname [-C ] -c -f (create archive) + $bname -t -f (list archive contents) + $bname [-C ] -x -f (extract archive) + +Options: + -C (change directory) + -v (verbose) + +Example: Change to sysfs directory, create ttar file from fixtures directory + $bname -C sysfs -c -f sysfs/fixtures.ttar fixtures/ +USAGE +exit "$1" +} + +function vecho { + if [ "${VERBOSE:-}" == "yes" ]; then + echo >&7 "$@" + fi +} + +function set_cmd { + if [ -n "$CMD" ]; then + echo "ERROR: more than one command given" + echo + usage 2 + fi + CMD=$1 +} + +unset VERBOSE + +while getopts :cf:htxvC: opt; do + case $opt in + c) + set_cmd "create" + ;; + f) + ARCHIVE=$OPTARG + ;; + h) + usage 0 + ;; + t) + set_cmd "list" + ;; + x) + set_cmd "extract" + ;; + v) + VERBOSE=yes + exec 7>&1 + ;; + C) + CDIR=$OPTARG + ;; + *) + echo >&2 "ERROR: invalid option -$OPTARG" + echo + usage 1 + ;; + esac +done + +# Remove processed options from arguments +shift $(( OPTIND - 1 )); + +if [ "${CMD:-}" == "" ]; then + echo >&2 "ERROR: no command given" + echo + usage 1 +elif [ "${ARCHIVE:-}" == "" ]; then + echo >&2 "ERROR: no archive name given" + echo + usage 1 +fi + +function list { + local path="" + local size=0 + local line_no=0 + local ttar_file=$1 + if [ -n "${2:-}" ]; then + echo >&2 "ERROR: too many arguments." + echo + usage 1 + fi + if [ ! 
-e "$ttar_file" ]; then + echo >&2 "ERROR: file not found ($ttar_file)" + echo + usage 1 + fi + while read -r line; do + line_no=$(( line_no + 1 )) + if [ $size -gt 0 ]; then + size=$(( size - 1 )) + continue + fi + if [[ $line =~ ^Path:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + elif [[ $line =~ ^Lines:\ (.*)$ ]]; then + size=${BASH_REMATCH[1]} + echo "$path" + elif [[ $line =~ ^Directory:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + echo "$path/" + elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then + echo "$path -> ${BASH_REMATCH[1]}" + fi + done < "$ttar_file" +} + +function extract { + local path="" + local size=0 + local line_no=0 + local ttar_file=$1 + if [ -n "${2:-}" ]; then + echo >&2 "ERROR: too many arguments." + echo + usage 1 + fi + if [ ! -e "$ttar_file" ]; then + echo >&2 "ERROR: file not found ($ttar_file)" + echo + usage 1 + fi + while IFS= read -r line; do + line_no=$(( line_no + 1 )) + local eof_without_newline + if [ "$size" -gt 0 ]; then + if [[ "$line" =~ [^\\]EOF ]]; then + # An EOF not preceeded by a backslash indicates that the line + # does not end with a newline + eof_without_newline=1 + else + eof_without_newline=0 + fi + # Replace NULLBYTE with null byte if at beginning of line + # Replace NULLBYTE with null byte unless preceeded by backslash + # Remove one backslash in front of NULLBYTE (if any) + # Remove EOF unless preceeded by backslash + # Remove one backslash in front of EOF + if [ $USE_PYTHON -eq 1 ]; then + echo -n "$line" | python -c "$PYTHON_EXTRACT_FILTER" >> "$path" + else + # The repeated pattern makes up for sed's lack of negative + # lookbehind assertions (for consecutive null bytes). 
+ echo -n "$line" | \ + sed -e 's/^NULLBYTE/\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\([^\\]\)NULLBYTE/\1\x0/g; + s/\\NULLBYTE/NULLBYTE/g; + s/\([^\\]\)EOF/\1/g; + s/\\EOF/EOF/g; + ' >> "$path" + fi + if [[ "$eof_without_newline" -eq 0 ]]; then + echo >> "$path" + fi + size=$(( size - 1 )) + continue + fi + if [[ $line =~ ^Path:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + if [ -e "$path" ] || [ -L "$path" ]; then + rm "$path" + fi + elif [[ $line =~ ^Lines:\ (.*)$ ]]; then + size=${BASH_REMATCH[1]} + # Create file even if it is zero-length. + touch "$path" + vecho " $path" + elif [[ $line =~ ^Mode:\ (.*)$ ]]; then + mode=${BASH_REMATCH[1]} + chmod "$mode" "$path" + vecho "$mode" + elif [[ $line =~ ^Directory:\ (.*)$ ]]; then + path=${BASH_REMATCH[1]} + mkdir -p "$path" + vecho " $path/" + elif [[ $line =~ ^SymlinkTo:\ (.*)$ ]]; then + ln -s "${BASH_REMATCH[1]}" "$path" + vecho " $path -> ${BASH_REMATCH[1]}" + elif [[ $line =~ ^# ]]; then + # Ignore comments between files + continue + else + echo >&2 "ERROR: Unknown keyword on line $line_no: $line" + exit 1 + fi + done < "$ttar_file" +} + +function div { + echo "# ttar - - - - - - - - - - - - - - - - - - - - - - - - - - - - - -" \ + "- - - - - -" +} + +function get_mode { + local mfile=$1 + if [ -z "${STAT_OPTION:-}" ]; then + if stat -c '%a' "$mfile" >/dev/null 2>&1; then + # GNU stat + STAT_OPTION='-c' + STAT_FORMAT='%a' + else + # BSD stat + STAT_OPTION='-f' + # Octal output, user/group/other (omit file type, sticky bit) + STAT_FORMAT='%OLp' + fi + fi + stat "${STAT_OPTION}" "${STAT_FORMAT}" "$mfile" +} + +function _create { + shopt -s nullglob + local mode + local eof_without_newline + while (( "$#" )); do + file=$1 + if [ -L "$file" ]; then + echo "Path: $file" + symlinkTo=$(readlink "$file") + echo "SymlinkTo: $symlinkTo" + vecho " $file -> $symlinkTo" + div + elif [ -d "$file" ]; then + # Strip trailing slash (if there is one) + file=${file%/} + echo "Directory: $file" + mode=$(get_mode "$file") + echo 
"Mode: $mode" + vecho "$mode $file/" + div + # Find all files and dirs, including hidden/dot files + for x in "$file/"{*,.[^.]*}; do + _create "$x" + done + elif [ -f "$file" ]; then + echo "Path: $file" + lines=$(wc -l "$file"|awk '{print $1}') + eof_without_newline=0 + if [[ "$(wc -c "$file"|awk '{print $1}')" -gt 0 ]] && \ + [[ "$(tail -c 1 "$file" | wc -l)" -eq 0 ]]; then + eof_without_newline=1 + lines=$((lines+1)) + fi + echo "Lines: $lines" + # Add backslash in front of EOF + # Add backslash in front of NULLBYTE + # Replace null byte with NULLBYTE + if [ $USE_PYTHON -eq 1 ]; then + < "$file" python -c "$PYTHON_CREATE_FILTER" + else + < "$file" \ + sed 's/EOF/\\EOF/g; + s/NULLBYTE/\\NULLBYTE/g; + s/\x0/NULLBYTE/g; + ' + fi + if [[ "$eof_without_newline" -eq 1 ]]; then + # Finish line with EOF to indicate that the original line did + # not end with a linefeed + echo "EOF" + fi + mode=$(get_mode "$file") + echo "Mode: $mode" + vecho "$mode $file" + div + else + echo >&2 "ERROR: file not found ($file in $(pwd))" + exit 2 + fi + shift + done +} + +function create { + ttar_file=$1 + shift + if [ -z "${1:-}" ]; then + echo >&2 "ERROR: missing arguments." + echo + usage 1 + fi + if [ -e "$ttar_file" ]; then + rm "$ttar_file" + fi + exec > "$ttar_file" + echo "# Archive created by ttar $ARG_STRING" + _create "$@" +} + +test_environment + +if [ -n "${CDIR:-}" ]; then + if [[ "$ARCHIVE" != /* ]]; then + # Relative path: preserve the archive's location before changing + # directory + ARCHIVE="$(pwd)/$ARCHIVE" + fi + cd "$CDIR" +fi + +"$CMD" "$ARCHIVE" "$@" diff --git a/vendor/github.com/prometheus/procfs/xfrm.go b/vendor/github.com/prometheus/procfs/xfrm.go new file mode 100644 index 000000000..ffe9df50d --- /dev/null +++ b/vendor/github.com/prometheus/procfs/xfrm.go @@ -0,0 +1,187 @@ +// Copyright 2017 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package procfs + +import ( + "bufio" + "fmt" + "os" + "strconv" + "strings" +) + +// XfrmStat models the contents of /proc/net/xfrm_stat. +type XfrmStat struct { + // All errors which are not matched by other + XfrmInError int + // No buffer is left + XfrmInBufferError int + // Header Error + XfrmInHdrError int + // No state found + // i.e. either inbound SPI, address, or IPSEC protocol at SA is wrong + XfrmInNoStates int + // Transformation protocol specific error + // e.g. SA Key is wrong + XfrmInStateProtoError int + // Transformation mode specific error + XfrmInStateModeError int + // Sequence error + // e.g. sequence number is out of window + XfrmInStateSeqError int + // State is expired + XfrmInStateExpired int + // State has mismatch option + // e.g. UDP encapsulation type is mismatched + XfrmInStateMismatch int + // State is invalid + XfrmInStateInvalid int + // No matching template for states + // e.g. Inbound SAs are correct but SP rule is wrong + XfrmInTmplMismatch int + // No policy is found for states + // e.g. 
Inbound SAs are correct but no SP is found + XfrmInNoPols int + // Policy discards + XfrmInPolBlock int + // Policy error + XfrmInPolError int + // All errors which are not matched by others + XfrmOutError int + // Bundle generation error + XfrmOutBundleGenError int + // Bundle check error + XfrmOutBundleCheckError int + // No state was found + XfrmOutNoStates int + // Transformation protocol specific error + XfrmOutStateProtoError int + // Transportation mode specific error + XfrmOutStateModeError int + // Sequence error + // i.e sequence number overflow + XfrmOutStateSeqError int + // State is expired + XfrmOutStateExpired int + // Policy discads + XfrmOutPolBlock int + // Policy is dead + XfrmOutPolDead int + // Policy Error + XfrmOutPolError int + XfrmFwdHdrError int + XfrmOutStateInvalid int + XfrmAcquireError int +} + +// NewXfrmStat reads the xfrm_stat statistics. +func NewXfrmStat() (XfrmStat, error) { + fs, err := NewFS(DefaultMountPoint) + if err != nil { + return XfrmStat{}, err + } + + return fs.NewXfrmStat() +} + +// NewXfrmStat reads the xfrm_stat statistics from the 'proc' filesystem. 
+func (fs FS) NewXfrmStat() (XfrmStat, error) { + file, err := os.Open(fs.Path("net/xfrm_stat")) + if err != nil { + return XfrmStat{}, err + } + defer file.Close() + + var ( + x = XfrmStat{} + s = bufio.NewScanner(file) + ) + + for s.Scan() { + fields := strings.Fields(s.Text()) + + if len(fields) != 2 { + return XfrmStat{}, fmt.Errorf( + "couldnt parse %s line %s", file.Name(), s.Text()) + } + + name := fields[0] + value, err := strconv.Atoi(fields[1]) + if err != nil { + return XfrmStat{}, err + } + + switch name { + case "XfrmInError": + x.XfrmInError = value + case "XfrmInBufferError": + x.XfrmInBufferError = value + case "XfrmInHdrError": + x.XfrmInHdrError = value + case "XfrmInNoStates": + x.XfrmInNoStates = value + case "XfrmInStateProtoError": + x.XfrmInStateProtoError = value + case "XfrmInStateModeError": + x.XfrmInStateModeError = value + case "XfrmInStateSeqError": + x.XfrmInStateSeqError = value + case "XfrmInStateExpired": + x.XfrmInStateExpired = value + case "XfrmInStateInvalid": + x.XfrmInStateInvalid = value + case "XfrmInTmplMismatch": + x.XfrmInTmplMismatch = value + case "XfrmInNoPols": + x.XfrmInNoPols = value + case "XfrmInPolBlock": + x.XfrmInPolBlock = value + case "XfrmInPolError": + x.XfrmInPolError = value + case "XfrmOutError": + x.XfrmOutError = value + case "XfrmInStateMismatch": + x.XfrmInStateMismatch = value + case "XfrmOutBundleGenError": + x.XfrmOutBundleGenError = value + case "XfrmOutBundleCheckError": + x.XfrmOutBundleCheckError = value + case "XfrmOutNoStates": + x.XfrmOutNoStates = value + case "XfrmOutStateProtoError": + x.XfrmOutStateProtoError = value + case "XfrmOutStateModeError": + x.XfrmOutStateModeError = value + case "XfrmOutStateSeqError": + x.XfrmOutStateSeqError = value + case "XfrmOutStateExpired": + x.XfrmOutStateExpired = value + case "XfrmOutPolBlock": + x.XfrmOutPolBlock = value + case "XfrmOutPolDead": + x.XfrmOutPolDead = value + case "XfrmOutPolError": + x.XfrmOutPolError = value + case 
"XfrmFwdHdrError": + x.XfrmFwdHdrError = value + case "XfrmOutStateInvalid": + x.XfrmOutStateInvalid = value + case "XfrmAcquireError": + x.XfrmAcquireError = value + } + + } + + return x, s.Err() +} diff --git a/vendor/github.com/prometheus/procfs/xfrm_test.go b/vendor/github.com/prometheus/procfs/xfrm_test.go new file mode 100644 index 000000000..5918c390e --- /dev/null +++ b/vendor/github.com/prometheus/procfs/xfrm_test.go @@ -0,0 +1,66 @@ +// Copyright 2017 Prometheus Team +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package procfs + +import ( + "testing" +) + +func TestXfrmStats(t *testing.T) { + xfrmStats, err := FS("fixtures").NewXfrmStat() + if err != nil { + t.Fatal(err) + } + + for _, test := range []struct { + name string + want int + got int + }{ + {name: "XfrmInError", want: 1, got: xfrmStats.XfrmInError}, + {name: "XfrmInBufferError", want: 2, got: xfrmStats.XfrmInBufferError}, + {name: "XfrmInHdrError", want: 4, got: xfrmStats.XfrmInHdrError}, + {name: "XfrmInNoStates", want: 3, got: xfrmStats.XfrmInNoStates}, + {name: "XfrmInStateProtoError", want: 40, got: xfrmStats.XfrmInStateProtoError}, + {name: "XfrmInStateModeError", want: 100, got: xfrmStats.XfrmInStateModeError}, + {name: "XfrmInStateSeqError", want: 6000, got: xfrmStats.XfrmInStateSeqError}, + {name: "XfrmInStateExpired", want: 4, got: xfrmStats.XfrmInStateExpired}, + {name: "XfrmInStateMismatch", want: 23451, got: xfrmStats.XfrmInStateMismatch}, + {name: "XfrmInStateInvalid", want: 55555, got: xfrmStats.XfrmInStateInvalid}, + {name: "XfrmInTmplMismatch", want: 51, got: xfrmStats.XfrmInTmplMismatch}, + {name: "XfrmInNoPols", want: 65432, got: xfrmStats.XfrmInNoPols}, + {name: "XfrmInPolBlock", want: 100, got: xfrmStats.XfrmInPolBlock}, + {name: "XfrmInPolError", want: 10000, got: xfrmStats.XfrmInPolError}, + {name: "XfrmOutError", want: 1000000, got: xfrmStats.XfrmOutError}, + {name: "XfrmOutBundleGenError", want: 43321, got: xfrmStats.XfrmOutBundleGenError}, + {name: "XfrmOutBundleCheckError", want: 555, got: xfrmStats.XfrmOutBundleCheckError}, + {name: "XfrmOutNoStates", want: 869, got: xfrmStats.XfrmOutNoStates}, + {name: "XfrmOutStateProtoError", want: 4542, got: xfrmStats.XfrmOutStateProtoError}, + {name: "XfrmOutStateModeError", want: 4, got: xfrmStats.XfrmOutStateModeError}, + {name: "XfrmOutStateSeqError", want: 543, got: xfrmStats.XfrmOutStateSeqError}, + {name: "XfrmOutStateExpired", want: 565, got: xfrmStats.XfrmOutStateExpired}, + {name: "XfrmOutPolBlock", want: 43456, got: 
xfrmStats.XfrmOutPolBlock}, + {name: "XfrmOutPolDead", want: 7656, got: xfrmStats.XfrmOutPolDead}, + {name: "XfrmOutPolError", want: 1454, got: xfrmStats.XfrmOutPolError}, + {name: "XfrmFwdHdrError", want: 6654, got: xfrmStats.XfrmFwdHdrError}, + {name: "XfrmOutStateInvaliad", want: 28765, got: xfrmStats.XfrmOutStateInvalid}, + {name: "XfrmAcquireError", want: 24532, got: xfrmStats.XfrmAcquireError}, + {name: "XfrmInStateInvalid", want: 55555, got: xfrmStats.XfrmInStateInvalid}, + {name: "XfrmOutError", want: 1000000, got: xfrmStats.XfrmOutError}, + } { + if test.want != test.got { + t.Errorf("Want %s %d, have %d", test.name, test.want, test.got) + } + } +} diff --git a/vendor/github.com/prometheus/procfs/xfs/parse.go b/vendor/github.com/prometheus/procfs/xfs/parse.go new file mode 100644 index 000000000..2bc0ef342 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/xfs/parse.go @@ -0,0 +1,330 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package xfs + +import ( + "bufio" + "fmt" + "io" + "strings" + + "github.com/prometheus/procfs/internal/util" +) + +// ParseStats parses a Stats from an input io.Reader, using the format +// found in /proc/fs/xfs/stat. +func ParseStats(r io.Reader) (*Stats, error) { + const ( + // Fields parsed into stats structures. 
+ fieldExtentAlloc = "extent_alloc" + fieldAbt = "abt" + fieldBlkMap = "blk_map" + fieldBmbt = "bmbt" + fieldDir = "dir" + fieldTrans = "trans" + fieldIg = "ig" + fieldLog = "log" + fieldRw = "rw" + fieldAttr = "attr" + fieldIcluster = "icluster" + fieldVnodes = "vnodes" + fieldBuf = "buf" + fieldXpc = "xpc" + + // Unimplemented at this time due to lack of documentation. + fieldPushAil = "push_ail" + fieldXstrat = "xstrat" + fieldAbtb2 = "abtb2" + fieldAbtc2 = "abtc2" + fieldBmbt2 = "bmbt2" + fieldIbt2 = "ibt2" + fieldFibt2 = "fibt2" + fieldQm = "qm" + fieldDebug = "debug" + ) + + var xfss Stats + + s := bufio.NewScanner(r) + for s.Scan() { + // Expect at least a string label and a single integer value, ex: + // - abt 0 + // - rw 1 2 + ss := strings.Fields(string(s.Bytes())) + if len(ss) < 2 { + continue + } + label := ss[0] + + // Extended precision counters are uint64 values. + if label == fieldXpc { + us, err := util.ParseUint64s(ss[1:]) + if err != nil { + return nil, err + } + + xfss.ExtendedPrecision, err = extendedPrecisionStats(us) + if err != nil { + return nil, err + } + + continue + } + + // All other counters are uint32 values. 
+ us, err := util.ParseUint32s(ss[1:]) + if err != nil { + return nil, err + } + + switch label { + case fieldExtentAlloc: + xfss.ExtentAllocation, err = extentAllocationStats(us) + case fieldAbt: + xfss.AllocationBTree, err = btreeStats(us) + case fieldBlkMap: + xfss.BlockMapping, err = blockMappingStats(us) + case fieldBmbt: + xfss.BlockMapBTree, err = btreeStats(us) + case fieldDir: + xfss.DirectoryOperation, err = directoryOperationStats(us) + case fieldTrans: + xfss.Transaction, err = transactionStats(us) + case fieldIg: + xfss.InodeOperation, err = inodeOperationStats(us) + case fieldLog: + xfss.LogOperation, err = logOperationStats(us) + case fieldRw: + xfss.ReadWrite, err = readWriteStats(us) + case fieldAttr: + xfss.AttributeOperation, err = attributeOperationStats(us) + case fieldIcluster: + xfss.InodeClustering, err = inodeClusteringStats(us) + case fieldVnodes: + xfss.Vnode, err = vnodeStats(us) + case fieldBuf: + xfss.Buffer, err = bufferStats(us) + } + if err != nil { + return nil, err + } + } + + return &xfss, s.Err() +} + +// extentAllocationStats builds an ExtentAllocationStats from a slice of uint32s. +func extentAllocationStats(us []uint32) (ExtentAllocationStats, error) { + if l := len(us); l != 4 { + return ExtentAllocationStats{}, fmt.Errorf("incorrect number of values for XFS extent allocation stats: %d", l) + } + + return ExtentAllocationStats{ + ExtentsAllocated: us[0], + BlocksAllocated: us[1], + ExtentsFreed: us[2], + BlocksFreed: us[3], + }, nil +} + +// btreeStats builds a BTreeStats from a slice of uint32s. +func btreeStats(us []uint32) (BTreeStats, error) { + if l := len(us); l != 4 { + return BTreeStats{}, fmt.Errorf("incorrect number of values for XFS btree stats: %d", l) + } + + return BTreeStats{ + Lookups: us[0], + Compares: us[1], + RecordsInserted: us[2], + RecordsDeleted: us[3], + }, nil +} + +// BlockMappingStat builds a BlockMappingStats from a slice of uint32s. 
+func blockMappingStats(us []uint32) (BlockMappingStats, error) { + if l := len(us); l != 7 { + return BlockMappingStats{}, fmt.Errorf("incorrect number of values for XFS block mapping stats: %d", l) + } + + return BlockMappingStats{ + Reads: us[0], + Writes: us[1], + Unmaps: us[2], + ExtentListInsertions: us[3], + ExtentListDeletions: us[4], + ExtentListLookups: us[5], + ExtentListCompares: us[6], + }, nil +} + +// DirectoryOperationStats builds a DirectoryOperationStats from a slice of uint32s. +func directoryOperationStats(us []uint32) (DirectoryOperationStats, error) { + if l := len(us); l != 4 { + return DirectoryOperationStats{}, fmt.Errorf("incorrect number of values for XFS directory operation stats: %d", l) + } + + return DirectoryOperationStats{ + Lookups: us[0], + Creates: us[1], + Removes: us[2], + Getdents: us[3], + }, nil +} + +// TransactionStats builds a TransactionStats from a slice of uint32s. +func transactionStats(us []uint32) (TransactionStats, error) { + if l := len(us); l != 3 { + return TransactionStats{}, fmt.Errorf("incorrect number of values for XFS transaction stats: %d", l) + } + + return TransactionStats{ + Sync: us[0], + Async: us[1], + Empty: us[2], + }, nil +} + +// InodeOperationStats builds an InodeOperationStats from a slice of uint32s. +func inodeOperationStats(us []uint32) (InodeOperationStats, error) { + if l := len(us); l != 7 { + return InodeOperationStats{}, fmt.Errorf("incorrect number of values for XFS inode operation stats: %d", l) + } + + return InodeOperationStats{ + Attempts: us[0], + Found: us[1], + Recycle: us[2], + Missed: us[3], + Duplicate: us[4], + Reclaims: us[5], + AttributeChange: us[6], + }, nil +} + +// LogOperationStats builds a LogOperationStats from a slice of uint32s. 
+func logOperationStats(us []uint32) (LogOperationStats, error) { + if l := len(us); l != 5 { + return LogOperationStats{}, fmt.Errorf("incorrect number of values for XFS log operation stats: %d", l) + } + + return LogOperationStats{ + Writes: us[0], + Blocks: us[1], + NoInternalBuffers: us[2], + Force: us[3], + ForceSleep: us[4], + }, nil +} + +// ReadWriteStats builds a ReadWriteStats from a slice of uint32s. +func readWriteStats(us []uint32) (ReadWriteStats, error) { + if l := len(us); l != 2 { + return ReadWriteStats{}, fmt.Errorf("incorrect number of values for XFS read write stats: %d", l) + } + + return ReadWriteStats{ + Read: us[0], + Write: us[1], + }, nil +} + +// AttributeOperationStats builds an AttributeOperationStats from a slice of uint32s. +func attributeOperationStats(us []uint32) (AttributeOperationStats, error) { + if l := len(us); l != 4 { + return AttributeOperationStats{}, fmt.Errorf("incorrect number of values for XFS attribute operation stats: %d", l) + } + + return AttributeOperationStats{ + Get: us[0], + Set: us[1], + Remove: us[2], + List: us[3], + }, nil +} + +// InodeClusteringStats builds an InodeClusteringStats from a slice of uint32s. +func inodeClusteringStats(us []uint32) (InodeClusteringStats, error) { + if l := len(us); l != 3 { + return InodeClusteringStats{}, fmt.Errorf("incorrect number of values for XFS inode clustering stats: %d", l) + } + + return InodeClusteringStats{ + Iflush: us[0], + Flush: us[1], + FlushInode: us[2], + }, nil +} + +// VnodeStats builds a VnodeStats from a slice of uint32s. +func vnodeStats(us []uint32) (VnodeStats, error) { + // The attribute "Free" appears to not be available on older XFS + // stats versions. Therefore, 7 or 8 elements may appear in + // this slice. 
+ l := len(us) + if l != 7 && l != 8 { + return VnodeStats{}, fmt.Errorf("incorrect number of values for XFS vnode stats: %d", l) + } + + s := VnodeStats{ + Active: us[0], + Allocate: us[1], + Get: us[2], + Hold: us[3], + Release: us[4], + Reclaim: us[5], + Remove: us[6], + } + + // Skip adding free, unless it is present. The zero value will + // be used in place of an actual count. + if l == 7 { + return s, nil + } + + s.Free = us[7] + return s, nil +} + +// BufferStats builds a BufferStats from a slice of uint32s. +func bufferStats(us []uint32) (BufferStats, error) { + if l := len(us); l != 9 { + return BufferStats{}, fmt.Errorf("incorrect number of values for XFS buffer stats: %d", l) + } + + return BufferStats{ + Get: us[0], + Create: us[1], + GetLocked: us[2], + GetLockedWaited: us[3], + BusyLocked: us[4], + MissLocked: us[5], + PageRetries: us[6], + PageFound: us[7], + GetRead: us[8], + }, nil +} + +// ExtendedPrecisionStats builds an ExtendedPrecisionStats from a slice of uint32s. +func extendedPrecisionStats(us []uint64) (ExtendedPrecisionStats, error) { + if l := len(us); l != 3 { + return ExtendedPrecisionStats{}, fmt.Errorf("incorrect number of values for XFS extended precision stats: %d", l) + } + + return ExtendedPrecisionStats{ + FlushBytes: us[0], + WriteBytes: us[1], + ReadBytes: us[2], + }, nil +} diff --git a/vendor/github.com/prometheus/procfs/xfs/parse_test.go b/vendor/github.com/prometheus/procfs/xfs/parse_test.go new file mode 100644 index 000000000..2e946c2c5 --- /dev/null +++ b/vendor/github.com/prometheus/procfs/xfs/parse_test.go @@ -0,0 +1,442 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package xfs_test + +import ( + "reflect" + "strings" + "testing" + + "github.com/prometheus/procfs" + "github.com/prometheus/procfs/xfs" +) + +func TestParseStats(t *testing.T) { + tests := []struct { + name string + s string + fs bool + stats *xfs.Stats + invalid bool + }{ + { + name: "empty file OK", + }, + { + name: "short or empty lines and unknown labels ignored", + s: "one\n\ntwo 1 2 3\n", + stats: &xfs.Stats{}, + }, + { + name: "bad uint32", + s: "extent_alloc XXX", + invalid: true, + }, + { + name: "bad uint64", + s: "xpc XXX", + invalid: true, + }, + { + name: "extent_alloc bad", + s: "extent_alloc 1", + invalid: true, + }, + { + name: "extent_alloc OK", + s: "extent_alloc 1 2 3 4", + stats: &xfs.Stats{ + ExtentAllocation: xfs.ExtentAllocationStats{ + ExtentsAllocated: 1, + BlocksAllocated: 2, + ExtentsFreed: 3, + BlocksFreed: 4, + }, + }, + }, + { + name: "abt bad", + s: "abt 1", + invalid: true, + }, + { + name: "abt OK", + s: "abt 1 2 3 4", + stats: &xfs.Stats{ + AllocationBTree: xfs.BTreeStats{ + Lookups: 1, + Compares: 2, + RecordsInserted: 3, + RecordsDeleted: 4, + }, + }, + }, + { + name: "blk_map bad", + s: "blk_map 1", + invalid: true, + }, + { + name: "blk_map OK", + s: "blk_map 1 2 3 4 5 6 7", + stats: &xfs.Stats{ + BlockMapping: xfs.BlockMappingStats{ + Reads: 1, + Writes: 2, + Unmaps: 3, + ExtentListInsertions: 4, + ExtentListDeletions: 5, + ExtentListLookups: 6, + ExtentListCompares: 7, + }, + }, + }, + { + name: "bmbt bad", + s: "bmbt 1", + invalid: true, + }, + { + name: "bmbt OK", + s: "bmbt 1 2 3 4", + stats: 
&xfs.Stats{ + BlockMapBTree: xfs.BTreeStats{ + Lookups: 1, + Compares: 2, + RecordsInserted: 3, + RecordsDeleted: 4, + }, + }, + }, + { + name: "dir bad", + s: "dir 1", + invalid: true, + }, + { + name: "dir OK", + s: "dir 1 2 3 4", + stats: &xfs.Stats{ + DirectoryOperation: xfs.DirectoryOperationStats{ + Lookups: 1, + Creates: 2, + Removes: 3, + Getdents: 4, + }, + }, + }, + { + name: "trans bad", + s: "trans 1", + invalid: true, + }, + { + name: "trans OK", + s: "trans 1 2 3", + stats: &xfs.Stats{ + Transaction: xfs.TransactionStats{ + Sync: 1, + Async: 2, + Empty: 3, + }, + }, + }, + { + name: "ig bad", + s: "ig 1", + invalid: true, + }, + { + name: "ig OK", + s: "ig 1 2 3 4 5 6 7", + stats: &xfs.Stats{ + InodeOperation: xfs.InodeOperationStats{ + Attempts: 1, + Found: 2, + Recycle: 3, + Missed: 4, + Duplicate: 5, + Reclaims: 6, + AttributeChange: 7, + }, + }, + }, + { + name: "log bad", + s: "log 1", + invalid: true, + }, + { + name: "log OK", + s: "log 1 2 3 4 5", + stats: &xfs.Stats{ + LogOperation: xfs.LogOperationStats{ + Writes: 1, + Blocks: 2, + NoInternalBuffers: 3, + Force: 4, + ForceSleep: 5, + }, + }, + }, + { + name: "rw bad", + s: "rw 1", + invalid: true, + }, + { + name: "rw OK", + s: "rw 1 2", + stats: &xfs.Stats{ + ReadWrite: xfs.ReadWriteStats{ + Read: 1, + Write: 2, + }, + }, + }, + { + name: "attr bad", + s: "attr 1", + invalid: true, + }, + { + name: "attr OK", + s: "attr 1 2 3 4", + stats: &xfs.Stats{ + AttributeOperation: xfs.AttributeOperationStats{ + Get: 1, + Set: 2, + Remove: 3, + List: 4, + }, + }, + }, + { + name: "icluster bad", + s: "icluster 1", + invalid: true, + }, + { + name: "icluster OK", + s: "icluster 1 2 3", + stats: &xfs.Stats{ + InodeClustering: xfs.InodeClusteringStats{ + Iflush: 1, + Flush: 2, + FlushInode: 3, + }, + }, + }, + { + name: "vnodes bad", + s: "vnodes 1", + invalid: true, + }, + { + name: "vnodes (missing free) OK", + s: "vnodes 1 2 3 4 5 6 7", + stats: &xfs.Stats{ + Vnode: xfs.VnodeStats{ + Active: 1, + 
Allocate: 2, + Get: 3, + Hold: 4, + Release: 5, + Reclaim: 6, + Remove: 7, + }, + }, + }, + { + name: "vnodes (with free) OK", + s: "vnodes 1 2 3 4 5 6 7 8", + stats: &xfs.Stats{ + Vnode: xfs.VnodeStats{ + Active: 1, + Allocate: 2, + Get: 3, + Hold: 4, + Release: 5, + Reclaim: 6, + Remove: 7, + Free: 8, + }, + }, + }, + { + name: "buf bad", + s: "buf 1", + invalid: true, + }, + { + name: "buf OK", + s: "buf 1 2 3 4 5 6 7 8 9", + stats: &xfs.Stats{ + Buffer: xfs.BufferStats{ + Get: 1, + Create: 2, + GetLocked: 3, + GetLockedWaited: 4, + BusyLocked: 5, + MissLocked: 6, + PageRetries: 7, + PageFound: 8, + GetRead: 9, + }, + }, + }, + { + name: "xpc bad", + s: "xpc 1", + invalid: true, + }, + { + name: "xpc OK", + s: "xpc 1 2 3", + stats: &xfs.Stats{ + ExtendedPrecision: xfs.ExtendedPrecisionStats{ + FlushBytes: 1, + WriteBytes: 2, + ReadBytes: 3, + }, + }, + }, + { + name: "fixtures OK", + fs: true, + stats: &xfs.Stats{ + ExtentAllocation: xfs.ExtentAllocationStats{ + ExtentsAllocated: 92447, + BlocksAllocated: 97589, + ExtentsFreed: 92448, + BlocksFreed: 93751, + }, + AllocationBTree: xfs.BTreeStats{ + Lookups: 0, + Compares: 0, + RecordsInserted: 0, + RecordsDeleted: 0, + }, + BlockMapping: xfs.BlockMappingStats{ + Reads: 1767055, + Writes: 188820, + Unmaps: 184891, + ExtentListInsertions: 92447, + ExtentListDeletions: 92448, + ExtentListLookups: 2140766, + ExtentListCompares: 0, + }, + BlockMapBTree: xfs.BTreeStats{ + Lookups: 0, + Compares: 0, + RecordsInserted: 0, + RecordsDeleted: 0, + }, + DirectoryOperation: xfs.DirectoryOperationStats{ + Lookups: 185039, + Creates: 92447, + Removes: 92444, + Getdents: 136422, + }, + Transaction: xfs.TransactionStats{ + Sync: 706, + Async: 944304, + Empty: 0, + }, + InodeOperation: xfs.InodeOperationStats{ + Attempts: 185045, + Found: 58807, + Recycle: 0, + Missed: 126238, + Duplicate: 0, + Reclaims: 33637, + AttributeChange: 22, + }, + LogOperation: xfs.LogOperationStats{ + Writes: 2883, + Blocks: 113448, + NoInternalBuffers: 
9, + Force: 17360, + ForceSleep: 739, + }, + ReadWrite: xfs.ReadWriteStats{ + Read: 107739, + Write: 94045, + }, + AttributeOperation: xfs.AttributeOperationStats{ + Get: 4, + Set: 0, + Remove: 0, + List: 0, + }, + InodeClustering: xfs.InodeClusteringStats{ + Iflush: 8677, + Flush: 7849, + FlushInode: 135802, + }, + Vnode: xfs.VnodeStats{ + Active: 92601, + Allocate: 0, + Get: 0, + Hold: 0, + Release: 92444, + Reclaim: 92444, + Remove: 92444, + Free: 0, + }, + Buffer: xfs.BufferStats{ + Get: 2666287, + Create: 7122, + GetLocked: 2659202, + GetLockedWaited: 3599, + BusyLocked: 2, + MissLocked: 7085, + PageRetries: 0, + PageFound: 10297, + GetRead: 7085, + }, + ExtendedPrecision: xfs.ExtendedPrecisionStats{ + FlushBytes: 399724544, + WriteBytes: 92823103, + ReadBytes: 86219234, + }, + }, + }, + } + + for _, tt := range tests { + var ( + stats *xfs.Stats + err error + ) + + if tt.s != "" { + stats, err = xfs.ParseStats(strings.NewReader(tt.s)) + } + if tt.fs { + stats, err = procfs.FS("../fixtures").XFSStats() + } + + if tt.invalid && err == nil { + t.Error("expected an error, but none occurred") + } + if !tt.invalid && err != nil { + t.Errorf("unexpected error: %v", err) + } + + if want, have := tt.stats, stats; !reflect.DeepEqual(want, have) { + t.Errorf("unexpected XFS stats:\nwant:\n%v\nhave:\n%v", want, have) + } + } +} diff --git a/vendor/github.com/prometheus/procfs/xfs/xfs.go b/vendor/github.com/prometheus/procfs/xfs/xfs.go new file mode 100644 index 000000000..d86794b7c --- /dev/null +++ b/vendor/github.com/prometheus/procfs/xfs/xfs.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Prometheus Authors +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +// Package xfs provides access to statistics exposed by the XFS filesystem. +package xfs + +// Stats contains XFS filesystem runtime statistics, parsed from +// /proc/fs/xfs/stat. +// +// The names and meanings of each statistic were taken from +// http://xfs.org/index.php/Runtime_Stats and xfs_stats.h in the Linux +// kernel source. Most counters are uint32s (same data types used in +// xfs_stats.h), but some of the "extended precision stats" are uint64s. +type Stats struct { + // The name of the filesystem used to source these statistics. + // If empty, this indicates aggregated statistics for all XFS + // filesystems on the host. + Name string + + ExtentAllocation ExtentAllocationStats + AllocationBTree BTreeStats + BlockMapping BlockMappingStats + BlockMapBTree BTreeStats + DirectoryOperation DirectoryOperationStats + Transaction TransactionStats + InodeOperation InodeOperationStats + LogOperation LogOperationStats + ReadWrite ReadWriteStats + AttributeOperation AttributeOperationStats + InodeClustering InodeClusteringStats + Vnode VnodeStats + Buffer BufferStats + ExtendedPrecision ExtendedPrecisionStats +} + +// ExtentAllocationStats contains statistics regarding XFS extent allocations. +type ExtentAllocationStats struct { + ExtentsAllocated uint32 + BlocksAllocated uint32 + ExtentsFreed uint32 + BlocksFreed uint32 +} + +// BTreeStats contains statistics regarding an XFS internal B-tree. 
+type BTreeStats struct { + Lookups uint32 + Compares uint32 + RecordsInserted uint32 + RecordsDeleted uint32 +} + +// BlockMappingStats contains statistics regarding XFS block maps. +type BlockMappingStats struct { + Reads uint32 + Writes uint32 + Unmaps uint32 + ExtentListInsertions uint32 + ExtentListDeletions uint32 + ExtentListLookups uint32 + ExtentListCompares uint32 +} + +// DirectoryOperationStats contains statistics regarding XFS directory entries. +type DirectoryOperationStats struct { + Lookups uint32 + Creates uint32 + Removes uint32 + Getdents uint32 +} + +// TransactionStats contains statistics regarding XFS metadata transactions. +type TransactionStats struct { + Sync uint32 + Async uint32 + Empty uint32 +} + +// InodeOperationStats contains statistics regarding XFS inode operations. +type InodeOperationStats struct { + Attempts uint32 + Found uint32 + Recycle uint32 + Missed uint32 + Duplicate uint32 + Reclaims uint32 + AttributeChange uint32 +} + +// LogOperationStats contains statistics regarding the XFS log buffer. +type LogOperationStats struct { + Writes uint32 + Blocks uint32 + NoInternalBuffers uint32 + Force uint32 + ForceSleep uint32 +} + +// ReadWriteStats contains statistics regarding the number of read and write +// system calls for XFS filesystems. +type ReadWriteStats struct { + Read uint32 + Write uint32 +} + +// AttributeOperationStats contains statistics regarding manipulation of +// XFS extended file attributes. +type AttributeOperationStats struct { + Get uint32 + Set uint32 + Remove uint32 + List uint32 +} + +// InodeClusteringStats contains statistics regarding XFS inode clustering +// operations. +type InodeClusteringStats struct { + Iflush uint32 + Flush uint32 + FlushInode uint32 +} + +// VnodeStats contains statistics regarding XFS vnode operations. 
+type VnodeStats struct { + Active uint32 + Allocate uint32 + Get uint32 + Hold uint32 + Release uint32 + Reclaim uint32 + Remove uint32 + Free uint32 +} + +// BufferStats contains statistics regarding XFS read/write I/O buffers. +type BufferStats struct { + Get uint32 + Create uint32 + GetLocked uint32 + GetLockedWaited uint32 + BusyLocked uint32 + MissLocked uint32 + PageRetries uint32 + PageFound uint32 + GetRead uint32 +} + +// ExtendedPrecisionStats contains high precision counters used to track the +// total number of bytes read, written, or flushed, during XFS operations. +type ExtendedPrecisionStats struct { + FlushBytes uint64 + WriteBytes uint64 + ReadBytes uint64 +}