Merge pull request #106 from rusenask/develop

Develop
pull/114/head 0.5.0-rc.1
Karolis Rusenas 2017-10-07 14:32:59 +01:00 committed by GitHub
commit c9b656ff8e
1532 changed files with 31862 additions and 239719 deletions

View File

@ -16,5 +16,6 @@ jobs:
steps:
- checkout
# specify any bash command here prefixed with `run: `
# specify any bash command here prefixed with `run: `
- run: make build
- run: make test

View File

@ -15,3 +15,6 @@ const (
EnvSlackBotName = "SLACK_BOT_NAME"
EnvSlackChannels = "SLACK_CHANNELS"
)
// EnvNotificationLevel - minimum level for notifications, defaults to info
const EnvNotificationLevel = "NOTIFICATION_LEVEL"

View File

@ -32,6 +32,7 @@ var (
// notifiers.
type Config struct {
// Attempts - NOTE(review): presumably the number of delivery attempts
// per notification; confirm against the sender implementations.
Attempts int
// Level - minimum event level that gets dispatched; events below this
// level are dropped by Send.
Level types.Level
// Params - additional sender-specific settings, inlined when this
// struct is decoded from YAML.
Params map[string]interface{} `yaml:",inline"`
}
@ -76,6 +77,7 @@ func RegisterSender(name string, s Sender) {
type DefaultNotificationSender struct {
// config holds attempts and the minimum notification level.
config *Config
// stopper coordinates shutdown of in-flight work.
stopper *stopper.Stopper
// level - NOTE(review): appears unused in the visible code; Send reads
// config.Level instead. Confirm whether this field is needed.
level types.Level
}
// New - create new sender
@ -118,6 +120,10 @@ func (m *DefaultNotificationSender) Senders() map[string]Sender {
// Send - send notifications through all configured senders
func (m *DefaultNotificationSender) Send(event types.EventNotification) error {
if event.Level < m.config.Level {
return nil
}
sendersM.RLock()
defer sendersM.RUnlock()

View File

@ -0,0 +1,129 @@
package notification
import (
"context"
"fmt"
"testing"
"github.com/rusenask/keel/types"
)
// fakeSender is a test double for the Sender interface; it records the
// last event handed to it so tests can inspect what was delivered.
type fakeSender struct {
	sent            *types.EventNotification
	shouldConfigure bool
	shouldError     error
}

// Configure reports whether this fake wants to be registered as active.
func (fs *fakeSender) Configure(*Config) (bool, error) {
	return fs.shouldConfigure, nil
}

// Send logs the delivery, remembers the event and returns the
// preconfigured error (if any).
func (fs *fakeSender) Send(event types.EventNotification) error {
	fmt.Println("sending event")
	fs.sent = &event
	return fs.shouldError
}
// TestSend verifies that a registered sender receives an event whose
// level meets the configured minimum (LevelInfo >= LevelDebug here).
func TestSend(t *testing.T) {
	sndr := New(context.Background())
	sndr.Configure(&Config{
		Level:    types.LevelDebug,
		Attempts: 1,
	})

	fs := &fakeSender{
		shouldConfigure: true,
		shouldError:     nil,
	}
	RegisterSender("fakeSender", fs)
	defer sndr.UnregisterSender("fakeSender")

	err := sndr.Send(types.EventNotification{
		Level:   types.LevelInfo,
		Type:    types.NotificationPreDeploymentUpdate,
		Message: "foo",
	})
	if err != nil {
		t.Errorf("unexpected error: %s", err)
	}

	// Fail fast if nothing was delivered; the dereferences below would
	// otherwise panic on a nil fs.sent and abort the test binary.
	if fs.sent == nil {
		t.Fatal("expected notification to be sent, got none")
	}
	if fs.sent.Message != "foo" {
		t.Errorf("unexpected notification message: %s", fs.sent.Message)
	}
	if fs.sent.Level != types.LevelInfo {
		t.Errorf("unexpected level: %s", fs.sent.Level)
	}
}
// TestSendLevelNotificationA checks that an event below the configured
// minimum level (debug < info) is silently dropped and never reaches
// the registered sender.
func TestSendLevelNotificationA(t *testing.T) {
	sndr := New(context.Background())
	sndr.Configure(&Config{
		Level:    types.LevelInfo,
		Attempts: 1,
	})

	fs := &fakeSender{
		shouldConfigure: true,
		shouldError:     nil,
	}
	RegisterSender("fakeSender", fs)
	defer sndr.UnregisterSender("fakeSender")

	err := sndr.Send(types.EventNotification{
		Level:   types.LevelDebug,
		Type:    types.NotificationPreDeploymentUpdate,
		Message: "foo",
	})
	if err != nil {
		t.Errorf("unexpected error: %s", err)
	}

	// Fixed typo in the failure message ("even" -> "event").
	if fs.sent != nil {
		t.Errorf("didn't expect to find sent event for this level")
	}
}
// TestSendLevelNotificationB checks that an event at a higher level than
// the configured minimum (success > info) is delivered to the sender.
func TestSendLevelNotificationB(t *testing.T) {
	sndr := New(context.Background())
	sndr.Configure(&Config{
		Level:    types.LevelInfo,
		Attempts: 1,
	})

	fs := &fakeSender{
		shouldConfigure: true,
		shouldError:     nil,
	}
	RegisterSender("fakeSender", fs)
	defer sndr.UnregisterSender("fakeSender")

	err := sndr.Send(types.EventNotification{
		Level:   types.LevelSuccess,
		Type:    types.NotificationPreDeploymentUpdate,
		Message: "foo",
	})
	if err != nil {
		t.Errorf("unexpected error: %s", err)
	}

	// Guard against a nil dereference if delivery unexpectedly failed.
	if fs.sent == nil {
		t.Fatal("expected notification to be sent, got none")
	}
	if fs.sent.Message != "foo" {
		t.Errorf("unexpected notification message: %s", fs.sent.Message)
	}
	if fs.sent.Level != types.LevelSuccess {
		t.Errorf("unexpected level: %s", fs.sent.Level)
	}
}

View File

@ -25,7 +25,7 @@ func TestWebhookRequest(t *testing.T) {
t.Errorf("missing deployment type")
}
if !strings.Contains(bodyStr, "LevelDebug") {
if !strings.Contains(bodyStr, "debug") {
t.Errorf("missing level")
}

25
glide.lock generated
View File

@ -1,8 +1,8 @@
hash: 7745d67060c2a5cdcec48b6caad7fe970628b1a8cf19496cce7060f9ede1421a
updated: 2017-09-12T19:36:03.621062651+03:00
hash: 476bff82bacd66b7bf9ea8e80ac8048daff4c26ef962e83fc96a30587f483ba9
updated: 2017-10-02T17:09:52.291993337+01:00
imports:
- name: cloud.google.com/go
version: b4e9a381a01e953e880e6d2cf7fd02d412977cae
version: f6de2c509ed9d2af648c3c147207eaaf97149aed
subpackages:
- compute/metadata
- iam
@ -17,7 +17,7 @@ imports:
subpackages:
- spew
- name: github.com/docker/distribution
version: 48294d928ced5dd9b378f7fd7c6f5da3ff3f2c89
version: cd27f179f2c10c5d300e6d09025b538c475b0d51
subpackages:
- context
- digest
@ -75,7 +75,7 @@ imports:
- name: github.com/google/gofuzz
version: 44d81051d367757e1c7c6a5a86423ece9afcf63c
- name: github.com/googleapis/gax-go
version: 8c160ca1523d8eea3932fbaa494c8964b7724aa8
version: 317e0006254c44a0ac427cc52a0e083ff0b9622f
- name: github.com/gorilla/context
version: 08b5f424b9271eedf6f9f0ce86cb9396ed337a42
- name: github.com/gorilla/mux
@ -97,7 +97,7 @@ imports:
- name: github.com/nlopes/slack
version: c86337c0ef2486a15edd804355d9c73d2f2caed1
- name: github.com/opencontainers/go-digest
version: aa2ec055abd10d26d539eb630a92241b781ce4bc
version: 279bed98673dd5bef374d3b6e4b09e2af76183bf
- name: github.com/PuerkitoBio/purell
version: 8a290539e2e8629dbc4e6bad948158f790ec31f4
- name: github.com/PuerkitoBio/urlesc
@ -105,7 +105,7 @@ imports:
- name: github.com/rusenask/cron
version: 287f34f2c2db9a9d41ec56856746cce28dd53ec5
- name: github.com/rusenask/docker-registry-client
version: 315973e9173738626b8c81cb39ba247f8cb190e5
version: 10feb567ea4a2941f5821ae47b75330e0dcfe520
subpackages:
- registry
- name: github.com/rusenask/k8s-kv
@ -114,10 +114,6 @@ imports:
- kv
- name: github.com/Sirupsen/logrus
version: 89742aefa4b206dcf400792f3bd35b542998eb3b
repo: https://github.com/sirupsen/logrus.git
vcs: git
- name: github.com/sirupsen/logrus
version: 89742aefa4b206dcf400792f3bd35b542998eb3b
- name: github.com/spf13/pflag
version: 9ff6c6923cfffbcd502984b8e0c80539a94968b7
- name: github.com/ugorji/go
@ -127,7 +123,7 @@ imports:
- name: github.com/urfave/negroni
version: fde5e16d32adc7ad637e9cd9ad21d4ebc6192535
- name: golang.org/x/crypto
version: 88e95fbb56610f02dbc78ebc3b207bec8cf56b86
version: 9419663f5a44be8b34ca85f08abc5fe1be11f8a3
subpackages:
- ssh/terminal
- name: golang.org/x/net
@ -150,8 +146,9 @@ imports:
- jws
- jwt
- name: golang.org/x/sync
version: f52d1811a62927559de87708c8913c1650ce4f26
version: 8e0aa688b654ef28caa72506fa5ec8dba9fc7690
subpackages:
- errgroup
- semaphore
- name: golang.org/x/sys
version: 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9
@ -199,7 +196,7 @@ imports:
- socket
- urlfetch
- name: google.golang.org/genproto
version: 595979c8a7bf586b2d293fb42246bf91a0b893d9
version: 1e559d0a00eef8a9a43151db4665280bd8dd5886
subpackages:
- googleapis/api/annotations
- googleapis/iam/v1

View File

@ -5,21 +5,16 @@ import:
# subpackages:
# - pubsub
- package: cloud.google.com/go
version: b4e9a381a01e953e880e6d2cf7fd02d412977cae
version: ^0.11.0
subpackages:
- pubsub
- iam
- internal
- package: github.com/Masterminds/semver
version: ^1.3.1
- package: github.com/sirupsen/logrus
version: master
- package: github.com/Sirupsen/logrus
repo: https://github.com/sirupsen/logrus.git
vcs: git
version: master
- package: github.com/docker/distribution
version: ^2.6.2
# version: 5db89f0ca68677abc5eefce8f2a0a772c98ba52d
subpackages:
- digest
- reference
@ -32,7 +27,7 @@ import:
- package: github.com/rusenask/cron
version: v1.1.0
- package: github.com/rusenask/docker-registry-client
version: ^0.1.0
version: ^0.1.2
subpackages:
- registry
- package: github.com/urfave/negroni

13
main.go
View File

@ -74,8 +74,21 @@ func main() {
ctx, cancel := netContext.WithCancel(context.Background())
defer cancel()
notificationLevel := types.LevelInfo
if os.Getenv(constants.EnvNotificationLevel) != "" {
parsedLevel, err := types.ParseLevel(os.Getenv(constants.EnvNotificationLevel))
if err != nil {
log.WithFields(log.Fields{
"error": err,
}).Errorf("main: got error while parsing notification level, defaulting to: %s", notificationLevel)
} else {
notificationLevel = parsedLevel
}
}
notifCfg := &notification.Config{
Attempts: 10,
Level: notificationLevel,
}
sender := notification.New(ctx)

View File

@ -18,7 +18,9 @@ import (
)
// Keys under which docker registry credentials are stored inside
// Kubernetes secrets, depending on the secret type:
//   - kubernetes.io/dockercfg secrets use ".dockercfg"
//   - kubernetes.io/dockerconfigjson secrets use ".dockerconfigjson"
// (The span previously contained both the pre- and post-diff const
// lines, declaring dockerConfigJSONKey twice with conflicting values;
// only the post-image declarations are kept.)
const dockerConfigKey = ".dockercfg"
const dockerConfigJSONKey = ".dockerconfigjson"
// common errors
var (
@ -129,7 +131,58 @@ func (g *DefaultGetter) getCredentialsFromSecret(image *types.TrackedImage) (*ty
continue
}
if secret.Type != v1.SecretTypeDockercfg {
dockerCfg := make(DockerCfg)
switch secret.Type {
case v1.SecretTypeDockercfg:
secretDataBts, ok := secret.Data[dockerConfigKey]
if !ok {
log.WithFields(log.Fields{
"image": image.Image.Repository(),
"namespace": image.Namespace,
"secret_ref": secretRef,
"type": secret.Type,
"data": secret.Data,
}).Warn("secrets.defaultGetter: secret is missing key '.dockerconfig', ensure that key exists")
continue
}
dockerCfg, err = decodeSecret(secretDataBts)
if err != nil {
log.WithFields(log.Fields{
"image": image.Image.Repository(),
"namespace": image.Namespace,
"secret_ref": secretRef,
"secret_data": string(secretDataBts),
"error": err,
}).Error("secrets.defaultGetter: failed to decode secret")
continue
}
case v1.SecretTypeDockerConfigJson:
secretDataBts, ok := secret.Data[dockerConfigJSONKey]
if !ok {
log.WithFields(log.Fields{
"image": image.Image.Repository(),
"namespace": image.Namespace,
"secret_ref": secretRef,
"type": secret.Type,
"data": secret.Data,
}).Warn("secrets.defaultGetter: secret is missing key '.dockerconfigjson', ensure that key exists")
continue
}
dockerCfg, err = decodeJSONSecret(secretDataBts)
if err != nil {
log.WithFields(log.Fields{
"image": image.Image.Repository(),
"namespace": image.Namespace,
"secret_ref": secretRef,
"secret_data": string(secretDataBts),
"error": err,
}).Error("secrets.defaultGetter: failed to decode secret")
continue
}
default:
log.WithFields(log.Fields{
"image": image.Image.Repository(),
"namespace": image.Namespace,
@ -139,29 +192,6 @@ func (g *DefaultGetter) getCredentialsFromSecret(image *types.TrackedImage) (*ty
continue
}
secretDataBts, ok := secret.Data[dockerConfigJSONKey]
if !ok {
log.WithFields(log.Fields{
"image": image.Image.Repository(),
"namespace": image.Namespace,
"secret_ref": secretRef,
"type": secret.Type,
"data": secret.Data,
}).Warn("secrets.defaultGetter: secret is missing key '.dockerconfigjson', ensure that key exists")
continue
}
dockerCfg, err := decodeSecret(secretDataBts)
if err != nil {
log.WithFields(log.Fields{
"image": image.Image.Repository(),
"namespace": image.Namespace,
"secret_ref": secretRef,
"secret_data": string(secretDataBts),
"error": err,
}).Error("secrets.defaultGetter: failed to decode secret")
continue
}
// looking for our registry
for registry, auth := range dockerCfg {
h, err := hostname(registry)
@ -246,11 +276,15 @@ func decodeBase64Secret(authSecret string) (username, password string, err error
}
// hostname normalizes a registry reference into a bare host name.
// References with an explicit http(s) scheme are parsed as URLs and
// reduced to their host (port and path stripped); anything else
// (e.g. "quay.io") is assumed to already be a host name and is
// returned unchanged. The span previously interleaved the pre- and
// post-diff bodies; this is the clean post-image.
func hostname(registry string) (string, error) {
	if strings.HasPrefix(registry, "http://") || strings.HasPrefix(registry, "https://") {
		u, err := url.Parse(registry)
		if err != nil {
			return "", err
		}
		return u.Hostname(), nil
	}
	return registry, nil
}
func decodeSecret(data []byte) (DockerCfg, error) {
@ -262,6 +296,20 @@ func decodeSecret(data []byte) (DockerCfg, error) {
return cfg, nil
}
// decodeJSONSecret parses a ".dockerconfigjson" payload and returns the
// registry->auth map found under its "auths" key.
func decodeJSONSecret(data []byte) (DockerCfg, error) {
	var wrapper DockerCfgJSON
	if err := json.Unmarshal(data, &wrapper); err != nil {
		return nil, err
	}
	return wrapper.Auths, nil
}

// DockerCfgJSON - secret structure when dockerconfigjson is used
type DockerCfgJSON struct {
	Auths DockerCfg `json:"auths"`
}

// DockerCfg - registry_name=auth
type DockerCfg map[string]*Auth

View File

@ -12,6 +12,14 @@ import (
)
var secretDataPayload = `{"https://index.docker.io/v1/":{"username":"user-x","password":"pass-x","email":"karolis.rusenas@gmail.com","auth":"somethinghere"}}`
var secretDockerConfigJSONPayload = `{
"auths": {
"quay.io": {
"auth": "a2VlbHVzZXIra2VlbHRlc3Q6U05NR0lIVlRHUkRLSTZQMTdPTkVWUFBDQUpON1g5Sk1XUDg2ODJLWDA1RDdUQU5SWDRXMDhIUEw5QldRTDAxSg==",
"email": ""
}
}
}`
func mustEncode(data string) string {
return base64.StdEncoding.EncodeToString([]byte(data))
@ -23,7 +31,7 @@ func TestGetSecret(t *testing.T) {
impl := &testutil.FakeK8sImplementer{
AvailableSecret: &v1.Secret{
Data: map[string][]byte{
dockerConfigJSONKey: []byte(secretDataPayload),
dockerConfigKey: []byte(secretDataPayload),
},
Type: v1.SecretTypeDockercfg,
},
@ -51,6 +59,40 @@ func TestGetSecret(t *testing.T) {
}
}
// TestGetDockerConfigJSONSecret ensures credentials can be extracted
// from a kubernetes.io/dockerconfigjson secret (".dockerconfigjson" key).
func TestGetDockerConfigJSONSecret(t *testing.T) {
	imgRef, _ := image.Parse("quay.io/karolisr/webhook-demo:0.0.11")
	impl := &testutil.FakeK8sImplementer{
		AvailableSecret: &v1.Secret{
			Data: map[string][]byte{
				dockerConfigJSONKey: []byte(secretDockerConfigJSONPayload),
			},
			Type: v1.SecretTypeDockerConfigJson,
		},
	}

	getter := NewGetter(impl)

	trackedImage := &types.TrackedImage{
		Image:     imgRef,
		Namespace: "default",
		Secrets:   []string{"myregistrysecret"},
	}

	creds, err := getter.Get(trackedImage)
	if err != nil {
		// Fatal, not Errorf: creds is unusable on error and the checks
		// below would dereference a nil pointer.
		t.Fatalf("failed to get creds: %s", err)
	}

	if creds.Username != "keeluser+keeltest" {
		t.Errorf("unexpected username: %s", creds.Username)
	}
	if creds.Password != "SNMGIHVTGRDKI6P17ONEVPPCAJN7X9JMWP8682KX05D7TANRX4W08HPL9BWQL01J" {
		t.Errorf("unexpected pass: %s", creds.Password)
	}
}
func TestGetSecretNotFound(t *testing.T) {
imgRef, _ := image.Parse("karolisr/webhook-demo:0.0.11")
@ -100,7 +142,7 @@ func TestLookupHelmSecret(t *testing.T) {
},
AvailableSecret: &v1.Secret{
Data: map[string][]byte{
dockerConfigJSONKey: []byte(fmt.Sprintf(secretDataPayloadEncoded, mustEncode("user-y:pass-y"))),
dockerConfigKey: []byte(fmt.Sprintf(secretDataPayloadEncoded, mustEncode("user-y:pass-y"))),
},
Type: v1.SecretTypeDockercfg,
},
@ -146,7 +188,7 @@ func TestLookupHelmEncodedSecret(t *testing.T) {
},
AvailableSecret: &v1.Secret{
Data: map[string][]byte{
dockerConfigJSONKey: []byte(secretDataPayload),
dockerConfigKey: []byte(secretDataPayload),
},
Type: v1.SecretTypeDockercfg,
},
@ -265,3 +307,40 @@ func Test_decodeBase64Secret(t *testing.T) {
})
}
}
// Test_hostname exercises hostname with both a URL-style registry
// reference and a bare host name.
func Test_hostname(t *testing.T) {
	cases := []struct {
		name     string
		registry string
		want     string
		wantErr  bool
	}{
		{name: "dockerhub", registry: "https://index.docker.io/v1/", want: "index.docker.io", wantErr: false},
		{name: "quay", registry: "quay.io", want: "quay.io", wantErr: false},
	}
	for _, c := range cases {
		t.Run(c.name, func(t *testing.T) {
			got, err := hostname(c.registry)
			if gotErr := err != nil; gotErr != c.wantErr {
				t.Errorf("hostname() error = %v, wantErr %v", err, c.wantErr)
				return
			}
			if got != c.want {
				t.Errorf("hostname() = %v, want %v", got, c.want)
			}
		})
	}
}

View File

@ -9,6 +9,7 @@ package types
import (
"bytes"
"fmt"
"strings"
"time"
)
@ -224,6 +225,46 @@ const (
LevelFatal
)
// ParseLevel takes a string level and returns notification level constant.
// Matching is case-insensitive; unknown values yield the zero Level and
// an error.
func ParseLevel(lvl string) (Level, error) {
	known := map[string]Level{
		"fatal":   LevelFatal,
		"error":   LevelError,
		"warn":    LevelWarn,
		"warning": LevelWarn,
		"info":    LevelInfo,
		"success": LevelSuccess,
		"debug":   LevelDebug,
	}
	if level, ok := known[strings.ToLower(lvl)]; ok {
		return level, nil
	}
	var none Level
	return none, fmt.Errorf("not a valid notification Level: %q", lvl)
}
// String returns the lower-case textual name of the level (e.g. "info");
// values outside the known set map to "unknown".
func (l Level) String() string {
switch l {
case LevelDebug:
return "debug"
case LevelInfo:
return "info"
case LevelSuccess:
return "success"
case LevelWarn:
return "warn"
case LevelError:
return "error"
case LevelFatal:
return "fatal"
default:
return "unknown"
}
}
// Color - used to assign different colors for events
func (l Level) Color() string {
switch l {

54
vendor/cloud.google.com/go/MIGRATION.md generated vendored Normal file
View File

@ -0,0 +1,54 @@
# Code Changes
## v0.10.0
- pubsub: Replace
```
sub.ModifyPushConfig(ctx, pubsub.PushConfig{Endpoint: "https://example.com/push"})
```
with
```
sub.Update(ctx, pubsub.SubscriptionConfigToUpdate{
PushConfig: &pubsub.PushConfig{Endpoint: "https://example.com/push"},
})
```
- trace: traceGRPCServerInterceptor will be provided from *trace.Client.
Given an initialized `*trace.Client` named `tc`, instead of
```
s := grpc.NewServer(grpc.UnaryInterceptor(trace.GRPCServerInterceptor(tc)))
```
write
```
s := grpc.NewServer(grpc.UnaryInterceptor(tc.GRPCServerInterceptor()))
```
- trace: trace.GRPCClientInterceptor will also be provided from *trace.Client.
Instead of
```
conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(trace.GRPCClientInterceptor()))
```
write
```
conn, err := grpc.Dial(srv.Addr, grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor()))
```
- trace: We removed the deprecated `trace.EnableGRPCTracing`. Use the gRPC
interceptor as a dial option as shown below when initializing Cloud package
clients:
```
c, err := pubsub.NewClient(ctx, "project-id", option.WithGRPCDialOption(grpc.WithUnaryInterceptor(tc.GRPCClientInterceptor())))
if err != nil {
...
}
```

141
vendor/cloud.google.com/go/README.md generated vendored
View File

@ -33,21 +33,110 @@ make backwards-incompatible changes.
## News
_March 17, 2017_
_September 28, 2017_
Breaking Pubsub changes.
* Publish is now asynchronous
([announcement](https://groups.google.com/d/topic/google-api-go-announce/aaqRDIQ3rvU/discussion)).
* Subscription.Pull replaced by Subscription.Receive, which takes a callback ([announcement](https://groups.google.com/d/topic/google-api-go-announce/8pt6oetAdKc/discussion)).
* Message.Done replaced with Message.Ack and Message.Nack.
*v0.14.0*
_February 14, 2017_
- bigquery BREAKING CHANGES:
- Standard SQL is the default for queries and views.
- `Table.Create` takes `TableMetadata` as a second argument, instead of
options.
- `Dataset.Create` takes `DatasetMetadata` as a second argument.
- `DatasetMetadata` field `ID` renamed to `FullID`
- `TableMetadata` field `ID` renamed to `FullID`
Release of a client library for Spanner. See
the
[blog post](https://cloudplatform.googleblog.com/2017/02/introducing-Cloud-Spanner-a-global-database-service-for-mission-critical-applications.html).
- Other bigquery changes:
- The client will append a random suffix to a provided job ID if you set
`AddJobIDSuffix` to true in a job config.
- Listing jobs is supported.
- Better retry logic.
Note that although the Spanner service is beta, the Go client library is alpha.
- vision, language, speech: clients are now stable
- monitoring: client is now beta
- profiler:
- Rename InstanceName to Instance, ZoneName to Zone
- Auto-detect service name and version on AppEngine.
_September 8, 2017_
*v0.13.0*
- bigquery: UseLegacySQL options for CreateTable and QueryConfig. Use these
options to continue using Legacy SQL after the client switches its default
to Standard SQL.
- bigquery: Support for updating dataset labels.
- bigquery: Set DatasetIterator.ProjectID to list datasets in a project other
than the client's. DatasetsInProject is no longer needed and is deprecated.
- bigtable: Fail ListInstances when any zones fail.
- spanner: support decoding of slices of basic types (e.g. []string, []int64,
etc.)
- logging/logadmin: UpdateSink no longer creates a sink if it is missing
(actually a change to the underlying service, not the client)
- profiler: Service and ServiceVersion replace Target in Config.
_August 22, 2017_
*v0.12.0*
- pubsub: Subscription.Receive now uses streaming pull.
- pubsub: add Client.TopicInProject to access topics in a different project
than the client.
- errors: renamed errorreporting. The errors package will be removed shortly.
- datastore: improved retry behavior.
- bigquery: support updates to dataset metadata, with etags.
- bigquery: add etag support to Table.Update (BREAKING: etag argument added).
- bigquery: generate all job IDs on the client.
- storage: support bucket lifecycle configurations.
_July 31, 2017_
*v0.11.0*
- Clients for spanner, pubsub and video are now in beta.
- New client for DLP.
- spanner: performance and testing improvements.
- storage: requester-pays buckets are supported.
- storage, profiler, bigtable, bigquery: bug fixes and other minor improvements.
- pubsub: bug fixes and other minor improvements
_June 17, 2017_
*v0.10.0*
- pubsub: Subscription.ModifyPushConfig replaced with Subscription.Update.
- pubsub: Subscription.Receive now runs concurrently for higher throughput.
- vision: cloud.google.com/go/vision is deprecated. Use
cloud.google.com/go/vision/apiv1 instead.
- translation: now stable.
- trace: several changes to the surface. See the link below.
[Code changes required from v0.9.0.](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/MIGRATION.md)
[Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md)
@ -61,13 +150,16 @@ Google API | Status | Package
[Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
[BigQuery][cloud-bigquery] | beta | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]
[Logging][cloud-logging] | stable | [`cloud.google.com/go/logging`][cloud-logging-ref]
[Monitoring][cloud-monitoring] | alpha | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref]
[Pub/Sub][cloud-pubsub] | alpha | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
[Vision][cloud-vision] | beta | [`cloud.google.com/go/vision`][cloud-vision-ref]
[Language][cloud-language] | beta | [`cloud.google.com/go/language/apiv1`][cloud-language-ref]
[Speech][cloud-speech] | beta | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
[Spanner][cloud-spanner] | alpha | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
[Translation][cloud-translation] | beta | [`cloud.google.com/go/translate`][cloud-translation-ref]
[Monitoring][cloud-monitoring] | beta | [`cloud.google.com/go/monitoring/apiv3`][cloud-monitoring-ref]
[Pub/Sub][cloud-pubsub] | beta | [`cloud.google.com/go/pubsub`][cloud-pubsub-ref]
[Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref]
[Language][cloud-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-language-ref]
[Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
[Spanner][cloud-spanner] | beta | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
[Translation][cloud-translation] | stable | [`cloud.google.com/go/translate`][cloud-translation-ref]
[Trace][cloud-trace] | alpha | [`cloud.google.com/go/trace`][cloud-trace-ref]
[Video Intelligence][cloud-video]| beta | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref]
[ErrorReporting][cloud-errors] | alpha | [`cloud.google.com/go/errorreporting`][cloud-errors-ref]
> **Alpha status**: the API is still being actively developed. As a
@ -411,8 +503,8 @@ for more information.
[cloud-monitoring]: https://cloud.google.com/monitoring/
[cloud-monitoring-ref]: https://godoc.org/cloud.google.com/go/monitoring/apiv3
[cloud-vision]: https://cloud.google.com/vision/
[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision
[cloud-vision]: https://cloud.google.com/vision
[cloud-vision-ref]: https://godoc.org/cloud.google.com/go/vision/apiv1
[cloud-language]: https://cloud.google.com/natural-language
[cloud-language-ref]: https://godoc.org/cloud.google.com/go/language/apiv1
@ -427,4 +519,13 @@ for more information.
[cloud-translation]: https://cloud.google.com/translation
[cloud-translation-ref]: https://godoc.org/cloud.google.com/go/translation
[cloud-trace]: https://cloud.google.com/trace/
[cloud-trace-ref]: https://godoc.org/cloud.google.com/go/trace
[cloud-video]: https://cloud.google.com/video-intelligence/
[cloud-video-ref]: https://godoc.org/cloud.google.com/go/videointelligence/apiv1beta1
[cloud-errors]: https://cloud.google.com/error-reporting/
[cloud-errors-ref]: https://godoc.org/cloud.google.com/go/errorreporting
[default-creds]: https://developers.google.com/identity/protocols/application-default-credentials

View File

@ -20,7 +20,7 @@ import (
"fmt"
"google.golang.org/api/option"
"google.golang.org/api/transport"
htransport "google.golang.org/api/transport/http"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
@ -51,7 +51,7 @@ func NewClient(ctx context.Context, projectID string, opts ...option.ClientOptio
option.WithUserAgent(userAgent),
}
o = append(o, opts...)
httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...)
httpClient, endpoint, err := htransport.NewClient(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}

View File

@ -21,9 +21,12 @@ import (
// CopyConfig holds the configuration for a copy job.
type CopyConfig struct {
// JobID is the ID to use for the copy job. If unset, a job ID will be automatically created.
// JobID is the ID to use for the job. If empty, a random job ID will be generated.
JobID string
// If AddJobIDSuffix is true, then a random string will be appended to JobID.
AddJobIDSuffix bool
// Srcs are the tables from which data will be copied.
Srcs []*Table
@ -68,7 +71,9 @@ func (c *Copier) Run(ctx context.Context) (*Job, error) {
for _, t := range c.Srcs {
conf.SourceTables = append(conf.SourceTables, t.tableRefProto())
}
job := &bq.Job{Configuration: &bq.JobConfiguration{Copy: conf}}
setJobRef(job, c.JobID, c.c.projectID)
job := &bq.Job{
JobReference: createJobRef(c.JobID, c.AddJobIDSuffix, c.c.projectID),
Configuration: &bq.JobConfiguration{Copy: conf},
}
return c.c.insertJob(ctx, &insertJobConf{job: job})
}

View File

@ -15,7 +15,6 @@
package bigquery
import (
"reflect"
"testing"
"golang.org/x/net/context"
@ -24,6 +23,7 @@ import (
func defaultCopyJob() *bq.Job {
return &bq.Job{
JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
Configuration: &bq.JobConfiguration{
Copy: &bq.JobConfigurationTableCopy{
DestinationTable: &bq.TableReference{
@ -44,6 +44,7 @@ func defaultCopyJob() *bq.Job {
}
func TestCopy(t *testing.T) {
defer fixRandomJobID("RANDOM")()
testCases := []struct {
dst *Table
srcs []*Table
@ -105,16 +106,13 @@ func TestCopy(t *testing.T) {
config: CopyConfig{JobID: "job-id"},
want: func() *bq.Job {
j := defaultCopyJob()
j.JobReference = &bq.JobReference{
JobId: "job-id",
ProjectId: "client-project-id",
}
j.JobReference.JobId = "job-id"
return j
}(),
},
}
for _, tc := range testCases {
for i, tc := range testCases {
s := &testService{}
c := &Client{
service: s,
@ -126,11 +124,9 @@ func TestCopy(t *testing.T) {
tc.config.Dst = tc.dst
copier.CopyConfig = tc.config
if _, err := copier.Run(context.Background()); err != nil {
t.Errorf("err calling Run: %v", err)
t.Errorf("#%d: err calling Run: %v", i, err)
continue
}
if !reflect.DeepEqual(s.Job, tc.want) {
t.Errorf("copying: got:\n%v\nwant:\n%v", s.Job, tc.want)
}
checkJob(t, i, s.Job, tc.want)
}
}

View File

@ -1,103 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"reflect"
"testing"
"time"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
type createTableRecorder struct {
conf *createTableConf
service
}
func (rec *createTableRecorder) createTable(ctx context.Context, conf *createTableConf) error {
rec.conf = conf
return nil
}
func TestCreateTableOptions(t *testing.T) {
s := &createTableRecorder{}
c := &Client{
projectID: "p",
service: s,
}
ds := c.Dataset("d")
table := ds.Table("t")
exp := time.Now()
q := "query"
if err := table.Create(context.Background(), TableExpiration(exp), ViewQuery(q), UseStandardSQL()); err != nil {
t.Fatalf("err calling Table.Create: %v", err)
}
want := createTableConf{
projectID: "p",
datasetID: "d",
tableID: "t",
expiration: exp,
viewQuery: q,
useStandardSQL: true,
}
if !reflect.DeepEqual(*s.conf, want) {
t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
}
sc := Schema{fieldSchema("desc", "name", "STRING", false, true)}
if err := table.Create(context.Background(), TableExpiration(exp), sc); err != nil {
t.Fatalf("err calling Table.Create: %v", err)
}
want = createTableConf{
projectID: "p",
datasetID: "d",
tableID: "t",
expiration: exp,
// No need for an elaborate schema, that is tested in schema_test.go.
schema: &bq.TableSchema{
Fields: []*bq.TableFieldSchema{
bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
},
},
}
if !reflect.DeepEqual(*s.conf, want) {
t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
}
partitionCases := []struct {
timePartitioning TimePartitioning
expectedExpiration time.Duration
}{
{TimePartitioning{}, time.Duration(0)},
{TimePartitioning{time.Second}, time.Second},
}
for _, c := range partitionCases {
if err := table.Create(context.Background(), c.timePartitioning); err != nil {
t.Fatalf("err calling Table.Create: %v", err)
}
want = createTableConf{
projectID: "p",
datasetID: "d",
tableID: "t",
timePartitioning: &TimePartitioning{c.expectedExpiration},
}
if !reflect.DeepEqual(*s.conf, want) {
t.Errorf("createTableConf: got:\n%v\nwant:\n%v", *s.conf, want)
}
}
}

View File

@ -17,6 +17,8 @@ package bigquery
import (
"time"
"cloud.google.com/go/internal/optional"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
)
@ -28,18 +30,57 @@ type Dataset struct {
c *Client
}
// DatasetMetadata contains information about a BigQuery dataset.
// (The span previously interleaved the pre- and post-diff field lists,
// declaring several fields twice; only the post-image is kept.)
type DatasetMetadata struct {
	// These fields can be set when creating a dataset.
	Name                   string            // The user-friendly name for this dataset.
	Description            string            // The user-friendly description of this dataset.
	Location               string            // The geo location of the dataset.
	DefaultTableExpiration time.Duration     // The default expiration time for new tables.
	Labels                 map[string]string // User-provided labels.

	// These fields are read-only.
	CreationTime     time.Time
	LastModifiedTime time.Time // When the dataset or any of its tables were modified.
	FullID           string    // The full dataset ID in the form projectID:datasetID.

	// ETag is the ETag obtained when reading metadata. Pass it to Dataset.Update to
	// ensure that the metadata hasn't changed since it was read.
	ETag string
	// TODO(jba): access rules
}
// DatasetMetadataToUpdate is used when updating a dataset's metadata.
// Only non-nil fields will be updated.
type DatasetMetadataToUpdate struct {
Description optional.String // The user-friendly description of this dataset.
Name optional.String // The user-friendly name for this dataset.
// DefaultTableExpiration is the default expiration time for new tables.
// If set to time.Duration(0), new tables never expire.
DefaultTableExpiration optional.Duration
// setLabels holds labels to add or change on the next update; populated
// via SetLabel.
setLabels map[string]string
// deleteLabels holds labels to remove on the next update; populated via
// DeleteLabel.
deleteLabels map[string]bool
}
// SetLabel causes a label to be added or modified when dm is used
// in a call to Dataset.Update.
func (dm *DatasetMetadataToUpdate) SetLabel(name, value string) {
if dm.setLabels == nil {
dm.setLabels = map[string]string{}
}
dm.setLabels[name] = value
}
// DeleteLabel causes a label to be deleted when dm is used in a
// call to Dataset.Update.
func (dm *DatasetMetadataToUpdate) DeleteLabel(name string) {
if dm.deleteLabels == nil {
dm.deleteLabels = map[string]bool{}
}
dm.deleteLabels[name] = true
}
// Dataset creates a handle to a BigQuery dataset in the client's project.
func (c *Client) Dataset(id string) *Dataset {
return c.DatasetInProject(c.projectID, id)
@ -54,10 +95,10 @@ func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset {
}
}
// Create creates a dataset in the BigQuery service. An error will be returned
// if the dataset already exists.
func (d *Dataset) Create(ctx context.Context) error {
return d.c.service.insertDataset(ctx, d.DatasetID, d.ProjectID)
// Create creates a dataset in the BigQuery service. An error will be returned if the
// dataset already exists. Pass in a DatasetMetadata value to configure the dataset.
func (d *Dataset) Create(ctx context.Context, md *DatasetMetadata) error {
return d.c.service.insertDataset(ctx, d.DatasetID, d.ProjectID, md)
}
// Delete deletes the dataset.
@ -70,6 +111,14 @@ func (d *Dataset) Metadata(ctx context.Context) (*DatasetMetadata, error) {
return d.c.service.getDatasetMetadata(ctx, d.ProjectID, d.DatasetID)
}
// Update modifies specific Dataset metadata fields.
// To perform a read-modify-write that protects against intervening reads,
// set the etag argument to the DatasetMetadata.ETag field from the read.
// Pass the empty string for etag for a "blind write" that will always succeed.
func (d *Dataset) Update(ctx context.Context, dm DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error) {
return d.c.service.patchDataset(ctx, d.ProjectID, d.DatasetID, &dm, etag)
}
// Table creates a handle to a BigQuery table in the dataset.
// To determine if a table exists, call Table.Metadata.
// If the table does not already exist, use Table.Create to create it.
@ -126,17 +175,21 @@ func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) {
return tok, nil
}
// Datasets returns an iterator over the datasets in the Client's project.
// Datasets returns an iterator over the datasets in a project.
// The Client's project is used by default, but that can be
// changed by setting ProjectID on the returned iterator before calling Next.
func (c *Client) Datasets(ctx context.Context) *DatasetIterator {
return c.DatasetsInProject(ctx, c.projectID)
}
// DatasetsInProject returns an iterator over the datasets in the provided project.
//
// Deprecated: call Client.Datasets, then set ProjectID on the returned iterator.
func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *DatasetIterator {
it := &DatasetIterator{
ctx: ctx,
c: c,
projectID: projectID,
ProjectID: projectID,
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,
@ -148,18 +201,23 @@ func (c *Client) DatasetsInProject(ctx context.Context, projectID string) *Datas
// DatasetIterator iterates over the datasets in a project.
type DatasetIterator struct {
// ListHidden causes hidden datasets to be listed when set to true.
// Set before the first call to Next.
ListHidden bool
// Filter restricts the datasets returned by label. The filter syntax is described in
// https://cloud.google.com/bigquery/docs/labeling-datasets#filtering_datasets_using_labels
// Set before the first call to Next.
Filter string
ctx context.Context
projectID string
c *Client
pageInfo *iterator.PageInfo
nextFunc func() error
items []*Dataset
// The project ID of the listed datasets.
// Set before the first call to Next.
ProjectID string
ctx context.Context
c *Client
pageInfo *iterator.PageInfo
nextFunc func() error
items []*Dataset
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
@ -175,7 +233,7 @@ func (it *DatasetIterator) Next() (*Dataset, error) {
}
func (it *DatasetIterator) fetch(pageSize int, pageToken string) (string, error) {
datasets, nextPageToken, err := it.c.service.listDatasets(it.ctx, it.projectID,
datasets, nextPageToken, err := it.c.service.listDatasets(it.ctx, it.ProjectID,
pageSize, pageToken, it.ListHidden, it.Filter)
if err != nil {
return "", err

View File

@ -16,10 +16,11 @@ package bigquery
import (
"errors"
"reflect"
"strings"
"testing"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
)
@ -94,7 +95,7 @@ func TestErrorFromErrorProto(t *testing.T) {
want: &Error{Location: "L", Message: "M", Reason: "R"},
},
} {
if got := errorFromErrorProto(test.in); !reflect.DeepEqual(got, test.want) {
if got := errorFromErrorProto(test.in); !testutil.Equal(got, test.want) {
t.Errorf("%v: got %v, want %v", test.in, got, test.want)
}
}

View File

@ -17,6 +17,7 @@ package bigquery_test
import (
"fmt"
"os"
"time"
"cloud.google.com/go/bigquery"
"golang.org/x/net/context"
@ -233,7 +234,8 @@ func ExampleDataset_Create() {
if err != nil {
// TODO: Handle error.
}
if err := client.Dataset("my_dataset").Create(ctx); err != nil {
ds := client.Dataset("my_dataset")
if err := ds.Create(ctx, &bigquery.DatasetMetadata{Location: "EU"}); err != nil {
// TODO: Handle error.
}
}
@ -262,6 +264,44 @@ func ExampleDataset_Metadata() {
fmt.Println(md)
}
// This example illustrates how to perform a read-modify-write sequence on dataset
// metadata. Passing the metadata's ETag to the Update call ensures that the call
// will fail if the metadata was changed since the read.
func ExampleDataset_Update_readModifyWrite() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
ds := client.Dataset("my_dataset")
md, err := ds.Metadata(ctx)
if err != nil {
// TODO: Handle error.
}
md2, err := ds.Update(ctx,
bigquery.DatasetMetadataToUpdate{Name: "new " + md.Name},
md.ETag)
if err != nil {
// TODO: Handle error.
}
fmt.Println(md2)
}
// To perform a blind write, ignoring the existing state (and possibly overwriting
// other updates), pass the empty string as the etag.
func ExampleDataset_Update_blindWrite() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
md, err := client.Dataset("my_dataset").Update(ctx, bigquery.DatasetMetadataToUpdate{Name: "blind"}, "")
if err != nil {
// TODO: Handle error.
}
fmt.Println(md)
}
func ExampleDataset_Table() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
@ -351,12 +391,13 @@ func ExampleTable_Create() {
// TODO: Handle error.
}
t := client.Dataset("my_dataset").Table("new-table")
if err := t.Create(ctx); err != nil {
if err := t.Create(ctx, nil); err != nil {
// TODO: Handle error.
}
}
func ExampleTable_Create_schema() {
// Initialize a new table by passing TableMetadata to Table.Create.
func ExampleTable_Create_initialize() {
ctx := context.Background()
// Infer table schema from a Go type.
schema, err := bigquery.InferSchema(Item{})
@ -368,7 +409,12 @@ func ExampleTable_Create_schema() {
// TODO: Handle error.
}
t := client.Dataset("my_dataset").Table("new-table")
if err := t.Create(ctx, schema); err != nil {
if err := t.Create(ctx,
&bigquery.TableMetadata{
Name: "My New Table",
Schema: schema,
ExpirationTime: time.Now().Add(24 * time.Hour),
}); err != nil {
// TODO: Handle error.
}
}
@ -476,6 +522,8 @@ func ExampleTable_LoaderFrom() {
}
gcsRef := bigquery.NewGCSReference("gs://my-bucket/my-object")
gcsRef.AllowJaggedRows = true
gcsRef.MaxBadRecords = 5
gcsRef.Schema = schema
// TODO: set other options on the GCSReference.
ds := client.Dataset("my_dataset")
loader := ds.Table("my_table").LoaderFrom(gcsRef)
@ -506,6 +554,8 @@ func ExampleTable_LoaderFrom_reader() {
}
rs := bigquery.NewReaderSource(f)
rs.AllowJaggedRows = true
rs.MaxBadRecords = 5
rs.Schema = schema
// TODO: set other options on the GCSReference.
ds := client.Dataset("my_dataset")
loader := ds.Table("my_table").LoaderFrom(rs)
@ -534,7 +584,32 @@ func ExampleTable_Read() {
_ = it // TODO: iterate using Next or iterator.Pager.
}
func ExampleTable_Update() {
// This example illustrates how to perform a read-modify-write sequence on table
// metadata. Passing the metadata's ETag to the Update call ensures that the call
// will fail if the metadata was changed since the read.
func ExampleTable_Update_readModifyWrite() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
// TODO: Handle error.
}
t := client.Dataset("my_dataset").Table("my_table")
md, err := t.Metadata(ctx)
if err != nil {
// TODO: Handle error.
}
md2, err := t.Update(ctx,
bigquery.TableMetadataToUpdate{Name: "new " + md.Name},
md.ETag)
if err != nil {
// TODO: Handle error.
}
fmt.Println(md2)
}
// To perform a blind write, ignoring the existing state (and possibly overwriting
// other updates), pass the empty string as the etag.
func ExampleTable_Update_blindWrite() {
ctx := context.Background()
client, err := bigquery.NewClient(ctx, "project-id")
if err != nil {
@ -543,7 +618,7 @@ func ExampleTable_Update() {
t := client.Dataset("my_dataset").Table("my_table")
tm, err := t.Update(ctx, bigquery.TableMetadataToUpdate{
Description: "my favorite table",
})
}, "")
if err != nil {
// TODO: Handle error.
}

View File

@ -21,9 +21,12 @@ import (
// ExtractConfig holds the configuration for an extract job.
type ExtractConfig struct {
// JobID is the ID to use for the extract job. If empty, a job ID will be automatically created.
// JobID is the ID to use for the job. If empty, a random job ID will be generated.
JobID string
// If AddJobIDSuffix is true, then a random string will be appended to JobID.
AddJobIDSuffix bool
// Src is the table from which data will be extracted.
Src *Table
@ -55,22 +58,23 @@ func (t *Table) ExtractorTo(dst *GCSReference) *Extractor {
// Run initiates an extract job.
func (e *Extractor) Run(ctx context.Context) (*Job, error) {
conf := &bq.JobConfigurationExtract{}
job := &bq.Job{Configuration: &bq.JobConfiguration{Extract: conf}}
setJobRef(job, e.JobID, e.c.projectID)
conf.DestinationUris = append([]string{}, e.Dst.uris...)
conf.Compression = string(e.Dst.Compression)
conf.DestinationFormat = string(e.Dst.DestinationFormat)
conf.FieldDelimiter = e.Dst.FieldDelimiter
conf.SourceTable = e.Src.tableRefProto()
var printHeader *bool
if e.DisableHeader {
f := false
conf.PrintHeader = &f
printHeader = &f
}
job := &bq.Job{
JobReference: createJobRef(e.JobID, e.AddJobIDSuffix, e.c.projectID),
Configuration: &bq.JobConfiguration{
Extract: &bq.JobConfigurationExtract{
DestinationUris: append([]string{}, e.Dst.uris...),
Compression: string(e.Dst.Compression),
DestinationFormat: string(e.Dst.DestinationFormat),
FieldDelimiter: e.Dst.FieldDelimiter,
SourceTable: e.Src.tableRefProto(),
PrintHeader: printHeader,
},
},
}
return e.c.insertJob(ctx, &insertJobConf{job: job})
}

View File

@ -15,7 +15,6 @@
package bigquery
import (
"reflect"
"testing"
"golang.org/x/net/context"
@ -25,10 +24,11 @@ import (
func defaultExtractJob() *bq.Job {
return &bq.Job{
JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
Configuration: &bq.JobConfiguration{
Extract: &bq.JobConfigurationExtract{
SourceTable: &bq.TableReference{
ProjectId: "project-id",
ProjectId: "client-project-id",
DatasetId: "dataset-id",
TableId: "table-id",
},
@ -39,10 +39,11 @@ func defaultExtractJob() *bq.Job {
}
func TestExtract(t *testing.T) {
defer fixRandomJobID("RANDOM")()
s := &testService{}
c := &Client{
service: s,
projectID: "project-id",
projectID: "client-project-id",
}
testCases := []struct {
@ -86,17 +87,15 @@ func TestExtract(t *testing.T) {
},
}
for _, tc := range testCases {
for i, tc := range testCases {
ext := tc.src.ExtractorTo(tc.dst)
tc.config.Src = ext.Src
tc.config.Dst = ext.Dst
ext.ExtractConfig = tc.config
if _, err := ext.Run(context.Background()); err != nil {
t.Errorf("err calling extract: %v", err)
t.Errorf("#%d: err calling extract: %v", i, err)
continue
}
if !reflect.DeepEqual(s.Job, tc.want) {
t.Errorf("extracting: got:\n%v\nwant:\n%v", s.Job, tc.want)
}
checkJob(t, i, s.Job, tc.want)
}
}

View File

@ -15,10 +15,10 @@
package bigquery
import (
"reflect"
"testing"
"cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
)
@ -84,7 +84,7 @@ func TestPopulateLoadConfig(t *testing.T) {
}
got := &bq.JobConfigurationLoad{}
fc.populateLoadConfig(got)
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("got:\n%v\nwant:\n%v", pretty.Value(got), pretty.Value(want))
}
}

View File

@ -21,7 +21,6 @@ import (
"log"
"net/http"
"os"
"reflect"
"sort"
"strings"
"testing"
@ -44,9 +43,13 @@ var (
dataset *Dataset
schema = Schema{
{Name: "name", Type: StringFieldType},
{Name: "num", Type: IntegerFieldType},
{Name: "nums", Type: IntegerFieldType, Repeated: true},
{Name: "rec", Type: RecordFieldType, Schema: Schema{
{Name: "bool", Type: BooleanFieldType},
}},
}
testTableExpiration time.Time
datasetIDs = testutil.NewUIDSpace("dataset")
)
func TestMain(m *testing.M) {
@ -80,13 +83,13 @@ func initIntegrationTest() {
log.Fatalf("NewClient: %v", err)
}
dataset = client.Dataset("bigquery_integration_test")
if err := dataset.Create(ctx); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409
if err := dataset.Create(ctx, nil); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409
log.Fatalf("creating dataset: %v", err)
}
testTableExpiration = time.Now().Add(10 * time.Minute).Round(time.Second)
}
func TestIntegration_Create(t *testing.T) {
func TestIntegration_TableCreate(t *testing.T) {
// Check that creating a record field with an empty schema is an error.
if client == nil {
t.Skip("Integration tests skipped")
@ -95,7 +98,10 @@ func TestIntegration_Create(t *testing.T) {
schema := Schema{
{Name: "rec", Type: RecordFieldType, Schema: Schema{}},
}
err := table.Create(context.Background(), schema, TableExpiration(time.Now().Add(5*time.Minute)))
err := table.Create(context.Background(), &TableMetadata{
Schema: schema,
ExpirationTime: time.Now().Add(5 * time.Minute),
})
if err == nil {
t.Fatal("want error, got nil")
}
@ -104,7 +110,7 @@ func TestIntegration_Create(t *testing.T) {
}
}
func TestIntegration_CreateView(t *testing.T) {
func TestIntegration_TableCreateView(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
}
@ -114,8 +120,12 @@ func TestIntegration_CreateView(t *testing.T) {
// Test that standard SQL views work.
view := dataset.Table("t_view_standardsql")
query := ViewQuery(fmt.Sprintf("SELECT APPROX_COUNT_DISTINCT(name) FROM `%s.%s.%s`", dataset.ProjectID, dataset.DatasetID, table.TableID))
err := view.Create(context.Background(), UseStandardSQL(), query)
query := fmt.Sprintf("SELECT APPROX_COUNT_DISTINCT(name) FROM `%s.%s.%s`",
dataset.ProjectID, dataset.DatasetID, table.TableID)
err := view.Create(context.Background(), &TableMetadata{
ViewQuery: query,
UseStandardSQL: true,
})
if err != nil {
t.Fatalf("table.create: Did not expect an error, got: %v", err)
}
@ -135,8 +145,8 @@ func TestIntegration_TableMetadata(t *testing.T) {
t.Fatal(err)
}
// TODO(jba): check md more thorougly.
if got, want := md.ID, fmt.Sprintf("%s:%s.%s", dataset.ProjectID, dataset.DatasetID, table.TableID); got != want {
t.Errorf("metadata.ID: got %q, want %q", got, want)
if got, want := md.FullID, fmt.Sprintf("%s:%s.%s", dataset.ProjectID, dataset.DatasetID, table.TableID); got != want {
t.Errorf("metadata.FullID: got %q, want %q", got, want)
}
if got, want := md.Type, RegularTable; got != want {
t.Errorf("metadata.Type: got %v, want %v", got, want)
@ -160,7 +170,11 @@ func TestIntegration_TableMetadata(t *testing.T) {
}
for i, c := range partitionCases {
table := dataset.Table(fmt.Sprintf("t_metadata_partition_%v", i))
err = table.Create(context.Background(), schema, c.timePartitioning, TableExpiration(time.Now().Add(5*time.Minute)))
err = table.Create(context.Background(), &TableMetadata{
Schema: schema,
TimePartitioning: &c.timePartitioning,
ExpirationTime: time.Now().Add(5 * time.Minute),
})
if err != nil {
t.Fatal(err)
}
@ -172,12 +186,39 @@ func TestIntegration_TableMetadata(t *testing.T) {
got := md.TimePartitioning
want := &TimePartitioning{c.expectedExpiration}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("metadata.TimePartitioning: got %v, want %v", got, want)
}
}
}
func TestIntegration_DatasetCreate(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
}
ctx := context.Background()
uid := strings.Replace(datasetIDs.New(), "-", "_", -1)
ds := client.Dataset(uid)
wmd := &DatasetMetadata{Name: "name", Location: "EU"}
err := ds.Create(ctx, wmd)
if err != nil {
t.Fatal(err)
}
gmd, err := ds.Metadata(ctx)
if err != nil {
t.Fatal(err)
}
if got, want := gmd.Name, wmd.Name; got != want {
t.Errorf("name: got %q, want %q", got, want)
}
if got, want := gmd.Location, wmd.Location; got != want {
t.Errorf("location: got %q, want %q", got, want)
}
if err := ds.Delete(ctx); err != nil {
t.Fatalf("deleting dataset %s: %v", ds, err)
}
}
func TestIntegration_DatasetMetadata(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
@ -187,8 +228,8 @@ func TestIntegration_DatasetMetadata(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if got, want := md.ID, fmt.Sprintf("%s:%s", dataset.ProjectID, dataset.DatasetID); got != want {
t.Errorf("ID: got %q, want %q", got, want)
if got, want := md.FullID, fmt.Sprintf("%s:%s", dataset.ProjectID, dataset.DatasetID); got != want {
t.Errorf("FullID: got %q, want %q", got, want)
}
jan2016 := time.Date(2016, 1, 1, 0, 0, 0, 0, time.UTC)
if md.CreationTime.Before(jan2016) {
@ -211,7 +252,7 @@ func TestIntegration_DatasetDelete(t *testing.T) {
}
ctx := context.Background()
ds := client.Dataset("delete_test")
if err := ds.Create(ctx); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409
if err := ds.Create(ctx, nil); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409
t.Fatalf("creating dataset %s: %v", ds, err)
}
if err := ds.Delete(ctx); err != nil {
@ -219,6 +260,117 @@ func TestIntegration_DatasetDelete(t *testing.T) {
}
}
func TestIntegration_DatasetUpdateETags(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
}
check := func(md *DatasetMetadata, wantDesc, wantName string) {
if md.Description != wantDesc {
t.Errorf("description: got %q, want %q", md.Description, wantDesc)
}
if md.Name != wantName {
t.Errorf("name: got %q, want %q", md.Name, wantName)
}
}
ctx := context.Background()
md, err := dataset.Metadata(ctx)
if err != nil {
t.Fatal(err)
}
if md.ETag == "" {
t.Fatal("empty ETag")
}
// Write without ETag succeeds.
desc := md.Description + "d2"
name := md.Name + "n2"
md2, err := dataset.Update(ctx, DatasetMetadataToUpdate{Description: desc, Name: name}, "")
if err != nil {
t.Fatal(err)
}
check(md2, desc, name)
// Write with original ETag fails because of intervening write.
_, err = dataset.Update(ctx, DatasetMetadataToUpdate{Description: "d", Name: "n"}, md.ETag)
if err == nil {
t.Fatal("got nil, want error")
}
// Write with most recent ETag succeeds.
md3, err := dataset.Update(ctx, DatasetMetadataToUpdate{Description: "", Name: ""}, md2.ETag)
if err != nil {
t.Fatal(err)
}
check(md3, "", "")
}
func TestIntegration_DatasetUpdateDefaultExpiration(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
}
ctx := context.Background()
md, err := dataset.Metadata(ctx)
if err != nil {
t.Fatal(err)
}
// Set the default expiration time.
md, err = dataset.Update(ctx, DatasetMetadataToUpdate{DefaultTableExpiration: time.Hour}, "")
if err != nil {
t.Fatal(err)
}
if md.DefaultTableExpiration != time.Hour {
t.Fatalf("got %s, want 1h", md.DefaultTableExpiration)
}
// Omitting DefaultTableExpiration doesn't change it.
md, err = dataset.Update(ctx, DatasetMetadataToUpdate{Name: "xyz"}, "")
if err != nil {
t.Fatal(err)
}
if md.DefaultTableExpiration != time.Hour {
t.Fatalf("got %s, want 1h", md.DefaultTableExpiration)
}
// Setting it to 0 deletes it (which looks like a 0 duration).
md, err = dataset.Update(ctx, DatasetMetadataToUpdate{DefaultTableExpiration: time.Duration(0)}, "")
if err != nil {
t.Fatal(err)
}
if md.DefaultTableExpiration != 0 {
t.Fatalf("got %s, want 0", md.DefaultTableExpiration)
}
}
func TestIntegration_DatasetUpdateLabels(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
}
ctx := context.Background()
md, err := dataset.Metadata(ctx)
if err != nil {
t.Fatal(err)
}
// TODO(jba): use a separate dataset for each test run so
// tests don't interfere with each other.
var dm DatasetMetadataToUpdate
dm.SetLabel("label", "value")
md, err = dataset.Update(ctx, dm, "")
if err != nil {
t.Fatal(err)
}
if got, want := md.Labels["label"], "value"; got != want {
t.Errorf("got %q, want %q", got, want)
}
dm = DatasetMetadataToUpdate{}
dm.DeleteLabel("label")
md, err = dataset.Update(ctx, dm, "")
if err != nil {
t.Fatal(err)
}
if _, ok := md.Labels["label"]; ok {
t.Error("label still present after deletion")
}
}
func TestIntegration_Tables(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
@ -275,7 +427,7 @@ func TestIntegration_UploadAndRead(t *testing.T) {
saverRows []*ValuesSaver
)
for i, name := range []string{"a", "b", "c"} {
row := []Value{name, int64(i)}
row := []Value{name, []Value{int64(i)}, []Value{true}}
wantRows = append(wantRows, row)
saverRows = append(saverRows, &ValuesSaver{
Schema: schema,
@ -297,7 +449,8 @@ func TestIntegration_UploadAndRead(t *testing.T) {
checkRead(t, "upload", table.Read(ctx), wantRows)
// Query the table.
q := client.Query(fmt.Sprintf("select name, num from %s", table.TableID))
q := client.Query(fmt.Sprintf("select name, nums, rec from %s", table.TableID))
q.UseStandardSQL = true
q.DefaultProjectID = dataset.ProjectID
q.DefaultDatasetID = dataset.DatasetID
@ -347,7 +500,7 @@ func TestIntegration_UploadAndRead(t *testing.T) {
t.Fatal(err)
}
want := []Value(vl)
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("%d: got %v, want %v", i, got, want)
}
}
@ -362,9 +515,11 @@ func TestIntegration_UploadAndRead(t *testing.T) {
if got, want := len(vm), len(vl); got != want {
t.Fatalf("valueMap len: got %d, want %d", got, want)
}
// With maps, structs become nested maps.
vl[2] = map[string]Value{"bool": vl[2].([]Value)[0]}
for i, v := range vl {
if got, want := vm[schema[i].Name], v; got != want {
t.Errorf("%d, name=%s: got %v, want %v",
if got, want := vm[schema[i].Name], v; !testutil.Equal(got, want) {
t.Errorf("%d, name=%s: got %#v, want %#v",
i, schema[i].Name, got, want)
}
}
@ -509,7 +664,7 @@ func TestIntegration_UploadAndReadStructs(t *testing.T) {
for i, g := range got {
if i >= len(want) {
t.Errorf("%d: got %v, past end of want", i, pretty.Value(g))
} else if w := want[i]; !reflect.DeepEqual(g, w) {
} else if w := want[i]; !testutil.Equal(g, w) {
t.Errorf("%d: got %v, want %v", i, pretty.Value(g), pretty.Value(w))
}
}
@ -521,7 +676,7 @@ func (b byName) Len() int { return len(b) }
func (b byName) Swap(i, j int) { b[i], b[j] = b[j], b[i] }
func (b byName) Less(i, j int) bool { return b[i].Name < b[j].Name }
func TestIntegration_Update(t *testing.T) {
func TestIntegration_TableUpdate(t *testing.T) {
if client == nil {
t.Skip("Integration tests skipped")
}
@ -536,10 +691,12 @@ func TestIntegration_Update(t *testing.T) {
}
wantDescription := tm.Description + "more"
wantName := tm.Name + "more"
wantExpiration := tm.ExpirationTime.Add(time.Hour * 24)
got, err := table.Update(ctx, TableMetadataToUpdate{
Description: wantDescription,
Name: wantName,
})
Description: wantDescription,
Name: wantName,
ExpirationTime: wantExpiration,
}, tm.ETag)
if err != nil {
t.Fatal(err)
}
@ -549,10 +706,24 @@ func TestIntegration_Update(t *testing.T) {
if got.Name != wantName {
t.Errorf("Name: got %q, want %q", got.Name, wantName)
}
if !reflect.DeepEqual(got.Schema, schema) {
if got.ExpirationTime != wantExpiration {
t.Errorf("ExpirationTime: got %q, want %q", got.ExpirationTime, wantExpiration)
}
if !testutil.Equal(got.Schema, schema) {
t.Errorf("Schema: got %v, want %v", pretty.Value(got.Schema), pretty.Value(schema))
}
// Blind write succeeds.
_, err = table.Update(ctx, TableMetadataToUpdate{Name: "x"}, "")
if err != nil {
t.Fatal(err)
}
// Write with old etag fails.
_, err = table.Update(ctx, TableMetadataToUpdate{Name: "y"}, got.ETag)
if err == nil {
t.Fatal("Update with old ETag succeeded, wanted failure")
}
// Test schema update.
// Columns can be added. schema2 is the same as schema, except for the
// added column in the middle.
@ -562,63 +733,57 @@ func TestIntegration_Update(t *testing.T) {
}
schema2 := Schema{
schema[0],
{Name: "rec", Type: RecordFieldType, Schema: nested},
{Name: "rec2", Type: RecordFieldType, Schema: nested},
schema[1],
schema[2],
}
got, err = table.Update(ctx, TableMetadataToUpdate{Schema: schema2})
got, err = table.Update(ctx, TableMetadataToUpdate{Schema: schema2}, "")
if err != nil {
t.Fatal(err)
}
// Wherever you add the column, it appears at the end.
schema3 := Schema{schema2[0], schema2[2], schema2[1]}
if !reflect.DeepEqual(got.Schema, schema3) {
schema3 := Schema{schema2[0], schema2[2], schema2[3], schema2[1]}
if !testutil.Equal(got.Schema, schema3) {
t.Errorf("add field:\ngot %v\nwant %v",
pretty.Value(got.Schema), pretty.Value(schema3))
}
// Updating with the empty schema succeeds, but is a no-op.
got, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema{}})
got, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema{}}, "")
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(got.Schema, schema3) {
if !testutil.Equal(got.Schema, schema3) {
t.Errorf("empty schema:\ngot %v\nwant %v",
pretty.Value(got.Schema), pretty.Value(schema3))
}
// Error cases.
// Error cases when updating schema.
for _, test := range []struct {
desc string
fields []*FieldSchema
}{
{"change from optional to required", []*FieldSchema{
schema3[0],
{Name: "num", Type: IntegerFieldType, Required: true},
{Name: "name", Type: StringFieldType, Required: true},
schema3[1],
schema3[2],
schema3[3],
}},
{"add a required field", []*FieldSchema{
schema3[0], schema3[1], schema3[2],
schema3[0], schema3[1], schema3[2], schema3[3],
{Name: "req", Type: StringFieldType, Required: true},
}},
{"remove a field", []*FieldSchema{schema3[0], schema3[1]}},
{"remove a field", []*FieldSchema{schema3[0], schema3[1], schema3[2]}},
{"remove a nested field", []*FieldSchema{
schema3[0], schema3[1],
{Name: "rec", Type: RecordFieldType, Schema: Schema{nested[0]}}}},
schema3[0], schema3[1], schema3[2],
{Name: "rec2", Type: RecordFieldType, Schema: Schema{nested[0]}}}},
{"remove all nested fields", []*FieldSchema{
schema3[0], schema3[1],
{Name: "rec", Type: RecordFieldType, Schema: Schema{}}}},
schema3[0], schema3[1], schema3[2],
{Name: "rec2", Type: RecordFieldType, Schema: Schema{}}}},
} {
for {
_, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema(test.fields)})
if !hasStatusCode(err, 403) {
break
}
// We've hit the rate limit for updates. Wait a bit and retry.
t.Logf("%s: retrying after getting %v", test.desc, err)
time.Sleep(4 * time.Second)
}
_, err = table.Update(ctx, TableMetadataToUpdate{Schema: Schema(test.fields)}, "")
if err == nil {
t.Errorf("%s: want error, got nil", test.desc)
} else if !hasStatusCode(err, 400) {
@ -632,7 +797,11 @@ func TestIntegration_Load(t *testing.T) {
t.Skip("Integration tests skipped")
}
ctx := context.Background()
table := newTable(t, schema)
// CSV data can't be loaded into a repeated field, so we use a different schema.
table := newTable(t, Schema{
{Name: "name", Type: StringFieldType},
{Name: "nums", Type: IntegerFieldType},
})
defer table.Delete(ctx)
// Load the table from a reader.
@ -667,20 +836,24 @@ func TestIntegration_DML(t *testing.T) {
// Use DML to insert.
wantRows := [][]Value{
[]Value{"a", int64(0)},
[]Value{"b", int64(1)},
[]Value{"c", int64(2)},
[]Value{"a", []Value{int64(0)}, []Value{true}},
[]Value{"b", []Value{int64(1)}, []Value{false}},
[]Value{"c", []Value{int64(2)}, []Value{true}},
}
query := fmt.Sprintf("INSERT bigquery_integration_test.%s (name, num) "+
"VALUES ('a', 0), ('b', 1), ('c', 2)",
query := fmt.Sprintf("INSERT bigquery_integration_test.%s (name, nums, rec) "+
"VALUES ('a', [0], STRUCT<BOOL>(TRUE)), ('b', [1], STRUCT<BOOL>(FALSE)), ('c', [2], STRUCT<BOOL>(TRUE))",
table.TableID)
q := client.Query(query)
q.UseStandardSQL = true // necessary for DML
job, err := q.Run(ctx)
if err != nil {
if e, ok := err.(*googleapi.Error); ok && e.Code < 500 {
return true, err // fail on 4xx
}
return false, err
}
if err := wait(ctx, job); err != nil {
fmt.Printf("wait: %v\n", err)
return false, err
}
if msg, ok := compareRead(table.Read(ctx), wantRows); !ok {
@ -819,6 +992,7 @@ func TestIntegration_LegacyQuery(t *testing.T) {
}
for _, c := range testCases {
q := client.Query(c.query)
q.UseLegacySQL = true
it, err := q.Read(ctx)
if err != nil {
t.Fatal(err)
@ -891,7 +1065,7 @@ func TestIntegration_ReadNullIntoStruct(t *testing.T) {
upl := table.Uploader()
row := &ValuesSaver{
Schema: schema,
Row: []Value{"name", nil},
Row: []Value{nil, []Value{}, []Value{nil}},
}
if err := upl.Put(ctx, []*ValuesSaver{row}); err != nil {
t.Fatal(putError(err))
@ -900,25 +1074,128 @@ func TestIntegration_ReadNullIntoStruct(t *testing.T) {
t.Fatal(err)
}
q := client.Query(fmt.Sprintf("select name, num from %s", table.TableID))
q := client.Query(fmt.Sprintf("select name from %s", table.TableID))
q.DefaultProjectID = dataset.ProjectID
q.DefaultDatasetID = dataset.DatasetID
it, err := q.Read(ctx)
if err != nil {
t.Fatal(err)
}
type S struct{ Num int64 }
type S struct{ Name string }
var s S
if err := it.Next(&s); err == nil {
t.Fatal("got nil, want error")
}
}
const (
stdName = "`bigquery-public-data.samples.shakespeare`"
legacyName = "[bigquery-public-data:samples.shakespeare]"
)
// These tests exploit the fact that the two SQL versions have different syntaxes for
// fully-qualified table names.
var useLegacySqlTests = []struct {
t string // name of table
std, legacy bool // use standard/legacy SQL
err bool // do we expect an error?
}{
{t: legacyName, std: false, legacy: true, err: false},
{t: legacyName, std: true, legacy: false, err: true},
{t: legacyName, std: false, legacy: false, err: true}, // standard SQL is default
{t: legacyName, std: true, legacy: true, err: true},
{t: stdName, std: false, legacy: true, err: true},
{t: stdName, std: true, legacy: false, err: false},
{t: stdName, std: false, legacy: false, err: false}, // standard SQL is default
{t: stdName, std: true, legacy: true, err: true},
}
func TestIntegration_QueryUseLegacySQL(t *testing.T) {
// Test the UseLegacySQL and UseStandardSQL options for queries.
if client == nil {
t.Skip("Integration tests skipped")
}
ctx := context.Background()
for _, test := range useLegacySqlTests {
q := client.Query(fmt.Sprintf("select word from %s limit 1", test.t))
q.UseStandardSQL = test.std
q.UseLegacySQL = test.legacy
_, err := q.Read(ctx)
gotErr := err != nil
if gotErr && !test.err {
t.Errorf("%+v:\nunexpected error: %v", test, err)
} else if !gotErr && test.err {
t.Errorf("%+v:\nsucceeded, but want error", test)
}
}
}
func TestIntegration_TableUseLegacySQL(t *testing.T) {
// Test UseLegacySQL and UseStandardSQL for Table.Create.
if client == nil {
t.Skip("Integration tests skipped")
}
ctx := context.Background()
table := newTable(t, schema)
defer table.Delete(ctx)
for i, test := range useLegacySqlTests {
view := dataset.Table(fmt.Sprintf("t_view_%d", i))
tm := &TableMetadata{
ViewQuery: fmt.Sprintf("SELECT word from %s", test.t),
UseStandardSQL: test.std,
UseLegacySQL: test.legacy,
}
err := view.Create(ctx, tm)
gotErr := err != nil
if gotErr && !test.err {
t.Errorf("%+v:\nunexpected error: %v", test, err)
} else if !gotErr && test.err {
t.Errorf("%+v:\nsucceeded, but want error", test)
}
view.Delete(ctx)
}
}
// TestIntegration_ListJobs lists a handful of jobs from the test project.
// The full job list is large and uncontrolled, so only a small page is read
// and the test merely checks that at least one job comes back.
func TestIntegration_ListJobs(t *testing.T) {
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	// About all we can do is list a few jobs.
	const max = 20
	var jis []JobInfo
	it := client.Jobs(ctx)
	for len(jis) < max {
		ji, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		jis = append(jis, ji)
	}
	// We expect that there is at least one job in the last few months.
	if len(jis) == 0 {
		t.Fatal("did not get any jobs")
	}
}
// Creates a new, temporary table with a unique name and the given schema.
func newTable(t *testing.T, s Schema) *Table {
name := fmt.Sprintf("t%d", time.Now().UnixNano())
table := dataset.Table(name)
err := table.Create(context.Background(), s, TableExpiration(testTableExpiration))
err := table.Create(context.Background(), &TableMetadata{
Schema: s,
ExpirationTime: testTableExpiration,
})
if err != nil {
t.Fatal(err)
}
@ -943,8 +1220,8 @@ func compareRead(it *RowIterator, want [][]Value) (msg string, ok bool) {
for i, r := range got {
gotRow := []Value(r)
wantRow := want[i]
if !reflect.DeepEqual(gotRow, wantRow) {
return fmt.Sprintf("#%d: got %v, want %v", i, gotRow, wantRow), false
if !testutil.Equal(gotRow, wantRow) {
return fmt.Sprintf("#%d: got %#v, want %#v", i, gotRow, wantRow), false
}
}
return "", true

View File

@ -70,7 +70,7 @@ type RowIterator struct {
//
// If dst is a *map[string]Value, a new map will be created if dst is nil. Then
// for each schema column name, the map key of that name will be set to the column's
// value.
// value. STRUCT types (RECORD types or nested schemas) become nested maps.
//
// If dst is pointer to a struct, each column in the schema will be matched
// with an exported field of the struct that has the same name, ignoring case.
@ -89,8 +89,8 @@ type RowIterator struct {
// TIME civil.Time
// DATETIME civil.DateTime
//
// A repeated field corresponds to a slice or array of the element type.
// A RECORD type (nested schema) corresponds to a nested struct or struct pointer.
// A repeated field corresponds to a slice or array of the element type. A STRUCT
// type (RECORD or nested schema) corresponds to a nested struct or struct pointer.
// All calls to Next on the same iterator must use the same struct type.
//
// It is an error to attempt to read a BigQuery NULL value into a struct field.

View File

@ -17,9 +17,10 @@ package bigquery
import (
"errors"
"fmt"
"reflect"
"testing"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
)
@ -246,10 +247,10 @@ func TestIterator(t *testing.T) {
if err != tc.wantErr {
t.Fatalf("%s: got %v, want %v", tc.desc, err, tc.wantErr)
}
if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) {
if (len(values) != 0 || len(tc.want) != 0) && !testutil.Equal(values, tc.want) {
t.Errorf("%s: values:\ngot: %v\nwant:%v", tc.desc, values, tc.want)
}
if (len(schema) != 0 || len(tc.wantSchema) != 0) && !reflect.DeepEqual(schema, tc.wantSchema) {
if (len(schema) != 0 || len(tc.wantSchema) != 0) && !testutil.Equal(schema, tc.wantSchema) {
t.Errorf("%s: iterator.Schema:\ngot: %v\nwant: %v", tc.desc, schema, tc.wantSchema)
}
}
@ -339,7 +340,7 @@ func TestNextAfterFinished(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if (len(values) != 0 || len(tc.want) != 0) && !reflect.DeepEqual(values, tc.want) {
if (len(values) != 0 || len(tc.want) != 0) && !testutil.Equal(values, tc.want) {
t.Errorf("values: got:\n%v\nwant:\n%v", values, tc.want)
}
// Try calling Get again.

View File

@ -16,12 +16,17 @@ package bigquery
import (
"errors"
"fmt"
"math/rand"
"os"
"sync"
"time"
"cloud.google.com/go/internal"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
"google.golang.org/api/iterator"
)
// A Job represents an operation which has been submitted to BigQuery for processing.
@ -46,6 +51,7 @@ func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {
return job, nil
}
// ID returns the job's ID.
func (j *Job) ID() string {
return j.jobID
}
@ -54,7 +60,8 @@ func (j *Job) ID() string {
type State int
const (
Pending State = iota
StateUnspecified State = iota // used only as a default in JobIterator
Pending
Running
Done
)
@ -73,21 +80,43 @@ type JobStatus struct {
Statistics *JobStatistics
}
// setJobRef initializes job's JobReference if given a non-empty jobID.
// createJobRef creates a JobReference.
// projectID must be non-empty.
func setJobRef(job *bq.Job, jobID, projectID string) {
func createJobRef(jobID string, addJobIDSuffix bool, projectID string) *bq.JobReference {
if jobID == "" {
return
jobID = randomJobIDFn()
} else if addJobIDSuffix {
jobID += "-" + randomJobIDFn()
}
// We don't check whether projectID is empty; the server will return an
// error when it encounters the resulting JobReference.
job.JobReference = &bq.JobReference{
return &bq.JobReference{
JobId: jobID,
ProjectId: projectID,
}
}
// alphanum is the character set used for randomly generated job IDs.
const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"

var (
	rngMu sync.Mutex
	rng   = rand.New(rand.NewSource(time.Now().UnixNano() ^ int64(os.Getpid())))
)

// randomJobIDFn generates random job IDs; it is a variable so tests can
// substitute a deterministic implementation.
var randomJobIDFn = randomJobID

// randomJobID returns a fresh random job-ID suffix.
// As of August 2017, the BigQuery service uses 27 alphanumeric characters for suffixes.
func randomJobID() string {
	b := make([]byte, 27)
	rngMu.Lock()
	defer rngMu.Unlock()
	for i := range b {
		b[i] = alphanum[rng.Intn(len(alphanum))]
	}
	return string(b)
}
// Done reports whether the job has completed.
// After Done returns true, the Err method will return an error if the job completed unsuccesfully.
func (s *JobStatus) Done() bool {
@ -99,20 +128,25 @@ func (s *JobStatus) Err() error {
return s.err
}
// setClient fills in the client field of any Tables referenced in the
// status's query statistics, so those Tables are usable by the caller.
func (s *JobStatus) setClient(c *Client) {
	if s.Statistics == nil {
		return
	}
	qs, ok := s.Statistics.Details.(*QueryStatistics)
	if !ok {
		return
	}
	for _, t := range qs.ReferencedTables {
		t.c = c
	}
}
// Status returns the current status of the job. It fails if the Status could not be determined.
//
// Fix: the original retained the old inline "fill in the client field" loop
// alongside the new js.setClient call — duplicated merge residue doing the
// same (idempotent) work twice. Only the setClient call is kept.
func (j *Job) Status(ctx context.Context) (*JobStatus, error) {
	js, err := j.c.service.jobStatus(ctx, j.projectID, j.jobID)
	if err != nil {
		return nil, err
	}
	// Fill in the client field of Tables in the statistics.
	js.setClient(j.c)
	return js, nil
}
@ -324,3 +358,73 @@ type ExplainQueryStep struct {
func (*ExtractStatistics) implementsStatistics() {}
func (*LoadStatistics) implementsStatistics() {}
func (*QueryStatistics) implementsStatistics() {}
// Jobs lists jobs within a project.
// The returned iterator defaults to the client's project; callers may set
// fields on it (ProjectID, AllUsers, State) before the first call to Next.
func (c *Client) Jobs(ctx context.Context) *JobIterator {
	it := &JobIterator{
		ctx:       ctx,
		c:         c,
		ProjectID: c.projectID,
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
		it.fetch,
		func() int { return len(it.items) },
		func() interface{} {
			batch := it.items
			it.items = nil
			return batch
		})
	return it
}
// A JobInfo consists of a Job and a JobStatus.
type JobInfo struct {
	// Job is the job itself.
	Job *Job
	// Status is the job's status.
	Status *JobStatus
}
// JobIterator iterates over jobs in a project.
// The exported fields may be set before the first call to Next to filter
// the listing.
type JobIterator struct {
	ProjectID string // Project ID of the jobs to list. Default is the client's project.
	AllUsers  bool   // Whether to list jobs owned by all users in the project, or just the current caller.
	State     State  // List only jobs in the given state. Defaults to all states.

	ctx      context.Context    // context used for every list call made by this iterator
	c        *Client            // client whose service performs the list calls
	pageInfo *iterator.PageInfo // pagination state, set up in Client.Jobs
	nextFunc func() error       // fetches the next page; created by iterator.NewPageInfo
	items    []JobInfo          // buffered results not yet returned by Next
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *JobIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
// Next returns the next buffered result, fetching a new page first when the
// buffer is empty. The error from nextFunc is returned unchanged.
func (it *JobIterator) Next() (JobInfo, error) {
	err := it.nextFunc()
	if err != nil {
		return JobInfo{}, err
	}
	info := it.items[0]
	it.items = it.items[1:]
	return info, nil
}
// fetch retrieves one page of jobs from the service, attaches the client to
// each result, appends them to the buffer, and returns the next page token.
func (it *JobIterator) fetch(pageSize int, pageToken string) (string, error) {
	// Translate the State filter into the string the API expects;
	// StateUnspecified maps to "" (no filter).
	stateNames := map[State]string{
		StateUnspecified: "",
		Pending:          "pending",
		Running:          "running",
		Done:             "done",
	}
	st, ok := stateNames[it.State]
	if !ok {
		return "", fmt.Errorf("bigquery: invalid value for JobIterator.State: %d", it.State)
	}
	jobInfos, nextPageToken, err := it.c.service.listJobs(it.ctx, it.ProjectID, pageSize, pageToken, it.AllUsers, st)
	if err != nil {
		return "", err
	}
	for _, ji := range jobInfos {
		ji.Job.c = it.c
		ji.Status.setClient(it.c)
		it.items = append(it.items, ji)
	}
	return nextPageToken, nil
}

95
vendor/cloud.google.com/go/bigquery/job_test.go generated vendored Normal file
View File

@ -0,0 +1,95 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package bigquery
import (
"testing"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
// TestCreateJobRef verifies job-ID generation for every combination of an
// explicit ID and the AddJobIDSuffix flag, with randomness pinned to "RANDOM".
func TestCreateJobRef(t *testing.T) {
	defer fixRandomJobID("RANDOM")()
	cases := []struct {
		jobID          string
		addJobIDSuffix bool
		want           string
	}{
		{jobID: "foo", addJobIDSuffix: false, want: "foo"},
		{jobID: "", addJobIDSuffix: false, want: "RANDOM"},
		{jobID: "", addJobIDSuffix: true, want: "RANDOM"}, // suffix flag is irrelevant for an empty ID
		{jobID: "foo", addJobIDSuffix: true, want: "foo-RANDOM"},
	}
	for _, c := range cases {
		jr := createJobRef(c.jobID, c.addJobIDSuffix, "projectID")
		if got := jr.JobId; got != c.want {
			t.Errorf("%q, %t: got %q, want %q", c.jobID, c.addJobIDSuffix, got, c.want)
		}
	}
}
// fixRandomJobID makes randomJobIDFn deterministic by returning s, and
// returns a function that restores the previous generator (intended for
// use with defer).
func fixRandomJobID(s string) func() {
	saved := randomJobIDFn
	randomJobIDFn = func() string { return s }
	return func() { randomJobIDFn = saved }
}
// checkJob reports test errors for case i when got lacks a job reference or
// ID, or when got differs from want.
func checkJob(t *testing.T, i int, got, want *bq.Job) {
	switch {
	case got.JobReference == nil:
		t.Errorf("#%d: empty job reference", i)
	case got.JobReference.JobId == "":
		t.Errorf("#%d: empty job ID", i)
	default:
		if d := testutil.Diff(got, want); d != "" {
			t.Errorf("#%d: (got=-, want=+) %s", i, d)
		}
	}
}
// testService is a fake implementation of the service interface for tests.
// It records the job passed to insertJob and reports every job as Done.
type testService struct {
	// Job holds the most recent job passed to insertJob, for inspection by tests.
	*bq.Job

	// Embedding the service interface satisfies it without implementing every
	// method; calling an unimplemented method panics on the nil interface.
	service
}

// insertJob records conf.job for later inspection and returns an empty Job.
func (s *testService) insertJob(ctx context.Context, projectID string, conf *insertJobConf) (*Job, error) {
	s.Job = conf.job
	return &Job{}, nil
}

// jobStatus always reports the job as having completed.
func (s *testService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
	return &JobStatus{State: Done}, nil
}

View File

@ -21,9 +21,12 @@ import (
// LoadConfig holds the configuration for a load job.
type LoadConfig struct {
// JobID is the ID to use for the load job. If unset, a job ID will be automatically created.
// JobID is the ID to use for the job. If empty, a random job ID will be generated.
JobID string
// If AddJobIDSuffix is true, then a random string will be appended to JobID.
AddJobIDSuffix bool
// Src is the source from which data will be loaded.
Src LoadSource
@ -56,6 +59,8 @@ type LoadSource interface {
// LoaderFrom returns a Loader which can be used to load data into a BigQuery table.
// The returned Loader may optionally be further configured before its Run method is called.
// See GCSReference and ReaderSource for additional configuration options that
// affect loading.
func (t *Table) LoaderFrom(src LoadSource) *Loader {
return &Loader{
c: t.c,
@ -69,6 +74,7 @@ func (t *Table) LoaderFrom(src LoadSource) *Loader {
// Run initiates a load job.
func (l *Loader) Run(ctx context.Context) (*Job, error) {
job := &bq.Job{
JobReference: createJobRef(l.JobID, l.AddJobIDSuffix, l.c.projectID),
Configuration: &bq.JobConfiguration{
Load: &bq.JobConfigurationLoad{
CreateDisposition: string(l.CreateDisposition),
@ -78,9 +84,6 @@ func (l *Loader) Run(ctx context.Context) (*Job, error) {
}
conf := &insertJobConf{job: job}
l.Src.populateInsertJobConfForLoad(conf)
setJobRef(job, l.JobID, l.c.projectID)
job.Configuration.Load.DestinationTable = l.Dst.tableRefProto()
return l.c.insertJob(ctx, conf)
}

View File

@ -15,22 +15,21 @@
package bigquery
import (
"reflect"
"strings"
"testing"
"golang.org/x/net/context"
"cloud.google.com/go/internal/pretty"
bq "google.golang.org/api/bigquery/v2"
)
func defaultLoadJob() *bq.Job {
return &bq.Job{
JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
Configuration: &bq.JobConfiguration{
Load: &bq.JobConfigurationLoad{
DestinationTable: &bq.TableReference{
ProjectId: "project-id",
ProjectId: "client-project-id",
DatasetId: "dataset-id",
TableId: "table-id",
},
@ -68,7 +67,8 @@ func bqNestedFieldSchema() *bq.TableFieldSchema {
}
func TestLoad(t *testing.T) {
c := &Client{projectID: "project-id"}
defer fixRandomJobID("RANDOM")()
c := &Client{projectID: "client-project-id"}
testCases := []struct {
dst *Table
@ -95,7 +95,7 @@ func TestLoad(t *testing.T) {
j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE"
j.JobReference = &bq.JobReference{
JobId: "ajob",
ProjectId: "project-id",
ProjectId: "client-project-id",
}
return j
}(),
@ -218,12 +218,9 @@ func TestLoad(t *testing.T) {
tc.config.Dst = tc.dst
loader.LoadConfig = tc.config
if _, err := loader.Run(context.Background()); err != nil {
t.Errorf("%d: err calling Loader.Run: %v", i, err)
t.Errorf("#%d: err calling Loader.Run: %v", i, err)
continue
}
if !reflect.DeepEqual(s.Job, tc.want) {
t.Errorf("loading %d: got:\n%v\nwant:\n%v",
i, pretty.Value(s.Job), pretty.Value(tc.want))
}
checkJob(t, i, s.Job, tc.want)
}
}

View File

@ -21,7 +21,10 @@ import (
"testing"
"time"
"github.com/google/go-cmp/cmp"
"cloud.google.com/go/civil"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
@ -74,7 +77,7 @@ func TestParamValueScalar(t *testing.T) {
continue
}
want := sval(test.want)
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("%v:\ngot %+v\nwant %+v", test.val, got, want)
}
}
@ -99,7 +102,7 @@ func TestParamValueArray(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(got, test.want) {
if !testutil.Equal(got, test.want) {
t.Errorf("%#v:\ngot %+v\nwant %+v", test.val, got, test.want)
}
}
@ -121,7 +124,7 @@ func TestParamValueStruct(t *testing.T) {
"C": sval("true"),
},
}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("got %+v\nwant %+v", got, want)
}
}
@ -172,7 +175,7 @@ func TestParamType(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(got, test.want) {
if !testutil.Equal(got, test.want) {
t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.want)
}
}
@ -196,7 +199,9 @@ func TestIntegration_ScalarParam(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if !equal(got, test.val) {
if !testutil.Equal(got, test.val, cmp.Comparer(func(t1, t2 time.Time) bool {
return t1.Round(time.Microsecond).Equal(t2.Round(time.Microsecond))
})) {
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", got, got, test.val, test.val)
}
}
@ -219,7 +224,7 @@ func TestIntegration_OtherParam(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if !equal(got, test.want) {
if !testutil.Equal(got, test.want) {
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", got, got, test.want, test.want)
}
}
@ -242,21 +247,3 @@ func paramRoundTrip(c *Client, x interface{}) (Value, error) {
}
return val[0], nil
}
// equal reports whether x1 and x2 are the same value. Values of different
// dynamic types are never equal. Float64 NaNs compare equal to each other;
// times are compared only to microsecond precision, because BigQuery is only
// accurate to the microsecond. Everything else falls back to
// reflect.DeepEqual.
func equal(x1, x2 interface{}) bool {
	if reflect.TypeOf(x1) != reflect.TypeOf(x2) {
		return false
	}
	switch v1 := x1.(type) {
	case float64:
		v2 := x2.(float64)
		if math.IsNaN(v1) || math.IsNaN(v2) {
			return math.IsNaN(v1) && math.IsNaN(v2)
		}
		return v1 == v2
	case time.Time:
		v2 := x2.(time.Time)
		return v1.Round(time.Microsecond).Equal(v2.Round(time.Microsecond))
	default:
		return reflect.DeepEqual(x1, x2)
	}
}

View File

@ -15,16 +15,20 @@
package bigquery
import (
"errors"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
// QueryConfig holds the configuration for a query job.
type QueryConfig struct {
// JobID is the ID to use for the query job. If this field is empty, a job ID
// will be automatically created.
// JobID is the ID to use for the job. If empty, a random job ID will be generated.
JobID string
// If AddJobIDSuffix is true, then a random string will be appended to JobID.
AddJobIDSuffix bool
// Dst is the table into which the results of the query will be written.
// If this field is nil, a temporary table will be created.
Dst *Table
@ -85,10 +89,12 @@ type QueryConfig struct {
// used.
MaxBytesBilled int64
// UseStandardSQL causes the query to use standard SQL.
// The default is false (using legacy SQL).
// UseStandardSQL causes the query to use standard SQL. The default.
UseStandardSQL bool
// UseLegacySQL causes the query to use legacy SQL.
UseLegacySQL bool
// Parameters is a list of query parameters. The presence of parameters
// implies the use of standard SQL.
// If the query uses positional syntax ("?"), then no parameter may have a name.
@ -123,12 +129,11 @@ func (c *Client) Query(q string) *Query {
// Run initiates a query job.
func (q *Query) Run(ctx context.Context) (*Job, error) {
job := &bq.Job{
JobReference: createJobRef(q.JobID, q.AddJobIDSuffix, q.client.projectID),
Configuration: &bq.JobConfiguration{
Query: &bq.JobConfigurationQuery{},
},
}
setJobRef(job, q.JobID, q.client.projectID)
if err := q.QueryConfig.populateJobQueryConfig(job.Configuration.Query); err != nil {
return nil, err
}
@ -177,11 +182,18 @@ func (q *QueryConfig) populateJobQueryConfig(conf *bq.JobConfigurationQuery) err
if q.MaxBytesBilled >= 1 {
conf.MaximumBytesBilled = q.MaxBytesBilled
}
if q.UseStandardSQL || len(q.Parameters) > 0 {
if q.UseStandardSQL && q.UseLegacySQL {
return errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
}
if len(q.Parameters) > 0 && q.UseLegacySQL {
return errors.New("bigquery: cannot provide both Parameters (implying standard SQL) and UseLegacySQL")
}
if q.UseLegacySQL {
conf.UseLegacySql = true
} else {
conf.UseLegacySql = false
conf.ForceSendFields = append(conf.ForceSendFields, "UseLegacySql")
}
if q.Dst != nil && !q.Dst.implicitTable() {
conf.DestinationTable = q.Dst.tableRefProto()
}

View File

@ -15,9 +15,10 @@
package bigquery
import (
"reflect"
"testing"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
@ -25,10 +26,11 @@ import (
func defaultQueryJob() *bq.Job {
return &bq.Job{
JobReference: &bq.JobReference{JobId: "RANDOM", ProjectId: "client-project-id"},
Configuration: &bq.JobConfiguration{
Query: &bq.JobConfigurationQuery{
DestinationTable: &bq.TableReference{
ProjectId: "project-id",
ProjectId: "client-project-id",
DatasetId: "dataset-id",
TableId: "table-id",
},
@ -37,14 +39,17 @@ func defaultQueryJob() *bq.Job {
ProjectId: "def-project-id",
DatasetId: "def-dataset-id",
},
UseLegacySql: false,
ForceSendFields: []string{"UseLegacySql"},
},
},
}
}
func TestQuery(t *testing.T) {
defer fixRandomJobID("RANDOM")()
c := &Client{
projectID: "project-id",
projectID: "client-project-id",
}
testCases := []struct {
dst *Table
@ -67,6 +72,20 @@ func TestQuery(t *testing.T) {
return j
}(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
JobID: "jobID",
AddJobIDSuffix: true,
},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.DefaultDataset = nil
j.JobReference.JobId = "jobID-RANDOM"
return j
}(),
},
{
dst: &Table{},
src: defaultQuery,
@ -143,6 +162,7 @@ func TestQuery(t *testing.T) {
},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.DestinationTable.ProjectId = "project-id"
j.Configuration.Query.WriteDisposition = "WRITE_TRUNCATE"
j.Configuration.Query.CreateDisposition = "CREATE_NEVER"
return j
@ -242,27 +262,35 @@ func TestQuery(t *testing.T) {
DefaultDatasetID: "def-dataset-id",
UseStandardSQL: true,
},
want: defaultQueryJob(),
},
{
dst: c.Dataset("dataset-id").Table("table-id"),
src: &QueryConfig{
Q: "query string",
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
UseLegacySQL: true,
},
want: func() *bq.Job {
j := defaultQueryJob()
j.Configuration.Query.UseLegacySql = false
j.Configuration.Query.ForceSendFields = []string{"UseLegacySql"}
j.Configuration.Query.UseLegacySql = true
j.Configuration.Query.ForceSendFields = nil
return j
}(),
},
}
for _, tc := range testCases {
for i, tc := range testCases {
s := &testService{}
c.service = s
query := c.Query("")
query.QueryConfig = *tc.src
query.Dst = tc.dst
if _, err := query.Run(context.Background()); err != nil {
t.Errorf("err calling query: %v", err)
t.Errorf("#%d: err calling query: %v", i, err)
continue
}
if !reflect.DeepEqual(s.Job, tc.want) {
t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, tc.want)
}
checkJob(t, i, s.Job, tc.want)
}
}
@ -288,6 +316,8 @@ func TestConfiguringQuery(t *testing.T) {
ProjectId: "def-project-id",
DatasetId: "def-dataset-id",
},
UseLegacySql: false,
ForceSendFields: []string{"UseLegacySql"},
},
},
JobReference: &bq.JobReference{
@ -299,7 +329,28 @@ func TestConfiguringQuery(t *testing.T) {
if _, err := query.Run(context.Background()); err != nil {
t.Fatalf("err calling Query.Run: %v", err)
}
if !reflect.DeepEqual(s.Job, want) {
t.Errorf("querying: got:\n%v\nwant:\n%v", s.Job, want)
if diff := testutil.Diff(s.Job, want); diff != "" {
t.Errorf("querying: -got +want:\n%s", diff)
}
}
// TestQueryLegacySQL verifies that Run rejects contradictory SQL-dialect
// settings: both dialects selected at once, or parameters (which imply
// standard SQL) combined with legacy SQL.
func TestQueryLegacySQL(t *testing.T) {
	c := &Client{
		projectID: "project-id",
		service:   &testService{},
	}
	q := c.Query("q")
	q.UseStandardSQL = true
	q.UseLegacySQL = true
	if _, err := q.Run(context.Background()); err == nil {
		t.Error("UseStandardSQL and UseLegacySQL: got nil, want error")
	}
	q = c.Query("q")
	q.Parameters = []QueryParameter{{Name: "p", Value: 3}}
	q.UseLegacySQL = true
	if _, err := q.Run(context.Background()); err == nil {
		t.Error("Parameters and UseLegacySQL: got nil, want error")
	}
}

View File

@ -16,9 +16,12 @@ package bigquery
import (
"errors"
"reflect"
"testing"
"github.com/google/go-cmp/cmp"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
"google.golang.org/api/iterator"
@ -113,7 +116,7 @@ func TestRead(t *testing.T) {
service.values = tc.data
service.pageTokens = tc.pageTokens
if got, ok := collectValues(t, readFunc()); ok {
if !reflect.DeepEqual(got, tc.want) {
if !testutil.Equal(got, tc.want) {
t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want)
}
}
@ -210,7 +213,7 @@ func TestReadTabledataOptions(t *testing.T) {
tok: "",
}}
if !reflect.DeepEqual(s.readTabledataCalls, want) {
if !testutil.Equal(s.readTabledataCalls, want, cmp.AllowUnexported(readTabledataArgs{}, readTableConf{}, pagingConf{})) {
t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want)
}
}
@ -254,7 +257,7 @@ func TestReadQueryOptions(t *testing.T) {
tok: "",
}}
if !reflect.DeepEqual(s.readTabledataCalls, want) {
if !testutil.Equal(s.readTabledataCalls, want, cmp.AllowUnexported(readTabledataArgs{}, readTableConf{}, pagingConf{})) {
t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want)
}
}

View File

@ -77,11 +77,6 @@ func (s Schema) asTableSchema() *bq.TableSchema {
return &bq.TableSchema{Fields: fields}
}
// customizeCreateTable allows a Schema to be used directly as an option to CreateTable.
// It stores the schema, converted to its API representation, in the
// table-creation configuration.
func (s Schema) customizeCreateTable(conf *createTableConf) {
	conf.schema = s.asTableSchema()
}
func convertTableFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
fs := &FieldSchema{
Description: tfs.Description,

View File

@ -22,6 +22,7 @@ import (
"cloud.google.com/go/civil"
"cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
)
@ -192,12 +193,12 @@ func TestSchemaConversion(t *testing.T) {
for _, tc := range testCases {
bqSchema := tc.schema.asTableSchema()
if !reflect.DeepEqual(bqSchema, tc.bqSchema) {
if !testutil.Equal(bqSchema, tc.bqSchema) {
t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v",
pretty.Value(bqSchema), pretty.Value(tc.bqSchema))
}
schema := convertTableSchema(tc.bqSchema)
if !reflect.DeepEqual(schema, tc.schema) {
if !testutil.Equal(schema, tc.schema) {
t.Errorf("converting to Schema: got:\n%v\nwant:\n%v", schema, tc.schema)
}
}
@ -311,7 +312,7 @@ func TestSimpleInference(t *testing.T) {
if err != nil {
t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err)
}
if !reflect.DeepEqual(got, tc.want) {
if !testutil.Equal(got, tc.want) {
t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in,
pretty.Value(got), pretty.Value(tc.want))
}
@ -414,7 +415,7 @@ func TestNestedInference(t *testing.T) {
if err != nil {
t.Fatalf("%T: error inferring TableSchema: %v", tc.in, err)
}
if !reflect.DeepEqual(got, tc.want) {
if !testutil.Equal(got, tc.want) {
t.Errorf("%T: inferring TableSchema: got:\n%#v\nwant:\n%#v", tc.in,
pretty.Value(got), pretty.Value(tc.want))
}
@ -483,7 +484,7 @@ func TestRepeatedInference(t *testing.T) {
if err != nil {
t.Fatalf("%d: error inferring TableSchema: %v", i, err)
}
if !reflect.DeepEqual(got, tc.want) {
if !testutil.Equal(got, tc.want) {
t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i,
pretty.Value(got), pretty.Value(tc.want))
}
@ -512,7 +513,7 @@ func TestEmbeddedInference(t *testing.T) {
reqField("Embedded", "INTEGER"),
reqField("Embedded2", "INTEGER"),
}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("got %v, want %v", pretty.Value(got), pretty.Value(want))
}
}
@ -617,7 +618,7 @@ func TestTagInference(t *testing.T) {
if err != nil {
t.Fatalf("%d: error inferring TableSchema: %v", i, err)
}
if !reflect.DeepEqual(got, tc.want) {
if !testutil.Equal(got, tc.want) {
t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i,
pretty.Value(got), pretty.Value(tc.want))
}
@ -675,7 +676,7 @@ func TestTagInferenceErrors(t *testing.T) {
for i, tc := range testCases {
want := tc.err
_, got := InferSchema(tc.in)
if !reflect.DeepEqual(got, want) {
if got != want {
t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, want)
}
}
@ -746,7 +747,7 @@ func TestSchemaErrors(t *testing.T) {
for _, tc := range testCases {
want := tc.err
_, got := InferSchema(tc.in)
if !reflect.DeepEqual(got, want) {
if got != want {
t.Errorf("%#v: got:\n%#v\nwant:\n%#v", tc.in, got, want)
}
}

View File

@ -15,13 +15,14 @@
package bigquery
import (
"errors"
"fmt"
"io"
"net/http"
"sync"
"time"
"cloud.google.com/go/internal"
"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/version"
gax "github.com/googleapis/gax-go"
@ -40,24 +41,26 @@ type service interface {
getJob(ctx context.Context, projectId, jobID string) (*Job, error)
jobCancel(ctx context.Context, projectId, jobID string) error
jobStatus(ctx context.Context, projectId, jobID string) (*JobStatus, error)
listJobs(ctx context.Context, projectId string, maxResults int, pageToken string, all bool, state string) ([]JobInfo, string, error)
// Tables
createTable(ctx context.Context, conf *createTableConf) error
createTable(ctx context.Context, projectID, datasetID, tableID string, tm *TableMetadata) error
getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error)
deleteTable(ctx context.Context, projectID, datasetID, tableID string) error
// listTables returns a page of Tables and a next page token. Note: the Tables do not have their c field populated.
listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error)
patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error)
patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf, etag string) (*TableMetadata, error)
// Table data
readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error)
insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error
// Datasets
insertDataset(ctx context.Context, datasetID, projectID string) error
insertDataset(ctx context.Context, datasetID, projectID string, dm *DatasetMetadata) error
deleteDataset(ctx context.Context, datasetID, projectID string) error
getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error)
patchDataset(ctx context.Context, projectID, datasetID string, dm *DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error)
// Misc
@ -177,7 +180,6 @@ func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf
// Prepare request to fetch one page of table data.
req := s.s.Tabledata.List(conf.projectID, conf.datasetID, conf.tableID)
setClientHeader(req.Header())
if pageToken != "" {
req.PageToken(pageToken)
} else {
@ -189,33 +191,37 @@ func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf
}
// Fetch the table schema in the background, if necessary.
var schemaErr error
var schemaFetch sync.WaitGroup
if conf.schema == nil {
schemaFetch.Add(1)
errc := make(chan error, 1)
if conf.schema != nil {
errc <- nil
} else {
go func() {
defer schemaFetch.Done()
var t *bq.Table
t, schemaErr = s.s.Tables.Get(conf.projectID, conf.datasetID, conf.tableID).
Fields("schema").
Context(ctx).
Do()
if schemaErr == nil && t.Schema != nil {
err := runWithRetry(ctx, func() (err error) {
t, err = s.s.Tables.Get(conf.projectID, conf.datasetID, conf.tableID).
Fields("schema").
Context(ctx).
Do()
return err
})
if err == nil && t.Schema != nil {
conf.schema = convertTableSchema(t.Schema)
}
errc <- err
}()
}
res, err := req.Context(ctx).Do()
var res *bq.TableDataList
err := runWithRetry(ctx, func() (err error) {
res, err = req.Context(ctx).Do()
return err
})
if err != nil {
return nil, err
}
schemaFetch.Wait()
if schemaErr != nil {
return nil, schemaErr
err = <-errc
if err != nil {
return nil, err
}
result := &readDataResult{
pageToken: res.PageToken,
totalRows: uint64(res.TotalRows),
@ -276,12 +282,11 @@ func (s *bigqueryService) insertRows(ctx context.Context, projectID, datasetID,
Json: m,
})
}
call := s.s.Tabledata.InsertAll(projectID, datasetID, tableID, req).Context(ctx)
setClientHeader(call.Header())
var res *bq.TableDataInsertAllResponse
err := runWithRetry(ctx, func() error {
var err error
req := s.s.Tabledata.InsertAll(projectID, datasetID, tableID, req).Context(ctx)
setClientHeader(req.Header())
res, err = req.Do()
err := runWithRetry(ctx, func() (err error) {
res, err = call.Do()
return err
})
if err != nil {
@ -309,25 +314,41 @@ func (s *bigqueryService) insertRows(ctx context.Context, projectID, datasetID,
}
func (s *bigqueryService) getJob(ctx context.Context, projectID, jobID string) (*Job, error) {
res, err := s.s.Jobs.Get(projectID, jobID).
Fields("configuration").
Context(ctx).
Do()
bqjob, err := s.getJobInternal(ctx, projectID, jobID, "configuration", "jobReference")
if err != nil {
return nil, err
}
var isQuery bool
var dest *bq.TableReference
if res.Configuration.Query != nil {
isQuery = true
dest = res.Configuration.Query.DestinationTable
return jobFromProtos(bqjob.JobReference, bqjob.Configuration), nil
}
func (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
job, err := s.getJobInternal(ctx, projectID, jobID, "status", "statistics")
if err != nil {
return nil, err
}
return &Job{
projectID: projectID,
jobID: jobID,
isQuery: isQuery,
destinationTable: dest,
}, nil
st, err := jobStatusFromProto(job.Status)
if err != nil {
return nil, err
}
st.Statistics = jobStatisticsFromProto(job.Statistics)
return st, nil
}
// getJobInternal fetches the raw API job resource, restricted to the given
// fields when any are supplied, retrying the call on transient failures.
func (s *bigqueryService) getJobInternal(ctx context.Context, projectID, jobID string, fields ...googleapi.Field) (*bq.Job, error) {
	call := s.s.Jobs.Get(projectID, jobID).Context(ctx)
	if len(fields) > 0 {
		call = call.Fields(fields...)
	}
	setClientHeader(call.Header())
	var job *bq.Job
	if err := runWithRetry(ctx, func() (err error) {
		job, err = call.Do()
		return err
	}); err != nil {
		return nil, err
	}
	return job, nil
}
func (s *bigqueryService) jobCancel(ctx context.Context, projectID, jobID string) error {
@ -336,27 +357,29 @@ func (s *bigqueryService) jobCancel(ctx context.Context, projectID, jobID string
// docs: "This call will return immediately, and the client will need
// to poll for the job status to see if the cancel completed
// successfully". So it would be misleading to return a status.
_, err := s.s.Jobs.Cancel(projectID, jobID).
call := s.s.Jobs.Cancel(projectID, jobID).
Fields(). // We don't need any of the response data.
Context(ctx).
Do()
return err
Context(ctx)
setClientHeader(call.Header())
return runWithRetry(ctx, func() error {
_, err := call.Do()
return err
})
}
func (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
res, err := s.s.Jobs.Get(projectID, jobID).
Fields("status", "statistics"). // Only fetch what we need.
Context(ctx).
Do()
if err != nil {
return nil, err
func jobFromProtos(jr *bq.JobReference, config *bq.JobConfiguration) *Job {
var isQuery bool
var dest *bq.TableReference
if config.Query != nil {
isQuery = true
dest = config.Query.DestinationTable
}
st, err := jobStatusFromProto(res.Status)
if err != nil {
return nil, err
return &Job{
projectID: jr.ProjectId,
jobID: jr.JobId,
isQuery: isQuery,
destinationTable: dest,
}
st.Statistics = jobStatisticsFromProto(res.Statistics)
return st, nil
}
var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}
@ -465,7 +488,11 @@ func (s *bigqueryService) listTables(ctx context.Context, projectID, datasetID s
if pageSize > 0 {
req.MaxResults(int64(pageSize))
}
res, err := req.Do()
var res *bq.TableList
err := runWithRetry(ctx, func() (err error) {
res, err = req.Do()
return err
})
if err != nil {
return nil, "", err
}
@ -475,61 +502,98 @@ func (s *bigqueryService) listTables(ctx context.Context, projectID, datasetID s
return tables, res.NextPageToken, nil
}
type createTableConf struct {
projectID, datasetID, tableID string
expiration time.Time
viewQuery string
schema *bq.TableSchema
useStandardSQL bool
timePartitioning *TimePartitioning
}
// createTable creates a table in the BigQuery service.
// expiration is an optional time after which the table will be deleted and its storage reclaimed.
// If viewQuery is non-empty, the created table will be of type VIEW.
// If tm.ViewQuery is non-empty, the created table will be of type VIEW.
// Note: expiration can only be set during table creation.
// Note: after table creation, a view can be modified only if its table was initially created with a view.
func (s *bigqueryService) createTable(ctx context.Context, conf *createTableConf) error {
table := &bq.Table{
TableReference: &bq.TableReference{
ProjectId: conf.projectID,
DatasetId: conf.datasetID,
TableId: conf.tableID,
},
func (s *bigqueryService) createTable(ctx context.Context, projectID, datasetID, tableID string, tm *TableMetadata) error {
table, err := bqTableFromMetadata(tm)
if err != nil {
return err
}
if !conf.expiration.IsZero() {
table.ExpirationTime = conf.expiration.UnixNano() / 1e6
table.TableReference = &bq.TableReference{
ProjectId: projectID,
DatasetId: datasetID,
TableId: tableID,
}
// TODO(jba): make it impossible to provide both a view query and a schema.
if conf.viewQuery != "" {
table.View = &bq.ViewDefinition{
Query: conf.viewQuery,
req := s.s.Tables.Insert(projectID, datasetID, table).Context(ctx)
setClientHeader(req.Header())
_, err = req.Do()
return err
}
func bqTableFromMetadata(tm *TableMetadata) (*bq.Table, error) {
t := &bq.Table{}
if tm == nil {
return t, nil
}
if tm.Schema != nil && tm.ViewQuery != "" {
return nil, errors.New("bigquery: provide Schema or ViewQuery, not both")
}
t.FriendlyName = tm.Name
t.Description = tm.Description
if tm.Schema != nil {
t.Schema = tm.Schema.asTableSchema()
}
if tm.ViewQuery != "" {
if tm.UseStandardSQL && tm.UseLegacySQL {
return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
}
if conf.useStandardSQL {
table.View.UseLegacySql = false
table.View.ForceSendFields = append(table.View.ForceSendFields, "UseLegacySql")
t.View = &bq.ViewDefinition{Query: tm.ViewQuery}
if tm.UseLegacySQL {
t.View.UseLegacySql = true
} else {
t.View.UseLegacySql = false
t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
}
} else if tm.UseLegacySQL || tm.UseStandardSQL {
return nil, errors.New("bigquery: UseLegacy/StandardSQL requires ViewQuery")
}
if conf.schema != nil {
table.Schema = conf.schema
}
if conf.timePartitioning != nil {
table.TimePartitioning = &bq.TimePartitioning{
if tm.TimePartitioning != nil {
t.TimePartitioning = &bq.TimePartitioning{
Type: "DAY",
ExpirationMs: int64(conf.timePartitioning.Expiration.Seconds() * 1000),
ExpirationMs: int64(tm.TimePartitioning.Expiration / time.Millisecond),
}
}
if !tm.ExpirationTime.IsZero() {
t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
}
req := s.s.Tables.Insert(conf.projectID, conf.datasetID, table).Context(ctx)
setClientHeader(req.Header())
_, err := req.Do()
return err
if tm.FullID != "" {
return nil, errors.New("cannot set FullID on create")
}
if tm.Type != "" {
return nil, errors.New("cannot set Type on create")
}
if !tm.CreationTime.IsZero() {
return nil, errors.New("cannot set CreationTime on create")
}
if !tm.LastModifiedTime.IsZero() {
return nil, errors.New("cannot set LastModifiedTime on create")
}
if tm.NumBytes != 0 {
return nil, errors.New("cannot set NumBytes on create")
}
if tm.NumRows != 0 {
return nil, errors.New("cannot set NumRows on create")
}
if tm.StreamingBuffer != nil {
return nil, errors.New("cannot set StreamingBuffer on create")
}
if tm.ETag != "" {
return nil, errors.New("cannot set ETag on create")
}
return t, nil
}
func (s *bigqueryService) getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error) {
req := s.s.Tables.Get(projectID, datasetID, tableID).Context(ctx)
setClientHeader(req.Header())
table, err := req.Do()
var table *bq.Table
err := runWithRetry(ctx, func() (err error) {
table, err = req.Do()
return err
})
if err != nil {
return nil, err
}
@ -539,7 +603,7 @@ func (s *bigqueryService) getTableMetadata(ctx context.Context, projectID, datas
func (s *bigqueryService) deleteTable(ctx context.Context, projectID, datasetID, tableID string) error {
req := s.s.Tables.Delete(projectID, datasetID, tableID).Context(ctx)
setClientHeader(req.Header())
return req.Do()
return runWithRetry(ctx, func() error { return req.Do() })
}
func bqTableToMetadata(t *bq.Table) *TableMetadata {
@ -547,18 +611,20 @@ func bqTableToMetadata(t *bq.Table) *TableMetadata {
Description: t.Description,
Name: t.FriendlyName,
Type: TableType(t.Type),
ID: t.Id,
FullID: t.Id,
NumBytes: t.NumBytes,
NumRows: t.NumRows,
ExpirationTime: unixMillisToTime(t.ExpirationTime),
CreationTime: unixMillisToTime(t.CreationTime),
LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)),
ETag: t.Etag,
}
if t.Schema != nil {
md.Schema = convertTableSchema(t.Schema)
}
if t.View != nil {
md.View = t.View.Query
md.ViewQuery = t.View.Query
md.UseLegacySQL = t.View.UseLegacySql
}
if t.TimePartitioning != nil {
md.TimePartitioning = &TimePartitioning{
@ -583,9 +649,10 @@ func bqDatasetToMetadata(d *bq.Dataset) *DatasetMetadata {
DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond,
Description: d.Description,
Name: d.FriendlyName,
ID: d.Id,
FullID: d.Id,
Location: d.Location,
Labels: d.Labels,
ETag: d.Etag,
}
}
@ -610,12 +677,13 @@ func convertTableReference(tr *bq.TableReference) *Table {
// patchTableConf contains fields to be patched.
type patchTableConf struct {
// These fields are omitted from the patch operation if nil.
Description *string
Name *string
Schema Schema
Description *string
Name *string
Schema Schema
ExpirationTime time.Time
}
func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf) (*TableMetadata, error) {
func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf, etag string) (*TableMetadata, error) {
t := &bq.Table{}
forceSend := func(field string) {
t.ForceSendFields = append(t.ForceSendFields, field)
@ -633,39 +701,135 @@ func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID,
t.Schema = conf.Schema.asTableSchema()
forceSend("Schema")
}
table, err := s.s.Tables.Patch(projectID, datasetID, tableID, t).
Context(ctx).
Do()
if err != nil {
if !conf.ExpirationTime.IsZero() {
t.ExpirationTime = conf.ExpirationTime.UnixNano() / 1e6
forceSend("ExpirationTime")
}
call := s.s.Tables.Patch(projectID, datasetID, tableID, t).Context(ctx)
setClientHeader(call.Header())
if etag != "" {
call.Header().Set("If-Match", etag)
}
var table *bq.Table
if err := runWithRetry(ctx, func() (err error) {
table, err = call.Do()
return err
}); err != nil {
return nil, err
}
return bqTableToMetadata(table), nil
}
func (s *bigqueryService) insertDataset(ctx context.Context, datasetID, projectID string) error {
ds := &bq.Dataset{
DatasetReference: &bq.DatasetReference{DatasetId: datasetID},
func (s *bigqueryService) insertDataset(ctx context.Context, datasetID, projectID string, dm *DatasetMetadata) error {
// TODO(jba): retry?
ds, err := bqDatasetFromMetadata(dm)
if err != nil {
return err
}
ds.DatasetReference = &bq.DatasetReference{DatasetId: datasetID}
req := s.s.Datasets.Insert(projectID, ds).Context(ctx)
setClientHeader(req.Header())
_, err := req.Do()
_, err = req.Do()
return err
}
// patchDataset applies the non-nil fields of dm to the dataset. If etag is
// non-empty it is sent as an If-Match precondition, so the update fails if
// the dataset changed since its metadata was read.
func (s *bigqueryService) patchDataset(ctx context.Context, projectID, datasetID string, dm *DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error) {
	patch := bqDatasetFromUpdateMetadata(dm)
	call := s.s.Datasets.Patch(projectID, datasetID, patch).Context(ctx)
	setClientHeader(call.Header())
	if etag != "" {
		call.Header().Set("If-Match", etag)
	}
	var updated *bq.Dataset
	err := runWithRetry(ctx, func() (err error) {
		updated, err = call.Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return bqDatasetToMetadata(updated), nil
}
// bqDatasetFromMetadata converts dm to the wire representation used when
// creating a dataset. A nil dm yields an empty dataset. Read-only fields
// (CreationTime, LastModifiedTime, FullID, ETag) must be unset.
func bqDatasetFromMetadata(dm *DatasetMetadata) (*bq.Dataset, error) {
	if dm == nil {
		return &bq.Dataset{}, nil
	}
	// Reject fields the service owns; they cannot be written on create.
	switch {
	case !dm.CreationTime.IsZero():
		return nil, errors.New("bigquery: Dataset.CreationTime is not writable")
	case !dm.LastModifiedTime.IsZero():
		return nil, errors.New("bigquery: Dataset.LastModifiedTime is not writable")
	case dm.FullID != "":
		return nil, errors.New("bigquery: Dataset.FullID is not writable")
	case dm.ETag != "":
		return nil, errors.New("bigquery: Dataset.ETag is not writable")
	}
	return &bq.Dataset{
		FriendlyName:             dm.Name,
		Description:              dm.Description,
		Location:                 dm.Location,
		DefaultTableExpirationMs: int64(dm.DefaultTableExpiration / time.Millisecond),
		Labels:                   dm.Labels,
	}, nil
}
// bqDatasetFromUpdateMetadata builds the sparse bq.Dataset patch for dm.
// Fields left nil in dm are omitted. ForceSendFields marks fields that must
// be sent even though they hold a zero value; NullFields marks fields to be
// cleared on the server.
func bqDatasetFromUpdateMetadata(dm *DatasetMetadataToUpdate) *bq.Dataset {
	ds := &bq.Dataset{}
	if dm.Description != nil {
		ds.Description = optional.ToString(dm.Description)
		ds.ForceSendFields = append(ds.ForceSendFields, "Description")
	}
	if dm.Name != nil {
		ds.FriendlyName = optional.ToString(dm.Name)
		ds.ForceSendFields = append(ds.ForceSendFields, "FriendlyName")
	}
	if dm.DefaultTableExpiration != nil {
		switch dur := optional.ToDuration(dm.DefaultTableExpiration); dur {
		case 0:
			// Zero duration means "remove the expiration": send an explicit null.
			ds.NullFields = append(ds.NullFields, "DefaultTableExpirationMs")
		default:
			ds.DefaultTableExpirationMs = int64(dur / time.Millisecond)
		}
	}
	if dm.setLabels != nil || dm.deleteLabels != nil {
		ds.Labels = map[string]string{}
		for k, v := range dm.setLabels {
			ds.Labels[k] = v
		}
		// An empty map would be dropped by the encoder; force-send it so
		// that pure label deletions still reach the server.
		if len(ds.Labels) == 0 && len(dm.deleteLabels) > 0 {
			ds.ForceSendFields = append(ds.ForceSendFields, "Labels")
		}
		for l := range dm.deleteLabels {
			ds.NullFields = append(ds.NullFields, "Labels."+l)
		}
	}
	return ds
}
// deleteDataset removes the dataset, retrying transient errors.
func (s *bigqueryService) deleteDataset(ctx context.Context, datasetID, projectID string) error {
	call := s.s.Datasets.Delete(projectID, datasetID).Context(ctx)
	setClientHeader(call.Header())
	return runWithRetry(ctx, func() error {
		return call.Do()
	})
}
func (s *bigqueryService) getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error) {
req := s.s.Datasets.Get(projectID, datasetID).Context(ctx)
setClientHeader(req.Header())
table, err := req.Do()
if err != nil {
var ds *bq.Dataset
if err := runWithRetry(ctx, func() (err error) {
ds, err = req.Do()
return err
}); err != nil {
return nil, err
}
return bqDatasetToMetadata(table), nil
return bqDatasetToMetadata(ds), nil
}
func (s *bigqueryService) listDatasets(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, filter string) ([]*Dataset, string, error) {
@ -680,7 +844,11 @@ func (s *bigqueryService) listDatasets(ctx context.Context, projectID string, ma
if filter != "" {
req.Filter(filter)
}
res, err := req.Do()
var res *bq.DatasetList
err := runWithRetry(ctx, func() (err error) {
res, err = req.Do()
return err
})
if err != nil {
return nil, "", err
}
@ -698,13 +866,54 @@ func (s *bigqueryService) convertListedDataset(d *bq.DatasetListDatasets) *Datas
}
}
// listJobs returns one page of the project's jobs plus a token for the next
// page. maxResults <= 0 means the server default; state, when non-empty,
// restricts results to jobs in that state; all selects jobs from all users.
func (s *bigqueryService) listJobs(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, state string) ([]JobInfo, string, error) {
	req := s.s.Jobs.List(projectID).
		Context(ctx).
		PageToken(pageToken).
		Projection("full").
		AllUsers(all)
	if state != "" {
		req.StateFilter(state)
	}
	setClientHeader(req.Header())
	if maxResults > 0 {
		req.MaxResults(int64(maxResults))
	}
	// Retry transient errors, for consistency with listTables and
	// listDatasets in this file.
	var res *bq.JobList
	err := runWithRetry(ctx, func() (err error) {
		res, err = req.Do()
		return err
	})
	if err != nil {
		return nil, "", err
	}
	var jobInfos []JobInfo
	for _, j := range res.Jobs {
		ji, err := s.convertListedJob(j)
		if err != nil {
			return nil, "", err
		}
		jobInfos = append(jobInfos, ji)
	}
	return jobInfos, res.NextPageToken, nil
}
// convertListedJob turns one entry of a jobs.list response into a JobInfo,
// pairing the job handle with its decoded status and statistics.
func (s *bigqueryService) convertListedJob(j *bq.JobListJobs) (JobInfo, error) {
	status, err := jobStatusFromProto(j.Status)
	if err != nil {
		return JobInfo{}, err
	}
	status.Statistics = jobStatisticsFromProto(j.Statistics)
	info := JobInfo{
		Job:    jobFromProtos(j.JobReference, j.Configuration),
		Status: status,
	}
	return info, nil
}
// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
// See the similar function in ../storage/invoke.go. The main difference is the
// reason for retrying.
func runWithRetry(ctx context.Context, call func() error) error {
// These parameters match the suggestions in https://cloud.google.com/bigquery/sla.
backoff := gax.Backoff{
Initial: 2 * time.Second,
Initial: 1 * time.Second,
Max: 32 * time.Second,
Multiplier: 2,
}
@ -717,7 +926,7 @@ func runWithRetry(ctx context.Context, call func() error) error {
})
}
// Use the criteria in https://cloud.google.com/bigquery/troubleshooting-errors.
// This is the correct definition of retryable according to the BigQuery team.
func retryableError(err error) bool {
e, ok := err.(*googleapi.Error)
if !ok {
@ -727,5 +936,5 @@ func retryableError(err error) bool {
if len(e.Errors) > 0 {
reason = e.Errors[0].Reason
}
return reason == "backendError" && (e.Code == 500 || e.Code == 503)
return reason == "backendError" || reason == "rateLimitExceeded"
}

View File

@ -15,10 +15,11 @@
package bigquery
import (
"reflect"
"testing"
"time"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
)
@ -58,26 +59,193 @@ func TestBQTableToMetadata(t *testing.T) {
&TableMetadata{
Description: "desc",
Name: "fname",
View: "view-query",
ID: "id",
ViewQuery: "view-query",
FullID: "id",
Type: ExternalTable,
ExpirationTime: aTime.Truncate(time.Millisecond),
CreationTime: aTime.Truncate(time.Millisecond),
LastModifiedTime: aTime.Truncate(time.Millisecond),
NumBytes: 123,
NumRows: 7,
TimePartitioning: &TimePartitioning{Expiration: time.Duration(7890) * time.Millisecond},
TimePartitioning: &TimePartitioning{Expiration: 7890 * time.Millisecond},
StreamingBuffer: &StreamingBuffer{
EstimatedBytes: 11,
EstimatedRows: 3,
OldestEntryTime: aTime,
},
ETag: "etag",
},
},
} {
got := bqTableToMetadata(test.in)
if !reflect.DeepEqual(got, test.want) {
t.Errorf("%v:\ngot %+v\nwant %+v", test.in, got, test.want)
if diff := testutil.Diff(got, test.want); diff != "" {
t.Errorf("%+v:\n, -got, +want:\n%s", test.in, diff)
}
}
}
// TestBQTableFromMetadata checks the TableMetadata -> bq.Table conversion
// used on table creation: plain fields, schema, view queries with legacy vs.
// standard SQL selection, time partitioning, and rejection of read-only or
// conflicting fields.
func TestBQTableFromMetadata(t *testing.T) {
	aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
	// Table expiration travels on the wire as milliseconds since the epoch.
	aTimeMillis := aTime.UnixNano() / 1e6
	sc := Schema{fieldSchema("desc", "name", "STRING", false, true)}
	for _, test := range []struct {
		in   *TableMetadata
		want *bq.Table
	}{
		// Nil and zero metadata both map to an empty table.
		{nil, &bq.Table{}},
		{&TableMetadata{}, &bq.Table{}},
		{
			// Basic fields plus schema and expiration time.
			&TableMetadata{
				Name:           "n",
				Description:    "d",
				Schema:         sc,
				ExpirationTime: aTime,
			},
			&bq.Table{
				FriendlyName: "n",
				Description:  "d",
				Schema: &bq.TableSchema{
					Fields: []*bq.TableFieldSchema{
						bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
					},
				},
				ExpirationTime: aTimeMillis,
			},
		},
		{
			// A view defaults to standard SQL: UseLegacySql=false must be
			// force-sent because false is the field's zero value.
			&TableMetadata{ViewQuery: "q"},
			&bq.Table{
				View: &bq.ViewDefinition{
					Query:           "q",
					UseLegacySql:    false,
					ForceSendFields: []string{"UseLegacySql"},
				},
			},
		},
		{
			// Legacy SQL view; an empty TimePartitioning still selects DAY
			// partitioning with no expiration.
			&TableMetadata{
				ViewQuery:        "q",
				UseLegacySQL:     true,
				TimePartitioning: &TimePartitioning{},
			},
			&bq.Table{
				View: &bq.ViewDefinition{
					Query:        "q",
					UseLegacySql: true,
				},
				TimePartitioning: &bq.TimePartitioning{
					Type:         "DAY",
					ExpirationMs: 0,
				},
			},
		},
		{
			// Explicit standard SQL plus a partition expiration,
			// converted from a Duration to milliseconds.
			&TableMetadata{
				ViewQuery:        "q",
				UseStandardSQL:   true,
				TimePartitioning: &TimePartitioning{time.Second},
			},
			&bq.Table{
				View: &bq.ViewDefinition{
					Query:           "q",
					UseLegacySql:    false,
					ForceSendFields: []string{"UseLegacySql"},
				},
				TimePartitioning: &bq.TimePartitioning{
					Type:         "DAY",
					ExpirationMs: 1000,
				},
			},
		},
	} {
		got, err := bqTableFromMetadata(test.in)
		if err != nil {
			t.Fatalf("%+v: %v", test.in, err)
		}
		if diff := testutil.Diff(got, test.want); diff != "" {
			t.Errorf("%+v:\n-got, +want:\n%s", test.in, diff)
		}
	}
	// Errors: each of these inputs must be rejected by bqTableFromMetadata.
	for _, in := range []*TableMetadata{
		{Schema: sc, ViewQuery: "q"}, // can't have both schema and query
		{UseLegacySQL: true},         // UseLegacySQL without query
		{UseStandardSQL: true},       // UseStandardSQL without query
		// read-only fields
		{FullID: "x"},
		{Type: "x"},
		{CreationTime: aTime},
		{LastModifiedTime: aTime},
		{NumBytes: 1},
		{NumRows: 1},
		{StreamingBuffer: &StreamingBuffer{}},
		{ETag: "x"},
	} {
		_, err := bqTableFromMetadata(in)
		if err == nil {
			t.Errorf("%+v: got nil, want error", in)
		}
	}
}
// TestBQDatasetFromMetadata verifies the DatasetMetadata -> bq.Dataset
// conversion used when creating a dataset, and that read-only fields are
// rejected.
func TestBQDatasetFromMetadata(t *testing.T) {
	cases := []struct {
		in   *DatasetMetadata
		want *bq.Dataset
	}{
		{nil, &bq.Dataset{}},
		{&DatasetMetadata{Name: "name"}, &bq.Dataset{FriendlyName: "name"}},
		{
			&DatasetMetadata{
				Name:                   "name",
				Description:            "desc",
				DefaultTableExpiration: time.Hour,
				Location:               "EU",
				Labels:                 map[string]string{"x": "y"},
			},
			&bq.Dataset{
				FriendlyName:             "name",
				Description:              "desc",
				DefaultTableExpirationMs: 60 * 60 * 1000,
				Location:                 "EU",
				Labels:                   map[string]string{"x": "y"},
			},
		},
	}
	for _, tc := range cases {
		got, err := bqDatasetFromMetadata(tc.in)
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(got, tc.want) {
			t.Errorf("%v:\ngot %+v\nwant %+v", tc.in, got, tc.want)
		}
	}
	// Check that non-writeable fields are unset.
	if _, err := bqDatasetFromMetadata(&DatasetMetadata{FullID: "x"}); err == nil {
		t.Error("got nil, want error")
	}
}
// TestBQDatasetFromUpdateMetadata verifies that a sparse dataset patch is
// built correctly, including the ForceSendFields/NullFields bookkeeping for
// updated fields and deleted labels.
func TestBQDatasetFromUpdateMetadata(t *testing.T) {
	md := DatasetMetadataToUpdate{
		Description:            "desc",
		Name:                   "name",
		DefaultTableExpiration: time.Hour,
	}
	md.SetLabel("label", "value")
	md.DeleteLabel("del")

	want := &bq.Dataset{
		Description:              "desc",
		FriendlyName:             "name",
		DefaultTableExpirationMs: 60 * 60 * 1000,
		Labels:                   map[string]string{"label": "value"},
		ForceSendFields:          []string{"Description", "FriendlyName"},
		NullFields:               []string{"Labels.del"},
	}
	if diff := testutil.Diff(bqDatasetFromUpdateMetadata(&md), want); diff != "" {
		t.Errorf("-got, +want:\n%s", diff)
	}
}

View File

@ -39,18 +39,39 @@ type Table struct {
// TableMetadata contains information about a BigQuery table.
type TableMetadata struct {
Description string // The user-friendly description of this table.
Name string // The user-friendly name for this table.
Schema Schema
View string
// The following fields can be set when creating a table.
ID string // An opaque ID uniquely identifying the table.
Type TableType
// The user-friendly name for the table.
Name string
// The user-friendly description of the table.
Description string
// The table schema. If provided on create, ViewQuery must be empty.
Schema Schema
// The query to use for a view. If provided on create, Schema must be nil.
ViewQuery string
// Use Legacy SQL for the view query.
// At most one of UseLegacySQL and UseStandardSQL can be true.
UseLegacySQL bool
// Use Legacy SQL for the view query. The default.
// At most one of UseLegacySQL and UseStandardSQL can be true.
UseStandardSQL bool
// If non-nil, the table is partitioned by time.
TimePartitioning *TimePartitioning
// The time when this table expires. If not set, the table will persist
// indefinitely. Expired tables will be deleted and their storage reclaimed.
ExpirationTime time.Time
// All the fields below are read-only.
FullID string // An opaque ID uniquely identifying the table.
Type TableType
CreationTime time.Time
LastModifiedTime time.Time
@ -62,13 +83,14 @@ type TableMetadata struct {
// This does not include data that is being buffered during a streaming insert.
NumRows uint64
// The time-based partitioning settings for this table.
TimePartitioning *TimePartitioning
// Contains information regarding this table's streaming buffer, if one is
// present. This field will be nil if the table is not being streamed to or if
// there is no data in the streaming buffer.
StreamingBuffer *StreamingBuffer
// ETag is the ETag obtained when reading metadata. Pass it to Table.Update to
// ensure that the metadata hasn't changed since it was read.
ETag string
}
// TableCreateDisposition specifies the circumstances under which destination table will be created.
@ -111,6 +133,14 @@ const (
ExternalTable TableType = "EXTERNAL"
)
// TimePartitioning describes the time-based date partitioning on a table.
// For more information see: https://cloud.google.com/bigquery/docs/creating-partitioned-tables.
type TimePartitioning struct {
// The amount of time to keep the storage for a partition.
// If the duration is empty (0), the data in the partitions do not expire.
Expiration time.Duration
}
// StreamingBuffer holds information about the streaming buffer.
type StreamingBuffer struct {
// A lower-bound estimate of the number of bytes currently in the streaming
@ -144,16 +174,9 @@ func (t *Table) implicitTable() bool {
}
// Create creates a table in the BigQuery service.
func (t *Table) Create(ctx context.Context, options ...CreateTableOption) error {
conf := &createTableConf{
projectID: t.ProjectID,
datasetID: t.DatasetID,
tableID: t.TableID,
}
for _, o := range options {
o.customizeCreateTable(conf)
}
return t.c.service.createTable(ctx, conf)
// Pass in a TableMetadata value to configure the dataset.
func (t *Table) Create(ctx context.Context, tm *TableMetadata) error {
return t.c.service.createTable(ctx, t.ProjectID, t.DatasetID, t.TableID, tm)
}
// Metadata fetches the metadata for the table.
@ -166,53 +189,6 @@ func (t *Table) Delete(ctx context.Context) error {
return t.c.service.deleteTable(ctx, t.ProjectID, t.DatasetID, t.TableID)
}
// A CreateTableOption is an optional argument to CreateTable.
type CreateTableOption interface {
customizeCreateTable(*createTableConf)
}
type tableExpiration time.Time
// TableExpiration returns a CreateTableOption that will cause the created table to be deleted after the expiration time.
func TableExpiration(exp time.Time) CreateTableOption { return tableExpiration(exp) }
func (opt tableExpiration) customizeCreateTable(conf *createTableConf) {
conf.expiration = time.Time(opt)
}
type viewQuery string
// ViewQuery returns a CreateTableOption that causes the created table to be a virtual table defined by the supplied query.
// For more information see: https://cloud.google.com/bigquery/querying-data#views
func ViewQuery(query string) CreateTableOption { return viewQuery(query) }
func (opt viewQuery) customizeCreateTable(conf *createTableConf) {
conf.viewQuery = string(opt)
}
type useStandardSQL struct{}
// UseStandardSQL returns a CreateTableOption to set the table to use standard SQL.
// The default setting is false (using legacy SQL).
func UseStandardSQL() CreateTableOption { return useStandardSQL{} }
func (opt useStandardSQL) customizeCreateTable(conf *createTableConf) {
conf.useStandardSQL = true
}
// TimePartitioning is a CreateTableOption that can be used to set time-based
// date partitioning on a table.
// For more information see: https://cloud.google.com/bigquery/docs/creating-partitioned-tables
type TimePartitioning struct {
// (Optional) The amount of time to keep the storage for a partition.
// If the duration is empty (0), the data in the partitions do not expire.
Expiration time.Duration
}
func (opt TimePartitioning) customizeCreateTable(conf *createTableConf) {
conf.timePartitioning = &opt
}
// Read fetches the contents of the table.
func (t *Table) Read(ctx context.Context) *RowIterator {
return newRowIterator(ctx, t.c.service, &readTableConf{
@ -223,7 +199,7 @@ func (t *Table) Read(ctx context.Context) *RowIterator {
}
// Update modifies specific Table metadata fields.
func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate) (*TableMetadata, error) {
func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate, etag string) (*TableMetadata, error) {
var conf patchTableConf
if tm.Description != nil {
s := optional.ToString(tm.Description)
@ -234,7 +210,8 @@ func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate) (*TableMet
conf.Name = &s
}
conf.Schema = tm.Schema
return t.c.service.patchTable(ctx, t.ProjectID, t.DatasetID, t.TableID, &conf)
conf.ExpirationTime = tm.ExpirationTime
return t.c.service.patchTable(ctx, t.ProjectID, t.DatasetID, t.TableID, &conf, etag)
}
// TableMetadataToUpdate is used when updating a table's metadata.
@ -250,4 +227,7 @@ type TableMetadataToUpdate struct {
// When updating a schema, you can add columns but not remove them.
Schema Schema
// TODO(jba): support updating the view
// ExpirationTime is the time when this table expires.
ExpirationTime time.Time
}

View File

@ -15,10 +15,12 @@
package bigquery
import (
"reflect"
"testing"
"github.com/google/go-cmp/cmp"
"cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
)
@ -133,7 +135,7 @@ func TestInsertsData(t *testing.T) {
t.Errorf("expected successful Put of ValueSaver; got: %v", err)
}
}
if got, want := irr.rowBatches, tc.data; !reflect.DeepEqual(got, want) {
if got, want := irr.rowBatches, tc.data; !testutil.Equal(got, want) {
t.Errorf("got: %v, want: %v", got, want)
}
}
@ -265,7 +267,7 @@ func TestValueSavers(t *testing.T) {
if err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(got, test.want) {
if !testutil.Equal(got, test.want, cmp.AllowUnexported(testSaver{})) {
t.Errorf("%+v: got %v, want %v", test.in, pretty.Value(got), pretty.Value(test.want))
}
// Make sure Save is successful.

View File

@ -14,11 +14,6 @@
package bigquery
import (
"golang.org/x/net/context"
bq "google.golang.org/api/bigquery/v2"
)
func defaultGCS() *GCSReference {
return &GCSReference{
uris: []string{"uri"},
@ -30,18 +25,3 @@ var defaultQuery = &QueryConfig{
DefaultProjectID: "def-project-id",
DefaultDatasetID: "def-dataset-id",
}
type testService struct {
*bq.Job
service
}
func (s *testService) insertJob(ctx context.Context, projectID string, conf *insertJobConf) (*Job, error) {
s.Job = conf.job
return &Job{}, nil
}
func (s *testService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
return &JobStatus{State: Done}, nil
}

View File

@ -18,12 +18,14 @@ import (
"encoding/base64"
"fmt"
"math"
"reflect"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"cloud.google.com/go/civil"
"cloud.google.com/go/internal/pretty"
"cloud.google.com/go/internal/testutil"
bq "google.golang.org/api/bigquery/v2"
)
@ -50,7 +52,7 @@ func TestConvertBasicValues(t *testing.T) {
t.Fatalf("error converting: %v", err)
}
want := []Value{"a", int64(1), 1.2, true, []byte("foo")}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, want)
}
}
@ -92,7 +94,7 @@ func TestConvertNullValues(t *testing.T) {
t.Fatalf("error converting: %v", err)
}
want := []Value{nil}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("converting null values: got:\n%v\nwant:\n%v", got, want)
}
}
@ -123,7 +125,7 @@ func TestBasicRepetition(t *testing.T) {
t.Fatalf("error converting: %v", err)
}
want := []Value{[]Value{int64(1), int64(2), int64(3)}}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want)
}
}
@ -160,7 +162,7 @@ func TestNestedRecordContainingRepetition(t *testing.T) {
t.Fatalf("error converting: %v", err)
}
want := []Value{[]Value{[]Value{int64(1), int64(2), int64(3)}}}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("converting basic repeated values: got:\n%v\nwant:\n%v", got, want)
}
}
@ -234,7 +236,7 @@ func TestRepeatedRecordContainingRepetition(t *testing.T) {
},
},
}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("converting repeated records with repeated values: got:\n%v\nwant:\n%v", got, want)
}
}
@ -330,7 +332,7 @@ func TestRepeatedRecordContainingRecord(t *testing.T) {
},
},
}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("converting repeated records containing record : got:\n%v\nwant:\n%v", got, want)
}
}
@ -417,7 +419,7 @@ func TestValuesSaverConvertsToMap(t *testing.T) {
t.Errorf("Expected successful save; got: %v", err)
}
got := &insertionRow{insertID, data}
if !reflect.DeepEqual(got, tc.want) {
if !testutil.Equal(got, tc.want) {
t.Errorf("saving ValuesSaver:\ngot:\n%+v\nwant:\n%+v", got, tc.want)
}
}
@ -458,7 +460,7 @@ func TestStructSaver(t *testing.T) {
if wantIID := "iid"; gotIID != wantIID {
t.Errorf("%s: InsertID: got %q, want %q", msg, gotIID, wantIID)
}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("%s:\ngot\n%#v\nwant\n%#v", msg, got, want)
}
}
@ -523,7 +525,7 @@ func TestConvertRows(t *testing.T) {
if err != nil {
t.Fatalf("got %v, want nil", err)
}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("\ngot %v\nwant %v", got, want)
}
}
@ -542,7 +544,7 @@ func TestValueList(t *testing.T) {
t.Fatal(err)
}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("got %+v, want %+v", got, want)
}
@ -551,7 +553,7 @@ func TestValueList(t *testing.T) {
if err := vl.Load(want, schema); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("got %+v, want %+v", got, want)
}
}
@ -588,7 +590,7 @@ func TestValueMap(t *testing.T) {
map[string]Value{"x": 5, "y": 6},
},
}
if !reflect.DeepEqual(vm, valueMap(want)) {
if !testutil.Equal(vm, valueMap(want)) {
t.Errorf("got\n%+v\nwant\n%+v", vm, want)
}
@ -669,7 +671,7 @@ func TestStructLoader(t *testing.T) {
Nested: nested{NestS: "nested", NestI: 17},
Tagged: "z",
}
if !reflect.DeepEqual(&ts1, want) {
if !testutil.Equal(&ts1, want, cmp.AllowUnexported(testStruct1{})) {
t.Errorf("got %+v, want %+v", pretty.Value(ts1), pretty.Value(*want))
d, _, err := pretty.Diff(*want, ts1)
if err == nil {
@ -684,7 +686,7 @@ func TestStructLoader(t *testing.T) {
t.Fatal(err)
}
want2 := &nestedPtr{Nested: &nested{NestS: "nested", NestI: 17}}
if !reflect.DeepEqual(&np, want2) {
if !testutil.Equal(&np, want2) {
t.Errorf("got %+v, want %+v", pretty.Value(np), pretty.Value(*want2))
}
@ -694,7 +696,7 @@ func TestStructLoader(t *testing.T) {
if err := load(&np, schema2, testValues); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(&np, want2) {
if !testutil.Equal(&np, want2) {
t.Errorf("got %+v, want %+v", pretty.Value(np), pretty.Value(*want2))
}
if np.Nested != nst {
@ -739,7 +741,7 @@ func TestStructLoaderRepeated(t *testing.T) {
LongNums: [...]int{1, 2, 3, 0, 0},
Nested: []*nested{{"x", 1}, {"y", 2}},
}
if !reflect.DeepEqual(r1, want) {
if !testutil.Equal(r1, want) {
t.Errorf("got %+v, want %+v", pretty.Value(r1), pretty.Value(want))
}
@ -750,7 +752,7 @@ func TestStructLoaderRepeated(t *testing.T) {
if err := load(&r2, repSchema, repValues); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(r2, want) {
if !testutil.Equal(r2, want) {
t.Errorf("got %+v, want %+v", pretty.Value(r2), pretty.Value(want))
}
if got, want := cap(r2.Nums), 5; got != want {
@ -762,7 +764,7 @@ func TestStructLoaderRepeated(t *testing.T) {
if err := load(&r3, repSchema, repValues); err != nil {
t.Fatal(err)
}
if !reflect.DeepEqual(r3, want) {
if !testutil.Equal(r3, want) {
t.Errorf("got %+v, want %+v", pretty.Value(r3), pretty.Value(want))
}
if got, want := cap(r3.Nums), 3; got != want {
@ -801,7 +803,7 @@ func TestStructLoaderFieldOverlap(t *testing.T) {
t.Fatal(err)
}
want1 := S1{I: 7}
if !reflect.DeepEqual(s1, want1) {
if !testutil.Equal(s1, want1) {
t.Errorf("got %+v, want %+v", pretty.Value(s1), pretty.Value(want1))
}
@ -813,7 +815,7 @@ func TestStructLoaderFieldOverlap(t *testing.T) {
t.Fatal(err)
}
want2 := S2{}
if !reflect.DeepEqual(s2, want2) {
if !testutil.Equal(s2, want2) {
t.Errorf("got %+v, want %+v", pretty.Value(s2), pretty.Value(want2))
}
}

View File

@ -26,10 +26,12 @@ import (
lroauto "cloud.google.com/go/longrunning/autogen"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/api/transport"
gtransport "google.golang.org/api/transport/grpc"
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
"google.golang.org/grpc/codes"
)
// adminAddr is the production endpoint for the Bigtable admin API.
const adminAddr = "bigtableadmin.googleapis.com:443"
@ -52,7 +54,7 @@ func NewAdminClient(ctx context.Context, project, instance string, opts ...optio
return nil, err
}
o = append(o, opts...)
conn, err := transport.DialGRPC(ctx, o...)
conn, err := gtransport.Dial(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
@ -92,17 +94,18 @@ func (ac *AdminClient) Tables(ctx context.Context) ([]string, error) {
return names, nil
}
// TableConf contains all of the information necessary to create a table with column families.
type TableConf struct {
	// TableID is the ID of the table to create.
	TableID string
	// SplitKeys are the initial row-key split points; presumably forwarded as
	// InitialSplits on the CreateTableRequest — confirm against CreateTableFromConf.
	SplitKeys []string
	// Families is a map from family name to GCPolicy
	Families map[string]GCPolicy
}
// CreateTable creates a new table in the instance.
// This method may return before the table's creation is complete.
func (ac *AdminClient) CreateTable(ctx context.Context, table string) error {
ctx = mergeOutgoingMetadata(ctx, ac.md)
prefix := ac.instancePrefix()
req := &btapb.CreateTableRequest{
Parent: prefix,
TableId: table,
}
_, err := ac.tClient.CreateTable(ctx, req)
return err
return ac.CreateTableFromConf(ctx, &TableConf{TableID: table})
}
// CreatePresplitTable creates a new table in the instance.
@ -110,16 +113,29 @@ func (ac *AdminClient) CreateTable(ctx context.Context, table string) error {
// Given two split keys, "s1" and "s2", three tablets will be created,
// spanning the key ranges: [, s1), [s1, s2), [s2, ).
// This method may return before the table's creation is complete.
func (ac *AdminClient) CreatePresplitTable(ctx context.Context, table string, split_keys []string) error {
func (ac *AdminClient) CreatePresplitTable(ctx context.Context, table string, splitKeys []string) error {
return ac.CreateTableFromConf(ctx, &TableConf{TableID: table, SplitKeys: splitKeys})
}
// CreateTableFromConf creates a new table in the instance from the given configuration.
func (ac *AdminClient) CreateTableFromConf(ctx context.Context, conf *TableConf) error {
ctx = mergeOutgoingMetadata(ctx, ac.md)
var req_splits []*btapb.CreateTableRequest_Split
for _, split := range split_keys {
for _, split := range conf.SplitKeys {
req_splits = append(req_splits, &btapb.CreateTableRequest_Split{[]byte(split)})
}
ctx = mergeOutgoingMetadata(ctx, ac.md)
var tbl btapb.Table
if conf.Families != nil {
tbl.ColumnFamilies = make(map[string]*btapb.ColumnFamily)
for fam, policy := range conf.Families {
tbl.ColumnFamilies[fam] = &btapb.ColumnFamily{policy.proto()}
}
}
prefix := ac.instancePrefix()
req := &btapb.CreateTableRequest{
Parent: prefix,
TableId: table,
TableId: conf.TableID,
Table: &tbl,
InitialSplits: req_splits,
}
_, err := ac.tClient.CreateTable(ctx, req)
@ -171,13 +187,13 @@ func (ac *AdminClient) DeleteColumnFamily(ctx context.Context, table, family str
// TableInfo represents information about a table.
type TableInfo struct {
// DEPRECATED - This field is deprecated. Please use FamilyInfos instead.
Families []string
Families []string
FamilyInfos []FamilyInfo
}
// FamilyInfo represents information about a column family.
type FamilyInfo struct {
Name string
Name string
GCPolicy string
}
@ -251,7 +267,7 @@ func NewInstanceAdminClient(ctx context.Context, project string, opts ...option.
return nil, err
}
o = append(o, opts...)
conn, err := transport.DialGRPC(ctx, o...)
conn, err := gtransport.Dial(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
@ -297,6 +313,14 @@ func (st StorageType) proto() btapb.StorageType {
return btapb.StorageType_SSD
}
// InstanceType is the type of the instance
type InstanceType int32

const (
	// PRODUCTION mirrors btapb.Instance_PRODUCTION.
	PRODUCTION InstanceType = InstanceType(btapb.Instance_PRODUCTION)
	// DEVELOPMENT mirrors btapb.Instance_DEVELOPMENT.
	DEVELOPMENT = InstanceType(btapb.Instance_DEVELOPMENT)
)
// InstanceInfo represents information about an instance
type InstanceInfo struct {
Name string // name of the instance
@ -306,8 +330,10 @@ type InstanceInfo struct {
// InstanceConf contains the information necessary to create an Instance
type InstanceConf struct {
InstanceId, DisplayName, ClusterId, Zone string
NumNodes int32
StorageType StorageType
// NumNodes must not be specified for DEVELOPMENT instance types
NumNodes int32
StorageType StorageType
InstanceType InstanceType
}
var instanceNameRegexp = regexp.MustCompile(`^projects/([^/]+)/instances/([a-z][-a-z0-9]*)$`)
@ -319,7 +345,7 @@ func (iac *InstanceAdminClient) CreateInstance(ctx context.Context, conf *Instan
req := &btapb.CreateInstanceRequest{
Parent: "projects/" + iac.project,
InstanceId: conf.InstanceId,
Instance: &btapb.Instance{DisplayName: conf.DisplayName},
Instance: &btapb.Instance{DisplayName: conf.DisplayName, Type: btapb.Instance_Type(conf.InstanceType)},
Clusters: map[string]*btapb.Cluster{
conf.ClusterId: {
ServeNodes: conf.NumNodes,
@ -355,6 +381,11 @@ func (iac *InstanceAdminClient) Instances(ctx context.Context) ([]*InstanceInfo,
if err != nil {
return nil, err
}
if len(res.FailedLocations) > 0 {
// We don't have a good way to return a partial result in the face of some zones being unavailable.
// Fail the entire request.
return nil, status.Errorf(codes.Unavailable, "Failed locations: %v", res.FailedLocations)
}
var is []*InstanceInfo
for _, i := range res.Instances {
@ -369,3 +400,24 @@ func (iac *InstanceAdminClient) Instances(ctx context.Context) ([]*InstanceInfo,
}
return is, nil
}
// InstanceInfo returns information about an instance.
func (iac *InstanceAdminClient) InstanceInfo(ctx context.Context, instanceId string) (*InstanceInfo, error) {
	ctx = mergeOutgoingMetadata(ctx, iac.md)

	// The admin API addresses instances by fully-qualified resource name.
	fullName := "projects/" + iac.project + "/instances/" + instanceId
	res, err := iac.iClient.GetInstance(ctx, &btapb.GetInstanceRequest{Name: fullName})
	if err != nil {
		return nil, err
	}

	// Extract the short instance ID back out of the resource name the server echoes.
	match := instanceNameRegexp.FindStringSubmatch(res.Name)
	if match == nil {
		return nil, fmt.Errorf("malformed instance name %q", res.Name)
	}
	info := &InstanceInfo{
		Name:        match[2],
		DisplayName: res.DisplayName,
	}
	return info, nil
}

View File

@ -21,6 +21,7 @@ import (
"fmt"
"golang.org/x/net/context"
"reflect"
"strings"
)
@ -43,6 +44,22 @@ func TestAdminIntegration(t *testing.T) {
}
defer adminClient.Close()
iAdminClient, err := testEnv.NewInstanceAdminClient()
if err != nil {
t.Fatalf("NewInstanceAdminClient: %v", err)
}
if iAdminClient != nil {
defer iAdminClient.Close()
iInfo, err := iAdminClient.InstanceInfo(ctx, adminClient.instance)
if err != nil {
t.Errorf("InstanceInfo: %v", err)
}
if iInfo.Name != adminClient.instance {
t.Errorf("InstanceInfo returned name %#v, want %#v", iInfo.Name, adminClient.instance)
}
}
list := func() []string {
tbls, err := adminClient.Tables(ctx)
if err != nil {
@ -91,6 +108,28 @@ func TestAdminIntegration(t *testing.T) {
t.Errorf("adminClient.Tables return %#v. unwanted %#v", got, unwanted)
}
tblConf := TableConf{
TableID: "conftable",
Families: map[string]GCPolicy{
"fam1": MaxVersionsPolicy(1),
"fam2": MaxVersionsPolicy(2),
},
}
if err := adminClient.CreateTableFromConf(ctx, &tblConf); err != nil {
t.Fatalf("Creating table from TableConf: %v", err)
}
defer adminClient.DeleteTable(ctx, tblConf.TableID)
tblInfo, err := adminClient.TableInfo(ctx, tblConf.TableID)
if err != nil {
t.Fatalf("Getting table info: %v", err)
}
sort.Strings(tblInfo.Families)
wantFams := []string{"fam1", "fam2"}
if !reflect.DeepEqual(tblInfo.Families, wantFams) {
t.Errorf("Column family mismatch, got %v, want %v", tblInfo.Families, wantFams)
}
// Populate mytable and drop row ranges
if err = adminClient.CreateColumnFamily(ctx, "mytable", "cf"); err != nil {
t.Fatalf("Creating column family: %v", err)

View File

@ -28,7 +28,7 @@ import (
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/api/transport"
gtransport "google.golang.org/api/transport/grpc"
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
@ -53,9 +53,15 @@ func NewClient(ctx context.Context, project, instance string, opts ...option.Cli
return nil, err
}
// Default to a small connection pool that can be overridden.
o = append(o, option.WithGRPCConnectionPool(4))
o = append(o,
option.WithGRPCConnectionPool(4),
// Set the max size to correspond to server-side limits.
option.WithGRPCDialOption(grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20))),
// TODO(grpc/grpc-go#1388) using connection pool without WithBlock
// can cause RPCs to fail randomly. We can delete this after the issue is fixed.
option.WithGRPCDialOption(grpc.WithBlock()))
o = append(o, opts...)
conn, err := transport.DialGRPC(ctx, o...)
conn, err := gtransport.Dial(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
@ -73,7 +79,7 @@ func (c *Client) Close() error {
}
var (
idempotentRetryCodes = []codes.Code{codes.DeadlineExceeded, codes.Unavailable, codes.Aborted, codes.Internal}
idempotentRetryCodes = []codes.Code{codes.DeadlineExceeded, codes.Unavailable, codes.Aborted}
isIdempotentRetryCode = make(map[codes.Code]bool)
retryOptions = []gax.CallOption{
gax.WithDelayTimeoutSettings(100*time.Millisecond, 2000*time.Millisecond, 1.2),
@ -211,6 +217,7 @@ func decodeFamilyProto(r Row, row string, f *btpb.Family) {
}
// RowSet is a set of rows to be read. It is satisfied by RowList, RowRange and RowRangeList.
// The serialized size of the RowSet must be no larger than 1MiB.
type RowSet interface {
proto() *btpb.RowSet
@ -391,6 +398,9 @@ type ReadOption interface {
}
// RowFilter returns a ReadOption that applies f to the contents of read rows.
//
// If multiple RowFilters are provided, only the last is used. To combine filters,
// use ChainFilters or InterleaveFilters instead.
func RowFilter(f Filter) ReadOption { return rowFilter{f} }
type rowFilter struct{ f Filter }
@ -571,7 +581,7 @@ type entryErr struct {
Err error
}
// ApplyBulk applies multiple Mutations.
// ApplyBulk applies multiple Mutations, up to a maximum of 100,000.
// Each mutation is individually applied atomically,
// but the set of mutations may be applied in any order.
//

View File

@ -448,6 +448,24 @@ func TestClientIntegration(t *testing.T) {
t.Fatalf("After %s,\n got %v\nwant %v", step.desc, row, wantRow)
}
}
// Check for google-cloud-go/issues/723. RMWs that insert new rows should keep row order sorted in the emulator.
row, err = tbl.ApplyReadModifyWrite(ctx, "issue-723-2", appendRMW([]byte{0}))
if err != nil {
t.Fatalf("ApplyReadModifyWrite null string: %v", err)
}
row, err = tbl.ApplyReadModifyWrite(ctx, "issue-723-1", appendRMW([]byte{0}))
if err != nil {
t.Fatalf("ApplyReadModifyWrite null string: %v", err)
}
// Get only the correct row back on read.
r, err := tbl.ReadRow(ctx, "issue-723-1")
if err != nil {
t.Fatalf("Reading row: %v", err)
}
if r.Key() != "issue-723-1" {
t.Errorf("ApplyReadModifyWrite: incorrect read after RMW,\n got %v\nwant %v", r.Key(), "issue-723-1")
}
checkpoint("tested ReadModifyWrite")
// Test arbitrary timestamps more thoroughly.
@ -460,11 +478,12 @@ func TestClientIntegration(t *testing.T) {
// Timestamps are used in thousands because the server
// only permits that granularity.
mut.Set("ts", "col", Timestamp(i*1000), []byte(fmt.Sprintf("val-%d", i)))
mut.Set("ts", "col2", Timestamp(i*1000), []byte(fmt.Sprintf("val-%d", i)))
}
if err := tbl.Apply(ctx, "testrow", mut); err != nil {
t.Fatalf("Mutating row: %v", err)
}
r, err := tbl.ReadRow(ctx, "testrow")
r, err = tbl.ReadRow(ctx, "testrow")
if err != nil {
t.Fatalf("Reading row: %v", err)
}
@ -474,6 +493,10 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
{Row: "testrow", Column: "ts:col", Timestamp: 0, Value: []byte("val-0")},
{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
{Row: "testrow", Column: "ts:col2", Timestamp: 0, Value: []byte("val-0")},
}}
if !reflect.DeepEqual(r, wantRow) {
t.Errorf("Cell with multiple versions,\n got %v\nwant %v", r, wantRow)
@ -486,10 +509,39 @@ func TestClientIntegration(t *testing.T) {
wantRow = Row{"ts": []ReadItem{
{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
}}
if !reflect.DeepEqual(r, wantRow) {
t.Errorf("Cell with multiple versions and LatestNFilter(2),\n got %v\nwant %v", r, wantRow)
}
// Check cell offset / limit
r, err = tbl.ReadRow(ctx, "testrow", RowFilter(CellsPerRowLimitFilter(3)))
if err != nil {
t.Fatalf("Reading row: %v", err)
}
wantRow = Row{"ts": []ReadItem{
{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
}}
if !reflect.DeepEqual(r, wantRow) {
t.Errorf("Cell with multiple versions and CellsPerRowLimitFilter(3),\n got %v\nwant %v", r, wantRow)
}
r, err = tbl.ReadRow(ctx, "testrow", RowFilter(CellsPerRowOffsetFilter(3)))
if err != nil {
t.Fatalf("Reading row: %v", err)
}
wantRow = Row{"ts": []ReadItem{
{Row: "testrow", Column: "ts:col", Timestamp: 0, Value: []byte("val-0")},
{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
{Row: "testrow", Column: "ts:col2", Timestamp: 0, Value: []byte("val-0")},
}}
if !reflect.DeepEqual(r, wantRow) {
t.Errorf("Cell with multiple versions and CellsPerRowOffsetFilter(3),\n got %v\nwant %v", r, wantRow)
}
// Check timestamp range filtering (with truncation)
r, err = tbl.ReadRow(ctx, "testrow", RowFilter(TimestampRangeFilterMicros(1001, 3000)))
if err != nil {
@ -498,6 +550,8 @@ func TestClientIntegration(t *testing.T) {
wantRow = Row{"ts": []ReadItem{
{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
}}
if !reflect.DeepEqual(r, wantRow) {
t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 3000),\n got %v\nwant %v", r, wantRow)
@ -510,6 +564,9 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
}}
if !reflect.DeepEqual(r, wantRow) {
t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 0),\n got %v\nwant %v", r, wantRow)
@ -559,6 +616,8 @@ func TestClientIntegration(t *testing.T) {
wantRow = Row{"ts": []ReadItem{
{Row: "testrow", Column: "ts:col", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
}}
if !reflect.DeepEqual(r, wantRow) {
t.Errorf("Cell with multiple versions and LatestNFilter(2), after deleting timestamp 2000,\n got %v\nwant %v", r, wantRow)
@ -717,7 +776,7 @@ func TestClientIntegration(t *testing.T) {
checkpoint("tested high concurrency")
// Large reads, writes and scans.
bigBytes := make([]byte, 3<<20) // 3 MB is large, but less than current gRPC max of 4 MB.
bigBytes := make([]byte, 5<<20) // 5 MB is larger than current default gRPC max of 4 MB, but less than the max we set.
nonsense := []byte("lorem ipsum dolor sit amet, ")
fill(bigBytes, nonsense)
mut = NewMutation()

View File

@ -42,6 +42,7 @@ import (
"time"
"bytes"
emptypb "github.com/golang/protobuf/ptypes/empty"
"github.com/golang/protobuf/ptypes/wrappers"
"golang.org/x/net/context"
@ -118,7 +119,7 @@ func (s *server) CreateTable(ctx context.Context, req *btapb.CreateTableRequest)
s.mu.Lock()
if _, ok := s.tables[tbl]; ok {
s.mu.Unlock()
return nil, fmt.Errorf("table %q already exists", tbl)
return nil, grpc.Errorf(codes.AlreadyExists, "table %q already exists", tbl)
}
s.tables[tbl] = newTable(req)
s.mu.Unlock()
@ -183,7 +184,7 @@ func (s *server) ModifyColumnFamilies(ctx context.Context, req *btapb.ModifyColu
for _, mod := range req.Modifications {
if create := mod.GetCreate(); create != nil {
if _, ok := tbl.families[mod.Id]; ok {
return nil, fmt.Errorf("family %q already exists", mod.Id)
return nil, grpc.Errorf(codes.AlreadyExists, "family %q already exists", mod.Id)
}
newcf := &columnFamily{
name: req.Name + "/columnFamilies/" + mod.Id,
@ -276,7 +277,6 @@ func (s *server) ReadRows(req *btpb.ReadRowsRequest, stream btpb.Bigtable_ReadRo
// Rows to read can be specified by a set of row keys and/or a set of row ranges.
// Output is a stream of sorted, de-duped rows.
tbl.mu.RLock()
rowSet := make(map[string]*row)
if req.Rows != nil {
// Add the explicitly given keys
@ -458,6 +458,38 @@ func filterRow(f *btpb.RowFilter, r *row) bool {
if !rx.MatchString(r.key) {
return false
}
case *btpb.RowFilter_CellsPerRowLimitFilter:
// Grab the first n cells in the row.
lim := int(f.CellsPerRowLimitFilter)
for _, fam := range r.families {
for _, col := range fam.colNames {
cs := fam.cells[col]
if len(cs) > lim {
fam.cells[col] = cs[:lim]
lim = 0
} else {
lim -= len(cs)
}
}
}
return true
case *btpb.RowFilter_CellsPerRowOffsetFilter:
// Skip the first n cells in the row.
offset := int(f.CellsPerRowOffsetFilter)
for _, fam := range r.families {
for _, col := range fam.colNames {
cs := fam.cells[col]
if len(cs) > offset {
fam.cells[col] = cs[offset:]
offset = 0
return true
} else {
fam.cells[col] = cs[:0]
offset -= len(cs)
}
}
}
return true
}
// Any other case, operate on a per-cell basis.
@ -591,9 +623,8 @@ func (s *server) MutateRow(ctx context.Context, req *btpb.MutateRowRequest) (*bt
if !ok {
return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
}
fs := tbl.columnFamilies()
r := tbl.mutableRow(string(req.RowKey))
r, _ := tbl.mutableRow(string(req.RowKey))
r.mu.Lock()
defer tbl.resortRowIndex() // Make sure the row lock is released before this grabs the table lock
defer r.mu.Unlock()
@ -610,14 +641,13 @@ func (s *server) MutateRows(req *btpb.MutateRowsRequest, stream btpb.Bigtable_Mu
if !ok {
return grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
}
res := &btpb.MutateRowsResponse{Entries: make([]*btpb.MutateRowsResponse_Entry, len(req.Entries))}
fs := tbl.columnFamilies()
defer tbl.resortRowIndex()
for i, entry := range req.Entries {
r := tbl.mutableRow(string(entry.RowKey))
r, _ := tbl.mutableRow(string(entry.RowKey))
r.mu.Lock()
code, msg := int32(codes.OK), ""
if err := applyMutations(tbl, r, entry.Mutations, fs); err != nil {
@ -641,12 +671,11 @@ func (s *server) CheckAndMutateRow(ctx context.Context, req *btpb.CheckAndMutate
if !ok {
return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
}
res := &btpb.CheckAndMutateRowResponse{}
fs := tbl.columnFamilies()
r := tbl.mutableRow(string(req.RowKey))
r, _ := tbl.mutableRow(string(req.RowKey))
r.mu.Lock()
defer r.mu.Unlock()
@ -797,12 +826,16 @@ func (s *server) ReadModifyWriteRow(ctx context.Context, req *btpb.ReadModifyWri
if !ok {
return nil, grpc.Errorf(codes.NotFound, "table %q not found", req.TableName)
}
updates := make(map[string]cell) // copy of updated cells; keyed by full column name
fs := tbl.columnFamilies()
r := tbl.mutableRow(string(req.RowKey))
rowKey := string(req.RowKey)
r, isNewRow := tbl.mutableRow(rowKey)
// This must be done before the row lock, acquired below, is released.
if isNewRow {
defer tbl.resortRowIndex()
}
r.mu.Lock()
defer r.mu.Unlock()
// Assume all mutations apply to the most recent version of the cell.
@ -992,13 +1025,13 @@ func (t *table) columnFamilies() map[string]*columnFamily {
return cp
}
func (t *table) mutableRow(row string) *row {
func (t *table) mutableRow(row string) (mutRow *row, isNewRow bool) {
// Try fast path first.
t.mu.RLock()
r := t.rowIndex[row]
t.mu.RUnlock()
if r != nil {
return r
return r, false
}
// We probably need to create the row.
@ -1010,7 +1043,7 @@ func (t *table) mutableRow(row string) *row {
t.rows = append(t.rows, r)
}
t.mu.Unlock()
return r
return r, true
}
func (t *table) resortRowIndex() {

View File

@ -36,10 +36,12 @@ import (
"cloud.google.com/go/bigtable/internal/stat"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/grpc"
)
var (
runFor = flag.Duration("run_for", 5*time.Second, "how long to run the load test for")
runFor = flag.Duration("run_for", 5*time.Second,
"how long to run the load test for; 0 to run forever until SIGTERM")
scratchTable = flag.String("scratch_table", "loadtest-scratch", "name of table to use; should not already exist")
csvOutput = flag.String("csv_output", "",
"output path for statistics in .csv format. If this file already exists it will be overwritten.")
@ -73,7 +75,12 @@ func main() {
var options []option.ClientOption
if *poolSize > 1 {
options = append(options, option.WithGRPCConnectionPool(*poolSize))
options = append(options,
option.WithGRPCConnectionPool(*poolSize),
// TODO(grpc/grpc-go#1388) using connection pool without WithBlock
// can cause RPCs to fail randomly. We can delete this after the issue is fixed.
option.WithGRPCDialOption(grpc.WithBlock()))
}
var csvFile *os.File
@ -125,7 +132,7 @@ func main() {
var reads, writes stats
stopTime := time.Now().Add(*runFor)
var wg sync.WaitGroup
for time.Now().Before(stopTime) {
for time.Now().Before(stopTime) || *runFor == 0 {
sem <- 1
wg.Add(1)
go func() {

View File

@ -64,6 +64,8 @@ type IntegrationTestConfig struct {
type IntegrationEnv interface {
Config() IntegrationTestConfig
NewAdminClient() (*AdminClient, error)
// NewInstanceAdminClient will return nil if instance administration is unsupported in this environment
NewInstanceAdminClient() (*InstanceAdminClient, error)
NewClient() (*Client, error)
Close()
}
@ -96,7 +98,7 @@ type EmulatedEnv struct {
// NewEmulatedEnv builds and starts the emulator based environment
func NewEmulatedEnv(config IntegrationTestConfig) (*EmulatedEnv, error) {
srv, err := bttest.NewServer("127.0.0.1:0")
srv, err := bttest.NewServer("127.0.0.1:0", grpc.MaxRecvMsgSize(200<<20), grpc.MaxSendMsgSize(100<<20))
if err != nil {
return nil, err
}
@ -141,11 +143,16 @@ func (e *EmulatedEnv) NewAdminClient() (*AdminClient, error) {
return NewAdminClient(ctx, e.config.Project, e.config.Instance, option.WithGRPCConn(conn))
}
// NewInstanceAdminClient returns nil for the emulated environment since the API is not implemented.
// Callers must nil-check the returned client before use (the IntegrationEnv
// interface documents this contract, and the admin integration test does so).
func (e *EmulatedEnv) NewInstanceAdminClient() (*InstanceAdminClient, error) {
	return nil, nil
}
// NewClient builds a new connected data client for this environment
func (e *EmulatedEnv) NewClient() (*Client, error) {
timeout := 20 * time.Second
ctx, _ := context.WithTimeout(context.Background(), timeout)
conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure())
conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure(), grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20)))
if err != nil {
return nil, err
}
@ -191,6 +198,17 @@ func (e *ProdEnv) NewAdminClient() (*AdminClient, error) {
return NewAdminClient(ctx, e.config.Project, e.config.Instance, clientOpts...)
}
// NewInstanceAdminClient returns a new connected instance admin client for this environment
func (e *ProdEnv) NewInstanceAdminClient() (*InstanceAdminClient, error) {
	// NOTE(review): the cancel func from WithTimeout is discarded (go vet
	// lostcancel). Preserved as-is for behavior parity — confirm whether
	// cancelling after the client is constructed would be safe.
	ctx, _ := context.WithTimeout(context.Background(), 20*time.Second)

	var opts []option.ClientOption
	if ep := e.config.AdminEndpoint; ep != "" {
		opts = append(opts, option.WithEndpoint(ep))
	}
	return NewInstanceAdminClient(ctx, e.config.Project, opts...)
}
// NewClient builds a connected data client for this environment
func (e *ProdEnv) NewClient() (*Client, error) {
timeout := 20 * time.Second

View File

@ -285,4 +285,34 @@ func (cf conditionFilter) proto() *btpb.RowFilter {
}}}
}
// CellsPerRowOffsetFilter returns a filter that skips the first N cells of each row, matching all subsequent cells.
func CellsPerRowOffsetFilter(n int) Filter {
	return cellsPerRowOffsetFilter(n)
}

type cellsPerRowOffsetFilter int32

// String renders the filter in the debug syntax used by the other filters.
func (f cellsPerRowOffsetFilter) String() string {
	return fmt.Sprintf("cells_per_row_offset(%d)", f)
}

// proto converts the filter to its wire representation.
func (f cellsPerRowOffsetFilter) proto() *btpb.RowFilter {
	offset := &btpb.RowFilter_CellsPerRowOffsetFilter{int32(f)}
	return &btpb.RowFilter{Filter: offset}
}
// CellsPerRowLimitFilter returns a filter that matches only the first N cells of each row.
func CellsPerRowLimitFilter(n int) Filter {
	return cellsPerRowLimitFilter(n)
}

type cellsPerRowLimitFilter int32

// String renders the filter in the debug syntax used by the other filters.
func (f cellsPerRowLimitFilter) String() string {
	return fmt.Sprintf("cells_per_row_limit(%d)", f)
}

// proto converts the filter to its wire representation.
func (f cellsPerRowLimitFilter) proto() *btpb.RowFilter {
	limit := &btpb.RowFilter_CellsPerRowLimitFilter{int32(f)}
	return &btpb.RowFilter{Filter: limit}
}
// TODO(dsymonds): More filters: sampling

View File

@ -16,9 +16,10 @@ package civil
import (
"encoding/json"
"reflect"
"testing"
"time"
"github.com/google/go-cmp/cmp"
)
func TestDates(t *testing.T) {
@ -418,7 +419,7 @@ func TestUnmarshalJSON(t *testing.T) {
if err := json.Unmarshal([]byte(test.data), test.ptr); err != nil {
t.Fatalf("%s: %v", test.data, err)
}
if !reflect.DeepEqual(test.ptr, test.want) {
if !cmp.Equal(test.ptr, test.want) {
t.Errorf("%s: got %#v, want %#v", test.data, test.ptr, test.want)
}
}

View File

@ -12,7 +12,7 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// +build linux
// +build linux,go1.7
package main

View File

@ -28,7 +28,7 @@ import (
cd "google.golang.org/api/clouddebugger/v2"
"google.golang.org/api/googleapi"
"google.golang.org/api/option"
"google.golang.org/api/transport"
htransport "google.golang.org/api/transport/http"
)
const (
@ -92,7 +92,7 @@ type serviceInterface interface {
}
var newService = func(ctx context.Context, tokenSource oauth2.TokenSource) (serviceInterface, error) {
httpClient, endpoint, err := transport.NewHTTPClient(ctx, option.WithTokenSource(tokenSource))
httpClient, endpoint, err := htransport.NewClient(ctx, option.WithTokenSource(tokenSource))
if err != nil {
return nil, err
}

View File

@ -25,7 +25,7 @@ import (
"golang.org/x/net/context"
raw "google.golang.org/api/container/v1"
"google.golang.org/api/option"
"google.golang.org/api/transport"
htransport "google.golang.org/api/transport/http"
)
type Type string
@ -64,7 +64,7 @@ func NewClient(ctx context.Context, projectID string, opts ...option.ClientOptio
option.WithUserAgent(userAgent),
}
o = append(o, opts...)
httpClient, endpoint, err := transport.NewHTTPClient(ctx, o...)
httpClient, endpoint, err := htransport.NewClient(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}

118
vendor/cloud.google.com/go/datastore/client.go generated vendored Normal file
View File

@ -0,0 +1,118 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package datastore
import (
"fmt"
gax "github.com/googleapis/gax-go"
"cloud.google.com/go/internal"
"cloud.google.com/go/internal/version"
"golang.org/x/net/context"
pb "google.golang.org/genproto/googleapis/datastore/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
)
// datastoreClient is a wrapper for the pb.DatastoreClient that includes gRPC
// metadata to be sent in each request for server-side traffic management.
type datastoreClient struct {
	// Embed so we still implement the DatastoreClient interface,
	// if the interface adds more methods.
	pb.DatastoreClient

	// c is the underlying generated client that actually issues each RPC.
	c pb.DatastoreClient
	// md is merged into the outgoing context of every call (see invoke).
	md metadata.MD
}
// newDatastoreClient wraps the generated Datastore client with the per-call
// metadata (resource prefix and API-client version headers) required by the
// backend for traffic management.
func newDatastoreClient(conn *grpc.ClientConn, projectID string) pb.DatastoreClient {
	apiClient := fmt.Sprintf("gl-go/%s gccl/%s grpc/", version.Go(), version.Repo)
	md := metadata.Pairs(
		resourcePrefixHeader, "projects/"+projectID,
		"x-goog-api-client", apiClient)
	return &datastoreClient{c: pb.NewDatastoreClient(conn), md: md}
}
// Lookup forwards to the underlying client's Lookup, retrying transient
// failures via invoke.
func (dc *datastoreClient) Lookup(ctx context.Context, in *pb.LookupRequest, opts ...grpc.CallOption) (res *pb.LookupResponse, err error) {
	call := func(ctx context.Context) error {
		res, err = dc.c.Lookup(ctx, in, opts...)
		return err
	}
	err = dc.invoke(ctx, call)
	return res, err
}
// RunQuery forwards to the underlying client's RunQuery, retrying transient
// failures via invoke.
func (dc *datastoreClient) RunQuery(ctx context.Context, in *pb.RunQueryRequest, opts ...grpc.CallOption) (res *pb.RunQueryResponse, err error) {
	call := func(ctx context.Context) error {
		res, err = dc.c.RunQuery(ctx, in, opts...)
		return err
	}
	err = dc.invoke(ctx, call)
	return res, err
}
// BeginTransaction forwards to the underlying client's BeginTransaction,
// retrying transient failures via invoke.
func (dc *datastoreClient) BeginTransaction(ctx context.Context, in *pb.BeginTransactionRequest, opts ...grpc.CallOption) (res *pb.BeginTransactionResponse, err error) {
	call := func(ctx context.Context) error {
		res, err = dc.c.BeginTransaction(ctx, in, opts...)
		return err
	}
	err = dc.invoke(ctx, call)
	return res, err
}
// Commit forwards to the underlying client's Commit, retrying transient
// failures via invoke.
func (dc *datastoreClient) Commit(ctx context.Context, in *pb.CommitRequest, opts ...grpc.CallOption) (res *pb.CommitResponse, err error) {
	call := func(ctx context.Context) error {
		res, err = dc.c.Commit(ctx, in, opts...)
		return err
	}
	err = dc.invoke(ctx, call)
	return res, err
}
// Rollback aborts a transaction, attaching the wrapper's metadata and
// retrying transient failures via invoke.
func (dc *datastoreClient) Rollback(ctx context.Context, in *pb.RollbackRequest, opts ...grpc.CallOption) (*pb.RollbackResponse, error) {
    var res *pb.RollbackResponse
    err := dc.invoke(ctx, func(ctx context.Context) error {
        var callErr error
        res, callErr = dc.c.Rollback(ctx, in, opts...)
        return callErr
    })
    return res, err
}
// AllocateIds reserves IDs for incomplete keys, attaching the wrapper's
// metadata and retrying transient failures via invoke.
func (dc *datastoreClient) AllocateIds(ctx context.Context, in *pb.AllocateIdsRequest, opts ...grpc.CallOption) (*pb.AllocateIdsResponse, error) {
    var res *pb.AllocateIdsResponse
    err := dc.invoke(ctx, func(ctx context.Context) error {
        var callErr error
        res, callErr = dc.c.AllocateIds(ctx, in, opts...)
        return callErr
    })
    return res, err
}
// invoke runs f with the wrapper's gRPC metadata attached to ctx, retrying
// through internal.Retry for as long as shouldRetry reports the returned
// error as transient.
// NOTE(review): gax.Backoff{} passes a zero-value backoff; retry timing
// therefore depends on internal.Retry's handling of the zero value — confirm.
func (dc *datastoreClient) invoke(ctx context.Context, f func(ctx context.Context) error) error {
    ctx = metadata.NewOutgoingContext(ctx, dc.md)
    return internal.Retry(ctx, gax.Backoff{}, func() (stop bool, err error) {
        // Stop as soon as the call succeeds or fails non-retryably.
        err = f(ctx)
        return !shouldRetry(err), err
    })
}
// shouldRetry reports whether err is a transient Datastore error worth
// retrying: gRPC Unavailable or DeadlineExceeded.
// See https://cloud.google.com/datastore/docs/concepts/errors.
func shouldRetry(err error) bool {
    if err == nil {
        return false
    }
    st, ok := status.FromError(err)
    if !ok {
        // Not a gRPC status error; treat it as permanent.
        return false
    }
    switch st.Code() {
    case codes.Unavailable, codes.DeadlineExceeded:
        return true
    default:
        return false
    }
}

View File

@ -21,15 +21,11 @@ import (
"os"
"reflect"
"cloud.google.com/go/internal/version"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/api/transport"
gtransport "google.golang.org/api/transport/grpc"
pb "google.golang.org/genproto/googleapis/datastore/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
)
const (
@ -44,56 +40,6 @@ const ScopeDatastore = "https://www.googleapis.com/auth/datastore"
// the resource being operated on.
const resourcePrefixHeader = "google-cloud-resource-prefix"
// protoClient is an interface for *transport.ProtoClient to support injecting
// fake clients in tests.
type protoClient interface {
Call(context.Context, string, proto.Message, proto.Message) error
}
// datastoreClient is a wrapper for the pb.DatastoreClient that includes gRPC
// metadata to be sent in each request for server-side traffic management.
type datastoreClient struct {
// Embed so we still implement the DatastoreClient interface,
// if the interface adds more methods.
pb.DatastoreClient
c pb.DatastoreClient
md metadata.MD
}
func newDatastoreClient(conn *grpc.ClientConn, projectID string) pb.DatastoreClient {
return &datastoreClient{
c: pb.NewDatastoreClient(conn),
md: metadata.Pairs(
resourcePrefixHeader, "projects/"+projectID,
"x-goog-api-client", fmt.Sprintf("gl-go/%s gccl/%s grpc/", version.Go(), version.Repo)),
}
}
func (dc *datastoreClient) Lookup(ctx context.Context, in *pb.LookupRequest, opts ...grpc.CallOption) (*pb.LookupResponse, error) {
return dc.c.Lookup(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) RunQuery(ctx context.Context, in *pb.RunQueryRequest, opts ...grpc.CallOption) (*pb.RunQueryResponse, error) {
return dc.c.RunQuery(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) BeginTransaction(ctx context.Context, in *pb.BeginTransactionRequest, opts ...grpc.CallOption) (*pb.BeginTransactionResponse, error) {
return dc.c.BeginTransaction(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) Commit(ctx context.Context, in *pb.CommitRequest, opts ...grpc.CallOption) (*pb.CommitResponse, error) {
return dc.c.Commit(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) Rollback(ctx context.Context, in *pb.RollbackRequest, opts ...grpc.CallOption) (*pb.RollbackResponse, error) {
return dc.c.Rollback(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
}
func (dc *datastoreClient) AllocateIds(ctx context.Context, in *pb.AllocateIdsRequest, opts ...grpc.CallOption) (*pb.AllocateIdsResponse, error) {
return dc.c.AllocateIds(metadata.NewOutgoingContext(ctx, dc.md), in, opts...)
}
// Client is a client for reading and writing data in a datastore dataset.
type Client struct {
conn *grpc.ClientConn
@ -138,7 +84,7 @@ func NewClient(ctx context.Context, projectID string, opts ...option.ClientOptio
return nil, errors.New("datastore: missing project/dataset id")
}
o = append(o, opts...)
conn, err := transport.DialGRPC(ctx, o...)
conn, err := gtransport.Dial(ctx, o...)
if err != nil {
return nil, fmt.Errorf("dialing: %v", err)
}
@ -201,7 +147,6 @@ func keyToProto(k *Key) *pb.Key {
return nil
}
// TODO(jbd): Eliminate unrequired allocations.
var path []*pb.Key_PathElement
for {
el := &pb.Key_PathElement{Kind: k.Kind}
@ -210,12 +155,19 @@ func keyToProto(k *Key) *pb.Key {
} else if k.Name != "" {
el.IdType = &pb.Key_PathElement_Name{Name: k.Name}
}
path = append([]*pb.Key_PathElement{el}, path...)
path = append(path, el)
if k.Parent == nil {
break
}
k = k.Parent
}
// The path should be in order [grandparent, parent, child]
// We did it backward above, so reverse back.
for i := 0; i < len(path)/2; i++ {
path[i], path[len(path)-i-1] = path[len(path)-i-1], path[i]
}
key := &pb.Key{Path: path}
if k.Namespace != "" {
key.PartitionId = &pb.PartitionId{
@ -390,17 +342,21 @@ func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb
return nil
}
// Go through keys, validate them, serialize then, and create a dict mapping them to their index
// Go through keys, validate them, serialize then, and create a dict mapping them to their indices.
// Equal keys are deduped.
multiErr, any := make(MultiError, len(keys)), false
keyMap := make(map[string]int)
pbKeys := make([]*pb.Key, len(keys))
keyMap := make(map[string][]int, len(keys))
pbKeys := make([]*pb.Key, 0, len(keys))
for i, k := range keys {
if !k.valid() {
multiErr[i] = ErrInvalidKey
any = true
} else {
keyMap[k.String()] = i
pbKeys[i] = keyToProto(k)
ks := k.String()
if _, ok := keyMap[ks]; !ok {
pbKeys = append(pbKeys, keyToProto(k))
}
keyMap[ks] = append(keyMap[ks], i)
}
}
if any {
@ -434,25 +390,26 @@ func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb
found = append(found, resp.Found...)
missing = append(missing, resp.Missing...)
}
if len(keys) != len(found)+len(missing) {
return errors.New("datastore: internal error: server returned the wrong number of entities")
}
filled := 0
for _, e := range found {
k, err := protoToKey(e.Entity.Key)
if err != nil {
return errors.New("datastore: internal error: server returned an invalid key")
}
index := keyMap[k.String()]
elem := v.Index(index)
if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
elem = elem.Addr()
}
if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
elem.Set(reflect.New(elem.Type().Elem()))
}
if err := loadEntityProto(elem.Interface(), e.Entity); err != nil {
multiErr[index] = err
any = true
filled += len(keyMap[k.String()])
for _, index := range keyMap[k.String()] {
elem := v.Index(index)
if multiArgType == multiArgTypePropertyLoadSaver || multiArgType == multiArgTypeStruct {
elem = elem.Addr()
}
if multiArgType == multiArgTypeStructPtr && elem.IsNil() {
elem.Set(reflect.New(elem.Type().Elem()))
}
if err := loadEntityProto(elem.Interface(), e.Entity); err != nil {
multiErr[index] = err
any = true
}
}
}
for _, e := range missing {
@ -460,9 +417,17 @@ func (c *Client) get(ctx context.Context, keys []*Key, dst interface{}, opts *pb
if err != nil {
return errors.New("datastore: internal error: server returned an invalid key")
}
multiErr[keyMap[k.String()]] = ErrNoSuchEntity
filled += len(keyMap[k.String()])
for _, index := range keyMap[k.String()] {
multiErr[index] = ErrNoSuchEntity
}
any = true
}
if filled != len(keys) {
return errors.New("datastore: internal error: server returned the wrong number of entities")
}
if any {
return multiErr
}
@ -592,13 +557,18 @@ func (c *Client) DeleteMulti(ctx context.Context, keys []*Key) error {
func deleteMutations(keys []*Key) ([]*pb.Mutation, error) {
mutations := make([]*pb.Mutation, 0, len(keys))
set := make(map[string]bool, len(keys))
for _, k := range keys {
if k.Incomplete() {
return nil, fmt.Errorf("datastore: can't delete the incomplete key: %v", k)
}
mutations = append(mutations, &pb.Mutation{
Operation: &pb.Mutation_Delete{Delete: keyToProto(k)},
})
ks := k.String()
if !set[ks] {
mutations = append(mutations, &pb.Mutation{
Operation: &pb.Mutation_Delete{Delete: keyToProto(k)},
})
}
set[ks] = true
}
return mutations, nil
}

View File

@ -170,6 +170,7 @@ func TestGetMulti(t *testing.T) {
{key: NameKey("X", "item1", p), put: true},
{key: NameKey("X", "item2", p), put: false},
{key: NameKey("X", "item3", p), put: false},
{key: NameKey("X", "item3", p), put: false},
{key: NameKey("X", "item4", p), put: true},
}
@ -1003,6 +1004,8 @@ func TestNilPointers(t *testing.T) {
t.Errorf("Get: err %v; want %v", err, want)
}
// Test that deleting with duplicate keys work.
keys = append(keys, keys...)
if err := client.DeleteMulti(ctx, keys); err != nil {
t.Errorf("Delete: %v", err)
}

View File

@ -57,17 +57,6 @@ func defaultController2CallOptions() *Controller2CallOptions {
})
}),
},
{"default", "non_idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &Controller2CallOptions{
RegisterDebuggee: retry[[2]string{"default", "non_idempotent"}],
@ -99,10 +88,10 @@ type Controller2Client struct {
//
// The debugger agents register with the Controller to identify the application
// being debugged, the Debuggee. All agents that register with the same data,
// represent the same Debuggee, and are assigned the same `debuggee_id`.
// represent the same Debuggee, and are assigned the same debuggee_id.
//
// The debugger agents call the Controller to retrieve the list of active
// Breakpoints. Agents with the same `debuggee_id` get the same breakpoints
// Breakpoints. Agents with the same debuggee_id get the same breakpoints
// list. An agent that can fulfill the breakpoint request updates the
// Controller with the breakpoint result. The controller selects the first
// result received and discards the rest of the results.
@ -150,14 +139,14 @@ func (c *Controller2Client) SetGoogleClientInfo(keyval ...string) {
// RegisterDebuggee registers the debuggee with the controller service.
//
// All agents attached to the same application should call this method with
// the same request content to get back the same stable `debuggee_id`. Agents
// should call this method again whenever `google.rpc.Code.NOT_FOUND` is
// returned from any controller method.
// All agents attached to the same application must call this method with
// exactly the same request content to get back the same stable debuggee_id.
// Agents should call this method again whenever google.rpc.Code.NOT_FOUND
// is returned from any controller method.
//
// This allows the controller service to disable the agent or recover from any
// data loss. If the debuggee is disabled by the server, the response will
// have `is_disabled` set to `true`.
// This protocol allows the controller service to disable debuggees, recover
// from data loss, or change the debuggee_id format. Agents must handle
// debuggee_id value changing upon re-registration.
func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebuggerpb.RegisterDebuggeeRequest, opts ...gax.CallOption) (*clouddebuggerpb.RegisterDebuggeeResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.RegisterDebuggee[0:len(c.CallOptions.RegisterDebuggee):len(c.CallOptions.RegisterDebuggee)], opts...)
@ -175,7 +164,7 @@ func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebu
// ListActiveBreakpoints returns the list of all active breakpoints for the debuggee.
//
// The breakpoint specification (location, condition, and expression
// The breakpoint specification (location, condition, and expressions
// fields) is semantically immutable, although the field values may
// change. For example, an agent may update the location line number
// to reflect the actual line where the breakpoint was set, but this
@ -202,12 +191,11 @@ func (c *Controller2Client) ListActiveBreakpoints(ctx context.Context, req *clou
}
// UpdateActiveBreakpoint updates the breakpoint state or mutable fields.
// The entire Breakpoint message must be sent back to the controller
// service.
// The entire Breakpoint message must be sent back to the controller service.
//
// Updates to active breakpoint fields are only allowed if the new value
// does not change the breakpoint specification. Updates to the `location`,
// `condition` and `expression` fields should not alter the breakpoint
// does not change the breakpoint specification. Updates to the location,
// condition and expressions fields should not alter the breakpoint
// semantics. These may only make changes such as canonicalizing a value
// or snapping the location to the correct line of code.
func (c *Controller2Client) UpdateActiveBreakpoint(ctx context.Context, req *clouddebuggerpb.UpdateActiveBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.UpdateActiveBreakpointResponse, error) {

View File

@ -59,17 +59,6 @@ func defaultDebugger2CallOptions() *Debugger2CallOptions {
})
}),
},
{"default", "non_idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &Debugger2CallOptions{
SetBreakpoint: retry[[2]string{"default", "non_idempotent"}],
@ -102,9 +91,9 @@ type Debugger2Client struct {
// and without modifying its state. An application may include one or
// more replicated processes performing the same work.
//
// The application is represented using the Debuggee concept. The Debugger
// service provides a way to query for available Debuggees, but does not
// provide a way to create one. A debuggee is created using the Controller
// A debugged application is represented using the Debuggee concept. The
// Debugger service provides a way to query for available debuggees, but does
// not provide a way to create one. A debuggee is created using the Controller
// service, usually by running a debugger agent with the application.
//
// The Debugger service enables the client to set one or more Breakpoints on a
@ -204,7 +193,7 @@ func (c *Debugger2Client) ListBreakpoints(ctx context.Context, req *clouddebugge
return resp, nil
}
// ListDebuggees lists all the debuggees that the user can set breakpoints to.
// ListDebuggees lists all the debuggees that the user has access to.
func (c *Debugger2Client) ListDebuggees(ctx context.Context, req *clouddebuggerpb.ListDebuggeesRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListDebuggeesResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ListDebuggees[0:len(c.CallOptions.ListDebuggees):len(c.CallOptions.ListDebuggees)], opts...)

View File

@ -15,7 +15,7 @@
// AUTO-GENERATED CODE. DO NOT EDIT.
// Package debugger is an experimental, auto-generated package for the
// debugger API.
// Stackdriver Debugger API.
//
// Examines the call stack and variables of a running application
// without stopping or slowing it down.
@ -35,6 +35,7 @@ func insertXGoog(ctx context.Context, val []string) context.Context {
return metadata.NewOutgoingContext(ctx, md)
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",

View File

@ -39,6 +39,7 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
gstatus "google.golang.org/grpc/status"
)
var _ = io.EOF
@ -241,7 +242,7 @@ func TestDebugger2SetBreakpoint(t *testing.T) {
func TestDebugger2SetBreakpointError(t *testing.T) {
errCode := codes.PermissionDenied
mockDebugger2.err = grpc.Errorf(errCode, "test error")
mockDebugger2.err = gstatus.Error(errCode, "test error")
var debuggeeId string = "debuggeeId-997255898"
var breakpoint *clouddebuggerpb.Breakpoint = &clouddebuggerpb.Breakpoint{}
@ -259,7 +260,9 @@ func TestDebugger2SetBreakpointError(t *testing.T) {
resp, err := c.SetBreakpoint(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
@ -303,7 +306,7 @@ func TestDebugger2GetBreakpoint(t *testing.T) {
func TestDebugger2GetBreakpointError(t *testing.T) {
errCode := codes.PermissionDenied
mockDebugger2.err = grpc.Errorf(errCode, "test error")
mockDebugger2.err = gstatus.Error(errCode, "test error")
var debuggeeId string = "debuggeeId-997255898"
var breakpointId string = "breakpointId498424873"
@ -321,7 +324,9 @@ func TestDebugger2GetBreakpointError(t *testing.T) {
resp, err := c.GetBreakpoint(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
@ -362,7 +367,7 @@ func TestDebugger2DeleteBreakpoint(t *testing.T) {
func TestDebugger2DeleteBreakpointError(t *testing.T) {
errCode := codes.PermissionDenied
mockDebugger2.err = grpc.Errorf(errCode, "test error")
mockDebugger2.err = gstatus.Error(errCode, "test error")
var debuggeeId string = "debuggeeId-997255898"
var breakpointId string = "breakpointId498424873"
@ -380,7 +385,9 @@ func TestDebugger2DeleteBreakpointError(t *testing.T) {
err = c.DeleteBreakpoint(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
}
@ -424,7 +431,7 @@ func TestDebugger2ListBreakpoints(t *testing.T) {
func TestDebugger2ListBreakpointsError(t *testing.T) {
errCode := codes.PermissionDenied
mockDebugger2.err = grpc.Errorf(errCode, "test error")
mockDebugger2.err = gstatus.Error(errCode, "test error")
var debuggeeId string = "debuggeeId-997255898"
var clientVersion string = "clientVersion-1506231196"
@ -440,7 +447,9 @@ func TestDebugger2ListBreakpointsError(t *testing.T) {
resp, err := c.ListBreakpoints(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
@ -482,7 +491,7 @@ func TestDebugger2ListDebuggees(t *testing.T) {
func TestDebugger2ListDebuggeesError(t *testing.T) {
errCode := codes.PermissionDenied
mockDebugger2.err = grpc.Errorf(errCode, "test error")
mockDebugger2.err = gstatus.Error(errCode, "test error")
var project string = "project-309310695"
var clientVersion string = "clientVersion-1506231196"
@ -498,7 +507,9 @@ func TestDebugger2ListDebuggeesError(t *testing.T) {
resp, err := c.ListDebuggees(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
@ -538,7 +549,7 @@ func TestController2RegisterDebuggee(t *testing.T) {
func TestController2RegisterDebuggeeError(t *testing.T) {
errCode := codes.PermissionDenied
mockController2.err = grpc.Errorf(errCode, "test error")
mockController2.err = gstatus.Error(errCode, "test error")
var debuggee *clouddebuggerpb.Debuggee = &clouddebuggerpb.Debuggee{}
var request = &clouddebuggerpb.RegisterDebuggeeRequest{
@ -552,7 +563,9 @@ func TestController2RegisterDebuggeeError(t *testing.T) {
resp, err := c.RegisterDebuggee(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
@ -597,7 +610,7 @@ func TestController2ListActiveBreakpoints(t *testing.T) {
func TestController2ListActiveBreakpointsError(t *testing.T) {
errCode := codes.PermissionDenied
mockController2.err = grpc.Errorf(errCode, "test error")
mockController2.err = gstatus.Error(errCode, "test error")
var debuggeeId string = "debuggeeId-997255898"
var request = &clouddebuggerpb.ListActiveBreakpointsRequest{
@ -611,7 +624,9 @@ func TestController2ListActiveBreakpointsError(t *testing.T) {
resp, err := c.ListActiveBreakpoints(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
@ -653,7 +668,7 @@ func TestController2UpdateActiveBreakpoint(t *testing.T) {
func TestController2UpdateActiveBreakpointError(t *testing.T) {
errCode := codes.PermissionDenied
mockController2.err = grpc.Errorf(errCode, "test error")
mockController2.err = gstatus.Error(errCode, "test error")
var debuggeeId string = "debuggeeId-997255898"
var breakpoint *clouddebuggerpb.Breakpoint = &clouddebuggerpb.Breakpoint{}
@ -669,7 +684,9 @@ func TestController2UpdateActiveBreakpointError(t *testing.T) {
resp, err := c.UpdateActiveBreakpoint(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp

View File

@ -0,0 +1,77 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package dlp
import (
dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1"
)
import (
"strconv"
"testing"
"time"
"cloud.google.com/go/internal/testutil"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
"google.golang.org/api/option"
)
var _ = iterator.Done
var _ = strconv.FormatUint
var _ = time.Now
// TestDlpServiceSmoke exercises InspectContent against the live DLP API.
// It is skipped in short mode or when integration-test credentials are
// unavailable.
func TestDlpServiceSmoke(t *testing.T) {
    if testing.Short() {
        t.Skip("skipping smoke test in short mode")
    }
    ctx := context.Background()
    ts := testutil.TokenSource(ctx, DefaultAuthScopes()...)
    if ts == nil {
        t.Skip("Integration tests skipped. See CONTRIBUTING.md for details")
    }

    projectId := testutil.ProjID()
    _ = projectId

    c, err := NewClient(ctx, option.WithTokenSource(ts))
    if err != nil {
        t.Fatal(err)
    }

    // Inspect a single plain-text item containing a phone number.
    item := &dlppb.ContentItem{
        Type: "text/plain",
        DataItem: &dlppb.ContentItem_Value{
            Value: "my phone number is 215-512-1212",
        },
    }
    request := &dlppb.InspectContentRequest{
        InspectConfig: &dlppb.InspectConfig{
            MinLikelihood: dlppb.Likelihood_POSSIBLE,
        },
        Items: []*dlppb.ContentItem{item},
    }
    if _, err := c.InspectContent(ctx, request); err != nil {
        t.Error(err)
    }
}

327
vendor/cloud.google.com/go/dlp/apiv2beta1/dlp_client.go generated vendored Normal file
View File

@ -0,0 +1,327 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package dlp
import (
"time"
"cloud.google.com/go/internal/version"
"cloud.google.com/go/longrunning"
lroauto "cloud.google.com/go/longrunning/autogen"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/api/transport"
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
    // One gax.CallOption slice per RPC; a nil slice means the method is
    // invoked without client-side retry.
    InspectContent []gax.CallOption
    RedactContent []gax.CallOption
    CreateInspectOperation []gax.CallOption
    ListInspectFindings []gax.CallOption
    ListInfoTypes []gax.CallOption
    ListRootCategories []gax.CallOption
}
// defaultClientOptions returns the connection options used when the caller
// supplies none: the DLP API endpoint plus the default auth scopes.
func defaultClientOptions() []option.ClientOption {
    opts := make([]option.ClientOption, 0, 2)
    opts = append(opts, option.WithEndpoint("dlp.googleapis.com:443"))
    opts = append(opts, option.WithScopes(DefaultAuthScopes()...))
    return opts
}
// defaultCallOptions builds the per-method retry policy table.
// Only the {"default", "idempotent"} profile is defined below; indexing the
// map with {"default", "non_idempotent"} yields the zero value (nil), so the
// non-idempotent methods are configured with no client-side retry.
func defaultCallOptions() *CallOptions {
    retry := map[[2]string][]gax.CallOption{
        {"default", "idempotent"}: {
            gax.WithRetry(func() gax.Retryer {
                // Retry transient codes with exponential backoff:
                // 100ms initial, 60s cap, 1.3x multiplier.
                return gax.OnCodes([]codes.Code{
                    codes.DeadlineExceeded,
                    codes.Unavailable,
                }, gax.Backoff{
                    Initial: 100 * time.Millisecond,
                    Max: 60000 * time.Millisecond,
                    Multiplier: 1.3,
                })
            }),
        },
    }
    return &CallOptions{
        InspectContent: retry[[2]string{"default", "non_idempotent"}],
        RedactContent: retry[[2]string{"default", "non_idempotent"}],
        CreateInspectOperation: retry[[2]string{"default", "non_idempotent"}],
        ListInspectFindings: retry[[2]string{"default", "idempotent"}],
        ListInfoTypes: retry[[2]string{"default", "idempotent"}],
        ListRootCategories: retry[[2]string{"default", "idempotent"}],
    }
}
// Client is a client for interacting with DLP API.
type Client struct {
    // The connection to the service.
    conn *grpc.ClientConn
    // The gRPC API client.
    client dlppb.DlpServiceClient
    // LROClient is used internally to handle longrunning operations.
    // It is exposed so that its CallOptions can be modified if required.
    // Users should not Close this client.
    LROClient *lroauto.OperationsClient
    // The call options for this service.
    CallOptions *CallOptions
    // The x-goog-api-client metadata value attached to each request;
    // populated by setGoogleClientInfo.
    xGoogHeader []string
}
// NewClient creates a new dlp service client.
//
// The DLP API is a service that allows clients
// to detect the presence of Personally Identifiable Information (PII) and other
// privacy-sensitive data in user-supplied, unstructured data streams, like text
// blocks or images.
// The service also includes methods for sensitive data redaction and
// scheduling of data scans on Google Cloud Platform based data sets.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
    allOpts := append(defaultClientOptions(), opts...)
    conn, err := transport.DialGRPC(ctx, allOpts...)
    if err != nil {
        return nil, err
    }
    c := &Client{
        conn: conn,
        client: dlppb.NewDlpServiceClient(conn),
        CallOptions: defaultCallOptions(),
    }
    c.setGoogleClientInfo()

    lro, err := lroauto.NewOperationsClient(ctx, option.WithGRPCConn(conn))
    if err != nil {
        // This error "should not happen", since we are just reusing old connection
        // and never actually need to dial.
        // If this does happen, we could leak conn. However, we cannot close conn:
        // If the user invoked the function with option.WithGRPCConn,
        // we would close a connection that's still in use.
        // TODO(pongad): investigate error conditions.
        return nil, err
    }
    c.LROClient = lro
    return c, nil
}
// Connection returns the client's connection to the API service.
// The returned connection is shared with the LRO client; callers should not
// close it directly (use Close).
func (c *Client) Connection() *grpc.ClientConn {
    return c.conn
}
// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
    return c.conn.Close()
}
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) setGoogleClientInfo(keyval ...string) {
    // Order: language version, caller-supplied pairs, then library versions.
    kv := []string{"gl-go", version.Go()}
    kv = append(kv, keyval...)
    kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
    c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
}
// ResultPath returns the path for the result resource.
func ResultPath(result string) string {
    return "inspect/results/" + result
}
// InspectContent finds potentially sensitive info in a list of strings.
// This method has limits on input size, processing time, and output size.
func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest, opts ...gax.CallOption) (*dlppb.InspectContentResponse, error) {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    // Full-slice the defaults so appending caller opts cannot clobber them.
    base := c.CallOptions.InspectContent
    opts = append(base[:len(base):len(base)], opts...)
    var resp *dlppb.InspectContentResponse
    if err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        r, err := c.client.InspectContent(ctx, req, settings.GRPC...)
        resp = r
        return err
    }, opts...); err != nil {
        return nil, err
    }
    return resp, nil
}
// RedactContent redacts potentially sensitive info from a list of strings.
// This method has limits on input size, processing time, and output size.
func (c *Client) RedactContent(ctx context.Context, req *dlppb.RedactContentRequest, opts ...gax.CallOption) (*dlppb.RedactContentResponse, error) {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    // Full-slice the defaults so appending caller opts cannot clobber them.
    base := c.CallOptions.RedactContent
    opts = append(base[:len(base):len(base)], opts...)
    var resp *dlppb.RedactContentResponse
    if err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        r, err := c.client.RedactContent(ctx, req, settings.GRPC...)
        resp = r
        return err
    }, opts...); err != nil {
        return nil, err
    }
    return resp, nil
}
// CreateInspectOperation schedules a job scanning content in a Google Cloud Platform data
// repository, returning a handle for tracking the long-running operation.
func (c *Client) CreateInspectOperation(ctx context.Context, req *dlppb.CreateInspectOperationRequest, opts ...gax.CallOption) (*CreateInspectOperationHandle, error) {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    // Full-slice the defaults so appending caller opts cannot clobber them.
    base := c.CallOptions.CreateInspectOperation
    opts = append(base[:len(base):len(base)], opts...)
    var op *longrunningpb.Operation
    if err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        r, err := c.client.CreateInspectOperation(ctx, req, settings.GRPC...)
        op = r
        return err
    }, opts...); err != nil {
        return nil, err
    }
    return &CreateInspectOperationHandle{
        lro: longrunning.InternalNewOperation(c.LROClient, op),
    }, nil
}
// ListInspectFindings returns list of results for given inspect operation result set id.
func (c *Client) ListInspectFindings(ctx context.Context, req *dlppb.ListInspectFindingsRequest, opts ...gax.CallOption) (*dlppb.ListInspectFindingsResponse, error) {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    // Full-slice the defaults so appending caller opts cannot clobber them.
    base := c.CallOptions.ListInspectFindings
    opts = append(base[:len(base):len(base)], opts...)
    var resp *dlppb.ListInspectFindingsResponse
    if err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        r, err := c.client.ListInspectFindings(ctx, req, settings.GRPC...)
        resp = r
        return err
    }, opts...); err != nil {
        return nil, err
    }
    return resp, nil
}
// ListInfoTypes returns sensitive information types for given category.
func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest, opts ...gax.CallOption) (*dlppb.ListInfoTypesResponse, error) {
    ctx = insertXGoog(ctx, c.xGoogHeader)
    // Full-slice the defaults so appending caller opts cannot clobber them.
    base := c.CallOptions.ListInfoTypes
    opts = append(base[:len(base):len(base)], opts...)
    var resp *dlppb.ListInfoTypesResponse
    if err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
        r, err := c.client.ListInfoTypes(ctx, req, settings.GRPC...)
        resp = r
        return err
    }, opts...); err != nil {
        return nil, err
    }
    return resp, nil
}
// ListRootCategories returns the list of root categories of sensitive information.
func (c *Client) ListRootCategories(ctx context.Context, req *dlppb.ListRootCategoriesRequest, opts ...gax.CallOption) (*dlppb.ListRootCategoriesResponse, error) {
	// Attach the x-goog-api-client header identifying this client library.
	ctx = insertXGoog(ctx, c.xGoogHeader)
	// Full-slice expression prevents appends from clobbering the shared default options.
	opts = append(c.CallOptions.ListRootCategories[0:len(c.CallOptions.ListRootCategories):len(c.CallOptions.ListRootCategories)], opts...)
	var resp *dlppb.ListRootCategoriesResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.ListRootCategories(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}
// CreateInspectOperationHandle manages a long-running operation from CreateInspectOperation.
type CreateInspectOperationHandle struct {
	// lro is the underlying long-running operation; all handle methods delegate to it.
	lro *longrunning.Operation
}
// CreateInspectOperationHandle returns a new CreateInspectOperationHandle from a given name.
// The name must be that of a previously created CreateInspectOperationHandle, possibly from a different process.
func (c *Client) CreateInspectOperationHandle(name string) *CreateInspectOperationHandle {
	// Rehydrate the handle from just the server-assigned operation name;
	// state is fetched lazily on Poll/Wait.
	return &CreateInspectOperationHandle{
		lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
	}
}
// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
//
// See documentation of Poll for error-handling information.
func (op *CreateInspectOperationHandle) Wait(ctx context.Context, opts ...gax.CallOption) (*dlppb.InspectOperationResult, error) {
	var resp dlppb.InspectOperationResult
	// Re-poll the server every 45 seconds until the operation completes or ctx ends.
	if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil {
		return nil, err
	}
	return &resp, nil
}
// Poll fetches the latest state of the long-running operation.
//
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
//
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
// the operation has completed with failure, the error is returned and op.Done will return true.
// If Poll succeeds and the operation has completed successfully,
// op.Done will return true, and the response of the operation is returned.
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
func (op *CreateInspectOperationHandle) Poll(ctx context.Context, opts ...gax.CallOption) (*dlppb.InspectOperationResult, error) {
	var resp dlppb.InspectOperationResult
	if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
		return nil, err
	}
	// Still running: per the contract above, report (nil, nil) rather than a partial result.
	if !op.Done() {
		return nil, nil
	}
	return &resp, nil
}
// Metadata returns metadata associated with the long-running operation.
// Metadata itself does not contact the server, but Poll does.
// To get the latest metadata, call this method after a successful call to Poll.
// If the metadata is not available, the returned metadata and error are both nil.
func (op *CreateInspectOperationHandle) Metadata() (*dlppb.InspectOperationMetadata, error) {
	var meta dlppb.InspectOperationMetadata
	// ErrNoMetadata means "nothing yet", which is not an error for callers: map it to (nil, nil).
	if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
		return nil, nil
	} else if err != nil {
		return nil, err
	}
	return &meta, nil
}
// Done reports whether the long-running operation has completed.
// It reflects state as of the last Poll/Wait; it does not contact the server.
func (op *CreateInspectOperationHandle) Done() bool {
	return op.lro.Done()
}
// Name returns the name of the long-running operation.
// The name is assigned by the server and is unique within the service from which the operation is created.
func (op *CreateInspectOperationHandle) Name() string {
	return op.lro.Name()
}

View File

@ -0,0 +1,146 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package dlp_test
import (
"cloud.google.com/go/dlp/apiv2beta1"
"golang.org/x/net/context"
dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1"
)
// ExampleNewClient demonstrates constructing a DLP API client.
func ExampleNewClient() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use client.
	_ = c
}
// ExampleClient_InspectContent demonstrates a synchronous InspectContent call.
func ExampleClient_InspectContent() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.InspectContentRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.InspectContent(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_RedactContent demonstrates a synchronous RedactContent call.
func ExampleClient_RedactContent() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.RedactContentRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.RedactContent(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_CreateInspectOperation demonstrates starting a long-running
// inspect operation and blocking on its completion with Wait.
func ExampleClient_CreateInspectOperation() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.CreateInspectOperationRequest{
		// TODO: Fill request struct fields.
	}
	op, err := c.CreateInspectOperation(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}

	resp, err := op.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_ListInspectFindings demonstrates fetching findings for an
// inspect operation result set.
func ExampleClient_ListInspectFindings() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.ListInspectFindingsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ListInspectFindings(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_ListInfoTypes demonstrates listing sensitive info types for a category.
func ExampleClient_ListInfoTypes() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.ListInfoTypesRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ListInfoTypes(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
// ExampleClient_ListRootCategories demonstrates listing the root categories of
// sensitive information.
func ExampleClient_ListRootCategories() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.ListRootCategoriesRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ListRootCategories(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

42
vendor/cloud.google.com/go/dlp/apiv2beta1/doc.go generated vendored Normal file
View File

@ -0,0 +1,42 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
// Package dlp is an experimental, auto-generated package for the
// DLP API.
//
// The Google Data Loss Prevention API provides methods for detection of
// privacy-sensitive fragments in text, images, and Google Cloud Platform
// storage repositories.
package dlp // import "cloud.google.com/go/dlp/apiv2beta1"
import (
"golang.org/x/net/context"
"google.golang.org/grpc/metadata"
)
// insertXGoog returns a child context whose outgoing gRPC metadata carries the
// x-goog-api-client header set to val, preserving any metadata already present.
func insertXGoog(ctx context.Context, val []string) context.Context {
	outgoing, _ := metadata.FromOutgoingContext(ctx)
	// Copy before mutating: the MD returned by FromOutgoingContext must not be modified.
	merged := outgoing.Copy()
	merged["x-goog-api-client"] = val
	return metadata.NewOutgoingContext(ctx, merged)
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
	const cloudPlatform = "https://www.googleapis.com/auth/cloud-platform"
	return []string{cloudPlatform}
}

556
vendor/cloud.google.com/go/dlp/apiv2beta1/mock_test.go generated vendored Normal file
View File

@ -0,0 +1,556 @@
// Copyright 2017, Google Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// AUTO-GENERATED CODE. DO NOT EDIT.
package dlp
import (
longrunningpb "google.golang.org/genproto/googleapis/longrunning"
dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1"
)
import (
"flag"
"fmt"
"io"
"log"
"net"
"os"
"strings"
"testing"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"golang.org/x/net/context"
"google.golang.org/api/option"
status "google.golang.org/genproto/googleapis/rpc/status"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
gstatus "google.golang.org/grpc/status"
)
var _ = io.EOF
var _ = ptypes.MarshalAny
var _ status.Status
// mockDlpServer is an in-process fake of the DLP service used by the unit
// tests: it records every request and replays canned responses or errors.
type mockDlpServer struct {
	// Embed for forward compatibility.
	// Tests will keep working if more methods are added
	// in the future.
	dlppb.DlpServiceServer

	// requests received, in call order
	reqs []proto.Message

	// If set, all calls return this error.
	err error

	// responses to return if err == nil
	resps []proto.Message
}
// InspectContent records the request and returns the first canned response
// (or the configured error).
func (s *mockDlpServer) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest) (*dlppb.InspectContentResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	// Every client call must carry an x-goog-api-client header containing a gl-go/ token.
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*dlppb.InspectContentResponse), nil
}
// RedactContent records the request and returns the first canned response
// (or the configured error).
func (s *mockDlpServer) RedactContent(ctx context.Context, req *dlppb.RedactContentRequest) (*dlppb.RedactContentResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	// Every client call must carry an x-goog-api-client header containing a gl-go/ token.
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*dlppb.RedactContentResponse), nil
}
// CreateInspectOperation records the request and returns the first canned
// long-running Operation (or the configured error).
func (s *mockDlpServer) CreateInspectOperation(ctx context.Context, req *dlppb.CreateInspectOperationRequest) (*longrunningpb.Operation, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	// Every client call must carry an x-goog-api-client header containing a gl-go/ token.
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*longrunningpb.Operation), nil
}
// ListInspectFindings records the request and returns the first canned response
// (or the configured error).
func (s *mockDlpServer) ListInspectFindings(ctx context.Context, req *dlppb.ListInspectFindingsRequest) (*dlppb.ListInspectFindingsResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	// Every client call must carry an x-goog-api-client header containing a gl-go/ token.
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*dlppb.ListInspectFindingsResponse), nil
}
// ListInfoTypes records the request and returns the first canned response
// (or the configured error).
func (s *mockDlpServer) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest) (*dlppb.ListInfoTypesResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	// Every client call must carry an x-goog-api-client header containing a gl-go/ token.
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*dlppb.ListInfoTypesResponse), nil
}
// ListRootCategories records the request and returns the first canned response
// (or the configured error).
func (s *mockDlpServer) ListRootCategories(ctx context.Context, req *dlppb.ListRootCategoriesRequest) (*dlppb.ListRootCategoriesResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	// Every client call must carry an x-goog-api-client header containing a gl-go/ token.
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*dlppb.ListRootCategoriesResponse), nil
}
// clientOpt is the option tests should use to connect to the test server.
// It is initialized by TestMain.
var clientOpt option.ClientOption

var (
	// mockDlp is the shared fake server instance that TestMain registers
	// with the in-process gRPC server; tests reset its fields directly.
	mockDlp mockDlpServer
)
// TestMain starts an in-process gRPC server backed by mockDlp, points
// clientOpt at it, and then runs the package's tests.
func TestMain(m *testing.M) {
	flag.Parse()

	serv := grpc.NewServer()
	dlppb.RegisterDlpServiceServer(serv, &mockDlp)

	// Port 0: let the OS pick any free port.
	lis, err := net.Listen("tcp", "localhost:0")
	if err != nil {
		log.Fatal(err)
	}
	go serv.Serve(lis)

	conn, err := grpc.Dial(lis.Addr().String(), grpc.WithInsecure())
	if err != nil {
		log.Fatal(err)
	}
	clientOpt = option.WithGRPCConn(conn)

	os.Exit(m.Run())
}
// TestDlpServiceInspectContent verifies InspectContent forwards the request
// unchanged and returns the mock server's canned response.
func TestDlpServiceInspectContent(t *testing.T) {
	var expectedResponse *dlppb.InspectContentResponse = &dlppb.InspectContentResponse{}

	// Reset shared mock state from any previous test.
	mockDlp.err = nil
	mockDlp.reqs = nil

	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)

	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
	var items []*dlppb.ContentItem = nil
	var request = &dlppb.InspectContentRequest{
		InspectConfig: inspectConfig,
		Items:         items,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.InspectContent(context.Background(), request)

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}
// TestDlpServiceInspectContentError verifies the gRPC status code set on the
// mock server is surfaced to the InspectContent caller.
func TestDlpServiceInspectContentError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockDlp.err = gstatus.Error(errCode, "test error")

	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
	var items []*dlppb.ContentItem = nil
	var request = &dlppb.InspectContentRequest{
		InspectConfig: inspectConfig,
		Items:         items,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.InspectContent(context.Background(), request)

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
// TestDlpServiceRedactContent verifies RedactContent forwards the request
// unchanged and returns the mock server's canned response.
func TestDlpServiceRedactContent(t *testing.T) {
	var expectedResponse *dlppb.RedactContentResponse = &dlppb.RedactContentResponse{}

	// Reset shared mock state from any previous test.
	mockDlp.err = nil
	mockDlp.reqs = nil

	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)

	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
	var items []*dlppb.ContentItem = nil
	var replaceConfigs []*dlppb.RedactContentRequest_ReplaceConfig = nil
	var request = &dlppb.RedactContentRequest{
		InspectConfig:  inspectConfig,
		Items:          items,
		ReplaceConfigs: replaceConfigs,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.RedactContent(context.Background(), request)

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}
// TestDlpServiceRedactContentError verifies the gRPC status code set on the
// mock server is surfaced to the RedactContent caller.
func TestDlpServiceRedactContentError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockDlp.err = gstatus.Error(errCode, "test error")

	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
	var items []*dlppb.ContentItem = nil
	var replaceConfigs []*dlppb.RedactContentRequest_ReplaceConfig = nil
	var request = &dlppb.RedactContentRequest{
		InspectConfig:  inspectConfig,
		Items:          items,
		ReplaceConfigs: replaceConfigs,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.RedactContent(context.Background(), request)

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
// TestDlpServiceCreateInspectOperation verifies the long-running operation
// flow: the mock returns an already-Done Operation whose response unpacks to
// the expected InspectOperationResult via Wait.
func TestDlpServiceCreateInspectOperation(t *testing.T) {
	var name string = "name3373707"
	var expectedResponse = &dlppb.InspectOperationResult{
		Name: name,
	}

	// Reset shared mock state from any previous test.
	mockDlp.err = nil
	mockDlp.reqs = nil

	// Pack the expected result into an Any, as a real LRO response would be.
	any, err := ptypes.MarshalAny(expectedResponse)
	if err != nil {
		t.Fatal(err)
	}
	mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{
		Name:   "longrunning-test",
		Done:   true,
		Result: &longrunningpb.Operation_Response{Response: any},
	})

	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
	var storageConfig *dlppb.StorageConfig = &dlppb.StorageConfig{}
	var outputConfig *dlppb.OutputStorageConfig = &dlppb.OutputStorageConfig{}
	var request = &dlppb.CreateInspectOperationRequest{
		InspectConfig: inspectConfig,
		StorageConfig: storageConfig,
		OutputConfig:  outputConfig,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	respLRO, err := c.CreateInspectOperation(context.Background(), request)

	if err != nil {
		t.Fatal(err)
	}

	resp, err := respLRO.Wait(context.Background())

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}
// TestDlpServiceCreateInspectOperationError verifies that an error recorded in
// the long-running Operation result is surfaced by Wait with the right code.
func TestDlpServiceCreateInspectOperationError(t *testing.T) {
	errCode := codes.PermissionDenied
	// Note: the RPC itself succeeds; the failure is embedded in the Operation.
	mockDlp.err = nil
	mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{
		Name: "longrunning-test",
		Done: true,
		Result: &longrunningpb.Operation_Error{
			Error: &status.Status{
				Code:    int32(errCode),
				Message: "test error",
			},
		},
	})

	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
	var storageConfig *dlppb.StorageConfig = &dlppb.StorageConfig{}
	var outputConfig *dlppb.OutputStorageConfig = &dlppb.OutputStorageConfig{}
	var request = &dlppb.CreateInspectOperationRequest{
		InspectConfig: inspectConfig,
		StorageConfig: storageConfig,
		OutputConfig:  outputConfig,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	respLRO, err := c.CreateInspectOperation(context.Background(), request)

	if err != nil {
		t.Fatal(err)
	}

	resp, err := respLRO.Wait(context.Background())

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
// TestDlpServiceListInspectFindings verifies ListInspectFindings forwards the
// request unchanged and returns the mock server's canned response.
func TestDlpServiceListInspectFindings(t *testing.T) {
	var nextPageToken string = "nextPageToken-1530815211"
	var expectedResponse = &dlppb.ListInspectFindingsResponse{
		NextPageToken: nextPageToken,
	}

	// Reset shared mock state from any previous test.
	mockDlp.err = nil
	mockDlp.reqs = nil

	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)

	var formattedName string = ResultPath("[RESULT]")
	var request = &dlppb.ListInspectFindingsRequest{
		Name: formattedName,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.ListInspectFindings(context.Background(), request)

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}
// TestDlpServiceListInspectFindingsError verifies the gRPC status code set on
// the mock server is surfaced to the ListInspectFindings caller.
func TestDlpServiceListInspectFindingsError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockDlp.err = gstatus.Error(errCode, "test error")

	var formattedName string = ResultPath("[RESULT]")
	var request = &dlppb.ListInspectFindingsRequest{
		Name: formattedName,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.ListInspectFindings(context.Background(), request)

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
// TestDlpServiceListInfoTypes verifies ListInfoTypes forwards the request
// unchanged and returns the mock server's canned response.
func TestDlpServiceListInfoTypes(t *testing.T) {
	var expectedResponse *dlppb.ListInfoTypesResponse = &dlppb.ListInfoTypesResponse{}

	// Reset shared mock state from any previous test.
	mockDlp.err = nil
	mockDlp.reqs = nil

	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)

	var category string = "category50511102"
	var languageCode string = "languageCode-412800396"
	var request = &dlppb.ListInfoTypesRequest{
		Category:     category,
		LanguageCode: languageCode,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.ListInfoTypes(context.Background(), request)

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}
// TestDlpServiceListInfoTypesError verifies the gRPC status code set on the
// mock server is surfaced to the ListInfoTypes caller.
func TestDlpServiceListInfoTypesError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockDlp.err = gstatus.Error(errCode, "test error")

	var category string = "category50511102"
	var languageCode string = "languageCode-412800396"
	var request = &dlppb.ListInfoTypesRequest{
		Category:     category,
		LanguageCode: languageCode,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.ListInfoTypes(context.Background(), request)

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
// TestDlpServiceListRootCategories verifies ListRootCategories forwards the
// request unchanged and returns the mock server's canned response.
func TestDlpServiceListRootCategories(t *testing.T) {
	var expectedResponse *dlppb.ListRootCategoriesResponse = &dlppb.ListRootCategoriesResponse{}

	// Reset shared mock state from any previous test.
	mockDlp.err = nil
	mockDlp.reqs = nil

	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)

	var languageCode string = "languageCode-412800396"
	var request = &dlppb.ListRootCategoriesRequest{
		LanguageCode: languageCode,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.ListRootCategories(context.Background(), request)

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q)", got, want)
	}
}
// TestDlpServiceListRootCategoriesError verifies the gRPC status code set on
// the mock server is surfaced to the ListRootCategories caller.
func TestDlpServiceListRootCategoriesError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockDlp.err = gstatus.Error(errCode, "test error")

	var languageCode string = "languageCode-412800396"
	var request = &dlppb.ListRootCategoriesRequest{
		LanguageCode: languageCode,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.ListRootCategories(context.Background(), request)

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}

View File

@ -15,11 +15,13 @@
// AUTO-GENERATED CODE. DO NOT EDIT.
// Package errorreporting is an experimental, auto-generated package for the
// errorreporting API.
// Stackdriver Error Reporting API.
//
// Stackdriver Error Reporting groups and counts similar errors from cloud
// services. The Stackdriver Error Reporting API provides a way to report new
// errors and read access to error groups and their associated errors.
//
// Use the client at cloud.google.com/go/errorreporting in preference to this.
package errorreporting // import "cloud.google.com/go/errorreporting/apiv1beta1"
import (
@ -34,6 +36,7 @@ func insertXGoog(ctx context.Context, val []string) context.Context {
return metadata.NewOutgoingContext(ctx, md)
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",

View File

@ -29,10 +29,6 @@ import (
"google.golang.org/grpc/codes"
)
var (
errorGroupGroupPathTemplate = gax.MustCompilePathTemplate("projects/{project}/groups/{group}")
)
// ErrorGroupCallOptions contains the retry settings for each method of ErrorGroupClient.
type ErrorGroupCallOptions struct {
GetGroup []gax.CallOption
@ -122,14 +118,12 @@ func (c *ErrorGroupClient) SetGoogleClientInfo(keyval ...string) {
// ErrorGroupGroupPath returns the path for the group resource.
func ErrorGroupGroupPath(project, group string) string {
path, err := errorGroupGroupPathTemplate.Render(map[string]string{
"project": project,
"group": group,
})
if err != nil {
panic(err)
}
return path
return "" +
"projects/" +
project +
"/groups/" +
group +
""
}
// GetGroup get the specified group.

View File

@ -31,10 +31,6 @@ import (
"google.golang.org/grpc/codes"
)
var (
errorStatsProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
)
// ErrorStatsCallOptions contains the retry settings for each method of ErrorStatsClient.
type ErrorStatsCallOptions struct {
ListGroupStats []gax.CallOption
@ -127,13 +123,10 @@ func (c *ErrorStatsClient) SetGoogleClientInfo(keyval ...string) {
// ErrorStatsProjectPath returns the path for the project resource.
func ErrorStatsProjectPath(project string) string {
path, err := errorStatsProjectPathTemplate.Render(map[string]string{
"project": project,
})
if err != nil {
panic(err)
}
return path
return "" +
"projects/" +
project +
""
}
// ListGroupStats lists the specified groups.

View File

@ -19,6 +19,7 @@ package errorreporting_test
import (
"cloud.google.com/go/errorreporting/apiv1beta1"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
)
@ -45,9 +46,11 @@ func ExampleErrorStatsClient_ListGroupStats() {
it := c.ListGroupStats(ctx, req)
for {
resp, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
break
}
// TODO: Use resp.
_ = resp
@ -67,9 +70,11 @@ func ExampleErrorStatsClient_ListEvents() {
it := c.ListEvents(ctx, req)
for {
resp, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
break
}
// TODO: Use resp.
_ = resp

View File

@ -38,6 +38,7 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
gstatus "google.golang.org/grpc/status"
)
var _ = io.EOF
@ -234,7 +235,7 @@ func TestErrorGroupServiceGetGroup(t *testing.T) {
func TestErrorGroupServiceGetGroupError(t *testing.T) {
errCode := codes.PermissionDenied
mockErrorGroup.err = grpc.Errorf(errCode, "test error")
mockErrorGroup.err = gstatus.Error(errCode, "test error")
var formattedGroupName string = ErrorGroupGroupPath("[PROJECT]", "[GROUP]")
var request = &clouderrorreportingpb.GetGroupRequest{
@ -248,7 +249,9 @@ func TestErrorGroupServiceGetGroupError(t *testing.T) {
resp, err := c.GetGroup(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
@ -293,7 +296,7 @@ func TestErrorGroupServiceUpdateGroup(t *testing.T) {
func TestErrorGroupServiceUpdateGroupError(t *testing.T) {
errCode := codes.PermissionDenied
mockErrorGroup.err = grpc.Errorf(errCode, "test error")
mockErrorGroup.err = gstatus.Error(errCode, "test error")
var group *clouderrorreportingpb.ErrorGroup = &clouderrorreportingpb.ErrorGroup{}
var request = &clouderrorreportingpb.UpdateGroupRequest{
@ -307,7 +310,9 @@ func TestErrorGroupServiceUpdateGroupError(t *testing.T) {
resp, err := c.UpdateGroup(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
@ -365,7 +370,7 @@ func TestErrorStatsServiceListGroupStats(t *testing.T) {
func TestErrorStatsServiceListGroupStatsError(t *testing.T) {
errCode := codes.PermissionDenied
mockErrorStats.err = grpc.Errorf(errCode, "test error")
mockErrorStats.err = gstatus.Error(errCode, "test error")
var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]")
var timeRange *clouderrorreportingpb.QueryTimeRange = &clouderrorreportingpb.QueryTimeRange{}
@ -381,7 +386,9 @@ func TestErrorStatsServiceListGroupStatsError(t *testing.T) {
resp, err := c.ListGroupStats(context.Background(), request).Next()
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
@ -439,7 +446,7 @@ func TestErrorStatsServiceListEvents(t *testing.T) {
func TestErrorStatsServiceListEventsError(t *testing.T) {
errCode := codes.PermissionDenied
mockErrorStats.err = grpc.Errorf(errCode, "test error")
mockErrorStats.err = gstatus.Error(errCode, "test error")
var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]")
var groupId string = "groupId506361563"
@ -455,7 +462,9 @@ func TestErrorStatsServiceListEventsError(t *testing.T) {
resp, err := c.ListEvents(context.Background(), request).Next()
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
@ -495,7 +504,7 @@ func TestErrorStatsServiceDeleteEvents(t *testing.T) {
func TestErrorStatsServiceDeleteEventsError(t *testing.T) {
errCode := codes.PermissionDenied
mockErrorStats.err = grpc.Errorf(errCode, "test error")
mockErrorStats.err = gstatus.Error(errCode, "test error")
var formattedProjectName string = ErrorStatsProjectPath("[PROJECT]")
var request = &clouderrorreportingpb.DeleteEventsRequest{
@ -509,7 +518,9 @@ func TestErrorStatsServiceDeleteEventsError(t *testing.T) {
resp, err := c.DeleteEvents(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
@ -551,7 +562,7 @@ func TestReportErrorsServiceReportErrorEvent(t *testing.T) {
func TestReportErrorsServiceReportErrorEventError(t *testing.T) {
errCode := codes.PermissionDenied
mockReportErrors.err = grpc.Errorf(errCode, "test error")
mockReportErrors.err = gstatus.Error(errCode, "test error")
var formattedProjectName string = ReportErrorsProjectPath("[PROJECT]")
var event *clouderrorreportingpb.ReportedErrorEvent = &clouderrorreportingpb.ReportedErrorEvent{}
@ -567,7 +578,9 @@ func TestReportErrorsServiceReportErrorEventError(t *testing.T) {
resp, err := c.ReportErrorEvent(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp

View File

@ -17,8 +17,6 @@
package errorreporting
import (
"time"
"cloud.google.com/go/internal/version"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
@ -26,11 +24,6 @@ import (
"google.golang.org/api/transport"
clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
)
var (
reportErrorsProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
)
// ReportErrorsCallOptions contains the retry settings for each method of ReportErrorsClient.
@ -46,19 +39,7 @@ func defaultReportErrorsClientOptions() []option.ClientOption {
}
func defaultReportErrorsCallOptions() *ReportErrorsCallOptions {
retry := map[[2]string][]gax.CallOption{
{"default", "non_idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
retry := map[[2]string][]gax.CallOption{}
return &ReportErrorsCallOptions{
ReportErrorEvent: retry[[2]string{"default", "non_idempotent"}],
}
@ -119,13 +100,10 @@ func (c *ReportErrorsClient) SetGoogleClientInfo(keyval ...string) {
// ReportErrorsProjectPath returns the path for the project resource.
func ReportErrorsProjectPath(project string) string {
path, err := reportErrorsProjectPathTemplate.Render(map[string]string{
"project": project,
})
if err != nil {
panic(err)
}
return path
return "" +
"projects/" +
project +
""
}
// ReportErrorEvent report an individual error event.
@ -134,8 +112,7 @@ func ReportErrorsProjectPath(project string) string {
// <strong>or</strong> an
// <a href="https://support.google.com/cloud/answer/6158862">API key</a>
// for authentication. To use an API key, append it to the URL as the value of
// a `key` parameter. For example:
// <pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
// a key parameter. For example:<pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
func (c *ReportErrorsClient) ReportErrorEvent(ctx context.Context, req *clouderrorreportingpb.ReportErrorEventRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ReportErrorEventResponse, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.ReportErrorEvent[0:len(c.CallOptions.ReportErrorEvent):len(c.CallOptions.ReportErrorEvent)], opts...)

View File

@ -0,0 +1,215 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errorreporting
import (
"bytes"
"errors"
"log"
"strings"
"testing"
"cloud.google.com/go/logging"
"golang.org/x/net/context"
"google.golang.org/api/option"
)
// fakeLogger is a test double for loggerInterface that records the last
// entry passed to LogSync and can be made to fail on demand.
type fakeLogger struct {
	entry *logging.Entry // last entry received by LogSync; nil if none yet
	fail  bool           // when true, LogSync fails without recording
}

// LogSync records e (unless fail is set) and returns an error when fail is true.
func (c *fakeLogger) LogSync(ctx context.Context, e logging.Entry) error {
	if c.fail {
		return errors.New("request failed")
	}
	c.entry = &e
	return nil
}

// Close is a no-op so the fake satisfies loggerInterface.
func (c *fakeLogger) Close() error {
	return nil
}
// newTestClientUsingLogging builds a Client whose logging backend is the
// supplied fakeLogger, by stubbing out the package-level newLoggerInterface
// constructor. It panics if NewClient fails, and disables re-panicking so
// tests can inspect reports after a recovered panic.
func newTestClientUsingLogging(c *fakeLogger) *Client {
	// Replace the constructor so NewClient(useLogging=true) wires in the fake.
	newLoggerInterface = func(ctx context.Context, project string, opts ...option.ClientOption) (loggerInterface, error) {
		return c, nil
	}
	t, err := NewClient(context.Background(), testProjectID, "myservice", "v1.000", true)
	if err != nil {
		panic(err)
	}
	t.RepanicDefault = false
	return t
}
// TestCatchNothingUsingLogging verifies that a deferred Catch writes no
// report when the function returns normally (no panic occurred).
func TestCatchNothingUsingLogging(t *testing.T) {
	fl := &fakeLogger{}
	c := newTestClientUsingLogging(fl)
	// Deferred checks run LIFO, so this inspection runs after Catch.
	defer func() {
		e := fl.entry
		if e != nil {
			t.Errorf("got error report, expected none")
		}
	}()
	defer c.Catch(ctx)
}
// entryMessage extracts the "message" field from a logging entry's payload.
// It assumes the payload is a map[string]interface{} holding a string message.
func entryMessage(e *logging.Entry) string {
	payload := e.Payload.(map[string]interface{})
	return payload["message"].(string)
}
// commonLoggingChecks asserts that a logged error report carries the expected
// service context ("myservice"/"v1.000"), the "hello, error" message, and a
// stack frame naming panickingFunction.
func commonLoggingChecks(t *testing.T, e *logging.Entry, panickingFunction string) {
	if e.Payload.(map[string]interface{})["serviceContext"].(map[string]string)["service"] != "myservice" {
		t.Errorf("error report didn't contain service name")
	}
	if e.Payload.(map[string]interface{})["serviceContext"].(map[string]string)["version"] != "v1.000" {
		t.Errorf("error report didn't contain version name")
	}
	if !strings.Contains(entryMessage(e), "hello, error") {
		t.Errorf("error report didn't contain message")
	}
	if !strings.Contains(entryMessage(e), panickingFunction) {
		t.Errorf("error report didn't contain stack trace")
	}
}
func TestCatchPanicUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestCatchPanic")
if !strings.Contains(entryMessage(e), "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestCatchPanicNilClientUsingLogging(t *testing.T) {
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "TestCatchPanicNilClient") {
t.Errorf("error report didn't contain recovered value")
}
}()
var c *Client
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestLogFailedReportsUsingLogging(t *testing.T) {
fl := &fakeLogger{fail: true}
c := newTestClientUsingLogging(fl)
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "errorreporting.TestLogFailedReports") {
t.Errorf("error report didn't contain stack trace")
}
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestCatchNilPanicUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestCatchNilPanic")
if !strings.Contains(entryMessage(e), "nil") {
t.Errorf("error report didn't contain recovered value")
}
}()
b := true
defer c.Catch(ctx, WithMessage("hello, error"), PanicFlag(&b))
panic(nil)
}
func TestNotCatchNilPanicUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
defer func() {
e := fl.entry
if e != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
panic(nil)
}
func TestReportUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
c.Report(ctx, nil, "hello, ", "error")
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestReport")
}
func TestReportfUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
c.Reportf(ctx, nil, "hello, error 2+%d=%d", 2, 2+2)
e := fl.entry
if e == nil {
t.Fatalf("got no error report, expected one")
}
commonLoggingChecks(t, e, "TestReportf")
if !strings.Contains(entryMessage(e), "2+2=4") {
t.Errorf("error report didn't contain formatted message")
}
}
func TestCloseUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
err := c.Close()
if err != nil {
t.Fatal(err)
}
}

456
vendor/cloud.google.com/go/errorreporting/errors.go generated vendored Normal file
View File

@ -0,0 +1,456 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package errorreporting is a Google Stackdriver Error Reporting library.
//
// This package is still experimental and subject to change.
//
// See https://cloud.google.com/error-reporting/ for more information.
//
// To initialize a client, use the NewClient function.
//
// import "cloud.google.com/go/errorreporting"
// ...
// errorsClient, err = errorreporting.NewClient(ctx, projectID, "myservice", "v1.0", true)
//
// The client can recover panics in your program and report them as errors.
// To use this functionality, defer its Catch method, as you would any other
// function for recovering panics.
//
// func foo(ctx context.Context, ...) {
// defer errorsClient.Catch(ctx)
// ...
// }
//
// Catch writes an error report containing the recovered value and a stack trace
// to Stackdriver Error Reporting.
//
// There are various options you can add to the call to Catch that modify how
// panics are handled.
//
// WithMessage and WithMessagef add a custom message after the recovered value,
// using fmt.Sprint and fmt.Sprintf respectively.
//
// defer errorsClient.Catch(ctx, errorreporting.WithMessagef("x=%d", x))
//
// WithRequest fills in various fields in the error report with information
// about an http.Request that's being handled.
//
// defer errorsClient.Catch(ctx, errorreporting.WithRequest(httpReq))
//
// By default, after recovering a panic, Catch will panic again with the
// recovered value. You can turn off this behavior with the Repanic option.
//
// defer errorsClient.Catch(ctx, errorreporting.Repanic(false))
//
// You can also change the default behavior for the client by changing the
// RepanicDefault field.
//
// errorsClient.RepanicDefault = false
//
// It is also possible to write an error report directly without recovering a
// panic, using Report or Reportf.
//
// if err != nil {
// errorsClient.Reportf(ctx, r, "unexpected error %v", err)
// }
//
// If you try to write an error report with a nil client, or if the client
// fails to write the report to the server, the error report is logged using
// log.Println.
package errorreporting // import "cloud.google.com/go/errorreporting"
import (
"bytes"
"fmt"
"log"
"net/http"
"runtime"
"strings"
"time"
api "cloud.google.com/go/errorreporting/apiv1beta1"
"cloud.google.com/go/internal/version"
"cloud.google.com/go/logging"
"github.com/golang/protobuf/ptypes/timestamp"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/option"
erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
)
const (
userAgent = `gcloud-golang-errorreporting/20160701`
)
type apiInterface interface {
ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, opts ...gax.CallOption) (*erpb.ReportErrorEventResponse, error)
Close() error
}
// newApiInterface constructs the real Error Reporting API client and tags it
// with client-info metadata. It is a package-level variable so tests can
// substitute a fake implementation.
var newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) {
	client, err := api.NewReportErrorsClient(ctx, opts...)
	if err != nil {
		return nil, err
	}
	client.SetGoogleClientInfo("gccl", version.Repo)
	return client, nil
}
// loggerInterface is the subset of the Stackdriver Logging client used by
// this package; it exists so tests can supply a fake.
type loggerInterface interface {
	LogSync(ctx context.Context, e logging.Entry) error
	Close() error
}

// logger adapts a *logging.Logger to loggerInterface.
type logger struct {
	*logging.Logger
	c *logging.Client // retained only so Close can shut the client down
}

// Close closes the logging client that owns the embedded Logger.
func (l logger) Close() error {
	return l.c.Close()
}

// newLoggerInterface creates a Logging-backed loggerInterface writing to the
// "errorreports" log. It is a variable so tests can replace it.
var newLoggerInterface = func(ctx context.Context, projectID string, opts ...option.ClientOption) (loggerInterface, error) {
	lc, err := logging.NewClient(ctx, projectID, opts...)
	if err != nil {
		return nil, fmt.Errorf("creating Logging client: %v", err)
	}
	l := lc.Logger("errorreports")
	return logger{l, lc}, nil
}
type sender interface {
send(ctx context.Context, r *http.Request, message string)
close() error
}
// errorApiSender sends error reports using the Stackdriver Error Reporting API.
type errorApiSender struct {
apiClient apiInterface
projectID string
serviceContext erpb.ServiceContext
}
// loggingSender sends error reports using the Stackdriver Logging API.
type loggingSender struct {
logger loggerInterface
projectID string
serviceContext map[string]string
}
// Client represents a Google Cloud Error Reporting client.
type Client struct {
sender
// RepanicDefault determines whether Catch will re-panic after recovering a
// panic. This behavior can be overridden for an individual call to Catch using
// the Repanic option.
RepanicDefault bool
}
// NewClient returns a new error reporting client. Generally you will want
// to create a client on program initialization and use it through the lifetime
// of the process.
//
// The service name and version string identify the running program, and are
// included in error reports. The version string can be left empty.
//
// Set useLogging to report errors also using Stackdriver Logging,
// which will result in errors appearing in both the logs and the error
// dashboard. This is useful if you are already a user of Stackdriver Logging.
func NewClient(ctx context.Context, projectID, serviceName, serviceVersion string, useLogging bool, opts ...option.ClientOption) (*Client, error) {
	if !useLogging {
		// Direct Error Reporting API backend.
		a, err := newApiInterface(ctx, opts...)
		if err != nil {
			return nil, fmt.Errorf("creating Error Reporting client: %v", err)
		}
		return &Client{
			sender: &errorApiSender{
				apiClient: a,
				projectID: "projects/" + projectID,
				serviceContext: erpb.ServiceContext{
					Service: serviceName,
					Version: serviceVersion,
				},
			},
			RepanicDefault: true,
		}, nil
	}
	// Stackdriver Logging backend.
	l, err := newLoggerInterface(ctx, projectID, opts...)
	if err != nil {
		return nil, fmt.Errorf("creating Logging client: %v", err)
	}
	s := &loggingSender{
		logger:    l,
		projectID: projectID,
		serviceContext: map[string]string{
			"service": serviceName,
		},
	}
	// An empty version is omitted from the service context entirely.
	if serviceVersion != "" {
		s.serviceContext["version"] = serviceVersion
	}
	return &Client{
		sender:         s,
		RepanicDefault: true,
	}, nil
}
// Close closes any resources held by the client.
// Close should be called when the client is no longer needed.
// It need not be called at program exit.
func (c *Client) Close() error {
	err := c.sender.close()
	// Drop the sender so any use of the client after Close fails fast
	// instead of writing through a closed backend.
	c.sender = nil
	return err
}
// An Option is an optional argument to Catch.
type Option interface {
isOption()
}
// PanicFlag returns an Option that can inform Catch that a panic has occurred.
// If *p is true when Catch is called, an error report is made even if recover
// returns nil. This allows Catch to report an error for panic(nil).
// If p is nil, the option is ignored.
//
// Here is an example of how to use PanicFlag:
//
// func foo(ctx context.Context, ...) {
// hasPanicked := true
// defer errorsClient.Catch(ctx, errorreporting.PanicFlag(&hasPanicked))
// ...
// ...
// // We have reached the end of the function, so we're not panicking.
// hasPanicked = false
// }
func PanicFlag(p *bool) Option { return panicFlag{p} }
type panicFlag struct {
*bool
}
func (h panicFlag) isOption() {}
// Repanic returns an Option that determines whether Catch will re-panic after
// it reports an error. This overrides the default in the client.
func Repanic(r bool) Option { return repanic(r) }
type repanic bool
func (r repanic) isOption() {}
// WithRequest returns an Option that informs Catch or Report of an http.Request
// that is being handled. Information from the Request is included in the error
// report, if one is made.
func WithRequest(r *http.Request) Option { return withRequest{r} }
type withRequest struct {
*http.Request
}
func (w withRequest) isOption() {}
// WithMessage returns an Option that sets a message to be included in the error
// report, if one is made. v is converted to a string with fmt.Sprint.
func WithMessage(v ...interface{}) Option { return message(v) }
type message []interface{}
func (m message) isOption() {}
// WithMessagef returns an Option that sets a message to be included in the error
// report, if one is made. format and v are converted to a string with fmt.Sprintf.
func WithMessagef(format string, v ...interface{}) Option { return messagef{format, v} }
type messagef struct {
format string
v []interface{}
}
func (m messagef) isOption() {}
// Catch tries to recover a panic; if it succeeds, it writes an error report.
// It should be called by deferring it, like any other function for recovering
// panics.
//
// Catch can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Catch(ctx context.Context, opt ...Option) {
	// A PanicFlag option can force a report even when recover() returns nil,
	// which is how panic(nil) is detected. Evaluate those flags first.
	panicked := false
	for _, o := range opt {
		switch o := o.(type) {
		case panicFlag:
			panicked = panicked || o.bool != nil && *o.bool
		}
	}
	// recover only works here because Catch itself is the deferred function.
	x := recover()
	if x == nil && !panicked {
		return
	}
	var (
		r             *http.Request
		shouldRepanic = true
		messages      = []string{fmt.Sprint(x)}
	)
	// c may be nil (documented behavior: the report is logged locally).
	if c != nil {
		shouldRepanic = c.RepanicDefault
	}
	// Apply the remaining options: repanic override, HTTP request context,
	// and extra message text appended after the recovered value.
	for _, o := range opt {
		switch o := o.(type) {
		case repanic:
			shouldRepanic = bool(o)
		case withRequest:
			r = o.Request
		case message:
			messages = append(messages, fmt.Sprint(o...))
		case messagef:
			messages = append(messages, fmt.Sprintf(o.format, o.v...))
		}
	}
	c.logInternal(ctx, r, true, strings.Join(messages, " "))
	// By default re-panic with the recovered value so Catch is transparent
	// to callers further up the stack.
	if shouldRepanic {
		panic(x)
	}
}
// Report writes an error report unconditionally, instead of only when a panic
// occurs.
// If r is non-nil, information from the Request is included in the error report.
//
// Report can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Report(ctx context.Context, r *http.Request, v ...interface{}) {
	// isPanic=false anchors chopStack at the Report call, not a panic frame.
	c.logInternal(ctx, r, false, fmt.Sprint(v...))
}

// Reportf writes an error report unconditionally, instead of only when a panic
// occurs.
// If r is non-nil, information from the Request is included in the error report.
//
// Reportf can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Reportf(ctx context.Context, r *http.Request, format string, v ...interface{}) {
	c.logInternal(ctx, r, false, fmt.Sprintf(format, v...))
}
// logInternal appends a trimmed stack trace to msg and hands the result to
// the configured sender. A nil client logs the report locally instead of
// sending it.
func (c *Client) logInternal(ctx context.Context, r *http.Request, isPanic bool, msg string) {
	// limit the stack trace to 16k.
	var buf [16384]byte
	stack := buf[0:runtime.Stack(buf[:], false)]
	// chopStack removes reporting-internal frames so the trace starts at the
	// function that panicked or called Report.
	message := msg + "\n" + chopStack(stack, isPanic)
	if c == nil {
		log.Println("Error report used nil client:", message)
		return
	}
	c.send(ctx, r, message)
}
// send writes an error report as a Stackdriver Logging entry with Error
// severity. The payload mirrors the Error Reporting schema so the dashboard
// can pick the entry up. Failures are logged locally, not returned.
func (s *loggingSender) send(ctx context.Context, r *http.Request, message string) {
	payload := map[string]interface{}{
		"eventTime":      time.Now().In(time.UTC).Format(time.RFC3339Nano),
		"message":        message,
		"serviceContext": s.serviceContext,
	}
	if r != nil {
		// Attach request details so the report can be tied to the HTTP call.
		payload["context"] = map[string]interface{}{
			"httpRequest": map[string]interface{}{
				"method":    r.Method,
				"url":       r.Host + r.RequestURI,
				"userAgent": r.UserAgent(),
				"referrer":  r.Referer(),
				"remoteIp":  r.RemoteAddr,
			},
		}
	}
	e := logging.Entry{
		Severity: logging.Error,
		Payload:  payload,
	}
	err := s.logger.LogSync(ctx, e)
	if err != nil {
		// Best effort: record the failure locally rather than propagating it.
		log.Println("Error writing error report:", err, "report:", payload)
	}
}

// close releases the underlying logging client.
func (s *loggingSender) close() error {
	return s.logger.Close()
}
// send reports message (with optional HTTP request context) through the
// Stackdriver Error Reporting API. Failures are logged locally, not returned.
func (s *errorApiSender) send(ctx context.Context, r *http.Request, message string) {
	// Use a distinct name for the timestamp: the original shadowed the
	// `time` package with a local variable, which is error-prone.
	now := time.Now()
	var errorContext *erpb.ErrorContext
	if r != nil {
		// Attach request details so the report can be tied to the HTTP call.
		errorContext = &erpb.ErrorContext{
			HttpRequest: &erpb.HttpRequestContext{
				Method:    r.Method,
				Url:       r.Host + r.RequestURI,
				UserAgent: r.UserAgent(),
				Referrer:  r.Referer(),
				RemoteIp:  r.RemoteAddr,
			},
		}
	}
	req := erpb.ReportErrorEventRequest{
		ProjectName: s.projectID,
		Event: &erpb.ReportedErrorEvent{
			EventTime: &timestamp.Timestamp{
				Seconds: now.Unix(),
				Nanos:   int32(now.Nanosecond()),
			},
			ServiceContext: &s.serviceContext,
			Message:        message,
			Context:        errorContext,
		},
	}
	_, err := s.apiClient.ReportErrorEvent(ctx, &req)
	if err != nil {
		// Best effort: record the failure locally rather than propagating it.
		log.Println("Error writing error report:", err, "report:", message)
	}
}
// close releases the underlying Error Reporting API client.
func (s *errorApiSender) close() error {
	return s.apiClient.Close()
}
// chopStack trims a stack trace so that the function which panics or calls
// Report is first. It keeps the goroutine header line, then drops everything
// up to and including the marker frame (plus its file/line entry). If the
// input has no newline or the marker is absent, the trace is returned intact.
func chopStack(s []byte, isPanic bool) string {
	marker := []byte("cloud.google.com/go/errorreporting.(*Client).Report")
	if isPanic {
		marker = []byte("panic(")
	}
	headEnd := bytes.IndexByte(s, '\n')
	if headEnd == -1 {
		return string(s)
	}
	rest := s[headEnd:]
	at := bytes.Index(rest, marker)
	if at == -1 {
		return string(s)
	}
	rest = rest[at+1:]
	// Skip the remainder of the marker line and the file/line entry after it.
	for skipped := 0; skipped < 2; skipped++ {
		nl := bytes.IndexByte(rest, '\n')
		if nl == -1 {
			return string(s)
		}
		rest = rest[nl+1:]
	}
	return string(s[:headEnd+1]) + string(rest)
}

View File

@ -0,0 +1,212 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errorreporting
import (
"bytes"
"errors"
"log"
"strings"
"testing"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/option"
erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
)
const testProjectID = "testproject"
// fakeReportErrorsClient is a test double for apiInterface that records the
// last request and can be made to fail on demand.
type fakeReportErrorsClient struct {
	req  *erpb.ReportErrorEventRequest // last request received; nil if none yet
	fail bool                          // when true, ReportErrorEvent fails without recording
}

// ReportErrorEvent records req (unless fail is set) and returns an empty response.
func (c *fakeReportErrorsClient) ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, _ ...gax.CallOption) (*erpb.ReportErrorEventResponse, error) {
	if c.fail {
		return nil, errors.New("request failed")
	}
	c.req = req
	return &erpb.ReportErrorEventResponse{}, nil
}

// Close is a no-op so the fake satisfies apiInterface.
func (c *fakeReportErrorsClient) Close() error {
	return nil
}
// newTestClient builds a Client backed by the supplied fake API client, by
// stubbing out the package-level newApiInterface constructor. It panics if
// NewClient fails, and disables re-panicking so tests can inspect reports
// after a recovered panic.
func newTestClient(c *fakeReportErrorsClient) *Client {
	// Replace the constructor so NewClient(useLogging=false) wires in the fake.
	newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) {
		return c, nil
	}
	t, err := NewClient(context.Background(), testProjectID, "myservice", "v1.000", false)
	if err != nil {
		panic(err)
	}
	t.RepanicDefault = false
	return t
}
// ctx is a shared background context for the tests in this file.
// Initializing it in the declaration replaces the original init() function
// with identical effect.
var ctx = context.Background()
func TestCatchNothing(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx)
}
func commonChecks(t *testing.T, req *erpb.ReportErrorEventRequest, panickingFunction string) {
if req.Event.ServiceContext.Service != "myservice" {
t.Errorf("error report didn't contain service name")
}
if req.Event.ServiceContext.Version != "v1.000" {
t.Errorf("error report didn't contain version name")
}
if !strings.Contains(req.Event.Message, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(req.Event.Message, panickingFunction) {
t.Errorf("error report didn't contain stack trace")
}
}
func TestCatchPanic(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, r, "errorreporting.TestCatchPanic")
if !strings.Contains(r.Event.Message, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestCatchPanicNilClient(t *testing.T) {
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "TestCatchPanicNilClient") {
t.Errorf("error report didn't contain recovered value")
}
}()
var c *Client
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestLogFailedReports(t *testing.T) {
fc := &fakeReportErrorsClient{fail: true}
c := newTestClient(fc)
buf := new(bytes.Buffer)
log.SetOutput(buf)
defer func() {
recover()
body := buf.String()
if !strings.Contains(body, "hello, error") {
t.Errorf("error report didn't contain message")
}
if !strings.Contains(body, "errorreporting.TestLogFailedReports") {
t.Errorf("error report didn't contain stack trace")
}
if !strings.Contains(body, "divide by zero") {
t.Errorf("error report didn't contain recovered value")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
var x int
x = x / x
}
func TestCatchNilPanic(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, r, "errorreporting.TestCatchNilPanic")
if !strings.Contains(r.Event.Message, "nil") {
t.Errorf("error report didn't contain recovered value")
}
}()
b := true
defer c.Catch(ctx, WithMessage("hello, error"), PanicFlag(&b))
panic(nil)
}
func TestNotCatchNilPanic(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
defer func() {
r := fc.req
if r != nil {
t.Errorf("got error report, expected none")
}
}()
defer c.Catch(ctx, WithMessage("hello, error"))
panic(nil)
}
func TestReport(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
c.Report(ctx, nil, "hello, ", "error")
r := fc.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, r, "errorreporting.TestReport")
}
func TestReportf(t *testing.T) {
fc := &fakeReportErrorsClient{}
c := newTestClient(fc)
c.Reportf(ctx, nil, "hello, error 2+%d=%d", 2, 2+2)
r := fc.req
if r == nil {
t.Fatalf("got no error report, expected one")
}
commonChecks(t, r, "errorreporting.TestReportf")
if !strings.Contains(r.Event.Message, "2+2=4") {
t.Errorf("error report didn't contain formatted message")
}
}

118
vendor/cloud.google.com/go/errorreporting/stack_test.go generated vendored Normal file
View File

@ -0,0 +1,118 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package errorreporting
import "testing"
func TestChopStack(t *testing.T) {
for _, test := range []struct {
name string
in []byte
expected string
isPanic bool
}{
{
name: "Catch",
in: []byte(`goroutine 20 [running]:
runtime/debug.Stack()
/gopath/src/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errorreporting.(*Client).logInternal()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:259 +0x18b
cloud.google.com/go/errorreporting.(*Client).Catch()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:219 +0x6ed
panic()
/gopath/src/runtime/panic.go:458 +0x243
cloud.google.com/go/errorreporting.TestCatchPanic()
/gopath/src/cloud.google.com/go/errorreporting/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`),
expected: `goroutine 20 [running]:
cloud.google.com/go/errorreporting.TestCatchPanic()
/gopath/src/cloud.google.com/go/errorreporting/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`,
isPanic: true,
},
{
name: "function not found",
in: []byte(`goroutine 20 [running]:
runtime/debug.Stack()
/gopath/src/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errorreporting.(*Client).logInternal()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:259 +0x18b
cloud.google.com/go/errorreporting.(*Client).Catch()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:219 +0x6ed
cloud.google.com/go/errorreporting.TestCatchPanic()
/gopath/src/cloud.google.com/go/errorreporting/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`),
expected: `goroutine 20 [running]:
runtime/debug.Stack()
/gopath/src/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errorreporting.(*Client).logInternal()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:259 +0x18b
cloud.google.com/go/errorreporting.(*Client).Catch()
/gopath/src/cloud.google.com/go/errorreporting/errors.go:219 +0x6ed
cloud.google.com/go/errorreporting.TestCatchPanic()
/gopath/src/cloud.google.com/go/errorreporting/errors_test.go:93 +0x171
testing.tRunner()
/gopath/src/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/src/testing/testing.go:646 +0x2ec
`,
isPanic: true,
},
{
name: "Report",
in: []byte(` goroutine 39 [running]:
runtime/debug.Stack()
/gopath/runtime/debug/stack.go:24 +0x79
cloud.google.com/go/errorreporting.(*Client).logInternal()
/gopath/cloud.google.com/go/errorreporting/errors.go:259 +0x18b
cloud.google.com/go/errorreporting.(*Client).Report()
/gopath/cloud.google.com/go/errorreporting/errors.go:248 +0x4ed
cloud.google.com/go/errorreporting.TestReport()
/gopath/cloud.google.com/go/errorreporting/errors_test.go:137 +0x2a1
testing.tRunner()
/gopath/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/testing/testing.go:646 +0x2ec
`),
expected: ` goroutine 39 [running]:
cloud.google.com/go/errorreporting.TestReport()
/gopath/cloud.google.com/go/errorreporting/errors_test.go:137 +0x2a1
testing.tRunner()
/gopath/testing/testing.go:610 +0x81
created by testing.(*T).Run
/gopath/testing/testing.go:646 +0x2ec
`,
isPanic: false,
},
} {
out := chopStack(test.in, test.isPanic)
if out != test.expected {
t.Errorf("case %q: chopStack(%q, %t): got %q want %q", test.name, test.in, test.isPanic, out, test.expected)
}
}
}

View File

@ -204,3 +204,12 @@ func TestReportfUsingLogging(t *testing.T) {
t.Errorf("error report didn't contain formatted message")
}
}
func TestCloseUsingLogging(t *testing.T) {
fl := &fakeLogger{}
c := newTestClientUsingLogging(fl)
err := c.Close()
if err != nil {
t.Fatal(err)
}
}

View File

@ -18,14 +18,7 @@
//
// See https://cloud.google.com/error-reporting/ for more information.
//
// To initialize a client, use the NewClient function. Generally you will want
// to do this on program initialization. The NewClient function takes as
// arguments a context, the project name, a service name, and a version string.
// The service name and version string identify the running program, and are
// included in error reports. The version string can be left empty. NewClient
// also takes a bool that indicates whether to report errors using Stackdriver
// Logging, which will result in errors appearing in both the logs and the error
// dashboard. This is useful if you are already a user of Stackdriver Logging.
// To initialize a client, use the NewClient function.
//
// import "cloud.google.com/go/errors"
// ...
@ -76,6 +69,8 @@
// If you try to write an error report with a nil client, or if the client
// fails to write the report to the server, the error report is logged using
// log.Println.
//
// Deprecated: Use cloud.google.com/go/errorreporting instead.
package errors // import "cloud.google.com/go/errors"
import (
@ -155,9 +150,9 @@ type loggingSender struct {
logger loggerInterface
projectID string
serviceContext map[string]string
client *logging.Client
}
// Client represents a Google Cloud Error Reporting client.
type Client struct {
sender
// RepanicDefault determines whether Catch will re-panic after recovering a
@ -166,6 +161,16 @@ type Client struct {
RepanicDefault bool
}
// NewClient returns a new error reporting client. Generally you will want
// to create a client on program initialization and use it through the lifetime
// of the process.
//
// The service name and version string identify the running program, and are
// included in error reports. The version string can be left empty.
//
// Set useLogging to report errors also using Stackdriver Logging,
// which will result in errors appearing in both the logs and the error
// dashboard. This is useful if you are already a user of Stackdriver Logging.
func NewClient(ctx context.Context, projectID, serviceName, serviceVersion string, useLogging bool, opts ...option.ClientOption) (*Client, error) {
if useLogging {
l, err := newLoggerInterface(ctx, projectID, opts...)
@ -383,7 +388,7 @@ func (s *loggingSender) send(ctx context.Context, r *http.Request, message strin
}
func (s *loggingSender) close() error {
return s.client.Close()
return s.logger.Close()
}
func (s *errorApiSender) send(ctx context.Context, r *http.Request, message string) {

View File

@ -15,7 +15,7 @@
// AUTO-GENERATED CODE. DO NOT EDIT.
// Package admin is an experimental, auto-generated package for the
// admin API.
// Google Identity and Access Management (IAM) API.
//
// Manages identity and access control for Google Cloud Platform resources,
// including the creation of service accounts, which you can use to
@ -34,6 +34,7 @@ func insertXGoog(ctx context.Context, val []string) context.Context {
return metadata.NewOutgoingContext(ctx, md)
}
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
return []string{
"https://www.googleapis.com/auth/cloud-platform",

View File

@ -32,12 +32,6 @@ import (
"google.golang.org/grpc/codes"
)
var (
iamProjectPathTemplate = gax.MustCompilePathTemplate("projects/{project}")
iamServiceAccountPathTemplate = gax.MustCompilePathTemplate("projects/{project}/serviceAccounts/{service_account}")
iamKeyPathTemplate = gax.MustCompilePathTemplate("projects/{project}/serviceAccounts/{service_account}/keys/{key}")
)
// IamCallOptions contains the retry settings for each method of IamClient.
type IamCallOptions struct {
ListServiceAccounts []gax.CallOption
@ -77,17 +71,6 @@ func defaultIamCallOptions() *IamCallOptions {
})
}),
},
{"default", "non_idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &IamCallOptions{
ListServiceAccounts: retry[[2]string{"default", "idempotent"}],
@ -130,16 +113,16 @@ type IamClient struct {
// of to an individual end user. It is used to authenticate calls
// to a Google API.
//
// To create a service account, specify the `project_id` and `account_id`
// for the account. The `account_id` is unique within the project, and used
// To create a service account, specify the project_id and account_id
// for the account. The account_id is unique within the project, and used
// to generate the service account email address and a stable
// `unique_id`.
// unique_id.
//
// All other methods can identify accounts using the format
// `projects/{project}/serviceAccounts/{account}`.
// Using `-` as a wildcard for the project will infer the project from
// the account. The `account` value can be the `email` address or the
// `unique_id` of the service account.
// projects/{project}/serviceAccounts/{account}.
// Using - as a wildcard for the project will infer the project from
// the account. The account value can be the email address or the
// unique_id of the service account.
func NewIamClient(ctx context.Context, opts ...option.ClientOption) (*IamClient, error) {
conn, err := transport.DialGRPC(ctx, append(defaultIamClientOptions(), opts...)...)
if err != nil {
@ -151,7 +134,7 @@ func NewIamClient(ctx context.Context, opts ...option.ClientOption) (*IamClient,
iamClient: adminpb.NewIAMClient(conn),
}
c.SetGoogleClientInfo()
c.setGoogleClientInfo()
return c, nil
}
@ -166,10 +149,10 @@ func (c *IamClient) Close() error {
return c.conn.Close()
}
// SetGoogleClientInfo sets the name and version of the application in
// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *IamClient) SetGoogleClientInfo(keyval ...string) {
func (c *IamClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
@ -177,38 +160,32 @@ func (c *IamClient) SetGoogleClientInfo(keyval ...string) {
// IamProjectPath returns the path for the project resource.
func IamProjectPath(project string) string {
path, err := iamProjectPathTemplate.Render(map[string]string{
"project": project,
})
if err != nil {
panic(err)
}
return path
return "" +
"projects/" +
project +
""
}
// IamServiceAccountPath returns the path for the service account resource.
func IamServiceAccountPath(project, serviceAccount string) string {
path, err := iamServiceAccountPathTemplate.Render(map[string]string{
"project": project,
"service_account": serviceAccount,
})
if err != nil {
panic(err)
}
return path
return "" +
"projects/" +
project +
"/serviceAccounts/" +
serviceAccount +
""
}
// IamKeyPath returns the path for the key resource.
func IamKeyPath(project, serviceAccount, key string) string {
path, err := iamKeyPathTemplate.Render(map[string]string{
"project": project,
"service_account": serviceAccount,
"key": key,
})
if err != nil {
panic(err)
}
return path
return "" +
"projects/" +
project +
"/serviceAccounts/" +
serviceAccount +
"/keys/" +
key +
""
}
// ListServiceAccounts lists [ServiceAccounts][google.iam.admin.v1.ServiceAccount] for a project.
@ -282,8 +259,8 @@ func (c *IamClient) CreateServiceAccount(ctx context.Context, req *adminpb.Creat
// UpdateServiceAccount updates a [ServiceAccount][google.iam.admin.v1.ServiceAccount].
//
// Currently, only the following fields are updatable:
// `display_name` .
// The `etag` is mandatory.
// display_name .
// The etag is mandatory.
func (c *IamClient) UpdateServiceAccount(ctx context.Context, req *adminpb.ServiceAccount, opts ...gax.CallOption) (*adminpb.ServiceAccount, error) {
ctx = insertXGoog(ctx, c.xGoogHeader)
opts = append(c.CallOptions.UpdateServiceAccount[0:len(c.CallOptions.UpdateServiceAccount):len(c.CallOptions.UpdateServiceAccount)], opts...)

View File

@ -19,6 +19,7 @@ package admin_test
import (
"cloud.google.com/go/iam/admin/apiv1"
"golang.org/x/net/context"
"google.golang.org/api/iterator"
adminpb "google.golang.org/genproto/googleapis/iam/admin/v1"
iampb "google.golang.org/genproto/googleapis/iam/v1"
)
@ -46,9 +47,11 @@ func ExampleIamClient_ListServiceAccounts() {
it := c.ListServiceAccounts(ctx, req)
for {
resp, err := it.Next()
if err == iterator.Done {
break
}
if err != nil {
// TODO: Handle error.
break
}
// TODO: Use resp.
_ = resp

View File

@ -40,6 +40,7 @@ import (
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
gstatus "google.golang.org/grpc/status"
)
var _ = io.EOF
@ -309,7 +310,7 @@ func TestIamListServiceAccounts(t *testing.T) {
func TestIamListServiceAccountsError(t *testing.T) {
errCode := codes.PermissionDenied
mockIam.err = grpc.Errorf(errCode, "test error")
mockIam.err = gstatus.Error(errCode, "test error")
var formattedName string = IamProjectPath("[PROJECT]")
var request = &adminpb.ListServiceAccountsRequest{
@ -323,7 +324,9 @@ func TestIamListServiceAccountsError(t *testing.T) {
resp, err := c.ListServiceAccounts(context.Background(), request).Next()
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
@ -378,7 +381,7 @@ func TestIamGetServiceAccount(t *testing.T) {
func TestIamGetServiceAccountError(t *testing.T) {
errCode := codes.PermissionDenied
mockIam.err = grpc.Errorf(errCode, "test error")
mockIam.err = gstatus.Error(errCode, "test error")
var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]")
var request = &adminpb.GetServiceAccountRequest{
@ -392,7 +395,9 @@ func TestIamGetServiceAccountError(t *testing.T) {
resp, err := c.GetServiceAccount(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
@ -449,7 +454,7 @@ func TestIamCreateServiceAccount(t *testing.T) {
func TestIamCreateServiceAccountError(t *testing.T) {
errCode := codes.PermissionDenied
mockIam.err = grpc.Errorf(errCode, "test error")
mockIam.err = gstatus.Error(errCode, "test error")
var formattedName string = IamProjectPath("[PROJECT]")
var accountId string = "accountId-803333011"
@ -465,7 +470,9 @@ func TestIamCreateServiceAccountError(t *testing.T) {
resp, err := c.CreateServiceAccount(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
@ -520,7 +527,7 @@ func TestIamUpdateServiceAccount(t *testing.T) {
func TestIamUpdateServiceAccountError(t *testing.T) {
errCode := codes.PermissionDenied
mockIam.err = grpc.Errorf(errCode, "test error")
mockIam.err = gstatus.Error(errCode, "test error")
var etag []byte = []byte("21")
var request = &adminpb.ServiceAccount{
@ -534,7 +541,9 @@ func TestIamUpdateServiceAccountError(t *testing.T) {
resp, err := c.UpdateServiceAccount(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
@ -571,7 +580,7 @@ func TestIamDeleteServiceAccount(t *testing.T) {
func TestIamDeleteServiceAccountError(t *testing.T) {
errCode := codes.PermissionDenied
mockIam.err = grpc.Errorf(errCode, "test error")
mockIam.err = gstatus.Error(errCode, "test error")
var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]")
var request = &adminpb.DeleteServiceAccountRequest{
@ -585,7 +594,9 @@ func TestIamDeleteServiceAccountError(t *testing.T) {
err = c.DeleteServiceAccount(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
}
@ -624,7 +635,7 @@ func TestIamListServiceAccountKeys(t *testing.T) {
func TestIamListServiceAccountKeysError(t *testing.T) {
errCode := codes.PermissionDenied
mockIam.err = grpc.Errorf(errCode, "test error")
mockIam.err = gstatus.Error(errCode, "test error")
var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]")
var request = &adminpb.ListServiceAccountKeysRequest{
@ -638,7 +649,9 @@ func TestIamListServiceAccountKeysError(t *testing.T) {
resp, err := c.ListServiceAccountKeys(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
@ -685,7 +698,7 @@ func TestIamGetServiceAccountKey(t *testing.T) {
func TestIamGetServiceAccountKeyError(t *testing.T) {
errCode := codes.PermissionDenied
mockIam.err = grpc.Errorf(errCode, "test error")
mockIam.err = gstatus.Error(errCode, "test error")
var formattedName string = IamKeyPath("[PROJECT]", "[SERVICE_ACCOUNT]", "[KEY]")
var request = &adminpb.GetServiceAccountKeyRequest{
@ -699,7 +712,9 @@ func TestIamGetServiceAccountKeyError(t *testing.T) {
resp, err := c.GetServiceAccountKey(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
@ -746,7 +761,7 @@ func TestIamCreateServiceAccountKey(t *testing.T) {
func TestIamCreateServiceAccountKeyError(t *testing.T) {
errCode := codes.PermissionDenied
mockIam.err = grpc.Errorf(errCode, "test error")
mockIam.err = gstatus.Error(errCode, "test error")
var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]")
var request = &adminpb.CreateServiceAccountKeyRequest{
@ -760,7 +775,9 @@ func TestIamCreateServiceAccountKeyError(t *testing.T) {
resp, err := c.CreateServiceAccountKey(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
@ -797,7 +814,7 @@ func TestIamDeleteServiceAccountKey(t *testing.T) {
func TestIamDeleteServiceAccountKeyError(t *testing.T) {
errCode := codes.PermissionDenied
mockIam.err = grpc.Errorf(errCode, "test error")
mockIam.err = gstatus.Error(errCode, "test error")
var formattedName string = IamKeyPath("[PROJECT]", "[SERVICE_ACCOUNT]", "[KEY]")
var request = &adminpb.DeleteServiceAccountKeyRequest{
@ -811,7 +828,9 @@ func TestIamDeleteServiceAccountKeyError(t *testing.T) {
err = c.DeleteServiceAccountKey(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
}
@ -857,7 +876,7 @@ func TestIamSignBlob(t *testing.T) {
func TestIamSignBlobError(t *testing.T) {
errCode := codes.PermissionDenied
mockIam.err = grpc.Errorf(errCode, "test error")
mockIam.err = gstatus.Error(errCode, "test error")
var formattedName string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]")
var bytesToSign []byte = []byte("45")
@ -873,7 +892,9 @@ func TestIamSignBlobError(t *testing.T) {
resp, err := c.SignBlob(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
@ -918,7 +939,7 @@ func TestIamGetIamPolicy(t *testing.T) {
func TestIamGetIamPolicyError(t *testing.T) {
errCode := codes.PermissionDenied
mockIam.err = grpc.Errorf(errCode, "test error")
mockIam.err = gstatus.Error(errCode, "test error")
var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]")
var request = &iampb.GetIamPolicyRequest{
@ -932,7 +953,9 @@ func TestIamGetIamPolicyError(t *testing.T) {
resp, err := c.getIamPolicy(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
@ -979,7 +1002,7 @@ func TestIamSetIamPolicy(t *testing.T) {
func TestIamSetIamPolicyError(t *testing.T) {
errCode := codes.PermissionDenied
mockIam.err = grpc.Errorf(errCode, "test error")
mockIam.err = gstatus.Error(errCode, "test error")
var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]")
var policy *iampb.Policy = &iampb.Policy{}
@ -995,7 +1018,9 @@ func TestIamSetIamPolicyError(t *testing.T) {
resp, err := c.setIamPolicy(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
@ -1037,7 +1062,7 @@ func TestIamTestIamPermissions(t *testing.T) {
func TestIamTestIamPermissionsError(t *testing.T) {
errCode := codes.PermissionDenied
mockIam.err = grpc.Errorf(errCode, "test error")
mockIam.err = gstatus.Error(errCode, "test error")
var formattedResource string = IamServiceAccountPath("[PROJECT]", "[SERVICE_ACCOUNT]")
var permissions []string = nil
@ -1053,7 +1078,9 @@ func TestIamTestIamPermissionsError(t *testing.T) {
resp, err := c.TestIamPermissions(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp
@ -1093,7 +1120,7 @@ func TestIamQueryGrantableRoles(t *testing.T) {
func TestIamQueryGrantableRolesError(t *testing.T) {
errCode := codes.PermissionDenied
mockIam.err = grpc.Errorf(errCode, "test error")
mockIam.err = gstatus.Error(errCode, "test error")
var fullResourceName string = "fullResourceName1300993644"
var request = &adminpb.QueryGrantableRolesRequest{
@ -1107,7 +1134,9 @@ func TestIamQueryGrantableRolesError(t *testing.T) {
resp, err := c.QueryGrantableRoles(context.Background(), request)
if c := grpc.Code(err); c != errCode {
if st, ok := gstatus.FromError(err); !ok {
t.Errorf("got error %v, expected grpc error", err)
} else if c := st.Code(); c != errCode {
t.Errorf("got error code %q, want %q", c, errCode)
}
_ = resp

View File

@ -16,9 +16,10 @@ package iam
import (
"fmt"
"reflect"
"sort"
"testing"
"cloud.google.com/go/internal/testutil"
)
func TestPolicy(t *testing.T) {
@ -65,7 +66,7 @@ func TestPolicy(t *testing.T) {
if msg, ok := checkMembers(p, Owner, nil); !ok {
t.Fatal(msg)
}
if got, want := p.Roles(), []RoleName(nil); !reflect.DeepEqual(got, want) {
if got, want := p.Roles(), []RoleName(nil); !testutil.Equal(got, want) {
t.Fatalf("roles: got %v, want %v", got, want)
}
}
@ -74,7 +75,7 @@ func checkMembers(p *Policy, role RoleName, wantMembers []string) (string, bool)
gotMembers := p.Members(role)
sort.Strings(gotMembers)
sort.Strings(wantMembers)
if !reflect.DeepEqual(gotMembers, wantMembers) {
if !testutil.Equal(gotMembers, wantMembers) {
return fmt.Sprintf("got %v, want %v", gotMembers, wantMembers), false
}
for _, m := range wantMembers {

View File

@ -22,6 +22,10 @@ import (
"strings"
"testing"
"time"
"github.com/google/go-cmp/cmp"
"cloud.google.com/go/internal/testutil"
)
type embed1 struct {
@ -142,7 +146,8 @@ func TestAgainstJSONEncodingNoTags(t *testing.T) {
t.Fatal(err)
}
setFields(fields, &got, s1)
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want,
cmp.AllowUnexported(S1{}, embed1{}, embed2{}, embed3{}, embed4{}, embed5{})) {
t.Errorf("got\n%+v\nwant\n%+v", got, want)
}
}
@ -166,7 +171,7 @@ func TestAgainstJSONEncodingEmbeddedTime(t *testing.T) {
t.Fatal(err)
}
setFields(fields, &got, myt)
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("got\n%+v\nwant\n%+v", got, want)
}
}
@ -269,7 +274,7 @@ func TestAgainstJSONEncodingWithTags(t *testing.T) {
t.Fatal(err)
}
setFields(fields, &got, s2)
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want, cmp.AllowUnexported(S2{})) {
t.Errorf("got\n%+v\nwant\n%+v", got, want)
}
}
@ -410,7 +415,7 @@ func compareFields(got []Field, want []*Field) (msg string, ok bool) {
}
// Need this because Field contains a function, which cannot be compared even
// by reflect.DeepEqual.
// by testutil.Equal.
func fieldsEqual(f1, f2 *Field) bool {
if f1 == nil || f2 == nil {
return f1 == f2
@ -418,7 +423,7 @@ func fieldsEqual(f1, f2 *Field) bool {
return f1.Name == f2.Name &&
f1.NameFromTag == f2.NameFromTag &&
f1.Type == f2.Type &&
reflect.DeepEqual(f1.ParsedTag, f2.ParsedTag)
testutil.Equal(f1.ParsedTag, f2.ParsedTag)
}
// Set the fields of dst from those of src.

View File

@ -38,7 +38,7 @@ go get -v ./...
# cd $GOCLOUD_HOME
# Run tests and tee output to log file, to be pushed to GCS as artifact.
go test -race -v -short ./... 2>&1 | tee $KOKORO_ARTIFACTS_DIR/$KOKORO_GERRIT_REVISION.log
go test -race -v -short ./... 2>&1 | tee $KOKORO_ARTIFACTS_DIR/$KOKORO_GERRIT_CHANGE_NUMBER.txt
# Make sure README.md is up to date.
make -C internal/readme test diff

View File

@ -20,6 +20,7 @@ package optional
import (
"fmt"
"strings"
"time"
)
type (
@ -37,6 +38,9 @@ type (
// Float64 is either a float64 or nil.
Float64 interface{}
// Duration is either a time.Duration or nil.
Duration interface{}
)
// ToBool returns its argument as a bool.
@ -89,6 +93,16 @@ func ToFloat64(v Float64) float64 {
return x
}
// ToDuration returns its argument as a time.Duration.
// It panics if its argument is nil or not a time.Duration.
func ToDuration(v Duration) time.Duration {
x, ok := v.(time.Duration)
if !ok {
doPanic("Duration", v)
}
return x
}
func doPanic(capType string, v interface{}) {
panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v))
}

32
vendor/cloud.google.com/go/internal/rpcreplay/Makefile generated vendored Normal file
View File

@ -0,0 +1,32 @@
# Copyright 2017 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Makefile for building Go files from protos.
# Change these to match your environment.
PROTOC=$(HOME)/bin/protoc
PROTOC_GO_PLUGIN_DIR=$(GOPATH)/bin
PROTOBUF_REPO=$(HOME)/git-repos/protobuf
gen-protos: sync-protobuf
for d in proto/*; do \
PATH=$(PATH):$(PROTOC_GO_PLUGIN_DIR) \
$(PROTOC) --go_out=plugins=grpc:$$d \
-I $$d -I $(PROTOBUF_REPO)/src $$d/*.proto; \
done
sync-protobuf:
cd $(PROTOBUF_REPO); git pull

106
vendor/cloud.google.com/go/internal/rpcreplay/doc.go generated vendored Normal file
View File

@ -0,0 +1,106 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
/*
Package rpcreplay supports the capture and replay of gRPC calls. Its main goal is
to improve testing. Once one captures the calls of a test that runs against a real
service, one has an "automatic mock" that can be replayed against the same test,
yielding a unit test that is fast and flake-free.
Recording
To record a sequence of gRPC calls to a file, create a Recorder and pass its
DialOptions to grpc.Dial:
rec, err := rpcreplay.NewRecorder("service.replay", nil)
if err != nil { ... }
defer func() {
if err := rec.Close(); err != nil { ... }
}()
conn, err := grpc.Dial(serverAddress, rec.DialOptions()...)
It's essential to close the Recorder when the interaction is finished.
There is also a NewRecorderWriter function for capturing to an arbitrary
io.Writer.
Replaying
Replaying a captured file looks almost identical: create a Replayer and use
its DialOptions. (Since we're reading the file and not writing it, we don't
have to be as careful about the error returned from Close).
rep, err := rpcreplay.NewReplayer("service.replay")
if err != nil { ... }
defer rep.Close()
conn, err := grpc.Dial(serverAddress, rep.DialOptions()...)
Initial State
A test might use random or time-sensitive values, for instance to create unique
resources for isolation from other tests. The test therefore has initial values --
the current time, a random seed -- that differ from run to run. You must record this
initial state and re-establish it on replay.
To record the initial state, serialize it into a []byte and pass it as the second
argument to NewRecorder:
timeNow := time.Now()
b, err := timeNow.MarshalBinary()
if err != nil { ... }
rec, err := rpcreplay.NewRecorder("service.replay", b)
On replay, get the bytes from Replayer.Initial:
rep, err := rpcreplay.NewReplayer("service.replay")
if err != nil { ... }
defer rep.Close()
err = timeNow.UnmarshalBinary(rep.Initial())
if err != nil { ... }
Nondeterminism
A nondeterministic program may invoke RPCs in a different order each time
it is run. The order in which RPCs are called during recording may differ
from the order during replay.
The replayer matches incoming to recorded requests by method name and request
contents, so nondeterminism is only a concern for identical requests that result
in different responses. A nondeterministic program whose behavior differs
depending on the order of such RPCs probably has a race condition: since both the
recorded sequence of RPCs and the sequence during replay are valid orderings, the
program should behave the same under both.
Other Replayer Differences
Besides the differences in replay mentioned above, other differences may cause issues
for some programs. We list them here.
The Replayer delivers a response to an RPC immediately, without waiting for other
incoming RPCs. This can violate causality. For example, in a Pub/Sub program where
one goroutine publishes and another subscribes, during replay the Subscribe call may
finish before the Publish call begins.
For streaming RPCs, the Replayer delivers the result of Send and Recv calls in
the order they were recorded. No attempt is made to match message contents.
At present, this package does not record or replay stream headers and trailers, or
the result of the CloseSend method.
*/
package rpcreplay // import "cloud.google.com/go/internal/rpcreplay"

View File

@ -0,0 +1,47 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpcreplay_test
var serverAddress string
// func Example_NewRecorder() {
// rec, err := rpcreplay.NewRecorder("service.replay", nil)
// if err != nil {
// // TODO: Handle error.
// }
// defer func() {
// if err := rec.Close(); err != nil {
// // TODO: Handle error.
// }
// }()
// conn, err := grpc.Dial(serverAddress, rec.DialOptions()...)
// if err != nil {
// // TODO: Handle error.
// }
// _ = conn // TODO: use connection
// }
// func Example_NewReplayer() {
// rep, err := rpcreplay.NewReplayer("service.replay")
// if err != nil {
// // TODO: Handle error.
// }
// defer rep.Close()
// conn, err := grpc.Dial(serverAddress, rep.DialOptions()...)
// if err != nil {
// // TODO: Handle error.
// }
// _ = conn // TODO: use connection
// }

View File

@ -0,0 +1,121 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpcreplay
import (
"io"
"log"
"net"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
pb "cloud.google.com/go/internal/rpcreplay/proto/intstore"
)
// intStoreServer is an in-memory implementation of IntStore.
type intStoreServer struct {
pb.IntStoreServer
Addr string
l net.Listener
gsrv *grpc.Server
items map[string]int32
}
func newIntStoreServer() *intStoreServer {
l, err := net.Listen("tcp", "127.0.0.1:0")
if err != nil {
log.Fatal(err)
}
s := &intStoreServer{
Addr: l.Addr().String(),
l: l,
gsrv: grpc.NewServer(),
}
pb.RegisterIntStoreServer(s.gsrv, s)
go s.gsrv.Serve(s.l)
return s
}
func (s *intStoreServer) stop() {
s.gsrv.Stop()
s.l.Close()
}
func (s *intStoreServer) Set(_ context.Context, item *pb.Item) (*pb.SetResponse, error) {
old := s.setItem(item)
return &pb.SetResponse{PrevValue: old}, nil
}
func (s *intStoreServer) setItem(item *pb.Item) int32 {
if s.items == nil {
s.items = map[string]int32{}
}
old := s.items[item.Name]
s.items[item.Name] = item.Value
return old
}
func (s *intStoreServer) Get(_ context.Context, req *pb.GetRequest) (*pb.Item, error) {
val, ok := s.items[req.Name]
if !ok {
return nil, grpc.Errorf(codes.NotFound, "%q", req.Name)
}
return &pb.Item{Name: req.Name, Value: val}, nil
}
func (s *intStoreServer) ListItems(_ *pb.ListItemsRequest, ss pb.IntStore_ListItemsServer) error {
for name, val := range s.items {
if err := ss.Send(&pb.Item{Name: name, Value: val}); err != nil {
return err
}
}
return nil
}
func (s *intStoreServer) SetStream(ss pb.IntStore_SetStreamServer) error {
n := 0
for {
item, err := ss.Recv()
if err == io.EOF {
break
}
if err != nil {
return err
}
s.setItem(item)
n++
}
return ss.SendAndClose(&pb.Summary{Count: int32(n)})
}
func (s *intStoreServer) StreamChat(ss pb.IntStore_StreamChatServer) error {
for {
item, err := ss.Recv()
if err == io.EOF {
break
}
if err != nil {
return err
}
if err := ss.Send(item); err != nil {
return err
}
}
return nil
}

View File

@ -0,0 +1,454 @@
// Code generated by protoc-gen-go.
// source: intstore.proto
// DO NOT EDIT!
/*
Package intstore is a generated protocol buffer package.
It is generated from these files:
intstore.proto
It has these top-level messages:
Item
SetResponse
GetRequest
Summary
ListItemsRequest
*/
package intstore
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import (
context "golang.org/x/net/context"
grpc "google.golang.org/grpc"
)
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Item struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
Value int32 `protobuf:"varint,2,opt,name=value" json:"value,omitempty"`
}
func (m *Item) Reset() { *m = Item{} }
func (m *Item) String() string { return proto.CompactTextString(m) }
func (*Item) ProtoMessage() {}
func (*Item) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Item) GetName() string {
if m != nil {
return m.Name
}
return ""
}
func (m *Item) GetValue() int32 {
if m != nil {
return m.Value
}
return 0
}
type SetResponse struct {
PrevValue int32 `protobuf:"varint,1,opt,name=prev_value,json=prevValue" json:"prev_value,omitempty"`
}
func (m *SetResponse) Reset() { *m = SetResponse{} }
func (m *SetResponse) String() string { return proto.CompactTextString(m) }
func (*SetResponse) ProtoMessage() {}
func (*SetResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} }
func (m *SetResponse) GetPrevValue() int32 {
if m != nil {
return m.PrevValue
}
return 0
}
type GetRequest struct {
Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"`
}
func (m *GetRequest) Reset() { *m = GetRequest{} }
func (m *GetRequest) String() string { return proto.CompactTextString(m) }
func (*GetRequest) ProtoMessage() {}
func (*GetRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} }
func (m *GetRequest) GetName() string {
if m != nil {
return m.Name
}
return ""
}
type Summary struct {
Count int32 `protobuf:"varint,1,opt,name=count" json:"count,omitempty"`
}
func (m *Summary) Reset() { *m = Summary{} }
func (m *Summary) String() string { return proto.CompactTextString(m) }
func (*Summary) ProtoMessage() {}
func (*Summary) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} }
func (m *Summary) GetCount() int32 {
if m != nil {
return m.Count
}
return 0
}
type ListItemsRequest struct {
}
func (m *ListItemsRequest) Reset() { *m = ListItemsRequest{} }
func (m *ListItemsRequest) String() string { return proto.CompactTextString(m) }
func (*ListItemsRequest) ProtoMessage() {}
func (*ListItemsRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} }
func init() {
proto.RegisterType((*Item)(nil), "intstore.Item")
proto.RegisterType((*SetResponse)(nil), "intstore.SetResponse")
proto.RegisterType((*GetRequest)(nil), "intstore.GetRequest")
proto.RegisterType((*Summary)(nil), "intstore.Summary")
proto.RegisterType((*ListItemsRequest)(nil), "intstore.ListItemsRequest")
}
// Reference imports to suppress errors if they are not otherwise used.
var _ context.Context
var _ grpc.ClientConn
// This is a compile-time assertion to ensure that this generated file
// is compatible with the grpc package it is being compiled against.
const _ = grpc.SupportPackageIsVersion4
// Client API for IntStore service
type IntStoreClient interface {
Set(ctx context.Context, in *Item, opts ...grpc.CallOption) (*SetResponse, error)
Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*Item, error)
// A server-to-client streaming RPC.
ListItems(ctx context.Context, in *ListItemsRequest, opts ...grpc.CallOption) (IntStore_ListItemsClient, error)
// A client-to-server streaming RPC.
SetStream(ctx context.Context, opts ...grpc.CallOption) (IntStore_SetStreamClient, error)
// A Bidirectional streaming RPC.
StreamChat(ctx context.Context, opts ...grpc.CallOption) (IntStore_StreamChatClient, error)
}
type intStoreClient struct {
cc *grpc.ClientConn
}
func NewIntStoreClient(cc *grpc.ClientConn) IntStoreClient {
return &intStoreClient{cc}
}
func (c *intStoreClient) Set(ctx context.Context, in *Item, opts ...grpc.CallOption) (*SetResponse, error) {
out := new(SetResponse)
err := grpc.Invoke(ctx, "/intstore.IntStore/Set", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *intStoreClient) Get(ctx context.Context, in *GetRequest, opts ...grpc.CallOption) (*Item, error) {
out := new(Item)
err := grpc.Invoke(ctx, "/intstore.IntStore/Get", in, out, c.cc, opts...)
if err != nil {
return nil, err
}
return out, nil
}
func (c *intStoreClient) ListItems(ctx context.Context, in *ListItemsRequest, opts ...grpc.CallOption) (IntStore_ListItemsClient, error) {
stream, err := grpc.NewClientStream(ctx, &_IntStore_serviceDesc.Streams[0], c.cc, "/intstore.IntStore/ListItems", opts...)
if err != nil {
return nil, err
}
x := &intStoreListItemsClient{stream}
if err := x.ClientStream.SendMsg(in); err != nil {
return nil, err
}
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
return x, nil
}
type IntStore_ListItemsClient interface {
Recv() (*Item, error)
grpc.ClientStream
}
type intStoreListItemsClient struct {
grpc.ClientStream
}
func (x *intStoreListItemsClient) Recv() (*Item, error) {
m := new(Item)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *intStoreClient) SetStream(ctx context.Context, opts ...grpc.CallOption) (IntStore_SetStreamClient, error) {
stream, err := grpc.NewClientStream(ctx, &_IntStore_serviceDesc.Streams[1], c.cc, "/intstore.IntStore/SetStream", opts...)
if err != nil {
return nil, err
}
x := &intStoreSetStreamClient{stream}
return x, nil
}
type IntStore_SetStreamClient interface {
Send(*Item) error
CloseAndRecv() (*Summary, error)
grpc.ClientStream
}
type intStoreSetStreamClient struct {
grpc.ClientStream
}
func (x *intStoreSetStreamClient) Send(m *Item) error {
return x.ClientStream.SendMsg(m)
}
func (x *intStoreSetStreamClient) CloseAndRecv() (*Summary, error) {
if err := x.ClientStream.CloseSend(); err != nil {
return nil, err
}
m := new(Summary)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func (c *intStoreClient) StreamChat(ctx context.Context, opts ...grpc.CallOption) (IntStore_StreamChatClient, error) {
stream, err := grpc.NewClientStream(ctx, &_IntStore_serviceDesc.Streams[2], c.cc, "/intstore.IntStore/StreamChat", opts...)
if err != nil {
return nil, err
}
x := &intStoreStreamChatClient{stream}
return x, nil
}
type IntStore_StreamChatClient interface {
Send(*Item) error
Recv() (*Item, error)
grpc.ClientStream
}
type intStoreStreamChatClient struct {
grpc.ClientStream
}
func (x *intStoreStreamChatClient) Send(m *Item) error {
return x.ClientStream.SendMsg(m)
}
func (x *intStoreStreamChatClient) Recv() (*Item, error) {
m := new(Item)
if err := x.ClientStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
// Server API for IntStore service
type IntStoreServer interface {
Set(context.Context, *Item) (*SetResponse, error)
Get(context.Context, *GetRequest) (*Item, error)
// A server-to-client streaming RPC.
ListItems(*ListItemsRequest, IntStore_ListItemsServer) error
// A client-to-server streaming RPC.
SetStream(IntStore_SetStreamServer) error
// A Bidirectional streaming RPC.
StreamChat(IntStore_StreamChatServer) error
}
func RegisterIntStoreServer(s *grpc.Server, srv IntStoreServer) {
s.RegisterService(&_IntStore_serviceDesc, srv)
}
func _IntStore_Set_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(Item)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(IntStoreServer).Set(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/intstore.IntStore/Set",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(IntStoreServer).Set(ctx, req.(*Item))
}
return interceptor(ctx, in, info, handler)
}
func _IntStore_Get_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) {
in := new(GetRequest)
if err := dec(in); err != nil {
return nil, err
}
if interceptor == nil {
return srv.(IntStoreServer).Get(ctx, in)
}
info := &grpc.UnaryServerInfo{
Server: srv,
FullMethod: "/intstore.IntStore/Get",
}
handler := func(ctx context.Context, req interface{}) (interface{}, error) {
return srv.(IntStoreServer).Get(ctx, req.(*GetRequest))
}
return interceptor(ctx, in, info, handler)
}
func _IntStore_ListItems_Handler(srv interface{}, stream grpc.ServerStream) error {
m := new(ListItemsRequest)
if err := stream.RecvMsg(m); err != nil {
return err
}
return srv.(IntStoreServer).ListItems(m, &intStoreListItemsServer{stream})
}
type IntStore_ListItemsServer interface {
Send(*Item) error
grpc.ServerStream
}
type intStoreListItemsServer struct {
grpc.ServerStream
}
func (x *intStoreListItemsServer) Send(m *Item) error {
return x.ServerStream.SendMsg(m)
}
func _IntStore_SetStream_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(IntStoreServer).SetStream(&intStoreSetStreamServer{stream})
}
type IntStore_SetStreamServer interface {
SendAndClose(*Summary) error
Recv() (*Item, error)
grpc.ServerStream
}
type intStoreSetStreamServer struct {
grpc.ServerStream
}
func (x *intStoreSetStreamServer) SendAndClose(m *Summary) error {
return x.ServerStream.SendMsg(m)
}
func (x *intStoreSetStreamServer) Recv() (*Item, error) {
m := new(Item)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
func _IntStore_StreamChat_Handler(srv interface{}, stream grpc.ServerStream) error {
return srv.(IntStoreServer).StreamChat(&intStoreStreamChatServer{stream})
}
type IntStore_StreamChatServer interface {
Send(*Item) error
Recv() (*Item, error)
grpc.ServerStream
}
type intStoreStreamChatServer struct {
grpc.ServerStream
}
func (x *intStoreStreamChatServer) Send(m *Item) error {
return x.ServerStream.SendMsg(m)
}
func (x *intStoreStreamChatServer) Recv() (*Item, error) {
m := new(Item)
if err := x.ServerStream.RecvMsg(m); err != nil {
return nil, err
}
return m, nil
}
var _IntStore_serviceDesc = grpc.ServiceDesc{
ServiceName: "intstore.IntStore",
HandlerType: (*IntStoreServer)(nil),
Methods: []grpc.MethodDesc{
{
MethodName: "Set",
Handler: _IntStore_Set_Handler,
},
{
MethodName: "Get",
Handler: _IntStore_Get_Handler,
},
},
Streams: []grpc.StreamDesc{
{
StreamName: "ListItems",
Handler: _IntStore_ListItems_Handler,
ServerStreams: true,
},
{
StreamName: "SetStream",
Handler: _IntStore_SetStream_Handler,
ClientStreams: true,
},
{
StreamName: "StreamChat",
Handler: _IntStore_StreamChat_Handler,
ServerStreams: true,
ClientStreams: true,
},
},
Metadata: "intstore.proto",
}
func init() { proto.RegisterFile("intstore.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 273 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x91, 0x4f, 0x4b, 0xc3, 0x40,
0x10, 0xc5, 0xb3, 0xfd, 0xa3, 0xcd, 0x08, 0x45, 0x87, 0x0a, 0x25, 0x20, 0x86, 0x3d, 0xe5, 0xa0,
0x21, 0xd4, 0xa3, 0x47, 0x0f, 0xa5, 0xe0, 0x29, 0x0b, 0x5e, 0x25, 0xca, 0x80, 0x05, 0xb3, 0x1b,
0x77, 0x27, 0x05, 0xbf, 0x84, 0x9f, 0x59, 0x36, 0x5b, 0x9b, 0xd2, 0x78, 0xdb, 0xb7, 0xf3, 0x66,
0xde, 0x6f, 0x76, 0x61, 0xbe, 0xd5, 0xec, 0xd8, 0x58, 0xca, 0x1b, 0x6b, 0xd8, 0xe0, 0xec, 0x4f,
0xcb, 0x02, 0x26, 0x1b, 0xa6, 0x1a, 0x11, 0x26, 0xba, 0xaa, 0x69, 0x29, 0x52, 0x91, 0xc5, 0x65,
0x77, 0xc6, 0x05, 0x4c, 0x77, 0xd5, 0x67, 0x4b, 0xcb, 0x51, 0x2a, 0xb2, 0x69, 0x19, 0x84, 0xbc,
0x83, 0x0b, 0x45, 0x5c, 0x92, 0x6b, 0x8c, 0x76, 0x84, 0x37, 0x00, 0x8d, 0xa5, 0xdd, 0x6b, 0x70,
0x8a, 0xce, 0x19, 0xfb, 0x9b, 0x97, 0xce, 0x9d, 0x02, 0xac, 0xbd, 0xfb, 0xab, 0x25, 0xc7, 0xff,
0xa5, 0xc8, 0x5b, 0x38, 0x57, 0x6d, 0x5d, 0x57, 0xf6, 0xdb, 0x07, 0xbe, 0x9b, 0x56, 0xf3, 0x7e,
0x4c, 0x10, 0x12, 0xe1, 0xf2, 0x79, 0xeb, 0xd8, 0x63, 0xba, 0xfd, 0xa0, 0xd5, 0xcf, 0x08, 0x66,
0x1b, 0xcd, 0xca, 0xef, 0x80, 0x39, 0x8c, 0x15, 0x31, 0xce, 0xf3, 0xc3, 0x96, 0xde, 0x9b, 0x5c,
0xf7, 0xfa, 0x08, 0x58, 0x46, 0x78, 0x0f, 0xe3, 0x35, 0x31, 0x2e, 0xfa, 0x7a, 0x8f, 0x98, 0x9c,
0x4c, 0x91, 0x11, 0x3e, 0x42, 0x7c, 0xc8, 0xc7, 0xa4, 0x2f, 0x9f, 0x42, 0x0d, 0x5b, 0x0b, 0x81,
0x2b, 0x88, 0x15, 0xb1, 0x62, 0x4b, 0x55, 0x3d, 0x20, 0xbc, 0x3a, 0x22, 0x0c, 0x4f, 0x20, 0xa3,
0xcc, 0xf7, 0x40, 0x68, 0x78, 0xfa, 0xa8, 0x86, 0x6b, 0x0d, 0x52, 0x32, 0x51, 0x88, 0xb7, 0xb3,
0xee, 0x63, 0x1f, 0x7e, 0x03, 0x00, 0x00, 0xff, 0xff, 0x22, 0x28, 0xa0, 0x49, 0xea, 0x01, 0x00,
0x00,
}

View File

@ -0,0 +1,54 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// IntStore is a service for testing the rpcreplay package.
// It is a simple key-value store for integers.
syntax = "proto3";
package intstore;
service IntStore {
rpc Set(Item) returns (SetResponse) {}
rpc Get(GetRequest) returns (Item) {}
// A server-to-client streaming RPC.
rpc ListItems(ListItemsRequest) returns (stream Item) {}
// A client-to-server streaming RPC.
rpc SetStream(stream Item) returns (Summary) {}
// A Bidirectional streaming RPC.
rpc StreamChat(stream Item) returns (stream Item) {}
}
message Item {
string name = 1;
int32 value = 2;
}
message SetResponse {
int32 prev_value = 1;
}
message GetRequest {
string name = 1;
}
message Summary {
int32 count = 1;
}
message ListItemsRequest {}

View File

@ -0,0 +1,170 @@
// Code generated by protoc-gen-go.
// source: rpcreplay.proto
// DO NOT EDIT!
/*
Package rpcreplay is a generated protocol buffer package.
It is generated from these files:
rpcreplay.proto
It has these top-level messages:
Entry
*/
package rpcreplay
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
import google_protobuf "github.com/golang/protobuf/ptypes/any"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
type Entry_Kind int32
const (
Entry_TYPE_UNSPECIFIED Entry_Kind = 0
// A unary request.
// method: the full name of the method
// message: the request proto
// is_error: false
// ref_index: 0
Entry_REQUEST Entry_Kind = 1
// A unary response.
// method: the full name of the method
// message:
// if is_error: a google.rpc.Status proto
// else: the response proto
// ref_index: index in the sequence of Entries of matching request (1-based)
Entry_RESPONSE Entry_Kind = 2
// A method that creates a stream.
// method: the full name of the method
// message:
// if is_error: a google.rpc.Status proto
// else: nil
// ref_index: 0
Entry_CREATE_STREAM Entry_Kind = 3
// A call to Send on the client returned by a stream-creating method.
// method: unset
// message: the proto being sent
// is_error: false
// ref_index: index of matching CREATE_STREAM entry (1-based)
Entry_SEND Entry_Kind = 4
// A call to Recv on the client returned by a stream-creating method.
// method: unset
// message:
// if is_error: a google.rpc.Status proto, or nil on EOF
// else: the received message
// ref_index: index of matching CREATE_STREAM entry
Entry_RECV Entry_Kind = 5
)
var Entry_Kind_name = map[int32]string{
0: "TYPE_UNSPECIFIED",
1: "REQUEST",
2: "RESPONSE",
3: "CREATE_STREAM",
4: "SEND",
5: "RECV",
}
var Entry_Kind_value = map[string]int32{
"TYPE_UNSPECIFIED": 0,
"REQUEST": 1,
"RESPONSE": 2,
"CREATE_STREAM": 3,
"SEND": 4,
"RECV": 5,
}
func (x Entry_Kind) String() string {
return proto.EnumName(Entry_Kind_name, int32(x))
}
func (Entry_Kind) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0, 0} }
// An Entry represents a single RPC activity, typically a request or response.
type Entry struct {
Kind Entry_Kind `protobuf:"varint,1,opt,name=kind,enum=rpcreplay.Entry_Kind" json:"kind,omitempty"`
Method string `protobuf:"bytes,2,opt,name=method" json:"method,omitempty"`
Message *google_protobuf.Any `protobuf:"bytes,3,opt,name=message" json:"message,omitempty"`
IsError bool `protobuf:"varint,4,opt,name=is_error,json=isError" json:"is_error,omitempty"`
RefIndex int32 `protobuf:"varint,5,opt,name=ref_index,json=refIndex" json:"ref_index,omitempty"`
}
func (m *Entry) Reset() { *m = Entry{} }
func (m *Entry) String() string { return proto.CompactTextString(m) }
func (*Entry) ProtoMessage() {}
func (*Entry) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func (m *Entry) GetKind() Entry_Kind {
if m != nil {
return m.Kind
}
return Entry_TYPE_UNSPECIFIED
}
func (m *Entry) GetMethod() string {
if m != nil {
return m.Method
}
return ""
}
func (m *Entry) GetMessage() *google_protobuf.Any {
if m != nil {
return m.Message
}
return nil
}
func (m *Entry) GetIsError() bool {
if m != nil {
return m.IsError
}
return false
}
func (m *Entry) GetRefIndex() int32 {
if m != nil {
return m.RefIndex
}
return 0
}
func init() {
proto.RegisterType((*Entry)(nil), "rpcreplay.Entry")
proto.RegisterEnum("rpcreplay.Entry_Kind", Entry_Kind_name, Entry_Kind_value)
}
func init() { proto.RegisterFile("rpcreplay.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 289 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x44, 0x8e, 0xdf, 0x4e, 0xc2, 0x30,
0x14, 0xc6, 0x2d, 0x6c, 0x30, 0x0e, 0xfe, 0xa9, 0x0d, 0x9a, 0xa1, 0x37, 0x0b, 0x57, 0xf3, 0xa6,
0x24, 0xf8, 0x04, 0x04, 0x8e, 0x09, 0x31, 0x22, 0xb6, 0xc3, 0xc4, 0x1b, 0x17, 0x70, 0x05, 0x17,
0xa1, 0x25, 0xdd, 0x4c, 0xdc, 0x6b, 0xf8, 0xc4, 0x66, 0x13, 0xf4, 0xae, 0xbf, 0x7e, 0xbf, 0x9c,
0xef, 0x83, 0x33, 0xbb, 0x7b, 0xb3, 0x6a, 0xb7, 0x59, 0x14, 0x7c, 0x67, 0x4d, 0x6e, 0x58, 0xeb,
0xef, 0xe3, 0xaa, 0xbb, 0x36, 0x66, 0xbd, 0x51, 0xfd, 0x2a, 0x58, 0x7e, 0xae, 0xfa, 0x0b, 0xbd,
0xb7, 0x7a, 0xdf, 0x35, 0x70, 0x51, 0xe7, 0xb6, 0x60, 0x37, 0xe0, 0x7c, 0xa4, 0x3a, 0xf1, 0x49,
0x40, 0xc2, 0xd3, 0xc1, 0x05, 0xff, 0xbf, 0x57, 0xe5, 0xfc, 0x3e, 0xd5, 0x89, 0xa8, 0x14, 0x76,
0x09, 0x8d, 0xad, 0xca, 0xdf, 0x4d, 0xe2, 0xd7, 0x02, 0x12, 0xb6, 0xc4, 0x9e, 0x18, 0x87, 0xe6,
0x56, 0x65, 0xd9, 0x62, 0xad, 0xfc, 0x7a, 0x40, 0xc2, 0xf6, 0xa0, 0xc3, 0x7f, 0x9b, 0xf9, 0xa1,
0x99, 0x0f, 0x75, 0x21, 0x0e, 0x12, 0xeb, 0x82, 0x97, 0x66, 0xb1, 0xb2, 0xd6, 0x58, 0xdf, 0x09,
0x48, 0xe8, 0x89, 0x66, 0x9a, 0x61, 0x89, 0xec, 0x1a, 0x5a, 0x56, 0xad, 0xe2, 0x54, 0x27, 0xea,
0xcb, 0x77, 0x03, 0x12, 0xba, 0xc2, 0xb3, 0x6a, 0x35, 0x29, 0xb9, 0xf7, 0x0a, 0x4e, 0xb9, 0x86,
0x75, 0x80, 0x46, 0x2f, 0x33, 0x8c, 0xe7, 0x53, 0x39, 0xc3, 0xd1, 0xe4, 0x6e, 0x82, 0x63, 0x7a,
0xc4, 0xda, 0xd0, 0x14, 0xf8, 0x34, 0x47, 0x19, 0x51, 0xc2, 0x8e, 0xc1, 0x13, 0x28, 0x67, 0x8f,
0x53, 0x89, 0xb4, 0xc6, 0xce, 0xe1, 0x64, 0x24, 0x70, 0x18, 0x61, 0x2c, 0x23, 0x81, 0xc3, 0x07,
0x5a, 0x67, 0x1e, 0x38, 0x12, 0xa7, 0x63, 0xea, 0x94, 0x2f, 0x81, 0xa3, 0x67, 0xea, 0x2e, 0x1b,
0xd5, 0xdc, 0xdb, 0x9f, 0x00, 0x00, 0x00, 0xff, 0xff, 0xe7, 0x9b, 0x9d, 0x4f, 0x54, 0x01, 0x00,
0x00,
}

View File

@ -0,0 +1,71 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
syntax = "proto3";
package rpcreplay;
import "google/protobuf/any.proto";
// An Entry represents a single RPC activity, typically a request or response.
message Entry {
enum Kind {
TYPE_UNSPECIFIED = 0;
// A unary request.
// method: the full name of the method
// message: the request proto
// is_error: false
// ref_index: 0
REQUEST = 1;
// A unary response.
// method: the full name of the method
// message:
// if is_error: a google.rpc.Status proto
// else: the response proto
// ref_index: index in the sequence of Entries of matching request (1-based)
RESPONSE = 2;
// A method that creates a stream.
// method: the full name of the method
// message:
// if is_error: a google.rpc.Status proto
// else: nil
// ref_index: 0
CREATE_STREAM = 3;
// A call to Send on the client returned by a stream-creating method.
// method: unset
// message: the proto being sent
// is_error: false
// ref_index: index of matching CREATE_STREAM entry (1-based)
SEND = 4; // message sent on stream
// A call to Recv on the client returned by a stream-creating method.
// method: unset
// message:
// if is_error: a google.rpc.Status proto, or nil on EOF
// else: the received message
// ref_index: index of matching CREATE_STREAM entry
RECV = 5; // message received from stream
}
Kind kind = 1;
string method = 2; // method name
google.protobuf.Any message = 3; // request, response or error status
bool is_error = 4; // was response an error?
int32 ref_index = 5; // for RESPONSE, index of matching request;
// for SEND/RECV, index of CREATE_STREAM
}

View File

@ -0,0 +1,689 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpcreplay
import (
"bufio"
"encoding/binary"
"errors"
"fmt"
"io"
"log"
"os"
"sync"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/metadata"
"google.golang.org/grpc/status"
pb "cloud.google.com/go/internal/rpcreplay/proto/rpcreplay"
"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes"
"github.com/golang/protobuf/ptypes/any"
spb "google.golang.org/genproto/googleapis/rpc/status"
)
// A Recorder records RPCs for later playback.
type Recorder struct {
	mu   sync.Mutex    // guards w, next and err
	w    *bufio.Writer // destination for serialized entries
	f    *os.File      // underlying file when built via NewRecorder; nil otherwise
	next int           // 1-based index assigned to the next entry written
	err  error         // first write error; once set, all further writes fail with it
}
// NewRecorder creates a recorder that writes to filename. The file will
// also store the initial bytes for retrieval during replay.
//
// You must call Close on the Recorder to ensure that all data is written.
func NewRecorder(filename string, initial []byte) (*Recorder, error) {
	f, err := os.Create(filename)
	if err != nil {
		return nil, err
	}
	r, werr := NewRecorderWriter(f, initial)
	if werr != nil {
		// Best-effort cleanup; the header-write error is the one to report.
		_ = f.Close()
		return nil, werr
	}
	r.f = f // remember the file so Close can close it
	return r, nil
}
// NewRecorderWriter creates a recorder that writes to w. The initial
// bytes will also be written to w for retrieval during replay.
//
// You must call Close on the Recorder to ensure that all data is written.
func NewRecorderWriter(w io.Writer, initial []byte) (*Recorder, error) {
	buf := bufio.NewWriter(w)
	if err := writeHeader(buf, initial); err != nil {
		return nil, err
	}
	rec := &Recorder{w: buf, next: 1} // entry indices are 1-based
	return rec, nil
}
// DialOptions returns the options that must be passed to grpc.Dial
// to enable recording.
func (r *Recorder) DialOptions() []grpc.DialOption {
	// Every unary call and every stream on the connection is routed
	// through the recorder's interceptors.
	return []grpc.DialOption{
		grpc.WithUnaryInterceptor(r.interceptUnary),
		grpc.WithStreamInterceptor(r.interceptStream),
	}
}
// Close flushes any buffered entries and, if the Recorder owns a file,
// closes it. The first error encountered (flush, then close) is returned.
func (r *Recorder) Close() error {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.err != nil {
		return r.err
	}
	ferr := r.w.Flush()
	if r.f == nil {
		return ferr
	}
	if cerr := r.f.Close(); ferr == nil {
		return cerr
	}
	return ferr
}
// Intercepts all unary (non-stream) RPCs.
//
// The request entry is written before the RPC is invoked, so request entries
// appear in the file in invocation order; the response entry refers back to
// its request via refIndex.
func (r *Recorder) interceptUnary(ctx context.Context, method string, req, res interface{}, cc *grpc.ClientConn, invoker grpc.UnaryInvoker, opts ...grpc.CallOption) error {
	ereq := &entry{
		kind:   pb.Entry_REQUEST,
		method: method,
		msg:    message{msg: req.(proto.Message)},
	}
	refIndex, err := r.writeEntry(ereq)
	if err != nil {
		return err
	}
	// Perform the actual RPC.
	ierr := invoker(ctx, method, req, res, cc, opts...)
	eres := &entry{
		kind:     pb.Entry_RESPONSE,
		refIndex: refIndex,
	}
	// If the error is not a gRPC status, then something more
	// serious is wrong. More significantly, we have no way
	// of serializing an arbitrary error. So just return it
	// without recording the response.
	if _, ok := status.FromError(ierr); !ok {
		r.mu.Lock()
		r.err = fmt.Errorf("saw non-status error in %s response: %v (%T)", method, ierr, ierr)
		r.mu.Unlock()
		return ierr
	}
	// Record either the response message or its status error.
	eres.msg.set(res, ierr)
	if _, err := r.writeEntry(eres); err != nil {
		return err
	}
	return ierr
}
// writeEntry serializes e under the lock and returns its 1-based index.
// The first write failure is latched into r.err and returned thereafter.
func (r *Recorder) writeEntry(e *entry) (int, error) {
	r.mu.Lock()
	defer r.mu.Unlock()
	if r.err != nil {
		return 0, r.err
	}
	if err := writeEntry(r.w, e); err != nil {
		r.err = err
		return 0, err
	}
	idx := r.next
	r.next++
	return idx, nil
}
// interceptStream records stream creation (including any creation error);
// per-message recording is handled by the recClientStream wrapper it returns.
func (r *Recorder) interceptStream(ctx context.Context, desc *grpc.StreamDesc, cc *grpc.ClientConn, method string, streamer grpc.Streamer, opts ...grpc.CallOption) (grpc.ClientStream, error) {
	cstream, serr := streamer(ctx, desc, cc, method, opts...)
	e := &entry{
		kind:   pb.Entry_CREATE_STREAM,
		method: method,
	}
	// Record the creation error (if any) so replay can reproduce it.
	e.msg.set(nil, serr)
	refIndex, err := r.writeEntry(e)
	if err != nil {
		return nil, err
	}
	// Return the wrapper together with serr, so the caller observes the
	// same (stream, error) pair it would have seen without recording.
	return &recClientStream{
		ctx:      ctx,
		rec:      r,
		cstream:  cstream,
		refIndex: refIndex,
	}, serr
}
// A recClientStream implements the grpc.ClientStream interface.
// It behaves exactly like the default ClientStream, but also
// records all messages sent and received.
type recClientStream struct {
	ctx      context.Context
	rec      *Recorder         // destination for SEND/RECV entries
	cstream  grpc.ClientStream // the real, underlying stream
	refIndex int               // index of this stream's CREATE_STREAM entry
}

// Context returns the context the stream was created with.
func (rcs *recClientStream) Context() context.Context { return rcs.ctx }
// SendMsg forwards m to the underlying stream and records a SEND entry
// containing the message (and the send error, if any).
func (rcs *recClientStream) SendMsg(m interface{}) error {
	serr := rcs.cstream.SendMsg(m)
	ent := &entry{kind: pb.Entry_SEND, refIndex: rcs.refIndex}
	ent.msg.set(m, serr)
	if _, werr := rcs.rec.writeEntry(ent); werr != nil {
		return werr
	}
	return serr
}
// RecvMsg receives into m from the underlying stream and records a RECV
// entry containing the received message (and the receive error, if any).
func (rcs *recClientStream) RecvMsg(m interface{}) error {
	serr := rcs.cstream.RecvMsg(m)
	ent := &entry{kind: pb.Entry_RECV, refIndex: rcs.refIndex}
	ent.msg.set(m, serr)
	if _, werr := rcs.rec.writeEntry(ent); werr != nil {
		return werr
	}
	return serr
}
// Header delegates to the underlying stream. Metadata is not yet recorded.
func (rcs *recClientStream) Header() (metadata.MD, error) {
	// TODO(jba): record.
	return rcs.cstream.Header()
}

// Trailer delegates to the underlying stream. Metadata is not yet recorded.
func (rcs *recClientStream) Trailer() metadata.MD {
	// TODO(jba): record.
	return rcs.cstream.Trailer()
}

// CloseSend delegates to the underlying stream. The close is not yet recorded.
func (rcs *recClientStream) CloseSend() error {
	// TODO(jba): record.
	return rcs.cstream.CloseSend()
}
// A Replayer replays a set of RPCs saved by a Recorder.
type Replayer struct {
	initial []byte                                // initial state
	log     func(format string, v ...interface{}) // for debugging
	mu      sync.Mutex                            // guards calls and streams
	calls   []*call                               // recorded unary calls; entries are nilled out once consumed
	streams []*stream                             // recorded streams; entries are nilled out once consumed
}
// A call represents a unary RPC, with a request and response (or error).
type call struct {
	method   string        // full method name
	request  proto.Message // the recorded request
	response message       // the recorded response message or status error
}

// A stream represents a gRPC stream, with an initial create-stream call, followed by
// zero or more sends and/or receives.
type stream struct {
	method      string
	createIndex int       // 1-based entry index of the CREATE_STREAM record
	createErr   error     // error from create call
	sends       []message // messages sent on the stream, in recorded order
	recvs       []message // messages received on the stream, in recorded order
}
// NewReplayer creates a Replayer that reads from filename.
func NewReplayer(filename string) (*Replayer, error) {
	f, oerr := os.Open(filename)
	if oerr != nil {
		return nil, oerr
	}
	// The whole recording is read eagerly, so the file can be closed here.
	defer f.Close()
	return NewReplayerReader(f)
}
// NewReplayerReader creates a Replayer that reads from r.
func NewReplayerReader(r io.Reader) (*Replayer, error) {
	rep := &Replayer{
		log: func(string, ...interface{}) {}, // debug logging is off by default
	}
	if err := rep.read(r); err != nil {
		return nil, err
	}
	return rep, nil
}
// read reads the stream of recorded entries.
// It matches requests with responses, with each pair grouped
// into a call struct.
func (rep *Replayer) read(r io.Reader) error {
	r = bufio.NewReader(r)
	bytes, err := readHeader(r)
	if err != nil {
		return err
	}
	rep.initial = bytes
	// Pending requests/streams, keyed by their 1-based entry index so
	// that later entries can refer back to them via refIndex.
	callsByIndex := map[int]*call{}
	streamsByIndex := map[int]*stream{}
	for i := 1; ; i++ {
		e, err := readEntry(r)
		if err != nil {
			return err
		}
		// A nil entry marks the end of the recording.
		if e == nil {
			break
		}
		switch e.kind {
		case pb.Entry_REQUEST:
			// Hold the request until its response arrives.
			callsByIndex[i] = &call{
				method:  e.method,
				request: e.msg.msg,
			}
		case pb.Entry_RESPONSE:
			// Pair with the pending request named by refIndex.
			call := callsByIndex[e.refIndex]
			if call == nil {
				return fmt.Errorf("replayer: no request for response #%d", i)
			}
			delete(callsByIndex, e.refIndex)
			call.response = e.msg
			rep.calls = append(rep.calls, call)
		case pb.Entry_CREATE_STREAM:
			// Streams stay in streamsByIndex so later SEND/RECV
			// entries can attach to them.
			s := &stream{method: e.method, createIndex: i}
			s.createErr = e.msg.err
			streamsByIndex[i] = s
			rep.streams = append(rep.streams, s)
		case pb.Entry_SEND:
			s := streamsByIndex[e.refIndex]
			if s == nil {
				return fmt.Errorf("replayer: no stream for send #%d", i)
			}
			s.sends = append(s.sends, e.msg)
		case pb.Entry_RECV:
			s := streamsByIndex[e.refIndex]
			if s == nil {
				return fmt.Errorf("replayer: no stream for recv #%d", i)
			}
			s.recvs = append(s.recvs, e.msg)
		default:
			return fmt.Errorf("replayer: unknown kind %s", e.kind)
		}
	}
	// Every request must have been matched with a response.
	if len(callsByIndex) > 0 {
		return fmt.Errorf("replayer: %d unmatched requests", len(callsByIndex))
	}
	return nil
}
// DialOptions returns the options that must be passed to grpc.Dial
// to enable replaying.
func (r *Replayer) DialOptions() []grpc.DialOption {
	return []grpc.DialOption{
		// On replay, we make no RPCs, which means the connection may be closed
		// before the normally async Dial completes. Making the Dial synchronous
		// fixes that.
		grpc.WithBlock(),
		grpc.WithUnaryInterceptor(r.interceptUnary),
		grpc.WithStreamInterceptor(r.interceptStream),
	}
}
// Initial returns the initial state saved by the Recorder.
func (r *Replayer) Initial() []byte { return r.initial }

// SetLogFunc sets a function to be used for debug logging. The function
// should be safe to be called from multiple goroutines.
func (r *Replayer) SetLogFunc(f func(format string, v ...interface{})) {
	r.log = f
}

// Close closes the Replayer. It is currently a no-op: all replay state
// lives in memory.
func (r *Replayer) Close() error {
	return nil
}
// interceptUnary replays a unary RPC: it finds the recorded call with the
// same method and request, and reproduces its recorded response or error.
// The real invoker is ignored, so no RPC is actually made.
func (r *Replayer) interceptUnary(_ context.Context, method string, req, res interface{}, _ *grpc.ClientConn, _ grpc.UnaryInvoker, _ ...grpc.CallOption) error {
	mreq := req.(proto.Message)
	r.log("request %s (%s)", method, req)
	call := r.extractCall(method, mreq)
	if call == nil {
		return fmt.Errorf("replayer: request not found: %s", mreq)
	}
	r.log("returning %v", call.response)
	if call.response.err != nil {
		return call.response.err
	}
	proto.Merge(res.(proto.Message), call.response.msg) // copy msg into res
	return nil
}
// interceptStream replays stream creation: it finds the next recorded stream
// for method and reproduces its recorded creation error, or returns a
// repClientStream that replays the stream's sends and receives.
func (r *Replayer) interceptStream(ctx context.Context, _ *grpc.StreamDesc, _ *grpc.ClientConn, method string, _ grpc.Streamer, _ ...grpc.CallOption) (grpc.ClientStream, error) {
	r.log("create-stream %s", method)
	str := r.extractStream(method)
	if str == nil {
		return nil, fmt.Errorf("replayer: stream not found for method %s", method)
	}
	if str.createErr != nil {
		return nil, str.createErr
	}
	return &repClientStream{ctx: ctx, str: str}, nil
}
// A repClientStream replays a recorded stream: SendMsg consumes from the
// recorded send list and RecvMsg from the recorded receive list.
type repClientStream struct {
	ctx context.Context
	str *stream // the recorded stream being replayed
}

// Context returns the context the stream was created with.
func (rcs *repClientStream) Context() context.Context { return rcs.ctx }
// SendMsg consumes the next recorded send and reproduces its error (nil on
// a recorded success). The message m itself is not compared against the
// recording.
func (rcs *repClientStream) SendMsg(m interface{}) error {
	sends := rcs.str.sends
	if len(sends) == 0 {
		return fmt.Errorf("replayer: no more sends for stream %s, created at index %d",
			rcs.str.method, rcs.str.createIndex)
	}
	// TODO(jba): Do not assume that the sends happen in the same order on replay.
	head := sends[0]
	rcs.str.sends = sends[1:]
	return head.err
}
// RecvMsg consumes the next recorded receive: a recorded error is returned
// as-is, otherwise the recorded message is merged into m.
func (rcs *repClientStream) RecvMsg(m interface{}) error {
	recvs := rcs.str.recvs
	if len(recvs) == 0 {
		return fmt.Errorf("replayer: no more receives for stream %s, created at index %d",
			rcs.str.method, rcs.str.createIndex)
	}
	head := recvs[0]
	rcs.str.recvs = recvs[1:]
	if head.err != nil {
		return head.err
	}
	proto.Merge(m.(proto.Message), head.msg) // copy recorded message into m
	return nil
}
// Header is unsupported on replay; it logs a warning and returns nil metadata.
func (rcs *repClientStream) Header() (metadata.MD, error) {
	log.Printf("replay: stream metadata not supported")
	return nil, nil
}

// Trailer is unsupported on replay; it logs a warning and returns nil metadata.
func (rcs *repClientStream) Trailer() metadata.MD {
	log.Printf("replay: stream metadata not supported")
	return nil
}

// CloseSend is a no-op on replay.
func (rcs *repClientStream) CloseSend() error {
	return nil
}
// extractCall finds the first call in the list with the same method
// and request. It returns nil if it can't find such a call.
func (r *Replayer) extractCall(method string, req proto.Message) *call {
	r.mu.Lock()
	defer r.mu.Unlock()
	for i := range r.calls {
		c := r.calls[i]
		if c == nil || c.method != method || !proto.Equal(req, c.request) {
			continue
		}
		r.calls[i] = nil // consume this call so it is not matched again
		return c
	}
	return nil
}
// extractStream finds, consumes, and returns the first unused recorded stream
// for method, or nil if none remains.
func (r *Replayer) extractStream(method string) *stream {
	r.mu.Lock()
	defer r.mu.Unlock()
	for i := range r.streams {
		s := r.streams[i]
		if s == nil || method != s.method {
			continue
		}
		r.streams[i] = nil // consume the stream so it is not replayed twice
		return s
	}
	return nil
}
// Fprint reads the entries from filename and writes them to w in human-readable form.
// It is intended for debugging.
func Fprint(w io.Writer, filename string) error {
	f, err := os.Open(filename)
	if err != nil {
		return err
	}
	defer f.Close()
	return FprintReader(w, f)
}
// FprintReader reads the entries from r and writes them to w in human-readable form.
// It is intended for debugging.
func FprintReader(w io.Writer, r io.Reader) error {
	initial, err := readHeader(r)
	if err != nil {
		return err
	}
	fmt.Fprintf(w, "initial state: %q\n", string(initial))
	for i := 1; ; i++ {
		e, err := readEntry(r)
		if err != nil {
			return err
		}
		if e == nil {
			// End of file: all entries printed.
			return nil
		}
		kind := "message"
		if e.msg.err != nil {
			kind = "error"
		}
		fmt.Fprintf(w, "#%d: kind: %s, method: %s, ref index: %d, %s:\n",
			i, e.kind, e.method, e.refIndex, kind)
		if e.msg.err != nil {
			fmt.Fprintf(w, "%v\n", e.msg.err)
		} else if err := proto.MarshalText(w, e.msg.msg); err != nil {
			return err
		}
	}
}
// An entry holds one gRPC action (request, response, etc.).
type entry struct {
	kind   pb.Entry_Kind // what kind of action this entry records
	method string        // full gRPC method name, e.g. "/pkg.Service/Method"
	msg    message       // the payload message or error for the action
	refIndex int // index of corresponding request or create-stream
}
// equal reports whether e1 and e2 record the same action, payload and error.
// Two nil entries are equal; a nil and a non-nil entry are not.
func (e1 *entry) equal(e2 *entry) bool {
	switch {
	case e1 == nil && e2 == nil:
		return true
	case e1 == nil || e2 == nil:
		return false
	}
	if e1.kind != e2.kind || e1.method != e2.method || e1.refIndex != e2.refIndex {
		return false
	}
	return proto.Equal(e1.msg.msg, e2.msg.msg) && errEqual(e1.msg.err, e2.msg.err)
}
// errEqual reports whether e1 and e2 are the same error. Identical errors
// compare equal; otherwise both must be gRPC statuses with equal status
// protos.
func errEqual(e1, e2 error) bool {
	if e1 == e2 {
		return true
	}
	s1, ok := status.FromError(e1)
	if !ok {
		return false
	}
	s2, ok := status.FromError(e2)
	if !ok {
		return false
	}
	return proto.Equal(s1.Proto(), s2.Proto())
}
// message holds either a single proto.Message or an error.
type message struct {
	msg proto.Message
	err error
}

// set records msg and err. When err is io.EOF the accompanying message (if
// any) is deliberately dropped: EOF marks normal end-of-stream, not a
// payload.
func (m *message) set(msg interface{}, err error) {
	m.err = err
	if err != io.EOF && msg != nil {
		m.msg = msg.(proto.Message)
	}
}
// File format:
// header
// sequence of Entry protos
//
// Header format:
// magic string
// a record containing the bytes of the initial state
const magic = "RPCReplay"

// writeHeader writes the replay-file header to w: the magic string followed
// by a record holding the initial-state bytes.
func writeHeader(w io.Writer, initial []byte) error {
	_, err := io.WriteString(w, magic)
	if err != nil {
		return err
	}
	return writeRecord(w, initial)
}
// readHeader validates the magic string at the start of r and returns the
// initial-state bytes that follow it. Truncated input yields descriptive
// errors rather than raw io.EOF.
func readHeader(r io.Reader) ([]byte, error) {
	got := make([]byte, len(magic))
	if _, err := io.ReadFull(r, got); err != nil {
		if err == io.EOF {
			return nil, errors.New("rpcreplay: empty replay file")
		}
		return nil, err
	}
	if string(got) != magic {
		return nil, errors.New("rpcreplay: not a replay file (does not begin with magic string)")
	}
	initial, err := readRecord(r)
	if err == io.EOF {
		err = errors.New("rpcreplay: missing initial state")
	}
	return initial, err
}
// writeEntry serializes e to w as a length-prefixed pb.Entry record.
// A non-EOF error must be convertible to a gRPC Status so it can be
// round-tripped through the wire format; io.EOF is encoded as IsError
// with no embedded message (see readEntry).
func writeEntry(w io.Writer, e *entry) error {
	// Choose the proto to embed: the status proto for a real error,
	// otherwise the entry's message (which may be nil, e.g. for a
	// create-stream entry or an io.EOF receive).
	var m proto.Message
	if e.msg.err != nil && e.msg.err != io.EOF {
		s, ok := status.FromError(e.msg.err)
		if !ok {
			return fmt.Errorf("rpcreplay: error %v is not a Status", e.msg.err)
		}
		m = s.Proto()
	} else {
		m = e.msg.msg
	}
	// Wrap the payload in an Any so the entry can carry an arbitrary
	// message type.
	var a *any.Any
	var err error
	if m != nil {
		a, err = ptypes.MarshalAny(m)
		if err != nil {
			return err
		}
	}
	pe := &pb.Entry{
		Kind:     e.kind,
		Method:   e.method,
		Message:  a,
		IsError:  e.msg.err != nil,
		RefIndex: int32(e.refIndex),
	}
	bytes, err := proto.Marshal(pe)
	if err != nil {
		return err
	}
	return writeRecord(w, bytes)
}
// readEntry reads one length-prefixed pb.Entry record from r and converts it
// back to an entry, inverting writeEntry. It returns (nil, nil) at clean end
// of input.
func readEntry(r io.Reader) (*entry, error) {
	buf, err := readRecord(r)
	if err == io.EOF {
		// Clean end of file: no more entries.
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	var pe pb.Entry
	if err := proto.Unmarshal(buf, &pe); err != nil {
		return nil, err
	}
	var msg message
	if pe.Message != nil {
		var any ptypes.DynamicAny
		if err := ptypes.UnmarshalAny(pe.Message, &any); err != nil {
			return nil, err
		}
		if pe.IsError {
			// The embedded message is a status proto describing the error.
			msg.err = status.ErrorProto(any.Message.(*spb.Status))
		} else {
			msg.msg = any.Message
		}
	} else if pe.IsError {
		// An error with no embedded message is the encoding of io.EOF
		// (see writeEntry).
		msg.err = io.EOF
	} else if pe.Kind != pb.Entry_CREATE_STREAM {
		// Only create-stream entries may legitimately carry neither a
		// message nor an error.
		return nil, errors.New("rpcreplay: entry with nil message and false is_error")
	}
	return &entry{
		kind:     pe.Kind,
		method:   pe.Method,
		msg:      msg,
		refIndex: int(pe.RefIndex),
	}, nil
}
// A record consists of an unsigned 32-bit little-endian length L followed by L
// bytes.
func writeRecord(w io.Writer, data []byte) error {
if err := binary.Write(w, binary.LittleEndian, uint32(len(data))); err != nil {
return err
}
_, err := w.Write(data)
return err
}
func readRecord(r io.Reader) ([]byte, error) {
var size uint32
if err := binary.Read(r, binary.LittleEndian, &size); err != nil {
return nil, err
}
buf := make([]byte, size)
if _, err := io.ReadFull(r, buf); err != nil {
return nil, err
}
return buf, nil
}

View File

@ -0,0 +1,362 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package rpcreplay
import (
"bytes"
"io"
"testing"
ipb "cloud.google.com/go/internal/rpcreplay/proto/intstore"
rpb "cloud.google.com/go/internal/rpcreplay/proto/rpcreplay"
"cloud.google.com/go/internal/testutil"
"github.com/golang/protobuf/proto"
"golang.org/x/net/context"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/status"
)
// TestRecordIO verifies that a record written by writeRecord round-trips
// through readRecord unchanged.
func TestRecordIO(t *testing.T) {
	buf := &bytes.Buffer{}
	in := []byte{1, 2, 3}
	if err := writeRecord(buf, in); err != nil {
		t.Fatal(err)
	}
	out, err := readRecord(buf)
	if err != nil {
		t.Fatal(err)
	}
	if !bytes.Equal(out, in) {
		t.Errorf("got %v, want %v", out, in)
	}
}
// TestHeaderIO verifies that the header round-trips through
// writeHeader/readHeader and that readHeader rejects malformed input.
func TestHeaderIO(t *testing.T) {
	buf := &bytes.Buffer{}
	in := []byte{1, 2, 3}
	if err := writeHeader(buf, in); err != nil {
		t.Fatal(err)
	}
	out, err := readHeader(buf)
	if err != nil {
		t.Fatal(err)
	}
	if !testutil.Equal(out, in) {
		t.Errorf("got %v, want %v", out, in)
	}
	// readHeader errors: empty input, a wrong magic string, and input whose
	// first bytes merely resemble the magic string.
	for _, contents := range []string{"", "badmagic", "gRPCReplay"} {
		if _, err := readHeader(bytes.NewBufferString(contents)); err == nil {
			t.Errorf("%q: got nil, want error", contents)
		}
	}
}
// TestEntryIO verifies that entries of each payload shape — a proto message,
// a gRPC status error, and an io.EOF sentinel — round-trip through
// writeEntry/readEntry unchanged.
func TestEntryIO(t *testing.T) {
	for i, want := range []*entry{
		{
			kind:     rpb.Entry_REQUEST,
			method:   "method",
			msg:      message{msg: &rpb.Entry{}},
			refIndex: 7,
		},
		{
			kind:     rpb.Entry_RESPONSE,
			method:   "method",
			msg:      message{err: status.Error(codes.NotFound, "not found")},
			refIndex: 8,
		},
		{
			kind:     rpb.Entry_RECV,
			method:   "method",
			msg:      message{err: io.EOF},
			refIndex: 3,
		},
	} {
		buf := &bytes.Buffer{}
		if err := writeEntry(buf, want); err != nil {
			t.Fatal(err)
		}
		got, err := readEntry(buf)
		if err != nil {
			t.Fatal(err)
		}
		if !got.equal(want) {
			t.Errorf("#%d: got %v, want %v", i, got, want)
		}
	}
}
// initialState is the opaque blob stored in the replay-file header by the
// recording tests.
var initialState = []byte{1, 2, 3}

// TestRecord records a full client session against the in-process int-store
// server and checks that the resulting replay file contains exactly the
// expected header and sequence of entries.
func TestRecord(t *testing.T) {
	srv := newIntStoreServer()
	defer srv.stop()
	buf := record(t, srv)
	gotIstate, err := readHeader(buf)
	if err != nil {
		t.Fatal(err)
	}
	if !testutil.Equal(gotIstate, initialState) {
		t.Fatalf("got %v, want %v", gotIstate, initialState)
	}
	item := &ipb.Item{Name: "a", Value: 1}
	// The expected entries mirror the calls made by testService, in order.
	// refIndex values are 1-based positions of the request/create-stream
	// entries being answered.
	wantEntries := []*entry{
		// Set
		{
			kind:   rpb.Entry_REQUEST,
			method: "/intstore.IntStore/Set",
			msg:    message{msg: item},
		},
		{
			kind:     rpb.Entry_RESPONSE,
			msg:      message{msg: &ipb.SetResponse{PrevValue: 0}},
			refIndex: 1,
		},
		// Get
		{
			kind:   rpb.Entry_REQUEST,
			method: "/intstore.IntStore/Get",
			msg:    message{msg: &ipb.GetRequest{Name: "a"}},
		},
		{
			kind:     rpb.Entry_RESPONSE,
			msg:      message{msg: item},
			refIndex: 3,
		},
		{
			kind:   rpb.Entry_REQUEST,
			method: "/intstore.IntStore/Get",
			msg:    message{msg: &ipb.GetRequest{Name: "x"}},
		},
		{
			kind:     rpb.Entry_RESPONSE,
			msg:      message{err: status.Error(codes.NotFound, `"x"`)},
			refIndex: 5,
		},
		// ListItems
		{ // entry #7
			kind:   rpb.Entry_CREATE_STREAM,
			method: "/intstore.IntStore/ListItems",
		},
		{
			kind:     rpb.Entry_SEND,
			msg:      message{msg: &ipb.ListItemsRequest{}},
			refIndex: 7,
		},
		{
			kind:     rpb.Entry_RECV,
			msg:      message{msg: item},
			refIndex: 7,
		},
		{
			kind:     rpb.Entry_RECV,
			msg:      message{err: io.EOF},
			refIndex: 7,
		},
		// SetStream
		{ // entry #11
			kind:   rpb.Entry_CREATE_STREAM,
			method: "/intstore.IntStore/SetStream",
		},
		{
			kind:     rpb.Entry_SEND,
			msg:      message{msg: &ipb.Item{Name: "b", Value: 2}},
			refIndex: 11,
		},
		{
			kind:     rpb.Entry_SEND,
			msg:      message{msg: &ipb.Item{Name: "c", Value: 3}},
			refIndex: 11,
		},
		{
			kind:     rpb.Entry_RECV,
			msg:      message{msg: &ipb.Summary{Count: 2}},
			refIndex: 11,
		},
		// StreamChat
		{ // entry #15
			kind:   rpb.Entry_CREATE_STREAM,
			method: "/intstore.IntStore/StreamChat",
		},
		{
			kind:     rpb.Entry_SEND,
			msg:      message{msg: &ipb.Item{Name: "d", Value: 4}},
			refIndex: 15,
		},
		{
			kind:     rpb.Entry_RECV,
			msg:      message{msg: &ipb.Item{Name: "d", Value: 4}},
			refIndex: 15,
		},
		{
			kind:     rpb.Entry_SEND,
			msg:      message{msg: &ipb.Item{Name: "e", Value: 5}},
			refIndex: 15,
		},
		{
			kind:     rpb.Entry_RECV,
			msg:      message{msg: &ipb.Item{Name: "e", Value: 5}},
			refIndex: 15,
		},
		{
			kind:     rpb.Entry_RECV,
			msg:      message{err: io.EOF},
			refIndex: 15,
		},
	}
	for i, w := range wantEntries {
		g, err := readEntry(buf)
		if err != nil {
			// Fixed printf verb: was "%#d" ('#' is not a valid flag for 'd').
			t.Fatalf("#%d: %v", i+1, err)
		}
		if !g.equal(w) {
			t.Errorf("#%d:\ngot  %+v\nwant %+v", i+1, g, w)
		}
	}
	// After the expected entries the file must be exhausted.
	g, err := readEntry(buf)
	if err != nil {
		t.Fatal(err)
	}
	if g != nil {
		t.Errorf("\ngot  %+v\nwant nil", g)
	}
}
// TestReplay records a session against a live server, then runs the same
// client interactions a second time served entirely from the recording.
func TestReplay(t *testing.T) {
	srv := newIntStoreServer()
	defer srv.stop()
	buf := record(t, srv)
	rep, err := NewReplayerReader(buf)
	if err != nil {
		t.Fatal(err)
	}
	// The replayer must surface the initial state saved in the header.
	if got, want := rep.Initial(), initialState; !testutil.Equal(got, want) {
		t.Fatalf("got %v, want %v", got, want)
	}
	// Replay the test.
	testService(t, srv.Addr, rep.DialOptions())
}
// record runs the standard client session against srv while a Recorder is
// attached, and returns the buffer holding the resulting replay file.
func record(t *testing.T, srv *intStoreServer) *bytes.Buffer {
	buf := &bytes.Buffer{}
	rec, err := NewRecorderWriter(buf, initialState)
	if err != nil {
		t.Fatal(err)
	}
	testService(t, srv.Addr, rec.DialOptions())
	// Close flushes the recording; the buffer is complete only afterwards.
	if err := rec.Close(); err != nil {
		t.Fatal(err)
	}
	return buf
}
// testService exercises every IntStore RPC (unary success, unary error, and
// the three stream shapes) against the server at addr, dialed with opts.
// It is run once while recording and once while replaying, so the exact
// sequence of calls here defines the recorded fixture — do not reorder.
func testService(t *testing.T, addr string, opts []grpc.DialOption) {
	conn, err := grpc.Dial(addr,
		append([]grpc.DialOption{grpc.WithInsecure()}, opts...)...)
	if err != nil {
		t.Fatal(err)
	}
	defer conn.Close()
	client := ipb.NewIntStoreClient(conn)
	ctx := context.Background()
	// Unary call that succeeds.
	item := &ipb.Item{Name: "a", Value: 1}
	res, err := client.Set(ctx, item)
	if err != nil {
		t.Fatal(err)
	}
	if res.PrevValue != 0 {
		t.Errorf("got %d, want 0", res.PrevValue)
	}
	got, err := client.Get(ctx, &ipb.GetRequest{Name: "a"})
	if err != nil {
		t.Fatal(err)
	}
	if !proto.Equal(got, item) {
		t.Errorf("got %v, want %v", got, item)
	}
	// Unary call that fails; the error must be a gRPC status so it can be
	// recorded and replayed.
	_, err = client.Get(ctx, &ipb.GetRequest{Name: "x"})
	if err == nil {
		t.Fatal("got nil, want error")
	}
	if _, ok := status.FromError(err); !ok {
		t.Errorf("got error type %T, want a grpc/status.Status", err)
	}
	// Server-streaming call.
	wantItems := []*ipb.Item{item}
	lic, err := client.ListItems(ctx, &ipb.ListItemsRequest{})
	if err != nil {
		t.Fatal(err)
	}
	for i := 0; ; i++ {
		item, err := lic.Recv()
		if err == io.EOF {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		if i >= len(wantItems) || !proto.Equal(item, wantItems[i]) {
			t.Fatalf("%d: bad item", i)
		}
	}
	// Client-streaming call.
	ssc, err := client.SetStream(ctx)
	if err != nil {
		t.Fatal(err)
	}
	must := func(err error) {
		if err != nil {
			t.Fatal(err)
		}
	}
	for i, name := range []string{"b", "c"} {
		must(ssc.Send(&ipb.Item{Name: name, Value: int32(i + 2)}))
	}
	summary, err := ssc.CloseAndRecv()
	if err != nil {
		t.Fatal(err)
	}
	if got, want := summary.Count, int32(2); got != want {
		t.Fatalf("got %d, want %d", got, want)
	}
	// Bidirectional-streaming call: the server echoes each item back.
	chatc, err := client.StreamChat(ctx)
	if err != nil {
		t.Fatal(err)
	}
	for i, name := range []string{"d", "e"} {
		item := &ipb.Item{Name: name, Value: int32(i + 4)}
		must(chatc.Send(item))
		got, err := chatc.Recv()
		if err != nil {
			t.Fatal(err)
		}
		if !proto.Equal(got, item) {
			t.Errorf("got %v, want %v", got, item)
		}
	}
	must(chatc.CloseSend())
	if _, err := chatc.Recv(); err != io.EOF {
		t.Fatalf("got %v, want EOF", err)
	}
}

99
vendor/cloud.google.com/go/internal/testutil/cmp.go generated vendored Normal file
View File

@ -0,0 +1,99 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package testutil
import (
"fmt"
"math"
"reflect"
"unicode"
"unicode/utf8"
"github.com/golang/protobuf/proto"
"github.com/google/go-cmp/cmp"
)
var (
	// alwaysEqual reports any two values as equal; combined with the
	// FilterValues predicates below it makes matching pairs compare equal.
	alwaysEqual = cmp.Comparer(func(_, _ interface{}) bool { return true })

	// defaultCmpOptions are applied by Equal and Diff in this package.
	defaultCmpOptions = []cmp.Option{
		// Use proto.Equal for protobufs
		cmp.Comparer(proto.Equal),
		// NaNs compare equal
		cmp.FilterValues(func(x, y float64) bool {
			return math.IsNaN(x) && math.IsNaN(y)
		}, alwaysEqual),
		cmp.FilterValues(func(x, y float32) bool {
			return math.IsNaN(float64(x)) && math.IsNaN(float64(y))
		}, alwaysEqual),
	}
)
// Equal tests two values for equality.
func Equal(x, y interface{}, opts ...cmp.Option) bool {
	// Append the defaults to a capacity-clamped view of opts so the caller's
	// slice is never mutated. Order doesn't matter to cmp.
	combined := append(opts[:len(opts):len(opts)], defaultCmpOptions...)
	return cmp.Equal(x, y, combined...)
}
// Diff reports the differences between two values.
// Diff(x, y) == "" iff Equal(x, y).
func Diff(x, y interface{}, opts ...cmp.Option) string {
	// Append the defaults to a capacity-clamped view of opts so the caller's
	// slice is never mutated. Order doesn't matter to cmp.
	combined := append(opts[:len(opts):len(opts)], defaultCmpOptions...)
	return cmp.Diff(x, y, combined...)
}
// TODO(jba): remove the code below when cmpopts becomes available.
// IgnoreUnexported returns an Option that only ignores the immediate unexported
// fields of a struct, including anonymous fields of unexported types.
// In particular, unexported fields within the struct's exported fields
// of struct types, including anonymous fields, will not be ignored unless the
// type of the field itself is also passed to IgnoreUnexported.
func IgnoreUnexported(typs ...interface{}) cmp.Option {
	// newUnexportedFilter panics on non-struct arguments, so invalid use
	// fails loudly at option-construction time.
	ux := newUnexportedFilter(typs...)
	return cmp.FilterPath(ux.filter, cmp.Ignore())
}
type unexportedFilter struct{ m map[reflect.Type]bool }
func newUnexportedFilter(typs ...interface{}) unexportedFilter {
ux := unexportedFilter{m: make(map[reflect.Type]bool)}
for _, typ := range typs {
t := reflect.TypeOf(typ)
if t == nil || t.Kind() != reflect.Struct {
panic(fmt.Sprintf("invalid struct type: %T", typ))
}
ux.m[t] = true
}
return ux
}
// filter reports whether the path ends at an unexported field of one of the
// registered struct types.
func (xf unexportedFilter) filter(p cmp.Path) bool {
	if len(p) < 2 {
		return false
	}
	sf, ok := p[len(p)-1].(cmp.StructField)
	if !ok {
		return false
	}
	parent := p[len(p)-2].Type()
	return xf.m[parent] && !isExported(sf.Name())
}
// isExported reports whether the identifier is exported.
func isExported(id string) bool {
	first, _ := utf8.DecodeRuneInString(id)
	return unicode.IsUpper(first)
}

View File

@ -15,9 +15,7 @@
// Package tracecontext provides encoders and decoders for Stackdriver Trace contexts.
package tracecontext
import (
"encoding/binary"
)
import "encoding/binary"
const (
versionID = 0
@ -29,19 +27,15 @@ const (
spanIDLen = 8
optsLen = 1
versionOffset = 0
traceIDOffset = 1
spanIDOffset = traceIDOffset + 1 + traceIDLen
optsOffset = spanIDOffset + 1 + spanIDLen
totalLen = optsOffset + 1 + optsLen
// Len represents the length of trace context.
Len = 1 + 1 + traceIDLen + 1 + spanIDLen + 1 + optsLen
)
// Encode encodes trace ID, span ID and options into dst. The number of bytes
// written will be returned. If len(dst) isn't big enough to fit the trace context,
// a negative number is returned.
func Encode(dst []byte, traceID [2]uint64, spanID uint64, opts byte) (n int) {
if len(dst) < totalLen {
func Encode(dst []byte, traceID []byte, spanID uint64, opts byte) (n int) {
if len(dst) < Len {
return -1
}
var offset = 0
@ -50,8 +44,9 @@ func Encode(dst []byte, traceID [2]uint64, spanID uint64, opts byte) (n int) {
putByte(versionID)
putByte(traceIDField)
putUint64(traceID[1])
putUint64(traceID[0])
for _, b := range traceID {
putByte(b)
}
putByte(spanIDField)
putUint64(spanID)
putByte(optsField)
@ -62,21 +57,22 @@ func Encode(dst []byte, traceID [2]uint64, spanID uint64, opts byte) (n int) {
// Decode decodes the src into a trace ID, span ID and options. If src doesn't
// contain a valid trace context, ok = false is returned.
func Decode(src []byte) (traceID [2]uint64, spanID uint64, opts byte, ok bool) {
if len(src) < totalLen {
return traceID, 0, 0, false
func Decode(src []byte) (traceID []byte, spanID uint64, opts byte, ok bool) {
if len(src) < Len {
return traceID, spanID, 0, false
}
var offset = 0
readByte := func() byte { b := src[offset]; offset++; return b }
readUint64 := func() uint64 { v := binary.LittleEndian.Uint64(src[offset:]); offset += 8; return v }
if readByte() != versionID {
return traceID, 0, 0, false
return traceID, spanID, 0, false
}
for offset < len(src) {
switch readByte() {
case traceIDField:
traceID[1] = readUint64()
traceID[0] = readUint64()
traceID = src[offset : offset+traceIDLen]
offset += traceIDLen
case spanIDField:
spanID = readUint64()
case optsField:

View File

@ -15,8 +15,9 @@
package tracecontext
import (
"reflect"
"testing"
"cloud.google.com/go/internal/testutil"
)
var validData = []byte{0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79, 1, 97, 98, 99, 100, 101, 102, 103, 104, 2, 1}
@ -25,7 +26,7 @@ func TestDecode(t *testing.T) {
tests := []struct {
name string
data []byte
wantTraceID [2]uint64
wantTraceID []byte
wantSpanID uint64
wantOpts byte
wantOk bool
@ -33,7 +34,7 @@ func TestDecode(t *testing.T) {
{
name: "nil data",
data: nil,
wantTraceID: [2]uint64{},
wantTraceID: nil,
wantSpanID: 0,
wantOpts: 0,
wantOk: false,
@ -41,7 +42,7 @@ func TestDecode(t *testing.T) {
{
name: "short data",
data: []byte{0, 0, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77},
wantTraceID: [2]uint64{},
wantTraceID: nil,
wantSpanID: 0,
wantOpts: 0,
wantOk: false,
@ -49,7 +50,7 @@ func TestDecode(t *testing.T) {
{
name: "wrong field number",
data: []byte{0, 1, 64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77},
wantTraceID: [2]uint64{},
wantTraceID: nil,
wantSpanID: 0,
wantOpts: 0,
wantOk: false,
@ -57,7 +58,7 @@ func TestDecode(t *testing.T) {
{
name: "valid data",
data: validData,
wantTraceID: [2]uint64{0x4F4E4D4C4B4A4948, 0x4746454443424140},
wantTraceID: []byte{64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79},
wantSpanID: 0x6867666564636261,
wantOpts: 1,
wantOk: true,
@ -65,7 +66,7 @@ func TestDecode(t *testing.T) {
}
for _, tt := range tests {
gotTraceID, gotSpanID, gotOpts, gotOk := Decode(tt.data)
if !reflect.DeepEqual(gotTraceID, tt.wantTraceID) {
if !testutil.Equal(gotTraceID, tt.wantTraceID) {
t.Errorf("%s: Decode() gotTraceID = %v, want %v", tt.name, gotTraceID, tt.wantTraceID)
}
if gotSpanID != tt.wantSpanID {
@ -84,7 +85,7 @@ func TestEncode(t *testing.T) {
tests := []struct {
name string
dst []byte
traceID [2]uint64
traceID []byte
spanID uint64
opts byte
wantN int
@ -93,7 +94,7 @@ func TestEncode(t *testing.T) {
{
name: "short data",
dst: make([]byte, 0),
traceID: [2]uint64{5714589967255750984, 5135868584551137600},
traceID: []byte("00112233445566"),
spanID: 0x6867666564636261,
opts: 1,
wantN: -1,
@ -101,11 +102,11 @@ func TestEncode(t *testing.T) {
},
{
name: "valid data",
dst: make([]byte, totalLen),
traceID: [2]uint64{5714589967255750984, 5135868584551137600},
dst: make([]byte, Len),
traceID: []byte{64, 65, 66, 67, 68, 69, 70, 71, 72, 73, 74, 75, 76, 77, 78, 79},
spanID: 0x6867666564636261,
opts: 1,
wantN: totalLen,
wantN: Len,
wantData: validData,
},
}
@ -114,7 +115,7 @@ func TestEncode(t *testing.T) {
if gotN != tt.wantN {
t.Errorf("%s: n = %v, want %v", tt.name, gotN, tt.wantN)
}
if gotData := tt.dst; !reflect.DeepEqual(gotData, tt.wantData) {
if gotData := tt.dst; !testutil.Equal(gotData, tt.wantData) {
t.Errorf("%s: dst = %v, want %v", tt.name, gotData, tt.wantData)
}
}
@ -128,6 +129,8 @@ func BenchmarkDecode(b *testing.B) {
func BenchmarkEncode(b *testing.B) {
for i := 0; i < b.N; i++ {
Encode(validData, [2]uint64{1, 1}, 1, 1)
traceID := make([]byte, 16)
var opts byte
Encode(validData, traceID, 0, opts)
}
}

View File

@ -26,7 +26,7 @@ import (
// Repo is the current version of the client libraries in this
// repo. It should be a date in YYYYMMDD format.
const Repo = "20170404"
const Repo = "20170928"
// Go returns the Go runtime version. The returned string
// has no whitespace.

Some files were not shown because too many files have changed in this diff Show More