Merge pull request #121 from keel-hq/feature/configurable_approvals
Feature/configurable approvals

commit 6575af3f30
Dockerfile

@@ -1,11 +1,11 @@
-FROM golang:1.8.3
+FROM golang:1.9.2
 COPY . /go/src/github.com/keel-hq/keel
 WORKDIR /go/src/github.com/keel-hq/keel
-RUN make build
+RUN make install

 FROM alpine:latest
 RUN apk --no-cache add ca-certificates
-COPY --from=0 /go/src/github.com/keel-hq/keel/keel /bin/keel
+COPY --from=0 /go/bin/keel /bin/keel
 ENTRYPOINT ["/bin/keel"]

 EXPOSE 9300
Makefile

@@ -13,7 +13,11 @@ test:

 build:
 	@echo "++ Building keel"
-	CGO_ENABLED=0 GOOS=linux go build -a -tags netgo -ldflags "$(LDFLAGS)" -o keel .
+	CGO_ENABLED=0 GOOS=linux cd cmd/keel && go build -a -tags netgo -ldflags "$(LDFLAGS) -w -s" -o keel .

+install:
+	@echo "++ Installing keel"
+	CGO_ENABLED=0 GOOS=linux go install -ldflags "$(LDFLAGS) -w -s" github.com/keel-hq/keel/cmd/keel
+
 image:
 	docker build -t karolisr/keel:alpha -f Dockerfile .
bot/bot.go

@@ -79,6 +79,7 @@ type Bot struct {
 	approvalsRespCh chan *approvalResponse

 	approvalsManager approvals.Manager
+	approvalsChannel string // slack approvals channel name

 	k8sImplementer kubernetes.Implementer

@@ -86,7 +87,7 @@ type Bot struct {
 }

 // New - create new bot instance
-func New(name, token string, k8sImplementer kubernetes.Implementer, approvalsManager approvals.Manager) *Bot {
+func New(name, token, approvalsChannel string, k8sImplementer kubernetes.Implementer, approvalsManager approvals.Manager) *Bot {
 	client := slack.New(token)

 	bot := &Bot{

@@ -95,6 +96,7 @@ func New(name, token string, k8sImplementer kubernetes.Implementer, approvalsMan
 		k8sImplementer:   k8sImplementer,
 		name:             name,
 		approvalsManager: approvalsManager,
+		approvalsChannel: approvalsChannel,
 		approvalsRespCh:  make(chan *approvalResponse), // don't add buffer to make it blocking
 	}

@@ -201,7 +203,7 @@ func (b *Bot) postMessage(title, message, color string, fields []slack.Attachmen
 		},
 	}

-	_, _, err := b.slackHTTPClient.PostMessage("general", "", params)
+	_, _, err := b.slackHTTPClient.PostMessage(b.approvalsChannel, "", params)
 	if err != nil {
 		log.WithFields(log.Fields{
 			"error": err,

@@ -230,6 +232,17 @@ func (b *Bot) isApproval(event *slack.MessageEvent, eventText string) (resp *app
 	return nil, false
 }

+// TODO(k): cache results in a map or get this info on startup. Although
+// if channel was then recreated (unlikely), we would miss results
+func (b *Bot) isApprovalsChannel(event *slack.MessageEvent) bool {
+	for _, ch := range b.slackRTM.GetInfo().Channels {
+		if ch.ID == event.Channel && ch.Name == b.approvalsChannel {
+			return true
+		}
+	}
+	return false
+}
+
 func (b *Bot) handleMessage(event *slack.MessageEvent) {
 	if event.BotID != "" || event.User == "" || event.SubType == "bot_message" {
 		log.WithFields(log.Fields{

@@ -247,10 +260,14 @@ func (b *Bot) handleMessage(event *slack.MessageEvent) {
 	}

 	eventText = b.trimBot(eventText)
-	approval, ok := b.isApproval(event, eventText)
-	if ok {
-		b.approvalsRespCh <- approval
-		return
+
+	// only accepting approvals from approvals channel
+	if b.isApprovalsChannel(event) {
+		approval, ok := b.isApproval(event, eventText)
+		if ok {
+			b.approvalsRespCh <- approval
+			return
+		}
 	}

 	// Responses that are just a canned string response
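Usage sketch (not part of the diff): with the new signature, callers pass the Slack approvals channel name explicitly. The `implementer` and `approvalsManager` values are assumed to come from keel's existing setup code, and "deployments" is a hypothetical channel name:

    // Messages such as "approve k8s/project/repo:1.2.3" are now only acted
    // on when they arrive in this channel, because handleMessage checks
    // isApprovalsChannel before isApproval.
    slackBot := bot.New("keel", os.Getenv(constants.EnvSlackToken), "deployments",
        implementer, approvalsManager)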
bot/bot_test.go

@@ -76,7 +76,7 @@ func TestBotRequest(t *testing.T) {
 	am := approvals.New(mem, codecs.DefaultSerializer())

-	bot := New("keel", token, f8s, am)
+	bot := New("keel", token, "approvals", f8s, am)
 	// replacing slack client so we can receive webhooks
 	bot.slackHTTPClient = fi

@@ -126,7 +126,7 @@ func TestProcessApprovedResponse(t *testing.T) {
 	am := approvals.New(mem, codecs.DefaultSerializer())

-	bot := New("keel", token, f8s, am)
+	bot := New("keel", token, "approvals", f8s, am)
 	// replacing slack client so we can receive webhooks
 	bot.slackHTTPClient = fi

@@ -196,7 +196,7 @@ func TestProcessApprovalReply(t *testing.T) {
 		t.Fatalf("unexpected error while creating : %s", err)
 	}

-	bot := New("keel", token, f8s, am)
+	bot := New("keel", token, "approvals", f8s, am)
 	// replacing slack client so we can receive webhooks
 	bot.slackHTTPClient = fi

@@ -267,7 +267,7 @@ func TestProcessRejectedReply(t *testing.T) {
 		t.Fatalf("unexpected error while creating : %s", err)
 	}

-	bot := New("keel", "random", f8s, am)
+	bot := New("keel", "random", "approvals", f8s, am)

 	collector := approval.New()
 	collector.Configure(am)

@@ -313,3 +313,42 @@ func TestProcessRejectedReply(t *testing.T) {
 	}

 }
+
+func TestIsApproval(t *testing.T) {
+	f8s := &testutil.FakeK8sImplementer{}
+	mem := memory.NewMemoryCache(100*time.Hour, 100*time.Hour, 100*time.Hour)
+
+	identifier := "k8s/project/repo:1.2.3"
+
+	am := approvals.New(mem, codecs.DefaultSerializer())
+	// creating initial approve request
+	err := am.Create(&types.Approval{
+		Identifier:     identifier,
+		VotesRequired:  2,
+		CurrentVersion: "2.3.4",
+		NewVersion:     "3.4.5",
+		Event: &types.Event{
+			Repository: types.Repository{
+				Name: "project/repo",
+				Tag:  "2.3.4",
+			},
+		},
+	})
+
+	if err != nil {
+		t.Fatalf("unexpected error while creating : %s", err)
+	}
+
+	bot := New("keel", "random", "approvals", f8s, am)
+
+	_, isApproval := bot.isApproval(&slack.MessageEvent{
+		Msg: slack.Msg{
+			Channel: "approvals",
+			User:    "user-x",
+		},
+	}, "approve k8s/project/repo:1.2.3")
+
+	if !isApproval {
+		t.Errorf("event expected to be an approval")
+	}
+}
cmd/keel/main.go

@@ -3,11 +3,13 @@ package main

 import (
 	"os"
 	"os/signal"
+	"path/filepath"
 	"time"

 	"context"

 	netContext "golang.org/x/net/context"
+	kingpin "gopkg.in/alecthomas/kingpin.v2"

 	"github.com/keel-hq/keel/approvals"
 	"github.com/keel-hq/keel/bot"

@@ -57,6 +59,14 @@ const EnvDebug = "DEBUG"

 func main() {

 	ver := version.GetKeelVersion()

+	inCluster := kingpin.Flag("incluster", "use in cluster configuration (defaults to 'true'), use '--no-incluster' if running outside of the cluster").Default("true").Bool()
+	kubeconfig := kingpin.Flag("kubeconfig", "path to kubeconfig (if not in running inside a cluster)").Default(filepath.Join(os.Getenv("HOME"), ".kube", "config")).String()
+
+	kingpin.UsageTemplate(kingpin.CompactUsageTemplate).Version(ver.Version)
+	kingpin.CommandLine.Help = "Automated Kubernetes deployment updates. Learn more on https://keel.sh."
+	kingpin.Parse()
+
 	log.WithFields(log.Fields{
 		"os":         ver.OS,
 		"build_date": ver.BuildDate,

@@ -100,12 +110,16 @@ func main() {
 	}

 	// getting k8s provider
-	k8sCfg := &kubernetes.Opts{}
+	k8sCfg := &kubernetes.Opts{
+		ConfigPath: *kubeconfig,
+	}

 	if os.Getenv(EnvKubernetesConfig) != "" {
 		k8sCfg.ConfigPath = os.Getenv(EnvKubernetesConfig)
-	} else {
-		k8sCfg.InCluster = true
 	}

+	k8sCfg.InCluster = *inCluster
+
 	implementer, err := kubernetes.NewKubernetesImplementer(k8sCfg)
 	if err != nil {
 		log.WithFields(log.Fields{

@@ -175,6 +189,7 @@ func main() {
 	}()

 	<-cleanupDone
+
 }

 // setupProviders - setting up available providers. New providers should be initialised here and added to

@@ -215,7 +230,13 @@ func setupBot(k8sImplementer kubernetes.Implementer, approvalsManager approvals.
 	}

 	token := os.Getenv(constants.EnvSlackToken)
-	slackBot := bot.New(botName, token, k8sImplementer, approvalsManager)
+
+	approvalsChannel := "general"
+	if os.Getenv(constants.EnvSlackApprovalsChannel) != "" {
+		approvalsChannel = os.Getenv(constants.EnvSlackApprovalsChannel)
+	}
+
+	slackBot := bot.New(botName, token, approvalsChannel, k8sImplementer, approvalsManager)

 	ctx, cancel := context.WithCancel(context.Background())
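The resulting kubeconfig resolution order, condensed into a sketch (same names as in the diff above; this restates the behavior rather than adding new code):

    // 1. start from the --kubeconfig flag (defaults to $HOME/.kube/config)
    // 2. the KUBERNETES_CONFIG environment variable, if set, overrides the flag
    // 3. --incluster (default true) decides whether in-cluster config is used instead
    k8sCfg := &kubernetes.Opts{ConfigPath: *kubeconfig}
    if env := os.Getenv(EnvKubernetesConfig); env != "" {
        k8sCfg.ConfigPath = env
    }
    k8sCfg.InCluster = *inCluster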
constants/constants.go

@@ -14,6 +14,7 @@ const (
 	EnvSlackToken            = "SLACK_TOKEN"
 	EnvSlackBotName          = "SLACK_BOT_NAME"
 	EnvSlackChannels         = "SLACK_CHANNELS"
+	EnvSlackApprovalsChannel = "SLACK_APPROVALS_CHANNEL"
 )

 // EnvNotificationLevel - minimum level for notifications, defaults to info
glide.lock

@@ -1,8 +1,8 @@
-hash: 476bff82bacd66b7bf9ea8e80ac8048daff4c26ef962e83fc96a30587f483ba9
-updated: 2017-10-02T17:09:52.291993337+01:00
+hash: b4faf989713037ee4eb401a0af5ea985065140cf7ffdcaab025c62969d5803ea
+updated: 2017-12-11T21:33:40.88196567Z
 imports:
 - name: cloud.google.com/go
-  version: f6de2c509ed9d2af648c3c147207eaaf97149aed
+  version: 050b16d2314d5fc3d4c9a51e4cd5c7468e77f162
   subpackages:
   - compute/metadata
   - iam

@@ -10,6 +10,12 @@ imports:
   - internal/version
   - pubsub
   - pubsub/apiv1
+- name: github.com/alecthomas/template
+  version: a0175ee3bccc567396460bf5acd36800cb10c49c
+  subpackages:
+  - parse
+- name: github.com/alecthomas/units
+  version: 2efee857e7cfd4f3d0138cc3cbb1b4966962b93a
 - name: github.com/BurntSushi/toml
   version: b26d9c308763d68093482582cea63d69be07a0f0
 - name: github.com/davecgh/go-spew

@@ -33,8 +39,6 @@ imports:
   subpackages:
   - log
   - swagger
-- name: github.com/facebookgo/symwalk
-  version: 42004b9f322246749dd73ad71008b1f3160c0052
 - name: github.com/ghodss/yaml
   version: 0ca9ea5df5451ffdf184b4428c902747c2c11cd7
 - name: github.com/go-openapi/jsonpointer

@@ -79,7 +83,7 @@ imports:
 - name: github.com/gorilla/context
   version: 08b5f424b9271eedf6f9f0ce86cb9396ed337a42
 - name: github.com/gorilla/mux
-  version: 24fca303ac6da784b9e8269f724ddeb0b2eea5e7
+  version: 7f08801859139f86dfafd1c296e2cba9a80d292e
 - name: github.com/howeyc/gopass
   version: 3ca23474a7c7203e0a0a070fd33508f6efdb9b3d
 - name: github.com/imdario/mergo

@@ -93,7 +97,7 @@ imports:
   - jlexer
   - jwriter
 - name: github.com/Masterminds/semver
-  version: 517734cc7d6470c0d07130e40fd40bdeb9bcd3fd
+  version: 15d8430ab86497c5c0da827b748823945e1cf1e1
 - name: github.com/nlopes/slack
   version: c86337c0ef2486a15edd804355d9c73d2f2caed1
 - name: github.com/opencontainers/go-digest

@@ -113,7 +117,7 @@ imports:
   subpackages:
   - kv
 - name: github.com/Sirupsen/logrus
-  version: 89742aefa4b206dcf400792f3bd35b542998eb3b
+  version: d682213848ed68c0a260ca37d6dd5ace8423f5ba
 - name: github.com/spf13/pflag
   version: 9ff6c6923cfffbcd502984b8e0c80539a94968b7
 - name: github.com/ugorji/go

@@ -121,9 +125,9 @@ imports:
   subpackages:
   - codec
 - name: github.com/urfave/negroni
-  version: fde5e16d32adc7ad637e9cd9ad21d4ebc6192535
+  version: 5dbbc83f748fc3ad38585842b0aedab546d0ea1e
 - name: golang.org/x/crypto
-  version: 9419663f5a44be8b34ca85f08abc5fe1be11f8a3
+  version: 94eea52f7b742c7cbe0b03b22f0c4c8631ece122
   subpackages:
   - ssh/terminal
 - name: golang.org/x/net

@@ -146,12 +150,12 @@ imports:
   - jws
   - jwt
 - name: golang.org/x/sync
-  version: 8e0aa688b654ef28caa72506fa5ec8dba9fc7690
+  version: fd80eb99c8f653c847d294a001bdf2a3a6f768f5
   subpackages:
   - errgroup
   - semaphore
 - name: golang.org/x/sys
-  version: 8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9
+  version: 07c182904dbd53199946ba614a412c61d3c548f5
   subpackages:
   - unix
   - windows

@@ -196,7 +200,7 @@ imports:
   - socket
   - urlfetch
 - name: google.golang.org/genproto
-  version: 1e559d0a00eef8a9a43151db4665280bd8dd5886
+  version: 73cb5d0be5af113b42057925bd6c93e3cd9f60fd
   subpackages:
   - googleapis/api/annotations
   - googleapis/iam/v1

@@ -220,6 +224,8 @@ imports:
   - status
   - tap
   - transport
+- name: gopkg.in/alecthomas/kingpin.v2
+  version: 1087e65c9441605df944fb12c33f0fe7072d18ca
 - name: gopkg.in/inf.v0
   version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4
 - name: gopkg.in/yaml.v2

@@ -350,7 +356,7 @@ imports:
   - util/homedir
   - util/integer
 - name: k8s.io/helm
-  version: bbc1f71dc03afc5f00c6ac84b9308f8ecb4f39ac
+  version: 8478fb4fc723885b155c924d1c8c410b7a9444e6
   subpackages:
   - pkg/chartutil
   - pkg/helm
glide.yaml

@@ -38,6 +38,11 @@ import:
   version: ab5485076ff3407ad2d02db054635913f017b0ed
   subpackages:
   - context
+- package: golang.org/x/sys
+  version: 07c182904dbd53199946ba614a412c61d3c548f5
+  subpackages:
+  - unix
+  - windows
 # - package: google.golang.org/api
 #   subpackages:
 #   - option
vendor/cloud.google.com/go/.travis.yml

@@ -1,14 +1,19 @@
 sudo: false
 language: go
 go:
-- 1.6
-- 1.7
-- 1.8
+- 1.6.x
+- 1.7.x
+- 1.8.x
+- 1.9.x
 install:
 - go get -v cloud.google.com/go/...
 script:
-- openssl aes-256-cbc -K $encrypted_a8b3f4fc85f4_key -iv $encrypted_a8b3f4fc85f4_iv -in key.json.enc -out key.json -d
-- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762" GCLOUD_TESTS_GOLANG_KEY="$(pwd)/key.json"
+- openssl aes-256-cbc -K $encrypted_a8b3f4fc85f4_key -iv $encrypted_a8b3f4fc85f4_iv -in keys.tar.enc -out keys.tar -d
+- tar xvf keys.tar
+- GCLOUD_TESTS_GOLANG_PROJECT_ID="dulcet-port-762"
+  GCLOUD_TESTS_GOLANG_KEY="$(pwd)/dulcet-port-762-key.json"
+  GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID="gcloud-golang-firestore-tests"
+  GCLOUD_TESTS_GOLANG_FIRESTORE_KEY="$(pwd)/gcloud-golang-firestore-tests-key.json"
   ./run-tests.sh $TRAVIS_COMMIT
 env:
   matrix:
vendor/cloud.google.com/go/CONTRIBUTING.md

@@ -31,9 +31,12 @@ To run the integrations tests, creating and configuration of a project in the
 Google Developers Console is required.

 After creating a project, you must [create a service account](https://developers.google.com/identity/protocols/OAuth2ServiceAccount#creatinganaccount).
-Ensure the project-level **Owner** [IAM role](console.cloud.google.com/iam-admin/iam/project)
-(or **Editor** and **Logs Configuration Writer** roles) are added to the
-service account.
+Ensure the project-level **Owner**
+[IAM role](console.cloud.google.com/iam-admin/iam/project) role is added to the
+service account. Alternatively, the account can be granted all of the following roles:
+- **Editor**
+- **Logs Configuration Writer**
+- **PubSub Admin**

 Once you create a project, set the following environment variables to be able to
 run the against the actual APIs.

@@ -42,6 +45,12 @@ run the against the actual APIs.
 - **GCLOUD_TESTS_GOLANG_KEY**: The path to the JSON key file.
 - **GCLOUD_TESTS_API_KEY**: Your API key.

+Firestore requires a different project and key:
+
+- **GCLOUD_TESTS_GOLANG_FIRESTORE_PROJECT_ID**: Developers Console project's ID
+  supporting Firestore
+- **GCLOUD_TESTS_GOLANG_FIRESTORE_KEY**: The path to the JSON key file.
+
 Install the [gcloud command-line tool][gcloudcli] to your machine and use it
 to create some resources used in integration tests.

@@ -63,10 +72,15 @@ $ gcloud preview datastore create-indexes datastore/testdata/index.yaml
 $ gsutil mb gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID
 $ gsutil acl ch -g cloud-logs@google.com:O gs://$GCLOUD_TESTS_GOLANG_PROJECT_ID

+# Create a PubSub topic for integration tests of storage notifications.
+$ gcloud beta pubsub topics create go-storage-notification-test
+
 # Create a Spanner instance for the spanner integration tests.
 $ gcloud beta spanner instances create go-integration-test --config regional-us-central1 --nodes 1 --description 'Instance for go client test'
 # NOTE: Spanner instances are priced by the node-hour, so you may want to delete
 # the instance after testing with 'gcloud beta spanner instances delete'.
+
+
 ```

 Once you've set the environment variables, you can run the integration tests by

@@ -82,9 +96,9 @@ Before we can accept your pull requests you'll need to sign a Contributor
 License Agreement (CLA):

 - **If you are an individual writing original source code** and **you own the
-- intellectual property**, then you'll need to sign an [individual CLA][indvcla].
-- **If you work for a company that wants to allow you to contribute your work**,
-  then you'll need to sign a [corporate CLA][corpcla].
+  intellectual property**, then you'll need to sign an [individual CLA][indvcla].
+- **If you work for a company that wants to allow you to contribute your
+  work**, then you'll need to sign a [corporate CLA][corpcla].

 You can sign these electronically (just scroll to the bottom). After that,
 we'll be able to accept your pull requests.
vendor/cloud.google.com/go/CONTRIBUTORS

@@ -24,6 +24,7 @@ Glenn Lewis <gmlewis@google.com>
 Ingo Oeser <nightlyone@googlemail.com>
 Johan Euphrosine <proppy@google.com>
 Jonathan Amsterdam <jba@google.com>
+Kunpei Sakai <namusyaka@gmail.com>
 Luna Duclos <luna.duclos@palmstonegames.com>
 Magnus Hiie <magnus.hiie@gmail.com>
 Michael McGreevy <mcgreevy@golang.org>
vendor/cloud.google.com/go/README.md

@@ -33,110 +33,75 @@ make backwards-incompatible changes.

 ## News

-_September 28, 2017_
+_December 11, 2017_

-*v0.14.0*
+*v0.17.0*

-- bigquery BREAKING CHANGES:
-  - Standard SQL is the default for queries and views.
-  - `Table.Create` takes `TableMetadata` as a second argument, instead of
-    options.
-  - `Dataset.Create` takes `DatasetMetadata` as a second argument.
-  - `DatasetMetadata` field `ID` renamed to `FullID`
-  - `TableMetadata` field `ID` renamed to `FullID`
+- firestore BREAKING CHANGES:
+  - Remove UpdateMap and UpdateStruct; rename UpdatePaths to Update.
+    Change
+        `docref.UpdateMap(ctx, map[string]interface{}{"a.b", 1})`
+    to
+        `docref.Update(ctx, []firestore.Update{{Path: "a.b", Value: 1}})`
+
+    Change
+        `docref.UpdateStruct(ctx, []string{"Field"}, aStruct)`
+    to
+        `docref.Update(ctx, []firestore.Update{{Path: "Field", Value: aStruct.Field}})`
+  - Rename MergePaths to Merge; require args to be FieldPaths
+  - A value stored as an integer can be read into a floating-point field, and vice versa.
+- bigtable/cmd/cbt:
+  - Support deleting a column.
+  - Add regex option for row read.
+- spanner: Mark stable.
+- storage:
+  - Add Reader.ContentEncoding method.
+  - Fix handling of SignedURL headers.
+- bigquery:
+  - If Uploader.Put is called with no rows, it returns nil without making a
+    call.
+  - Schema inference supports the "nullable" option in struct tags for
+    non-required fields.
+  - TimePartitioning supports "Field".
+
+_October 30, 2017_
+
+*v0.16.0*
+
+- Other bigquery changes:
+  - The client will append a random suffix to a provided job ID if you set
+    `AddJobIDSuffix` to true in a job config.
+  - Listing jobs is supported.
+  - Better retry logic.
+  - `JobIterator.Next` returns `*Job`; removed `JobInfo` (BREAKING CHANGE).
+  - UseStandardSQL is deprecated; set UseLegacySQL to true if you need
+    Legacy SQL.
+  - Uploader.Put will generate a random insert ID if you do not provide one.
+  - Support time partitioning for load jobs.
+  - Support dry-run queries.
+  - A `Job` remembers its last retrieved status.
+  - Support retrieving job configuration.
+  - Support labels for jobs and tables.
+  - Support dataset access lists.
+  - Improve support for external data sources, including data from Bigtable and
+    Google Sheets, and tables with external data.
+  - Support updating a table's view configuration.
+  - Fix uploading civil times with nanoseconds.
+
+- vision, language, speech: clients are now stable
+- storage:
+  - Support PubSub notifications.
+  - Support Requester Pays buckets.
+
+- monitoring: client is now beta
+
+- profiler:
+  - Rename InstanceName to Instance, ZoneName to Zone
+  - Auto-detect service name and version on AppEngine.

-_September 8, 2017_
-
-*v0.13.0*
-
-- bigquery: UseLegacySQL options for CreateTable and QueryConfig. Use these
-  options to continue using Legacy SQL after the client switches its default
-  to Standard SQL.
-
-- bigquery: Support for updating dataset labels.
-
-- bigquery: Set DatasetIterator.ProjectID to list datasets in a project other
-  than the client's. DatasetsInProject is no longer needed and is deprecated.
-
-- bigtable: Fail ListInstances when any zones fail.
-
-- spanner: support decoding of slices of basic types (e.g. []string, []int64,
-  etc.)
-
-- logging/logadmin: UpdateSink no longer creates a sink if it is missing
-  (actually a change to the underlying service, not the client)
-
-- profiler: Service and ServiceVersion replace Target in Config.
-
-_August 22, 2017_
-
-*v0.12.0*
-
-- pubsub: Subscription.Receive now uses streaming pull.
-
-- pubsub: add Client.TopicInProject to access topics in a different project
-  than the client.
-
-- errors: renamed errorreporting. The errors package will be removed shortly.
-
-- datastore: improved retry behavior.
-
-- bigquery: support updates to dataset metadata, with etags.
-
-- bigquery: add etag support to Table.Update (BREAKING: etag argument added).
-
-- bigquery: generate all job IDs on the client.
-
-- storage: support bucket lifecycle configurations.
-- profiler: Support goroutine and mutex profile types.

-_July 31, 2017_
+_October 3, 2017_

-*v0.11.0*
+*v0.15.0*

-- Clients for spanner, pubsub and video are now in beta.
+- firestore: beta release. See the
+  [announcement](https://firebase.googleblog.com/2017/10/introducing-cloud-firestore.html).

-- New client for DLP.
+- errorreporting: The existing package has been redesigned.

-- spanner: performance and testing improvements.
-
-- storage: requester-pays buckets are supported.
-
-- storage, profiler, bigtable, bigquery: bug fixes and other minor improvements.
-
-- pubsub: bug fixes and other minor improvements
-
-_June 17, 2017_
-
-*v0.10.0*
-
-- pubsub: Subscription.ModifyPushConfig replaced with Subscription.Update.
-
-- pubsub: Subscription.Receive now runs concurrently for higher throughput.
-
-- vision: cloud.google.com/go/vision is deprecated. Use
-  cloud.google.com/go/vision/apiv1 instead.
-
-- translation: now stable.
-
-- trace: several changes to the surface. See the link below.
-
-  [Code changes required from v0.9.0.](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/MIGRATION.md)
+- errors: This package has been removed. Use errorreporting.

 [Older news](https://github.com/GoogleCloudPlatform/google-cloud-go/blob/master/old-news.md)

@@ -146,6 +111,7 @@ cloud.google.com/go/vision/apiv1 instead.

 Google API | Status | Package
 ---------------------------------|--------------|-----------------------------------------------------------
 [Datastore][cloud-datastore] | stable | [`cloud.google.com/go/datastore`][cloud-datastore-ref]
+[Firestore][cloud-firestore] | beta | [`cloud.google.com/go/firestore`][cloud-firestore-ref]
 [Storage][cloud-storage] | stable | [`cloud.google.com/go/storage`][cloud-storage-ref]
 [Bigtable][cloud-bigtable] | beta | [`cloud.google.com/go/bigtable`][cloud-bigtable-ref]
 [BigQuery][cloud-bigquery] | beta | [`cloud.google.com/go/bigquery`][cloud-bigquery-ref]

@@ -155,7 +121,7 @@ Google API | Status | Package

 [Vision][cloud-vision] | stable | [`cloud.google.com/go/vision/apiv1`][cloud-vision-ref]
 [Language][cloud-language] | stable | [`cloud.google.com/go/language/apiv1`][cloud-language-ref]
 [Speech][cloud-speech] | stable | [`cloud.google.com/go/speech/apiv1`][cloud-speech-ref]
-[Spanner][cloud-spanner] | beta | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
+[Spanner][cloud-spanner] | stable | [`cloud.google.com/go/spanner`][cloud-spanner-ref]
 [Translation][cloud-translation] | stable | [`cloud.google.com/go/translate`][cloud-translation-ref]
 [Trace][cloud-trace] | alpha | [`cloud.google.com/go/trace`][cloud-trace-ref]
 [Video Intelligence][cloud-video]| beta | [`cloud.google.com/go/videointelligence/apiv1beta1`][cloud-video-ref]

@@ -480,6 +446,11 @@ for more information.

 [cloud-datastore-docs]: https://cloud.google.com/datastore/docs
 [cloud-datastore-activation]: https://cloud.google.com/datastore/docs/activate

+[cloud-firestore]: https://cloud.google.com/firestore/
+[cloud-firestore-ref]: https://godoc.org/cloud.google.com/go/firestore
+[cloud-firestore-docs]: https://cloud.google.com/firestore/docs
+[cloud-firestore-activation]: https://cloud.google.com/firestore/docs/activate
+
 [cloud-pubsub]: https://cloud.google.com/pubsub/
 [cloud-pubsub-ref]: https://godoc.org/cloud.google.com/go/pubsub
 [cloud-pubsub-docs]: https://cloud.google.com/pubsub/docs
vendor/cloud.google.com/go/bigquery/benchmarks/README.md (new file)

@@ -0,0 +1,8 @@
+# BigQuery Benchmark
+This directory contains benchmarks for BigQuery client.
+
+## Usage
+`go run bench.go -- <your project id> queries.json`
+
+BigQuery service caches requests so the benchmark should be run
+at least twice, disregarding the first result.
vendor/cloud.google.com/go/bigquery/benchmarks/bench.go (new file)

@@ -0,0 +1,85 @@
+// Copyright 2017 Google Inc. All Rights Reserved.
+//
+// Licensed under the Apache License, Version 2.0 (the "License");
+// you may not use this file except in compliance with the License.
+// You may obtain a copy of the License at
+//
+//      http://www.apache.org/licenses/LICENSE-2.0
+//
+// Unless required by applicable law or agreed to in writing, software
+// distributed under the License is distributed on an "AS IS" BASIS,
+// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+// See the License for the specific language governing permissions and
+// limitations under the License.
+
+//+build ignore
+
+package main
+
+import (
+	"encoding/json"
+	"flag"
+	"io/ioutil"
+	"log"
+	"time"
+
+	"cloud.google.com/go/bigquery"
+	"golang.org/x/net/context"
+	"google.golang.org/api/iterator"
+)
+
+func main() {
+	flag.Parse()
+
+	ctx := context.Background()
+	c, err := bigquery.NewClient(ctx, flag.Arg(0))
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	queriesJSON, err := ioutil.ReadFile(flag.Arg(1))
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	var queries []string
+	if err := json.Unmarshal(queriesJSON, &queries); err != nil {
+		log.Fatal(err)
+	}
+
+	for _, q := range queries {
+		doQuery(ctx, c, q)
+	}
+}
+
+func doQuery(ctx context.Context, c *bigquery.Client, qt string) {
+	startTime := time.Now()
+	q := c.Query(qt)
+	it, err := q.Read(ctx)
+	if err != nil {
+		log.Fatal(err)
+	}
+
+	numRows, numCols := 0, 0
+	var firstByte time.Duration
+
+	for {
+		var values []bigquery.Value
+		err := it.Next(&values)
+		if err == iterator.Done {
+			break
+		}
+		if err != nil {
+			log.Fatal(err)
+		}
+		if numRows == 0 {
+			numCols = len(values)
+			firstByte = time.Since(startTime)
+		} else if numCols != len(values) {
+			log.Fatalf("got %d columns, want %d", len(values), numCols)
+		}
+		numRows++
+	}
+	log.Printf("query %q: %d rows, %d cols, first byte %f sec, total %f sec",
+		qt, numRows, numCols, firstByte.Seconds(), time.Since(startTime).Seconds())
+}
vendor/cloud.google.com/go/bigquery/benchmarks/queries.json (new file)

@@ -0,0 +1,10 @@
+[
+"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 10000",
+"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 100000",
+"SELECT * FROM `nyc-tlc.yellow.trips` LIMIT 1000000",
+"SELECT title FROM `bigquery-public-data.samples.wikipedia` ORDER BY title LIMIT 1000",
+"SELECT title, id, timestamp, contributor_ip FROM `bigquery-public-data.samples.wikipedia` WHERE title like 'Blo%' ORDER BY id",
+"SELECT * FROM `bigquery-public-data.baseball.games_post_wide` ORDER BY gameId",
+"SELECT * FROM `bigquery-public-data.samples.github_nested` WHERE repository.has_downloads ORDER BY repository.created_at LIMIT 10000",
+"SELECT repo_name, path FROM `bigquery-public-data.github_repos.files` WHERE path LIKE '%.java' ORDER BY id LIMIT 1000000"
+]
vendor/cloud.google.com/go/bigquery/bigquery.go

@@ -14,11 +14,18 @@

 package bigquery

 // TODO(mcgreevy): support dry-run mode when creating jobs.

 import (
 	"fmt"
+	"io"
 	"net/http"
+	"time"
+
+	gax "github.com/googleapis/gax-go"
+
+	"cloud.google.com/go/internal"
+	"cloud.google.com/go/internal/version"

 	"google.golang.org/api/googleapi"
 	"google.golang.org/api/option"
 	htransport "google.golang.org/api/transport/http"

@@ -26,20 +33,22 @@ import (
 	bq "google.golang.org/api/bigquery/v2"
 )

-const prodAddr = "https://www.googleapis.com/bigquery/v2/"
+const (
+	prodAddr  = "https://www.googleapis.com/bigquery/v2/"
+	Scope     = "https://www.googleapis.com/auth/bigquery"
+	userAgent = "gcloud-golang-bigquery/20160429"
+)

-// ExternalData is a table which is stored outside of BigQuery. It is implemented by GCSReference.
-type ExternalData interface {
-	externalDataConfig() bq.ExternalDataConfiguration
+var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)
+
+func setClientHeader(headers http.Header) {
+	headers.Set("x-goog-api-client", xGoogHeader)
 }

-const Scope = "https://www.googleapis.com/auth/bigquery"
-const userAgent = "gcloud-golang-bigquery/20160429"
-
 // Client may be used to perform BigQuery operations.
 type Client struct {
-	service   service
 	projectID string
+	bqs       *bq.Service
 }

 // NewClient constructs a new Client which can perform BigQuery operations.

@@ -53,17 +62,16 @@ func NewClient(ctx context.Context, projectID string, opts ...option.ClientOptio
 	o = append(o, opts...)
 	httpClient, endpoint, err := htransport.NewClient(ctx, o...)
 	if err != nil {
-		return nil, fmt.Errorf("dialing: %v", err)
+		return nil, fmt.Errorf("bigquery: dialing: %v", err)
 	}
-
-	s, err := newBigqueryService(httpClient, endpoint)
+	bqs, err := bq.New(httpClient)
 	if err != nil {
-		return nil, fmt.Errorf("constructing bigquery client: %v", err)
+		return nil, fmt.Errorf("bigquery: constructing client: %v", err)
 	}
-
+	bqs.BasePath = endpoint
 	c := &Client{
-		service:   s,
 		projectID: projectID,
+		bqs:       bqs,
 	}
 	return c, nil
 }

@@ -75,11 +83,74 @@ func (c *Client) Close() error {
 	return nil
 }

-func (c *Client) insertJob(ctx context.Context, conf *insertJobConf) (*Job, error) {
-	job, err := c.service.insertJob(ctx, c.projectID, conf)
+// Calls the Jobs.Insert RPC and returns a Job.
+func (c *Client) insertJob(ctx context.Context, job *bq.Job, media io.Reader) (*Job, error) {
+	call := c.bqs.Jobs.Insert(c.projectID, job).Context(ctx)
+	setClientHeader(call.Header())
+	if media != nil {
+		call.Media(media)
+	}
+	var res *bq.Job
+	var err error
+	invoke := func() error {
+		res, err = call.Do()
+		return err
+	}
+	// A job with a client-generated ID can be retried; the presence of the
+	// ID makes the insert operation idempotent.
+	// We don't retry if there is media, because it is an io.Reader. We'd
+	// have to read the contents and keep it in memory, and that could be expensive.
+	// TODO(jba): Look into retrying if media != nil.
+	if job.JobReference != nil && media == nil {
+		err = runWithRetry(ctx, invoke)
+	} else {
+		err = invoke()
+	}
 	if err != nil {
 		return nil, err
 	}
-	job.c = c
-	return job, nil
+	return bqToJob(res, c)
 }

+// Convert a number of milliseconds since the Unix epoch to a time.Time.
+// Treat an input of zero specially: convert it to the zero time,
+// rather than the start of the epoch.
+func unixMillisToTime(m int64) time.Time {
+	if m == 0 {
+		return time.Time{}
+	}
+	return time.Unix(0, m*1e6)
+}
+
+// runWithRetry calls the function until it returns nil or a non-retryable error, or
+// the context is done.
+// See the similar function in ../storage/invoke.go. The main difference is the
+// reason for retrying.
+func runWithRetry(ctx context.Context, call func() error) error {
+	// These parameters match the suggestions in https://cloud.google.com/bigquery/sla.
+	backoff := gax.Backoff{
+		Initial:    1 * time.Second,
+		Max:        32 * time.Second,
+		Multiplier: 2,
+	}
+	return internal.Retry(ctx, backoff, func() (stop bool, err error) {
+		err = call()
+		if err == nil {
+			return true, nil
+		}
+		return !retryableError(err), err
+	})
+}
+
+// This is the correct definition of retryable according to the BigQuery team.
+func retryableError(err error) bool {
+	e, ok := err.(*googleapi.Error)
+	if !ok {
+		return false
+	}
+	var reason string
+	if len(e.Errors) > 0 {
+		reason = e.Errors[0].Reason
+	}
+	return reason == "backendError" || reason == "rateLimitExceeded"
+}
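The retry helper gives every idempotent call in this package the same backoff policy. A minimal sketch of the calling pattern (it mirrors Dataset.Metadata in the dataset.go diff below; `call` stands for any generated *bq* API call value):

    var res *bq.Dataset
    err := runWithRetry(ctx, func() (err error) {
        res, err = call.Do() // retried on backendError / rateLimitExceeded
        return err
    })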
vendor/cloud.google.com/go/bigquery/copy.go

@@ -21,12 +21,6 @@ import (

 // CopyConfig holds the configuration for a copy job.
 type CopyConfig struct {
-	// JobID is the ID to use for the job. If empty, a random job ID will be generated.
-	JobID string
-
-	// If AddJobIDSuffix is true, then a random string will be appended to JobID.
-	AddJobIDSuffix bool
-
 	// Srcs are the tables from which data will be copied.
 	Srcs []*Table

@@ -38,18 +32,51 @@ type CopyConfig struct {
 	CreateDisposition TableCreateDisposition

 	// WriteDisposition specifies how existing data in the destination table is treated.
-	// The default is WriteAppend.
+	// The default is WriteEmpty.
 	WriteDisposition TableWriteDisposition
+
+	// The labels associated with this job.
+	Labels map[string]string
 }

+func (c *CopyConfig) toBQ() *bq.JobConfiguration {
+	var ts []*bq.TableReference
+	for _, t := range c.Srcs {
+		ts = append(ts, t.toBQ())
+	}
+	return &bq.JobConfiguration{
+		Labels: c.Labels,
+		Copy: &bq.JobConfigurationTableCopy{
+			CreateDisposition: string(c.CreateDisposition),
+			WriteDisposition:  string(c.WriteDisposition),
+			DestinationTable:  c.Dst.toBQ(),
+			SourceTables:      ts,
+		},
+	}
+}
+
+func bqToCopyConfig(q *bq.JobConfiguration, c *Client) *CopyConfig {
+	cc := &CopyConfig{
+		Labels:            q.Labels,
+		CreateDisposition: TableCreateDisposition(q.Copy.CreateDisposition),
+		WriteDisposition:  TableWriteDisposition(q.Copy.WriteDisposition),
+		Dst:               bqToTable(q.Copy.DestinationTable, c),
+	}
+	for _, t := range q.Copy.SourceTables {
+		cc.Srcs = append(cc.Srcs, bqToTable(t, c))
+	}
+	return cc
+}
+
 // A Copier copies data into a BigQuery table from one or more BigQuery tables.
 type Copier struct {
+	JobIDConfig
 	CopyConfig
 	c *Client
 }

 // CopierFrom returns a Copier which can be used to copy data into a
-// BigQuery table from one or more BigQuery tables.
+// BigQuery table from one or more BigQuery tables.
 // The returned Copier may optionally be further configured before its Run method is called.
 func (t *Table) CopierFrom(srcs ...*Table) *Copier {
 	return &Copier{

@@ -63,17 +90,12 @@ func (t *Table) CopierFrom(srcs ...*Table) *Copier {

 // Run initiates a copy job.
 func (c *Copier) Run(ctx context.Context) (*Job, error) {
-	conf := &bq.JobConfigurationTableCopy{
-		CreateDisposition: string(c.CreateDisposition),
-		WriteDisposition:  string(c.WriteDisposition),
-		DestinationTable:  c.Dst.tableRefProto(),
-	}
-	for _, t := range c.Srcs {
-		conf.SourceTables = append(conf.SourceTables, t.tableRefProto())
-	}
-	job := &bq.Job{
-		JobReference:  createJobRef(c.JobID, c.AddJobIDSuffix, c.c.projectID),
-		Configuration: &bq.JobConfiguration{Copy: conf},
-	}
-	return c.c.insertJob(ctx, &insertJobConf{job: job})
+	return c.c.insertJob(ctx, c.newJob(), nil)
+}
+
+func (c *Copier) newJob() *bq.Job {
+	return &bq.Job{
+		JobReference:  c.JobIDConfig.createJobRef(c.c.projectID),
+		Configuration: c.CopyConfig.toBQ(),
+	}
 }
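Usage sketch (assumed caller code, with hypothetical project, dataset and table names) showing the new Labels field on a copy job:

    client, err := bigquery.NewClient(ctx, "my-project")
    if err != nil {
        log.Fatal(err)
    }
    dst := client.Dataset("analytics").Table("events_all")
    copier := dst.CopierFrom(
        client.Dataset("analytics").Table("events_2017_11"),
        client.Dataset("analytics").Table("events_2017_12"),
    )
    copier.WriteDisposition = bigquery.WriteTruncate
    copier.Labels = map[string]string{"source": "backfill"} // new in this version
    job, err := copier.Run(ctx)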
vendor/cloud.google.com/go/bigquery/copy_test.go

@@ -17,7 +17,10 @@ package bigquery

 import (
 	"testing"

-	"golang.org/x/net/context"
+	"github.com/google/go-cmp/cmp/cmpopts"
+
+	"cloud.google.com/go/internal/testutil"
+
 	bq "google.golang.org/api/bigquery/v2"
 )

@@ -44,10 +47,11 @@ func defaultCopyJob() *bq.Job {
 }

 func TestCopy(t *testing.T) {
-	defer fixRandomJobID("RANDOM")()
+	defer fixRandomID("RANDOM")()
 	testCases := []struct {
 		dst    *Table
 		srcs   []*Table
+		jobID  string
 		config CopyConfig
 		want   *bq.Job
 	}{

@@ -82,9 +86,11 @@ func TestCopy(t *testing.T) {
 			config: CopyConfig{
 				CreateDisposition: CreateNever,
 				WriteDisposition:  WriteTruncate,
+				Labels:            map[string]string{"a": "b"},
 			},
 			want: func() *bq.Job {
 				j := defaultCopyJob()
+				j.Configuration.Labels = map[string]string{"a": "b"}
 				j.Configuration.Copy.CreateDisposition = "CREATE_NEVER"
 				j.Configuration.Copy.WriteDisposition = "WRITE_TRUNCATE"
 				return j

@@ -103,7 +109,7 @@ func TestCopy(t *testing.T) {
 					TableID:   "s-table-id",
 				},
 			},
-			config: CopyConfig{JobID: "job-id"},
+			jobID: "job-id",
 			want: func() *bq.Job {
 				j := defaultCopyJob()
 				j.JobReference.JobId = "job-id"

@@ -111,22 +117,25 @@ func TestCopy(t *testing.T) {
 			}(),
 		},
 	}
+	c := &Client{projectID: "client-project-id"}
 	for i, tc := range testCases {
-		s := &testService{}
-		c := &Client{
-			service:   s,
-			projectID: "client-project-id",
-		}
 		tc.dst.c = c
 		copier := tc.dst.CopierFrom(tc.srcs...)
+		copier.JobID = tc.jobID
 		tc.config.Srcs = tc.srcs
 		tc.config.Dst = tc.dst
 		copier.CopyConfig = tc.config
-		if _, err := copier.Run(context.Background()); err != nil {
-			t.Errorf("#%d: err calling Run: %v", i, err)
-			continue
+		got := copier.newJob()
+		checkJob(t, i, got, tc.want)
+
+		jc, err := bqToJobConfig(got.Configuration, c)
+		if err != nil {
+			t.Fatalf("#%d: %v", i, err)
 		}
-		checkJob(t, i, s.Job, tc.want)
+		diff := testutil.Diff(jc.(*CopyConfig), &copier.CopyConfig,
+			cmpopts.IgnoreUnexported(Table{}))
+		if diff != "" {
+			t.Errorf("#%d: (got=-, want=+:\n%s", i, diff)
+		}
 	}
 }
vendor/cloud.google.com/go/bigquery/dataset.go

@@ -15,11 +15,14 @@

 package bigquery

 import (
+	"errors"
+	"fmt"
 	"time"

 	"cloud.google.com/go/internal/optional"

 	"golang.org/x/net/context"
+	bq "google.golang.org/api/bigquery/v2"
 	"google.golang.org/api/iterator"
 )

@@ -38,6 +41,7 @@ type DatasetMetadata struct {
 	Location               string            // The geo location of the dataset.
 	DefaultTableExpiration time.Duration     // The default expiration time for new tables.
 	Labels                 map[string]string // User-provided labels.
+	Access                 []*AccessEntry    // Access permissions.

 	// These fields are read-only.
 	CreationTime time.Time

@@ -47,7 +51,6 @@ type DatasetMetadata struct {
 	// ETag is the ETag obtained when reading metadata. Pass it to Dataset.Update to
 	// ensure that the metadata hasn't changed since it was read.
 	ETag string
-	// TODO(jba): access rules
 }

 // DatasetMetadataToUpdate is used when updating a dataset's metadata.

@@ -55,30 +58,15 @@ type DatasetMetadata struct {
 type DatasetMetadataToUpdate struct {
 	Description optional.String // The user-friendly description of this table.
 	Name        optional.String // The user-friendly name for this dataset.

 	// DefaultTableExpiration is the the default expiration time for new tables.
 	// If set to time.Duration(0), new tables never expire.
 	DefaultTableExpiration optional.Duration

-	setLabels    map[string]string
-	deleteLabels map[string]bool
-}
+	// The entire access list. It is not possible to replace individual entries.
+	Access []*AccessEntry

-// SetLabel causes a label to be added or modified when dm is used
-// in a call to Dataset.Update.
-func (dm *DatasetMetadataToUpdate) SetLabel(name, value string) {
-	if dm.setLabels == nil {
-		dm.setLabels = map[string]string{}
-	}
-	dm.setLabels[name] = value
-}
-
-// DeleteLabel causes a label to be deleted when dm is used in a
-// call to Dataset.Update.
-func (dm *DatasetMetadataToUpdate) DeleteLabel(name string) {
-	if dm.deleteLabels == nil {
-		dm.deleteLabels = map[string]bool{}
-	}
-	dm.deleteLabels[name] = true
+	labelUpdater
 }

 // Dataset creates a handle to a BigQuery dataset in the client's project.

@@ -98,17 +86,100 @@ func (c *Client) DatasetInProject(projectID, datasetID string) *Dataset {
 // Create creates a dataset in the BigQuery service. An error will be returned if the
 // dataset already exists. Pass in a DatasetMetadata value to configure the dataset.
 func (d *Dataset) Create(ctx context.Context, md *DatasetMetadata) error {
-	return d.c.service.insertDataset(ctx, d.DatasetID, d.ProjectID, md)
+	ds, err := md.toBQ()
+	if err != nil {
+		return err
+	}
+	ds.DatasetReference = &bq.DatasetReference{DatasetId: d.DatasetID}
+	call := d.c.bqs.Datasets.Insert(d.ProjectID, ds).Context(ctx)
+	setClientHeader(call.Header())
+	_, err = call.Do()
+	return err
+}
+
+func (dm *DatasetMetadata) toBQ() (*bq.Dataset, error) {
+	ds := &bq.Dataset{}
+	if dm == nil {
+		return ds, nil
+	}
+	ds.FriendlyName = dm.Name
+	ds.Description = dm.Description
+	ds.Location = dm.Location
+	ds.DefaultTableExpirationMs = int64(dm.DefaultTableExpiration / time.Millisecond)
+	ds.Labels = dm.Labels
+	var err error
+	ds.Access, err = accessListToBQ(dm.Access)
+	if err != nil {
+		return nil, err
+	}
+	if !dm.CreationTime.IsZero() {
+		return nil, errors.New("bigquery: Dataset.CreationTime is not writable")
+	}
+	if !dm.LastModifiedTime.IsZero() {
+		return nil, errors.New("bigquery: Dataset.LastModifiedTime is not writable")
+	}
+	if dm.FullID != "" {
+		return nil, errors.New("bigquery: Dataset.FullID is not writable")
+	}
+	if dm.ETag != "" {
+		return nil, errors.New("bigquery: Dataset.ETag is not writable")
+	}
+	return ds, nil
+}
+
+func accessListToBQ(a []*AccessEntry) ([]*bq.DatasetAccess, error) {
+	var q []*bq.DatasetAccess
+	for _, e := range a {
+		a, err := e.toBQ()
+		if err != nil {
+			return nil, err
+		}
+		q = append(q, a)
+	}
+	return q, nil
+}
+
 // Delete deletes the dataset.
 func (d *Dataset) Delete(ctx context.Context) error {
-	return d.c.service.deleteDataset(ctx, d.DatasetID, d.ProjectID)
+	call := d.c.bqs.Datasets.Delete(d.ProjectID, d.DatasetID).Context(ctx)
+	setClientHeader(call.Header())
+	return call.Do()
 }

 // Metadata fetches the metadata for the dataset.
 func (d *Dataset) Metadata(ctx context.Context) (*DatasetMetadata, error) {
-	return d.c.service.getDatasetMetadata(ctx, d.ProjectID, d.DatasetID)
+	call := d.c.bqs.Datasets.Get(d.ProjectID, d.DatasetID).Context(ctx)
+	setClientHeader(call.Header())
+	var ds *bq.Dataset
+	if err := runWithRetry(ctx, func() (err error) {
+		ds, err = call.Do()
+		return err
+	}); err != nil {
+		return nil, err
+	}
+	return bqToDatasetMetadata(ds)
+}
+
+func bqToDatasetMetadata(d *bq.Dataset) (*DatasetMetadata, error) {
+	dm := &DatasetMetadata{
+		CreationTime:           unixMillisToTime(d.CreationTime),
+		LastModifiedTime:       unixMillisToTime(d.LastModifiedTime),
+		DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond,
+		Description:            d.Description,
+		Name:                   d.FriendlyName,
+		FullID:                 d.Id,
+		Location:               d.Location,
+		Labels:                 d.Labels,
+		ETag:                   d.Etag,
+	}
+	for _, a := range d.Access {
+		e, err := bqToAccessEntry(a, nil)
+		if err != nil {
+			return nil, err
+		}
+		dm.Access = append(dm.Access, e)
+	}
+	return dm, nil
 }

 // Update modifies specific Dataset metadata fields.

@@ -116,7 +187,63 @@ func (d *Dataset) Metadata(ctx context.Context) (*DatasetMetadata, error) {
 // set the etag argument to the DatasetMetadata.ETag field from the read.
 // Pass the empty string for etag for a "blind write" that will always succeed.
 func (d *Dataset) Update(ctx context.Context, dm DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error) {
-	return d.c.service.patchDataset(ctx, d.ProjectID, d.DatasetID, &dm, etag)
+	ds, err := dm.toBQ()
+	if err != nil {
+		return nil, err
+	}
+	call := d.c.bqs.Datasets.Patch(d.ProjectID, d.DatasetID, ds).Context(ctx)
+	setClientHeader(call.Header())
+	if etag != "" {
+		call.Header().Set("If-Match", etag)
+	}
+	var ds2 *bq.Dataset
+	if err := runWithRetry(ctx, func() (err error) {
+		ds2, err = call.Do()
+		return err
+	}); err != nil {
+		return nil, err
+	}
+	return bqToDatasetMetadata(ds2)
+}
+
+func (dm *DatasetMetadataToUpdate) toBQ() (*bq.Dataset, error) {
+	ds := &bq.Dataset{}
+	forceSend := func(field string) {
+		ds.ForceSendFields = append(ds.ForceSendFields, field)
+	}
+
+	if dm.Description != nil {
+		ds.Description = optional.ToString(dm.Description)
+		forceSend("Description")
+	}
+	if dm.Name != nil {
+		ds.FriendlyName = optional.ToString(dm.Name)
+		forceSend("FriendlyName")
+	}
+	if dm.DefaultTableExpiration != nil {
+		dur := optional.ToDuration(dm.DefaultTableExpiration)
+		if dur == 0 {
+			// Send a null to delete the field.
+			ds.NullFields = append(ds.NullFields, "DefaultTableExpirationMs")
+		} else {
+			ds.DefaultTableExpirationMs = int64(dur / time.Millisecond)
+		}
+	}
+	if dm.Access != nil {
+		var err error
+		ds.Access, err = accessListToBQ(dm.Access)
+		if err != nil {
+			return nil, err
+		}
+		if len(ds.Access) == 0 {
+			ds.NullFields = append(ds.NullFields, "Access")
+		}
+	}
+	labels, forces, nulls := dm.update()
+	ds.Labels = labels
+	ds.ForceSendFields = append(ds.ForceSendFields, forces...)
+	ds.NullFields = append(ds.NullFields, nulls...)
+	return ds, nil
 }

 // Table creates a handle to a BigQuery table in the dataset.

@@ -163,16 +290,41 @@ func (it *TableIterator) Next() (*Table, error) {
 // PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
 func (it *TableIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

+// for testing
+var listTables = func(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) {
+	call := it.dataset.c.bqs.Tables.List(it.dataset.ProjectID, it.dataset.DatasetID).
+		PageToken(pageToken).
+		Context(it.ctx)
+	setClientHeader(call.Header())
+	if pageSize > 0 {
+		call.MaxResults(int64(pageSize))
+	}
+	var res *bq.TableList
+	err := runWithRetry(it.ctx, func() (err error) {
+		res, err = call.Do()
+		return err
+	})
+	return res, err
+}
+
 func (it *TableIterator) fetch(pageSize int, pageToken string) (string, error) {
-	tables, tok, err := it.dataset.c.service.listTables(it.ctx, it.dataset.ProjectID, it.dataset.DatasetID, pageSize, pageToken)
+	res, err := listTables(it, pageSize, pageToken)
 	if err != nil {
 		return "", err
 	}
-	for _, t := range tables {
-		t.c = it.dataset.c
-		it.tables = append(it.tables, t)
+	for _, t := range res.Tables {
+		it.tables = append(it.tables, bqToTable(t.TableReference, it.dataset.c))
 	}
+	return res.NextPageToken, nil
+}
+
+func bqToTable(tr *bq.TableReference, c *Client) *Table {
+	return &Table{
+		ProjectID: tr.ProjectId,
+		DatasetID: tr.DatasetId,
+		TableID:   tr.TableId,
+		c:         c,
+	}
-	return tok, nil
 }

 // Datasets returns an iterator over the datasets in a project.

@@ -232,15 +384,118 @@ func (it *DatasetIterator) Next() (*Dataset, error) {
 	return item, nil
 }

+// for testing
+var listDatasets = func(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) {
+	call := it.c.bqs.Datasets.List(it.ProjectID).
+		Context(it.ctx).
+		PageToken(pageToken).
+		All(it.ListHidden)
+	setClientHeader(call.Header())
+	if pageSize > 0 {
+		call.MaxResults(int64(pageSize))
+	}
+	if it.Filter != "" {
+		call.Filter(it.Filter)
+	}
+	var res *bq.DatasetList
+	err := runWithRetry(it.ctx, func() (err error) {
+		res, err = call.Do()
+		return err
+	})
+	return res, err
+}
+
 func (it *DatasetIterator) fetch(pageSize int, pageToken string) (string, error) {
-	datasets, nextPageToken, err := it.c.service.listDatasets(it.ctx, it.ProjectID,
-		pageSize, pageToken, it.ListHidden, it.Filter)
+	res, err := listDatasets(it, pageSize, pageToken)
 	if err != nil {
 		return "", err
 	}
-	for _, d := range datasets {
-		d.c = it.c
-		it.items = append(it.items, d)
+	for _, d := range res.Datasets {
+		it.items = append(it.items, &Dataset{
+			ProjectID: d.DatasetReference.ProjectId,
+			DatasetID: d.DatasetReference.DatasetId,
+			c:         it.c,
+		})
 	}
-	return nextPageToken, nil
+	return res.NextPageToken, nil
 }

+// An AccessEntry describes the permissions that an entity has on a dataset.
+type AccessEntry struct {
+	Role       AccessRole // The role of the entity
+	EntityType EntityType // The type of entity
+	Entity     string     // The entity (individual or group) granted access
+	View       *Table     // The view granted access (EntityType must be ViewEntity)
+}
+
+// AccessRole is the level of access to grant to a dataset.
+type AccessRole string
+
+const (
+	OwnerRole  AccessRole = "OWNER"
+	ReaderRole AccessRole = "READER"
+	WriterRole AccessRole = "WRITER"
+)
+
+// EntityType is the type of entity in an AccessEntry.
+type EntityType int
+
+const (
+	// A domain (e.g. "example.com")
+	DomainEntity EntityType = iota + 1
+
+	// Email address of a Google Group
+	GroupEmailEntity
+
+	// Email address of an individual user.
+	UserEmailEntity
+
+	// A special group: one of projectOwners, projectReaders, projectWriters or allAuthenticatedUsers.
+	SpecialGroupEntity
+
+	// A BigQuery view.
+	ViewEntity
+)
+
+func (e *AccessEntry) toBQ() (*bq.DatasetAccess, error) {
+	q := &bq.DatasetAccess{Role: string(e.Role)}
+	switch e.EntityType {
+	case DomainEntity:
+		q.Domain = e.Entity
+	case GroupEmailEntity:
+		q.GroupByEmail = e.Entity
+	case UserEmailEntity:
+		q.UserByEmail = e.Entity
+	case SpecialGroupEntity:
+		q.SpecialGroup = e.Entity
+	case ViewEntity:
+		q.View = e.View.toBQ()
+	default:
+		return nil, fmt.Errorf("bigquery: unknown entity type %d", e.EntityType)
+	}
+	return q, nil
+}
+
+func bqToAccessEntry(q *bq.DatasetAccess, c *Client) (*AccessEntry, error) {
+	e := &AccessEntry{Role: AccessRole(q.Role)}
+	switch {
+	case q.Domain != "":
+		e.Entity = q.Domain
+		e.EntityType = DomainEntity
+	case q.GroupByEmail != "":
+		e.Entity = q.GroupByEmail
+		e.EntityType = GroupEmailEntity
+	case q.UserByEmail != "":
+		e.Entity = q.UserByEmail
+		e.EntityType = UserEmailEntity
+	case q.SpecialGroup != "":
+		e.Entity = q.SpecialGroup
+		e.EntityType = SpecialGroupEntity
+	case q.View != nil:
+		e.View = c.DatasetInProject(q.View.ProjectId, q.View.DatasetId).Table(q.View.TableId)
+		e.EntityType = ViewEntity
+	default:
+		return nil, errors.New("bigquery: invalid access value")
+	}
+	return e, nil
}
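Usage sketch (assumed caller code; the email address is hypothetical) for the new Access field, using the etag from Metadata for an optimistic-concurrency update as described in the Update doc comment:

    md, err := client.Dataset("analytics").Metadata(ctx)
    if err != nil {
        log.Fatal(err)
    }
    update := bigquery.DatasetMetadataToUpdate{
        Access: append(md.Access, &bigquery.AccessEntry{
            Role:       bigquery.ReaderRole,
            EntityType: bigquery.UserEmailEntity,
            Entity:     "analyst@example.com",
        }),
    }
    if _, err := client.Dataset("analytics").Update(ctx, update, md.ETag); err != nil {
        log.Fatal(err)
    }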
@ -18,24 +18,29 @@ import (
|
|||
"errors"
|
||||
"strconv"
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/google/go-cmp/cmp"
|
||||
|
||||
"cloud.google.com/go/internal/testutil"
|
||||
|
||||
"golang.org/x/net/context"
|
||||
bq "google.golang.org/api/bigquery/v2"
|
||||
itest "google.golang.org/api/iterator/testing"
|
||||
)
|
||||
|
||||
// readServiceStub services read requests by returning data from an in-memory list of values.
|
||||
type listTablesServiceStub struct {
|
||||
type listTablesStub struct {
|
||||
expectedProject, expectedDataset string
|
||||
tables []*Table
|
||||
service
|
||||
tables []*bq.TableListTables
|
||||
}
|
||||
|
||||
func (s *listTablesServiceStub) listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error) {
|
||||
if projectID != s.expectedProject {
|
||||
return nil, "", errors.New("wrong project id")
|
||||
func (s *listTablesStub) listTables(it *TableIterator, pageSize int, pageToken string) (*bq.TableList, error) {
|
||||
if it.dataset.ProjectID != s.expectedProject {
|
||||
return nil, errors.New("wrong project id")
|
||||
}
|
||||
if datasetID != s.expectedDataset {
|
||||
return nil, "", errors.New("wrong dataset id")
|
||||
if it.dataset.DatasetID != s.expectedDataset {
|
||||
return nil, errors.New("wrong dataset id")
|
||||
}
|
||||
const maxPageSize = 2
|
||||
if pageSize <= 0 || pageSize > maxPageSize {
|
||||
|
@ -46,7 +51,7 @@ func (s *listTablesServiceStub) listTables(ctx context.Context, projectID, datas
|
|||
var err error
|
||||
start, err = strconv.Atoi(pageToken)
|
||||
if err != nil {
|
||||
return nil, "", err
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
end := start + pageSize
|
||||
|
@ -57,100 +62,267 @@ func (s *listTablesServiceStub) listTables(ctx context.Context, projectID, datas
|
|||
if end < len(s.tables) {
|
||||
nextPageToken = strconv.Itoa(end)
|
||||
}
|
||||
return s.tables[start:end], nextPageToken, nil
|
||||
return &bq.TableList{
|
||||
Tables: s.tables[start:end],
|
||||
NextPageToken: nextPageToken,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func TestTables(t *testing.T) {
|
||||
t1 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t1"}
|
||||
t2 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t2"}
|
||||
t3 := &Table{ProjectID: "p1", DatasetID: "d1", TableID: "t3"}
|
||||
allTables := []*Table{t1, t2, t3}
|
||||
c := &Client{
|
||||
service: &listTablesServiceStub{
|
||||
expectedProject: "x",
|
||||
expectedDataset: "y",
|
||||
tables: allTables,
|
||||
},
|
||||
projectID: "x",
|
||||
c := &Client{projectID: "p1"}
|
||||
inTables := []*bq.TableListTables{
|
||||
{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t1"}},
|
||||
{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t2"}},
|
||||
{TableReference: &bq.TableReference{ProjectId: "p1", DatasetId: "d1", TableId: "t3"}},
|
||||
}
|
||||
msg, ok := itest.TestIterator(allTables,
|
||||
func() interface{} { return c.Dataset("y").Tables(context.Background()) },
|
||||
outTables := []*Table{
|
||||
{ProjectID: "p1", DatasetID: "d1", TableID: "t1", c: c},
|
||||
{ProjectID: "p1", DatasetID: "d1", TableID: "t2", c: c},
|
||||
{ProjectID: "p1", DatasetID: "d1", TableID: "t3", c: c},
|
||||
}
|
||||
|
||||
lts := &listTablesStub{
|
||||
expectedProject: "p1",
|
||||
expectedDataset: "d1",
|
||||
tables: inTables,
|
||||
}
|
||||
old := listTables
|
||||
listTables = lts.listTables // cannot use t.Parallel with this test
|
||||
defer func() { listTables = old }()
|
||||
|
||||
msg, ok := itest.TestIterator(outTables,
|
||||
func() interface{} { return c.Dataset("d1").Tables(context.Background()) },
|
||||
func(it interface{}) (interface{}, error) { return it.(*TableIterator).Next() })
|
||||
if !ok {
|
||||
t.Error(msg)
|
||||
}
|
||||
}
|
||||
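The stubs in this file work because listing now goes through package-level function variables (listTables, listDatasets) rather than a service interface; a test swaps the variable in and restores it on exit. A minimal sketch of that seam, with hypothetical names:

	// fetchPage is a package-level seam; production code assigns the real RPC.
	var fetchPage = func(token string) (string, error) { return "", nil }

	func TestFetch(t *testing.T) {
		old := fetchPage
		fetchPage = func(token string) (string, error) { return "stubbed", nil }
		defer func() { fetchPage = old }() // restore; this is why t.Parallel is off
		// ... exercise code that calls fetchPage ...
	}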
type listDatasetsFake struct {
	service

	projectID string
	datasets  []*Dataset
	hidden    map[*Dataset]bool
type listDatasetsStub struct {
	expectedProject string
	datasets        []*bq.DatasetListDatasets
	hidden          map[*bq.DatasetListDatasets]bool
}

func (df *listDatasetsFake) listDatasets(_ context.Context, projectID string, pageSize int, pageToken string, listHidden bool, filter string) ([]*Dataset, string, error) {
func (s *listDatasetsStub) listDatasets(it *DatasetIterator, pageSize int, pageToken string) (*bq.DatasetList, error) {
	const maxPageSize = 2
	if pageSize <= 0 || pageSize > maxPageSize {
		pageSize = maxPageSize
	}
	if filter != "" {
		return nil, "", errors.New("filter not supported")
	if it.Filter != "" {
		return nil, errors.New("filter not supported")
	}
	if projectID != df.projectID {
		return nil, "", errors.New("bad project ID")
	if it.ProjectID != s.expectedProject {
		return nil, errors.New("bad project ID")
	}
	start := 0
	if pageToken != "" {
		var err error
		start, err = strconv.Atoi(pageToken)
		if err != nil {
			return nil, "", err
			return nil, err
		}
	}
	var (
		i             int
		result        []*Dataset
		result        []*bq.DatasetListDatasets
		nextPageToken string
	)
	for i = start; len(result) < pageSize && i < len(df.datasets); i++ {
		if df.hidden[df.datasets[i]] && !listHidden {
	for i = start; len(result) < pageSize && i < len(s.datasets); i++ {
		if s.hidden[s.datasets[i]] && !it.ListHidden {
			continue
		}
		result = append(result, df.datasets[i])
		result = append(result, s.datasets[i])
	}
	if i < len(df.datasets) {
	if i < len(s.datasets) {
		nextPageToken = strconv.Itoa(i)
	}
	return result, nextPageToken, nil
	return &bq.DatasetList{
		Datasets:      result,
		NextPageToken: nextPageToken,
	}, nil
}

func TestDatasets(t *testing.T) {
	service := &listDatasetsFake{projectID: "p"}
	client := &Client{service: service}
	datasets := []*Dataset{
	client := &Client{projectID: "p"}
	inDatasets := []*bq.DatasetListDatasets{
		{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "a"}},
		{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "b"}},
		{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "hidden"}},
		{DatasetReference: &bq.DatasetReference{ProjectId: "p", DatasetId: "c"}},
	}
	outDatasets := []*Dataset{
		{"p", "a", client},
		{"p", "b", client},
		{"p", "hidden", client},
		{"p", "c", client},
	}
	service.datasets = datasets
	service.hidden = map[*Dataset]bool{datasets[2]: true}
	c := &Client{
		projectID: "p",
		service:   service,
	lds := &listDatasetsStub{
		expectedProject: "p",
		datasets:        inDatasets,
		hidden:          map[*bq.DatasetListDatasets]bool{inDatasets[2]: true},
	}
	msg, ok := itest.TestIterator(datasets,
		func() interface{} { it := c.Datasets(context.Background()); it.ListHidden = true; return it },
	old := listDatasets
	listDatasets = lds.listDatasets // cannot use t.Parallel with this test
	defer func() { listDatasets = old }()

	msg, ok := itest.TestIterator(outDatasets,
		func() interface{} { it := client.Datasets(context.Background()); it.ListHidden = true; return it },
		func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
	if !ok {
		t.Fatalf("ListHidden=true: %s", msg)
	}

	msg, ok = itest.TestIterator([]*Dataset{datasets[0], datasets[1], datasets[3]},
		func() interface{} { it := c.Datasets(context.Background()); it.ListHidden = false; return it },
	msg, ok = itest.TestIterator([]*Dataset{outDatasets[0], outDatasets[1], outDatasets[3]},
		func() interface{} { it := client.Datasets(context.Background()); it.ListHidden = false; return it },
		func(it interface{}) (interface{}, error) { return it.(*DatasetIterator).Next() })
	if !ok {
		t.Fatalf("ListHidden=false: %s", msg)
	}
}
func TestDatasetToBQ(t *testing.T) {
	for _, test := range []struct {
		in   *DatasetMetadata
		want *bq.Dataset
	}{
		{nil, &bq.Dataset{}},
		{&DatasetMetadata{Name: "name"}, &bq.Dataset{FriendlyName: "name"}},
		{&DatasetMetadata{
			Name:                   "name",
			Description:            "desc",
			DefaultTableExpiration: time.Hour,
			Location:               "EU",
			Labels:                 map[string]string{"x": "y"},
			Access:                 []*AccessEntry{{Role: OwnerRole, Entity: "example.com", EntityType: DomainEntity}},
		}, &bq.Dataset{
			FriendlyName:             "name",
			Description:              "desc",
			DefaultTableExpirationMs: 60 * 60 * 1000,
			Location:                 "EU",
			Labels:                   map[string]string{"x": "y"},
			Access:                   []*bq.DatasetAccess{{Role: "OWNER", Domain: "example.com"}},
		}},
	} {
		got, err := test.in.toBQ()
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(got, test.want) {
			t.Errorf("%v:\ngot %+v\nwant %+v", test.in, got, test.want)
		}
	}

	// Check that non-writeable fields are unset.
	aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
	for _, dm := range []*DatasetMetadata{
		{CreationTime: aTime},
		{LastModifiedTime: aTime},
		{FullID: "x"},
		{ETag: "e"},
	} {
		if _, err := dm.toBQ(); err == nil {
			t.Errorf("%+v: got nil, want error", dm)
		}
	}
}

func TestBQToDatasetMetadata(t *testing.T) {
	cTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
	cMillis := cTime.UnixNano() / 1e6
	mTime := time.Date(2017, 10, 31, 0, 0, 0, 0, time.Local)
	mMillis := mTime.UnixNano() / 1e6
	q := &bq.Dataset{
		CreationTime:             cMillis,
		LastModifiedTime:         mMillis,
		FriendlyName:             "name",
		Description:              "desc",
		DefaultTableExpirationMs: 60 * 60 * 1000,
		Location:                 "EU",
		Labels:                   map[string]string{"x": "y"},
		Access: []*bq.DatasetAccess{
			{Role: "READER", UserByEmail: "joe@example.com"},
			{Role: "WRITER", GroupByEmail: "users@example.com"},
		},
		Etag: "etag",
	}
	want := &DatasetMetadata{
		CreationTime:           cTime,
		LastModifiedTime:       mTime,
		Name:                   "name",
		Description:            "desc",
		DefaultTableExpiration: time.Hour,
		Location:               "EU",
		Labels:                 map[string]string{"x": "y"},
		Access: []*AccessEntry{
			{Role: ReaderRole, Entity: "joe@example.com", EntityType: UserEmailEntity},
			{Role: WriterRole, Entity: "users@example.com", EntityType: GroupEmailEntity},
		},
		ETag: "etag",
	}
	got, err := bqToDatasetMetadata(q)
	if err != nil {
		t.Fatal(err)
	}
	if diff := testutil.Diff(got, want); diff != "" {
		t.Errorf("-got, +want:\n%s", diff)
	}
}

func TestDatasetMetadataToUpdateToBQ(t *testing.T) {
	dm := DatasetMetadataToUpdate{
		Description:            "desc",
		Name:                   "name",
		DefaultTableExpiration: time.Hour,
	}
	dm.SetLabel("label", "value")
	dm.DeleteLabel("del")

	got, err := dm.toBQ()
	if err != nil {
		t.Fatal(err)
	}
	want := &bq.Dataset{
		Description:              "desc",
		FriendlyName:             "name",
		DefaultTableExpirationMs: 60 * 60 * 1000,
		Labels:                   map[string]string{"label": "value"},
		ForceSendFields:          []string{"Description", "FriendlyName"},
		NullFields:               []string{"Labels.del"},
	}
	if diff := testutil.Diff(got, want); diff != "" {
		t.Errorf("-got, +want:\n%s", diff)
	}
}

func TestConvertAccessEntry(t *testing.T) {
	c := &Client{projectID: "pid"}
	for _, e := range []*AccessEntry{
		{Role: ReaderRole, Entity: "e", EntityType: DomainEntity},
		{Role: WriterRole, Entity: "e", EntityType: GroupEmailEntity},
		{Role: OwnerRole, Entity: "e", EntityType: UserEmailEntity},
		{Role: ReaderRole, Entity: "e", EntityType: SpecialGroupEntity},
		{Role: ReaderRole, EntityType: ViewEntity,
			View: &Table{ProjectID: "p", DatasetID: "d", TableID: "t", c: c}},
	} {
		q, err := e.toBQ()
		if err != nil {
			t.Fatal(err)
		}
		got, err := bqToAccessEntry(q, c)
		if err != nil {
			t.Fatal(err)
		}
		if diff := testutil.Diff(got, e, cmp.AllowUnexported(Table{}, Client{})); diff != "" {
			t.Errorf("got=-, want=+:\n%s", diff)
		}
	}

	e := &AccessEntry{Role: ReaderRole, Entity: "e"}
	if _, err := e.toBQ(); err == nil {
		t.Error("got nil, want error")
	}
	if _, err := bqToAccessEntry(&bq.DatasetAccess{Role: "WRITER"}, nil); err == nil {
		t.Error("got nil, want error")
	}
}
689
vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client.go
generated
vendored
Normal file

@ -0,0 +1,689 @@
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

package datatransfer

import (
	"math"
	"time"

	"cloud.google.com/go/internal/version"
	gax "github.com/googleapis/gax-go"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
	"google.golang.org/api/option"
	"google.golang.org/api/transport"
	datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
)

// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
	GetDataSource        []gax.CallOption
	ListDataSources      []gax.CallOption
	CreateTransferConfig []gax.CallOption
	UpdateTransferConfig []gax.CallOption
	DeleteTransferConfig []gax.CallOption
	GetTransferConfig    []gax.CallOption
	ListTransferConfigs  []gax.CallOption
	ScheduleTransferRuns []gax.CallOption
	GetTransferRun       []gax.CallOption
	DeleteTransferRun    []gax.CallOption
	ListTransferRuns     []gax.CallOption
	ListTransferLogs     []gax.CallOption
	CheckValidCreds      []gax.CallOption
}

func defaultClientOptions() []option.ClientOption {
	return []option.ClientOption{
		option.WithEndpoint("bigquerydatatransfer.googleapis.com:443"),
		option.WithScopes(DefaultAuthScopes()...),
	}
}

func defaultCallOptions() *CallOptions {
	retry := map[[2]string][]gax.CallOption{
		{"default", "idempotent"}: {
			gax.WithRetry(func() gax.Retryer {
				return gax.OnCodes([]codes.Code{
					codes.DeadlineExceeded,
					codes.Unavailable,
				}, gax.Backoff{
					Initial:    100 * time.Millisecond,
					Max:        60000 * time.Millisecond,
					Multiplier: 1.3,
				})
			}),
		},
	}
	return &CallOptions{
		GetDataSource:        retry[[2]string{"default", "idempotent"}],
		ListDataSources:      retry[[2]string{"default", "idempotent"}],
		CreateTransferConfig: retry[[2]string{"default", "non_idempotent"}],
		UpdateTransferConfig: retry[[2]string{"default", "non_idempotent"}],
		DeleteTransferConfig: retry[[2]string{"default", "idempotent"}],
		GetTransferConfig:    retry[[2]string{"default", "idempotent"}],
		ListTransferConfigs:  retry[[2]string{"default", "idempotent"}],
		ScheduleTransferRuns: retry[[2]string{"default", "non_idempotent"}],
		GetTransferRun:       retry[[2]string{"default", "idempotent"}],
		DeleteTransferRun:    retry[[2]string{"default", "idempotent"}],
		ListTransferRuns:     retry[[2]string{"default", "idempotent"}],
		ListTransferLogs:     retry[[2]string{"default", "idempotent"}],
		CheckValidCreds:      retry[[2]string{"default", "idempotent"}],
	}
}
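Callers may override these per-method defaults on a constructed client; a hedged sketch using the same gax helpers this file imports (the backoff numbers are illustrative, not recommendations):

	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	c.CallOptions.GetDataSource = []gax.CallOption{
		gax.WithRetry(func() gax.Retryer {
			return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
				Initial:    50 * time.Millisecond, // illustrative values
				Max:        10 * time.Second,
				Multiplier: 2,
			})
		}),
	}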
// Client is a client for interacting with BigQuery Data Transfer API.
type Client struct {
	// The connection to the service.
	conn *grpc.ClientConn

	// The gRPC API client.
	client datatransferpb.DataTransferServiceClient

	// The call options for this service.
	CallOptions *CallOptions

	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}

// NewClient creates a new data transfer service client.
//
// The Google BigQuery Data Transfer Service API enables BigQuery users to
// configure the transfer of their data from other Google Products into BigQuery.
// This service contains methods that are end user exposed. It backs up the
// frontend.
func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error) {
	conn, err := transport.DialGRPC(ctx, append(defaultClientOptions(), opts...)...)
	if err != nil {
		return nil, err
	}
	c := &Client{
		conn:        conn,
		CallOptions: defaultCallOptions(),

		client: datatransferpb.NewDataTransferServiceClient(conn),
	}
	c.setGoogleClientInfo()
	return c, nil
}

// Connection returns the client's connection to the API service.
func (c *Client) Connection() *grpc.ClientConn {
	return c.conn
}

// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *Client) Close() error {
	return c.conn.Close()
}

// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *Client) setGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// ProjectPath returns the path for the project resource.
func ProjectPath(project string) string {
	return "" +
		"projects/" +
		project +
		""
}

// LocationPath returns the path for the location resource.
func LocationPath(project, location string) string {
	return "" +
		"projects/" +
		project +
		"/locations/" +
		location +
		""
}

// LocationDataSourcePath returns the path for the location data source resource.
func LocationDataSourcePath(project, location, dataSource string) string {
	return "" +
		"projects/" +
		project +
		"/locations/" +
		location +
		"/dataSources/" +
		dataSource +
		""
}

// LocationTransferConfigPath returns the path for the location transfer config resource.
func LocationTransferConfigPath(project, location, transferConfig string) string {
	return "" +
		"projects/" +
		project +
		"/locations/" +
		location +
		"/transferConfigs/" +
		transferConfig +
		""
}

// LocationRunPath returns the path for the location run resource.
func LocationRunPath(project, location, transferConfig, run string) string {
	return "" +
		"projects/" +
		project +
		"/locations/" +
		location +
		"/transferConfigs/" +
		transferConfig +
		"/runs/" +
		run +
		""
}

// DataSourcePath returns the path for the data source resource.
func DataSourcePath(project, dataSource string) string {
	return "" +
		"projects/" +
		project +
		"/dataSources/" +
		dataSource +
		""
}

// TransferConfigPath returns the path for the transfer config resource.
func TransferConfigPath(project, transferConfig string) string {
	return "" +
		"projects/" +
		project +
		"/transferConfigs/" +
		transferConfig +
		""
}

// RunPath returns the path for the run resource.
func RunPath(project, transferConfig, run string) string {
	return "" +
		"projects/" +
		project +
		"/transferConfigs/" +
		transferConfig +
		"/runs/" +
		run +
		""
}
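These helpers do nothing beyond concatenating path segments, for example (IDs hypothetical):

	// TransferConfigPath("my-project", "cfg1") == "projects/my-project/transferConfigs/cfg1"
	// RunPath("my-project", "cfg1", "run1") == "projects/my-project/transferConfigs/cfg1/runs/run1"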
// GetDataSource retrieves a supported data source and returns its settings,
// which can be used for UI rendering.
func (c *Client) GetDataSource(ctx context.Context, req *datatransferpb.GetDataSourceRequest, opts ...gax.CallOption) (*datatransferpb.DataSource, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.GetDataSource[0:len(c.CallOptions.GetDataSource):len(c.CallOptions.GetDataSource)], opts...)
	var resp *datatransferpb.DataSource
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetDataSource(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// ListDataSources lists supported data sources and returns their settings,
// which can be used for UI rendering.
func (c *Client) ListDataSources(ctx context.Context, req *datatransferpb.ListDataSourcesRequest, opts ...gax.CallOption) *DataSourceIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ListDataSources[0:len(c.CallOptions.ListDataSources):len(c.CallOptions.ListDataSources)], opts...)
	it := &DataSourceIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.DataSource, string, error) {
		var resp *datatransferpb.ListDataSourcesResponse
		req.PageToken = pageToken
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListDataSources(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.DataSources, resp.NextPageToken, nil
	}
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}

// CreateTransferConfig creates a new data transfer configuration.
func (c *Client) CreateTransferConfig(ctx context.Context, req *datatransferpb.CreateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.CreateTransferConfig[0:len(c.CallOptions.CreateTransferConfig):len(c.CallOptions.CreateTransferConfig)], opts...)
	var resp *datatransferpb.TransferConfig
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CreateTransferConfig(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// UpdateTransferConfig updates a data transfer configuration.
// All fields must be set, even if they are not updated.
func (c *Client) UpdateTransferConfig(ctx context.Context, req *datatransferpb.UpdateTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.UpdateTransferConfig[0:len(c.CallOptions.UpdateTransferConfig):len(c.CallOptions.UpdateTransferConfig)], opts...)
	var resp *datatransferpb.TransferConfig
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.UpdateTransferConfig(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// DeleteTransferConfig deletes a data transfer configuration,
// including any associated transfer runs and logs.
func (c *Client) DeleteTransferConfig(ctx context.Context, req *datatransferpb.DeleteTransferConfigRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.DeleteTransferConfig[0:len(c.CallOptions.DeleteTransferConfig):len(c.CallOptions.DeleteTransferConfig)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		_, err = c.client.DeleteTransferConfig(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}

// GetTransferConfig returns information about a data transfer config.
func (c *Client) GetTransferConfig(ctx context.Context, req *datatransferpb.GetTransferConfigRequest, opts ...gax.CallOption) (*datatransferpb.TransferConfig, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.GetTransferConfig[0:len(c.CallOptions.GetTransferConfig):len(c.CallOptions.GetTransferConfig)], opts...)
	var resp *datatransferpb.TransferConfig
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetTransferConfig(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// ListTransferConfigs returns information about all data transfers in the project.
func (c *Client) ListTransferConfigs(ctx context.Context, req *datatransferpb.ListTransferConfigsRequest, opts ...gax.CallOption) *TransferConfigIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ListTransferConfigs[0:len(c.CallOptions.ListTransferConfigs):len(c.CallOptions.ListTransferConfigs)], opts...)
	it := &TransferConfigIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferConfig, string, error) {
		var resp *datatransferpb.ListTransferConfigsResponse
		req.PageToken = pageToken
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListTransferConfigs(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.TransferConfigs, resp.NextPageToken, nil
	}
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}

// ScheduleTransferRuns creates transfer runs for a time range [range_start_time, range_end_time].
// For each date - or whatever granularity the data source supports - in the
// range, one transfer run is created.
// Note that runs are created per UTC time in the time range.
func (c *Client) ScheduleTransferRuns(ctx context.Context, req *datatransferpb.ScheduleTransferRunsRequest, opts ...gax.CallOption) (*datatransferpb.ScheduleTransferRunsResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ScheduleTransferRuns[0:len(c.CallOptions.ScheduleTransferRuns):len(c.CallOptions.ScheduleTransferRuns)], opts...)
	var resp *datatransferpb.ScheduleTransferRunsResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.ScheduleTransferRuns(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// GetTransferRun returns information about the particular transfer run.
func (c *Client) GetTransferRun(ctx context.Context, req *datatransferpb.GetTransferRunRequest, opts ...gax.CallOption) (*datatransferpb.TransferRun, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.GetTransferRun[0:len(c.CallOptions.GetTransferRun):len(c.CallOptions.GetTransferRun)], opts...)
	var resp *datatransferpb.TransferRun
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.GetTransferRun(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// DeleteTransferRun deletes the specified transfer run.
func (c *Client) DeleteTransferRun(ctx context.Context, req *datatransferpb.DeleteTransferRunRequest, opts ...gax.CallOption) error {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.DeleteTransferRun[0:len(c.CallOptions.DeleteTransferRun):len(c.CallOptions.DeleteTransferRun)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		_, err = c.client.DeleteTransferRun(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	return err
}

// ListTransferRuns returns information about running and completed jobs.
func (c *Client) ListTransferRuns(ctx context.Context, req *datatransferpb.ListTransferRunsRequest, opts ...gax.CallOption) *TransferRunIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ListTransferRuns[0:len(c.CallOptions.ListTransferRuns):len(c.CallOptions.ListTransferRuns)], opts...)
	it := &TransferRunIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferRun, string, error) {
		var resp *datatransferpb.ListTransferRunsResponse
		req.PageToken = pageToken
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListTransferRuns(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.TransferRuns, resp.NextPageToken, nil
	}
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}

// ListTransferLogs returns user facing log messages for the data transfer run.
func (c *Client) ListTransferLogs(ctx context.Context, req *datatransferpb.ListTransferLogsRequest, opts ...gax.CallOption) *TransferMessageIterator {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ListTransferLogs[0:len(c.CallOptions.ListTransferLogs):len(c.CallOptions.ListTransferLogs)], opts...)
	it := &TransferMessageIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*datatransferpb.TransferMessage, string, error) {
		var resp *datatransferpb.ListTransferLogsResponse
		req.PageToken = pageToken
		if pageSize > math.MaxInt32 {
			req.PageSize = math.MaxInt32
		} else {
			req.PageSize = int32(pageSize)
		}
		err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
			var err error
			resp, err = c.client.ListTransferLogs(ctx, req, settings.GRPC...)
			return err
		}, opts...)
		if err != nil {
			return nil, "", err
		}
		return resp.TransferMessages, resp.NextPageToken, nil
	}
	fetch := func(pageSize int, pageToken string) (string, error) {
		items, nextPageToken, err := it.InternalFetch(pageSize, pageToken)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, items...)
		return nextPageToken, nil
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(fetch, it.bufLen, it.takeBuf)
	return it
}

// CheckValidCreds returns true if valid credentials exist for the given data source and
// requesting user.
// Some data sources doesn't support service account, so we need to talk to
// them on behalf of the end user. This API just checks whether we have OAuth
// token for the particular user, which is a pre-requisite before user can
// create a transfer config.
func (c *Client) CheckValidCreds(ctx context.Context, req *datatransferpb.CheckValidCredsRequest, opts ...gax.CallOption) (*datatransferpb.CheckValidCredsResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.CheckValidCreds[0:len(c.CallOptions.CheckValidCreds):len(c.CallOptions.CheckValidCreds)], opts...)
	var resp *datatransferpb.CheckValidCredsResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.CheckValidCreds(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// DataSourceIterator manages a stream of *datatransferpb.DataSource.
type DataSourceIterator struct {
	items    []*datatransferpb.DataSource
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.DataSource, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *DataSourceIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *DataSourceIterator) Next() (*datatransferpb.DataSource, error) {
	var item *datatransferpb.DataSource
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

func (it *DataSourceIterator) bufLen() int {
	return len(it.items)
}

func (it *DataSourceIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}

// TransferConfigIterator manages a stream of *datatransferpb.TransferConfig.
type TransferConfigIterator struct {
	items    []*datatransferpb.TransferConfig
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferConfig, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferConfigIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferConfigIterator) Next() (*datatransferpb.TransferConfig, error) {
	var item *datatransferpb.TransferConfig
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

func (it *TransferConfigIterator) bufLen() int {
	return len(it.items)
}

func (it *TransferConfigIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}

// TransferMessageIterator manages a stream of *datatransferpb.TransferMessage.
type TransferMessageIterator struct {
	items    []*datatransferpb.TransferMessage
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferMessage, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferMessageIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferMessageIterator) Next() (*datatransferpb.TransferMessage, error) {
	var item *datatransferpb.TransferMessage
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

func (it *TransferMessageIterator) bufLen() int {
	return len(it.items)
}

func (it *TransferMessageIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}

// TransferRunIterator manages a stream of *datatransferpb.TransferRun.
type TransferRunIterator struct {
	items    []*datatransferpb.TransferRun
	pageInfo *iterator.PageInfo
	nextFunc func() error

	// InternalFetch is for use by the Google Cloud Libraries only.
	// It is not part of the stable interface of this package.
	//
	// InternalFetch returns results from a single call to the underlying RPC.
	// The number of results is no greater than pageSize.
	// If there are no more results, nextPageToken is empty and err is nil.
	InternalFetch func(pageSize int, pageToken string) (results []*datatransferpb.TransferRun, nextPageToken string, err error)
}

// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *TransferRunIterator) PageInfo() *iterator.PageInfo {
	return it.pageInfo
}

// Next returns the next result. Its second return value is iterator.Done if there are no more
// results. Once Next returns Done, all subsequent calls will return Done.
func (it *TransferRunIterator) Next() (*datatransferpb.TransferRun, error) {
	var item *datatransferpb.TransferRun
	if err := it.nextFunc(); err != nil {
		return item, err
	}
	item = it.items[0]
	it.items = it.items[1:]
	return item, nil
}

func (it *TransferRunIterator) bufLen() int {
	return len(it.items)
}

func (it *TransferRunIterator) takeBuf() interface{} {
	b := it.items
	it.items = nil
	return b
}
288
vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/data_transfer_client_example_test.go
generated
vendored
Normal file

@ -0,0 +1,288 @@
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

package datatransfer_test

import (
	"cloud.google.com/go/bigquery/datatransfer/apiv1"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
	datatransferpb "google.golang.org/genproto/googleapis/cloud/bigquery/datatransfer/v1"
)

func ExampleNewClient() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use client.
	_ = c
}

func ExampleClient_GetDataSource() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.GetDataSourceRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetDataSource(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_ListDataSources() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.ListDataSourcesRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListDataSources(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

func ExampleClient_CreateTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.CreateTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_UpdateTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.UpdateTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_DeleteTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.DeleteTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}

func ExampleClient_GetTransferConfig() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.GetTransferConfigRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetTransferConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_ListTransferConfigs() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.ListTransferConfigsRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListTransferConfigs(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

func ExampleClient_ScheduleTransferRuns() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.ScheduleTransferRunsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ScheduleTransferRuns(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_GetTransferRun() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.GetTransferRunRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetTransferRun(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_DeleteTransferRun() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.DeleteTransferRunRequest{
		// TODO: Fill request struct fields.
	}
	err = c.DeleteTransferRun(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}

func ExampleClient_ListTransferRuns() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.ListTransferRunsRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListTransferRuns(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

func ExampleClient_ListTransferLogs() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.ListTransferLogsRequest{
		// TODO: Fill request struct fields.
	}
	it := c.ListTransferLogs(ctx, req)
	for {
		resp, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			// TODO: Handle error.
		}
		// TODO: Use resp.
		_ = resp
	}
}

func ExampleClient_CheckValidCreds() {
	ctx := context.Background()
	c, err := datatransfer.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &datatransferpb.CheckValidCredsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CheckValidCreds(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
@ -0,0 +1,49 @@
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

// Package datatransfer is an auto-generated package for the
// BigQuery Data Transfer API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// Transfers data from partner SaaS applications to Google BigQuery on a
// scheduled, managed basis.
package datatransfer // import "cloud.google.com/go/bigquery/datatransfer/apiv1"

import (
	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	out, _ := metadata.FromOutgoingContext(ctx)
	out = out.Copy()
	for _, md := range mds {
		for k, v := range md {
			out[k] = append(out[k], v...)
		}
	}
	return metadata.NewOutgoingContext(ctx, out)
}
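Note that insertMetadata appends the client's x-goog-* pairs to any metadata the caller already attached, rather than replacing it. A small illustration of the merge semantics (key and values hypothetical):

	ctx := metadata.NewOutgoingContext(context.Background(), metadata.Pairs("k", "caller"))
	ctx = insertMetadata(ctx, metadata.Pairs("k", "client"))
	// The outgoing metadata now carries k: ["caller", "client"].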
// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
	return []string{
		"https://www.googleapis.com/auth/bigquery",
		"https://www.googleapis.com/auth/cloud-platform",
		"https://www.googleapis.com/auth/cloud-platform.read-only",
	}
}
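When narrower credentials suffice, the defaults can be overridden at construction time; a sketch assuming the standard option package:

	c, err := datatransfer.NewClient(ctx,
		option.WithScopes("https://www.googleapis.com/auth/bigquery"))
	if err != nil {
		// TODO: Handle error.
	}
	_ = c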
1146
vendor/cloud.google.com/go/bigquery/datatransfer/apiv1/mock_test.go
generated
vendored
Normal file
File diff suppressed because it is too large
@ -124,7 +124,7 @@ These methods create references to datasets, not the datasets themselves. You ca
a dataset reference even if the dataset doesn't exist yet. Use Dataset.Create to
create a dataset from a reference:

	if err := myDataset.Create(ctx); err != nil {
	if err := myDataset.Create(ctx, nil); err != nil {
		// TODO: Handle error.
	}

@ -134,9 +134,10 @@ to an object in BigQuery that may or may not exist.
	table := myDataset.Table("my_table")

You can create, delete and update the metadata of tables with methods on Table.
Table.Create supports a few options. For instance, you could create a temporary table with:
For instance, you could create a temporary table with:

	err = myDataset.Table("temp").Create(ctx, bigquery.TableExpiration(time.Now().Add(1*time.Hour)))
	err = myDataset.Table("temp").Create(ctx, &bigquery.TableMetadata{
		ExpirationTime: time.Now().Add(1*time.Hour)})
	if err != nil {
		// TODO: Handle error.
	}

@ -166,22 +167,23 @@ Or you can infer the schema from a struct:
	// schema1 and schema2 are identical.

Struct inference supports tags like those of the encoding/json package,
so you can change names or ignore fields:
so you can change names, ignore fields, or mark a field as nullable (non-required):

	type student2 struct {
	Name string `bigquery:"full_name"`
	Grades []int
	Secret string `bigquery:"-"`
		Name     string `bigquery:"full_name"`
		Grades   []int
		Secret   string `bigquery:"-"`
		Optional int    `bigquery:",nullable"`
	}
	schema3, err := bigquery.InferSchema(student2{})
	if err != nil {
		// TODO: Handle error.
	}
	// schema3 has fields "full_name" and "Grade".
	// schema3 has required fields "full_name", "Grade" and nullable field "Optional".

Having constructed a schema, you can pass it to Table.Create as an option:
Having constructed a schema, you can create a table with it like so:

	if err := table.Create(ctx, schema1); err != nil {
	if err := table.Create(ctx, &bigquery.TableMetadata{Schema: schema1}); err != nil {
		// TODO: Handle error.
	}
@ -30,7 +30,7 @@ func (e Error) Error() string {
	return fmt.Sprintf("{Location: %q; Message: %q; Reason: %q}", e.Location, e.Message, e.Reason)
}

func errorFromErrorProto(ep *bq.ErrorProto) *Error {
func bqToError(ep *bq.ErrorProto) *Error {
	if ep == nil {
		return nil
	}
@ -95,7 +95,7 @@ func TestErrorFromErrorProto(t *testing.T) {
			want: &Error{Location: "L", Message: "M", Reason: "R"},
		},
	} {
		if got := errorFromErrorProto(test.in); !testutil.Equal(got, test.want) {
		if got := bqToError(test.in); !testutil.Equal(got, test.want) {
			t.Errorf("%v: got %v, want %v", test.in, got, test.want)
		}
	}
@ -86,7 +86,18 @@ func ExampleClient_JobFromID() {
|
|||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
fmt.Println(job)
|
||||
fmt.Println(job.LastStatus()) // Display the job's status.
|
||||
}
|
||||
|
||||
func ExampleClient_Jobs() {
|
||||
ctx := context.Background()
|
||||
client, err := bigquery.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
it := client.Jobs(ctx)
|
||||
it.State = bigquery.Running // list only running jobs.
|
||||
_ = it // TODO: iterate using Next or iterator.Pager.
|
||||
}
|
||||
|
||||
func ExampleNewGCSReference() {
|
||||
|
@ -228,6 +239,25 @@ func ExampleJob_Wait() {
|
|||
}
|
||||
}
|
||||
|
||||
func ExampleJob_Config() {
|
||||
ctx := context.Background()
|
||||
client, err := bigquery.NewClient(ctx, "project-id")
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
ds := client.Dataset("my_dataset")
|
||||
job, err := ds.Table("t1").CopierFrom(ds.Table("t2")).Run(ctx)
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
jc, err := job.Config()
|
||||
if err != nil {
|
||||
// TODO: Handle error.
|
||||
}
|
||||
copyConfig := jc.(*bigquery.CopyConfig)
|
||||
fmt.Println(copyConfig.Dst, copyConfig.CreateDisposition)
|
||||
}
|
||||
|
||||
func ExampleDataset_Create() {
|
||||
ctx := context.Background()
|
||||
client, err := bigquery.NewClient(ctx, "project-id")
|
||||
|
@@ -365,10 +395,11 @@ func ExampleInferSchema() {

func ExampleInferSchema_tags() {
    type Item struct {
        Name   string
        Size   float64
        Count  int    `bigquery:"number"`
        Secret []byte `bigquery:"-"`
        Name     string
        Size     float64
        Count    int    `bigquery:"number"`
        Secret   []byte `bigquery:"-"`
        Optional bool   `bigquery:",nullable"`
    }
    schema, err := bigquery.InferSchema(Item{})
    if err != nil {

@@ -376,12 +407,13 @@ func ExampleInferSchema_tags() {
        // TODO: Handle error.
    }
    for _, fs := range schema {
        fmt.Println(fs.Name, fs.Type)
        fmt.Println(fs.Name, fs.Type, fs.Required)
    }
    // Output:
    // Name STRING
    // Size FLOAT
    // number INTEGER
    // Name STRING true
    // Size FLOAT true
    // number INTEGER true
    // Optional BOOLEAN false
}

func ExampleTable_Create() {

@@ -0,0 +1,398 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "encoding/base64"
    "unicode/utf8"

    bq "google.golang.org/api/bigquery/v2"
)

// DataFormat describes the format of BigQuery table data.
type DataFormat string

// Constants describing the format of BigQuery table data.
const (
    CSV             DataFormat = "CSV"
    Avro            DataFormat = "AVRO"
    JSON            DataFormat = "NEWLINE_DELIMITED_JSON"
    DatastoreBackup DataFormat = "DATASTORE_BACKUP"
    GoogleSheets    DataFormat = "GOOGLE_SHEETS"
    Bigtable        DataFormat = "BIGTABLE"
)

// ExternalData is a table which is stored outside of BigQuery. It is implemented by
// *ExternalDataConfig.
// GCSReference also implements it, for backwards compatibility.
type ExternalData interface {
    toBQ() bq.ExternalDataConfiguration
}

// ExternalDataConfig describes data external to BigQuery that can be used
// in queries and to create external tables.
type ExternalDataConfig struct {
    // The format of the data. Required.
    SourceFormat DataFormat

    // The fully-qualified URIs that point to your
    // data in Google Cloud. Required.
    //
    // For Google Cloud Storage URIs, each URI can contain one '*' wildcard character
    // and it must come after the 'bucket' name. Size limits related to load jobs
    // apply to external data sources.
    //
    // For Google Cloud Bigtable URIs, exactly one URI can be specified and it has to be
    // a fully specified and valid HTTPS URL for a Google Cloud Bigtable table.
    //
    // For Google Cloud Datastore backups, exactly one URI can be specified. Also,
    // the '*' wildcard character is not allowed.
    SourceURIs []string

    // The schema of the data. Required for CSV and JSON; disallowed for the
    // other formats.
    Schema Schema

    // Try to detect schema and format options automatically.
    // Any option specified explicitly will be honored.
    AutoDetect bool

    // The compression type of the data.
    Compression Compression

    // IgnoreUnknownValues causes values not matching the schema to be
    // tolerated. Unknown values are ignored. For CSV this ignores extra values
    // at the end of a line. For JSON this ignores named values that do not
    // match any column name. If this field is not set, records containing
    // unknown values are treated as bad records. The MaxBadRecords field can
    // be used to customize how bad records are handled.
    IgnoreUnknownValues bool

    // MaxBadRecords is the maximum number of bad records that will be ignored
    // when reading data.
    MaxBadRecords int64

    // Additional options for CSV, GoogleSheets and Bigtable formats.
    Options ExternalDataConfigOptions
}

func (e *ExternalDataConfig) toBQ() bq.ExternalDataConfiguration {
    q := bq.ExternalDataConfiguration{
        SourceFormat:        string(e.SourceFormat),
        SourceUris:          e.SourceURIs,
        Autodetect:          e.AutoDetect,
        Compression:         string(e.Compression),
        IgnoreUnknownValues: e.IgnoreUnknownValues,
        MaxBadRecords:       e.MaxBadRecords,
    }
    if e.Schema != nil {
        q.Schema = e.Schema.toBQ()
    }
    if e.Options != nil {
        e.Options.populateExternalDataConfig(&q)
    }
    return q
}

func bqToExternalDataConfig(q *bq.ExternalDataConfiguration) (*ExternalDataConfig, error) {
    e := &ExternalDataConfig{
        SourceFormat:        DataFormat(q.SourceFormat),
        SourceURIs:          q.SourceUris,
        AutoDetect:          q.Autodetect,
        Compression:         Compression(q.Compression),
        IgnoreUnknownValues: q.IgnoreUnknownValues,
        MaxBadRecords:       q.MaxBadRecords,
        Schema:              bqToSchema(q.Schema),
    }
    switch {
    case q.CsvOptions != nil:
        e.Options = bqToCSVOptions(q.CsvOptions)
    case q.GoogleSheetsOptions != nil:
        e.Options = bqToGoogleSheetsOptions(q.GoogleSheetsOptions)
    case q.BigtableOptions != nil:
        var err error
        e.Options, err = bqToBigtableOptions(q.BigtableOptions)
        if err != nil {
            return nil, err
        }
    }
    return e, nil
}

// ExternalDataConfigOptions are additional options for external data configurations.
// This interface is implemented by CSVOptions, GoogleSheetsOptions and BigtableOptions.
type ExternalDataConfigOptions interface {
    populateExternalDataConfig(*bq.ExternalDataConfiguration)
}

// CSVOptions are additional options for CSV external data sources.
type CSVOptions struct {
    // AllowJaggedRows causes missing trailing optional columns to be tolerated
    // when reading CSV data. Missing values are treated as nulls.
    AllowJaggedRows bool

    // AllowQuotedNewlines sets whether quoted data sections containing
    // newlines are allowed when reading CSV data.
    AllowQuotedNewlines bool

    // Encoding is the character encoding of data to be read.
    Encoding Encoding

    // FieldDelimiter is the separator for fields in a CSV file, used when
    // reading or exporting data. The default is ",".
    FieldDelimiter string

    // Quote is the value used to quote data sections in a CSV file. The
    // default quotation character is the double quote ("), which is used if
    // both Quote and ForceZeroQuote are unset.
    // To specify that no character should be interpreted as a quotation
    // character, set ForceZeroQuote to true.
    // Only used when reading data.
    Quote          string
    ForceZeroQuote bool

    // The number of rows at the top of a CSV file that BigQuery will skip when
    // reading data.
    SkipLeadingRows int64
}

func (o *CSVOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
    c.CsvOptions = &bq.CsvOptions{
        AllowJaggedRows:     o.AllowJaggedRows,
        AllowQuotedNewlines: o.AllowQuotedNewlines,
        Encoding:            string(o.Encoding),
        FieldDelimiter:      o.FieldDelimiter,
        Quote:               o.quote(),
        SkipLeadingRows:     o.SkipLeadingRows,
    }
}

// quote returns the CSV quote character, or nil if unset.
func (o *CSVOptions) quote() *string {
    if o.ForceZeroQuote {
        quote := ""
        return &quote
    }
    if o.Quote == "" {
        return nil
    }
    return &o.Quote
}

func (o *CSVOptions) setQuote(ps *string) {
    if ps != nil {
        o.Quote = *ps
        if o.Quote == "" {
            o.ForceZeroQuote = true
        }
    }
}

func bqToCSVOptions(q *bq.CsvOptions) *CSVOptions {
    o := &CSVOptions{
        AllowJaggedRows:     q.AllowJaggedRows,
        AllowQuotedNewlines: q.AllowQuotedNewlines,
        Encoding:            Encoding(q.Encoding),
        FieldDelimiter:      q.FieldDelimiter,
        SkipLeadingRows:     q.SkipLeadingRows,
    }
    o.setQuote(q.Quote)
    return o
}

// GoogleSheetsOptions are additional options for GoogleSheets external data sources.
type GoogleSheetsOptions struct {
    // The number of rows at the top of a sheet that BigQuery will skip when
    // reading data.
    SkipLeadingRows int64
}

func (o *GoogleSheetsOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
    c.GoogleSheetsOptions = &bq.GoogleSheetsOptions{
        SkipLeadingRows: o.SkipLeadingRows,
    }
}

func bqToGoogleSheetsOptions(q *bq.GoogleSheetsOptions) *GoogleSheetsOptions {
    return &GoogleSheetsOptions{
        SkipLeadingRows: q.SkipLeadingRows,
    }
}

// BigtableOptions are additional options for Bigtable external data sources.
type BigtableOptions struct {
    // A list of column families to expose in the table schema along with their
    // types. If omitted, all column families are present in the table schema and
    // their values are read as BYTES.
    ColumnFamilies []*BigtableColumnFamily

    // If true, then the column families that are not specified in columnFamilies
    // list are not exposed in the table schema. Otherwise, they are read with BYTES
    // type values. The default is false.
    IgnoreUnspecifiedColumnFamilies bool

    // If true, then the rowkey column families will be read and converted to string.
    // Otherwise they are read with BYTES type values and users need to manually cast
    // them with CAST if necessary. The default is false.
    ReadRowkeyAsString bool
}

func (o *BigtableOptions) populateExternalDataConfig(c *bq.ExternalDataConfiguration) {
    q := &bq.BigtableOptions{
        IgnoreUnspecifiedColumnFamilies: o.IgnoreUnspecifiedColumnFamilies,
        ReadRowkeyAsString:              o.ReadRowkeyAsString,
    }
    for _, f := range o.ColumnFamilies {
        q.ColumnFamilies = append(q.ColumnFamilies, f.toBQ())
    }
    c.BigtableOptions = q
}

func bqToBigtableOptions(q *bq.BigtableOptions) (*BigtableOptions, error) {
    b := &BigtableOptions{
        IgnoreUnspecifiedColumnFamilies: q.IgnoreUnspecifiedColumnFamilies,
        ReadRowkeyAsString:              q.ReadRowkeyAsString,
    }
    for _, f := range q.ColumnFamilies {
        f2, err := bqToBigtableColumnFamily(f)
        if err != nil {
            return nil, err
        }
        b.ColumnFamilies = append(b.ColumnFamilies, f2)
    }
    return b, nil
}

// BigtableColumnFamily describes how BigQuery should access a Bigtable column family.
type BigtableColumnFamily struct {
    // Identifier of the column family.
    FamilyID string

    // Lists of columns that should be exposed as individual fields as opposed to a
    // list of (column name, value) pairs. All columns whose qualifier matches a
    // qualifier in this list can be accessed as .. Other columns can be accessed as
    // a list through .Column field.
    Columns []*BigtableColumn

    // The encoding of the values when the type is not STRING. Acceptable encoding values are:
    // - TEXT - indicates values are alphanumeric text strings.
    // - BINARY - indicates values are encoded using HBase Bytes.toBytes family of functions.
    // This can be overridden for a specific column by listing that column in 'columns' and
    // specifying an encoding for it.
    Encoding string

    // If true, only the latest version of values are exposed for all columns in this
    // column family. This can be overridden for a specific column by listing that
    // column in 'columns' and specifying a different setting for that column.
    OnlyReadLatest bool

    // The type to convert the value in cells of this
    // column family. The values are expected to be encoded using HBase
    // Bytes.toBytes function when using the BINARY encoding value.
    // Following BigQuery types are allowed (case-sensitive):
    // BYTES STRING INTEGER FLOAT BOOLEAN.
    // The default type is BYTES. This can be overridden for a specific column by
    // listing that column in 'columns' and specifying a type for it.
    Type string
}

func (b *BigtableColumnFamily) toBQ() *bq.BigtableColumnFamily {
    q := &bq.BigtableColumnFamily{
        FamilyId:       b.FamilyID,
        Encoding:       b.Encoding,
        OnlyReadLatest: b.OnlyReadLatest,
        Type:           b.Type,
    }
    for _, col := range b.Columns {
        q.Columns = append(q.Columns, col.toBQ())
    }
    return q
}

func bqToBigtableColumnFamily(q *bq.BigtableColumnFamily) (*BigtableColumnFamily, error) {
    b := &BigtableColumnFamily{
        FamilyID:       q.FamilyId,
        Encoding:       q.Encoding,
        OnlyReadLatest: q.OnlyReadLatest,
        Type:           q.Type,
    }
    for _, col := range q.Columns {
        c, err := bqToBigtableColumn(col)
        if err != nil {
            return nil, err
        }
        b.Columns = append(b.Columns, c)
    }
    return b, nil
}

// BigtableColumn describes how BigQuery should access a Bigtable column.
type BigtableColumn struct {
    // Qualifier of the column. Columns in the parent column family that have this
    // exact qualifier are exposed as . field. The column field name is the
    // same as the column qualifier.
    Qualifier string

    // If the qualifier is not a valid BigQuery field identifier i.e. does not match
    // [a-zA-Z][a-zA-Z0-9_]*, a valid identifier must be provided as the column field
    // name and is used as field name in queries.
    FieldName string

    // If true, only the latest version of values are exposed for this column.
    // See BigtableColumnFamily.OnlyReadLatest.
    OnlyReadLatest bool

    // The encoding of the values when the type is not STRING.
    // See BigtableColumnFamily.Encoding
    Encoding string

    // The type to convert the value in cells of this column.
    // See BigtableColumnFamily.Type
    Type string
}

func (b *BigtableColumn) toBQ() *bq.BigtableColumn {
    q := &bq.BigtableColumn{
        FieldName:      b.FieldName,
        OnlyReadLatest: b.OnlyReadLatest,
        Encoding:       b.Encoding,
        Type:           b.Type,
    }
    if utf8.ValidString(b.Qualifier) {
        q.QualifierString = b.Qualifier
    } else {
        q.QualifierEncoded = base64.RawStdEncoding.EncodeToString([]byte(b.Qualifier))
    }
    return q
}

func bqToBigtableColumn(q *bq.BigtableColumn) (*BigtableColumn, error) {
    b := &BigtableColumn{
        FieldName:      q.FieldName,
        OnlyReadLatest: q.OnlyReadLatest,
        Encoding:       q.Encoding,
        Type:           q.Type,
    }
    if q.QualifierString != "" {
        b.Qualifier = q.QualifierString
    } else {
        bytes, err := base64.RawStdEncoding.DecodeString(q.QualifierEncoded)
        if err != nil {
            return nil, err
        }
        b.Qualifier = string(bytes)
    }
    return b, nil
}

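As a usage sketch of the new type (bucket and object names are placeholders; the pattern mirrors TestIntegration_ExtractExternal further down), an *ExternalDataConfig can back a temporary table in a query via Query.TableDefinitions:

    edc := &ExternalDataConfig{
        SourceFormat: CSV,
        SourceURIs:   []string{"gs://my-bucket/data-*.csv"},
        Schema: Schema{
            {Name: "name", Type: StringFieldType},
            {Name: "num", Type: IntegerFieldType},
        },
        Options: &CSVOptions{SkipLeadingRows: 1},
    }
    q := client.Query("SELECT * FROM csv")
    q.TableDefinitions = map[string]ExternalData{"csv": edc}
    // Read results with q.Read(ctx) as usual.
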
@@ -0,0 +1,143 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
    "testing"

    "cloud.google.com/go/internal/pretty"
    "cloud.google.com/go/internal/testutil"
)

func TestExternalDataConfig(t *testing.T) {
    // Round-trip of ExternalDataConfig to underlying representation.
    for i, want := range []*ExternalDataConfig{
        {
            SourceFormat:        CSV,
            SourceURIs:          []string{"uri"},
            Schema:              Schema{{Name: "n", Type: IntegerFieldType}},
            AutoDetect:          true,
            Compression:         Gzip,
            IgnoreUnknownValues: true,
            MaxBadRecords:       17,
            Options: &CSVOptions{
                AllowJaggedRows:     true,
                AllowQuotedNewlines: true,
                Encoding:            UTF_8,
                FieldDelimiter:      "f",
                Quote:               "q",
                SkipLeadingRows:     3,
            },
        },
        {
            SourceFormat: GoogleSheets,
            Options:      &GoogleSheetsOptions{SkipLeadingRows: 4},
        },
        {
            SourceFormat: Bigtable,
            Options: &BigtableOptions{
                IgnoreUnspecifiedColumnFamilies: true,
                ReadRowkeyAsString:              true,
                ColumnFamilies: []*BigtableColumnFamily{
                    {
                        FamilyID:       "f1",
                        Encoding:       "TEXT",
                        OnlyReadLatest: true,
                        Type:           "FLOAT",
                        Columns: []*BigtableColumn{
                            {
                                Qualifier:      "valid-utf-8",
                                FieldName:      "fn",
                                OnlyReadLatest: true,
                                Encoding:       "BINARY",
                                Type:           "STRING",
                            },
                        },
                    },
                },
            },
        },
    } {
        q := want.toBQ()
        got, err := bqToExternalDataConfig(&q)
        if err != nil {
            t.Fatal(err)
        }
        if diff := testutil.Diff(got, want); diff != "" {
            t.Errorf("#%d: got=-, want=+:\n%s", i, diff)
        }
    }
}

func TestQuote(t *testing.T) {
    ptr := func(s string) *string { return &s }

    for _, test := range []struct {
        quote string
        force bool
        want  *string
    }{
        {"", false, nil},
        {"", true, ptr("")},
        {"-", false, ptr("-")},
        {"-", true, ptr("")},
    } {
        o := CSVOptions{
            Quote:          test.quote,
            ForceZeroQuote: test.force,
        }
        got := o.quote()
        if (got == nil) != (test.want == nil) {
            t.Errorf("%+v\ngot %v\nwant %v", test, pretty.Value(got), pretty.Value(test.want))
        }
        if got != nil && test.want != nil && *got != *test.want {
            t.Errorf("%+v: got %q, want %q", test, *got, *test.want)
        }
    }
}

func TestQualifier(t *testing.T) {
    b := BigtableColumn{Qualifier: "a"}
    q := b.toBQ()
    if q.QualifierString != b.Qualifier || q.QualifierEncoded != "" {
        t.Errorf("got (%q, %q), want (%q, %q)",
            q.QualifierString, q.QualifierEncoded, b.Qualifier, "")
    }
    b2, err := bqToBigtableColumn(q)
    if err != nil {
        t.Fatal(err)
    }
    if got, want := b2.Qualifier, b.Qualifier; got != want {
        t.Errorf("got %q, want %q", got, want)
    }

    const (
        invalidUTF8    = "\xDF\xFF"
        invalidEncoded = "3/8"
    )
    b = BigtableColumn{Qualifier: invalidUTF8}
    q = b.toBQ()
    if q.QualifierString != "" || q.QualifierEncoded != invalidEncoded {
        t.Errorf("got (%q, %q), want (%q, %q)",
            q.QualifierString, "", b.Qualifier, invalidEncoded)
    }
    b2, err = bqToBigtableColumn(q)
    if err != nil {
        t.Fatal(err)
    }
    if got, want := b2.Qualifier, b.Qualifier; got != want {
        t.Errorf("got %q, want %q", got, want)
    }
}

@@ -21,12 +21,6 @@ import (

// ExtractConfig holds the configuration for an extract job.
type ExtractConfig struct {
    // JobID is the ID to use for the job. If empty, a random job ID will be generated.
    JobID string

    // If AddJobIDSuffix is true, then a random string will be appended to JobID.
    AddJobIDSuffix bool

    // Src is the table from which data will be extracted.
    Src *Table

@@ -35,10 +29,52 @@ type ExtractConfig struct {

    // DisableHeader disables the printing of a header row in exported data.
    DisableHeader bool

    // The labels associated with this job.
    Labels map[string]string
}

func (e *ExtractConfig) toBQ() *bq.JobConfiguration {
    var printHeader *bool
    if e.DisableHeader {
        f := false
        printHeader = &f
    }
    return &bq.JobConfiguration{
        Labels: e.Labels,
        Extract: &bq.JobConfigurationExtract{
            DestinationUris:   append([]string{}, e.Dst.URIs...),
            Compression:       string(e.Dst.Compression),
            DestinationFormat: string(e.Dst.DestinationFormat),
            FieldDelimiter:    e.Dst.FieldDelimiter,
            SourceTable:       e.Src.toBQ(),
            PrintHeader:       printHeader,
        },
    }
}

func bqToExtractConfig(q *bq.JobConfiguration, c *Client) *ExtractConfig {
    qe := q.Extract
    return &ExtractConfig{
        Labels: q.Labels,
        Dst: &GCSReference{
            URIs:              qe.DestinationUris,
            Compression:       Compression(qe.Compression),
            DestinationFormat: DataFormat(qe.DestinationFormat),
            FileConfig: FileConfig{
                CSVOptions: CSVOptions{
                    FieldDelimiter: qe.FieldDelimiter,
                },
            },
        },
        DisableHeader: qe.PrintHeader != nil && !*qe.PrintHeader,
        Src:           bqToTable(qe.SourceTable, c),
    }
}

// An Extractor extracts data from a BigQuery table into Google Cloud Storage.
type Extractor struct {
    JobIDConfig
    ExtractConfig
    c *Client
}

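A brief caller-side sketch of what this refactoring enables (assumed names; the round-trip is verified by TestIntegration_ExtractExternal below): options set on an Extractor come back from Job.Config as an *ExtractConfig:

    gcs := NewGCSReference("gs://my-bucket/out-*.csv") // placeholder URI
    e := table.ExtractorTo(gcs)
    e.DisableHeader = true
    job, err := e.Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    conf, err := job.Config()
    if err != nil {
        // TODO: Handle error.
    }
    fmt.Println(conf.(*ExtractConfig).DisableHeader) // true
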
@@ -58,23 +94,12 @@ func (t *Table) ExtractorTo(dst *GCSReference) *Extractor {

// Run initiates an extract job.
func (e *Extractor) Run(ctx context.Context) (*Job, error) {
    var printHeader *bool
    if e.DisableHeader {
        f := false
        printHeader = &f
    }
    job := &bq.Job{
        JobReference: createJobRef(e.JobID, e.AddJobIDSuffix, e.c.projectID),
        Configuration: &bq.JobConfiguration{
            Extract: &bq.JobConfigurationExtract{
                DestinationUris:   append([]string{}, e.Dst.uris...),
                Compression:       string(e.Dst.Compression),
                DestinationFormat: string(e.Dst.DestinationFormat),
                FieldDelimiter:    e.Dst.FieldDelimiter,
                SourceTable:       e.Src.tableRefProto(),
                PrintHeader:       printHeader,
            },
        },
    }
    return e.c.insertJob(ctx, &insertJobConf{job: job})
    return e.c.insertJob(ctx, e.newJob(), nil)
}

func (e *Extractor) newJob() *bq.Job {
    return &bq.Job{
        JobReference:  e.JobIDConfig.createJobRef(e.c.projectID),
        Configuration: e.ExtractConfig.toBQ(),
    }
}

@@ -17,7 +17,9 @@ package bigquery
import (
    "testing"

    "golang.org/x/net/context"
    "github.com/google/go-cmp/cmp"

    "cloud.google.com/go/internal/testutil"

    bq "google.golang.org/api/bigquery/v2"
)

@@ -38,11 +40,15 @@ func defaultExtractJob() *bq.Job {
    }
}

func defaultGCS() *GCSReference {
    return &GCSReference{
        URIs: []string{"uri"},
    }
}

func TestExtract(t *testing.T) {
    defer fixRandomJobID("RANDOM")()
    s := &testService{}
    defer fixRandomID("RANDOM")()
    c := &Client{
        service:   s,
        projectID: "client-project-id",
    }

@@ -58,11 +64,15 @@ func TestExtract(t *testing.T) {
        want: defaultExtractJob(),
    },
    {
        dst:    defaultGCS(),
        src:    c.Dataset("dataset-id").Table("table-id"),
        config: ExtractConfig{DisableHeader: true},
        dst: defaultGCS(),
        src: c.Dataset("dataset-id").Table("table-id"),
        config: ExtractConfig{
            DisableHeader: true,
            Labels:        map[string]string{"a": "b"},
        },
        want: func() *bq.Job {
            j := defaultExtractJob()
            j.Configuration.Labels = map[string]string{"a": "b"}
            f := false
            j.Configuration.Extract.PrintHeader = &f
            return j

@@ -92,10 +102,17 @@ func TestExtract(t *testing.T) {
        tc.config.Src = ext.Src
        tc.config.Dst = ext.Dst
        ext.ExtractConfig = tc.config
        if _, err := ext.Run(context.Background()); err != nil {
            t.Errorf("#%d: err calling extract: %v", i, err)
            continue
        got := ext.newJob()
        checkJob(t, i, got, tc.want)

        jc, err := bqToJobConfig(got.Configuration, c)
        if err != nil {
            t.Fatalf("#%d: %v", i, err)
        }
        diff := testutil.Diff(jc, &ext.ExtractConfig,
            cmp.AllowUnexported(Table{}, Client{}))
        if diff != "" {
            t.Errorf("#%d: got=-, want=+:\n%s", i, diff)
        }
        checkJob(t, i, s.Job, tc.want)
    }
}

@@ -22,6 +22,10 @@ import (

// A ReaderSource is a source for a load operation that gets
// data from an io.Reader.
//
// When a ReaderSource is part of a LoadConfig obtained via Job.Config,
// its internal io.Reader will be nil, so it cannot be used for a
// subsequent load operation.
type ReaderSource struct {
    r io.Reader
    FileConfig

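A short sketch of the caveat above, assuming an existing *Table named table: a ReaderSource can drive one load job, but the copy recovered from Job.Config carries a nil io.Reader and cannot be re-run:

    rs := NewReaderSource(strings.NewReader("name,num\na,1\n"))
    rs.SourceFormat = CSV
    rs.SkipLeadingRows = 1
    job, err := table.LoaderFrom(rs).Run(ctx)
    if err != nil {
        // TODO: Handle error.
    }
    // job.Config() returns a *LoadConfig whose ReaderSource has a nil reader.
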
@@ -34,9 +38,9 @@ func NewReaderSource(r io.Reader) *ReaderSource {
    return &ReaderSource{r: r}
}

func (r *ReaderSource) populateInsertJobConfForLoad(conf *insertJobConf) {
    conf.media = r.r
    r.FileConfig.populateLoadConfig(conf.job.Configuration.Load)
func (r *ReaderSource) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
    r.FileConfig.populateLoadConfig(lc)
    return r.r
}

// FileConfig contains configuration options that pertain to files, typically

@@ -48,29 +52,10 @@ type FileConfig struct {
    // Allowed values are: CSV, Avro, JSON, DatastoreBackup. The default is CSV.
    SourceFormat DataFormat

    // FieldDelimiter is the separator for fields in a CSV file, used when
    // reading or exporting data. The default is ",".
    FieldDelimiter string

    // The number of rows at the top of a CSV file that BigQuery will skip when
    // reading data.
    SkipLeadingRows int64

    // AllowJaggedRows causes missing trailing optional columns to be tolerated
    // when reading CSV data. Missing values are treated as nulls.
    AllowJaggedRows bool

    // AllowQuotedNewlines sets whether quoted data sections containing
    // newlines are allowed when reading CSV data.
    AllowQuotedNewlines bool

    // Indicates if we should automatically infer the options and
    // schema for CSV and JSON sources.
    AutoDetect bool

    // Encoding is the character encoding of data to be read.
    Encoding Encoding

    // MaxBadRecords is the maximum number of bad records that will be ignored
    // when reading data.
    MaxBadRecords int64

@@ -87,26 +72,8 @@ type FileConfig struct {
    // unless the data is being loaded into a table that already exists.
    Schema Schema

    // Quote is the value used to quote data sections in a CSV file. The
    // default quotation character is the double quote ("), which is used if
    // both Quote and ForceZeroQuote are unset.
    // To specify that no character should be interpreted as a quotation
    // character, set ForceZeroQuote to true.
    // Only used when reading data.
    Quote          string
    ForceZeroQuote bool
}

// quote returns the CSV quote character, or nil if unset.
func (fc *FileConfig) quote() *string {
    if fc.ForceZeroQuote {
        quote := ""
        return &quote
    }
    if fc.Quote == "" {
        return nil
    }
    return &fc.Quote
    // Additional options for CSV files.
    CSVOptions
}

func (fc *FileConfig) populateLoadConfig(conf *bq.JobConfigurationLoad) {

@@ -120,47 +87,43 @@ func (fc *FileConfig) populateLoadConfig(conf *bq.JobConfigurationLoad) {
    conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
    conf.MaxBadRecords = fc.MaxBadRecords
    if fc.Schema != nil {
        conf.Schema = fc.Schema.asTableSchema()
        conf.Schema = fc.Schema.toBQ()
    }
    conf.Quote = fc.quote()
}

func bqPopulateFileConfig(conf *bq.JobConfigurationLoad, fc *FileConfig) {
    fc.SourceFormat = DataFormat(conf.SourceFormat)
    fc.AutoDetect = conf.Autodetect
    fc.MaxBadRecords = conf.MaxBadRecords
    fc.IgnoreUnknownValues = conf.IgnoreUnknownValues
    fc.Schema = bqToSchema(conf.Schema)
    fc.SkipLeadingRows = conf.SkipLeadingRows
    fc.AllowJaggedRows = conf.AllowJaggedRows
    fc.AllowQuotedNewlines = conf.AllowQuotedNewlines
    fc.Encoding = Encoding(conf.Encoding)
    fc.FieldDelimiter = conf.FieldDelimiter
    fc.CSVOptions.setQuote(conf.Quote)
}

func (fc *FileConfig) populateExternalDataConfig(conf *bq.ExternalDataConfiguration) {
    format := fc.SourceFormat
    if format == "" {
        // Format must be explicitly set for external data sources.
        format = CSV
    }
    // TODO(jba): support AutoDetect.
    conf.Autodetect = fc.AutoDetect
    conf.IgnoreUnknownValues = fc.IgnoreUnknownValues
    conf.MaxBadRecords = fc.MaxBadRecords
    conf.SourceFormat = string(format)
    if fc.Schema != nil {
        conf.Schema = fc.Schema.asTableSchema()
        conf.Schema = fc.Schema.toBQ()
    }
    if format == CSV {
        conf.CsvOptions = &bq.CsvOptions{
            AllowJaggedRows:     fc.AllowJaggedRows,
            AllowQuotedNewlines: fc.AllowQuotedNewlines,
            Encoding:            string(fc.Encoding),
            FieldDelimiter:      fc.FieldDelimiter,
            SkipLeadingRows:     fc.SkipLeadingRows,
            Quote:               fc.quote(),
        }
        fc.CSVOptions.populateExternalDataConfig(conf)
    }
}

// DataFormat describes the format of BigQuery table data.
type DataFormat string

// Constants describing the format of BigQuery table data.
const (
    CSV             DataFormat = "CSV"
    Avro            DataFormat = "AVRO"
    JSON            DataFormat = "NEWLINE_DELIMITED_JSON"
    DatastoreBackup DataFormat = "DATASTORE_BACKUP"
)

// Encoding specifies the character encoding of data to be loaded into BigQuery.
// See https://cloud.google.com/bigquery/docs/reference/v2/jobs#configuration.load.encoding
// for more details about how this is used.

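Note the tri-state encoding that Quote and ForceZeroQuote map onto, now owned by CSVOptions: a nil pointer means the service default (double quote), a pointer to the empty string disables quoting, and anything else is the literal quote character. A sketch of the mapping quote() implements:

    var o CSVOptions
    o.quote()                // nil  -> service default (")
    o.Quote = "-"
    o.quote()                // &"-" -> quote with '-'
    o.ForceZeroQuote = true
    o.quote()                // &""  -> no quoting at all, regardless of Quote
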
@@ -22,56 +22,36 @@ import (
    bq "google.golang.org/api/bigquery/v2"
)

func TestQuote(t *testing.T) {
    ptr := func(s string) *string { return &s }

    for _, test := range []struct {
        quote string
        force bool
        want  *string
    }{
        {"", false, nil},
        {"", true, ptr("")},
        {"-", false, ptr("-")},
        {"-", true, ptr("")},
    } {
        fc := FileConfig{
            Quote:          test.quote,
            ForceZeroQuote: test.force,
        }
        got := fc.quote()
        if (got == nil) != (test.want == nil) {
            t.Errorf("%+v\ngot %v\nwant %v", test, pretty.Value(got), pretty.Value(test.want))
        }
        if got != nil && test.want != nil && *got != *test.want {
            t.Errorf("%+v: got %q, want %q", test, *got, *test.want)
        }
    }
}

func TestPopulateLoadConfig(t *testing.T) {
    hyphen := "-"
    fc := FileConfig{
var (
    hyphen = "-"
    fc     = FileConfig{
        SourceFormat:        CSV,
        FieldDelimiter:      "\t",
        SkipLeadingRows:     8,
        AllowJaggedRows:     true,
        AllowQuotedNewlines: true,
        Encoding:            UTF_8,
        AutoDetect:          true,
        MaxBadRecords:       7,
        IgnoreUnknownValues: true,
        Schema: Schema{
            stringFieldSchema(),
            nestedFieldSchema(),
        },
        Quote: hyphen,
        CSVOptions: CSVOptions{
            Quote:               hyphen,
            FieldDelimiter:      "\t",
            SkipLeadingRows:     8,
            AllowJaggedRows:     true,
            AllowQuotedNewlines: true,
            Encoding:            UTF_8,
        },
    }
)

func TestFileConfigPopulateLoadConfig(t *testing.T) {
    want := &bq.JobConfigurationLoad{
        SourceFormat:        "CSV",
        FieldDelimiter:      "\t",
        SkipLeadingRows:     8,
        AllowJaggedRows:     true,
        AllowQuotedNewlines: true,
        Autodetect:          true,
        Encoding:            "UTF-8",
        MaxBadRecords:       7,
        IgnoreUnknownValues: true,

@@ -88,3 +68,31 @@ func TestPopulateLoadConfig(t *testing.T) {
        t.Errorf("got:\n%v\nwant:\n%v", pretty.Value(got), pretty.Value(want))
    }
}

func TestFileConfigPopulateExternalDataConfig(t *testing.T) {
    got := &bq.ExternalDataConfiguration{}
    fc.populateExternalDataConfig(got)

    want := &bq.ExternalDataConfiguration{
        SourceFormat:        "CSV",
        Autodetect:          true,
        MaxBadRecords:       7,
        IgnoreUnknownValues: true,
        Schema: &bq.TableSchema{
            Fields: []*bq.TableFieldSchema{
                bqStringFieldSchema(),
                bqNestedFieldSchema(),
            }},
        CsvOptions: &bq.CsvOptions{
            AllowJaggedRows:     true,
            AllowQuotedNewlines: true,
            Encoding:            "UTF-8",
            FieldDelimiter:      "\t",
            Quote:               &hyphen,
            SkipLeadingRows:     8,
        },
    }
    if diff := testutil.Diff(got, want); diff != "" {
        t.Errorf("got=-, want=+:\n%s", diff)
    }
}

@@ -14,13 +14,17 @@

package bigquery

import bq "google.golang.org/api/bigquery/v2"
import (
    "io"

    bq "google.golang.org/api/bigquery/v2"
)

// GCSReference is a reference to one or more Google Cloud Storage objects, which together constitute
// an input or output to a BigQuery operation.
type GCSReference struct {
    // TODO(jba): Export so that GCSReference can be used to hold data from a Job.get api call and expose it to the user.
    uris []string
    // URIs refer to Google Cloud Storage objects.
    URIs []string

    FileConfig

@@ -42,7 +46,7 @@ type GCSReference struct {
// For more information about the treatment of wildcards and multiple URIs,
// see https://cloud.google.com/bigquery/exporting-data-from-bigquery#exportingmultiple
func NewGCSReference(uri ...string) *GCSReference {
    return &GCSReference{uris: uri}
    return &GCSReference{URIs: uri}
}

// Compression is the type of compression to apply when writing data to Google Cloud Storage.

@@ -53,15 +57,16 @@ const (
    Gzip Compression = "GZIP"
)

func (gcs *GCSReference) populateInsertJobConfForLoad(conf *insertJobConf) {
    conf.job.Configuration.Load.SourceUris = gcs.uris
    gcs.FileConfig.populateLoadConfig(conf.job.Configuration.Load)
func (gcs *GCSReference) populateLoadConfig(lc *bq.JobConfigurationLoad) io.Reader {
    lc.SourceUris = gcs.URIs
    gcs.FileConfig.populateLoadConfig(lc)
    return nil
}

func (gcs *GCSReference) externalDataConfig() bq.ExternalDataConfiguration {
func (gcs *GCSReference) toBQ() bq.ExternalDataConfiguration {
    conf := bq.ExternalDataConfiguration{
        Compression: string(gcs.Compression),
        SourceUris:  append([]string{}, gcs.uris...),
        SourceUris:  append([]string{}, gcs.URIs...),
    }
    gcs.FileConfig.populateExternalDataConfig(&conf)
    return conf

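With URIs now exported, a quick sketch of constructing a reference (placeholder bucket name; one '*' wildcard is allowed after the bucket):

    gcs := NewGCSReference("gs://my-bucket/data-*.json")
    gcs.SourceFormat = JSON
    fmt.Println(gcs.URIs) // readable now that the field is exported
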
@@ -26,12 +26,15 @@ import (
    "testing"
    "time"

    "github.com/google/go-cmp/cmp"
    "github.com/google/go-cmp/cmp/cmpopts"
    gax "github.com/googleapis/gax-go"

    "cloud.google.com/go/civil"
    "cloud.google.com/go/internal"
    "cloud.google.com/go/internal/pretty"
    "cloud.google.com/go/internal/testutil"
    "cloud.google.com/go/storage"
    "golang.org/x/net/context"
    "google.golang.org/api/googleapi"
    "google.golang.org/api/iterator"

@@ -39,9 +42,10 @@ import (
)

var (
    client  *Client
    dataset *Dataset
    schema  = Schema{
    client        *Client
    storageClient *storage.Client
    dataset       *Dataset
    schema        = Schema{
        {Name: "name", Type: StringFieldType},
        {Name: "nums", Type: IntegerFieldType, Repeated: true},
        {Name: "rec", Type: RecordFieldType, Schema: Schema{

@@ -49,12 +53,17 @@ var (
        }},
    }
    testTableExpiration time.Time
    datasetIDs = testutil.NewUIDSpace("dataset")
    // BigQuery does not accept hyphens in dataset or table IDs, so we create IDs
    // with underscores.
    datasetIDs = testutil.NewUIDSpaceSep("dataset", '_')
    tableIDs   = testutil.NewUIDSpaceSep("table", '_')
)

func TestMain(m *testing.M) {
    initIntegrationTest()
    os.Exit(m.Run())
    cleanup := initIntegrationTest()
    r := m.Run()
    cleanup()
    os.Exit(r)
}

func getClient(t *testing.T) *Client {

@@ -65,16 +74,16 @@ func getClient(t *testing.T) *Client {
}

// If integration tests will be run, create a unique bucket for them.
func initIntegrationTest() {
func initIntegrationTest() func() {
    flag.Parse() // needed for testing.Short()
    if testing.Short() {
        return
        return func() {}
    }
    ctx := context.Background()
    ts := testutil.TokenSource(ctx, Scope)
    if ts == nil {
        log.Println("Integration tests skipped. See CONTRIBUTING.md for details")
        return
        return func() {}
    }
    projID := testutil.ProjID()
    var err error

@@ -82,13 +91,39 @@ func initIntegrationTest() {
    if err != nil {
        log.Fatalf("NewClient: %v", err)
    }
    dataset = client.Dataset("bigquery_integration_test")
    if err := dataset.Create(ctx, nil); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409
        log.Fatalf("creating dataset: %v", err)
    storageClient, err = storage.NewClient(ctx,
        option.WithTokenSource(testutil.TokenSource(ctx, storage.ScopeFullControl)))
    if err != nil {
        log.Fatalf("storage.NewClient: %v", err)
    }
    dataset = client.Dataset(datasetIDs.New())
    if err := dataset.Create(ctx, nil); err != nil {
        log.Fatalf("creating dataset %s: %v", dataset.DatasetID, err)
    }
    testTableExpiration = time.Now().Add(10 * time.Minute).Round(time.Second)
    return func() {
        if err := deleteDataset(ctx, dataset); err != nil {
            log.Printf("could not delete %s", dataset.DatasetID)
        }
    }
}

func deleteDataset(ctx context.Context, ds *Dataset) error {
    it := ds.Tables(ctx)
    for {
        tbl, err := it.Next()
        if err == iterator.Done {
            break
        }
        if err != nil {
            return err
        }
        if err := tbl.Delete(ctx); err != nil {
            return err
        }
    }
    return ds.Delete(ctx)
}
func TestIntegration_TableCreate(t *testing.T) {
    // Check that creating a record field with an empty schema is an error.
    if client == nil {

@@ -162,16 +197,28 @@ func TestIntegration_TableMetadata(t *testing.T) {

    // Create tables that have time partitioning
    partitionCases := []struct {
        timePartitioning   TimePartitioning
        expectedExpiration time.Duration
        timePartitioning TimePartitioning
        wantExpiration   time.Duration
        wantField        string
    }{
        {TimePartitioning{}, time.Duration(0)},
        {TimePartitioning{time.Second}, time.Second},
        {TimePartitioning{}, time.Duration(0), ""},
        {TimePartitioning{Expiration: time.Second}, time.Second, ""},
        {
            TimePartitioning{
                Expiration: time.Second,
                Field:      "date",
            }, time.Second, "date"},
    }

    schema2 := Schema{
        {Name: "name", Type: StringFieldType},
        {Name: "date", Type: DateFieldType},
    }

    for i, c := range partitionCases {
        table := dataset.Table(fmt.Sprintf("t_metadata_partition_%v", i))
        err = table.Create(context.Background(), &TableMetadata{
            Schema:           schema,
            Schema:           schema2,
            TimePartitioning: &c.timePartitioning,
            ExpirationTime:   time.Now().Add(5 * time.Minute),
        })

@@ -185,7 +232,10 @@ func TestIntegration_TableMetadata(t *testing.T) {
    }

    got := md.TimePartitioning
    want := &TimePartitioning{c.expectedExpiration}
    want := &TimePartitioning{
        Expiration: c.wantExpiration,
        Field:      c.wantField,
    }
    if !testutil.Equal(got, want) {
        t.Errorf("metadata.TimePartitioning: got %v, want %v", got, want)
    }

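For context, the new Field option partitions a table on a column instead of ingestion time. A sketch with assumed names, following the test above:

    err := dataset.Table("events").Create(ctx, &TableMetadata{
        Schema: Schema{
            {Name: "name", Type: StringFieldType},
            {Name: "date", Type: DateFieldType},
        },
        TimePartitioning: &TimePartitioning{
            Expiration: 24 * time.Hour,
            Field:      "date", // partition by this column
        },
    })
    if err != nil {
        // TODO: Handle error.
    }
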
@@ -197,8 +247,7 @@ func TestIntegration_DatasetCreate(t *testing.T) {
        t.Skip("Integration tests skipped")
    }
    ctx := context.Background()
    uid := strings.Replace(datasetIDs.New(), "-", "_", -1)
    ds := client.Dataset(uid)
    ds := client.Dataset(datasetIDs.New())
    wmd := &DatasetMetadata{Name: "name", Location: "EU"}
    err := ds.Create(ctx, wmd)
    if err != nil {

@@ -215,7 +264,7 @@ func TestIntegration_DatasetCreate(t *testing.T) {
        t.Errorf("location: got %q, want %q", got, want)
    }
    if err := ds.Delete(ctx); err != nil {
        t.Fatalf("deleting dataset %s: %v", ds, err)
        t.Fatalf("deleting dataset %v: %v", ds, err)
    }
}

@@ -251,12 +300,12 @@ func TestIntegration_DatasetDelete(t *testing.T) {
        t.Skip("Integration tests skipped")
    }
    ctx := context.Background()
    ds := client.Dataset("delete_test")
    if err := ds.Create(ctx, nil); err != nil && !hasStatusCode(err, http.StatusConflict) { // AlreadyExists is 409
        t.Fatalf("creating dataset %s: %v", ds, err)
    ds := client.Dataset(datasetIDs.New())
    if err := ds.Create(ctx, nil); err != nil {
        t.Fatalf("creating dataset %s: %v", ds.DatasetID, err)
    }
    if err := ds.Delete(ctx); err != nil {
        t.Fatalf("deleting dataset %s: %v", ds, err)
        t.Fatalf("deleting dataset %s: %v", ds.DatasetID, err)
    }
}

@@ -340,6 +389,38 @@ func TestIntegration_DatasetUpdateDefaultExpiration(t *testing.T) {
    }
}

func TestIntegration_DatasetUpdateAccess(t *testing.T) {
    if client == nil {
        t.Skip("Integration tests skipped")
    }
    ctx := context.Background()
    md, err := dataset.Metadata(ctx)
    if err != nil {
        t.Fatal(err)
    }
    origAccess := append([]*AccessEntry(nil), md.Access...)
    newEntry := &AccessEntry{
        Role:       ReaderRole,
        Entity:     "Joe@example.com",
        EntityType: UserEmailEntity,
    }
    newAccess := append(md.Access, newEntry)
    dm := DatasetMetadataToUpdate{Access: newAccess}
    md, err = dataset.Update(ctx, dm, md.ETag)
    if err != nil {
        t.Fatal(err)
    }
    defer func() {
        _, err := dataset.Update(ctx, DatasetMetadataToUpdate{Access: origAccess}, md.ETag)
        if err != nil {
            t.Log("could not restore dataset access list")
        }
    }()
    if diff := testutil.Diff(md.Access, newAccess); diff != "" {
        t.Fatalf("got=-, want=+:\n%s", diff)
    }
}

func TestIntegration_DatasetUpdateLabels(t *testing.T) {
    if client == nil {
        t.Skip("Integration tests skipped")

@@ -349,8 +430,6 @@ func TestIntegration_DatasetUpdateLabels(t *testing.T) {
    if err != nil {
        t.Fatal(err)
    }
    // TODO(jba): use a separate dataset for each test run so
    // tests don't interfere with each other.
    var dm DatasetMetadataToUpdate
    dm.SetLabel("label", "value")
    md, err = dataset.Update(ctx, dm, "")

@@ -371,6 +450,34 @@ func TestIntegration_DatasetUpdateLabels(t *testing.T) {
    }
}

func TestIntegration_TableUpdateLabels(t *testing.T) {
    if client == nil {
        t.Skip("Integration tests skipped")
    }
    ctx := context.Background()
    table := newTable(t, schema)
    defer table.Delete(ctx)

    var tm TableMetadataToUpdate
    tm.SetLabel("label", "value")
    md, err := table.Update(ctx, tm, "")
    if err != nil {
        t.Fatal(err)
    }
    if got, want := md.Labels["label"], "value"; got != want {
        t.Errorf("got %q, want %q", got, want)
    }
    tm = TableMetadataToUpdate{}
    tm.DeleteLabel("label")
    md, err = table.Update(ctx, tm, "")
    if err != nil {
        t.Fatal(err)
    }
    if _, ok := md.Labels["label"]; ok {
        t.Error("label still present after deletion")
    }
}

func TestIntegration_Tables(t *testing.T) {
    if client == nil {
        t.Skip("Integration tests skipped")

@@ -450,7 +557,6 @@ func TestIntegration_UploadAndRead(t *testing.T) {

    // Query the table.
    q := client.Query(fmt.Sprintf("select name, nums, rec from %s", table.TableID))
    q.UseStandardSQL = true
    q.DefaultProjectID = dataset.ProjectID
    q.DefaultDatasetID = dataset.DatasetID

@@ -465,11 +571,16 @@ func TestIntegration_UploadAndRead(t *testing.T) {
    if err != nil {
        t.Fatal(err)
    }

    if job1.LastStatus() == nil {
        t.Error("no LastStatus")
    }
    job2, err := client.JobFromID(ctx, job1.ID())
    if err != nil {
        t.Fatal(err)
    }
    if job2.LastStatus() == nil {
        t.Error("no LastStatus")
    }
    rit, err = job2.Read(ctx)
    if err != nil {
        t.Fatal(err)

@@ -574,10 +685,9 @@ func TestIntegration_UploadAndReadStructs(t *testing.T) {
    defer table.Delete(ctx)

    d := civil.Date{2016, 3, 20}
    tm := civil.Time{15, 4, 5, 0}
    ts := time.Date(2016, 3, 20, 15, 4, 5, 0, time.UTC)
    tm := civil.Time{15, 4, 5, 6000}
    ts := time.Date(2016, 3, 20, 15, 4, 5, 6000, time.UTC)
    dtm := civil.DateTime{d, tm}

    d2 := civil.Date{1994, 5, 15}
    tm2 := civil.Time{1, 2, 4, 0}
    ts2 := time.Date(1994, 5, 15, 1, 2, 4, 0, time.UTC)

@@ -660,12 +770,15 @@ func TestIntegration_UploadAndReadStructs(t *testing.T) {
    }
    sort.Sort(byName(got))

    // Round times to the microsecond.
    roundToMicros := cmp.Transformer("RoundToMicros",
        func(t time.Time) time.Time { return t.Round(time.Microsecond) })
    // BigQuery does not elide nils. It reports an error for nil fields.
    for i, g := range got {
        if i >= len(want) {
            t.Errorf("%d: got %v, past end of want", i, pretty.Value(g))
        } else if w := want[i]; !testutil.Equal(g, w) {
            t.Errorf("%d: got %v, want %v", i, pretty.Value(g), pretty.Value(w))
        } else if diff := testutil.Diff(g, want[i], roundToMicros); diff != "" {
            t.Errorf("%d: got=-, want=+:\n%s", i, diff)
        }
    }
}

@@ -814,14 +927,35 @@ func TestIntegration_Load(t *testing.T) {
    rs := NewReaderSource(r)
    loader := table.LoaderFrom(rs)
    loader.WriteDisposition = WriteTruncate
    loader.Labels = map[string]string{"test": "go"}
    job, err := loader.Run(ctx)
    if err != nil {
        t.Fatal(err)
    }
    if job.LastStatus() == nil {
        t.Error("no LastStatus")
    }
    conf, err := job.Config()
    if err != nil {
        t.Fatal(err)
    }
    config, ok := conf.(*LoadConfig)
    if !ok {
        t.Fatalf("got %T, want LoadConfig", conf)
    }
    diff := testutil.Diff(config, &loader.LoadConfig,
        cmp.AllowUnexported(Table{}),
        cmpopts.IgnoreUnexported(Client{}, ReaderSource{}),
        // returned schema is at top level, not in the config
        cmpopts.IgnoreFields(FileConfig{}, "Schema"))
    if diff != "" {
        t.Errorf("got=-, want=+:\n%s", diff)
    }
    if err := wait(ctx, job); err != nil {
        t.Fatal(err)
    }
    checkRead(t, "reader load", table.Read(ctx), wantRows)

}

func TestIntegration_DML(t *testing.T) {

@@ -829,22 +963,30 @@ func TestIntegration_DML(t *testing.T) {
        t.Skip("Integration tests skipped")
    }
    ctx := context.Background()
    // Retry insert; sometimes it fails with INTERNAL.
    err := internal.Retry(ctx, gax.Backoff{}, func() (bool, error) {
        table := newTable(t, schema)
        defer table.Delete(ctx)
    table := newTable(t, schema)
    defer table.Delete(ctx)

    sql := fmt.Sprintf(`INSERT %s.%s (name, nums, rec)
                        VALUES ('a', [0], STRUCT<BOOL>(TRUE)),
                               ('b', [1], STRUCT<BOOL>(FALSE)),
                               ('c', [2], STRUCT<BOOL>(TRUE))`,
        table.DatasetID, table.TableID)
    if err := dmlInsert(ctx, sql); err != nil {
        t.Fatal(err)
    }
    wantRows := [][]Value{
        []Value{"a", []Value{int64(0)}, []Value{true}},
        []Value{"b", []Value{int64(1)}, []Value{false}},
        []Value{"c", []Value{int64(2)}, []Value{true}},
    }
    checkRead(t, "DML", table.Read(ctx), wantRows)
}

func dmlInsert(ctx context.Context, sql string) error {
    // Retry insert; sometimes it fails with INTERNAL.
    return internal.Retry(ctx, gax.Backoff{}, func() (bool, error) {
        // Use DML to insert.
        wantRows := [][]Value{
            []Value{"a", []Value{int64(0)}, []Value{true}},
            []Value{"b", []Value{int64(1)}, []Value{false}},
            []Value{"c", []Value{int64(2)}, []Value{true}},
        }
        query := fmt.Sprintf("INSERT bigquery_integration_test.%s (name, nums, rec) "+
            "VALUES ('a', [0], STRUCT<BOOL>(TRUE)), ('b', [1], STRUCT<BOOL>(FALSE)), ('c', [2], STRUCT<BOOL>(TRUE))",
            table.TableID)
        q := client.Query(query)
        q.UseStandardSQL = true // necessary for DML
        q := client.Query(sql)
        job, err := q.Run(ctx)
        if err != nil {
            if e, ok := err.(*googleapi.Error); ok && e.Code < 500 {

@@ -853,18 +995,13 @@ func TestIntegration_DML(t *testing.T) {
            return false, err
        }
        if err := wait(ctx, job); err != nil {
            fmt.Printf("wait: %v\n", err)
            if e, ok := err.(*googleapi.Error); ok && e.Code < 500 {
                return true, err // fail on 4xx
            }
            return false, err
        }
        if msg, ok := compareRead(table.Read(ctx), wantRows); !ok {
            // Stop on read error, because that has never been flaky.
            return true, errors.New(msg)
        }
        return true, nil
    })
    if err != nil {
        t.Fatal(err)
    }
}

func TestIntegration_TimeTypes(t *testing.T) {

@@ -882,10 +1019,11 @@ func TestIntegration_TimeTypes(t *testing.T) {
    defer table.Delete(ctx)

    d := civil.Date{2016, 3, 20}
    tm := civil.Time{12, 30, 0, 0}
    tm := civil.Time{12, 30, 0, 6000}
    dtm := civil.DateTime{d, tm}
    ts := time.Date(2016, 3, 20, 15, 04, 05, 0, time.UTC)
    wantRows := [][]Value{
        []Value{d, tm, civil.DateTime{d, tm}, ts},
        []Value{d, tm, dtm, ts},
    }
    upl := table.Uploader()
    if err := upl.Put(ctx, []*ValuesSaver{

@@ -899,16 +1037,11 @@ func TestIntegration_TimeTypes(t *testing.T) {

    // SQL wants DATETIMEs with a space between date and time, but the service
    // returns them in RFC3339 form, with a "T" between.
    query := fmt.Sprintf("INSERT bigquery_integration_test.%s (d, t, dt, ts) "+
        "VALUES ('%s', '%s', '%s %s', '%s')",
        table.TableID, d, tm, d, tm, ts.Format("2006-01-02 15:04:05"))
    q := client.Query(query)
    q.UseStandardSQL = true // necessary for DML
    job, err := q.Run(ctx)
    if err != nil {
        t.Fatal(err)
    }
    if err := wait(ctx, job); err != nil {
    query := fmt.Sprintf("INSERT %s.%s (d, t, dt, ts) "+
        "VALUES ('%s', '%s', '%s', '%s')",
        table.DatasetID, table.TableID,
        d, CivilTimeString(tm), CivilDateTimeString(dtm), ts.Format("2006-01-02 15:04:05"))
    if err := dmlInsert(ctx, query); err != nil {
        t.Fatal(err)
    }
    wantRows = append(wantRows, wantRows[0])

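CivilTimeString and CivilDateTimeString, used above, render civil values in the space-separated, microsecond-precision form BigQuery SQL expects. A small sketch:

    tm := civil.Time{Hour: 12, Minute: 30, Second: 0, Nanosecond: 6000}
    dtm := civil.DateTime{Date: civil.Date{Year: 2016, Month: 3, Day: 20}, Time: tm}
    sql := fmt.Sprintf("INSERT my_dataset.my_table (t, dt) VALUES ('%s', '%s')",
        CivilTimeString(tm), CivilDateTimeString(dtm)) // e.g. 12:30:00.000006
    _ = sql // run via dmlInsert as above; table name is a placeholder
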
@@ -959,7 +1092,6 @@ func TestIntegration_StandardQuery(t *testing.T) {
    }
    for _, c := range testCases {
        q := client.Query(c.query)
        q.UseStandardSQL = true
        it, err := q.Read(ctx)
        if err != nil {
            t.Fatal(err)

@@ -1045,7 +1177,14 @@ func TestIntegration_QueryParameters(t *testing.T) {
    for _, c := range testCases {
        q := client.Query(c.query)
        q.Parameters = c.parameters
        it, err := q.Read(ctx)
        job, err := q.Run(ctx)
        if err != nil {
            t.Fatal(err)
        }
        if job.LastStatus() == nil {
            t.Error("no LastStatus")
        }
        it, err := job.Read(ctx)
        if err != nil {
            t.Fatal(err)
        }

@ -1053,6 +1192,131 @@ func TestIntegration_QueryParameters(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestIntegration_QueryDryRun(t *testing.T) {
|
||||
if client == nil {
|
||||
t.Skip("Integration tests skipped")
|
||||
}
|
||||
ctx := context.Background()
|
||||
q := client.Query("SELECT word from " + stdName + " LIMIT 10")
|
||||
q.DryRun = true
|
||||
job, err := q.Run(ctx)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
|
||||
s := job.LastStatus()
|
||||
if s.State != Done {
|
||||
t.Errorf("state is %v, expected Done", s.State)
|
||||
}
|
||||
if s.Statistics == nil {
|
||||
t.Fatal("no statistics")
|
||||
}
|
||||
if s.Statistics.Details.(*QueryStatistics).Schema == nil {
|
||||
t.Fatal("no schema")
|
||||
}
|
||||
}
|
||||

func TestIntegration_ExtractExternal(t *testing.T) {
	// Create a table, extract it to GCS, then query it externally.
	if client == nil {
		t.Skip("Integration tests skipped")
	}
	ctx := context.Background()
	schema := Schema{
		{Name: "name", Type: StringFieldType},
		{Name: "num", Type: IntegerFieldType},
	}
	table := newTable(t, schema)
	defer table.Delete(ctx)

	// Insert table data.
	sql := fmt.Sprintf(`INSERT %s.%s (name, num)
		VALUES ('a', 1), ('b', 2), ('c', 3)`,
		table.DatasetID, table.TableID)
	if err := dmlInsert(ctx, sql); err != nil {
		t.Fatal(err)
	}
	// Extract to a GCS object as CSV.
	bucketName := testutil.ProjID()
	objectName := fmt.Sprintf("bq-test-%s.csv", table.TableID)
	uri := fmt.Sprintf("gs://%s/%s", bucketName, objectName)
	defer storageClient.Bucket(bucketName).Object(objectName).Delete(ctx)
	gr := NewGCSReference(uri)
	gr.DestinationFormat = CSV
	e := table.ExtractorTo(gr)
	job, err := e.Run(ctx)
	if err != nil {
		t.Fatal(err)
	}
	conf, err := job.Config()
	if err != nil {
		t.Fatal(err)
	}
	config, ok := conf.(*ExtractConfig)
	if !ok {
		t.Fatalf("got %T, want ExtractConfig", conf)
	}
	diff := testutil.Diff(config, &e.ExtractConfig,
		cmp.AllowUnexported(Table{}),
		cmpopts.IgnoreUnexported(Client{}))
	if diff != "" {
		t.Errorf("got=-, want=+:\n%s", diff)
	}
	if err := wait(ctx, job); err != nil {
		t.Fatal(err)
	}

	edc := &ExternalDataConfig{
		SourceFormat: CSV,
		SourceURIs:   []string{uri},
		Schema:       schema,
		Options:      &CSVOptions{SkipLeadingRows: 1},
	}
	// Query that CSV file directly.
	q := client.Query("SELECT * FROM csv")
	q.TableDefinitions = map[string]ExternalData{"csv": edc}
	wantRows := [][]Value{
		[]Value{"a", int64(1)},
		[]Value{"b", int64(2)},
		[]Value{"c", int64(3)},
	}
	iter, err := q.Read(ctx)
	if err != nil {
		t.Fatal(err)
	}
	checkRead(t, "external query", iter, wantRows)

	// Make a table pointing to the file, and query it.
	// BigQuery does not allow a Table.Read on an external table.
	table = dataset.Table(tableIDs.New())
	err = table.Create(context.Background(), &TableMetadata{
		Schema:             schema,
		ExpirationTime:     testTableExpiration,
		ExternalDataConfig: edc,
	})
	if err != nil {
		t.Fatal(err)
	}
	q = client.Query(fmt.Sprintf("SELECT * FROM %s.%s", table.DatasetID, table.TableID))
	iter, err = q.Read(ctx)
	if err != nil {
		t.Fatal(err)
	}
	checkRead(t, "external table", iter, wantRows)

	// While we're here, check that the table metadata is correct.
	md, err := table.Metadata(ctx)
	if err != nil {
		t.Fatal(err)
	}
	// One difference: since BigQuery returns the schema as part of the ordinary
	// table metadata, it does not populate ExternalDataConfig.Schema.
	md.ExternalDataConfig.Schema = md.Schema
	if diff := testutil.Diff(md.ExternalDataConfig, edc); diff != "" {
		t.Errorf("got=-, want=+\n%s", diff)
	}
}
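The same external-table mechanics work outside the test: an ExternalDataConfig describes the GCS object, and wiring it into QueryConfig.TableDefinitions lets the query refer to it by name. A condensed sketch with an illustrative URI:

	edc := &ExternalDataConfig{
		SourceFormat: CSV,
		SourceURIs:   []string{"gs://my-bucket/data.csv"},
		Schema: Schema{
			{Name: "name", Type: StringFieldType},
			{Name: "num", Type: IntegerFieldType},
		},
		Options: &CSVOptions{SkipLeadingRows: 1},
	}
	q := client.Query("SELECT * FROM csv")
	q.TableDefinitions = map[string]ExternalData{"csv": edc}
	// q.Read(ctx) now scans the CSV object directly, without a load job.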

func TestIntegration_ReadNullIntoStruct(t *testing.T) {
	// Reading a null into a struct field should return an error (not panic).
	if client == nil {

@@ -1167,31 +1431,30 @@ func TestIntegration_ListJobs(t *testing.T) {

	// About all we can do is list a few jobs.
	const max = 20
	var jis []JobInfo
	var jobs []*Job
	it := client.Jobs(ctx)
	for {
		ji, err := it.Next()
		job, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			t.Fatal(err)
		}
		jis = append(jis, ji)
		if len(jis) >= max {
		jobs = append(jobs, job)
		if len(jobs) >= max {
			break
		}
	}
	// We expect that there is at least one job in the last few months.
	if len(jis) == 0 {
	if len(jobs) == 0 {
		t.Fatal("did not get any jobs")
	}
}
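With JobIterator.Next returning *Job instead of JobInfo, status and configuration are read off the job itself. A sketch of listing recent jobs with their last-seen state (LastStatus makes no RPC):

	it := client.Jobs(ctx)
	for {
		job, err := it.Next()
		if err == iterator.Done {
			break
		}
		if err != nil {
			return err
		}
		fmt.Println(job.ID(), job.LastStatus().State)
	}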

// Creates a new, temporary table with a unique name and the given schema.
func newTable(t *testing.T, s Schema) *Table {
	name := fmt.Sprintf("t%d", time.Now().UnixNano())
	table := dataset.Table(name)
	table := dataset.Table(tableIDs.New())
	err := table.Create(context.Background(), &TableMetadata{
		Schema:         s,
		ExpirationTime: testTableExpiration,

@@ -1268,7 +1531,7 @@ func hasStatusCode(err error, code int) bool {
func wait(ctx context.Context, job *Job) error {
	status, err := job.Wait(ctx)
	if err != nil {
		return fmt.Errorf("getting job status: %v", err)
		return err
	}
	if status.Err() != nil {
		return fmt.Errorf("job status error: %#v", status.Err())

@@ -19,20 +19,15 @@ import (
	"reflect"

	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
	"google.golang.org/api/iterator"
)

// A pageFetcher returns a page of rows, starting from the row specified by token.
type pageFetcher interface {
	fetch(ctx context.Context, s service, token string) (*readDataResult, error)
	setPaging(*pagingConf)
}

func newRowIterator(ctx context.Context, s service, pf pageFetcher) *RowIterator {
func newRowIterator(ctx context.Context, t *Table, pf pageFetcher) *RowIterator {
	it := &RowIterator{
		ctx:     ctx,
		service: s,
		pf:      pf,
		ctx:   ctx,
		table: t,
		pf:    pf,
	}
	it.pageInfo, it.nextFunc = iterator.NewPageInfo(
		it.fetch,

@@ -44,7 +39,7 @@ func newRowIterator(ctx context.Context, s service, pf pageFetcher) *RowIterator
// A RowIterator provides access to the result of a BigQuery lookup.
type RowIterator struct {
	ctx      context.Context
	service  service
	table    *Table
	pf       pageFetcher
	pageInfo *iterator.PageInfo
	nextFunc func() error

@@ -135,16 +130,7 @@ func isStructPtr(x interface{}) bool {
func (it *RowIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) {
	pc := &pagingConf{}
	if pageSize > 0 {
		pc.recordsPerRequest = int64(pageSize)
		pc.setRecordsPerRequest = true
	}
	if pageToken == "" {
		pc.startIndex = it.StartIndex
	}
	it.pf.setPaging(pc)
	res, err := it.pf.fetch(it.ctx, it.service, pageToken)
	res, err := it.pf(it.ctx, it.table, it.schema, it.StartIndex, int64(pageSize), pageToken)
	if err != nil {
		return "", err
	}

@@ -152,3 +138,69 @@ func (it *RowIterator) fetch(pageSize int, pageToken string) (string, error) {
	it.schema = res.schema
	return res.pageToken, nil
}

// A pageFetcher returns a page of rows from a destination table.
type pageFetcher func(ctx context.Context, _ *Table, _ Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error)

type fetchPageResult struct {
	pageToken string
	rows      [][]Value
	totalRows uint64
	schema    Schema
}

// fetchPage gets a page of rows from t.
func fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) {
	// Fetch the table schema in the background, if necessary.
	errc := make(chan error, 1)
	if schema != nil {
		errc <- nil
	} else {
		go func() {
			var bqt *bq.Table
			err := runWithRetry(ctx, func() (err error) {
				bqt, err = t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).
					Fields("schema").
					Context(ctx).
					Do()
				return err
			})
			if err == nil && bqt.Schema != nil {
				schema = bqToSchema(bqt.Schema)
			}
			errc <- err
		}()
	}
	call := t.c.bqs.Tabledata.List(t.ProjectID, t.DatasetID, t.TableID)
	setClientHeader(call.Header())
	if pageToken != "" {
		call.PageToken(pageToken)
	} else {
		call.StartIndex(startIndex)
	}
	if pageSize > 0 {
		call.MaxResults(pageSize)
	}
	var res *bq.TableDataList
	err := runWithRetry(ctx, func() (err error) {
		res, err = call.Context(ctx).Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	err = <-errc
	if err != nil {
		return nil, err
	}
	rows, err := convertRows(res.Rows, schema)
	if err != nil {
		return nil, err
	}
	return &fetchPageResult{
		pageToken: res.PageToken,
		rows:      rows,
		totalRows: uint64(res.TotalRows),
		schema:    schema,
	}, nil
}
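fetchPage overlaps the schema lookup with the row fetch using a small Go idiom: a buffered error channel that is either pre-filled when there is nothing to do, or filled by a goroutine. The caller does its own work first and only then blocks on the channel. The idiom reduced to its shape; haveSchema, fetchSchemaInBackground and doMainRequest are hypothetical placeholders:

	func fetchWithSchema(haveSchema bool) error {
		errc := make(chan error, 1) // buffered: the goroutine never blocks on send
		if haveSchema {
			errc <- nil // nothing to fetch; the receive below returns at once
		} else {
			go func() { errc <- fetchSchemaInBackground() }()
		}
		doMainRequest() // runs concurrently with the background fetch
		return <-errc   // join point: surface the background error, if any
	}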

@@ -26,27 +26,24 @@ import (
)

type fetchResponse struct {
	result *readDataResult // The result to return.
	err    error           // The error to return.
	result *fetchPageResult // The result to return.
	err    error            // The error to return.
}

// pageFetcherStub services fetch requests by returning data from an in-memory list of values.
type pageFetcherStub struct {
	fetchResponses map[string]fetchResponse

	err error
	err error
}

func (pf *pageFetcherStub) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
	call, ok := pf.fetchResponses[token]
func (pf *pageFetcherStub) fetchPage(ctx context.Context, _ *Table, _ Schema, _ uint64, _ int64, pageToken string) (*fetchPageResult, error) {
	call, ok := pf.fetchResponses[pageToken]
	if !ok {
		pf.err = fmt.Errorf("Unexpected page token: %q", token)
		pf.err = fmt.Errorf("Unexpected page token: %q", pageToken)
	}
	return call.result, call.err
}

func (pf *pageFetcherStub) setPaging(pc *pagingConf) {}

func TestIterator(t *testing.T) {
	var (
		iiSchema = Schema{

@@ -72,7 +69,7 @@ func TestIterator(t *testing.T) {
			desc: "Iteration over single empty page",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
					result: &fetchPageResult{
						pageToken: "",
						rows:      [][]Value{},
						schema:    Schema{},

@@ -86,7 +83,7 @@ func TestIterator(t *testing.T) {
			desc: "Iteration over single page",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
					result: &fetchPageResult{
						pageToken: "",
						rows:      [][]Value{{1, 2}, {11, 12}},
						schema:    iiSchema,

@@ -100,7 +97,7 @@ func TestIterator(t *testing.T) {
			desc: "Iteration over single page with different schema",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
					result: &fetchPageResult{
						pageToken: "",
						rows:      [][]Value{{"1", 2}, {"11", 12}},
						schema:    siSchema,

@@ -114,14 +111,14 @@ func TestIterator(t *testing.T) {
			desc: "Iteration over two pages",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
					result: &fetchPageResult{
						pageToken: "a",
						rows:      [][]Value{{1, 2}, {11, 12}},
						schema:    iiSchema,
					},
				},
				"a": {
					result: &readDataResult{
					result: &fetchPageResult{
						pageToken: "",
						rows:      [][]Value{{101, 102}, {111, 112}},
						schema:    iiSchema,

@@ -135,21 +132,21 @@ func TestIterator(t *testing.T) {
			desc: "Server response includes empty page",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
					result: &fetchPageResult{
						pageToken: "a",
						rows:      [][]Value{{1, 2}, {11, 12}},
						schema:    iiSchema,
					},
				},
				"a": {
					result: &readDataResult{
					result: &fetchPageResult{
						pageToken: "b",
						rows:      [][]Value{},
						schema:    iiSchema,
					},
				},
				"b": {
					result: &readDataResult{
					result: &fetchPageResult{
						pageToken: "",
						rows:      [][]Value{{101, 102}, {111, 112}},
						schema:    iiSchema,

@@ -163,7 +160,7 @@ func TestIterator(t *testing.T) {
			desc: "Fetch error",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
					result: &fetchPageResult{
						pageToken: "a",
						rows:      [][]Value{{1, 2}, {11, 12}},
						schema:    iiSchema,

@@ -173,7 +170,7 @@ func TestIterator(t *testing.T) {
					// We return some data from this fetch, but also an error.
					// So the end result should include only data from the previous fetch.
					err: fetchFailure,
					result: &readDataResult{
					result: &fetchPageResult{
						pageToken: "b",
						rows:      [][]Value{{101, 102}, {111, 112}},
						schema:    iiSchema,

@@ -190,14 +187,14 @@ func TestIterator(t *testing.T) {
			pageToken: "a",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
					result: &fetchPageResult{
						pageToken: "a",
						rows:      [][]Value{{1, 2}, {11, 12}},
						schema:    iiSchema,
					},
				},
				"a": {
					result: &readDataResult{
					result: &fetchPageResult{
						pageToken: "",
						rows:      [][]Value{{101, 102}, {111, 112}},
						schema:    iiSchema,

@@ -213,21 +210,21 @@ func TestIterator(t *testing.T) {
			pageToken: "b",
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
					result: &fetchPageResult{
						pageToken: "a",
						rows:      [][]Value{{1, 2}, {11, 12}},
						schema:    iiSchema,
					},
				},
				"a": {
					result: &readDataResult{
					result: &fetchPageResult{
						pageToken: "b",
						rows:      [][]Value{{101, 102}, {111, 112}},
						schema:    iiSchema,
					},
				},
				"b": {
					result: &readDataResult{},
					result: &fetchPageResult{},
				},
			},
			// In this test case, Next will return false on its first call,

@@ -241,7 +238,7 @@ func TestIterator(t *testing.T) {
		pf := &pageFetcherStub{
			fetchResponses: tc.fetchResponses,
		}
		it := newRowIterator(context.Background(), nil, pf)
		it := newRowIterator(context.Background(), nil, pf.fetchPage)
		it.PageInfo().Token = tc.pageToken
		values, schema, err := consumeRowIterator(it)
		if err != tc.wantErr {

@@ -291,7 +288,7 @@ func TestNextDuringErrorState(t *testing.T) {
			"": {err: errors.New("bang")},
		},
	}
	it := newRowIterator(context.Background(), nil, pf)
	it := newRowIterator(context.Background(), nil, pf.fetchPage)
	var vals []Value
	if err := it.Next(&vals); err == nil {
		t.Errorf("Expected error after calling Next")

@@ -309,7 +306,7 @@ func TestNextAfterFinished(t *testing.T) {
		{
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
					result: &fetchPageResult{
						pageToken: "",
						rows:      [][]Value{{1, 2}, {11, 12}},
					},

@@ -320,7 +317,7 @@ func TestNextAfterFinished(t *testing.T) {
		{
			fetchResponses: map[string]fetchResponse{
				"": {
					result: &readDataResult{
					result: &fetchPageResult{
						pageToken: "",
						rows:      [][]Value{},
					},

@@ -334,7 +331,7 @@ func TestNextAfterFinished(t *testing.T) {
		pf := &pageFetcherStub{
			fetchResponses: tc.fetchResponses,
		}
		it := newRowIterator(context.Background(), nil, pf)
		it := newRowIterator(context.Background(), nil, pf.fetchPage)

		values, _, err := consumeRowIterator(it)
		if err != nil {

@@ -358,7 +355,7 @@ func TestIteratorNextTypes(t *testing.T) {
		struct{}{},
	} {
		if err := it.Next(v); err == nil {
			t.Error("%v: want error, got nil", v)
			t.Errorf("%v: want error, got nil", v)
		}
	}
}

@@ -26,6 +26,7 @@ import (
	gax "github.com/googleapis/gax-go"
	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
	"google.golang.org/api/googleapi"
	"google.golang.org/api/iterator"
)

@@ -35,20 +36,19 @@ type Job struct {
	projectID string
	jobID     string

	isQuery          bool
	destinationTable *bq.TableReference // table to read query results from
	config     *bq.JobConfiguration
	lastStatus *JobStatus
}

// JobFromID creates a Job which refers to an existing BigQuery job. The job
// need not have been created by this package. For example, the job may have
// been created in the BigQuery console.
func (c *Client) JobFromID(ctx context.Context, id string) (*Job, error) {
	job, err := c.service.getJob(ctx, c.projectID, id)
	bqjob, err := c.getJobInternal(ctx, id, "configuration", "jobReference", "status", "statistics")
	if err != nil {
		return nil, err
	}
	job.c = c
	return job, nil
	return bqToJob(bqjob, c)
}

// ID returns the job's ID.

@@ -80,20 +80,62 @@ type JobStatus struct {
	Statistics *JobStatistics
}

// JobConfig contains configuration information for a job. It is implemented by
// *CopyConfig, *ExtractConfig, *LoadConfig and *QueryConfig.
type JobConfig interface {
	isJobConfig()
}

func (*CopyConfig) isJobConfig()    {}
func (*ExtractConfig) isJobConfig() {}
func (*LoadConfig) isJobConfig()    {}
func (*QueryConfig) isJobConfig()   {}

// Config returns the configuration information for j.
func (j *Job) Config() (JobConfig, error) {
	return bqToJobConfig(j.config, j.c)
}

func bqToJobConfig(q *bq.JobConfiguration, c *Client) (JobConfig, error) {
	switch {
	case q == nil:
		return nil, nil
	case q.Copy != nil:
		return bqToCopyConfig(q, c), nil
	case q.Extract != nil:
		return bqToExtractConfig(q, c), nil
	case q.Load != nil:
		return bqToLoadConfig(q, c), nil
	case q.Query != nil:
		return bqToQueryConfig(q, c)
	default:
		return nil, nil
	}
}

// JobIDConfig describes how to create an ID for a job.
type JobIDConfig struct {
	// JobID is the ID to use for the job. If empty, a random job ID will be generated.
	JobID string

	// If AddJobIDSuffix is true, then a random string will be appended to JobID.
	AddJobIDSuffix bool
}

// createJobRef creates a JobReference.
// projectID must be non-empty.
func createJobRef(jobID string, addJobIDSuffix bool, projectID string) *bq.JobReference {
	if jobID == "" {
		jobID = randomJobIDFn()
	} else if addJobIDSuffix {
		jobID += "-" + randomJobIDFn()
	}
func (j *JobIDConfig) createJobRef(projectID string) *bq.JobReference {
	// We don't check whether projectID is empty; the server will return an
	// error when it encounters the resulting JobReference.
	return &bq.JobReference{
		JobId:     jobID,
		ProjectId: projectID,
	jr := &bq.JobReference{ProjectId: projectID}
	if j.JobID == "" {
		jr.JobId = randomIDFn()
	} else if j.AddJobIDSuffix {
		jr.JobId = j.JobID + "-" + randomIDFn()
	} else {
		jr.JobId = j.JobID
	}
	return jr
}
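JobIDConfig is meant to be embedded by the job-creating types (Loader embeds it in load.go below; the other runners presumably follow the same pattern), so every job kind gets the same ID-setting surface. A sketch, assuming a Loader named loader:

	loader.JobID = "daily-load"
	loader.AddJobIDSuffix = true // job ID becomes "daily-load-" plus 27 random alphanumerics
	job, err := loader.Run(ctx)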

const alphanum = "ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789"

@@ -104,11 +146,15 @@ var (
)

// For testing.
var randomJobIDFn = randomJobID
var randomIDFn = randomID

func randomJobID() string {
	// As of August 2017, the BigQuery service uses 27 alphanumeric characters for suffixes.
	var b [27]byte
// As of August 2017, the BigQuery service uses 27 alphanumeric characters for
// suffixes.
const randomIDLen = 27

func randomID() string {
	// This is used for both job IDs and insert IDs.
	var b [randomIDLen]byte
	rngMu.Lock()
	for i := 0; i < len(b); i++ {
		b[i] = alphanum[rng.Intn(len(alphanum))]

@@ -128,33 +174,43 @@ func (s *JobStatus) Err() error {
	return s.err
}

// Fill in the client field of Tables in the statistics.
func (s *JobStatus) setClient(c *Client) {
	if s.Statistics == nil {
		return
	}
	if qs, ok := s.Statistics.Details.(*QueryStatistics); ok {
		for _, t := range qs.ReferencedTables {
			t.c = c
		}
	}
}

// Status returns the current status of the job. It fails if the Status could not be determined.
// Status retrieves the current status of the job from BigQuery. It fails if the Status could not be determined.
func (j *Job) Status(ctx context.Context) (*JobStatus, error) {
	js, err := j.c.service.jobStatus(ctx, j.projectID, j.jobID)
	bqjob, err := j.c.getJobInternal(ctx, j.jobID, "status", "statistics")
	if err != nil {
		return nil, err
	}
	js.setClient(j.c)
	return js, nil
	if err := j.setStatus(bqjob.Status); err != nil {
		return nil, err
	}
	j.setStatistics(bqjob.Statistics, j.c)
	return j.lastStatus, nil
}

// LastStatus returns the most recently retrieved status of the job. The status is
// retrieved when a new job is created, or when JobFromID or Job.Status is called.
// Call Job.Status to get the most up-to-date information about a job.
func (j *Job) LastStatus() *JobStatus {
	return j.lastStatus
}

// Cancel requests that a job be cancelled. This method returns without waiting for
// cancellation to take effect. To check whether the job has terminated, use Job.Status.
// Cancelled jobs may still incur costs.
func (j *Job) Cancel(ctx context.Context) error {
	return j.c.service.jobCancel(ctx, j.projectID, j.jobID)
	// Jobs.Cancel returns a job entity, but the only relevant piece of
	// data it may contain (the status of the job) is unreliable. From the
	// docs: "This call will return immediately, and the client will need
	// to poll for the job status to see if the cancel completed
	// successfully". So it would be misleading to return a status.
	call := j.c.bqs.Jobs.Cancel(j.projectID, j.jobID).
		Fields(). // We don't need any of the response data.
		Context(ctx)
	setClientHeader(call.Header())
	return runWithRetry(ctx, func() error {
		_, err := call.Do()
		return err
	})
}
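Because the cancel RPC's response says nothing reliable, confirming a cancellation means polling Job.Status until the job reaches a terminal state, exactly as the doc comment above says. A sketch of that loop (the poll interval is arbitrary):

	if err := job.Cancel(ctx); err != nil {
		return err
	}
	for {
		status, err := job.Status(ctx)
		if err != nil {
			return err
		}
		if status.State == Done {
			return status.Err() // non-nil if the job failed or was cancelled
		}
		time.Sleep(time.Second)
	}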

// Wait blocks until the job or the context is done. It returns the final status

@@ -163,9 +219,9 @@ func (j *Job) Cancel(ctx context.Context) error {
// Wait returns nil if the status was retrieved successfully, even if
// status.Err() != nil. So callers must check both errors. See the example.
func (j *Job) Wait(ctx context.Context) (*JobStatus, error) {
	if j.isQuery {
	if j.isQuery() {
		// We can avoid polling for query jobs.
		if _, err := j.c.service.waitForQuery(ctx, j.projectID, j.jobID); err != nil {
		if _, err := j.waitForQuery(ctx, j.projectID); err != nil {
			return nil, err
		}
		// Note: extra RPC even if you just want to wait for the query to finish.

@@ -196,30 +252,54 @@ func (j *Job) Wait(ctx context.Context) (*JobStatus, error) {
// Read fetches the results of a query job.
// If j is not a query job, Read returns an error.
func (j *Job) Read(ctx context.Context) (*RowIterator, error) {
	if !j.isQuery {
	return j.read(ctx, j.waitForQuery, fetchPage)
}

func (j *Job) read(ctx context.Context, waitForQuery func(context.Context, string) (Schema, error), pf pageFetcher) (*RowIterator, error) {
	if !j.isQuery() {
		return nil, errors.New("bigquery: cannot read from a non-query job")
	}
	var projectID string
	if j.destinationTable != nil {
		projectID = j.destinationTable.ProjectId
	} else {
		projectID = j.c.projectID
	destTable := j.config.Query.DestinationTable
	// The destination table should only be nil if there was a query error.
	if destTable == nil {
		return nil, errors.New("bigquery: query job missing destination table")
	}

	schema, err := j.c.service.waitForQuery(ctx, projectID, j.jobID)
	projectID := destTable.ProjectId
	schema, err := waitForQuery(ctx, projectID)
	if err != nil {
		return nil, err
	}
	// The destination table should only be nil if there was a query error.
	if j.destinationTable == nil {
		return nil, errors.New("bigquery: query job missing destination table")
	dt := bqToTable(destTable, j.c)
	it := newRowIterator(ctx, dt, pf)
	it.schema = schema
	return it, nil
}

// waitForQuery waits for the query job to complete and returns its schema.
func (j *Job) waitForQuery(ctx context.Context, projectID string) (Schema, error) {
	// Use GetQueryResults only to wait for completion, not to read results.
	call := j.c.bqs.Jobs.GetQueryResults(projectID, j.jobID).Context(ctx).MaxResults(0)
	setClientHeader(call.Header())
	backoff := gax.Backoff{
		Initial:    1 * time.Second,
		Multiplier: 2,
		Max:        60 * time.Second,
	}
	return newRowIterator(ctx, j.c.service, &readTableConf{
		projectID: j.destinationTable.ProjectId,
		datasetID: j.destinationTable.DatasetId,
		tableID:   j.destinationTable.TableId,
		schema:    schema,
	}), nil
	var res *bq.GetQueryResultsResponse
	err := internal.Retry(ctx, backoff, func() (stop bool, err error) {
		res, err = call.Do()
		if err != nil {
			return !retryableError(err), err
		}
		if !res.JobComplete { // GetQueryResults may return early without error; retry.
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		return nil, err
	}
	return bqToSchema(res.Schema), nil
}

// JobStatistics contains statistics about a job.

@@ -373,12 +453,6 @@ func (c *Client) Jobs(ctx context.Context) *JobIterator {
	return it
}

// A JobInfo consists of a Job and a JobStatus.
type JobInfo struct {
	Job    *Job
	Status *JobStatus
}

// JobIterator iterates over jobs in a project.
type JobIterator struct {
	ProjectID string // Project ID of the jobs to list. Default is the client's project.

@@ -389,14 +463,14 @@ type JobIterator struct {
	c        *Client
	pageInfo *iterator.PageInfo
	nextFunc func() error
	items    []JobInfo
	items    []*Job
}

func (it *JobIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }

func (it *JobIterator) Next() (JobInfo, error) {
func (it *JobIterator) Next() (*Job, error) {
	if err := it.nextFunc(); err != nil {
		return JobInfo{}, err
		return nil, err
	}
	item := it.items[0]
	it.items = it.items[1:]

@@ -417,14 +491,179 @@ func (it *JobIterator) fetch(pageSize int, pageToken string) (string, error) {
	default:
		return "", fmt.Errorf("bigquery: invalid value for JobIterator.State: %d", it.State)
	}
	jobInfos, nextPageToken, err := it.c.service.listJobs(it.ctx, it.ProjectID, pageSize, pageToken, it.AllUsers, st)

	req := it.c.bqs.Jobs.List(it.ProjectID).
		Context(it.ctx).
		PageToken(pageToken).
		Projection("full").
		AllUsers(it.AllUsers)
	if st != "" {
		req.StateFilter(st)
	}
	setClientHeader(req.Header())
	if pageSize > 0 {
		req.MaxResults(int64(pageSize))
	}
	res, err := req.Do()
	if err != nil {
		return "", err
	}
	for _, ji := range jobInfos {
		ji.Job.c = it.c
		ji.Status.setClient(it.c)
		it.items = append(it.items, ji)
	for _, j := range res.Jobs {
		job, err := convertListedJob(j, it.c)
		if err != nil {
			return "", err
		}
		it.items = append(it.items, job)
	}
	return nextPageToken, nil
	return res.NextPageToken, nil
}

func convertListedJob(j *bq.JobListJobs, c *Client) (*Job, error) {
	return bqToJob2(j.JobReference, j.Configuration, j.Status, j.Statistics, c)
}

func (c *Client) getJobInternal(ctx context.Context, jobID string, fields ...googleapi.Field) (*bq.Job, error) {
	var job *bq.Job
	call := c.bqs.Jobs.Get(c.projectID, jobID).Context(ctx)
	if len(fields) > 0 {
		call = call.Fields(fields...)
	}
	setClientHeader(call.Header())
	err := runWithRetry(ctx, func() (err error) {
		job, err = call.Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return job, nil
}

func bqToJob(q *bq.Job, c *Client) (*Job, error) {
	return bqToJob2(q.JobReference, q.Configuration, q.Status, q.Statistics, c)
}

func bqToJob2(qr *bq.JobReference, qc *bq.JobConfiguration, qs *bq.JobStatus, qt *bq.JobStatistics, c *Client) (*Job, error) {
	j := &Job{
		projectID: qr.ProjectId,
		jobID:     qr.JobId,
		c:         c,
	}
	j.setConfig(qc)
	if err := j.setStatus(qs); err != nil {
		return nil, err
	}
	j.setStatistics(qt, c)
	return j, nil
}

func (j *Job) setConfig(config *bq.JobConfiguration) {
	if config == nil {
		return
	}
	j.config = config
}

func (j *Job) isQuery() bool {
	return j.config != nil && j.config.Query != nil
}

var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}

func (j *Job) setStatus(qs *bq.JobStatus) error {
	if qs == nil {
		return nil
	}
	state, ok := stateMap[qs.State]
	if !ok {
		return fmt.Errorf("unexpected job state: %v", qs.State)
	}
	j.lastStatus = &JobStatus{
		State: state,
		err:   nil,
	}
	if err := bqToError(qs.ErrorResult); state == Done && err != nil {
		j.lastStatus.err = err
	}
	for _, ep := range qs.Errors {
		j.lastStatus.Errors = append(j.lastStatus.Errors, bqToError(ep))
	}
	return nil
}

func (j *Job) setStatistics(s *bq.JobStatistics, c *Client) {
	if s == nil || j.lastStatus == nil {
		return
	}
	js := &JobStatistics{
		CreationTime:        unixMillisToTime(s.CreationTime),
		StartTime:           unixMillisToTime(s.StartTime),
		EndTime:             unixMillisToTime(s.EndTime),
		TotalBytesProcessed: s.TotalBytesProcessed,
	}
	switch {
	case s.Extract != nil:
		js.Details = &ExtractStatistics{
			DestinationURIFileCounts: []int64(s.Extract.DestinationUriFileCounts),
		}
	case s.Load != nil:
		js.Details = &LoadStatistics{
			InputFileBytes: s.Load.InputFileBytes,
			InputFiles:     s.Load.InputFiles,
			OutputBytes:    s.Load.OutputBytes,
			OutputRows:     s.Load.OutputRows,
		}
	case s.Query != nil:
		var names []string
		for _, qp := range s.Query.UndeclaredQueryParameters {
			names = append(names, qp.Name)
		}
		var tables []*Table
		for _, tr := range s.Query.ReferencedTables {
			tables = append(tables, bqToTable(tr, c))
		}
		js.Details = &QueryStatistics{
			BillingTier:                   s.Query.BillingTier,
			CacheHit:                      s.Query.CacheHit,
			StatementType:                 s.Query.StatementType,
			TotalBytesBilled:              s.Query.TotalBytesBilled,
			TotalBytesProcessed:           s.Query.TotalBytesProcessed,
			NumDMLAffectedRows:            s.Query.NumDmlAffectedRows,
			QueryPlan:                     queryPlanFromProto(s.Query.QueryPlan),
			Schema:                        bqToSchema(s.Query.Schema),
			ReferencedTables:              tables,
			UndeclaredQueryParameterNames: names,
		}
	}
	j.lastStatus.Statistics = js
}

func queryPlanFromProto(stages []*bq.ExplainQueryStage) []*ExplainQueryStage {
	var res []*ExplainQueryStage
	for _, s := range stages {
		var steps []*ExplainQueryStep
		for _, p := range s.Steps {
			steps = append(steps, &ExplainQueryStep{
				Kind:     p.Kind,
				Substeps: p.Substeps,
			})
		}
		res = append(res, &ExplainQueryStage{
			ComputeRatioAvg: s.ComputeRatioAvg,
			ComputeRatioMax: s.ComputeRatioMax,
			ID:              s.Id,
			Name:            s.Name,
			ReadRatioAvg:    s.ReadRatioAvg,
			ReadRatioMax:    s.ReadRatioMax,
			RecordsRead:     s.RecordsRead,
			RecordsWritten:  s.RecordsWritten,
			Status:          s.Status,
			Steps:           steps,
			WaitRatioAvg:    s.WaitRatioAvg,
			WaitRatioMax:    s.WaitRatioMax,
			WriteRatioAvg:   s.WriteRatioAvg,
			WriteRatioMax:   s.WriteRatioMax,
		})
	}
	return res
}
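Job.Config hides four concrete configuration types behind the JobConfig interface, so callers recover the original settings with a type switch. A sketch; the Dst fields appear on QueryConfig (query.go) and LoadConfig (load.go):

	conf, err := job.Config()
	if err != nil {
		return err
	}
	switch c := conf.(type) {
	case *QueryConfig:
		fmt.Println("query job; destination:", c.Dst)
	case *LoadConfig:
		fmt.Println("load job; destination:", c.Dst)
	case *CopyConfig, *ExtractConfig:
		fmt.Println("copy or extract job")
	}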

@@ -18,12 +18,11 @@ import (
	"testing"

	"cloud.google.com/go/internal/testutil"
	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
)

func TestCreateJobRef(t *testing.T) {
	defer fixRandomJobID("RANDOM")()
	defer fixRandomID("RANDOM")()
	for _, test := range []struct {
		jobID          string
		addJobIDSuffix bool

@@ -50,7 +49,8 @@ func TestCreateJobRef(t *testing.T) {
			want: "foo-RANDOM",
		},
	} {
		jr := createJobRef(test.jobID, test.addJobIDSuffix, "projectID")
		jc := JobIDConfig{JobID: test.jobID, AddJobIDSuffix: test.addJobIDSuffix}
		jr := jc.createJobRef("projectID")
		got := jr.JobId
		if got != test.want {
			t.Errorf("%q, %t: got %q, want %q", test.jobID, test.addJobIDSuffix, got, test.want)

@@ -58,10 +58,10 @@ func TestCreateJobRef(t *testing.T) {
	}
}

func fixRandomJobID(s string) func() {
	prev := randomJobIDFn
	randomJobIDFn = func() string { return s }
	return func() { randomJobIDFn = prev }
func fixRandomID(s string) func() {
	prev := randomIDFn
	randomIDFn = func() string { return s }
	return func() { randomIDFn = prev }
}

func checkJob(t *testing.T, i int, got, want *bq.Job) {

@@ -78,18 +78,3 @@ func checkJob(t *testing.T, i int, got, want *bq.Job) {
		t.Errorf("#%d: (got=-, want=+) %s", i, d)
	}
}

type testService struct {
	*bq.Job

	service
}

func (s *testService) insertJob(ctx context.Context, projectID string, conf *insertJobConf) (*Job, error) {
	s.Job = conf.job
	return &Job{}, nil
}

func (s *testService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
	return &JobStatus{State: Done}, nil
}
@@ -15,18 +15,14 @@
package bigquery

import (
	"io"

	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
)

// LoadConfig holds the configuration for a load job.
type LoadConfig struct {
	// JobID is the ID to use for the job. If empty, a random job ID will be generated.
	JobID string

	// If AddJobIDSuffix is true, then a random string will be appended to JobID.
	AddJobIDSuffix bool

	// Src is the source from which data will be loaded.
	Src LoadSource

@@ -40,10 +36,53 @@ type LoadConfig struct {
	// WriteDisposition specifies how existing data in the destination table is treated.
	// The default is WriteAppend.
	WriteDisposition TableWriteDisposition

	// The labels associated with this job.
	Labels map[string]string

	// If non-nil, the destination table is partitioned by time.
	TimePartitioning *TimePartitioning
}

func (l *LoadConfig) toBQ() (*bq.JobConfiguration, io.Reader) {
	config := &bq.JobConfiguration{
		Labels: l.Labels,
		Load: &bq.JobConfigurationLoad{
			CreateDisposition: string(l.CreateDisposition),
			WriteDisposition:  string(l.WriteDisposition),
			DestinationTable:  l.Dst.toBQ(),
			TimePartitioning:  l.TimePartitioning.toBQ(),
		},
	}
	media := l.Src.populateLoadConfig(config.Load)
	return config, media
}

func bqToLoadConfig(q *bq.JobConfiguration, c *Client) *LoadConfig {
	lc := &LoadConfig{
		Labels:            q.Labels,
		CreateDisposition: TableCreateDisposition(q.Load.CreateDisposition),
		WriteDisposition:  TableWriteDisposition(q.Load.WriteDisposition),
		Dst:               bqToTable(q.Load.DestinationTable, c),
		TimePartitioning:  bqToTimePartitioning(q.Load.TimePartitioning),
	}
	var fc *FileConfig
	if len(q.Load.SourceUris) == 0 {
		s := NewReaderSource(nil)
		fc = &s.FileConfig
		lc.Src = s
	} else {
		s := NewGCSReference(q.Load.SourceUris...)
		fc = &s.FileConfig
		lc.Src = s
	}
	bqPopulateFileConfig(q.Load, fc)
	return lc
}

// A Loader loads data from Google Cloud Storage into a BigQuery table.
type Loader struct {
	JobIDConfig
	LoadConfig
	c *Client
}

@@ -54,7 +93,8 @@ type Loader struct {
// This package defines two LoadSources: GCSReference, for Google Cloud Storage
// objects, and ReaderSource, for data read from an io.Reader.
type LoadSource interface {
	populateInsertJobConfForLoad(conf *insertJobConf)
	// populates config, returns media
	populateLoadConfig(*bq.JobConfigurationLoad) io.Reader
}

// LoaderFrom returns a Loader which can be used to load data into a BigQuery table.

@@ -73,17 +113,14 @@ func (t *Table) LoaderFrom(src LoadSource) *Loader {

// Run initiates a load job.
func (l *Loader) Run(ctx context.Context) (*Job, error) {
	job := &bq.Job{
		JobReference: createJobRef(l.JobID, l.AddJobIDSuffix, l.c.projectID),
		Configuration: &bq.JobConfiguration{
			Load: &bq.JobConfigurationLoad{
				CreateDisposition: string(l.CreateDisposition),
				WriteDisposition:  string(l.WriteDisposition),
			},
		},
	}
	conf := &insertJobConf{job: job}
	l.Src.populateInsertJobConfForLoad(conf)
	job.Configuration.Load.DestinationTable = l.Dst.tableRefProto()
	return l.c.insertJob(ctx, conf)
	job, media := l.newJob()
	return l.c.insertJob(ctx, job, media)
}

func (l *Loader) newJob() (*bq.Job, io.Reader) {
	config, media := l.LoadConfig.toBQ()
	return &bq.Job{
		JobReference:  l.JobIDConfig.createJobRef(l.c.projectID),
		Configuration: config,
	}, media
}
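All load-job settings now live on LoadConfig, so a Loader is configured by assigning fields on its embedded structs before Run. A sketch with illustrative bucket, dataset and table names:

	gcs := NewGCSReference("gs://my-bucket/data.csv")
	loader := client.Dataset("mydataset").Table("mytable").LoaderFrom(gcs)
	loader.CreateDisposition = CreateNever
	loader.WriteDisposition = WriteTruncate
	loader.Labels = map[string]string{"team": "ingest"}
	loader.TimePartitioning = &TimePartitioning{Expiration: 24 * time.Hour}
	job, err := loader.Run(ctx)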

@@ -17,8 +17,11 @@ package bigquery
import (
	"strings"
	"testing"
	"time"

	"golang.org/x/net/context"
	"cloud.google.com/go/internal/testutil"
	"github.com/google/go-cmp/cmp"
	"github.com/google/go-cmp/cmp/cmpopts"

	bq "google.golang.org/api/bigquery/v2"
)

@@ -67,12 +70,13 @@ func bqNestedFieldSchema() *bq.TableFieldSchema {
}

func TestLoad(t *testing.T) {
	defer fixRandomJobID("RANDOM")()
	defer fixRandomID("RANDOM")()
	c := &Client{projectID: "client-project-id"}

	testCases := []struct {
		dst    *Table
		src    LoadSource
		jobID  string
		config LoadConfig
		want   *bq.Job
	}{

@@ -82,17 +86,24 @@ func TestLoad(t *testing.T) {
			want: defaultLoadJob(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			dst:   c.Dataset("dataset-id").Table("table-id"),
			jobID: "ajob",
			config: LoadConfig{
				CreateDisposition: CreateNever,
				WriteDisposition:  WriteTruncate,
				JobID:             "ajob",
				Labels:            map[string]string{"a": "b"},
				TimePartitioning:  &TimePartitioning{Expiration: 1234 * time.Millisecond},
			},
			src: NewGCSReference("uri"),
			want: func() *bq.Job {
				j := defaultLoadJob()
				j.Configuration.Labels = map[string]string{"a": "b"}
				j.Configuration.Load.CreateDisposition = "CREATE_NEVER"
				j.Configuration.Load.WriteDisposition = "WRITE_TRUNCATE"
				j.Configuration.Load.TimePartitioning = &bq.TimePartitioning{
					Type:         "DAY",
					ExpirationMs: 1234,
				}
				j.JobReference = &bq.JobReference{
					JobId:     "ajob",
					ProjectId: "client-project-id",

@@ -211,16 +222,23 @@ func TestLoad(t *testing.T) {
	}

	for i, tc := range testCases {
		s := &testService{}
		c.service = s
		loader := tc.dst.LoaderFrom(tc.src)
		loader.JobID = tc.jobID
		tc.config.Src = tc.src
		tc.config.Dst = tc.dst
		loader.LoadConfig = tc.config
		if _, err := loader.Run(context.Background()); err != nil {
			t.Errorf("#%d: err calling Loader.Run: %v", i, err)
			continue
		got, _ := loader.newJob()
		checkJob(t, i, got, tc.want)

		jc, err := bqToJobConfig(got.Configuration, c)
		if err != nil {
			t.Fatalf("#%d: %v", i, err)
		}
		diff := testutil.Diff(jc.(*LoadConfig), &loader.LoadConfig,
			cmp.AllowUnexported(Table{}, Client{}),
			cmpopts.IgnoreUnexported(ReaderSource{}))
		if diff != "" {
			t.Errorf("#%d: (got=-, want=+):\n%s", i, diff)
		}
		checkJob(t, i, s.Job, tc.want)
	}
}

@@ -20,6 +20,7 @@ import (
	"fmt"
	"reflect"
	"regexp"
	"strings"
	"time"

	"cloud.google.com/go/civil"

@@ -36,17 +37,24 @@ var (
	validFieldName = regexp.MustCompile("^[a-zA-Z_][a-zA-Z0-9_]{0,127}$")
)

const nullableTagOption = "nullable"

func bqTagParser(t reflect.StructTag) (name string, keep bool, other interface{}, err error) {
	if s := t.Get("bigquery"); s != "" {
		if s == "-" {
			return "", false, nil, nil
		}
		if !validFieldName.MatchString(s) {
			return "", false, nil, errInvalidFieldName
		}
		return s, true, nil, nil
	name, keep, opts, err := fields.ParseStandardTag("bigquery", t)
	if err != nil {
		return "", false, nil, err
	}
	return "", true, nil, nil
	if name != "" && !validFieldName.MatchString(name) {
		return "", false, nil, errInvalidFieldName
	}
	for _, opt := range opts {
		if opt != nullableTagOption {
			return "", false, nil, fmt.Errorf(
				"bigquery: invalid tag option %q. The only valid option is %q",
				opt, nullableTagOption)
		}
	}
	return name, keep, opts, nil
}

var fieldCache = fields.NewCache(bqTagParser, nil, nil)

@@ -77,8 +85,9 @@ type QueryParameter struct {
	Name string

	// Value is the value of the parameter.
	// The following Go types are supported, with their corresponding
	// Bigquery types:
	//
	// When you create a QueryParameter to send to BigQuery, the following Go types
	// are supported, with their corresponding Bigquery types:
	//   int, int8, int16, int32, int64, uint8, uint16, uint32: INT64
	//   Note that uint, uint64 and uintptr are not supported, because
	//   they may contain values that cannot fit into a 64-bit signed integer.

@@ -89,10 +98,17 @@ type QueryParameter struct {
	//   time.Time: TIMESTAMP
	//   Arrays and slices of the above.
	//   Structs of the above. Only the exported fields are used.
	//
	// When a QueryParameter is returned inside a QueryConfig from a call to
	// Job.Config:
	//   Integers are of type int64.
	//   Floating-point values are of type float64.
	//   Arrays are of type []interface{}, regardless of the array element type.
	//   Structs are of type map[string]interface{}.
	Value interface{}
}

func (p QueryParameter) toRaw() (*bq.QueryParameter, error) {
func (p QueryParameter) toBQ() (*bq.QueryParameter, error) {
	pv, err := paramValue(reflect.ValueOf(p.Value))
	if err != nil {
		return nil, err

@@ -189,12 +205,11 @@ func paramValue(v reflect.Value) (bq.QueryParameterValue, error) {

	case typeOfTime:
		// civil.Time has nanosecond resolution, but BigQuery TIME only microsecond.
		res.Value = civilTimeParamString(v.Interface().(civil.Time))
		res.Value = CivilTimeString(v.Interface().(civil.Time))
		return res, nil

	case typeOfDateTime:
		dt := v.Interface().(civil.DateTime)
		res.Value = dt.Date.String() + " " + civilTimeParamString(dt.Time)
		res.Value = CivilDateTimeString(v.Interface().(civil.DateTime))
		return res, nil

	case typeOfGoTime:

@@ -254,12 +269,81 @@ func paramValue(v reflect.Value) (bq.QueryParameterValue, error) {
	return res, nil
}

func civilTimeParamString(t civil.Time) string {
	if t.Nanosecond == 0 {
		return t.String()
	} else {
		micro := (t.Nanosecond + 500) / 1000 // round to nearest microsecond
		t.Nanosecond = 0
		return t.String() + fmt.Sprintf(".%06d", micro)
func bqToQueryParameter(q *bq.QueryParameter) (QueryParameter, error) {
	p := QueryParameter{Name: q.Name}
	val, err := convertParamValue(q.ParameterValue, q.ParameterType)
	if err != nil {
		return QueryParameter{}, err
	}
	p.Value = val
	return p, nil
}

var paramTypeToFieldType = map[string]FieldType{
	int64ParamType.Type:   IntegerFieldType,
	float64ParamType.Type: FloatFieldType,
	boolParamType.Type:    BooleanFieldType,
	stringParamType.Type:  StringFieldType,
	bytesParamType.Type:   BytesFieldType,
	dateParamType.Type:    DateFieldType,
	timeParamType.Type:    TimeFieldType,
}

// Convert a parameter value from the service to a Go value. This is similar to, but
// not quite the same as, converting data values.
func convertParamValue(qval *bq.QueryParameterValue, qtype *bq.QueryParameterType) (interface{}, error) {
	switch qtype.Type {
	case "ARRAY":
		if qval == nil {
			return []interface{}(nil), nil
		}
		return convertParamArray(qval.ArrayValues, qtype.ArrayType)
	case "STRUCT":
		if qval == nil {
			return map[string]interface{}(nil), nil
		}
		return convertParamStruct(qval.StructValues, qtype.StructTypes)
	case "TIMESTAMP":
		return time.Parse(timestampFormat, qval.Value)
	case "DATETIME":
		parts := strings.Fields(qval.Value)
		if len(parts) != 2 {
			return nil, fmt.Errorf("bigquery: bad DATETIME value %q", qval.Value)
		}
		return civil.ParseDateTime(parts[0] + "T" + parts[1])
	default:
		return convertBasicType(qval.Value, paramTypeToFieldType[qtype.Type])
	}
}

// convertParamArray converts a query parameter array value to a Go value. It
// always returns a []interface{}.
func convertParamArray(elVals []*bq.QueryParameterValue, elType *bq.QueryParameterType) ([]interface{}, error) {
	var vals []interface{}
	for _, el := range elVals {
		val, err := convertParamValue(el, elType)
		if err != nil {
			return nil, err
		}
		vals = append(vals, val)
	}
	return vals, nil
}

// convertParamStruct converts a query parameter struct value into a Go value. It
// always returns a map[string]interface{}.
func convertParamStruct(sVals map[string]bq.QueryParameterValue, sTypes []*bq.QueryParameterTypeStructTypes) (map[string]interface{}, error) {
	vals := map[string]interface{}{}
	for _, st := range sTypes {
		if sv, ok := sVals[st.Name]; ok {
			val, err := convertParamValue(&sv, st.Type)
			if err != nil {
				return nil, err
			}
			vals[st.Name] = val
		} else {
			vals[st.Name] = nil
		}
	}
	return vals, nil
}
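With tag parsing routed through fields.ParseStandardTag, bigquery struct tags follow the standard comma-separated form, and "nullable" is the only accepted option. A sketch of how such tags look on a user struct; the assumption that "-" keeps its usual skip meaning and that "nullable" marks the inferred field NULLABLE follows the standard-tag convention rather than code shown here:

	type Row struct {
		Name  string `bigquery:"name"`
		Score int64  `bigquery:"score,nullable"` // "nullable" is the only valid option, per bqTagParser
		Skip  string `bigquery:"-"`              // assumed to be excluded, as in encoding/json
	}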
|
|
|
@ -30,40 +30,79 @@ import (
|
|||
)
|
||||
|
||||
var scalarTests = []struct {
|
||||
val interface{}
|
||||
want string
|
||||
val interface{} // The Go value
|
||||
wantVal string // paramValue's desired output
|
||||
wantType *bq.QueryParameterType // paramType's desired output
|
||||
}{
|
||||
{int64(0), "0"},
|
||||
{3.14, "3.14"},
|
||||
{3.14159e-87, "3.14159e-87"},
|
||||
{true, "true"},
|
||||
{"string", "string"},
|
||||
{"\u65e5\u672c\u8a9e\n", "\u65e5\u672c\u8a9e\n"},
|
||||
{math.NaN(), "NaN"},
|
||||
{[]byte("foo"), "Zm9v"}, // base64 encoding of "foo"
|
||||
{int64(0), "0", int64ParamType},
|
||||
{3.14, "3.14", float64ParamType},
|
||||
{3.14159e-87, "3.14159e-87", float64ParamType},
|
||||
{true, "true", boolParamType},
|
||||
{"string", "string", stringParamType},
|
||||
{"\u65e5\u672c\u8a9e\n", "\u65e5\u672c\u8a9e\n", stringParamType},
|
||||
{math.NaN(), "NaN", float64ParamType},
|
||||
{[]byte("foo"), "Zm9v", bytesParamType}, // base64 encoding of "foo"
|
||||
{time.Date(2016, 3, 20, 4, 22, 9, 5000, time.FixedZone("neg1-2", -3720)),
|
||||
"2016-03-20 04:22:09.000005-01:02"},
|
||||
{civil.Date{2016, 3, 20}, "2016-03-20"},
|
||||
{civil.Time{4, 5, 6, 789000000}, "04:05:06.789000"},
|
||||
{civil.DateTime{civil.Date{2016, 3, 20}, civil.Time{4, 5, 6, 789000000}}, "2016-03-20 04:05:06.789000"},
|
||||
"2016-03-20 04:22:09.000005-01:02",
|
||||
timestampParamType},
|
||||
{civil.Date{2016, 3, 20}, "2016-03-20", dateParamType},
|
||||
{civil.Time{4, 5, 6, 789000000}, "04:05:06.789000", timeParamType},
|
||||
{civil.DateTime{civil.Date{2016, 3, 20}, civil.Time{4, 5, 6, 789000000}},
|
||||
"2016-03-20 04:05:06.789000",
|
||||
dateTimeParamType},
|
||||
}
|
||||
|
||||
type S1 struct {
|
||||
A int
|
||||
B *S2
|
||||
C bool
|
||||
}
|
||||
type (
|
||||
S1 struct {
|
||||
A int
|
||||
B *S2
|
||||
C bool
|
||||
}
|
||||
S2 struct {
|
||||
D string
|
||||
e int
|
||||
}
|
||||
)
|
||||
|
||||
type S2 struct {
|
||||
D string
|
||||
e int
|
||||
}
|
||||
var (
|
||||
s1 = S1{
|
||||
A: 1,
|
||||
B: &S2{D: "s"},
|
||||
C: true,
|
||||
}
|
||||
|
||||
var s1 = S1{
|
||||
A: 1,
|
||||
B: &S2{D: "s"},
|
||||
C: true,
|
||||
}
|
||||
s1ParamType = &bq.QueryParameterType{
|
||||
Type: "STRUCT",
|
||||
StructTypes: []*bq.QueryParameterTypeStructTypes{
|
||||
{Name: "A", Type: int64ParamType},
|
||||
{Name: "B", Type: &bq.QueryParameterType{
|
||||
Type: "STRUCT",
|
||||
StructTypes: []*bq.QueryParameterTypeStructTypes{
|
||||
{Name: "D", Type: stringParamType},
|
||||
},
|
||||
}},
|
||||
{Name: "C", Type: boolParamType},
|
||||
},
|
||||
}
|
||||
|
||||
s1ParamValue = bq.QueryParameterValue{
|
||||
StructValues: map[string]bq.QueryParameterValue{
|
||||
"A": sval("1"),
|
||||
"B": bq.QueryParameterValue{
|
||||
StructValues: map[string]bq.QueryParameterValue{
|
||||
"D": sval("s"),
|
||||
},
|
||||
},
|
||||
"C": sval("true"),
|
||||
},
|
||||
}
|
||||
|
||||
s1ParamReturnValue = map[string]interface{}{
|
||||
"A": int64(1),
|
||||
"B": map[string]interface{}{"D": "s"},
|
||||
"C": true,
|
||||
}
|
||||
)
|
||||
|
||||
func sval(s string) bq.QueryParameterValue {
|
||||
return bq.QueryParameterValue{Value: s}
|
||||
|
@ -76,7 +115,7 @@ func TestParamValueScalar(t *testing.T) {
|
|||
t.Errorf("%v: got %v, want nil", test.val, err)
|
||||
continue
|
||||
}
|
||||
want := sval(test.want)
|
||||
want := sval(test.wantVal)
|
||||
if !testutil.Equal(got, want) {
|
||||
t.Errorf("%v:\ngot %+v\nwant %+v", test.val, got, want)
|
||||
}
|
||||
|
@ -113,19 +152,8 @@ func TestParamValueStruct(t *testing.T) {
|
|||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
want := bq.QueryParameterValue{
|
||||
StructValues: map[string]bq.QueryParameterValue{
|
||||
"A": sval("1"),
|
||||
"B": bq.QueryParameterValue{
|
||||
StructValues: map[string]bq.QueryParameterValue{
|
||||
"D": sval("s"),
|
||||
},
|
||||
},
|
||||
"C": sval("true"),
|
||||
},
|
||||
}
|
||||
if !testutil.Equal(got, want) {
|
||||
t.Errorf("got %+v\nwant %+v", got, want)
|
||||
if !testutil.Equal(got, s1ParamValue) {
|
||||
t.Errorf("got %+v\nwant %+v", got, s1ParamValue)
|
||||
}
|
||||
}
|
||||
|
||||
|
@ -141,35 +169,24 @@ func TestParamValueErrors(t *testing.T) {
|
|||
}
|
||||
|
||||
func TestParamType(t *testing.T) {
|
||||
for _, test := range scalarTests {
|
||||
got, err := paramType(reflect.TypeOf(test.val))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !testutil.Equal(got, test.wantType) {
|
||||
t.Errorf("%v (%T): got %v, want %v", test.val, test.val, got, test.wantType)
|
||||
}
|
||||
}
|
||||
for _, test := range []struct {
|
||||
val interface{}
|
||||
want *bq.QueryParameterType
|
||||
}{
|
||||
{0, int64ParamType},
|
||||
{uint32(32767), int64ParamType},
|
||||
{3.14, float64ParamType},
|
||||
{float32(3.14), float64ParamType},
|
||||
{math.NaN(), float64ParamType},
|
||||
{true, boolParamType},
|
||||
{"", stringParamType},
|
||||
{"string", stringParamType},
|
||||
{time.Now(), timestampParamType},
|
||||
{[]byte("foo"), bytesParamType},
|
||||
{[]int{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}},
|
||||
{[3]bool{}, &bq.QueryParameterType{Type: "ARRAY", ArrayType: boolParamType}},
|
||||
{S1{}, &bq.QueryParameterType{
|
||||
Type: "STRUCT",
|
||||
StructTypes: []*bq.QueryParameterTypeStructTypes{
|
||||
{Name: "A", Type: int64ParamType},
|
||||
{Name: "B", Type: &bq.QueryParameterType{
|
||||
Type: "STRUCT",
|
||||
StructTypes: []*bq.QueryParameterTypeStructTypes{
|
||||
{Name: "D", Type: stringParamType},
|
||||
},
|
||||
}},
|
||||
{Name: "C", Type: boolParamType},
|
||||
},
|
||||
}},
|
||||
{S1{}, s1ParamType},
|
||||
} {
|
||||
got, err := paramType(reflect.TypeOf(test.val))
|
||||
if err != nil {
|
||||
|
@ -192,17 +209,74 @@ func TestParamTypeErrors(t *testing.T) {
|
|||
}
|
||||
}
|
||||
|
||||
func TestIntegration_ScalarParam(t *testing.T) {
|
||||
c := getClient(t)
|
||||
func TestConvertParamValue(t *testing.T) {
|
||||
// Scalars.
|
||||
for _, test := range scalarTests {
|
||||
got, err := paramRoundTrip(c, test.val)
|
||||
pval, err := paramValue(reflect.ValueOf(test.val))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !testutil.Equal(got, test.val, cmp.Comparer(func(t1, t2 time.Time) bool {
|
||||
return t1.Round(time.Microsecond).Equal(t2.Round(time.Microsecond))
|
||||
})) {
|
||||
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", got, got, test.val, test.val)
|
||||
ptype, err := paramType(reflect.TypeOf(test.val))
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
got, err := convertParamValue(&pval, ptype)
|
||||
if err != nil {
|
||||
t.Fatalf("convertParamValue(%+v, %+v): %v", pval, ptype, err)
|
||||
}
|
||||
if !testutil.Equal(got, test.val) {
|
||||
t.Errorf("%#v: got %#v", test.val, got)
|
||||
}
|
||||
}
|
||||
// Arrays.
|
||||
for _, test := range []struct {
|
||||
pval *bq.QueryParameterValue
|
||||
want []interface{}
|
||||
}{
|
||||
{
|
||||
&bq.QueryParameterValue{},
|
||||
nil,
|
||||
},
|
||||
{
|
||||
&bq.QueryParameterValue{
|
||||
ArrayValues: []*bq.QueryParameterValue{{Value: "1"}, {Value: "2"}},
|
||||
},
|
||||
[]interface{}{int64(1), int64(2)},
|
||||
},
|
||||
} {
|
||||
ptype := &bq.QueryParameterType{Type: "ARRAY", ArrayType: int64ParamType}
|
||||
got, err := convertParamValue(test.pval, ptype)
|
||||
if err != nil {
|
||||
t.Fatalf("%+v: %v", test.pval, err)
|
||||
}
|
||||
if !testutil.Equal(got, test.want) {
|
||||
t.Errorf("%+v: got %+v, want %+v", test.pval, got, test.want)
|
||||
}
|
||||
}
|
||||
// Structs.
|
||||
got, err := convertParamValue(&s1ParamValue, s1ParamType)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !testutil.Equal(got, s1ParamReturnValue) {
|
||||
t.Errorf("got %+v, want %+v", got, s1ParamReturnValue)
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegration_ScalarParam(t *testing.T) {
|
||||
roundToMicros := cmp.Transformer("RoundToMicros",
|
||||
func(t time.Time) time.Time { return t.Round(time.Microsecond) })
|
||||
c := getClient(t)
|
||||
for _, test := range scalarTests {
|
||||
gotData, gotParam, err := paramRoundTrip(c, test.val)
|
||||
if err != nil {
|
||||
t.Fatal(err)
|
||||
}
|
||||
if !testutil.Equal(gotData, test.val, roundToMicros) {
|
||||
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", gotData, gotData, test.val, test.val)
|
||||
}
|
||||
if !testutil.Equal(gotParam, test.val, roundToMicros) {
|
||||
t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", gotParam, gotParam, test.val, test.val)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
@ -210,40 +284,78 @@ func TestIntegration_ScalarParam(t *testing.T) {
func TestIntegration_OtherParam(t *testing.T) {
	c := getClient(t)
	for _, test := range []struct {
		val  interface{}
		want interface{}
		val       interface{}
		wantData  interface{}
		wantParam interface{}
	}{
		{[]int(nil), []Value(nil)},
		{[]int{}, []Value(nil)},
		{[]int{1, 2}, []Value{int64(1), int64(2)}},
		{[3]int{1, 2, 3}, []Value{int64(1), int64(2), int64(3)}},
		{S1{}, []Value{int64(0), nil, false}},
		{s1, []Value{int64(1), []Value{"s"}, true}},
		{[]int(nil), []Value(nil), []interface{}(nil)},
		{[]int{}, []Value(nil), []interface{}(nil)},
		{
			[]int{1, 2},
			[]Value{int64(1), int64(2)},
			[]interface{}{int64(1), int64(2)},
		},
		{
			[3]int{1, 2, 3},
			[]Value{int64(1), int64(2), int64(3)},
			[]interface{}{int64(1), int64(2), int64(3)},
		},
		{
			S1{},
			[]Value{int64(0), nil, false},
			map[string]interface{}{
				"A": int64(0),
				"B": nil,
				"C": false,
			},
		},
		{
			s1,
			[]Value{int64(1), []Value{"s"}, true},
			s1ParamReturnValue,
		},
	} {
		got, err := paramRoundTrip(c, test.val)
		gotData, gotParam, err := paramRoundTrip(c, test.val)
		if err != nil {
			t.Fatal(err)
		}
		if !testutil.Equal(got, test.want) {
			t.Errorf("\ngot %#v (%T)\nwant %#v (%T)", got, got, test.want, test.want)
		if !testutil.Equal(gotData, test.wantData) {
			t.Errorf("%#v:\ngot %#v (%T)\nwant %#v (%T)",
				test.val, gotData, gotData, test.wantData, test.wantData)
		}
		if !testutil.Equal(gotParam, test.wantParam) {
			t.Errorf("%#v:\ngot %#v (%T)\nwant %#v (%T)",
				test.val, gotParam, gotParam, test.wantParam, test.wantParam)
		}
	}
}

func paramRoundTrip(c *Client, x interface{}) (Value, error) {
// paramRoundTrip passes x as a query parameter to BigQuery. It returns
// the resulting data value from running the query and the parameter value from
// the returned job configuration.
func paramRoundTrip(c *Client, x interface{}) (data Value, param interface{}, err error) {
	ctx := context.Background()
	q := c.Query("select ?")
	q.Parameters = []QueryParameter{{Value: x}}
	it, err := q.Read(context.Background())
	job, err := q.Run(ctx)
	if err != nil {
		return nil, err
		return nil, nil, err
	}
	it, err := job.Read(ctx)
	if err != nil {
		return nil, nil, err
	}
	var val []Value
	err = it.Next(&val)
	if err != nil {
		return nil, err
		return nil, nil, err
	}
	if len(val) != 1 {
		return nil, errors.New("wrong number of values")
		return nil, nil, errors.New("wrong number of values")
	}
	return val[0], nil
	conf, err := job.Config()
	if err != nil {
		return nil, nil, err
	}
	return val[0], conf.(*QueryConfig).Parameters[0].Value, nil
}

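The helper above doubles as the canonical pattern for positional query parameters. A minimal caller-side sketch, assuming a placeholder project ID ("my-project") and eliding client cleanup:

	ctx := context.Background()
	client, err := bigquery.NewClient(ctx, "my-project") // placeholder project
	if err != nil {
		log.Fatal(err)
	}
	q := client.Query("SELECT ? * 2")
	q.Parameters = []bigquery.QueryParameter{{Value: 21}} // bound positionally to "?"
	it, err := q.Read(ctx)
	// iterate with it.Next, as paramRoundTrip does above
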
@ -23,12 +23,6 @@ import (

// QueryConfig holds the configuration for a query job.
type QueryConfig struct {
	// JobID is the ID to use for the job. If empty, a random job ID will be generated.
	JobID string

	// If AddJobIDSuffix is true, then a random string will be appended to JobID.
	AddJobIDSuffix bool

	// Dst is the table into which the results of the query will be written.
	// If this field is nil, a temporary table will be created.
	Dst *Table

@ -43,6 +37,9 @@ type QueryConfig struct {

	// TableDefinitions describes data sources outside of BigQuery.
	// The map keys may be used as table names in the query string.
	//
	// When a QueryConfig is returned from Job.Config, the map values
	// are always of type *ExternalDataConfig.
	TableDefinitions map[string]ExternalData

	// CreateDisposition specifies the circumstances under which the destination table will be created.

@ -90,6 +87,7 @@ type QueryConfig struct {
	MaxBytesBilled int64

	// UseStandardSQL causes the query to use standard SQL. The default.
	// Deprecated: use UseLegacySQL.
	UseStandardSQL bool

	// UseLegacySQL causes the query to use legacy SQL.

@ -101,6 +99,130 @@ type QueryConfig struct {
	// If the query uses named syntax ("@p"), then all parameters must have names.
	// It is illegal to mix positional and named syntax.
	Parameters []QueryParameter

	// The labels associated with this job.
	Labels map[string]string

	// If true, don't actually run this job. A valid query will return a mostly
	// empty response with some processing statistics, while an invalid query will
	// return the same error it would if it wasn't a dry run.
	//
	// Query.Read will fail with dry-run queries. Call Query.Run instead, and then
	// call LastStatus on the returned job to get statistics. Calling Status on a
	// dry-run job will fail.
	DryRun bool
}

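The DryRun doc comment above encodes a specific calling sequence. A short sketch of it, assuming a client c and context ctx are already in scope:

	q := c.Query("SELECT name FROM dataset.table")
	q.DryRun = true
	job, err := q.Run(ctx) // Run, not Read: a dry run returns no rows
	if err != nil {
		log.Fatal(err)
	}
	// Statistics arrive with the job itself; no polling via Status.
	fmt.Println(job.LastStatus().Statistics.TotalBytesProcessed)
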
func (qc *QueryConfig) toBQ() (*bq.JobConfiguration, error) {
	qconf := &bq.JobConfigurationQuery{
		Query:              qc.Q,
		CreateDisposition:  string(qc.CreateDisposition),
		WriteDisposition:   string(qc.WriteDisposition),
		AllowLargeResults:  qc.AllowLargeResults,
		Priority:           string(qc.Priority),
		MaximumBytesBilled: qc.MaxBytesBilled,
	}
	if len(qc.TableDefinitions) > 0 {
		qconf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration)
	}
	for name, data := range qc.TableDefinitions {
		qconf.TableDefinitions[name] = data.toBQ()
	}
	if qc.DefaultProjectID != "" || qc.DefaultDatasetID != "" {
		qconf.DefaultDataset = &bq.DatasetReference{
			DatasetId: qc.DefaultDatasetID,
			ProjectId: qc.DefaultProjectID,
		}
	}
	if tier := int64(qc.MaxBillingTier); tier > 0 {
		qconf.MaximumBillingTier = &tier
	}
	f := false
	if qc.DisableQueryCache {
		qconf.UseQueryCache = &f
	}
	if qc.DisableFlattenedResults {
		qconf.FlattenResults = &f
		// DisableFlattenResults implies AllowLargeResults.
		qconf.AllowLargeResults = true
	}
	if qc.UseStandardSQL && qc.UseLegacySQL {
		return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
	}
	if len(qc.Parameters) > 0 && qc.UseLegacySQL {
		return nil, errors.New("bigquery: cannot provide both Parameters (implying standard SQL) and UseLegacySQL")
	}
	if qc.UseLegacySQL {
		qconf.UseLegacySql = true
	} else {
		qconf.UseLegacySql = false
		qconf.ForceSendFields = append(qconf.ForceSendFields, "UseLegacySql")
	}
	if qc.Dst != nil && !qc.Dst.implicitTable() {
		qconf.DestinationTable = qc.Dst.toBQ()
	}
	for _, p := range qc.Parameters {
		qp, err := p.toBQ()
		if err != nil {
			return nil, err
		}
		qconf.QueryParameters = append(qconf.QueryParameters, qp)
	}
	return &bq.JobConfiguration{
		Labels: qc.Labels,
		DryRun: qc.DryRun,
		Query:  qconf,
	}, nil
}

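The UseLegacySql branch leans on how the generated API structs marshal: false is the zero value and would normally be omitted from the request JSON, so ForceSendFields is what keeps an explicit false on the wire. A reduced sketch of the mechanism (field names are from the generated bq package):

	conf := &bq.JobConfigurationQuery{UseLegacySql: false}
	// Without this, the JSON encoder drops the zero-valued field and the
	// server falls back to its own default; with it, "useLegacySql": false
	// is sent explicitly.
	conf.ForceSendFields = append(conf.ForceSendFields, "UseLegacySql")
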
func bqToQueryConfig(q *bq.JobConfiguration, c *Client) (*QueryConfig, error) {
	qq := q.Query
	qc := &QueryConfig{
		Labels:            q.Labels,
		DryRun:            q.DryRun,
		Q:                 qq.Query,
		CreateDisposition: TableCreateDisposition(qq.CreateDisposition),
		WriteDisposition:  TableWriteDisposition(qq.WriteDisposition),
		AllowLargeResults: qq.AllowLargeResults,
		Priority:          QueryPriority(qq.Priority),
		MaxBytesBilled:    qq.MaximumBytesBilled,
		UseLegacySQL:      qq.UseLegacySql,
		UseStandardSQL:    !qq.UseLegacySql,
	}
	if len(qq.TableDefinitions) > 0 {
		qc.TableDefinitions = make(map[string]ExternalData)
	}
	for name, qedc := range qq.TableDefinitions {
		edc, err := bqToExternalDataConfig(&qedc)
		if err != nil {
			return nil, err
		}
		qc.TableDefinitions[name] = edc
	}
	if qq.DefaultDataset != nil {
		qc.DefaultProjectID = qq.DefaultDataset.ProjectId
		qc.DefaultDatasetID = qq.DefaultDataset.DatasetId
	}
	if qq.MaximumBillingTier != nil {
		qc.MaxBillingTier = int(*qq.MaximumBillingTier)
	}
	if qq.UseQueryCache != nil && !*qq.UseQueryCache {
		qc.DisableQueryCache = true
	}
	if qq.FlattenResults != nil && !*qq.FlattenResults {
		qc.DisableFlattenedResults = true
	}
	if qq.DestinationTable != nil {
		qc.Dst = bqToTable(qq.DestinationTable, c)
	}
	for _, qp := range qq.QueryParameters {
		p, err := bqToQueryParameter(qp)
		if err != nil {
			return nil, err
		}
		qc.Parameters = append(qc.Parameters, p)
	}
	return qc, nil
}

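bqToQueryConfig is the inverse direction that backs Job.Config for query jobs, which is how paramRoundTrip earlier recovers the parameter it sent. A caller-side sketch, assuming a query job is already in hand:

	conf, err := job.Config()
	if err != nil {
		log.Fatal(err)
	}
	if qc, ok := conf.(*bigquery.QueryConfig); ok {
		fmt.Println(qc.Q, qc.Parameters) // round-tripped from the service
	}
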
// QueryPriority specifies a priority with which a query is to be executed.

@ -113,8 +235,9 @@ const (

// A Query queries data from a BigQuery table. Use Client.Query to create a Query.
type Query struct {
	client *Client
	JobIDConfig
	QueryConfig
	client *Client
}

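With JobID and AddJobIDSuffix moved off QueryConfig, callers now set them through the embedded JobIDConfig, whose fields are promoted onto Query. A sketch, assuming a client c:

	q := c.Query("SELECT 1")
	q.JobID = "nightly-report" // promoted from the embedded JobIDConfig
	q.AddJobIDSuffix = true    // random suffix keeps reruns from colliding
	job, err := q.Run(ctx)
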
// Query creates a query with string q.

@ -128,83 +251,26 @@ func (c *Client) Query(q string) *Query {

// Run initiates a query job.
func (q *Query) Run(ctx context.Context) (*Job, error) {
	job := &bq.Job{
		JobReference: createJobRef(q.JobID, q.AddJobIDSuffix, q.client.projectID),
		Configuration: &bq.JobConfiguration{
			Query: &bq.JobConfigurationQuery{},
		},
	}
	if err := q.QueryConfig.populateJobQueryConfig(job.Configuration.Query); err != nil {
		return nil, err
	}
	j, err := q.client.insertJob(ctx, &insertJobConf{job: job})
	job, err := q.newJob()
	if err != nil {
		return nil, err
	}
	j, err := q.client.insertJob(ctx, job, nil)
	if err != nil {
		return nil, err
	}
	j.isQuery = true
	return j, nil
}

func (q *QueryConfig) populateJobQueryConfig(conf *bq.JobConfigurationQuery) error {
	conf.Query = q.Q

	if len(q.TableDefinitions) > 0 {
		conf.TableDefinitions = make(map[string]bq.ExternalDataConfiguration)
func (q *Query) newJob() (*bq.Job, error) {
	config, err := q.QueryConfig.toBQ()
	if err != nil {
		return nil, err
	}
	for name, data := range q.TableDefinitions {
		conf.TableDefinitions[name] = data.externalDataConfig()
	}

	if q.DefaultProjectID != "" || q.DefaultDatasetID != "" {
		conf.DefaultDataset = &bq.DatasetReference{
			DatasetId: q.DefaultDatasetID,
			ProjectId: q.DefaultProjectID,
		}
	}

	if tier := int64(q.MaxBillingTier); tier > 0 {
		conf.MaximumBillingTier = &tier
	}
	conf.CreateDisposition = string(q.CreateDisposition)
	conf.WriteDisposition = string(q.WriteDisposition)
	conf.AllowLargeResults = q.AllowLargeResults
	conf.Priority = string(q.Priority)

	f := false
	if q.DisableQueryCache {
		conf.UseQueryCache = &f
	}
	if q.DisableFlattenedResults {
		conf.FlattenResults = &f
		// DisableFlattenResults implies AllowLargeResults.
		conf.AllowLargeResults = true
	}
	if q.MaxBytesBilled >= 1 {
		conf.MaximumBytesBilled = q.MaxBytesBilled
	}
	if q.UseStandardSQL && q.UseLegacySQL {
		return errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
	}
	if len(q.Parameters) > 0 && q.UseLegacySQL {
		return errors.New("bigquery: cannot provide both Parameters (implying standard SQL) and UseLegacySQL")
	}
	if q.UseLegacySQL {
		conf.UseLegacySql = true
	} else {
		conf.UseLegacySql = false
		conf.ForceSendFields = append(conf.ForceSendFields, "UseLegacySql")
	}
	if q.Dst != nil && !q.Dst.implicitTable() {
		conf.DestinationTable = q.Dst.tableRefProto()
	}
	for _, p := range q.Parameters {
		qp, err := p.toRaw()
		if err != nil {
			return err
		}
		conf.QueryParameters = append(conf.QueryParameters, qp)
	}
	return nil
	return &bq.Job{
		JobReference:  q.JobIDConfig.createJobRef(q.client.projectID),
		Configuration: config,
	}, nil
}

// Read submits a query for execution and returns the results via a RowIterator.

@ -17,9 +17,9 @@ package bigquery
import (
	"testing"

	"cloud.google.com/go/internal/testutil"
	"github.com/google/go-cmp/cmp"

	"golang.org/x/net/context"
	"cloud.google.com/go/internal/testutil"

	bq "google.golang.org/api/bigquery/v2"
)

@ -46,15 +46,22 @@ func defaultQueryJob() *bq.Job {
	}
}

var defaultQuery = &QueryConfig{
	Q:                "query string",
	DefaultProjectID: "def-project-id",
	DefaultDatasetID: "def-dataset-id",
}

func TestQuery(t *testing.T) {
	defer fixRandomJobID("RANDOM")()
	defer fixRandomID("RANDOM")()
	c := &Client{
		projectID: "client-project-id",
	}
	testCases := []struct {
		dst  *Table
		src  *QueryConfig
		want *bq.Job
		dst         *Table
		src         *QueryConfig
		jobIDConfig JobIDConfig
		want        *bq.Job
	}{
		{
			dst: c.Dataset("dataset-id").Table("table-id"),

@ -64,21 +71,22 @@ func TestQuery(t *testing.T) {
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q: "query string",
				Q:      "query string",
				Labels: map[string]string{"a": "b"},
				DryRun: true,
			},
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Labels = map[string]string{"a": "b"}
				j.Configuration.DryRun = true
				j.Configuration.Query.DefaultDataset = nil
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:              "query string",
				JobID:          "jobID",
				AddJobIDSuffix: true,
			},
			dst:         c.Dataset("dataset-id").Table("table-id"),
			jobIDConfig: JobIDConfig{JobID: "jobID", AddJobIDSuffix: true},
			src:         &QueryConfig{Q: "query string"},
			want: func() *bq.Job {
				j := defaultQueryJob()
				j.Configuration.Query.DefaultDataset = nil

@ -244,16 +252,6 @@ func TestQuery(t *testing.T) {
				return j
			}(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{
				Q:                "query string",
				DefaultProjectID: "def-project-id",
				DefaultDatasetID: "def-dataset-id",
				MaxBytesBilled:   -1,
			},
			want: defaultQueryJob(),
		},
		{
			dst: c.Dataset("dataset-id").Table("table-id"),
			src: &QueryConfig{

@ -281,24 +279,71 @@ func TestQuery(t *testing.T) {
		},
	}
	for i, tc := range testCases {
		s := &testService{}
		c.service = s
		query := c.Query("")
		query.JobIDConfig = tc.jobIDConfig
		query.QueryConfig = *tc.src
		query.Dst = tc.dst
		if _, err := query.Run(context.Background()); err != nil {
		got, err := query.newJob()
		if err != nil {
			t.Errorf("#%d: err calling query: %v", i, err)
			continue
		}
		checkJob(t, i, s.Job, tc.want)
		checkJob(t, i, got, tc.want)

		// Round-trip.
		jc, err := bqToJobConfig(got.Configuration, c)
		if err != nil {
			t.Fatalf("#%d: %v", i, err)
		}
		wantConfig := query.QueryConfig
		// We set AllowLargeResults to true when DisableFlattenedResults is true.
		if wantConfig.DisableFlattenedResults {
			wantConfig.AllowLargeResults = true
		}
		// A QueryConfig with neither UseXXXSQL field set is equivalent
		// to one where UseStandardSQL = true.
		if !wantConfig.UseLegacySQL && !wantConfig.UseStandardSQL {
			wantConfig.UseStandardSQL = true
		}
		// Treat nil and empty tables the same, and ignore the client.
		tableEqual := func(t1, t2 *Table) bool {
			if t1 == nil {
				t1 = &Table{}
			}
			if t2 == nil {
				t2 = &Table{}
			}
			return t1.ProjectID == t2.ProjectID && t1.DatasetID == t2.DatasetID && t1.TableID == t2.TableID
		}
		// A table definition that is a GCSReference round-trips as an ExternalDataConfig.
		// TODO(jba): see if there is a way to express this with a transformer.
		gcsRefToEDC := func(g *GCSReference) *ExternalDataConfig {
			q := g.toBQ()
			e, _ := bqToExternalDataConfig(&q)
			return e
		}
		externalDataEqual := func(e1, e2 ExternalData) bool {
			if r, ok := e1.(*GCSReference); ok {
				e1 = gcsRefToEDC(r)
			}
			if r, ok := e2.(*GCSReference); ok {
				e2 = gcsRefToEDC(r)
			}
			return cmp.Equal(e1, e2)
		}
		diff := testutil.Diff(jc.(*QueryConfig), &wantConfig,
			cmp.Comparer(tableEqual),
			cmp.Comparer(externalDataEqual),
		)
		if diff != "" {
			t.Errorf("#%d: (got=-, want=+):\n%s", i, diff)
		}
	}
}

func TestConfiguringQuery(t *testing.T) {
	s := &testService{}
	c := &Client{
		projectID: "project-id",
		service:   s,
	}

	query := c.Query("q")

@ -326,30 +371,28 @@ func TestConfiguringQuery(t *testing.T) {
		},
	}

	if _, err := query.Run(context.Background()); err != nil {
		t.Fatalf("err calling Query.Run: %v", err)
	got, err := query.newJob()
	if err != nil {
		t.Fatalf("err calling Query.newJob: %v", err)
	}
	if diff := testutil.Diff(s.Job, want); diff != "" {
	if diff := testutil.Diff(got, want); diff != "" {
		t.Errorf("querying: -got +want:\n%s", diff)
	}
}

func TestQueryLegacySQL(t *testing.T) {
	c := &Client{
		projectID: "project-id",
		service:   &testService{},
	}
	c := &Client{projectID: "project-id"}
	q := c.Query("q")
	q.UseStandardSQL = true
	q.UseLegacySQL = true
	_, err := q.Run(context.Background())
	_, err := q.newJob()
	if err == nil {
		t.Error("UseStandardSQL and UseLegacySQL: got nil, want error")
	}
	q = c.Query("q")
	q.Parameters = []QueryParameter{{Name: "p", Value: 3}}
	q.UseLegacySQL = true
	_, err = q.Run(context.Background())
	_, err = q.newJob()
	if err == nil {
		t.Error("Parameters and UseLegacySQL: got nil, want error")
	}

@ -27,69 +27,65 @@ import (
	"google.golang.org/api/iterator"
)

type readTabledataArgs struct {
	conf *readTableConf
	tok  string
type pageFetcherArgs struct {
	table      *Table
	schema     Schema
	startIndex uint64
	pageSize   int64
	pageToken  string
}

// readServiceStub services read requests by returning data from an in-memory list of values.
type readServiceStub struct {
// pageFetcherReadStub services read requests by returning data from an in-memory list of values.
type pageFetcherReadStub struct {
	// values and pageTokens are used as sources of data to return in response to calls to readTabledata or readQuery.
	values     [][][]Value       // contains pages / rows / columns.
	pageTokens map[string]string // maps incoming page token to returned page token.

	// arguments are recorded for later inspection.
	readTabledataCalls []readTabledataArgs

	service
	calls []pageFetcherArgs
}

func (s *readServiceStub) readValues(tok string) *readDataResult {
	result := &readDataResult{
		pageToken: s.pageTokens[tok],
func (s *pageFetcherReadStub) fetchPage(ctx context.Context, t *Table, schema Schema, startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) {
	s.calls = append(s.calls,
		pageFetcherArgs{t, schema, startIndex, pageSize, pageToken})
	result := &fetchPageResult{
		pageToken: s.pageTokens[pageToken],
		rows:      s.values[0],
	}
	s.values = s.values[1:]

	return result
	return result, nil
}

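The stub works because the refactor replaces the service interface with a page-fetching function passed by value, so any closure with the right signature can stand in for the HTTP-backed fetcher. A reduced sketch of the injection, reusing the signature above:

	fake := func(ctx context.Context, t *Table, schema Schema,
		startIndex uint64, pageSize int64, pageToken string) (*fetchPageResult, error) {
		return &fetchPageResult{rows: [][]Value{{int64(1)}}}, nil
	}
	it := c.Dataset("dataset-id").Table("table-id").read(ctx, fake)
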
func (s *readServiceStub) waitForQuery(ctx context.Context, projectID, jobID string) (Schema, error) {
func waitForQueryStub(context.Context, string) (Schema, error) {
	return nil, nil
}

func (s *readServiceStub) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
	s.readTabledataCalls = append(s.readTabledataCalls, readTabledataArgs{conf, token})
	return s.readValues(token), nil
}

func TestRead(t *testing.T) {
	// The data for the service stub to return is populated for each test case in the testCases for loop.
	ctx := context.Background()
	service := &readServiceStub{}
	c := &Client{
		projectID: "project-id",
		service:   service,
	}

	c := &Client{projectID: "project-id"}
	pf := &pageFetcherReadStub{}
	queryJob := &Job{
		projectID: "project-id",
		jobID:     "job-id",
		c:         c,
		isQuery:   true,
		destinationTable: &bq.TableReference{
			ProjectId: "project-id",
			DatasetId: "dataset-id",
			TableId:   "table-id",
		config: &bq.JobConfiguration{
			Query: &bq.JobConfigurationQuery{
				DestinationTable: &bq.TableReference{
					ProjectId: "project-id",
					DatasetId: "dataset-id",
					TableId:   "table-id",
				},
			},
		},
	}

	for _, readFunc := range []func() *RowIterator{
		func() *RowIterator {
			return c.Dataset("dataset-id").Table("table-id").Read(ctx)
			return c.Dataset("dataset-id").Table("table-id").read(ctx, pf.fetchPage)
		},
		func() *RowIterator {
			it, err := queryJob.Read(ctx)
			it, err := queryJob.read(ctx, waitForQueryStub, pf.fetchPage)
			if err != nil {
				t.Fatal(err)
			}

@ -113,8 +109,8 @@ func TestRead(t *testing.T) {
		},
	}
	for _, tc := range testCases {
		service.values = tc.data
		service.pageTokens = tc.pageTokens
		pf.values = tc.data
		pf.pageTokens = tc.pageTokens
		if got, ok := collectValues(t, readFunc()); ok {
			if !testutil.Equal(got, tc.want) {
				t.Errorf("reading: got:\n%v\nwant:\n%v", got, tc.want)

@ -142,13 +138,11 @@ func collectValues(t *testing.T, it *RowIterator) ([][]Value, bool) {
}

func TestNoMoreValues(t *testing.T) {
	c := &Client{
		projectID: "project-id",
		service: &readServiceStub{
			values: [][][]Value{{{1, 2}, {11, 12}}},
		},
	c := &Client{projectID: "project-id"}
	pf := &pageFetcherReadStub{
		values: [][][]Value{{{1, 2}, {11, 12}}},
	}
	it := c.Dataset("dataset-id").Table("table-id").Read(context.Background())
	it := c.Dataset("dataset-id").Table("table-id").read(context.Background(), pf.fetchPage)
	var vals []Value
	// We expect to retrieve two values and then fail on the next attempt.
	if err := it.Next(&vals); err != nil {

@ -162,23 +156,16 @@ func TestNoMoreValues(t *testing.T) {
	}
}

type errorReadService struct {
	service
}

var errBang = errors.New("bang!")

func (s *errorReadService) readTabledata(ctx context.Context, conf *readTableConf, token string) (*readDataResult, error) {
func errorFetchPage(context.Context, *Table, Schema, uint64, int64, string) (*fetchPageResult, error) {
	return nil, errBang
}

func TestReadError(t *testing.T) {
	// test that service read errors are propagated back to the caller.
	c := &Client{
		projectID: "project-id",
		service:   &errorReadService{},
	}
	it := c.Dataset("dataset-id").Table("table-id").Read(context.Background())
	c := &Client{projectID: "project-id"}
	it := c.Dataset("dataset-id").Table("table-id").read(context.Background(), errorFetchPage)
	var vals []Value
	if err := it.Next(&vals); err != errBang {
		t.Fatalf("Get: got: %v: want: %v", err, errBang)

@ -187,54 +174,47 @@ func TestReadError(t *testing.T) {

func TestReadTabledataOptions(t *testing.T) {
	// test that read options are propagated.
	s := &readServiceStub{
	s := &pageFetcherReadStub{
		values: [][][]Value{{{1, 2}}},
	}
	c := &Client{
		projectID: "project-id",
		service:   s,
	}
	it := c.Dataset("dataset-id").Table("table-id").Read(context.Background())
	c := &Client{projectID: "project-id"}
	tr := c.Dataset("dataset-id").Table("table-id")
	it := tr.read(context.Background(), s.fetchPage)
	it.PageInfo().MaxSize = 5
	var vals []Value
	if err := it.Next(&vals); err != nil {
		t.Fatal(err)
	}
	want := []readTabledataArgs{{
		conf: &readTableConf{
			projectID: "project-id",
			datasetID: "dataset-id",
			tableID:   "table-id",
			paging: pagingConf{
				recordsPerRequest:    5,
				setRecordsPerRequest: true,
			},
		},
		tok: "",
	want := []pageFetcherArgs{{
		table:     tr,
		pageSize:  5,
		pageToken: "",
	}}

	if !testutil.Equal(s.readTabledataCalls, want, cmp.AllowUnexported(readTabledataArgs{}, readTableConf{}, pagingConf{})) {
		t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want)
	if diff := testutil.Diff(s.calls, want, cmp.AllowUnexported(pageFetcherArgs{}, pageFetcherReadStub{}, Table{}, Client{})); diff != "" {
		t.Errorf("reading (got=-, want=+):\n%s", diff)
	}
}

func TestReadQueryOptions(t *testing.T) {
	// test that read options are propagated.
	s := &readServiceStub{
	c := &Client{projectID: "project-id"}
	pf := &pageFetcherReadStub{
		values: [][][]Value{{{1, 2}}},
	}
	tr := &bq.TableReference{
		ProjectId: "project-id",
		DatasetId: "dataset-id",
		TableId:   "table-id",
	}
	queryJob := &Job{
		projectID: "project-id",
		jobID:     "job-id",
		c:         &Client{service: s},
		isQuery:   true,
		destinationTable: &bq.TableReference{
			ProjectId: "project-id",
			DatasetId: "dataset-id",
			TableId:   "table-id",
		c: c,
		config: &bq.JobConfiguration{
			Query: &bq.JobConfigurationQuery{DestinationTable: tr},
		},
	}
	it, err := queryJob.Read(context.Background())
	it, err := queryJob.read(context.Background(), waitForQueryStub, pf.fetchPage)
	if err != nil {
		t.Fatalf("err calling Read: %v", err)
	}

@ -244,20 +224,12 @@ func TestReadQueryOptions(t *testing.T) {
		t.Fatalf("Next: got: %v: want: nil", err)
	}

	want := []readTabledataArgs{{
		conf: &readTableConf{
			projectID: "project-id",
			datasetID: "dataset-id",
			tableID:   "table-id",
			paging: pagingConf{
				recordsPerRequest:    5,
				setRecordsPerRequest: true,
			},
		},
		tok: "",
	want := []pageFetcherArgs{{
		table:     bqToTable(tr, c),
		pageSize:  5,
		pageToken: "",
	}}

	if !testutil.Equal(s.readTabledataCalls, want, cmp.AllowUnexported(readTabledataArgs{}, readTableConf{}, pagingConf{})) {
		t.Errorf("reading: got:\n%v\nwant:\n%v", s.readTabledataCalls, want)
	if !testutil.Equal(pf.calls, want, cmp.AllowUnexported(pageFetcherArgs{}, Table{}, Client{})) {
		t.Errorf("reading: got:\n%v\nwant:\n%v", pf.calls, want)
	}
}

@ -49,7 +49,7 @@ type FieldSchema struct {
	Schema Schema
}

func (fs *FieldSchema) asTableFieldSchema() *bq.TableFieldSchema {
func (fs *FieldSchema) toBQ() *bq.TableFieldSchema {
	tfs := &bq.TableFieldSchema{
		Description: fs.Description,
		Name:        fs.Name,

@ -63,21 +63,21 @@ func (fs *FieldSchema) asTableFieldSchema() *bq.TableFieldSchema {
	} // else leave as default, which is interpreted as NULLABLE.

	for _, f := range fs.Schema {
		tfs.Fields = append(tfs.Fields, f.asTableFieldSchema())
		tfs.Fields = append(tfs.Fields, f.toBQ())
	}

	return tfs
}

func (s Schema) asTableSchema() *bq.TableSchema {
func (s Schema) toBQ() *bq.TableSchema {
	var fields []*bq.TableFieldSchema
	for _, f := range s {
		fields = append(fields, f.asTableFieldSchema())
		fields = append(fields, f.toBQ())
	}
	return &bq.TableSchema{Fields: fields}
}

func convertTableFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
func bqToFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
	fs := &FieldSchema{
		Description: tfs.Description,
		Name:        tfs.Name,

@ -87,18 +87,18 @@ func convertTableFieldSchema(tfs *bq.TableFieldSchema) *FieldSchema {
	}

	for _, f := range tfs.Fields {
		fs.Schema = append(fs.Schema, convertTableFieldSchema(f))
		fs.Schema = append(fs.Schema, bqToFieldSchema(f))
	}
	return fs
}

func convertTableSchema(ts *bq.TableSchema) Schema {
func bqToSchema(ts *bq.TableSchema) Schema {
	if ts == nil {
		return nil
	}
	var s Schema
	for _, f := range ts.Fields {
		s = append(s, convertTableFieldSchema(f))
		s = append(s, bqToFieldSchema(f))
	}
	return s
}

@ -141,6 +141,7 @@ func InferSchema(st interface{}) (Schema, error) {
	return inferSchemaReflectCached(reflect.TypeOf(st))
}

// TODO(jba): replace with sync.Map for Go 1.9.
var schemaCache atomiccache.Cache

type cacheVal struct {

@ -184,21 +185,21 @@ func inferStruct(t reflect.Type) (Schema, error) {
}

// inferFieldSchema infers the FieldSchema for a Go type
func inferFieldSchema(rt reflect.Type) (*FieldSchema, error) {
func inferFieldSchema(rt reflect.Type, nullable bool) (*FieldSchema, error) {
	switch rt {
	case typeOfByteSlice:
		return &FieldSchema{Required: true, Type: BytesFieldType}, nil
		return &FieldSchema{Required: !nullable, Type: BytesFieldType}, nil
	case typeOfGoTime:
		return &FieldSchema{Required: true, Type: TimestampFieldType}, nil
		return &FieldSchema{Required: !nullable, Type: TimestampFieldType}, nil
	case typeOfDate:
		return &FieldSchema{Required: true, Type: DateFieldType}, nil
		return &FieldSchema{Required: !nullable, Type: DateFieldType}, nil
	case typeOfTime:
		return &FieldSchema{Required: true, Type: TimeFieldType}, nil
		return &FieldSchema{Required: !nullable, Type: TimeFieldType}, nil
	case typeOfDateTime:
		return &FieldSchema{Required: true, Type: DateTimeFieldType}, nil
		return &FieldSchema{Required: !nullable, Type: DateTimeFieldType}, nil
	}
	if isSupportedIntType(rt) {
		return &FieldSchema{Required: true, Type: IntegerFieldType}, nil
		return &FieldSchema{Required: !nullable, Type: IntegerFieldType}, nil
	}
	switch rt.Kind() {
	case reflect.Slice, reflect.Array:

@ -208,7 +209,7 @@ func inferFieldSchema(rt reflect.Type) (*FieldSchema, error) {
		return nil, errUnsupportedFieldType
	}

	f, err := inferFieldSchema(et)
	f, err := inferFieldSchema(et, false)
	if err != nil {
		return nil, err
	}

@ -220,13 +221,13 @@ func inferFieldSchema(rt reflect.Type) (*FieldSchema, error) {
	if err != nil {
		return nil, err
	}
	return &FieldSchema{Required: true, Type: RecordFieldType, Schema: nested}, nil
	return &FieldSchema{Required: !nullable, Type: RecordFieldType, Schema: nested}, nil
	case reflect.String:
		return &FieldSchema{Required: true, Type: StringFieldType}, nil
		return &FieldSchema{Required: !nullable, Type: StringFieldType}, nil
	case reflect.Bool:
		return &FieldSchema{Required: true, Type: BooleanFieldType}, nil
		return &FieldSchema{Required: !nullable, Type: BooleanFieldType}, nil
	case reflect.Float32, reflect.Float64:
		return &FieldSchema{Required: true, Type: FloatFieldType}, nil
		return &FieldSchema{Required: !nullable, Type: FloatFieldType}, nil
	default:
		return nil, errUnsupportedFieldType
	}

@ -240,7 +241,14 @@ func inferFields(rt reflect.Type) (Schema, error) {
		return nil, err
	}
	for _, field := range fields {
		f, err := inferFieldSchema(field.Type)
		var nullable bool
		for _, opt := range field.ParsedTag.([]string) {
			if opt == nullableTagOption {
				nullable = true
				break
			}
		}
		f, err := inferFieldSchema(field.Type, nullable)
		if err != nil {
			return nil, err
		}

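With nullable threaded through inferFieldSchema, a struct tag can opt a field out of REQUIRED mode at inference time. A sketch with a hypothetical row type:

	type row struct {
		Name  string                     // inferred as a REQUIRED STRING
		Score int `bigquery:",nullable"` // inferred as a NULLABLE INTEGER
	}
	schema, err := bigquery.InferSchema(row{})
	// schema[0].Required == true, schema[1].Required == false
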
@ -192,12 +192,12 @@ func TestSchemaConversion(t *testing.T) {
}

	for _, tc := range testCases {
		bqSchema := tc.schema.asTableSchema()
		bqSchema := tc.schema.toBQ()
		if !testutil.Equal(bqSchema, tc.bqSchema) {
			t.Errorf("converting to TableSchema: got:\n%v\nwant:\n%v",
				pretty.Value(bqSchema), pretty.Value(tc.bqSchema))
		}
		schema := convertTableSchema(tc.bqSchema)
		schema := bqToSchema(tc.bqSchema)
		if !testutil.Equal(schema, tc.schema) {
			t.Errorf("converting to Schema: got:\n%v\nwant:\n%v", schema, tc.schema)
		}

@ -536,6 +536,7 @@ type withTags struct {
	SimpleTag     int `bigquery:"simple_tag"`
	UnderscoreTag int `bigquery:"_id"`
	MixedCase     int `bigquery:"MIXEDcase"`
	Nullable      int `bigquery:",nullable"`
}

type withTagsNested struct {

@ -563,6 +564,7 @@ var withTagsSchema = Schema{
	reqField("simple_tag", "INTEGER"),
	reqField("_id", "INTEGER"),
	reqField("MIXEDcase", "INTEGER"),
	{Name: "Nullable", Type: FieldType("INTEGER"), Required: false},
}

func TestTagInference(t *testing.T) {

@ -666,12 +668,6 @@ func TestTagInferenceErrors(t *testing.T) {
		}{},
		err: errInvalidFieldName,
	},
	{
		in: struct {
			OmitEmpty int `bigquery:"abc,omitempty"`
		}{},
		err: errInvalidFieldName,
	},
	}
	for i, tc := range testCases {
		want := tc.err

@ -680,6 +676,13 @@ func TestTagInferenceErrors(t *testing.T) {
			t.Errorf("%d: inferring TableSchema: got:\n%#v\nwant:\n%#v", i, got, want)
		}
	}

	_, err := InferSchema(struct {
		X int `bigquery:",optional"`
	}{})
	if err == nil {
		t.Error("got nil, want error")
	}
}

func TestSchemaErrors(t *testing.T) {

@ -1,940 +0,0 @@
// Copyright 2015 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package bigquery

import (
	"errors"
	"fmt"
	"io"
	"net/http"
	"time"

	"cloud.google.com/go/internal"
	"cloud.google.com/go/internal/optional"
	"cloud.google.com/go/internal/version"
	gax "github.com/googleapis/gax-go"

	"golang.org/x/net/context"
	bq "google.golang.org/api/bigquery/v2"
	"google.golang.org/api/googleapi"
)

// service provides an internal abstraction to isolate the generated
// BigQuery API; most of this package uses this interface instead.
// The single implementation, *bigqueryService, contains all the knowledge
// of the generated BigQuery API.
type service interface {
	// Jobs
	insertJob(ctx context.Context, projectId string, conf *insertJobConf) (*Job, error)
	getJob(ctx context.Context, projectId, jobID string) (*Job, error)
	jobCancel(ctx context.Context, projectId, jobID string) error
	jobStatus(ctx context.Context, projectId, jobID string) (*JobStatus, error)
	listJobs(ctx context.Context, projectId string, maxResults int, pageToken string, all bool, state string) ([]JobInfo, string, error)

	// Tables
	createTable(ctx context.Context, projectID, datasetID, tableID string, tm *TableMetadata) error
	getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error)
	deleteTable(ctx context.Context, projectID, datasetID, tableID string) error

	// listTables returns a page of Tables and a next page token. Note: the Tables do not have their c field populated.
	listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error)
	patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf, etag string) (*TableMetadata, error)

	// Table data
	readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error)
	insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error

	// Datasets
	insertDataset(ctx context.Context, datasetID, projectID string, dm *DatasetMetadata) error
	deleteDataset(ctx context.Context, datasetID, projectID string) error
	getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error)
	patchDataset(ctx context.Context, projectID, datasetID string, dm *DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error)

	// Misc

	// Waits for a query to complete.
	waitForQuery(ctx context.Context, projectID, jobID string) (Schema, error)

	// listDatasets returns a page of Datasets and a next page token. Note: the Datasets do not have their c field populated.
	listDatasets(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, filter string) ([]*Dataset, string, error)
}

var xGoogHeader = fmt.Sprintf("gl-go/%s gccl/%s", version.Go(), version.Repo)

func setClientHeader(headers http.Header) {
	headers.Set("x-goog-api-client", xGoogHeader)
}

type bigqueryService struct {
	s *bq.Service
}

func newBigqueryService(client *http.Client, endpoint string) (*bigqueryService, error) {
	s, err := bq.New(client)
	if err != nil {
		return nil, fmt.Errorf("constructing bigquery client: %v", err)
	}
	s.BasePath = endpoint

	return &bigqueryService{s: s}, nil
}

// getPages calls the supplied getPage function repeatedly until there are no pages left to get.
// token is the token of the initial page to start from. Use an empty string to start from the beginning.
func getPages(token string, getPage func(token string) (nextToken string, err error)) error {
	for {
		var err error
		token, err = getPage(token)
		if err != nil {
			return err
		}
		if token == "" {
			return nil
		}
	}
}

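getPages inverts the usual iterator shape: the callback consumes one page and reports the next token, and an empty token ends the loop. A sketch of a caller, with listPage and process as hypothetical helpers:

	err := getPages("", func(token string) (string, error) {
		items, next, err := listPage(token) // hypothetical single-page fetch
		if err != nil {
			return "", err
		}
		process(items)
		return next, nil // "" stops the loop
	})
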
type insertJobConf struct {
	job   *bq.Job
	media io.Reader
}

// Calls the Jobs.Insert RPC and returns a Job. Callers must set the returned Job's
// client.
func (s *bigqueryService) insertJob(ctx context.Context, projectID string, conf *insertJobConf) (*Job, error) {
	call := s.s.Jobs.Insert(projectID, conf.job).Context(ctx)
	setClientHeader(call.Header())
	if conf.media != nil {
		call.Media(conf.media)
	}
	var res *bq.Job
	var err error
	invoke := func() error {
		res, err = call.Do()
		return err
	}
	// A job with a client-generated ID can be retried; the presence of the
	// ID makes the insert operation idempotent.
	// We don't retry if there is media, because it is an io.Reader. We'd
	// have to read the contents and keep it in memory, and that could be expensive.
	// TODO(jba): Look into retrying if media != nil.
	if conf.job.JobReference != nil && conf.media == nil {
		err = runWithRetry(ctx, invoke)
	} else {
		err = invoke()
	}
	if err != nil {
		return nil, err
	}

	var dt *bq.TableReference
	if qc := res.Configuration.Query; qc != nil {
		dt = qc.DestinationTable
	}
	return &Job{
		projectID:        projectID,
		jobID:            res.JobReference.JobId,
		destinationTable: dt,
	}, nil
}

type pagingConf struct {
	recordsPerRequest    int64
	setRecordsPerRequest bool

	startIndex uint64
}

type readTableConf struct {
	projectID, datasetID, tableID string
	paging                        pagingConf
	schema                        Schema // lazily initialized when the first page of data is fetched.
}

func (conf *readTableConf) fetch(ctx context.Context, s service, token string) (*readDataResult, error) {
	return s.readTabledata(ctx, conf, token)
}

func (conf *readTableConf) setPaging(pc *pagingConf) { conf.paging = *pc }

type readDataResult struct {
	pageToken string
	rows      [][]Value
	totalRows uint64
	schema    Schema
}

func (s *bigqueryService) readTabledata(ctx context.Context, conf *readTableConf, pageToken string) (*readDataResult, error) {
	// Prepare request to fetch one page of table data.
	req := s.s.Tabledata.List(conf.projectID, conf.datasetID, conf.tableID)
	setClientHeader(req.Header())
	if pageToken != "" {
		req.PageToken(pageToken)
	} else {
		req.StartIndex(conf.paging.startIndex)
	}

	if conf.paging.setRecordsPerRequest {
		req.MaxResults(conf.paging.recordsPerRequest)
	}

	// Fetch the table schema in the background, if necessary.
	errc := make(chan error, 1)
	if conf.schema != nil {
		errc <- nil
	} else {
		go func() {
			var t *bq.Table
			err := runWithRetry(ctx, func() (err error) {
				t, err = s.s.Tables.Get(conf.projectID, conf.datasetID, conf.tableID).
					Fields("schema").
					Context(ctx).
					Do()
				return err
			})
			if err == nil && t.Schema != nil {
				conf.schema = convertTableSchema(t.Schema)
			}
			errc <- err
		}()
	}
	var res *bq.TableDataList
	err := runWithRetry(ctx, func() (err error) {
		res, err = req.Context(ctx).Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	err = <-errc
	if err != nil {
		return nil, err
	}
	result := &readDataResult{
		pageToken: res.PageToken,
		totalRows: uint64(res.TotalRows),
		schema:    conf.schema,
	}
	result.rows, err = convertRows(res.Rows, conf.schema)
	if err != nil {
		return nil, err
	}
	return result, nil
}

func (s *bigqueryService) waitForQuery(ctx context.Context, projectID, jobID string) (Schema, error) {
	// Use GetQueryResults only to wait for completion, not to read results.
	req := s.s.Jobs.GetQueryResults(projectID, jobID).Context(ctx).MaxResults(0)
	setClientHeader(req.Header())
	backoff := gax.Backoff{
		Initial:    1 * time.Second,
		Multiplier: 2,
		Max:        60 * time.Second,
	}
	var res *bq.GetQueryResultsResponse
	err := internal.Retry(ctx, backoff, func() (stop bool, err error) {
		res, err = req.Do()
		if err != nil {
			return !retryableError(err), err
		}
		if !res.JobComplete { // GetQueryResults may return early without error; retry.
			return false, nil
		}
		return true, nil
	})
	if err != nil {
		return nil, err
	}
	return convertTableSchema(res.Schema), nil
}

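The gax.Backoff above yields pauses that grow from about 1s by doubling and are capped at 60s; gax also randomizes each pause, so the sequence below is the idealized ceiling, not the exact timing:

	d := time.Second
	for i := 0; i < 8; i++ {
		fmt.Println(d) // 1s 2s 4s 8s 16s 32s 60s 60s (upper bounds; gax jitters)
		d *= 2
		if d > 60*time.Second {
			d = 60 * time.Second
		}
	}
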
type insertRowsConf struct {
	templateSuffix      string
	ignoreUnknownValues bool
	skipInvalidRows     bool
}

func (s *bigqueryService) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
	req := &bq.TableDataInsertAllRequest{
		TemplateSuffix:      conf.templateSuffix,
		IgnoreUnknownValues: conf.ignoreUnknownValues,
		SkipInvalidRows:     conf.skipInvalidRows,
	}
	for _, row := range rows {
		m := make(map[string]bq.JsonValue)
		for k, v := range row.Row {
			m[k] = bq.JsonValue(v)
		}
		req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{
			InsertId: row.InsertID,
			Json:     m,
		})
	}
	call := s.s.Tabledata.InsertAll(projectID, datasetID, tableID, req).Context(ctx)
	setClientHeader(call.Header())
	var res *bq.TableDataInsertAllResponse
	err := runWithRetry(ctx, func() (err error) {
		res, err = call.Do()
		return err
	})
	if err != nil {
		return err
	}
	if len(res.InsertErrors) == 0 {
		return nil
	}

	var errs PutMultiError
	for _, e := range res.InsertErrors {
		if int(e.Index) > len(rows) {
			return fmt.Errorf("internal error: unexpected row index: %v", e.Index)
		}
		rie := RowInsertionError{
			InsertID: rows[e.Index].InsertID,
			RowIndex: int(e.Index),
		}
		for _, errp := range e.Errors {
			rie.Errors = append(rie.Errors, errorFromErrorProto(errp))
		}
		errs = append(errs, rie)
	}
	return errs
}

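Per-row failures come back as a PutMultiError rather than one opaque error, so callers can tell exactly which rows were rejected. A sketch of the caller side, assuming an uploader u obtained from a table and a rows slice:

	if err := u.Put(ctx, rows); err != nil {
		if multi, ok := err.(bigquery.PutMultiError); ok {
			for _, rowErr := range multi {
				fmt.Printf("row %d (insert ID %q): %v\n",
					rowErr.RowIndex, rowErr.InsertID, rowErr.Errors)
			}
		}
	}
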
func (s *bigqueryService) getJob(ctx context.Context, projectID, jobID string) (*Job, error) {
	bqjob, err := s.getJobInternal(ctx, projectID, jobID, "configuration", "jobReference")
	if err != nil {
		return nil, err
	}
	return jobFromProtos(bqjob.JobReference, bqjob.Configuration), nil
}

func (s *bigqueryService) jobStatus(ctx context.Context, projectID, jobID string) (*JobStatus, error) {
	job, err := s.getJobInternal(ctx, projectID, jobID, "status", "statistics")
	if err != nil {
		return nil, err
	}
	st, err := jobStatusFromProto(job.Status)
	if err != nil {
		return nil, err
	}
	st.Statistics = jobStatisticsFromProto(job.Statistics)
	return st, nil
}

func (s *bigqueryService) getJobInternal(ctx context.Context, projectID, jobID string, fields ...googleapi.Field) (*bq.Job, error) {
	var job *bq.Job
	call := s.s.Jobs.Get(projectID, jobID).Context(ctx)
	if len(fields) > 0 {
		call = call.Fields(fields...)
	}
	setClientHeader(call.Header())
	err := runWithRetry(ctx, func() (err error) {
		job, err = call.Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return job, nil
}

func (s *bigqueryService) jobCancel(ctx context.Context, projectID, jobID string) error {
	// Jobs.Cancel returns a job entity, but the only relevant piece of
	// data it may contain (the status of the job) is unreliable. From the
	// docs: "This call will return immediately, and the client will need
	// to poll for the job status to see if the cancel completed
	// successfully". So it would be misleading to return a status.
	call := s.s.Jobs.Cancel(projectID, jobID).
		Fields(). // We don't need any of the response data.
		Context(ctx)
	setClientHeader(call.Header())
	return runWithRetry(ctx, func() error {
		_, err := call.Do()
		return err
	})
}

func jobFromProtos(jr *bq.JobReference, config *bq.JobConfiguration) *Job {
	var isQuery bool
	var dest *bq.TableReference
	if config.Query != nil {
		isQuery = true
		dest = config.Query.DestinationTable
	}
	return &Job{
		projectID:        jr.ProjectId,
		jobID:            jr.JobId,
		isQuery:          isQuery,
		destinationTable: dest,
	}
}

var stateMap = map[string]State{"PENDING": Pending, "RUNNING": Running, "DONE": Done}

func jobStatusFromProto(status *bq.JobStatus) (*JobStatus, error) {
	state, ok := stateMap[status.State]
	if !ok {
		return nil, fmt.Errorf("unexpected job state: %v", status.State)
	}

	newStatus := &JobStatus{
		State: state,
		err:   nil,
	}
	if err := errorFromErrorProto(status.ErrorResult); state == Done && err != nil {
		newStatus.err = err
	}

	for _, ep := range status.Errors {
		newStatus.Errors = append(newStatus.Errors, errorFromErrorProto(ep))
	}
	return newStatus, nil
}

func jobStatisticsFromProto(s *bq.JobStatistics) *JobStatistics {
	js := &JobStatistics{
		CreationTime:        unixMillisToTime(s.CreationTime),
		StartTime:           unixMillisToTime(s.StartTime),
		EndTime:             unixMillisToTime(s.EndTime),
		TotalBytesProcessed: s.TotalBytesProcessed,
	}
	switch {
	case s.Extract != nil:
		js.Details = &ExtractStatistics{
			DestinationURIFileCounts: []int64(s.Extract.DestinationUriFileCounts),
		}
	case s.Load != nil:
		js.Details = &LoadStatistics{
			InputFileBytes: s.Load.InputFileBytes,
			InputFiles:     s.Load.InputFiles,
			OutputBytes:    s.Load.OutputBytes,
			OutputRows:     s.Load.OutputRows,
		}
	case s.Query != nil:
		var names []string
		for _, qp := range s.Query.UndeclaredQueryParameters {
			names = append(names, qp.Name)
		}
		var tables []*Table
		for _, tr := range s.Query.ReferencedTables {
			tables = append(tables, convertTableReference(tr))
		}
		js.Details = &QueryStatistics{
			BillingTier:                   s.Query.BillingTier,
			CacheHit:                      s.Query.CacheHit,
			StatementType:                 s.Query.StatementType,
			TotalBytesBilled:              s.Query.TotalBytesBilled,
			TotalBytesProcessed:           s.Query.TotalBytesProcessed,
			NumDMLAffectedRows:            s.Query.NumDmlAffectedRows,
			QueryPlan:                     queryPlanFromProto(s.Query.QueryPlan),
			Schema:                        convertTableSchema(s.Query.Schema),
			ReferencedTables:              tables,
			UndeclaredQueryParameterNames: names,
		}
	}
	return js
}

func queryPlanFromProto(stages []*bq.ExplainQueryStage) []*ExplainQueryStage {
	var res []*ExplainQueryStage
	for _, s := range stages {
		var steps []*ExplainQueryStep
		for _, p := range s.Steps {
			steps = append(steps, &ExplainQueryStep{
				Kind:     p.Kind,
				Substeps: p.Substeps,
			})
		}
		res = append(res, &ExplainQueryStage{
			ComputeRatioAvg: s.ComputeRatioAvg,
			ComputeRatioMax: s.ComputeRatioMax,
			ID:              s.Id,
			Name:            s.Name,
			ReadRatioAvg:    s.ReadRatioAvg,
			ReadRatioMax:    s.ReadRatioMax,
			RecordsRead:     s.RecordsRead,
			RecordsWritten:  s.RecordsWritten,
			Status:          s.Status,
			Steps:           steps,
			WaitRatioAvg:    s.WaitRatioAvg,
			WaitRatioMax:    s.WaitRatioMax,
			WriteRatioAvg:   s.WriteRatioAvg,
			WriteRatioMax:   s.WriteRatioMax,
		})
	}
	return res
}

// listTables returns a subset of tables that belong to a dataset, and a token for fetching the next subset.
func (s *bigqueryService) listTables(ctx context.Context, projectID, datasetID string, pageSize int, pageToken string) ([]*Table, string, error) {
	var tables []*Table
	req := s.s.Tables.List(projectID, datasetID).
		PageToken(pageToken).
		Context(ctx)
	setClientHeader(req.Header())
	if pageSize > 0 {
		req.MaxResults(int64(pageSize))
	}
	var res *bq.TableList
	err := runWithRetry(ctx, func() (err error) {
		res, err = req.Do()
		return err
	})
	if err != nil {
		return nil, "", err
	}
	for _, t := range res.Tables {
		tables = append(tables, convertTableReference(t.TableReference))
	}
	return tables, res.NextPageToken, nil
}

// createTable creates a table in the BigQuery service.
// If tm.ViewQuery is non-empty, the created table will be of type VIEW.
// Note: expiration can only be set during table creation.
// Note: after table creation, a view can be modified only if its table was initially created with a view.
func (s *bigqueryService) createTable(ctx context.Context, projectID, datasetID, tableID string, tm *TableMetadata) error {
	table, err := bqTableFromMetadata(tm)
	if err != nil {
		return err
	}
	table.TableReference = &bq.TableReference{
		ProjectId: projectID,
		DatasetId: datasetID,
		TableId:   tableID,
	}
	req := s.s.Tables.Insert(projectID, datasetID, table).Context(ctx)
	setClientHeader(req.Header())
	_, err = req.Do()
	return err
}

func bqTableFromMetadata(tm *TableMetadata) (*bq.Table, error) {
	t := &bq.Table{}
	if tm == nil {
		return t, nil
	}
	if tm.Schema != nil && tm.ViewQuery != "" {
		return nil, errors.New("bigquery: provide Schema or ViewQuery, not both")
	}
	t.FriendlyName = tm.Name
	t.Description = tm.Description
	if tm.Schema != nil {
		t.Schema = tm.Schema.asTableSchema()
	}
	if tm.ViewQuery != "" {
		if tm.UseStandardSQL && tm.UseLegacySQL {
			return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
		}
		t.View = &bq.ViewDefinition{Query: tm.ViewQuery}
		if tm.UseLegacySQL {
			t.View.UseLegacySql = true
		} else {
			t.View.UseLegacySql = false
			t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
		}
	} else if tm.UseLegacySQL || tm.UseStandardSQL {
		return nil, errors.New("bigquery: UseLegacy/StandardSQL requires ViewQuery")
	}
	if tm.TimePartitioning != nil {
		t.TimePartitioning = &bq.TimePartitioning{
			Type:         "DAY",
			ExpirationMs: int64(tm.TimePartitioning.Expiration / time.Millisecond),
		}
	}
	if !tm.ExpirationTime.IsZero() {
		t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
	}

	if tm.FullID != "" {
		return nil, errors.New("cannot set FullID on create")
	}
	if tm.Type != "" {
		return nil, errors.New("cannot set Type on create")
	}
	if !tm.CreationTime.IsZero() {
		return nil, errors.New("cannot set CreationTime on create")
	}
	if !tm.LastModifiedTime.IsZero() {
		return nil, errors.New("cannot set LastModifiedTime on create")
	}
	if tm.NumBytes != 0 {
		return nil, errors.New("cannot set NumBytes on create")
	}
	if tm.NumRows != 0 {
		return nil, errors.New("cannot set NumRows on create")
	}
	if tm.StreamingBuffer != nil {
		return nil, errors.New("cannot set StreamingBuffer on create")
	}
	if tm.ETag != "" {
		return nil, errors.New("cannot set ETag on create")
	}
	return t, nil
}

func (s *bigqueryService) getTableMetadata(ctx context.Context, projectID, datasetID, tableID string) (*TableMetadata, error) {
	req := s.s.Tables.Get(projectID, datasetID, tableID).Context(ctx)
	setClientHeader(req.Header())
	var table *bq.Table
	err := runWithRetry(ctx, func() (err error) {
		table, err = req.Do()
		return err
	})
	if err != nil {
		return nil, err
	}
	return bqTableToMetadata(table), nil
}

func (s *bigqueryService) deleteTable(ctx context.Context, projectID, datasetID, tableID string) error {
|
||||
req := s.s.Tables.Delete(projectID, datasetID, tableID).Context(ctx)
|
||||
setClientHeader(req.Header())
|
||||
return runWithRetry(ctx, func() error { return req.Do() })
|
||||
}
|
||||
|
||||
func bqTableToMetadata(t *bq.Table) *TableMetadata {
|
||||
md := &TableMetadata{
|
||||
Description: t.Description,
|
||||
Name: t.FriendlyName,
|
||||
Type: TableType(t.Type),
|
||||
FullID: t.Id,
|
||||
NumBytes: t.NumBytes,
|
||||
NumRows: t.NumRows,
|
||||
ExpirationTime: unixMillisToTime(t.ExpirationTime),
|
||||
CreationTime: unixMillisToTime(t.CreationTime),
|
||||
LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)),
|
||||
ETag: t.Etag,
|
||||
}
|
||||
if t.Schema != nil {
|
||||
md.Schema = convertTableSchema(t.Schema)
|
||||
}
|
||||
if t.View != nil {
|
||||
md.ViewQuery = t.View.Query
|
||||
md.UseLegacySQL = t.View.UseLegacySql
|
||||
}
|
||||
if t.TimePartitioning != nil {
|
||||
md.TimePartitioning = &TimePartitioning{
|
||||
Expiration: time.Duration(t.TimePartitioning.ExpirationMs) * time.Millisecond,
|
||||
}
|
||||
}
|
||||
if t.StreamingBuffer != nil {
|
||||
md.StreamingBuffer = &StreamingBuffer{
|
||||
EstimatedBytes: t.StreamingBuffer.EstimatedBytes,
|
||||
EstimatedRows: t.StreamingBuffer.EstimatedRows,
|
||||
OldestEntryTime: unixMillisToTime(int64(t.StreamingBuffer.OldestEntryTime)),
|
||||
}
|
||||
}
|
||||
return md
|
||||
}
|
||||
|
||||
func bqDatasetToMetadata(d *bq.Dataset) *DatasetMetadata {
|
||||
/// TODO(jba): access
|
||||
return &DatasetMetadata{
|
||||
CreationTime: unixMillisToTime(d.CreationTime),
|
||||
LastModifiedTime: unixMillisToTime(d.LastModifiedTime),
|
||||
DefaultTableExpiration: time.Duration(d.DefaultTableExpirationMs) * time.Millisecond,
|
||||
Description: d.Description,
|
||||
Name: d.FriendlyName,
|
||||
FullID: d.Id,
|
||||
Location: d.Location,
|
||||
Labels: d.Labels,
|
||||
ETag: d.Etag,
|
||||
}
|
||||
}
|
||||
|
||||
// Convert a number of milliseconds since the Unix epoch to a time.Time.
|
||||
// Treat an input of zero specially: convert it to the zero time,
|
||||
// rather than the start of the epoch.
|
||||
func unixMillisToTime(m int64) time.Time {
|
||||
if m == 0 {
|
||||
return time.Time{}
|
||||
}
|
||||
return time.Unix(0, m*1e6)
|
||||
}
|
||||
|
||||
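BigQuery's REST layer reports timestamps as integer milliseconds since the Unix epoch, and an absent value arrives as 0; mapping 0 to the zero time.Time (rather than to 1970-01-01) is what lets callers detect unset fields with IsZero(). A minimal standalone sketch of the same conversion (the helper name is illustrative, not part of the package):

    package main

    import (
        "fmt"
        "time"
    )

    // millisToTime mirrors unixMillisToTime above: 0 maps to the zero
    // time.Time so "unset" stays distinguishable from the epoch itself.
    func millisToTime(m int64) time.Time {
        if m == 0 {
            return time.Time{}
        }
        return time.Unix(0, m*int64(time.Millisecond))
    }

    func main() {
        fmt.Println(millisToTime(0).IsZero())          // true: unset field
        fmt.Println(millisToTime(1485388800000).UTC()) // 2017-01-26 00:00:00 +0000 UTC
    }
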
func convertTableReference(tr *bq.TableReference) *Table {
    return &Table{
        ProjectID: tr.ProjectId,
        DatasetID: tr.DatasetId,
        TableID:   tr.TableId,
    }
}

// patchTableConf contains fields to be patched.
type patchTableConf struct {
    // These fields are omitted from the patch operation if nil.
    Description    *string
    Name           *string
    Schema         Schema
    ExpirationTime time.Time
}

func (s *bigqueryService) patchTable(ctx context.Context, projectID, datasetID, tableID string, conf *patchTableConf, etag string) (*TableMetadata, error) {
    t := &bq.Table{}
    forceSend := func(field string) {
        t.ForceSendFields = append(t.ForceSendFields, field)
    }

    if conf.Description != nil {
        t.Description = *conf.Description
        forceSend("Description")
    }
    if conf.Name != nil {
        t.FriendlyName = *conf.Name
        forceSend("FriendlyName")
    }
    if conf.Schema != nil {
        t.Schema = conf.Schema.asTableSchema()
        forceSend("Schema")
    }
    if !conf.ExpirationTime.IsZero() {
        t.ExpirationTime = conf.ExpirationTime.UnixNano() / 1e6
        forceSend("ExpirationTime")
    }
    call := s.s.Tables.Patch(projectID, datasetID, tableID, t).Context(ctx)
    setClientHeader(call.Header())
    if etag != "" {
        call.Header().Set("If-Match", etag)
    }
    var table *bq.Table
    if err := runWithRetry(ctx, func() (err error) {
        table, err = call.Do()
        return err
    }); err != nil {
        return nil, err
    }
    return bqTableToMetadata(table), nil
}

func (s *bigqueryService) insertDataset(ctx context.Context, datasetID, projectID string, dm *DatasetMetadata) error {
    // TODO(jba): retry?
    ds, err := bqDatasetFromMetadata(dm)
    if err != nil {
        return err
    }
    ds.DatasetReference = &bq.DatasetReference{DatasetId: datasetID}
    req := s.s.Datasets.Insert(projectID, ds).Context(ctx)
    setClientHeader(req.Header())
    _, err = req.Do()
    return err
}

func (s *bigqueryService) patchDataset(ctx context.Context, projectID, datasetID string, dm *DatasetMetadataToUpdate, etag string) (*DatasetMetadata, error) {
    ds := bqDatasetFromUpdateMetadata(dm)
    call := s.s.Datasets.Patch(projectID, datasetID, ds).Context(ctx)
    setClientHeader(call.Header())
    if etag != "" {
        call.Header().Set("If-Match", etag)
    }
    var ds2 *bq.Dataset
    if err := runWithRetry(ctx, func() (err error) {
        ds2, err = call.Do()
        return err
    }); err != nil {
        return nil, err
    }
    return bqDatasetToMetadata(ds2), nil
}

func bqDatasetFromMetadata(dm *DatasetMetadata) (*bq.Dataset, error) {
    ds := &bq.Dataset{}
    if dm == nil {
        return ds, nil
    }
    ds.FriendlyName = dm.Name
    ds.Description = dm.Description
    ds.Location = dm.Location
    ds.DefaultTableExpirationMs = int64(dm.DefaultTableExpiration / time.Millisecond)
    ds.Labels = dm.Labels
    if !dm.CreationTime.IsZero() {
        return nil, errors.New("bigquery: Dataset.CreationTime is not writable")
    }
    if !dm.LastModifiedTime.IsZero() {
        return nil, errors.New("bigquery: Dataset.LastModifiedTime is not writable")
    }
    if dm.FullID != "" {
        return nil, errors.New("bigquery: Dataset.FullID is not writable")
    }
    if dm.ETag != "" {
        return nil, errors.New("bigquery: Dataset.ETag is not writable")
    }
    return ds, nil
}

func bqDatasetFromUpdateMetadata(dm *DatasetMetadataToUpdate) *bq.Dataset {
    ds := &bq.Dataset{}
    forceSend := func(field string) {
        ds.ForceSendFields = append(ds.ForceSendFields, field)
    }

    if dm.Description != nil {
        ds.Description = optional.ToString(dm.Description)
        forceSend("Description")
    }
    if dm.Name != nil {
        ds.FriendlyName = optional.ToString(dm.Name)
        forceSend("FriendlyName")
    }
    if dm.DefaultTableExpiration != nil {
        dur := optional.ToDuration(dm.DefaultTableExpiration)
        if dur == 0 {
            // Send a null to delete the field.
            ds.NullFields = append(ds.NullFields, "DefaultTableExpirationMs")
        } else {
            ds.DefaultTableExpirationMs = int64(dur / time.Millisecond)
        }
    }
    if dm.setLabels != nil || dm.deleteLabels != nil {
        ds.Labels = map[string]string{}
        for k, v := range dm.setLabels {
            ds.Labels[k] = v
        }
        if len(ds.Labels) == 0 && len(dm.deleteLabels) > 0 {
            forceSend("Labels")
        }
        for l := range dm.deleteLabels {
            ds.NullFields = append(ds.NullFields, "Labels."+l)
        }
    }
    return ds
}

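The ForceSendFields/NullFields bookkeeping above is how the generated client distinguishes "set to the zero value" from "omitted" in a PATCH body: a field listed in ForceSendFields is serialized even when it is Go-zero, and a field in NullFields is emitted as an explicit JSON null (including individual map keys like Labels.<name>). A sketch of the resulting JSON, using the generated bq.Dataset type directly (output shape is approximate):

    package main

    import (
        "fmt"

        bq "google.golang.org/api/bigquery/v2"
    )

    func main() {
        ds := &bq.Dataset{
            Description:     "",                          // zero value...
            ForceSendFields: []string{"Description"},     // ...but sent anyway
            NullFields:      []string{"Labels.obsolete"}, // emitted as null
        }
        b, _ := ds.MarshalJSON()
        fmt.Println(string(b)) // roughly: {"description":"","labels":{"obsolete":null}}
    }
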
func (s *bigqueryService) deleteDataset(ctx context.Context, datasetID, projectID string) error {
    req := s.s.Datasets.Delete(projectID, datasetID).Context(ctx)
    setClientHeader(req.Header())
    return runWithRetry(ctx, func() error { return req.Do() })
}

func (s *bigqueryService) getDatasetMetadata(ctx context.Context, projectID, datasetID string) (*DatasetMetadata, error) {
    req := s.s.Datasets.Get(projectID, datasetID).Context(ctx)
    setClientHeader(req.Header())
    var ds *bq.Dataset
    if err := runWithRetry(ctx, func() (err error) {
        ds, err = req.Do()
        return err
    }); err != nil {
        return nil, err
    }
    return bqDatasetToMetadata(ds), nil
}

func (s *bigqueryService) listDatasets(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, filter string) ([]*Dataset, string, error) {
    req := s.s.Datasets.List(projectID).
        Context(ctx).
        PageToken(pageToken).
        All(all)
    setClientHeader(req.Header())
    if maxResults > 0 {
        req.MaxResults(int64(maxResults))
    }
    if filter != "" {
        req.Filter(filter)
    }
    var res *bq.DatasetList
    err := runWithRetry(ctx, func() (err error) {
        res, err = req.Do()
        return err
    })
    if err != nil {
        return nil, "", err
    }
    var datasets []*Dataset
    for _, d := range res.Datasets {
        datasets = append(datasets, s.convertListedDataset(d))
    }
    return datasets, res.NextPageToken, nil
}

func (s *bigqueryService) convertListedDataset(d *bq.DatasetListDatasets) *Dataset {
    return &Dataset{
        ProjectID: d.DatasetReference.ProjectId,
        DatasetID: d.DatasetReference.DatasetId,
    }
}

func (s *bigqueryService) listJobs(ctx context.Context, projectID string, maxResults int, pageToken string, all bool, state string) ([]JobInfo, string, error) {
    req := s.s.Jobs.List(projectID).
        Context(ctx).
        PageToken(pageToken).
        Projection("full").
        AllUsers(all)
    if state != "" {
        req.StateFilter(state)
    }
    setClientHeader(req.Header())
    if maxResults > 0 {
        req.MaxResults(int64(maxResults))
    }
    res, err := req.Do()
    if err != nil {
        return nil, "", err
    }
    var jobInfos []JobInfo
    for _, j := range res.Jobs {
        ji, err := s.convertListedJob(j)
        if err != nil {
            return nil, "", err
        }
        jobInfos = append(jobInfos, ji)
    }
    return jobInfos, res.NextPageToken, nil
}

func (s *bigqueryService) convertListedJob(j *bq.JobListJobs) (JobInfo, error) {
    st, err := jobStatusFromProto(j.Status)
    if err != nil {
        return JobInfo{}, err
    }
    st.Statistics = jobStatisticsFromProto(j.Statistics)
    return JobInfo{
        Job:    jobFromProtos(j.JobReference, j.Configuration),
        Status: st,
    }, nil
}

// runWithRetry calls the function until it returns nil or a non-retryable error, or
// the context is done.
// See the similar function in ../storage/invoke.go. The main difference is the
// reason for retrying.
func runWithRetry(ctx context.Context, call func() error) error {
    // These parameters match the suggestions in https://cloud.google.com/bigquery/sla.
    backoff := gax.Backoff{
        Initial:    1 * time.Second,
        Max:        32 * time.Second,
        Multiplier: 2,
    }
    return internal.Retry(ctx, backoff, func() (stop bool, err error) {
        err = call()
        if err == nil {
            return true, nil
        }
        return !retryableError(err), err
    })
}

// This is the correct definition of retryable according to the BigQuery team.
func retryableError(err error) bool {
    e, ok := err.(*googleapi.Error)
    if !ok {
        return false
    }
    var reason string
    if len(e.Errors) > 0 {
        reason = e.Errors[0].Reason
    }
    return reason == "backendError" || reason == "rateLimitExceeded"
}

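runWithRetry defers to the shared retry helper in cloud.google.com/go/internal with the gax backoff above, and retryableError deliberately whitelists only backendError and rateLimitExceeded. A self-contained sketch of the same loop using just the standard library (an illustration of the pattern, not the package's implementation):

    package main

    import (
        "context"
        "errors"
        "fmt"
        "time"
    )

    // retry calls fn until it succeeds, returns a non-retryable error,
    // or the context is done; delays double from 1s up to a 32s cap.
    func retry(ctx context.Context, retryable func(error) bool, fn func() error) error {
        delay := 1 * time.Second
        const max = 32 * time.Second
        for {
            err := fn()
            if err == nil || !retryable(err) {
                return err
            }
            select {
            case <-ctx.Done():
                return ctx.Err()
            case <-time.After(delay):
            }
            if delay *= 2; delay > max {
                delay = max
            }
        }
    }

    func main() {
        calls := 0
        err := retry(context.Background(),
            func(error) bool { return true }, // treat everything as transient
            func() error {
                if calls++; calls < 3 {
                    return errors.New("backendError")
                }
                return nil
            })
        fmt.Println(calls, err) // 3 <nil>
    }
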
@@ -15,6 +15,7 @@
package bigquery

import (
    "errors"
    "fmt"
    "time"

@@ -59,6 +60,7 @@ type TableMetadata struct {

    // Use Legacy SQL for the view query. The default.
    // At most one of UseLegacySQL and UseStandardSQL can be true.
    // Deprecated: use UseLegacySQL.
    UseStandardSQL bool

    // If non-nil, the table is partitioned by time.

@@ -68,6 +70,12 @@ type TableMetadata struct {
    // indefinitely. Expired tables will be deleted and their storage reclaimed.
    ExpirationTime time.Time

    // User-provided labels.
    Labels map[string]string

    // Information about a table stored outside of BigQuery.
    ExternalDataConfig *ExternalDataConfig

    // All the fields below are read-only.

    FullID string // An opaque ID uniquely identifying the table.

@@ -139,6 +147,32 @@ type TimePartitioning struct {
    // The amount of time to keep the storage for a partition.
    // If the duration is empty (0), the data in the partitions do not expire.
    Expiration time.Duration

    // If empty, the table is partitioned by pseudo column '_PARTITIONTIME'; if set, the
    // table is partitioned by this field. The field must be a top-level TIMESTAMP or
    // DATE field. Its mode must be NULLABLE or REQUIRED.
    Field string
}

func (p *TimePartitioning) toBQ() *bq.TimePartitioning {
    if p == nil {
        return nil
    }
    return &bq.TimePartitioning{
        Type:         "DAY",
        ExpirationMs: int64(p.Expiration / time.Millisecond),
        Field:        p.Field,
    }
}

func bqToTimePartitioning(q *bq.TimePartitioning) *TimePartitioning {
    if q == nil {
        return nil
    }
    return &TimePartitioning{
        Expiration: time.Duration(q.ExpirationMs) * time.Millisecond,
        Field:      q.Field,
    }
}

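Only day-level partitioning exists here, so toBQ always sends Type: "DAY" and carries Expiration as milliseconds on the wire. A quick sketch of that conversion in isolation:

    package main

    import (
        "fmt"
        "time"
    )

    func main() {
        // TimePartitioning.Expiration travels as ExpirationMs on the wire.
        exp := 90 * 24 * time.Hour
        ms := int64(exp / time.Millisecond)
        fmt.Println(ms)                                   // 7776000000
        fmt.Println(time.Duration(ms) * time.Millisecond) // 2160h0m0s, round trip intact
    }
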
// StreamingBuffer holds information about the streaming buffer.

@@ -155,7 +189,7 @@ type StreamingBuffer struct {
    OldestEntryTime time.Time
}

func (t *Table) tableRefProto() *bq.TableReference {
func (t *Table) toBQ() *bq.TableReference {
    return &bq.TableReference{
        ProjectId: t.ProjectID,
        DatasetId: t.DatasetID,

@@ -174,60 +208,280 @@ func (t *Table) implicitTable() bool {
}

// Create creates a table in the BigQuery service.
// Pass in a TableMetadata value to configure the dataset.
// Pass in a TableMetadata value to configure the table.
// If tm.View.Query is non-empty, the created table will be of type VIEW.
// Expiration can only be set during table creation.
// After table creation, a view can be modified only if its table was initially created
// with a view.
func (t *Table) Create(ctx context.Context, tm *TableMetadata) error {
    return t.c.service.createTable(ctx, t.ProjectID, t.DatasetID, t.TableID, tm)
    table, err := tm.toBQ()
    if err != nil {
        return err
    }
    table.TableReference = &bq.TableReference{
        ProjectId: t.ProjectID,
        DatasetId: t.DatasetID,
        TableId:   t.TableID,
    }
    req := t.c.bqs.Tables.Insert(t.ProjectID, t.DatasetID, table).Context(ctx)
    setClientHeader(req.Header())
    _, err = req.Do()
    return err
}

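Create now builds the bq.Table itself and calls the generated Tables.Insert directly instead of going through the service layer. Through the public API the change is invisible; a typical call looks like this sketch (project, dataset, and table IDs are placeholders, and the context import follows the golang.org/x/net/context convention this package used at the time):

    package main

    import (
        "log"
        "time"

        "cloud.google.com/go/bigquery"
        "golang.org/x/net/context"
    )

    func main() {
        ctx := context.Background()
        client, err := bigquery.NewClient(ctx, "my-project") // placeholder project ID
        if err != nil {
            log.Fatal(err)
        }
        t := client.Dataset("my_dataset").Table("events")
        // Partitioning and expiration can only be set at creation time.
        err = t.Create(ctx, &bigquery.TableMetadata{
            Name:             "events",
            TimePartitioning: &bigquery.TimePartitioning{Expiration: 90 * 24 * time.Hour},
            ExpirationTime:   time.Now().Add(365 * 24 * time.Hour),
        })
        if err != nil {
            log.Fatal(err)
        }
    }
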
func (tm *TableMetadata) toBQ() (*bq.Table, error) {
    t := &bq.Table{}
    if tm == nil {
        return t, nil
    }
    if tm.Schema != nil && tm.ViewQuery != "" {
        return nil, errors.New("bigquery: provide Schema or ViewQuery, not both")
    }
    t.FriendlyName = tm.Name
    t.Description = tm.Description
    t.Labels = tm.Labels
    if tm.Schema != nil {
        t.Schema = tm.Schema.toBQ()
    }
    if tm.ViewQuery != "" {
        if tm.UseStandardSQL && tm.UseLegacySQL {
            return nil, errors.New("bigquery: cannot provide both UseStandardSQL and UseLegacySQL")
        }
        t.View = &bq.ViewDefinition{Query: tm.ViewQuery}
        if tm.UseLegacySQL {
            t.View.UseLegacySql = true
        } else {
            t.View.UseLegacySql = false
            t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
        }
    } else if tm.UseLegacySQL || tm.UseStandardSQL {
        return nil, errors.New("bigquery: UseLegacy/StandardSQL requires ViewQuery")
    }
    t.TimePartitioning = tm.TimePartitioning.toBQ()
    if !tm.ExpirationTime.IsZero() {
        t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
    }
    if tm.ExternalDataConfig != nil {
        edc := tm.ExternalDataConfig.toBQ()
        t.ExternalDataConfiguration = &edc
    }
    if tm.FullID != "" {
        return nil, errors.New("cannot set FullID on create")
    }
    if tm.Type != "" {
        return nil, errors.New("cannot set Type on create")
    }
    if !tm.CreationTime.IsZero() {
        return nil, errors.New("cannot set CreationTime on create")
    }
    if !tm.LastModifiedTime.IsZero() {
        return nil, errors.New("cannot set LastModifiedTime on create")
    }
    if tm.NumBytes != 0 {
        return nil, errors.New("cannot set NumBytes on create")
    }
    if tm.NumRows != 0 {
        return nil, errors.New("cannot set NumRows on create")
    }
    if tm.StreamingBuffer != nil {
        return nil, errors.New("cannot set StreamingBuffer on create")
    }
    if tm.ETag != "" {
        return nil, errors.New("cannot set ETag on create")
    }
    return t, nil
}

// Metadata fetches the metadata for the table.
func (t *Table) Metadata(ctx context.Context) (*TableMetadata, error) {
    return t.c.service.getTableMetadata(ctx, t.ProjectID, t.DatasetID, t.TableID)
    req := t.c.bqs.Tables.Get(t.ProjectID, t.DatasetID, t.TableID).Context(ctx)
    setClientHeader(req.Header())
    var table *bq.Table
    err := runWithRetry(ctx, func() (err error) {
        table, err = req.Do()
        return err
    })
    if err != nil {
        return nil, err
    }
    return bqToTableMetadata(table)
}

func bqToTableMetadata(t *bq.Table) (*TableMetadata, error) {
    md := &TableMetadata{
        Description:      t.Description,
        Name:             t.FriendlyName,
        Type:             TableType(t.Type),
        FullID:           t.Id,
        Labels:           t.Labels,
        NumBytes:         t.NumBytes,
        NumRows:          t.NumRows,
        ExpirationTime:   unixMillisToTime(t.ExpirationTime),
        CreationTime:     unixMillisToTime(t.CreationTime),
        LastModifiedTime: unixMillisToTime(int64(t.LastModifiedTime)),
        ETag:             t.Etag,
    }
    if t.Schema != nil {
        md.Schema = bqToSchema(t.Schema)
    }
    if t.View != nil {
        md.ViewQuery = t.View.Query
        md.UseLegacySQL = t.View.UseLegacySql
    }
    md.TimePartitioning = bqToTimePartitioning(t.TimePartitioning)
    if t.StreamingBuffer != nil {
        md.StreamingBuffer = &StreamingBuffer{
            EstimatedBytes:  t.StreamingBuffer.EstimatedBytes,
            EstimatedRows:   t.StreamingBuffer.EstimatedRows,
            OldestEntryTime: unixMillisToTime(int64(t.StreamingBuffer.OldestEntryTime)),
        }
    }
    if t.ExternalDataConfiguration != nil {
        edc, err := bqToExternalDataConfig(t.ExternalDataConfiguration)
        if err != nil {
            return nil, err
        }
        md.ExternalDataConfig = edc
    }
    return md, nil
}

// Delete deletes the table.
func (t *Table) Delete(ctx context.Context) error {
    return t.c.service.deleteTable(ctx, t.ProjectID, t.DatasetID, t.TableID)
    req := t.c.bqs.Tables.Delete(t.ProjectID, t.DatasetID, t.TableID).Context(ctx)
    setClientHeader(req.Header())
    return req.Do()
}

// Read fetches the contents of the table.
func (t *Table) Read(ctx context.Context) *RowIterator {
    return newRowIterator(ctx, t.c.service, &readTableConf{
        projectID: t.ProjectID,
        datasetID: t.DatasetID,
        tableID:   t.TableID,
    })
    return t.read(ctx, fetchPage)
}

func (t *Table) read(ctx context.Context, pf pageFetcher) *RowIterator {
    return newRowIterator(ctx, t, pf)
}

// Update modifies specific Table metadata fields.
func (t *Table) Update(ctx context.Context, tm TableMetadataToUpdate, etag string) (*TableMetadata, error) {
    var conf patchTableConf
    bqt := tm.toBQ()
    call := t.c.bqs.Tables.Patch(t.ProjectID, t.DatasetID, t.TableID, bqt).Context(ctx)
    setClientHeader(call.Header())
    if etag != "" {
        call.Header().Set("If-Match", etag)
    }
    var res *bq.Table
    if err := runWithRetry(ctx, func() (err error) {
        res, err = call.Do()
        return err
    }); err != nil {
        return nil, err
    }
    return bqToTableMetadata(res)
}

func (tm *TableMetadataToUpdate) toBQ() *bq.Table {
    t := &bq.Table{}
    forceSend := func(field string) {
        t.ForceSendFields = append(t.ForceSendFields, field)
    }

    if tm.Description != nil {
        s := optional.ToString(tm.Description)
        conf.Description = &s
        t.Description = optional.ToString(tm.Description)
        forceSend("Description")
    }
    if tm.Name != nil {
        s := optional.ToString(tm.Name)
        conf.Name = &s
        t.FriendlyName = optional.ToString(tm.Name)
        forceSend("FriendlyName")
    }
    conf.Schema = tm.Schema
    conf.ExpirationTime = tm.ExpirationTime
    return t.c.service.patchTable(ctx, t.ProjectID, t.DatasetID, t.TableID, &conf, etag)
    if tm.Schema != nil {
        t.Schema = tm.Schema.toBQ()
        forceSend("Schema")
    }
    if !tm.ExpirationTime.IsZero() {
        t.ExpirationTime = tm.ExpirationTime.UnixNano() / 1e6
        forceSend("ExpirationTime")
    }
    if tm.ViewQuery != nil {
        t.View = &bq.ViewDefinition{
            Query:           optional.ToString(tm.ViewQuery),
            ForceSendFields: []string{"Query"},
        }
    }
    if tm.UseLegacySQL != nil {
        if t.View == nil {
            t.View = &bq.ViewDefinition{}
        }
        t.View.UseLegacySql = optional.ToBool(tm.UseLegacySQL)
        t.View.ForceSendFields = append(t.View.ForceSendFields, "UseLegacySql")
    }
    labels, forces, nulls := tm.update()
    t.Labels = labels
    t.ForceSendFields = append(t.ForceSendFields, forces...)
    t.NullFields = append(t.NullFields, nulls...)
    return t
}

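Since toBQ only fills in the fields the caller actually set (and force-sends them so zero values survive serialization), Update issues a sparse PATCH; passing the etag from a prior Metadata call makes it a compare-and-swap via If-Match. A usage sketch with placeholder names:

    package tableops

    import (
        "cloud.google.com/go/bigquery"
        "golang.org/x/net/context"
    )

    // renameTable is a sketch of a read-modify-write with optimistic
    // concurrency: the PATCH fails if the table changed since Metadata.
    func renameTable(ctx context.Context, t *bigquery.Table) error {
        md, err := t.Metadata(ctx)
        if err != nil {
            return err
        }
        tm := bigquery.TableMetadataToUpdate{Name: "friendlier name"}
        tm.SetLabel("team", "data-platform")
        _, err = t.Update(ctx, tm, md.ETag)
        return err
    }
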
// TableMetadataToUpdate is used when updating a table's metadata.
// Only non-nil fields will be updated.
type TableMetadataToUpdate struct {
    // Description is the user-friendly description of this table.
    // The user-friendly description of this table.
    Description optional.String

    // Name is the user-friendly name for this table.
    // The user-friendly name for this table.
    Name optional.String

    // Schema is the table's schema.
    // The table's schema.
    // When updating a schema, you can add columns but not remove them.
    Schema Schema
    // TODO(jba): support updating the view

    // ExpirationTime is the time when this table expires.
    // The time when this table expires.
    ExpirationTime time.Time

    // The query to use for a view.
    ViewQuery optional.String

    // Use Legacy SQL for the view query.
    UseLegacySQL optional.Bool

    labelUpdater
}

// labelUpdater contains common code for updating labels.
type labelUpdater struct {
    setLabels    map[string]string
    deleteLabels map[string]bool
}

// SetLabel causes a label to be added or modified on a call to Update.
func (u *labelUpdater) SetLabel(name, value string) {
    if u.setLabels == nil {
        u.setLabels = map[string]string{}
    }
    u.setLabels[name] = value
}

// DeleteLabel causes a label to be deleted on a call to Update.
func (u *labelUpdater) DeleteLabel(name string) {
    if u.deleteLabels == nil {
        u.deleteLabels = map[string]bool{}
    }
    u.deleteLabels[name] = true
}

func (u *labelUpdater) update() (labels map[string]string, forces, nulls []string) {
    if u.setLabels == nil && u.deleteLabels == nil {
        return nil, nil, nil
    }
    labels = map[string]string{}
    for k, v := range u.setLabels {
        labels[k] = v
    }
    if len(labels) == 0 && len(u.deleteLabels) > 0 {
        forces = []string{"Labels"}
    }
    for l := range u.deleteLabels {
        nulls = append(nulls, "Labels."+l)
    }
    return labels, forces, nulls
}

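One subtlety in update(): when every pending change is a delete, the labels map stays empty and would be dropped from the request body, so "Labels" has to be force-sent for the Labels.<name> null entries to have a parent object. A standalone sketch of the three return values in that delete-only case (mirroring the logic above with local variables):

    package main

    import "fmt"

    func main() {
        setLabels := map[string]string(nil)
        deleteLabels := map[string]bool{"stale": true}

        labels := map[string]string{}
        for k, v := range setLabels {
            labels[k] = v
        }
        var forces, nulls []string
        if len(labels) == 0 && len(deleteLabels) > 0 {
            forces = []string{"Labels"} // keep the empty map in the request body
        }
        for l := range deleteLabels {
            nulls = append(nulls, "Labels."+l)
        }
        fmt.Println(labels, forces, nulls) // map[] [Labels] [Labels.stale]
    }
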
@@ -1,4 +1,4 @@
// Copyright 2015 Google Inc. All Rights Reserved.
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

@@ -19,11 +19,10 @@ import (
    "time"

    "cloud.google.com/go/internal/testutil"

    bq "google.golang.org/api/bigquery/v2"
)

func TestBQTableToMetadata(t *testing.T) {
func TestBQToTableMetadata(t *testing.T) {
    aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
    aTimeMillis := aTime.UnixNano() / 1e6
    for _, test := range []struct {

@@ -52,22 +51,32 @@ func TestBQTableToMetadata(t *testing.T) {
            TimePartitioning: &bq.TimePartitioning{
                ExpirationMs: 7890,
                Type:         "DAY",
                Field:        "pfield",
            },
            Type:   "EXTERNAL",
            View:   &bq.ViewDefinition{Query: "view-query"},
            Labels: map[string]string{"a": "b"},
            ExternalDataConfiguration: &bq.ExternalDataConfiguration{
                SourceFormat: "GOOGLE_SHEETS",
            },
            Type: "EXTERNAL",
            View: &bq.ViewDefinition{Query: "view-query"},
        },
        &TableMetadata{
            Description:      "desc",
            Name:             "fname",
            ViewQuery:        "view-query",
            FullID:           "id",
            Type:             ExternalTable,
            ExpirationTime:   aTime.Truncate(time.Millisecond),
            CreationTime:     aTime.Truncate(time.Millisecond),
            LastModifiedTime: aTime.Truncate(time.Millisecond),
            NumBytes:         123,
            NumRows:          7,
            TimePartitioning: &TimePartitioning{Expiration: 7890 * time.Millisecond},
            Description:        "desc",
            Name:               "fname",
            ViewQuery:          "view-query",
            FullID:             "id",
            Type:               ExternalTable,
            Labels:             map[string]string{"a": "b"},
            ExternalDataConfig: &ExternalDataConfig{SourceFormat: GoogleSheets},
            ExpirationTime:     aTime.Truncate(time.Millisecond),
            CreationTime:       aTime.Truncate(time.Millisecond),
            LastModifiedTime:   aTime.Truncate(time.Millisecond),
            NumBytes:           123,
            NumRows:            7,
            TimePartitioning: &TimePartitioning{
                Expiration: 7890 * time.Millisecond,
                Field:      "pfield",
            },
            StreamingBuffer: &StreamingBuffer{
                EstimatedBytes: 11,
                EstimatedRows:  3,

@@ -77,14 +86,17 @@ func TestBQTableToMetadata(t *testing.T) {
        },
    },
    } {
        got := bqTableToMetadata(test.in)
        got, err := bqToTableMetadata(test.in)
        if err != nil {
            t.Fatal(err)
        }
        if diff := testutil.Diff(got, test.want); diff != "" {
            t.Errorf("%+v:\n, -got, +want:\n%s", test.in, diff)
        }
    }
}

func TestBQTableFromMetadata(t *testing.T) {
func TestTableMetadataToBQ(t *testing.T) {
    aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
    aTimeMillis := aTime.UnixNano() / 1e6
    sc := Schema{fieldSchema("desc", "name", "STRING", false, true)}

@@ -97,10 +109,12 @@ func TestBQTableFromMetadata(t *testing.T) {
    {&TableMetadata{}, &bq.Table{}},
    {
        &TableMetadata{
            Name:           "n",
            Description:    "d",
            Schema:         sc,
            ExpirationTime: aTime,
            Name:               "n",
            Description:        "d",
            Schema:             sc,
            ExpirationTime:     aTime,
            Labels:             map[string]string{"a": "b"},
            ExternalDataConfig: &ExternalDataConfig{SourceFormat: Bigtable},
        },
        &bq.Table{
            FriendlyName: "n",

@@ -111,6 +125,8 @@ func TestBQTableFromMetadata(t *testing.T) {
                },
            },
            ExpirationTime:            aTimeMillis,
            Labels:                    map[string]string{"a": "b"},
            ExternalDataConfiguration: &bq.ExternalDataConfiguration{SourceFormat: "BIGTABLE"},
        },
    },
    {

@@ -142,9 +158,12 @@ func TestBQTableFromMetadata(t *testing.T) {
    },
    {
        &TableMetadata{
            ViewQuery:        "q",
            UseStandardSQL:   true,
            TimePartitioning: &TimePartitioning{time.Second},
            ViewQuery:      "q",
            UseStandardSQL: true,
            TimePartitioning: &TimePartitioning{
                Expiration: time.Second,
                Field:      "ofDreams",
            },
        },
        &bq.Table{
            View: &bq.ViewDefinition{

@@ -155,11 +174,12 @@ func TestBQTableFromMetadata(t *testing.T) {
            TimePartitioning: &bq.TimePartitioning{
                Type:         "DAY",
                ExpirationMs: 1000,
                Field:        "ofDreams",
            },
        },
    },
    } {
        got, err := bqTableFromMetadata(test.in)
        got, err := test.in.toBQ()
        if err != nil {
            t.Fatalf("%+v: %v", test.in, err)
        }

@@ -183,69 +203,89 @@ func TestBQTableFromMetadata(t *testing.T) {
        {StreamingBuffer: &StreamingBuffer{}},
        {ETag: "x"},
    } {
        _, err := bqTableFromMetadata(in)
        _, err := in.toBQ()
        if err == nil {
            t.Errorf("%+v: got nil, want error", in)
        }
    }
}

func TestBQDatasetFromMetadata(t *testing.T) {
func TestTableMetadataToUpdateToBQ(t *testing.T) {
    aTime := time.Date(2017, 1, 26, 0, 0, 0, 0, time.Local)
    for _, test := range []struct {
        in   *DatasetMetadata
        want *bq.Dataset
        tm   TableMetadataToUpdate
        want *bq.Table
    }{
        {nil, &bq.Dataset{}},
        {&DatasetMetadata{Name: "name"}, &bq.Dataset{FriendlyName: "name"}},
        {&DatasetMetadata{
            Name:                   "name",
            Description:            "desc",
            DefaultTableExpiration: time.Hour,
            Location:               "EU",
            Labels:                 map[string]string{"x": "y"},
        }, &bq.Dataset{
            FriendlyName:             "name",
            Description:              "desc",
            DefaultTableExpirationMs: 60 * 60 * 1000,
            Location:                 "EU",
            Labels:                   map[string]string{"x": "y"},
        }},
        {
            tm:   TableMetadataToUpdate{},
            want: &bq.Table{},
        },
        {
            tm: TableMetadataToUpdate{
                Description: "d",
                Name:        "n",
            },
            want: &bq.Table{
                Description:     "d",
                FriendlyName:    "n",
                ForceSendFields: []string{"Description", "FriendlyName"},
            },
        },
        {
            tm: TableMetadataToUpdate{
                Schema:         Schema{fieldSchema("desc", "name", "STRING", false, true)},
                ExpirationTime: aTime,
            },
            want: &bq.Table{
                Schema: &bq.TableSchema{
                    Fields: []*bq.TableFieldSchema{
                        bqTableFieldSchema("desc", "name", "STRING", "REQUIRED"),
                    },
                },
                ExpirationTime:  aTime.UnixNano() / 1e6,
                ForceSendFields: []string{"Schema", "ExpirationTime"},
            },
        },
        {
            tm: TableMetadataToUpdate{ViewQuery: "q"},
            want: &bq.Table{
                View: &bq.ViewDefinition{Query: "q", ForceSendFields: []string{"Query"}},
            },
        },
        {
            tm: TableMetadataToUpdate{UseLegacySQL: false},
            want: &bq.Table{
                View: &bq.ViewDefinition{
                    UseLegacySql:    false,
                    ForceSendFields: []string{"UseLegacySql"},
                },
            },
        },
        {
            tm: TableMetadataToUpdate{ViewQuery: "q", UseLegacySQL: true},
            want: &bq.Table{
                View: &bq.ViewDefinition{
                    Query:           "q",
                    UseLegacySql:    true,
                    ForceSendFields: []string{"Query", "UseLegacySql"},
                },
            },
        },
        {
            tm: func() (tm TableMetadataToUpdate) {
                tm.SetLabel("L", "V")
                tm.DeleteLabel("D")
                return tm
            }(),
            want: &bq.Table{
                Labels:     map[string]string{"L": "V"},
                NullFields: []string{"Labels.D"},
            },
        },
    } {
        got, err := bqDatasetFromMetadata(test.in)
        if err != nil {
            t.Fatal(err)
        }
        got := test.tm.toBQ()
        if !testutil.Equal(got, test.want) {
            t.Errorf("%v:\ngot %+v\nwant %+v", test.in, got, test.want)
            t.Errorf("%+v:\ngot %+v\nwant %+v", test.tm, got, test.want)
        }
    }

    // Check that non-writeable fields are unset.
    _, err := bqDatasetFromMetadata(&DatasetMetadata{FullID: "x"})
    if err == nil {
        t.Error("got nil, want error")
    }
}

func TestBQDatasetFromUpdateMetadata(t *testing.T) {
    dm := DatasetMetadataToUpdate{
        Description:            "desc",
        Name:                   "name",
        DefaultTableExpiration: time.Hour,
    }
    dm.SetLabel("label", "value")
    dm.DeleteLabel("del")

    got := bqDatasetFromUpdateMetadata(&dm)
    want := &bq.Dataset{
        Description:              "desc",
        FriendlyName:             "name",
        DefaultTableExpirationMs: 60 * 60 * 1000,
        Labels:                   map[string]string{"label": "value"},
        ForceSendFields:          []string{"Description", "FriendlyName"},
        NullFields:               []string{"Labels.del"},
    }
    if diff := testutil.Diff(got, want); diff != "" {
        t.Errorf("-got, +want:\n%s", diff)
    }
}

@@ -20,6 +20,7 @@ import (
    "reflect"

    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
)

// An Uploader does streaming inserts into a BigQuery table.

@@ -151,27 +152,73 @@ func toValueSaver(x interface{}) (ValueSaver, bool, error) {
}

func (u *Uploader) putMulti(ctx context.Context, src []ValueSaver) error {
    var rows []*insertionRow
    for _, saver := range src {
    req, err := u.newInsertRequest(src)
    if err != nil {
        return err
    }
    if req == nil {
        return nil
    }
    call := u.t.c.bqs.Tabledata.InsertAll(u.t.ProjectID, u.t.DatasetID, u.t.TableID, req)
    call = call.Context(ctx)
    setClientHeader(call.Header())
    var res *bq.TableDataInsertAllResponse
    err = runWithRetry(ctx, func() (err error) {
        res, err = call.Do()
        return err
    })
    if err != nil {
        return err
    }
    return handleInsertErrors(res.InsertErrors, req.Rows)
}

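putMulti now owns the whole insertAll round trip: build the request, call Tabledata.InsertAll with retry, then translate per-row errors. From the caller's side this all sits behind Uploader.Put; a usage sketch (the struct and field names are illustrative):

    package ingest

    import (
        "cloud.google.com/go/bigquery"
        "golang.org/x/net/context"
    )

    type Event struct {
        Name string
        N    int
    }

    // insertEvents is a sketch of streaming rows through the Uploader;
    // Put accepts structs, struct pointers, or ValueSavers.
    func insertEvents(ctx context.Context, t *bigquery.Table, evs []Event) error {
        u := t.Uploader()
        u.SkipInvalidRows = true // drop bad rows instead of failing the batch
        return u.Put(ctx, evs)
    }
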
func (u *Uploader) newInsertRequest(savers []ValueSaver) (*bq.TableDataInsertAllRequest, error) {
    if savers == nil { // If there are no rows, do nothing.
        return nil, nil
    }
    req := &bq.TableDataInsertAllRequest{
        TemplateSuffix:      u.TableTemplateSuffix,
        IgnoreUnknownValues: u.IgnoreUnknownValues,
        SkipInvalidRows:     u.SkipInvalidRows,
    }
    for _, saver := range savers {
        row, insertID, err := saver.Save()
        if err != nil {
            return err
            return nil, err
        }
        rows = append(rows, &insertionRow{InsertID: insertID, Row: row})
        if insertID == "" {
            insertID = randomIDFn()
        }
        m := make(map[string]bq.JsonValue)
        for k, v := range row {
            m[k] = bq.JsonValue(v)
        }
        req.Rows = append(req.Rows, &bq.TableDataInsertAllRequestRows{
            InsertId: insertID,
            Json:     m,
        })
    }

    return u.t.c.service.insertRows(ctx, u.t.ProjectID, u.t.DatasetID, u.t.TableID, rows, &insertRowsConf{
        skipInvalidRows:     u.SkipInvalidRows,
        ignoreUnknownValues: u.IgnoreUnknownValues,
        templateSuffix:      u.TableTemplateSuffix,
    })
    return req, nil
}

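Rows that arrive without an insert ID are assigned one from randomIDFn, and BigQuery uses the ID to de-duplicate retried inserts on a best-effort basis. A sketch of a ValueSaver that supplies a stable ID per logical row, so retries of the same row dedupe server-side (names are illustrative):

    package ingest

    import "cloud.google.com/go/bigquery"

    // orderRow implements bigquery.ValueSaver with a stable insert ID, so
    // retrying the same order does not double-insert (best effort only).
    type orderRow struct {
        OrderID string
        Total   float64
    }

    func (r orderRow) Save() (map[string]bigquery.Value, string, error) {
        row := map[string]bigquery.Value{
            "order_id": r.OrderID,
            "total":    r.Total,
        }
        return row, "order-" + r.OrderID, nil // second value is the insert ID
    }
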
// An insertionRow represents a row of data to be inserted into a table.
type insertionRow struct {
    // If InsertID is non-empty, BigQuery will use it to de-duplicate insertions of
    // this row on a best-effort basis.
    InsertID string
    // The data to be inserted, represented as a map from field name to Value.
    Row map[string]Value
func handleInsertErrors(ierrs []*bq.TableDataInsertAllResponseInsertErrors, rows []*bq.TableDataInsertAllRequestRows) error {
    if len(ierrs) == 0 {
        return nil
    }
    var errs PutMultiError
    for _, e := range ierrs {
        if int(e.Index) >= len(rows) {
            return fmt.Errorf("internal error: unexpected row index: %v", e.Index)
        }
        rie := RowInsertionError{
            InsertID: rows[e.Index].InsertId,
            RowIndex: int(e.Index),
        }
        for _, errp := range e.Errors {
            rie.Errors = append(rie.Errors, bqToError(errp))
        }
        errs = append(errs, rie)
    }
    return errs
}

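handleInsertErrors folds the response's per-row errors into a PutMultiError, so a caller can find out exactly which rows were rejected. A sketch of unpacking it after Put (placeholder names):

    package ingest

    import (
        "log"

        "cloud.google.com/go/bigquery"
        "golang.org/x/net/context"
    )

    // putWithDiagnostics is a sketch of inspecting the per-row errors that
    // handleInsertErrors packs into a PutMultiError.
    func putWithDiagnostics(ctx context.Context, u *bigquery.Uploader, rows interface{}) error {
        err := u.Put(ctx, rows)
        if multi, ok := err.(bigquery.PutMultiError); ok {
            for _, rowErr := range multi {
                log.Printf("row %d (insert ID %q) failed: %v",
                    rowErr.RowIndex, rowErr.InsertID, rowErr.Errors)
            }
        }
        return err
    }
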
@@ -15,228 +15,138 @@
package bigquery

import (
    "errors"
    "strconv"
    "testing"

    "github.com/google/go-cmp/cmp"

    "cloud.google.com/go/internal/pretty"
    "cloud.google.com/go/internal/testutil"

    "golang.org/x/net/context"
    bq "google.golang.org/api/bigquery/v2"
)

type testSaver struct {
    ir  *insertionRow
    err error
    row      map[string]Value
    insertID string
    err      error
}

func (ts testSaver) Save() (map[string]Value, string, error) {
    return ts.ir.Row, ts.ir.InsertID, ts.err
    return ts.row, ts.insertID, ts.err
}

func TestRejectsNonValueSavers(t *testing.T) {
    client := &Client{projectID: "project-id"}
    u := Uploader{t: client.Dataset("dataset-id").Table("table-id")}
    inputs := []interface{}{
        1,
        []int{1, 2},
        []interface{}{
            testSaver{ir: &insertionRow{"a", map[string]Value{"one": 1}}},
            1,
        },
        StructSaver{},
    }
    for _, in := range inputs {
        if err := u.Put(context.Background(), in); err == nil {
            t.Errorf("put value: %v; got nil, want error", in)
        }
    }
}
func TestNewInsertRequest(t *testing.T) {
    prev := randomIDFn
    n := 0
    randomIDFn = func() string { n++; return strconv.Itoa(n) }
    defer func() { randomIDFn = prev }()

type insertRowsRecorder struct {
    rowBatches [][]*insertionRow
    service
}

func (irr *insertRowsRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
    irr.rowBatches = append(irr.rowBatches, rows)
    return nil
}

func TestInsertsData(t *testing.T) {
    testCases := []struct {
        data [][]*insertionRow
    tests := []struct {
        ul     *Uploader
        savers []ValueSaver
        req    *bq.TableDataInsertAllRequest
    }{
        {
            data: [][]*insertionRow{
                {
                    &insertionRow{"a", map[string]Value{"one": 1}},
            ul:     &Uploader{},
            savers: nil,
            req:    nil,
        },
        {
            ul: &Uploader{},
            savers: []ValueSaver{
                testSaver{row: map[string]Value{"one": 1}},
                testSaver{row: map[string]Value{"two": 2}},
            },
            req: &bq.TableDataInsertAllRequest{
                Rows: []*bq.TableDataInsertAllRequestRows{
                    {InsertId: "1", Json: map[string]bq.JsonValue{"one": 1}},
                    {InsertId: "2", Json: map[string]bq.JsonValue{"two": 2}},
                },
            },
        },
        {

            data: [][]*insertionRow{
                {
                    &insertionRow{"a", map[string]Value{"one": 1}},
                    &insertionRow{"b", map[string]Value{"two": 2}},
                },
            ul: &Uploader{
                TableTemplateSuffix: "suffix",
                IgnoreUnknownValues: true,
                SkipInvalidRows:     true,
            },
        },
        {

            data: [][]*insertionRow{
                {
                    &insertionRow{"a", map[string]Value{"one": 1}},
                },
                {
                    &insertionRow{"b", map[string]Value{"two": 2}},
                },
            savers: []ValueSaver{
                testSaver{insertID: "a", row: map[string]Value{"one": 1}},
                testSaver{insertID: "", row: map[string]Value{"two": 2}},
            },
        },
        {

            data: [][]*insertionRow{
                {
                    &insertionRow{"a", map[string]Value{"one": 1}},
                    &insertionRow{"b", map[string]Value{"two": 2}},
                },
                {
                    &insertionRow{"c", map[string]Value{"three": 3}},
                    &insertionRow{"d", map[string]Value{"four": 4}},
            req: &bq.TableDataInsertAllRequest{
                Rows: []*bq.TableDataInsertAllRequestRows{
                    {InsertId: "a", Json: map[string]bq.JsonValue{"one": 1}},
                    {InsertId: "3", Json: map[string]bq.JsonValue{"two": 2}},
                },
                TemplateSuffix:      "suffix",
                SkipInvalidRows:     true,
                IgnoreUnknownValues: true,
            },
        },
    }
    for _, tc := range testCases {
        irr := &insertRowsRecorder{}
        client := &Client{
            projectID: "project-id",
            service:   irr,
        }
        u := client.Dataset("dataset-id").Table("table-id").Uploader()
        for _, batch := range tc.data {
            if len(batch) == 0 {
                continue
            }
            var toUpload interface{}
            if len(batch) == 1 {
                toUpload = testSaver{ir: batch[0]}
            } else {
                savers := []testSaver{}
                for _, row := range batch {
                    savers = append(savers, testSaver{ir: row})
                }
                toUpload = savers
            }

            err := u.Put(context.Background(), toUpload)
            if err != nil {
                t.Errorf("expected successful Put of ValueSaver; got: %v", err)
            }
        }
        if got, want := irr.rowBatches, tc.data; !testutil.Equal(got, want) {
            t.Errorf("got: %v, want: %v", got, want)
        }
    }
}

type uploadOptionRecorder struct {
    received *insertRowsConf
    service
}

func (u *uploadOptionRecorder) insertRows(ctx context.Context, projectID, datasetID, tableID string, rows []*insertionRow, conf *insertRowsConf) error {
    u.received = conf
    return nil
}

func TestUploadOptionsPropagate(t *testing.T) {
    // we don't care for the data in this testcase.
    dummyData := testSaver{ir: &insertionRow{}}
    recorder := new(uploadOptionRecorder)
    c := &Client{service: recorder}
    table := &Table{
        ProjectID: "project-id",
        DatasetID: "dataset-id",
        TableID:   "table-id",
        c:         c,
    }

    tests := [...]struct {
        ul   *Uploader
        conf insertRowsConf
    }{
        {
            // test zero options lead to zero value for insertRowsConf
            ul: table.Uploader(),
        },
        {
            ul: func() *Uploader {
                u := table.Uploader()
                u.TableTemplateSuffix = "suffix"
                return u
            }(),
            conf: insertRowsConf{
                templateSuffix: "suffix",
            },
        },
        {
            ul: func() *Uploader {
                u := table.Uploader()
                u.IgnoreUnknownValues = true
                return u
            }(),
            conf: insertRowsConf{
                ignoreUnknownValues: true,
            },
        },
        {
            ul: func() *Uploader {
                u := table.Uploader()
                u.SkipInvalidRows = true
                return u
            }(),
            conf: insertRowsConf{
                skipInvalidRows: true,
            },
        },
        { // multiple upload options combine
            ul: func() *Uploader {
                u := table.Uploader()
                u.TableTemplateSuffix = "suffix"
                u.IgnoreUnknownValues = true
                u.SkipInvalidRows = true
                return u
            }(),
            conf: insertRowsConf{
                templateSuffix:      "suffix",
                skipInvalidRows:     true,
                ignoreUnknownValues: true,
            },
        },
    }

    for i, tc := range tests {
        err := tc.ul.Put(context.Background(), dummyData)
        got, err := tc.ul.newInsertRequest(tc.savers)
        if err != nil {
            t.Fatalf("%d: expected successful Put of ValueSaver; got: %v", i, err)
            t.Fatal(err)
        }

        if recorder.received == nil {
            t.Fatalf("%d: received no options at all!", i)
        want := tc.req
        if !testutil.Equal(got, want) {
            t.Errorf("%d: %#v: got %#v, want %#v", i, tc.ul, got, want)
        }
    }
}

        want := tc.conf
        got := *recorder.received
        if got != want {
            t.Errorf("%d: got %#v, want %#v, ul=%#v", i, got, want, tc.ul)
func TestNewInsertRequestErrors(t *testing.T) {
    var u Uploader
    _, err := u.newInsertRequest([]ValueSaver{testSaver{err: errors.New("!")}})
    if err == nil {
        t.Error("got nil, want error")
    }
}

func TestHandleInsertErrors(t *testing.T) {
    rows := []*bq.TableDataInsertAllRequestRows{
        {InsertId: "a"},
        {InsertId: "b"},
    }
    for _, test := range []struct {
        in   []*bq.TableDataInsertAllResponseInsertErrors
        want error
    }{
        {
            in:   nil,
            want: nil,
        },
        {
            in:   []*bq.TableDataInsertAllResponseInsertErrors{{Index: 1}},
            want: PutMultiError{RowInsertionError{InsertID: "b", RowIndex: 1}},
        },
        {
            in: []*bq.TableDataInsertAllResponseInsertErrors{
                {Errors: []*bq.ErrorProto{{Message: "m0"}}, Index: 0},
                {Errors: []*bq.ErrorProto{{Message: "m1"}}, Index: 1},
            },
            want: PutMultiError{
                RowInsertionError{InsertID: "a", RowIndex: 0, Errors: []error{&Error{Message: "m0"}}},
                RowInsertionError{InsertID: "b", RowIndex: 1, Errors: []error{&Error{Message: "m1"}}},
            },
        },
    } {
        got := handleInsertErrors(test.in, rows)
        if !testutil.Equal(got, test.want) {
            t.Errorf("%#v:\ngot\n%#v\nwant\n%#v", test.in, got, test.want)
        }
    }
}

func TestValueSavers(t *testing.T) {
    ts := &testSaver{ir: &insertionRow{}}
    ts := &testSaver{}
    type T struct{ I int }
    schema, err := InferSchema(T{})
    if err != nil {

@@ -246,6 +156,8 @@ func TestValueSavers(t *testing.T) {
        in   interface{}
        want []ValueSaver
    }{
        {[]interface{}(nil), nil},
        {[]interface{}{}, nil},
        {ts, []ValueSaver{ts}},
        {T{I: 1}, []ValueSaver{&StructSaver{Schema: schema, Struct: T{I: 1}}}},
        {[]ValueSaver{ts, ts}, []ValueSaver{ts, ts}},

@@ -279,3 +191,21 @@ func TestValueSavers(t *testing.T) {
        }
    }
}

func TestValueSaversErrors(t *testing.T) {
    inputs := []interface{}{
        nil,
        1,
        []int{1, 2},
        []interface{}{
            testSaver{row: map[string]Value{"one": 1}, insertID: "a"},
            1,
        },
        StructSaver{},
    }
    for _, in := range inputs {
        if _, err := valueSavers(in); err == nil {
            t.Errorf("%#v: got nil, want error", in)
        }
    }
}

@@ -243,7 +243,7 @@ func compileToOps(structType reflect.Type, schema Schema) ([]structLoaderOp, err
}

// determineSetFunc chooses the best function for setting a field of type ftype
// to a value whose schema field type is sftype. It returns nil if stype
// to a value whose schema field type is stype. It returns nil if stype
// is not assignable to ftype.
// determineSetFunc considers only basic types. See compileToOps for
// handling of repetition and nesting.

@@ -405,7 +405,7 @@ func valuesToMap(vs []Value, schema Schema) (map[string]Value, error) {
    m := make(map[string]Value)
    for i, fieldSchema := range schema {
        if fieldSchema.Type != RecordFieldType {
            m[fieldSchema.Name] = vs[i]
            m[fieldSchema.Name] = toUploadValue(vs[i], fieldSchema)
            continue
        }
        // Nested record, possibly repeated.

@@ -510,14 +510,9 @@ func structFieldToUploadValue(vfield reflect.Value, schemaField *FieldSchema) (i
            schemaField.Name, vfield.Type())
    }

    // A non-nested field can be represented by its Go value.
    // A non-nested field can be represented by its Go value, except for civil times.
    if schemaField.Type != RecordFieldType {
        if !schemaField.Repeated || vfield.Len() > 0 {
            return vfield.Interface(), nil
        }
        // The service treats a null repeated field as an error. Return
        // nil to omit the field entirely.
        return nil, nil
        return toUploadValueReflect(vfield, schemaField), nil
    }
    // A non-repeated nested field is converted into a map[string]Value.
    if !schemaField.Repeated {

@@ -545,6 +540,73 @@ func structFieldToUploadValue(vfield reflect.Value, schemaField *FieldSchema) (i
    return vals, nil
}

func toUploadValue(val interface{}, fs *FieldSchema) interface{} {
    if fs.Type == TimeFieldType || fs.Type == DateTimeFieldType {
        return toUploadValueReflect(reflect.ValueOf(val), fs)
    }
    return val
}

func toUploadValueReflect(v reflect.Value, fs *FieldSchema) interface{} {
    switch fs.Type {
    case TimeFieldType:
        return civilToUploadValue(v, fs, func(v reflect.Value) string {
            return CivilTimeString(v.Interface().(civil.Time))
        })
    case DateTimeFieldType:
        return civilToUploadValue(v, fs, func(v reflect.Value) string {
            return CivilDateTimeString(v.Interface().(civil.DateTime))
        })
    default:
        if !fs.Repeated || v.Len() > 0 {
            return v.Interface()
        }
        // The service treats a null repeated field as an error. Return
        // nil to omit the field entirely.
        return nil
    }
}

func civilToUploadValue(v reflect.Value, fs *FieldSchema, cvt func(reflect.Value) string) interface{} {
    if !fs.Repeated {
        return cvt(v)
    }
    if v.Len() == 0 {
        return nil
    }
    s := make([]string, v.Len())
    for i := 0; i < v.Len(); i++ {
        s[i] = cvt(v.Index(i))
    }
    return s
}

// CivilTimeString returns a string representing a civil.Time in a format compatible
// with BigQuery SQL. It rounds the time to the nearest microsecond and returns a
// string with six digits of sub-second precision.
//
// Use CivilTimeString when using civil.Time in DML, for example in INSERT
// statements.
func CivilTimeString(t civil.Time) string {
    if t.Nanosecond == 0 {
        return t.String()
    } else {
        micro := (t.Nanosecond + 500) / 1000 // round to nearest microsecond
        t.Nanosecond = 0
        return t.String() + fmt.Sprintf(".%06d", micro)
    }
}

// CivilDateTimeString returns a string representing a civil.DateTime in a format compatible
// with BigQuery SQL. It separates the date and time with a space, and formats the time
// with CivilTimeString.
//
// Use CivilDateTimeString when using civil.DateTime in DML, for example in INSERT
// statements.
func CivilDateTimeString(dt civil.DateTime) string {
    return dt.Date.String() + " " + CivilTimeString(dt.Time)
}

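These two formatters exist because BigQuery DML accepts at most microsecond precision for TIME and DATETIME literals, while the civil types carry nanoseconds. A sketch of the expected output (both functions are exported, so this is callable from user code; the values are illustrative):

    package main

    import (
        "fmt"

        "cloud.google.com/go/bigquery"
        "cloud.google.com/go/civil"
    )

    func main() {
        t := civil.Time{Hour: 4, Minute: 5, Second: 6, Nanosecond: 1500}
        dt := civil.DateTime{
            Date: civil.Date{Year: 2017, Month: 1, Day: 26},
            Time: t,
        }
        // 1500ns rounds to 2µs; exactly six fractional digits are emitted.
        fmt.Println(bigquery.CivilTimeString(t))      // 04:05:06.000002
        fmt.Println(bigquery.CivilDateTimeString(dt)) // 2017-01-26 04:05:06.000002
    }
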
// convertRows converts a series of TableRows into a series of Value slices.
// schema is used to interpret the data from rows; its length must match the
// length of each row.

@@ -618,7 +680,6 @@ func convertNestedRecord(val map[string]interface{}, schema Schema) (Value, erro
    for i, cell := range record {
        // each cell contains a single entry, keyed by "v"
        val := cell.(map[string]interface{})["v"]

        fs := schema[i]
        v, err := convertValue(val, fs.Type, fs.Schema)
        if err != nil {

@@ -58,22 +58,31 @@ func TestConvertBasicValues(t *testing.T) {
}

func TestConvertTime(t *testing.T) {
    // TODO(jba): add tests for civil time types.
    schema := []*FieldSchema{
        {Type: TimestampFieldType},
        {Type: DateFieldType},
        {Type: TimeFieldType},
        {Type: DateTimeFieldType},
    }
    thyme := time.Date(1970, 1, 1, 10, 0, 0, 10, time.UTC)
    ts := testTimestamp.Round(time.Millisecond)
    row := &bq.TableRow{
        F: []*bq.TableCell{
            {V: fmt.Sprintf("%.10f", float64(thyme.UnixNano())/1e9)},
            {V: fmt.Sprintf("%.10f", float64(ts.UnixNano())/1e9)},
            {V: testDate.String()},
            {V: testTime.String()},
            {V: testDateTime.String()},
        },
    }
    got, err := convertRow(row, schema)
    if err != nil {
        t.Fatalf("error converting: %v", err)
    }
    if !got[0].(time.Time).Equal(thyme) {
        t.Errorf("converting basic values: got:\n%v\nwant:\n%v", got, thyme)
    want := []Value{ts, testDate, testTime, testDateTime}
    for i, g := range got {
        w := want[i]
        if !testutil.Equal(g, w) {
            t.Errorf("#%d: got:\n%v\nwant:\n%v", i, g, w)
        }
    }
    if got[0].(time.Time).Location() != time.UTC {
        t.Errorf("expected time zone UTC: got:\n%v", got)
@@ -337,24 +346,58 @@ func TestRepeatedRecordContainingRecord(t *testing.T) {
    }
}

func TestConvertRowErrors(t *testing.T) {
    // mismatched lengths
    if _, err := convertRow(&bq.TableRow{F: []*bq.TableCell{{V: ""}}}, Schema{}); err == nil {
        t.Error("got nil, want error")
    }
    v3 := map[string]interface{}{"v": 3}
    for _, test := range []struct {
        value interface{}
        fs    FieldSchema
    }{
        {3, FieldSchema{Type: IntegerFieldType}}, // not a string
        {[]interface{}{v3}, // not a string, repeated
            FieldSchema{Type: IntegerFieldType, Repeated: true}},
        {map[string]interface{}{"f": []interface{}{v3}}, // not a string, nested
            FieldSchema{Type: RecordFieldType, Schema: Schema{{Type: IntegerFieldType}}}},
        {map[string]interface{}{"f": []interface{}{v3}}, // wrong length, nested
            FieldSchema{Type: RecordFieldType, Schema: Schema{}}},
    } {
        _, err := convertRow(
            &bq.TableRow{F: []*bq.TableCell{{V: test.value}}},
            Schema{&test.fs})
        if err == nil {
            t.Errorf("value %v, fs %v: got nil, want error", test.value, test.fs)
        }
    }

    // bad field type
    if _, err := convertBasicType("", FieldType("BAD")); err == nil {
        t.Error("got nil, want error")
    }
}

func TestValuesSaverConvertsToMap(t *testing.T) {
    testCases := []struct {
        vs   ValuesSaver
        want *insertionRow
        vs           ValuesSaver
        wantInsertID string
        wantRow      map[string]Value
    }{
        {
            vs: ValuesSaver{
                Schema: []*FieldSchema{
                    {Name: "intField", Type: IntegerFieldType},
                    {Name: "strField", Type: StringFieldType},
                    {Name: "dtField", Type: DateTimeFieldType},
                },
                InsertID: "iid",
                Row:      []Value{1, "a"},
            },
            want: &insertionRow{
                InsertID: "iid",
                Row:      map[string]Value{"intField": 1, "strField": "a"},
                Row: []Value{1, "a",
                    civil.DateTime{civil.Date{1, 2, 3}, civil.Time{4, 5, 6, 7000}}},
            },
            wantInsertID: "iid",
            wantRow: map[string]Value{"intField": 1, "strField": "a",
                "dtField": "0001-02-03 04:05:06.000007"},
        },
        {
            vs: ValuesSaver{
@@ -371,13 +414,11 @@ func TestValuesSaverConvertsToMap(t *testing.T) {
                InsertID: "iid",
                Row:      []Value{1, []Value{[]Value{2, 3}}},
            },
            want: &insertionRow{
                InsertID: "iid",
                Row: map[string]Value{
                    "intField": 1,
                    "recordField": map[string]Value{
                        "nestedInt": []Value{2, 3},
                    },
            wantInsertID: "iid",
            wantRow: map[string]Value{
                "intField": 1,
                "recordField": map[string]Value{
                    "nestedInt": []Value{2, 3},
                },
            },
        },

@@ -402,25 +443,59 @@ func TestValuesSaverConvertsToMap(t *testing.T) {
                },
            },
        },
            want: &insertionRow{
                InsertID: "iid",
                Row: map[string]Value{
                    "records": []Value{
                        map[string]Value{"x": 1, "y": 2},
                        map[string]Value{"x": 3, "y": 4},
                    },
            wantInsertID: "iid",
            wantRow: map[string]Value{
                "records": []Value{
                    map[string]Value{"x": 1, "y": 2},
                    map[string]Value{"x": 3, "y": 4},
                },
            },
        },
    }
    for _, tc := range testCases {
        data, insertID, err := tc.vs.Save()
        gotRow, gotInsertID, err := tc.vs.Save()
        if err != nil {
            t.Errorf("Expected successful save; got: %v", err)
            continue
        }
        got := &insertionRow{insertID, data}
        if !testutil.Equal(got, tc.want) {
            t.Errorf("saving ValuesSaver:\ngot:\n%+v\nwant:\n%+v", got, tc.want)
        if !testutil.Equal(gotRow, tc.wantRow) {
            t.Errorf("%v row:\ngot:\n%+v\nwant:\n%+v", tc.vs, gotRow, tc.wantRow)
        }
        if !testutil.Equal(gotInsertID, tc.wantInsertID) {
            t.Errorf("%v ID:\ngot:\n%+v\nwant:\n%+v", tc.vs, gotInsertID, tc.wantInsertID)
        }
    }
}

func TestValuesToMapErrors(t *testing.T) {
    for _, test := range []struct {
        values []Value
        schema Schema
    }{
        { // mismatched length
            []Value{1},
            Schema{},
        },
        { // nested record not a slice
            []Value{1},
            Schema{{Type: RecordFieldType}},
        },
        { // nested record mismatched length
            []Value{[]Value{1}},
            Schema{{Type: RecordFieldType}},
        },
        { // nested repeated record not a slice
            []Value{[]Value{1}},
            Schema{{Type: RecordFieldType, Repeated: true}},
        },
        { // nested repeated record mismatched length
            []Value{[]Value{[]Value{1}}},
            Schema{{Type: RecordFieldType, Repeated: true}},
        },
    } {
        _, err := valuesToMap(test.values, test.schema)
        if err == nil {
            t.Errorf("%v, %v: got nil, want error", test.values, test.schema)
        }
    }
}

@ -429,6 +504,8 @@ func TestStructSaver(t *testing.T) {
|
|||
schema := Schema{
|
||||
{Name: "s", Type: StringFieldType},
|
||||
{Name: "r", Type: IntegerFieldType, Repeated: true},
|
||||
{Name: "t", Type: TimeFieldType},
|
||||
{Name: "tr", Type: TimeFieldType, Repeated: true},
|
||||
{Name: "nested", Type: RecordFieldType, Schema: Schema{
|
||||
{Name: "b", Type: BooleanFieldType},
|
||||
}},
|
||||
|
@ -442,6 +519,8 @@ func TestStructSaver(t *testing.T) {
|
|||
T struct {
|
||||
S string
|
||||
R []int
|
||||
T civil.Time
|
||||
TR []civil.Time
|
||||
Nested *N
|
||||
Rnested []*N
|
||||
}
|
||||
|
@ -464,22 +543,27 @@ func TestStructSaver(t *testing.T) {
|
|||
t.Errorf("%s:\ngot\n%#v\nwant\n%#v", msg, got, want)
|
||||
}
|
||||
}
|
||||
|
||||
ct1 := civil.Time{1, 2, 3, 4000}
|
||||
ct2 := civil.Time{5, 6, 7, 8000}
|
||||
in := T{
|
||||
S: "x",
|
||||
R: []int{1, 2},
|
||||
T: ct1,
|
||||
TR: []civil.Time{ct1, ct2},
|
||||
Nested: &N{B: true},
|
||||
Rnested: []*N{{true}, {false}},
|
||||
}
|
||||
want := map[string]Value{
|
||||
"s": "x",
|
||||
"r": []int{1, 2},
|
||||
"t": "01:02:03.000004",
|
||||
"tr": []string{"01:02:03.000004", "05:06:07.000008"},
|
||||
"nested": map[string]Value{"b": true},
|
||||
"rnested": []Value{map[string]Value{"b": true}, map[string]Value{"b": false}},
|
||||
}
|
||||
check("all values", in, want)
|
||||
check("all values, ptr", &in, want)
|
||||
check("empty struct", T{}, map[string]Value{"s": ""})
|
||||
check("empty struct", T{}, map[string]Value{"s": "", "t": "00:00:00"})
|
||||
|
||||
// Missing and extra fields ignored.
|
||||
type T2 struct {
|
||||
|
@ -492,10 +576,39 @@ func TestStructSaver(t *testing.T) {
|
|||
check("nils in slice", T{Rnested: []*N{{true}, nil, {false}}},
|
||||
map[string]Value{
|
||||
"s": "",
|
||||
"t": "00:00:00",
|
||||
"rnested": []Value{map[string]Value{"b": true}, map[string]Value(nil), map[string]Value{"b": false}},
|
||||
})
|
||||
}
|
||||
|
||||
func TestStructSaverErrors(t *testing.T) {
|
||||
type (
|
||||
badField struct {
|
||||
I int `bigquery:"@"`
|
||||
}
|
||||
badR struct{ R int }
|
||||
badRN struct{ R []int }
|
||||
)
|
||||
|
||||
for i, test := range []struct {
|
||||
struct_ interface{}
|
||||
schema Schema
|
||||
}{
|
||||
{0, nil}, // not a struct
|
||||
{&badField{}, nil}, // bad field name
|
||||
{&badR{}, Schema{{Name: "r", Repeated: true}}}, // repeated field has bad type
|
||||
{&badR{}, Schema{{Name: "r", Type: RecordFieldType}}}, // nested field has bad type
|
||||
{&badRN{[]int{0}}, // nested repeated field has bad type
|
||||
Schema{{Name: "r", Type: RecordFieldType, Repeated: true}}},
|
||||
} {
|
||||
ss := &StructSaver{Struct: test.struct_, Schema: test.schema}
|
||||
_, _, err := ss.Save()
|
||||
if err == nil {
|
||||
t.Errorf("#%d, %v, %v: got nil, want error", i, test.struct_, test.schema)
|
||||
}
|
||||
}
|
||||
}
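
For contrast with the error cases above, a well-formed StructSaver pairs exported fields with legal bigquery tags; a hedged sketch (the type and field names are invented for illustration, not part of this diff):

type Item struct {
Name  string `bigquery:"name"`
Count int    `bigquery:"count"`
}
ss := &StructSaver{
Struct: Item{Name: "a", Count: 1},
Schema: Schema{
{Name: "name", Type: StringFieldType},
{Name: "count", Type: IntegerFieldType},
},
}
row, insertID, err := ss.Save() // row maps "name" and "count"; err is nil for valid input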

func TestConvertRows(t *testing.T) {
schema := []*FieldSchema{
{Type: StringFieldType},

@ -528,6 +641,12 @@ func TestConvertRows(t *testing.T) {
if !testutil.Equal(got, want) {
t.Errorf("\ngot %v\nwant %v", got, want)
}

rows[0].F[0].V = 1
_, err = convertRows(rows, schema)
if err == nil {
t.Error("got nil, want error")
}
}

func TestValueList(t *testing.T) {

@ -835,6 +954,65 @@ func TestStructLoaderErrors(t *testing.T) {
type bad2 struct{ I uint } // unsupported integer type
check(&bad2{})

type bad3 struct {
I int `bigquery:"@"`
} // bad field name
check(&bad3{})

type bad4 struct{ Nested int } // non-struct for nested field
check(&bad4{})

type bad5 struct{ Nested struct{ NestS int } } // bad nested struct
check(&bad5{})

bad6 := &struct{ Nums int }{} // non-slice for repeated field
sl := structLoader{}
err := sl.set(bad6, repSchema)
if err == nil {
t.Errorf("%T: got nil, want error", bad6)
}

// sl.set's error is sticky, even with good input.
err2 := sl.set(&repStruct{}, repSchema)
if err2 != err {
t.Errorf("%v != %v, expected equal", err2, err)
}
// sl.Load is similarly sticky.
err2 = sl.Load(nil, nil)
if err2 != err {
t.Errorf("%v != %v, expected equal", err2, err)
}

// Null values.
schema := Schema{
{Name: "i", Type: IntegerFieldType},
{Name: "f", Type: FloatFieldType},
{Name: "b", Type: BooleanFieldType},
{Name: "s", Type: StringFieldType},
{Name: "by", Type: BytesFieldType},
{Name: "d", Type: DateFieldType},
}
type s struct {
I int
F float64
B bool
S string
By []byte
D civil.Date
}
vals := []Value{int64(0), 0.0, false, "", []byte{}, testDate}
if err := load(&s{}, schema, vals); err != nil {
t.Fatal(err)
}
for i, e := range vals {
vals[i] = nil
got := load(&s{}, schema, vals)
if got != errNoNulls {
t.Errorf("#%d: got %v, want %v", i, got, errNoNulls)
}
vals[i] = e
}

// Using more than one struct type with the same structLoader.
type different struct {
B bool

@ -845,11 +1023,11 @@ func TestStructLoaderErrors(t *testing.T) {
Nums []int
}

var sl structLoader
sl = structLoader{}
if err := sl.set(&testStruct1{}, schema2); err != nil {
t.Fatal(err)
}
err := sl.set(&different{}, schema2)
err = sl.set(&different{}, schema2)
if err == nil {
t.Error("different struct types: got nil, want error")
}

@ -19,10 +19,12 @@ import (
"testing"
"time"

"cloud.google.com/go/internal/testutil"

"fmt"
"golang.org/x/net/context"
"reflect"
"strings"

"golang.org/x/net/context"
)

func TestAdminIntegration(t *testing.T) {

@ -126,7 +128,7 @@ func TestAdminIntegration(t *testing.T) {
}
sort.Strings(tblInfo.Families)
wantFams := []string{"fam1", "fam2"}
if !reflect.DeepEqual(tblInfo.Families, wantFams) {
if !testutil.Equal(tblInfo.Families, wantFams) {
t.Errorf("Column family mismatch, got %v, want %v", tblInfo.Families, wantFams)
}

@ -19,12 +19,13 @@ package bigtable
import (
"fmt"
"math/rand"
"reflect"
"strings"
"sync"
"testing"
"time"

"cloud.google.com/go/internal/testutil"

"golang.org/x/net/context"
)

@ -67,14 +68,16 @@ func TestClientIntegration(t *testing.T) {
t.Fatalf("IntegrationEnv: %v", err)
}

timeout := 30 * time.Second
var timeout time.Duration
if testEnv.Config().UseProd {
timeout = 5 * time.Minute
t.Logf("Running test against production")
} else {
timeout = 1 * time.Minute
t.Logf("bttest.Server running on %s", testEnv.Config().AdminEndpoint)
}
ctx, _ := context.WithTimeout(context.Background(), timeout)
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()

client, err := testEnv.NewClient()
if err != nil {

@ -156,7 +159,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "jadams", Column: "follows:tjefferson", Value: []byte("1")},
},
}
if !reflect.DeepEqual(row, wantRow) {
if !testutil.Equal(row, wantRow) {
t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow)
}
checkpoint("tested ReadRow")

@ -319,6 +322,12 @@ func TestClientIntegration(t *testing.T) {
filter: ConditionFilter(ChainFilters(ColumnFilter(".*j.*"), ColumnFilter(".*mckinley.*")), StripValueFilter(), nil),
want: "",
},
{
desc: "chain that ends with an interleave that has no match. covers #804",
rr: RowRange{},
filter: ConditionFilter(ChainFilters(ColumnFilter(".*j.*"), InterleaveFilters(ColumnFilter(".*x.*"), ColumnFilter(".*z.*"))), StripValueFilter(), nil),
want: "",
},
}
for _, tc := range readTests {
var opts []ReadOption

@ -442,9 +451,13 @@ func TestClientIntegration(t *testing.T) {
if err != nil {
t.Fatalf("ApplyReadModifyWrite %+v: %v", step.rmw, err)
}
// Make sure the modified cell returned by the RMW operation has a timestamp.
if row["counter"][0].Timestamp == 0 {
t.Errorf("RMW returned cell timestamp: got %v, want > 0", row["counter"][0].Timestamp)
}
clearTimestamps(row)
wantRow := Row{"counter": []ReadItem{{Row: "gwashington", Column: "counter:likes", Value: step.want}}}
if !reflect.DeepEqual(row, wantRow) {
if !testutil.Equal(row, wantRow) {
t.Fatalf("After %s,\n got %v\nwant %v", step.desc, row, wantRow)
}
}

@ -498,7 +511,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
{Row: "testrow", Column: "ts:col2", Timestamp: 0, Value: []byte("val-0")},
}}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Cell with multiple versions,\n got %v\nwant %v", r, wantRow)
}
// Do the same read, but filter to the latest two versions.

@ -512,7 +525,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
}}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Cell with multiple versions and LatestNFilter(2),\n got %v\nwant %v", r, wantRow)
}
// Check cell offset / limit

@ -525,7 +538,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col", Timestamp: 1000, Value: []byte("val-1")},
}}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Cell with multiple versions and CellsPerRowLimitFilter(3),\n got %v\nwant %v", r, wantRow)
}
r, err = tbl.ReadRow(ctx, "testrow", RowFilter(CellsPerRowOffsetFilter(3)))

@ -539,7 +552,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
{Row: "testrow", Column: "ts:col2", Timestamp: 0, Value: []byte("val-0")},
}}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Cell with multiple versions and CellsPerRowOffsetFilter(3),\n got %v\nwant %v", r, wantRow)
}
// Check timestamp range filtering (with truncation)

@ -553,7 +566,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
}}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 3000),\n got %v\nwant %v", r, wantRow)
}
r, err = tbl.ReadRow(ctx, "testrow", RowFilter(TimestampRangeFilterMicros(1000, 0)))

@ -568,7 +581,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
{Row: "testrow", Column: "ts:col2", Timestamp: 1000, Value: []byte("val-1")},
}}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Cell with multiple versions and TimestampRangeFilter(1000, 0),\n got %v\nwant %v", r, wantRow)
}
// Delete non-existing cells, no such column family in this row

@ -585,7 +598,7 @@ func TestClientIntegration(t *testing.T) {
if err != nil {
t.Fatalf("Reading row: %v", err)
}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Cell was deleted unexpectedly,\n got %v\nwant %v", r, wantRow)
}
// Delete non-existing cells, no such column in this column family

@ -599,7 +612,7 @@ func TestClientIntegration(t *testing.T) {
if err != nil {
t.Fatalf("Reading row: %v", err)
}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Cell was deleted unexpectedly,\n got %v\nwant %v", r, wantRow)
}
// Delete the cell with timestamp 2000 and repeat the last read,

@ -619,7 +632,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "testrow", Column: "ts:col2", Timestamp: 3000, Value: []byte("val-3")},
{Row: "testrow", Column: "ts:col2", Timestamp: 2000, Value: []byte("val-2")},
}}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Cell with multiple versions and LatestNFilter(2), after deleting timestamp 2000,\n got %v\nwant %v", r, wantRow)
}
checkpoint("tested multiple versions in a cell")

@ -654,7 +667,7 @@ func TestClientIntegration(t *testing.T) {
wantRow = Row{"ts": []ReadItem{
{Row: "row1", Column: "ts:col", Timestamp: 0, Value: []byte("3")},
}}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("column family was not deleted.\n got %v\n want %v", r, wantRow)
}

@ -672,7 +685,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "row2", Column: "status:start", Timestamp: 0, Value: []byte("1")},
},
}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Column family was deleted unexpectedly.\n got %v\n want %v", r, wantRow)
}
checkpoint("tested family delete")

@ -700,7 +713,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "row3", Column: "status:start", Timestamp: 0, Value: []byte("1")},
},
}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Column was not deleted.\n got %v\n want %v", r, wantRow)
}
mut = NewMutation()

@ -717,7 +730,7 @@ func TestClientIntegration(t *testing.T) {
{Row: "row3", Column: "status:end", Timestamp: 0, Value: []byte("3")},
},
}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Column was not deleted.\n got %v\n want %v", r, wantRow)
}
mut = NewMutation()

@ -742,7 +755,7 @@ func TestClientIntegration(t *testing.T) {
if err != nil {
t.Fatalf("Reading row: %v", err)
}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Column was not deleted correctly.\n got %v\n want %v", r, wantRow)
}
checkpoint("tested column delete")

@ -791,7 +804,7 @@ func TestClientIntegration(t *testing.T) {
wantRow = Row{"ts": []ReadItem{
{Row: "bigrow", Column: "ts:col", Value: bigBytes},
}}
if !reflect.DeepEqual(r, wantRow) {
if !testutil.Equal(r, wantRow) {
t.Errorf("Big read returned incorrect bytes: %v", r)
}
// Now write 1000 rows, each with 82 KB values, then scan them all.

@ -879,7 +892,7 @@ func TestClientIntegration(t *testing.T) {
wantItems = append(wantItems, ReadItem{Row: rowKey, Column: "bulk:" + val, Value: []byte("1")})
}
wantRow := Row{"bulk": wantItems}
if !reflect.DeepEqual(row, wantRow) {
if !testutil.Equal(row, wantRow) {
t.Errorf("Read row mismatch.\n got %#v\nwant %#v", row, wantRow)
}
}

@ -421,12 +421,14 @@ func filterRow(f *btpb.RowFilter, r *row) bool {
}
}
}
var count int
for _, fam := range r.families {
for _, cs := range fam.cells {
sort.Sort(byDescTS(cs))
count += len(cs)
}
}
return true
return count > 0
case *btpb.RowFilter_CellsPerColumnLimitFilter:
lim := int(f.CellsPerColumnLimitFilter)
for _, fam := range r.families {

@ -690,10 +692,8 @@ func (s *server) CheckAndMutateRow(ctx context.Context, req *btpb.CheckAndMutate
nr := r.copy()
filterRow(req.PredicateFilter, nr)
whichMut = !nr.isEmpty()
// TODO(dsymonds): Figure out if this is supposed to be set
// even when there's no predicate filter.
res.PredicateMatched = whichMut
}
res.PredicateMatched = whichMut
muts := req.FalseMutations
if whichMut {
muts = req.TrueMutations

@ -907,7 +907,8 @@ func (s *server) ReadModifyWriteRow(ctx context.Context, req *btpb.ReadModifyWri
f.Columns = append(f.Columns, &btpb.Column{
Qualifier: []byte(qual),
Cells: []*btpb.Cell{{
Value: cell.value,
TimestampMicros: cell.ts,
Value: cell.value,
}},
})
}

@ -17,6 +17,7 @@ package bttest
import (
"fmt"
"math/rand"
"strconv"
"sync"
"sync/atomic"
"testing"

@ -26,7 +27,6 @@ import (
btapb "google.golang.org/genproto/googleapis/bigtable/admin/v2"
btpb "google.golang.org/genproto/googleapis/bigtable/v2"
"google.golang.org/grpc"
"strconv"
)

func TestConcurrentMutationsReadModifyAndGC(t *testing.T) {

@ -430,7 +430,7 @@ func TestReadRowsOrder(t *testing.T) {
t.Fatal("Response count: got 0, want > 0")
}
if len(mock.responses[0].Chunks) != 27 {
t.Fatal("Chunk count: got %d, want 27", len(mock.responses[0].Chunks))
t.Fatalf("Chunk count: got %d, want 27", len(mock.responses[0].Chunks))
}
testOrder := func(ms *MockReadRowsServer) {
var prevFam, prevCol string

@ -480,7 +480,7 @@ func TestReadRowsOrder(t *testing.T) {
t.Fatal("Response count: got 0, want > 0")
}
if len(mock.responses[0].Chunks) != 18 {
t.Fatal("Chunk count: got %d, want 18", len(mock.responses[0].Chunks))
t.Fatalf("Chunk count: got %d, want 18", len(mock.responses[0].Chunks))
}
testOrder(mock)

@ -511,7 +511,61 @@ func TestReadRowsOrder(t *testing.T) {
t.Fatal("Response count: got 0, want > 0")
}
if len(mock.responses[0].Chunks) != 30 {
t.Fatal("Chunk count: got %d, want 30", len(mock.responses[0].Chunks))
t.Fatalf("Chunk count: got %d, want 30", len(mock.responses[0].Chunks))
}
testOrder(mock)
}

func TestCheckAndMutateRowWithoutPredicate(t *testing.T) {
s := &server{
tables: make(map[string]*table),
}
ctx := context.Background()
newTbl := btapb.Table{
ColumnFamilies: map[string]*btapb.ColumnFamily{
"cf": {GcRule: &btapb.GcRule{Rule: &btapb.GcRule_MaxNumVersions{1}}},
},
}
tbl, err := s.CreateTable(ctx, &btapb.CreateTableRequest{Parent: "cluster", TableId: "t", Table: &newTbl})
if err != nil {
t.Fatalf("Creating table: %v", err)
}

// Populate the table
val := []byte("value")
mrreq := &btpb.MutateRowRequest{
TableName: tbl.Name,
RowKey: []byte("row-present"),
Mutations: []*btpb.Mutation{{
Mutation: &btpb.Mutation_SetCell_{&btpb.Mutation_SetCell{
FamilyName: "cf",
ColumnQualifier: []byte("col"),
TimestampMicros: 0,
Value: val,
}},
}},
}
if _, err := s.MutateRow(ctx, mrreq); err != nil {
t.Fatalf("Populating table: %v", err)
}

req := &btpb.CheckAndMutateRowRequest{
TableName: tbl.Name,
RowKey: []byte("row-not-present"),
}
if res, err := s.CheckAndMutateRow(ctx, req); err != nil {
t.Errorf("CheckAndMutateRow error: %v", err)
} else if got, want := res.PredicateMatched, false; got != want {
t.Errorf("Invalid PredicateMatched value: got %t, want %t", got, want)
}

req = &btpb.CheckAndMutateRowRequest{
TableName: tbl.Name,
RowKey: []byte("row-present"),
}
if res, err := s.CheckAndMutateRow(ctx, req); err != nil {
t.Errorf("CheckAndMutateRow error: %v", err)
} else if got, want := res.PredicateMatched, true; got != want {
t.Errorf("Invalid PredicateMatched value: got %t, want %t", got, want)
}
}

@ -216,6 +216,13 @@ var commands = []struct {
"into multiple tablets. Can be repeated to create multiple splits.",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "deletecolumn",
Desc: "Delete all cells in a column",
do: doDeleteColumn,
Usage: "cbt deletecolumn <table> <row> <family> <column>",
Required: cbtconfig.ProjectAndInstanceRequired,
},
{
Name: "deletefamily",
Desc: "Delete a column family",

@ -284,10 +291,12 @@ var commands = []struct {
Name: "read",
Desc: "Read rows",
do: doRead,
Usage: "cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [count=<n>]\n" +
Usage: "cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>]" +
" [regex=<regex>] [count=<n>]\n" +
" start=<row> Start reading at this row\n" +
" end=<row> Stop reading before this row\n" +
" prefix=<prefix> Read rows with this prefix\n" +
" regex=<regex> Read rows with keys matching this regex\n" +
" count=<n> Read only this many rows\n",
Required: cbtconfig.ProjectAndInstanceRequired,
},

@ -365,6 +374,18 @@ func doCreateTable(ctx context.Context, args ...string) {
}
}

func doDeleteColumn(ctx context.Context, args ...string) {
if len(args) != 4 {
log.Fatal("usage: cbt deletecolumn <table> <row> <family> <column>")
}
tbl := getClient().Open(args[0])
mut := bigtable.NewMutation()
mut.DeleteCellsInColumn(args[2], args[3])
if err := tbl.Apply(ctx, args[1], mut); err != nil {
log.Fatalf("Deleting cells in column: %v", err)
}
}

func doDeleteFamily(ctx context.Context, args ...string) {
if len(args) != 2 {
log.Fatal("usage: cbt deletefamily <table> <family>")

@ -474,7 +495,9 @@ var docTemplate = template.Must(template.New("doc").Funcs(template.FuncMap{
//go:generate go run cbt.go -o cbtdoc.go doc

/*
Cbt is a tool for doing basic interactions with Cloud Bigtable.
Cbt is a tool for doing basic interactions with Cloud Bigtable. To learn how to
install the cbt tool, see the
[cbt overview](https://cloud.google.com/bigtable/docs/go/cbt-overview).

Usage:

@ -672,7 +695,7 @@ func doRead(ctx context.Context, args ...string) {
case "limit":
// Be nicer; we used to support this, but renamed it to "end".
log.Fatalf("Unknown arg key %q; did you mean %q?", key, "end")
case "start", "end", "prefix", "count":
case "start", "end", "prefix", "count", "regex":
parsed[key] = val
}
}

@ -698,6 +721,9 @@ func doRead(ctx context.Context, args ...string) {
}
opts = append(opts, bigtable.LimitRows(n))
}
if regex := parsed["regex"]; regex != "" {
opts = append(opts, bigtable.RowFilter(bigtable.RowKeyFilter(regex)))
}

// TODO(dsymonds): Support filters.
err := tbl.ReadRows(ctx, rr, func(r bigtable.Row) bool {

@ -17,7 +17,9 @@
//go:generate go run cbt.go -o cbtdoc.go doc

/*
Cbt is a tool for doing basic interactions with Cloud Bigtable.
Cbt is a tool for doing basic interactions with Cloud Bigtable. To learn how to
install the cbt tool, see the
[cbt overview](https://cloud.google.com/bigtable/docs/go/cbt-overview).

Usage:

@ -28,6 +30,7 @@ The commands are:
count Count rows in a table
createfamily Create a column family
createtable Create a table
deletecolumn Delete all cells in a column
deletefamily Delete a column family
deleterow Delete a row
deletetable Delete a table

@ -40,13 +43,14 @@ The commands are:
read Read rows
set Set value of a cell
setgcpolicy Set the GC policy for a column family
version Print the current cbt version

Use "cbt help <command>" for more information about a command.

The options are:

-project string
project ID
project ID, if unset uses gcloud configured project
-instance string
Cloud Bigtable instance
-creds string

@ -72,7 +76,16 @@ Usage:
Create a table

Usage:
cbt createtable <table>
cbt createtable <table> [initial_splits...]
initial_splits=row A row key to be used to initially split the table into multiple tablets. Can be repeated to create multiple splits.


Delete all cells in a column

Usage:
cbt deletecolumn <table> <row> <family> <column>


@ -153,10 +166,11 @@ Usage:
Read rows

Usage:
cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [count=<n>]
cbt read <table> [start=<row>] [end=<row>] [prefix=<prefix>] [regex=<regex>] [count=<n>]
start=<row> Start reading at this row
end=<row> Stop reading before this row
prefix=<prefix> Read rows with this prefix
regex=<regex> Read rows with keys matching this regex
count=<n> Read only this many rows

@ -187,5 +201,13 @@ Usage:

Print the current cbt version

Usage:
cbt version

*/
package main

@ -136,7 +136,7 @@ func (e *EmulatedEnv) Config() IntegrationTestConfig {
func (e *EmulatedEnv) NewAdminClient() (*AdminClient, error) {
timeout := 20 * time.Second
ctx, _ := context.WithTimeout(context.Background(), timeout)
conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure())
conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure(), grpc.WithBlock())
if err != nil {
return nil, err
}

@ -152,7 +152,8 @@ func (e *EmulatedEnv) NewInstanceAdminClient() (*InstanceAdminClient, error) {
func (e *EmulatedEnv) NewClient() (*Client, error) {
timeout := 20 * time.Second
ctx, _ := context.WithTimeout(context.Background(), timeout)
conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure(), grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20)))
conn, err := grpc.Dial(e.server.Addr, grpc.WithInsecure(), grpc.WithBlock(),
grpc.WithDefaultCallOptions(grpc.MaxCallSendMsgSize(100<<20), grpc.MaxCallRecvMsgSize(100<<20)))
if err != nil {
return nil, err
}
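
The grpc.WithBlock option added above changes Dial from returning immediately to waiting until the connection is actually up, so connection failures now surface at dial time instead of on the first RPC. A generic sketch of the pattern (the address here is a placeholder, not from this diff):

ctx, cancel := context.WithTimeout(context.Background(), 20*time.Second)
defer cancel()
conn, err := grpc.DialContext(ctx, "localhost:9000", grpc.WithInsecure(), grpc.WithBlock())
if err != nil {
// with WithBlock, err covers connection establishment, not just bad arguments
}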
@ -181,7 +181,7 @@ type timestampRangeFilter struct {
}

func (trf timestampRangeFilter) String() string {
return fmt.Sprintf("timestamp_range(%s,%s)", trf.startTime, trf.endTime)
return fmt.Sprintf("timestamp_range(%v,%v)", trf.startTime, trf.endTime)
}

func (trf timestampRangeFilter) proto() *btpb.RowFilter {

@ -135,24 +135,24 @@ func GCRuleToString(rule *bttdpb.GcRule) string {
if rule == nil {
return "<default>"
}
var ruleStr string
if r, ok := rule.Rule.(*bttdpb.GcRule_MaxNumVersions); ok {
ruleStr += MaxVersionsPolicy(int(r.MaxNumVersions)).String()
} else if r, ok := rule.Rule.(*bttdpb.GcRule_MaxAge); ok {
ruleStr += MaxAgePolicy(time.Duration(r.MaxAge.Seconds) * time.Second).String()
} else if r, ok := rule.Rule.(*bttdpb.GcRule_Intersection_); ok {
var chunks []string
for _, intRule := range r.Intersection.Rules {
chunks = append(chunks, GCRuleToString(intRule))
}
ruleStr += "(" + strings.Join(chunks, " && ") + ")"
} else if r, ok := rule.Rule.(*bttdpb.GcRule_Union_); ok {
var chunks []string
for _, unionRule := range r.Union.Rules {
chunks = append(chunks, GCRuleToString(unionRule))
}
ruleStr += "(" + strings.Join(chunks, " || ") + ")"
switch r := rule.Rule.(type) {
case *bttdpb.GcRule_MaxNumVersions:
return MaxVersionsPolicy(int(r.MaxNumVersions)).String()
case *bttdpb.GcRule_MaxAge:
return MaxAgePolicy(time.Duration(r.MaxAge.Seconds) * time.Second).String()
case *bttdpb.GcRule_Intersection_:
return joinRules(r.Intersection.Rules, " && ")
case *bttdpb.GcRule_Union_:
return joinRules(r.Union.Rules, " || ")
default:
return ""
}

return ruleStr
}

func joinRules(rules []*bttdpb.GcRule, sep string) string {
var chunks []string
for _, r := range rules {
chunks = append(chunks, GCRuleToString(r))
}
return "(" + strings.Join(chunks, sep) + ")"
}
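
To make the refactored stringification concrete, here is a hedged sketch of feeding GCRuleToString a small rule tree (durpb is assumed to alias github.com/golang/protobuf/ptypes/duration, and the exact age formatting depends on MaxAgePolicy.String):

// A hypothetical union rule: keep 10 versions OR cells younger than one hour.
rule := &bttdpb.GcRule{Rule: &bttdpb.GcRule_Union_{Union: &bttdpb.GcRule_Union{
Rules: []*bttdpb.GcRule{
{Rule: &bttdpb.GcRule_MaxNumVersions{MaxNumVersions: 10}},
{Rule: &bttdpb.GcRule_MaxAge{MaxAge: &durpb.Duration{Seconds: 3600}}},
},
}}}
fmt.Println(GCRuleToString(rule)) // prints something like "(versions() > 10 || age() > 1h0m0s)"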
@ -20,10 +20,11 @@ import (
"encoding/json"
"fmt"
"io/ioutil"
"reflect"
"strings"
"testing"

"cloud.google.com/go/internal/testutil"

"github.com/golang/protobuf/proto"
"github.com/golang/protobuf/ptypes/wrappers"
btspb "google.golang.org/genproto/googleapis/bigtable/v2"

@ -48,7 +49,7 @@ func TestSingleCell(t *testing.T) {
t.Fatalf("Family name length mismatch %d, %d", 1, len(row["fm"]))
}
want := []ReadItem{ri("rk", "fm", "col", 1, "value")}
if !reflect.DeepEqual(row["fm"], want) {
if !testutil.Equal(row["fm"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm"], want)
}
if err := cr.Close(); err != nil {

@ -76,14 +77,14 @@ func TestMultipleCells(t *testing.T) {
ri("rs", "fm1", "col1", 1, "val2"),
ri("rs", "fm1", "col2", 0, "val3"),
}
if !reflect.DeepEqual(row["fm1"], want) {
if !testutil.Equal(row["fm1"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
}
want = []ReadItem{
ri("rs", "fm2", "col1", 0, "val4"),
ri("rs", "fm2", "col2", 1, "extralongval5"),
}
if !reflect.DeepEqual(row["fm2"], want) {
if !testutil.Equal(row["fm2"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want)
}
if err := cr.Close(); err != nil {

@ -108,7 +109,7 @@ func TestSplitCells(t *testing.T) {
ri("rs", "fm1", "col1", 0, "hello world"),
ri("rs", "fm1", "col2", 0, "val2"),
}
if !reflect.DeepEqual(row["fm1"], want) {
if !testutil.Equal(row["fm1"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
}
if err := cr.Close(); err != nil {

@ -124,7 +125,7 @@ func TestMultipleRows(t *testing.T) {
t.Fatalf("Processing chunk: %v", err)
}
want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")}
if !reflect.DeepEqual(row["fm1"], want) {
if !testutil.Equal(row["fm1"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
}

@ -133,7 +134,7 @@ func TestMultipleRows(t *testing.T) {
t.Fatalf("Processing chunk: %v", err)
}
want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")}
if !reflect.DeepEqual(row["fm2"], want) {
if !testutil.Equal(row["fm2"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want)
}

@ -150,7 +151,7 @@ func TestBlankQualifier(t *testing.T) {
t.Fatalf("Processing chunk: %v", err)
}
want := []ReadItem{ri("rs1", "fm1", "", 1, "val1")}
if !reflect.DeepEqual(row["fm1"], want) {
if !testutil.Equal(row["fm1"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm1"], want)
}

@ -159,7 +160,7 @@ func TestBlankQualifier(t *testing.T) {
t.Fatalf("Processing chunk: %v", err)
}
want = []ReadItem{ri("rs2", "fm2", "col2", 2, "val2")}
if !reflect.DeepEqual(row["fm2"], want) {
if !testutil.Equal(row["fm2"], want) {
t.Fatalf("Incorrect ReadItem: got: %v\nwant: %v\n", row["fm2"], want)
}

@ -177,7 +178,7 @@ func TestReset(t *testing.T) {
cr.Process(ccReset())
row, _ := cr.Process(cc("rs1", "fm1", "col1", 1, "val1", 0, true))
want := []ReadItem{ri("rs1", "fm1", "col1", 1, "val1")}
if !reflect.DeepEqual(row["fm1"], want) {
if !testutil.Equal(row["fm1"], want) {
t.Fatalf("Reset: got: %v\nwant: %v\n", row["fm1"], want)
}
if err := cr.Close(); err != nil {

@ -279,7 +280,7 @@ func runTestCase(t *testing.T, test TestCase) {

got := toSet(results)
want := toSet(test.Results)
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Fatalf("[%s]: got: %v\nwant: %v\n", test.Name, got, want)
}
}

@ -16,13 +16,14 @@ limitations under the License.
package bigtable

import (
"reflect"
"strings"
"testing"
"time"

"cloud.google.com/go/bigtable/bttest"
"cloud.google.com/go/internal/testutil"
"github.com/golang/protobuf/ptypes/wrappers"
"github.com/google/go-cmp/cmp"
"golang.org/x/net/context"
"google.golang.org/api/option"
btpb "google.golang.org/genproto/googleapis/bigtable/v2"

@ -36,7 +37,7 @@ func setupFakeServer(opt ...grpc.ServerOption) (tbl *Table, cleanup func(), err
if err != nil {
return nil, nil, err
}
conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure())
conn, err := grpc.Dial(srv.Addr, grpc.WithInsecure(), grpc.WithBlock())
if err != nil {
return nil, nil, err
}

@ -80,10 +81,10 @@ func TestRetryApply(t *testing.T) {
return handler(ctx, req)
}
tbl, cleanup, err := setupFakeServer(grpc.UnaryInterceptor(errInjector))
defer cleanup()
if err != nil {
t.Fatalf("fake server setup: %v", err)
}
defer cleanup()

mut := NewMutation()
mut.Set("cf", "col", 1, []byte("val"))

@ -231,13 +232,13 @@ func TestRetryApplyBulk(t *testing.T) {
}
errors, err = tbl.ApplyBulk(ctx, []string{"row1", "row2"}, []*Mutation{m1, niMut})
if err != nil {
t.Errorf("unretryable errors: request failed %v")
t.Errorf("unretryable errors: request failed %v", err)
}
want := []error{
grpc.Errorf(codes.FailedPrecondition, ""),
grpc.Errorf(codes.Aborted, ""),
}
if !reflect.DeepEqual(want, errors) {
if !testutil.Equal(want, errors) {
t.Errorf("unretryable errors: got: %v, want: %v", errors, want)
}

@ -273,7 +274,7 @@ func TestRetainRowsAfter(t *testing.T) {
prevRowKey := "m"
want := NewRange("m\x00", "z")
got := prevRowRange.retainRowsAfter(prevRowKey)
if !reflect.DeepEqual(want, got) {
if !testutil.Equal(want, got, cmp.AllowUnexported(RowRange{})) {
t.Errorf("range retry: got %v, want %v", got, want)
}

@ -281,7 +282,7 @@ func TestRetainRowsAfter(t *testing.T) {
prevRowKey = "f"
wantRowRangeList := RowRangeList{NewRange("f\x00", "g"), NewRange("h", "l")}
got = prevRowRangeList.retainRowsAfter(prevRowKey)
if !reflect.DeepEqual(wantRowRangeList, got) {
if !testutil.Equal(wantRowRangeList, got, cmp.AllowUnexported(RowRange{})) {
t.Errorf("range list retry: got %v, want %v", got, wantRowRangeList)
}

@ -289,7 +290,7 @@ func TestRetainRowsAfter(t *testing.T) {
prevRowKey = "b"
wantList := RowList{"c", "d", "e", "f"}
got = prevRowList.retainRowsAfter(prevRowKey)
if !reflect.DeepEqual(wantList, got) {
if !testutil.Equal(wantList, got) {
t.Errorf("list retry: got %v, want %v", got, wantList)
}
}

@ -351,7 +352,7 @@ func TestRetryReadRows(t *testing.T) {
return true
})
want := []string{"a", "b", "c", "d"}
if !reflect.DeepEqual(got, want) {
if !testutil.Equal(got, want) {
t.Errorf("retry range integration: got %v, want %v", got, want)
}
}
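
The file-wide swap from reflect.DeepEqual to testutil.Equal rides on the go-cmp package, which panics on unexported fields unless an option allows them; that is what the cmp.AllowUnexported(RowRange{}) calls above are for. A standalone sketch of the same idea using cmp directly (the pair type is invented for illustration):

type pair struct{ a, b int } // unexported fields
x, y := pair{1, 2}, pair{1, 2}
equal := cmp.Equal(x, y, cmp.AllowUnexported(pair{})) // would panic without the option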
674 vendor/cloud.google.com/go/container/apiv1/cluster_manager_client.go (generated, vendored, new file)

@ -0,0 +1,674 @@
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

package container

import (
"time"

"cloud.google.com/go/internal/version"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/option"
"google.golang.org/api/transport"
containerpb "google.golang.org/genproto/googleapis/container/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)

// ClusterManagerCallOptions contains the retry settings for each method of ClusterManagerClient.
type ClusterManagerCallOptions struct {
ListClusters []gax.CallOption
GetCluster []gax.CallOption
CreateCluster []gax.CallOption
UpdateCluster []gax.CallOption
UpdateNodePool []gax.CallOption
SetNodePoolAutoscaling []gax.CallOption
SetLoggingService []gax.CallOption
SetMonitoringService []gax.CallOption
SetAddonsConfig []gax.CallOption
SetLocations []gax.CallOption
UpdateMaster []gax.CallOption
SetMasterAuth []gax.CallOption
DeleteCluster []gax.CallOption
ListOperations []gax.CallOption
GetOperation []gax.CallOption
CancelOperation []gax.CallOption
GetServerConfig []gax.CallOption
ListNodePools []gax.CallOption
GetNodePool []gax.CallOption
CreateNodePool []gax.CallOption
DeleteNodePool []gax.CallOption
RollbackNodePoolUpgrade []gax.CallOption
SetNodePoolManagement []gax.CallOption
SetLabels []gax.CallOption
SetLegacyAbac []gax.CallOption
StartIPRotation []gax.CallOption
CompleteIPRotation []gax.CallOption
SetNodePoolSize []gax.CallOption
SetNetworkPolicy []gax.CallOption
SetMaintenancePolicy []gax.CallOption
}

func defaultClusterManagerClientOptions() []option.ClientOption {
return []option.ClientOption{
option.WithEndpoint("container.googleapis.com:443"),
option.WithScopes(DefaultAuthScopes()...),
}
}

func defaultClusterManagerCallOptions() *ClusterManagerCallOptions {
retry := map[[2]string][]gax.CallOption{
{"default", "idempotent"}: {
gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60000 * time.Millisecond,
Multiplier: 1.3,
})
}),
},
}
return &ClusterManagerCallOptions{
ListClusters: retry[[2]string{"default", "idempotent"}],
GetCluster: retry[[2]string{"default", "idempotent"}],
CreateCluster: retry[[2]string{"default", "non_idempotent"}],
UpdateCluster: retry[[2]string{"default", "non_idempotent"}],
UpdateNodePool: retry[[2]string{"default", "non_idempotent"}],
SetNodePoolAutoscaling: retry[[2]string{"default", "non_idempotent"}],
SetLoggingService: retry[[2]string{"default", "non_idempotent"}],
SetMonitoringService: retry[[2]string{"default", "non_idempotent"}],
SetAddonsConfig: retry[[2]string{"default", "non_idempotent"}],
SetLocations: retry[[2]string{"default", "non_idempotent"}],
UpdateMaster: retry[[2]string{"default", "non_idempotent"}],
SetMasterAuth: retry[[2]string{"default", "non_idempotent"}],
DeleteCluster: retry[[2]string{"default", "idempotent"}],
ListOperations: retry[[2]string{"default", "idempotent"}],
GetOperation: retry[[2]string{"default", "idempotent"}],
CancelOperation: retry[[2]string{"default", "non_idempotent"}],
GetServerConfig: retry[[2]string{"default", "idempotent"}],
ListNodePools: retry[[2]string{"default", "idempotent"}],
GetNodePool: retry[[2]string{"default", "idempotent"}],
CreateNodePool: retry[[2]string{"default", "non_idempotent"}],
DeleteNodePool: retry[[2]string{"default", "idempotent"}],
RollbackNodePoolUpgrade: retry[[2]string{"default", "non_idempotent"}],
SetNodePoolManagement: retry[[2]string{"default", "non_idempotent"}],
SetLabels: retry[[2]string{"default", "non_idempotent"}],
SetLegacyAbac: retry[[2]string{"default", "non_idempotent"}],
StartIPRotation: retry[[2]string{"default", "non_idempotent"}],
CompleteIPRotation: retry[[2]string{"default", "non_idempotent"}],
SetNodePoolSize: retry[[2]string{"default", "non_idempotent"}],
SetNetworkPolicy: retry[[2]string{"default", "non_idempotent"}],
SetMaintenancePolicy: retry[[2]string{"default", "non_idempotent"}],
}
}

// ClusterManagerClient is a client for interacting with Google Container Engine API.
type ClusterManagerClient struct {
// The connection to the service.
conn *grpc.ClientConn

// The gRPC API client.
clusterManagerClient containerpb.ClusterManagerClient

// The call options for this service.
CallOptions *ClusterManagerCallOptions

// The x-goog-* metadata to be sent with each request.
xGoogMetadata metadata.MD
}

// NewClusterManagerClient creates a new cluster manager client.
//
// Google Container Engine Cluster Manager v1
func NewClusterManagerClient(ctx context.Context, opts ...option.ClientOption) (*ClusterManagerClient, error) {
conn, err := transport.DialGRPC(ctx, append(defaultClusterManagerClientOptions(), opts...)...)
if err != nil {
return nil, err
}
c := &ClusterManagerClient{
conn: conn,
CallOptions: defaultClusterManagerCallOptions(),

clusterManagerClient: containerpb.NewClusterManagerClient(conn),
}
c.setGoogleClientInfo()
return c, nil
}
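
A usage sketch for the generated client above, assuming the package is imported as container from cloud.google.com/go/container/apiv1 (the project and zone values are placeholders):

ctx := context.Background()
c, err := container.NewClusterManagerClient(ctx)
if err != nil {
// handle dial/auth error
}
defer c.Close()
resp, err := c.ListClusters(ctx, &containerpb.ListClustersRequest{
ProjectId: "my-project",
Zone:      "us-central1-a",
})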

// Connection returns the client's connection to the API service.
func (c *ClusterManagerClient) Connection() *grpc.ClientConn {
return c.conn
}

// Close closes the connection to the API service. The user should invoke this when
// the client is no longer required.
func (c *ClusterManagerClient) Close() error {
return c.conn.Close()
}

// setGoogleClientInfo sets the name and version of the application in
// the `x-goog-api-client` header passed on each request. Intended for
// use by Google-written clients.
func (c *ClusterManagerClient) setGoogleClientInfo(keyval ...string) {
kv := append([]string{"gl-go", version.Go()}, keyval...)
kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// ListClusters lists all clusters owned by a project in either the specified zone or all
// zones.
func (c *ClusterManagerClient) ListClusters(ctx context.Context, req *containerpb.ListClustersRequest, opts ...gax.CallOption) (*containerpb.ListClustersResponse, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListClusters[0:len(c.CallOptions.ListClusters):len(c.CallOptions.ListClusters)], opts...)
var resp *containerpb.ListClustersResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.ListClusters(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// GetCluster gets the details of a specific cluster.
func (c *ClusterManagerClient) GetCluster(ctx context.Context, req *containerpb.GetClusterRequest, opts ...gax.CallOption) (*containerpb.Cluster, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.GetCluster[0:len(c.CallOptions.GetCluster):len(c.CallOptions.GetCluster)], opts...)
var resp *containerpb.Cluster
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.GetCluster(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// CreateCluster creates a cluster, consisting of the specified number and type of Google
// Compute Engine instances.
//
// By default, the cluster is created in the project's
// default network (at /compute/docs/networks-and-firewalls#networks).
//
// One firewall is added for the cluster. After cluster creation,
// the cluster creates routes for each node to allow the containers
// on that node to communicate with all other instances in the
// cluster.
//
// Finally, an entry is added to the project's global metadata indicating
// which CIDR range is being used by the cluster.
func (c *ClusterManagerClient) CreateCluster(ctx context.Context, req *containerpb.CreateClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.CreateCluster[0:len(c.CallOptions.CreateCluster):len(c.CallOptions.CreateCluster)], opts...)
var resp *containerpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.CreateCluster(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// UpdateCluster updates the settings of a specific cluster.
func (c *ClusterManagerClient) UpdateCluster(ctx context.Context, req *containerpb.UpdateClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.UpdateCluster[0:len(c.CallOptions.UpdateCluster):len(c.CallOptions.UpdateCluster)], opts...)
var resp *containerpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.UpdateCluster(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// UpdateNodePool updates the version and/or image type of a specific node pool.
func (c *ClusterManagerClient) UpdateNodePool(ctx context.Context, req *containerpb.UpdateNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.UpdateNodePool[0:len(c.CallOptions.UpdateNodePool):len(c.CallOptions.UpdateNodePool)], opts...)
var resp *containerpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.UpdateNodePool(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// SetNodePoolAutoscaling sets the autoscaling settings of a specific node pool.
func (c *ClusterManagerClient) SetNodePoolAutoscaling(ctx context.Context, req *containerpb.SetNodePoolAutoscalingRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.SetNodePoolAutoscaling[0:len(c.CallOptions.SetNodePoolAutoscaling):len(c.CallOptions.SetNodePoolAutoscaling)], opts...)
var resp *containerpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.SetNodePoolAutoscaling(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// SetLoggingService sets the logging service of a specific cluster.
func (c *ClusterManagerClient) SetLoggingService(ctx context.Context, req *containerpb.SetLoggingServiceRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.SetLoggingService[0:len(c.CallOptions.SetLoggingService):len(c.CallOptions.SetLoggingService)], opts...)
var resp *containerpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.SetLoggingService(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// SetMonitoringService sets the monitoring service of a specific cluster.
func (c *ClusterManagerClient) SetMonitoringService(ctx context.Context, req *containerpb.SetMonitoringServiceRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.SetMonitoringService[0:len(c.CallOptions.SetMonitoringService):len(c.CallOptions.SetMonitoringService)], opts...)
var resp *containerpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.SetMonitoringService(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// SetAddonsConfig sets the addons of a specific cluster.
func (c *ClusterManagerClient) SetAddonsConfig(ctx context.Context, req *containerpb.SetAddonsConfigRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.SetAddonsConfig[0:len(c.CallOptions.SetAddonsConfig):len(c.CallOptions.SetAddonsConfig)], opts...)
var resp *containerpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.SetAddonsConfig(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// SetLocations sets the locations of a specific cluster.
func (c *ClusterManagerClient) SetLocations(ctx context.Context, req *containerpb.SetLocationsRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.SetLocations[0:len(c.CallOptions.SetLocations):len(c.CallOptions.SetLocations)], opts...)
var resp *containerpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.SetLocations(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// UpdateMaster updates the master of a specific cluster.
func (c *ClusterManagerClient) UpdateMaster(ctx context.Context, req *containerpb.UpdateMasterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.UpdateMaster[0:len(c.CallOptions.UpdateMaster):len(c.CallOptions.UpdateMaster)], opts...)
var resp *containerpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.UpdateMaster(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// SetMasterAuth used to set master auth materials. Currently supports :-
// Changing the admin password of a specific cluster.
// This can be either via password generation or explicitly set the password.
func (c *ClusterManagerClient) SetMasterAuth(ctx context.Context, req *containerpb.SetMasterAuthRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.SetMasterAuth[0:len(c.CallOptions.SetMasterAuth):len(c.CallOptions.SetMasterAuth)], opts...)
var resp *containerpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.SetMasterAuth(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// DeleteCluster deletes the cluster, including the Kubernetes endpoint and all worker
// nodes.
//
// Firewalls and routes that were configured during cluster creation
// are also deleted.
//
// Other Google Compute Engine resources that might be in use by the cluster
// (e.g. load balancer resources) will not be deleted if they weren't present
// at the initial create time.
func (c *ClusterManagerClient) DeleteCluster(ctx context.Context, req *containerpb.DeleteClusterRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.DeleteCluster[0:len(c.CallOptions.DeleteCluster):len(c.CallOptions.DeleteCluster)], opts...)
var resp *containerpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.DeleteCluster(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// ListOperations lists all operations in a project in a specific zone or all zones.
func (c *ClusterManagerClient) ListOperations(ctx context.Context, req *containerpb.ListOperationsRequest, opts ...gax.CallOption) (*containerpb.ListOperationsResponse, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListOperations[0:len(c.CallOptions.ListOperations):len(c.CallOptions.ListOperations)], opts...)
var resp *containerpb.ListOperationsResponse
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.ListOperations(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// GetOperation gets the specified operation.
func (c *ClusterManagerClient) GetOperation(ctx context.Context, req *containerpb.GetOperationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.GetOperation[0:len(c.CallOptions.GetOperation):len(c.CallOptions.GetOperation)], opts...)
var resp *containerpb.Operation
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.GetOperation(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// CancelOperation cancels the specified operation.
func (c *ClusterManagerClient) CancelOperation(ctx context.Context, req *containerpb.CancelOperationRequest, opts ...gax.CallOption) error {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.CancelOperation[0:len(c.CallOptions.CancelOperation):len(c.CallOptions.CancelOperation)], opts...)
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
_, err = c.clusterManagerClient.CancelOperation(ctx, req, settings.GRPC...)
return err
}, opts...)
return err
}

// GetServerConfig returns configuration info about the Container Engine service.
func (c *ClusterManagerClient) GetServerConfig(ctx context.Context, req *containerpb.GetServerConfigRequest, opts ...gax.CallOption) (*containerpb.ServerConfig, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.GetServerConfig[0:len(c.CallOptions.GetServerConfig):len(c.CallOptions.GetServerConfig)], opts...)
var resp *containerpb.ServerConfig
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
var err error
resp, err = c.clusterManagerClient.GetServerConfig(ctx, req, settings.GRPC...)
return err
}, opts...)
if err != nil {
return nil, err
}
return resp, nil
}

// ListNodePools lists the node pools for a cluster.
func (c *ClusterManagerClient) ListNodePools(ctx context.Context, req *containerpb.ListNodePoolsRequest, opts ...gax.CallOption) (*containerpb.ListNodePoolsResponse, error) {
ctx = insertMetadata(ctx, c.xGoogMetadata)
opts = append(c.CallOptions.ListNodePools[0:len(c.CallOptions.ListNodePools):len(c.CallOptions.ListNodePools)], opts...)
|
||||
var resp *containerpb.ListNodePoolsResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.clusterManagerClient.ListNodePools(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// GetNodePool retrieves the node pool requested.
|
||||
func (c *ClusterManagerClient) GetNodePool(ctx context.Context, req *containerpb.GetNodePoolRequest, opts ...gax.CallOption) (*containerpb.NodePool, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.GetNodePool[0:len(c.CallOptions.GetNodePool):len(c.CallOptions.GetNodePool)], opts...)
|
||||
var resp *containerpb.NodePool
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.clusterManagerClient.GetNodePool(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// CreateNodePool creates a node pool for a cluster.
|
||||
func (c *ClusterManagerClient) CreateNodePool(ctx context.Context, req *containerpb.CreateNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.CreateNodePool[0:len(c.CallOptions.CreateNodePool):len(c.CallOptions.CreateNodePool)], opts...)
|
||||
var resp *containerpb.Operation
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.clusterManagerClient.CreateNodePool(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// DeleteNodePool deletes a node pool from a cluster.
|
||||
func (c *ClusterManagerClient) DeleteNodePool(ctx context.Context, req *containerpb.DeleteNodePoolRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.DeleteNodePool[0:len(c.CallOptions.DeleteNodePool):len(c.CallOptions.DeleteNodePool)], opts...)
|
||||
var resp *containerpb.Operation
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.clusterManagerClient.DeleteNodePool(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// RollbackNodePoolUpgrade roll back the previously Aborted or Failed NodePool upgrade.
|
||||
// This will be an no-op if the last upgrade successfully completed.
|
||||
func (c *ClusterManagerClient) RollbackNodePoolUpgrade(ctx context.Context, req *containerpb.RollbackNodePoolUpgradeRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.RollbackNodePoolUpgrade[0:len(c.CallOptions.RollbackNodePoolUpgrade):len(c.CallOptions.RollbackNodePoolUpgrade)], opts...)
|
||||
var resp *containerpb.Operation
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.clusterManagerClient.RollbackNodePoolUpgrade(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// SetNodePoolManagement sets the NodeManagement options for a node pool.
|
||||
func (c *ClusterManagerClient) SetNodePoolManagement(ctx context.Context, req *containerpb.SetNodePoolManagementRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.SetNodePoolManagement[0:len(c.CallOptions.SetNodePoolManagement):len(c.CallOptions.SetNodePoolManagement)], opts...)
|
||||
var resp *containerpb.Operation
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.clusterManagerClient.SetNodePoolManagement(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// SetLabels sets labels on a cluster.
|
||||
func (c *ClusterManagerClient) SetLabels(ctx context.Context, req *containerpb.SetLabelsRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.SetLabels[0:len(c.CallOptions.SetLabels):len(c.CallOptions.SetLabels)], opts...)
|
||||
var resp *containerpb.Operation
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.clusterManagerClient.SetLabels(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// SetLegacyAbac enables or disables the ABAC authorization mechanism on a cluster.
|
||||
func (c *ClusterManagerClient) SetLegacyAbac(ctx context.Context, req *containerpb.SetLegacyAbacRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.SetLegacyAbac[0:len(c.CallOptions.SetLegacyAbac):len(c.CallOptions.SetLegacyAbac)], opts...)
|
||||
var resp *containerpb.Operation
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.clusterManagerClient.SetLegacyAbac(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// StartIPRotation start master IP rotation.
|
||||
func (c *ClusterManagerClient) StartIPRotation(ctx context.Context, req *containerpb.StartIPRotationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.StartIPRotation[0:len(c.CallOptions.StartIPRotation):len(c.CallOptions.StartIPRotation)], opts...)
|
||||
var resp *containerpb.Operation
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.clusterManagerClient.StartIPRotation(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// CompleteIPRotation completes master IP rotation.
|
||||
func (c *ClusterManagerClient) CompleteIPRotation(ctx context.Context, req *containerpb.CompleteIPRotationRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.CompleteIPRotation[0:len(c.CallOptions.CompleteIPRotation):len(c.CallOptions.CompleteIPRotation)], opts...)
|
||||
var resp *containerpb.Operation
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.clusterManagerClient.CompleteIPRotation(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// SetNodePoolSize sets the size of a specific node pool.
|
||||
func (c *ClusterManagerClient) SetNodePoolSize(ctx context.Context, req *containerpb.SetNodePoolSizeRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.SetNodePoolSize[0:len(c.CallOptions.SetNodePoolSize):len(c.CallOptions.SetNodePoolSize)], opts...)
|
||||
var resp *containerpb.Operation
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.clusterManagerClient.SetNodePoolSize(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// SetNetworkPolicy enables/Disables Network Policy for a cluster.
|
||||
func (c *ClusterManagerClient) SetNetworkPolicy(ctx context.Context, req *containerpb.SetNetworkPolicyRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.SetNetworkPolicy[0:len(c.CallOptions.SetNetworkPolicy):len(c.CallOptions.SetNetworkPolicy)], opts...)
|
||||
var resp *containerpb.Operation
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.clusterManagerClient.SetNetworkPolicy(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
||||
|
||||
// SetMaintenancePolicy sets the maintenance policy for a cluster.
|
||||
func (c *ClusterManagerClient) SetMaintenancePolicy(ctx context.Context, req *containerpb.SetMaintenancePolicyRequest, opts ...gax.CallOption) (*containerpb.Operation, error) {
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.SetMaintenancePolicy[0:len(c.CallOptions.SetMaintenancePolicy):len(c.CallOptions.SetMaintenancePolicy)], opts...)
|
||||
var resp *containerpb.Operation
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
var err error
|
||||
resp, err = c.clusterManagerClient.SetMaintenancePolicy(ctx, req, settings.GRPC...)
|
||||
return err
|
||||
}, opts...)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return resp, nil
|
||||
}
|
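Each mutating wrapper above returns a bare *containerpb.Operation rather than a long-running-operation handle, so completion must be polled by the caller via GetOperation. A minimal sketch of that pattern, assuming the v1 request fields ProjectId, Zone and OperationId and the Operation_DONE status value (all as in the 2017-era API surface):

package example

import (
	"time"

	"cloud.google.com/go/container/apiv1"
	"golang.org/x/net/context"
	containerpb "google.golang.org/genproto/googleapis/container/v1"
)

// waitForOperation polls a cluster operation until it reports DONE.
// Sketch only: a fixed sleep stands in for real backoff, and there is
// no deadline handling beyond what ctx itself provides.
func waitForOperation(ctx context.Context, c *container.ClusterManagerClient, project, zone string, op *containerpb.Operation) error {
	for op.Status != containerpb.Operation_DONE {
		time.Sleep(2 * time.Second)
		var err error
		op, err = c.GetOperation(ctx, &containerpb.GetOperationRequest{
			ProjectId:   project,
			Zone:        zone,
			OperationId: op.Name, // the operation's server-assigned ID
		})
		if err != nil {
			return err
		}
	}
	return nil
}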
571 vendor/cloud.google.com/go/container/apiv1/cluster_manager_client_example_test.go generated vendored Normal file

@ -0,0 +1,571 @@
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

package container_test

import (
	"cloud.google.com/go/container/apiv1"
	"golang.org/x/net/context"
	containerpb "google.golang.org/genproto/googleapis/container/v1"
)

func ExampleNewClusterManagerClient() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use client.
	_ = c
}

func ExampleClusterManagerClient_ListClusters() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.ListClustersRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ListClusters(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_GetCluster() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.GetClusterRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetCluster(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_CreateCluster() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.CreateClusterRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateCluster(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_UpdateCluster() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.UpdateClusterRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateCluster(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_UpdateNodePool() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.UpdateNodePoolRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateNodePool(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_SetNodePoolAutoscaling() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.SetNodePoolAutoscalingRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetNodePoolAutoscaling(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_SetLoggingService() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.SetLoggingServiceRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetLoggingService(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_SetMonitoringService() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.SetMonitoringServiceRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetMonitoringService(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_SetAddonsConfig() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.SetAddonsConfigRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetAddonsConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_SetLocations() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.SetLocationsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetLocations(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_UpdateMaster() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.UpdateMasterRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.UpdateMaster(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_SetMasterAuth() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.SetMasterAuthRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetMasterAuth(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_DeleteCluster() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.DeleteClusterRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.DeleteCluster(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_ListOperations() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.ListOperationsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ListOperations(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_GetOperation() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.GetOperationRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetOperation(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_CancelOperation() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.CancelOperationRequest{
		// TODO: Fill request struct fields.
	}
	err = c.CancelOperation(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
}

func ExampleClusterManagerClient_GetServerConfig() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.GetServerConfigRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetServerConfig(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_ListNodePools() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.ListNodePoolsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.ListNodePools(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_GetNodePool() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.GetNodePoolRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.GetNodePool(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_CreateNodePool() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.CreateNodePoolRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CreateNodePool(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_DeleteNodePool() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.DeleteNodePoolRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.DeleteNodePool(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_RollbackNodePoolUpgrade() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.RollbackNodePoolUpgradeRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.RollbackNodePoolUpgrade(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_SetNodePoolManagement() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.SetNodePoolManagementRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetNodePoolManagement(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_SetLabels() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.SetLabelsRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetLabels(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_SetLegacyAbac() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.SetLegacyAbacRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetLegacyAbac(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_StartIPRotation() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.StartIPRotationRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.StartIPRotation(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_CompleteIPRotation() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.CompleteIPRotationRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.CompleteIPRotation(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_SetNodePoolSize() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.SetNodePoolSizeRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetNodePoolSize(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_SetNetworkPolicy() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.SetNetworkPolicyRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetNetworkPolicy(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClusterManagerClient_SetMaintenancePolicy() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &containerpb.SetMaintenancePolicyRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.SetMaintenancePolicy(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}
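The generated examples all leave the request untouched. As a hedged illustration of what filling one in looks like, here is the ListClusters call with the v1 request fields ProjectId and Zone set; the field names are assumed from the v1 proto and the values are placeholders:

func ExampleClusterManagerClient_ListClusters_filled() {
	ctx := context.Background()
	c, err := container.NewClusterManagerClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	// "my-project" and "us-central1-a" are placeholder values.
	req := &containerpb.ListClustersRequest{
		ProjectId: "my-project",
		Zone:      "us-central1-a",
	}
	resp, err := c.ListClusters(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	for _, cluster := range resp.Clusters {
		_ = cluster.Name // e.g. print or inspect each cluster
	}
}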
@ -0,0 +1,48 @@
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// AUTO-GENERATED CODE. DO NOT EDIT.

// Package container is an auto-generated package for the
// Google Container Engine API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// The Google Kubernetes Engine API is used for building and managing
// container-based applications, powered by the open source Kubernetes
// technology.
package container // import "cloud.google.com/go/container/apiv1"

import (
	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	out, _ := metadata.FromOutgoingContext(ctx)
	out = out.Copy()
	for _, md := range mds {
		for k, v := range md {
			out[k] = append(out[k], v...)
		}
	}
	return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
func DefaultAuthScopes() []string {
	return []string{
		"https://www.googleapis.com/auth/cloud-platform",
	}
}
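Note that insertMetadata merges the client's x-goog metadata into whatever outgoing metadata the caller has already attached, rather than overwriting it. A small sketch of that behaviour, using only the grpc metadata package (header values here are illustrative):

ctx := metadata.NewOutgoingContext(context.Background(),
	metadata.Pairs("custom-key", "caller-value"))
ctx = insertMetadata(ctx, metadata.Pairs("x-goog-api-client", "gl-go/1.9.2"))
md, _ := metadata.FromOutgoingContext(ctx)
// md now carries both "custom-key" and "x-goog-api-client".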
File diff suppressed because it is too large.
@ -24,7 +24,10 @@ import (
|
|||
"testing"
|
||||
"time"
|
||||
|
||||
"cloud.google.com/go/internal/testutil"
|
||||
|
||||
"github.com/golang/protobuf/proto"
|
||||
"github.com/google/go-cmp/cmp"
|
||||
"golang.org/x/net/context"
|
||||
pb "google.golang.org/genproto/googleapis/datastore/v1"
|
||||
"google.golang.org/grpc"
|
||||
|
@ -1942,18 +1945,7 @@ func TestRoundTrip(t *testing.T) {
|
|||
sortPL(*pl)
|
||||
}
|
||||
|
||||
equal := false
|
||||
switch v := got.(type) {
|
||||
// Round tripping a time.Time can result in a different time.Location: Local instead of UTC.
|
||||
// We therefore test equality explicitly, instead of relying on reflect.DeepEqual.
|
||||
case *T:
|
||||
equal = v.T.Equal(tc.want.(*T).T)
|
||||
case *SpecialTime:
|
||||
equal = v.MyTime.Equal(tc.want.(*SpecialTime).MyTime.Time)
|
||||
default:
|
||||
equal = reflect.DeepEqual(got, tc.want)
|
||||
}
|
||||
if !equal {
|
||||
if !testutil.Equal(got, tc.want, cmp.AllowUnexported(X0{}, X2{})) {
|
||||
t.Errorf("%s: compare:\ngot: %+#v\nwant: %+#v", tc.desc, got, tc.want)
|
||||
continue
|
||||
}
|
||||
|
@ -2707,7 +2699,7 @@ func TestLoadSavePLS(t *testing.T) {
|
|||
t.Errorf("%s: save: %v", tc.desc, err)
|
||||
continue
|
||||
}
|
||||
if !reflect.DeepEqual(e, tc.wantSave) {
|
||||
if !testutil.Equal(e, tc.wantSave) {
|
||||
t.Errorf("%s: save: \ngot: %+v\nwant: %+v", tc.desc, e, tc.wantSave)
|
||||
continue
|
||||
}
|
||||
|
@ -2729,7 +2721,7 @@ func TestLoadSavePLS(t *testing.T) {
|
|||
t.Errorf("%s: load: %v", tc.desc, err)
|
||||
continue
|
||||
}
|
||||
if !reflect.DeepEqual(gota, tc.wantLoad) {
|
||||
if !testutil.Equal(gota, tc.wantLoad) {
|
||||
t.Errorf("%s: load: \ngot: %+v\nwant: %+v", tc.desc, gota, tc.wantLoad)
|
||||
continue
|
||||
}
|
||||
|
@ -2864,7 +2856,7 @@ func TestQueryConstruction(t *testing.T) {
|
|||
}
|
||||
continue
|
||||
}
|
||||
if !reflect.DeepEqual(test.q, test.exp) {
|
||||
if !testutil.Equal(test.q, test.exp, cmp.AllowUnexported(Query{})) {
|
||||
t.Errorf("%d: mismatch: got %v want %v", i, test.q, test.exp)
|
||||
}
|
||||
}
|
||||
|
@ -3322,7 +3314,7 @@ func TestKeyLoaderEndToEnd(t *testing.T) {
|
|||
}
|
||||
|
||||
for i := range dst {
|
||||
if !reflect.DeepEqual(dst[i].K, keys[i]) {
|
||||
if !testutil.Equal(dst[i].K, keys[i]) {
|
||||
t.Fatalf("unexpected entity %d to have key %+v, got %+v", i, keys[i], dst[i].K)
|
||||
}
|
||||
}
|
||||
|
@ -78,7 +78,7 @@ func TestBasics(t *testing.T) {
	if err != nil {
		t.Errorf("client.Delete: %v", err)
	}
	if !reflect.DeepEqual(x0, x1) {
	if !testutil.Equal(x0, x1) {
		t.Errorf("compare: x0=%v, x1=%v", x0, x1)
	}
}

@ -117,7 +117,7 @@ func TestTopLevelKeyLoaded(t *testing.T) {
	}

	// The two keys should be absolutely identical.
	if !reflect.DeepEqual(e.K, k) {
	if !testutil.Equal(e.K, k) {
		t.Fatalf("e.K not equal to k; got %#v, want %#v", e.K, k)
	}

@ -142,7 +142,7 @@ func TestListValues(t *testing.T) {
	if err := client.Get(ctx, k, &p1); err != nil {
		t.Errorf("client.Get: %v", err)
	}
	if !reflect.DeepEqual(p0, p1) {
	if !testutil.Equal(p0, p1) {
		t.Errorf("compare:\np0=%v\np1=%#v", p0, p1)
	}
	if err = client.Delete(ctx, k); err != nil {

@ -402,7 +402,7 @@ func TestFilters(t *testing.T) {
	if err != nil {
		t.Errorf("client.GetAll: %v", err)
	}
	if !reflect.DeepEqual(got, want) {
	if !testutil.Equal(got, want) {
		t.Errorf("compare: got=%v, want=%v", got, want)
	}
}, func() {

@ -421,7 +421,7 @@ func TestFilters(t *testing.T) {
	if err != nil {
		t.Errorf("client.GetAll: %v", err)
	}
	if !reflect.DeepEqual(got, want) {
	if !testutil.Equal(got, want) {
		t.Errorf("compare: got=%v, want=%v", got, want)
	}
})

@ -730,10 +730,10 @@ func TestGetAllWithFieldMismatch(t *testing.T) {
		{X: 22},
	}
	getKeys, err := client.GetAll(ctx, NewQuery("GetAllThing").Ancestor(parent), &got)
	if len(getKeys) != 3 && !reflect.DeepEqual(getKeys, putKeys) {
	if len(getKeys) != 3 && !testutil.Equal(getKeys, putKeys) {
		t.Errorf("client.GetAll: keys differ\ngetKeys=%v\nputKeys=%v", getKeys, putKeys)
	}
	if !reflect.DeepEqual(got, want) {
	if !testutil.Equal(got, want) {
		t.Errorf("client.GetAll: entities differ\ngot =%v\nwant=%v", got, want)
	}
	if _, ok := err.(*ErrFieldMismatch); !ok {

@ -858,7 +858,7 @@ loop:
		got = append(got, dst.I)
	}
	sort.Ints(got)
	if !reflect.DeepEqual(got, tc.want) {
	if !testutil.Equal(got, tc.want) {
		t.Errorf("elems %q: got %+v want %+v", tc.desc, got, tc.want)
		continue
	}

@ -994,7 +994,7 @@ func TestNilPointers(t *testing.T) {
	xs := make([]*X, 2)
	if err := client.GetMulti(ctx, keys, xs); err != nil {
		t.Errorf("GetMulti: %v", err)
	} else if !reflect.DeepEqual(xs, src) {
	} else if !testutil.Equal(xs, src) {
		t.Errorf("GetMulti fetched %v, want %v", xs, src)
	}
@ -18,6 +18,8 @@ import (
	"reflect"
	"testing"

	"cloud.google.com/go/internal/testutil"

	pb "google.golang.org/genproto/googleapis/datastore/v1"
)

@ -164,7 +166,7 @@ func TestLoadEntityNestedLegacy(t *testing.T) {
		continue
	}

	if !reflect.DeepEqual(tc.want, dst) {
	if !testutil.Equal(tc.want, dst) {
		t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want)
	}
	}

@ -407,7 +409,7 @@ func TestLoadEntityNested(t *testing.T) {
		continue
	}

	if !reflect.DeepEqual(tc.want, dst) {
	if !testutil.Equal(tc.want, dst) {
		t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, dst, tc.want)
	}
	}

@ -503,7 +505,7 @@ func TestAlreadyPopulatedDst(t *testing.T) {
		continue
	}

	if !reflect.DeepEqual(tc.want, tc.dst) {
	if !testutil.Equal(tc.want, tc.dst) {
		t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, tc.dst, tc.want)
	}
	}

@ -748,7 +750,7 @@ func TestKeyLoader(t *testing.T) {
		continue
	}

	if !reflect.DeepEqual(tc.want, tc.dst) {
	if !testutil.Equal(tc.want, tc.dst) {
		t.Errorf("%s: compare:\ngot: %+v\nwant: %+v", tc.desc, tc.dst, tc.want)
	}
	}
@ -21,7 +21,10 @@ import (
	"sort"
	"testing"

	"cloud.google.com/go/internal/testutil"

	"github.com/golang/protobuf/proto"
	"github.com/google/go-cmp/cmp"
	"golang.org/x/net/context"
	pb "google.golang.org/genproto/googleapis/datastore/v1"
	"google.golang.org/grpc"

@ -334,7 +337,7 @@ func TestSimpleQuery(t *testing.T) {
	}
	}

	if !reflect.DeepEqual(tc.dst, tc.want) {
	if !testutil.Equal(tc.dst, tc.want) {
		t.Errorf("dst type %T: Entities\ngot %+v\nwant %+v", tc.dst, tc.dst, tc.want)
		continue
	}

@ -357,10 +360,10 @@ func TestQueriesAreImmutable(t *testing.T) {
	q0 := NewQuery("foo")
	q1 := NewQuery("foo")
	q2 := q1.Offset(2)
	if !reflect.DeepEqual(q0, q1) {
	if !testutil.Equal(q0, q1, cmp.AllowUnexported(Query{})) {
		t.Errorf("q0 and q1 were not equal")
	}
	if reflect.DeepEqual(q1, q2) {
	if testutil.Equal(q1, q2, cmp.AllowUnexported(Query{})) {
		t.Errorf("q1 and q2 were equal")
	}

@ -381,10 +384,10 @@ func TestQueriesAreImmutable(t *testing.T) {
	q4 := f()
	q5 := q4.Order("y")
	q6 := q4.Order("z")
	if !reflect.DeepEqual(q3, q5) {
	if !testutil.Equal(q3, q5, cmp.AllowUnexported(Query{})) {
		t.Errorf("q3 and q5 were not equal")
	}
	if reflect.DeepEqual(q5, q6) {
	if testutil.Equal(q5, q6, cmp.AllowUnexported(Query{})) {
		t.Errorf("q5 and q6 were equal")
	}
}
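The recurring substitution in these test diffs swaps reflect.DeepEqual for testutil.Equal, a thin wrapper over go-cmp. The cmp.AllowUnexported option is what permits comparing structs such as Query whose fields are unexported, which cmp otherwise refuses (reflect.DeepEqual handled them implicitly). A standalone sketch using go-cmp directly; the query type below is a stand-in, not the real datastore.Query:

package main

import (
	"fmt"

	"github.com/google/go-cmp/cmp"
)

// query stands in for a struct with unexported fields, like datastore.Query.
type query struct {
	kind   string
	offset int
}

func main() {
	a := query{kind: "foo"}
	b := query{kind: "foo", offset: 2}
	// Without AllowUnexported, cmp panics on the unexported fields.
	fmt.Println(cmp.Equal(a, b, cmp.AllowUnexported(query{}))) // false
	fmt.Print(cmp.Diff(a, b, cmp.AllowUnexported(query{})))    // shows the offset mismatch
}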
@ -15,9 +15,10 @@
package datastore

import (
	"reflect"
	"testing"

	"cloud.google.com/go/internal/testutil"

	pb "google.golang.org/genproto/googleapis/datastore/v1"
)

@ -187,7 +188,7 @@ func TestSaveEntityNested(t *testing.T) {
		continue
	}

	if !reflect.DeepEqual(tc.want, got) {
	if !testutil.Equal(tc.want, got) {
		t.Errorf("%s: compare:\ngot: %#v\nwant: %#v", tc.desc, got, tc.want)
	}
	}
@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

@ -27,6 +27,7 @@ import (
	clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
)

// Controller2CallOptions contains the retry settings for each method of Controller2Client.

@ -76,8 +77,8 @@ type Controller2Client struct {
	// The call options for this service.
	CallOptions *Controller2CallOptions

	// The metadata to be sent with each request.
	xGoogHeader []string
	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}

// NewController2Client creates a new controller2 client.

@ -134,7 +135,7 @@ func (c *Controller2Client) Close() error {
func (c *Controller2Client) SetGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// RegisterDebuggee registers the debuggee with the controller service.

@ -148,7 +149,7 @@ func (c *Controller2Client) SetGoogleClientInfo(keyval ...string) {
// from data loss, or change the debuggee_id format. Agents must handle
// debuggee_id value changing upon re-registration.
func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebuggerpb.RegisterDebuggeeRequest, opts ...gax.CallOption) (*clouddebuggerpb.RegisterDebuggeeResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.RegisterDebuggee[0:len(c.CallOptions.RegisterDebuggee):len(c.CallOptions.RegisterDebuggee)], opts...)
	var resp *clouddebuggerpb.RegisterDebuggeeResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@ -176,7 +177,7 @@ func (c *Controller2Client) RegisterDebuggee(ctx context.Context, req *clouddebu
// until the controller removes them from the active list to avoid
// setting those breakpoints again.
func (c *Controller2Client) ListActiveBreakpoints(ctx context.Context, req *clouddebuggerpb.ListActiveBreakpointsRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListActiveBreakpointsResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ListActiveBreakpoints[0:len(c.CallOptions.ListActiveBreakpoints):len(c.CallOptions.ListActiveBreakpoints)], opts...)
	var resp *clouddebuggerpb.ListActiveBreakpointsResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@ -199,7 +200,7 @@ func (c *Controller2Client) ListActiveBreakpoints(ctx context.Context, req *clou
// semantics. These may only make changes such as canonicalizing a value
// or snapping the location to the correct line of code.
func (c *Controller2Client) UpdateActiveBreakpoint(ctx context.Context, req *clouddebuggerpb.UpdateActiveBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.UpdateActiveBreakpointResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.UpdateActiveBreakpoint[0:len(c.CallOptions.UpdateActiveBreakpoint):len(c.CallOptions.UpdateActiveBreakpoint)], opts...)
	var resp *clouddebuggerpb.UpdateActiveBreakpointResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
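The same migration repeats across the generated clients: the x-goog-api-client header moves from a raw []string field into a prebuilt metadata.MD, so each call merely merges ready-made metadata instead of rebuilding header strings. A sketch of the stored value (the version strings here are illustrative, not the real ones):

kv := []string{"gl-go", "1.9.2", "gapic", "20171123", "gax", "2.0.0", "grpc", "1.7.0"}
md := metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
// md is a metadata.MD (a map[string][]string) ready to be merged by insertMetadata.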
@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

@ -27,6 +27,7 @@ import (
	clouddebuggerpb "google.golang.org/genproto/googleapis/devtools/clouddebugger/v2"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
)

// Debugger2CallOptions contains the retry settings for each method of Debugger2Client.

@ -80,8 +81,8 @@ type Debugger2Client struct {
	// The call options for this service.
	CallOptions *Debugger2CallOptions

	// The metadata to be sent with each request.
	xGoogHeader []string
	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}

// NewDebugger2Client creates a new debugger2 client.

@ -130,12 +131,12 @@ func (c *Debugger2Client) Close() error {
func (c *Debugger2Client) SetGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// SetBreakpoint sets the breakpoint to the debuggee.
func (c *Debugger2Client) SetBreakpoint(ctx context.Context, req *clouddebuggerpb.SetBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.SetBreakpointResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.SetBreakpoint[0:len(c.CallOptions.SetBreakpoint):len(c.CallOptions.SetBreakpoint)], opts...)
	var resp *clouddebuggerpb.SetBreakpointResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@ -151,7 +152,7 @@ func (c *Debugger2Client) SetBreakpoint(ctx context.Context, req *clouddebuggerp

// GetBreakpoint gets breakpoint information.
func (c *Debugger2Client) GetBreakpoint(ctx context.Context, req *clouddebuggerpb.GetBreakpointRequest, opts ...gax.CallOption) (*clouddebuggerpb.GetBreakpointResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.GetBreakpoint[0:len(c.CallOptions.GetBreakpoint):len(c.CallOptions.GetBreakpoint)], opts...)
	var resp *clouddebuggerpb.GetBreakpointResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@ -167,7 +168,7 @@ func (c *Debugger2Client) GetBreakpoint(ctx context.Context, req *clouddebuggerp

// DeleteBreakpoint deletes the breakpoint from the debuggee.
func (c *Debugger2Client) DeleteBreakpoint(ctx context.Context, req *clouddebuggerpb.DeleteBreakpointRequest, opts ...gax.CallOption) error {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.DeleteBreakpoint[0:len(c.CallOptions.DeleteBreakpoint):len(c.CallOptions.DeleteBreakpoint)], opts...)
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error

@ -179,7 +180,7 @@ func (c *Debugger2Client) DeleteBreakpoint(ctx context.Context, req *clouddebugg

// ListBreakpoints lists all breakpoints for the debuggee.
func (c *Debugger2Client) ListBreakpoints(ctx context.Context, req *clouddebuggerpb.ListBreakpointsRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListBreakpointsResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ListBreakpoints[0:len(c.CallOptions.ListBreakpoints):len(c.CallOptions.ListBreakpoints)], opts...)
	var resp *clouddebuggerpb.ListBreakpointsResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@ -195,7 +196,7 @@ func (c *Debugger2Client) ListBreakpoints(ctx context.Context, req *clouddebugge

// ListDebuggees lists all the debuggees that the user has access to.
func (c *Debugger2Client) ListDebuggees(ctx context.Context, req *clouddebuggerpb.ListDebuggeesRequest, opts ...gax.CallOption) (*clouddebuggerpb.ListDebuggeesResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ListDebuggees[0:len(c.CallOptions.ListDebuggees):len(c.CallOptions.ListDebuggees)], opts...)
	var resp *clouddebuggerpb.ListDebuggeesResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

@ -14,9 +14,11 @@

// AUTO-GENERATED CODE. DO NOT EDIT.

// Package debugger is an experimental, auto-generated package for the
// Package debugger is an auto-generated package for the
// Stackdriver Debugger API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// Examines the call stack and variables of a running application
// without stopping or slowing it down.
//

@ -28,11 +30,15 @@ import (
	"google.golang.org/grpc/metadata"
)

func insertXGoog(ctx context.Context, val []string) context.Context {
	md, _ := metadata.FromOutgoingContext(ctx)
	md = md.Copy()
	md["x-goog-api-client"] = val
	return metadata.NewOutgoingContext(ctx, md)
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	out, _ := metadata.FromOutgoingContext(ctx)
	out = out.Copy()
	for _, md := range mds {
		for k, v := range md {
			out[k] = append(out[k], v...)
		}
	}
	return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

@ -30,12 +30,15 @@ import (
	dlppb "google.golang.org/genproto/googleapis/privacy/dlp/v2beta1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
)

// CallOptions contains the retry settings for each method of Client.
type CallOptions struct {
	InspectContent         []gax.CallOption
	RedactContent          []gax.CallOption
	DeidentifyContent      []gax.CallOption
	AnalyzeDataSourceRisk  []gax.CallOption
	CreateInspectOperation []gax.CallOption
	ListInspectFindings    []gax.CallOption
	ListInfoTypes          []gax.CallOption

@ -67,6 +70,8 @@ func defaultCallOptions() *CallOptions {
	return &CallOptions{
		InspectContent:         retry[[2]string{"default", "non_idempotent"}],
		RedactContent:          retry[[2]string{"default", "non_idempotent"}],
		DeidentifyContent:      retry[[2]string{"default", "idempotent"}],
		AnalyzeDataSourceRisk:  retry[[2]string{"default", "idempotent"}],
		CreateInspectOperation: retry[[2]string{"default", "non_idempotent"}],
		ListInspectFindings:    retry[[2]string{"default", "idempotent"}],
		ListInfoTypes:          retry[[2]string{"default", "idempotent"}],

@ -90,8 +95,8 @@ type Client struct {
	// The call options for this service.
	CallOptions *CallOptions

	// The metadata to be sent with each request.
	xGoogHeader []string
	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}

// NewClient creates a new dlp service client.

@ -145,7 +150,7 @@ func (c *Client) Close() error {
func (c *Client) setGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// ResultPath returns the path for the result resource.

@ -159,7 +164,7 @@ func ResultPath(result string) string {
// InspectContent finds potentially sensitive info in a list of strings.
// This method has limits on input size, processing time, and output size.
func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRequest, opts ...gax.CallOption) (*dlppb.InspectContentResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.InspectContent[0:len(c.CallOptions.InspectContent):len(c.CallOptions.InspectContent)], opts...)
	var resp *dlppb.InspectContentResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@ -176,7 +181,7 @@ func (c *Client) InspectContent(ctx context.Context, req *dlppb.InspectContentRe
// RedactContent redacts potentially sensitive info from a list of strings.
// This method has limits on input size, processing time, and output size.
func (c *Client) RedactContent(ctx context.Context, req *dlppb.RedactContentRequest, opts ...gax.CallOption) (*dlppb.RedactContentResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.RedactContent[0:len(c.CallOptions.RedactContent):len(c.CallOptions.RedactContent)], opts...)
	var resp *dlppb.RedactContentResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@ -190,10 +195,46 @@ func (c *Client) RedactContent(ctx context.Context, req *dlppb.RedactContentRequ
	return resp, nil
}

// DeidentifyContent de-identifies potentially sensitive info from a list of strings.
// This method has limits on input size and output size.
func (c *Client) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest, opts ...gax.CallOption) (*dlppb.DeidentifyContentResponse, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.DeidentifyContent[0:len(c.CallOptions.DeidentifyContent):len(c.CallOptions.DeidentifyContent)], opts...)
	var resp *dlppb.DeidentifyContentResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.DeidentifyContent(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return resp, nil
}

// AnalyzeDataSourceRisk schedules a job to compute risk analysis metrics over content in a Google
// Cloud Platform repository.
func (c *Client) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest, opts ...gax.CallOption) (*AnalyzeDataSourceRiskOperation, error) {
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.AnalyzeDataSourceRisk[0:len(c.CallOptions.AnalyzeDataSourceRisk):len(c.CallOptions.AnalyzeDataSourceRisk)], opts...)
	var resp *longrunningpb.Operation
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
		var err error
		resp, err = c.client.AnalyzeDataSourceRisk(ctx, req, settings.GRPC...)
		return err
	}, opts...)
	if err != nil {
		return nil, err
	}
	return &AnalyzeDataSourceRiskOperation{
		lro: longrunning.InternalNewOperation(c.LROClient, resp),
	}, nil
}
|
||||
// CreateInspectOperation schedules a job scanning content in a Google Cloud Platform data
|
||||
// repository.
|
||||
func (c *Client) CreateInspectOperation(ctx context.Context, req *dlppb.CreateInspectOperationRequest, opts ...gax.CallOption) (*CreateInspectOperationHandle, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.CreateInspectOperation[0:len(c.CallOptions.CreateInspectOperation):len(c.CallOptions.CreateInspectOperation)], opts...)
|
||||
var resp *longrunningpb.Operation
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -211,7 +252,7 @@ func (c *Client) CreateInspectOperation(ctx context.Context, req *dlppb.CreateIn
|
|||
|
||||
// ListInspectFindings returns list of results for given inspect operation result set id.
|
||||
func (c *Client) ListInspectFindings(ctx context.Context, req *dlppb.ListInspectFindingsRequest, opts ...gax.CallOption) (*dlppb.ListInspectFindingsResponse, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.ListInspectFindings[0:len(c.CallOptions.ListInspectFindings):len(c.CallOptions.ListInspectFindings)], opts...)
|
||||
var resp *dlppb.ListInspectFindingsResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -227,7 +268,7 @@ func (c *Client) ListInspectFindings(ctx context.Context, req *dlppb.ListInspect
|
|||
|
||||
// ListInfoTypes returns sensitive information types for given category.
|
||||
func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequest, opts ...gax.CallOption) (*dlppb.ListInfoTypesResponse, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.ListInfoTypes[0:len(c.CallOptions.ListInfoTypes):len(c.CallOptions.ListInfoTypes)], opts...)
|
||||
var resp *dlppb.ListInfoTypesResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -243,7 +284,7 @@ func (c *Client) ListInfoTypes(ctx context.Context, req *dlppb.ListInfoTypesRequ
|
|||
|
||||
// ListRootCategories returns the list of root categories of sensitive information.
|
||||
func (c *Client) ListRootCategories(ctx context.Context, req *dlppb.ListRootCategoriesRequest, opts ...gax.CallOption) (*dlppb.ListRootCategoriesResponse, error) {
|
||||
ctx = insertXGoog(ctx, c.xGoogHeader)
|
||||
ctx = insertMetadata(ctx, c.xGoogMetadata)
|
||||
opts = append(c.CallOptions.ListRootCategories[0:len(c.CallOptions.ListRootCategories):len(c.CallOptions.ListRootCategories)], opts...)
|
||||
var resp *dlppb.ListRootCategoriesResponse
|
||||
err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
|
||||
|
@ -257,6 +298,75 @@ func (c *Client) ListRootCategories(ctx context.Context, req *dlppb.ListRootCate
|
|||
return resp, nil
|
||||
}
|
||||
|
||||
// AnalyzeDataSourceRiskOperation manages a long-running operation from AnalyzeDataSourceRisk.
|
||||
type AnalyzeDataSourceRiskOperation struct {
|
||||
lro *longrunning.Operation
|
||||
}
|
||||
|
||||
// AnalyzeDataSourceRiskOperation returns a new AnalyzeDataSourceRiskOperation from a given name.
|
||||
// The name must be that of a previously created AnalyzeDataSourceRiskOperation, possibly from a different process.
|
||||
func (c *Client) AnalyzeDataSourceRiskOperation(name string) *AnalyzeDataSourceRiskOperation {
|
||||
return &AnalyzeDataSourceRiskOperation{
|
||||
lro: longrunning.InternalNewOperation(c.LROClient, &longrunningpb.Operation{Name: name}),
|
||||
}
|
||||
}
|
||||
|
||||
// Wait blocks until the long-running operation is completed, returning the response and any errors encountered.
|
||||
//
|
||||
// See documentation of Poll for error-handling information.
|
||||
func (op *AnalyzeDataSourceRiskOperation) Wait(ctx context.Context, opts ...gax.CallOption) (*dlppb.RiskAnalysisOperationResult, error) {
|
||||
var resp dlppb.RiskAnalysisOperationResult
|
||||
if err := op.lro.WaitWithInterval(ctx, &resp, 45000*time.Millisecond, opts...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
// Poll fetches the latest state of the long-running operation.
|
||||
//
|
||||
// Poll also fetches the latest metadata, which can be retrieved by Metadata.
|
||||
//
|
||||
// If Poll fails, the error is returned and op is unmodified. If Poll succeeds and
|
||||
// the operation has completed with failure, the error is returned and op.Done will return true.
|
||||
// If Poll succeeds and the operation has completed successfully,
|
||||
// op.Done will return true, and the response of the operation is returned.
|
||||
// If Poll succeeds and the operation has not completed, the returned response and error are both nil.
|
||||
func (op *AnalyzeDataSourceRiskOperation) Poll(ctx context.Context, opts ...gax.CallOption) (*dlppb.RiskAnalysisOperationResult, error) {
|
||||
var resp dlppb.RiskAnalysisOperationResult
|
||||
if err := op.lro.Poll(ctx, &resp, opts...); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
if !op.Done() {
|
||||
return nil, nil
|
||||
}
|
||||
return &resp, nil
|
||||
}
|
||||
|
||||
// Metadata returns metadata associated with the long-running operation.
|
||||
// Metadata itself does not contact the server, but Poll does.
|
||||
// To get the latest metadata, call this method after a successful call to Poll.
|
||||
// If the metadata is not available, the returned metadata and error are both nil.
|
||||
func (op *AnalyzeDataSourceRiskOperation) Metadata() (*dlppb.RiskAnalysisOperationMetadata, error) {
|
||||
var meta dlppb.RiskAnalysisOperationMetadata
|
||||
if err := op.lro.Metadata(&meta); err == longrunning.ErrNoMetadata {
|
||||
return nil, nil
|
||||
} else if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &meta, nil
|
||||
}
|
||||
|
||||
// Done reports whether the long-running operation has completed.
|
||||
func (op *AnalyzeDataSourceRiskOperation) Done() bool {
|
||||
return op.lro.Done()
|
||||
}
|
||||
|
||||
// Name returns the name of the long-running operation.
|
||||
// The name is assigned by the server and is unique within the service from which the operation is created.
|
||||
func (op *AnalyzeDataSourceRiskOperation) Name() string {
|
||||
return op.lro.Name()
|
||||
}
|
||||
|
||||
// CreateInspectOperationHandle manages a long-running operation from CreateInspectOperation.
|
||||
type CreateInspectOperationHandle struct {
|
||||
lro *longrunning.Operation
|
||||
|
|
|
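Reviewer note on the long-running wrapper added above: because an AnalyzeDataSourceRiskOperation can be reconstructed from its server-assigned name, a caller can resume and poll a job from a different process instead of blocking in Wait. A minimal sketch, assuming the vendored package path is cloud.google.com/go/dlp/apiv2beta1 (the path is not visible in this excerpt) and using a made-up operation name:

package main

import (
	"log"
	"time"

	dlp "cloud.google.com/go/dlp/apiv2beta1" // assumed vendored path
	"golang.org/x/net/context"
)

func pollRisk(ctx context.Context, c *dlp.Client) {
	// Resume a previously started job by name (hypothetical value).
	op := c.AnalyzeDataSourceRiskOperation("operations/example-risk-job")
	for {
		resp, err := op.Poll(ctx)
		if err != nil {
			// Either the poll itself failed, or the operation finished
			// with an error (in which case op.Done() reports true).
			log.Fatal(err)
		}
		if op.Done() {
			_ = resp // final *dlppb.RiskAnalysisOperationResult
			return
		}
		// Not finished yet: Poll also refreshed the progress metadata.
		if meta, err := op.Metadata(); err == nil && meta != nil {
			_ = meta
		}
		time.Sleep(10 * time.Second)
	}
}

func main() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	pollRisk(ctx, c)
}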
@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

@@ -68,6 +68,47 @@ func ExampleClient_RedactContent() {
	_ = resp
}

func ExampleClient_DeidentifyContent() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.DeidentifyContentRequest{
		// TODO: Fill request struct fields.
	}
	resp, err := c.DeidentifyContent(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_AnalyzeDataSourceRisk() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
	if err != nil {
		// TODO: Handle error.
	}

	req := &dlppb.AnalyzeDataSourceRiskRequest{
		// TODO: Fill request struct fields.
	}
	op, err := c.AnalyzeDataSourceRisk(ctx, req)
	if err != nil {
		// TODO: Handle error.
	}

	resp, err := op.Wait(ctx)
	if err != nil {
		// TODO: Handle error.
	}
	// TODO: Use resp.
	_ = resp
}

func ExampleClient_CreateInspectOperation() {
	ctx := context.Background()
	c, err := dlp.NewClient(ctx)
@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

@@ -14,9 +14,11 @@

// AUTO-GENERATED CODE. DO NOT EDIT.

// Package dlp is an experimental, auto-generated package for the
// Package dlp is an auto-generated package for the
// DLP API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// The Google Data Loss Prevention API provides methods for detection of
// privacy-sensitive fragments in text, images, and Google Cloud Platform
// storage repositories.

@@ -27,11 +29,15 @@ import (
	"google.golang.org/grpc/metadata"
)

func insertXGoog(ctx context.Context, val []string) context.Context {
	md, _ := metadata.FromOutgoingContext(ctx)
	md = md.Copy()
	md["x-goog-api-client"] = val
	return metadata.NewOutgoingContext(ctx, md)
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	out, _ := metadata.FromOutgoingContext(ctx)
	out = out.Copy()
	for _, md := range mds {
		for k, v := range md {
			out[k] = append(out[k], v...)
		}
	}
	return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
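The insertMetadata replacement above merges whole metadata.MD values into the outgoing context instead of overwriting the single x-goog-api-client key the old insertXGoog wrote. A self-contained sketch of the same copy-then-append merge (the header values are made up for illustration):

package main

import (
	"fmt"

	"golang.org/x/net/context"
	"google.golang.org/grpc/metadata"
)

func main() {
	// An outgoing context that already carries one header value.
	ctx := metadata.NewOutgoingContext(context.Background(),
		metadata.Pairs("x-goog-api-client", "gl-go/1.9.2")) // illustrative

	// Merge another MD the way insertMetadata does: copy the existing MD,
	// then append values key by key so nothing is overwritten.
	extra := metadata.Pairs("x-goog-api-client", "gapic/20171107") // illustrative
	out, _ := metadata.FromOutgoingContext(ctx)
	out = out.Copy()
	for k, v := range extra {
		out[k] = append(out[k], v...)
	}
	ctx = metadata.NewOutgoingContext(ctx, out)

	md, _ := metadata.FromOutgoingContext(ctx)
	fmt.Println(md["x-goog-api-client"]) // [gl-go/1.9.2 gapic/20171107]
}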
@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

@@ -85,6 +85,18 @@ func (s *mockDlpServer) RedactContent(ctx context.Context, req *dlppb.RedactCont
	return s.resps[0].(*dlppb.RedactContentResponse), nil
}

func (s *mockDlpServer) DeidentifyContent(ctx context.Context, req *dlppb.DeidentifyContentRequest) (*dlppb.DeidentifyContentResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*dlppb.DeidentifyContentResponse), nil
}

func (s *mockDlpServer) CreateInspectOperation(ctx context.Context, req *dlppb.CreateInspectOperationRequest) (*longrunningpb.Operation, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {

@@ -97,6 +109,18 @@ func (s *mockDlpServer) CreateInspectOperation(ctx context.Context, req *dlppb.C
	return s.resps[0].(*longrunningpb.Operation), nil
}

func (s *mockDlpServer) AnalyzeDataSourceRisk(ctx context.Context, req *dlppb.AnalyzeDataSourceRiskRequest) (*longrunningpb.Operation, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {
		return nil, fmt.Errorf("x-goog-api-client = %v, expected gl-go key", xg)
	}
	s.reqs = append(s.reqs, req)
	if s.err != nil {
		return nil, s.err
	}
	return s.resps[0].(*longrunningpb.Operation), nil
}

func (s *mockDlpServer) ListInspectFindings(ctx context.Context, req *dlppb.ListInspectFindingsRequest) (*dlppb.ListInspectFindingsResponse, error) {
	md, _ := metadata.FromIncomingContext(ctx)
	if xg := md["x-goog-api-client"]; len(xg) == 0 || !strings.Contains(xg[0], "gl-go/") {

@@ -170,8 +194,23 @@ func TestDlpServiceInspectContent(t *testing.T) {

	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)

	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
	var items []*dlppb.ContentItem = nil
	var name string = "EMAIL_ADDRESS"
	var infoTypesElement = &dlppb.InfoType{
		Name: name,
	}
	var infoTypes = []*dlppb.InfoType{infoTypesElement}
	var inspectConfig = &dlppb.InspectConfig{
		InfoTypes: infoTypes,
	}
	var type_ string = "text/plain"
	var value string = "My email is example@example.com."
	var itemsElement = &dlppb.ContentItem{
		Type: type_,
		DataItem: &dlppb.ContentItem_Value{
			Value: value,
		},
	}
	var items = []*dlppb.ContentItem{itemsElement}
	var request = &dlppb.InspectContentRequest{
		InspectConfig: inspectConfig,
		Items:         items,

@@ -201,8 +240,23 @@ func TestDlpServiceInspectContentError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockDlp.err = gstatus.Error(errCode, "test error")

	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
	var items []*dlppb.ContentItem = nil
	var name string = "EMAIL_ADDRESS"
	var infoTypesElement = &dlppb.InfoType{
		Name: name,
	}
	var infoTypes = []*dlppb.InfoType{infoTypesElement}
	var inspectConfig = &dlppb.InspectConfig{
		InfoTypes: infoTypes,
	}
	var type_ string = "text/plain"
	var value string = "My email is example@example.com."
	var itemsElement = &dlppb.ContentItem{
		Type: type_,
		DataItem: &dlppb.ContentItem_Value{
			Value: value,
		},
	}
	var items = []*dlppb.ContentItem{itemsElement}
	var request = &dlppb.InspectContentRequest{
		InspectConfig: inspectConfig,
		Items:         items,

@@ -230,13 +284,26 @@ func TestDlpServiceRedactContent(t *testing.T) {

	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)

	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
	var items []*dlppb.ContentItem = nil
	var replaceConfigs []*dlppb.RedactContentRequest_ReplaceConfig = nil
	var name string = "EMAIL_ADDRESS"
	var infoTypesElement = &dlppb.InfoType{
		Name: name,
	}
	var infoTypes = []*dlppb.InfoType{infoTypesElement}
	var inspectConfig = &dlppb.InspectConfig{
		InfoTypes: infoTypes,
	}
	var type_ string = "text/plain"
	var value string = "My email is example@example.com."
	var itemsElement = &dlppb.ContentItem{
		Type: type_,
		DataItem: &dlppb.ContentItem_Value{
			Value: value,
		},
	}
	var items = []*dlppb.ContentItem{itemsElement}
	var request = &dlppb.RedactContentRequest{
		InspectConfig:  inspectConfig,
		Items:          items,
		ReplaceConfigs: replaceConfigs,
		InspectConfig:  inspectConfig,
		Items:          items,
	}

	c, err := NewClient(context.Background(), clientOpt)

@@ -263,13 +330,26 @@ func TestDlpServiceRedactContentError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockDlp.err = gstatus.Error(errCode, "test error")

	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
	var items []*dlppb.ContentItem = nil
	var replaceConfigs []*dlppb.RedactContentRequest_ReplaceConfig = nil
	var name string = "EMAIL_ADDRESS"
	var infoTypesElement = &dlppb.InfoType{
		Name: name,
	}
	var infoTypes = []*dlppb.InfoType{infoTypesElement}
	var inspectConfig = &dlppb.InspectConfig{
		InfoTypes: infoTypes,
	}
	var type_ string = "text/plain"
	var value string = "My email is example@example.com."
	var itemsElement = &dlppb.ContentItem{
		Type: type_,
		DataItem: &dlppb.ContentItem_Value{
			Value: value,
		},
	}
	var items = []*dlppb.ContentItem{itemsElement}
	var request = &dlppb.RedactContentRequest{
		InspectConfig:  inspectConfig,
		Items:          items,
		ReplaceConfigs: replaceConfigs,
		InspectConfig:  inspectConfig,
		Items:          items,
	}

	c, err := NewClient(context.Background(), clientOpt)

@@ -286,10 +366,160 @@ func TestDlpServiceRedactContentError(t *testing.T) {
	}
	_ = resp
}
func TestDlpServiceDeidentifyContent(t *testing.T) {
	var expectedResponse *dlppb.DeidentifyContentResponse = &dlppb.DeidentifyContentResponse{}

	mockDlp.err = nil
	mockDlp.reqs = nil

	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)

	var deidentifyConfig *dlppb.DeidentifyConfig = &dlppb.DeidentifyConfig{}
	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
	var items []*dlppb.ContentItem = nil
	var request = &dlppb.DeidentifyContentRequest{
		DeidentifyConfig: deidentifyConfig,
		InspectConfig:    inspectConfig,
		Items:            items,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.DeidentifyContent(context.Background(), request)

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q", got, want)
	}
}

func TestDlpServiceDeidentifyContentError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockDlp.err = gstatus.Error(errCode, "test error")

	var deidentifyConfig *dlppb.DeidentifyConfig = &dlppb.DeidentifyConfig{}
	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
	var items []*dlppb.ContentItem = nil
	var request = &dlppb.DeidentifyContentRequest{
		DeidentifyConfig: deidentifyConfig,
		InspectConfig:    inspectConfig,
		Items:            items,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	resp, err := c.DeidentifyContent(context.Background(), request)

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
func TestDlpServiceAnalyzeDataSourceRisk(t *testing.T) {
	var expectedResponse *dlppb.RiskAnalysisOperationResult = &dlppb.RiskAnalysisOperationResult{}

	mockDlp.err = nil
	mockDlp.reqs = nil

	any, err := ptypes.MarshalAny(expectedResponse)
	if err != nil {
		t.Fatal(err)
	}
	mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{
		Name:   "longrunning-test",
		Done:   true,
		Result: &longrunningpb.Operation_Response{Response: any},
	})

	var privacyMetric *dlppb.PrivacyMetric = &dlppb.PrivacyMetric{}
	var sourceTable *dlppb.BigQueryTable = &dlppb.BigQueryTable{}
	var request = &dlppb.AnalyzeDataSourceRiskRequest{
		PrivacyMetric: privacyMetric,
		SourceTable:   sourceTable,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	respLRO, err := c.AnalyzeDataSourceRisk(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := respLRO.Wait(context.Background())

	if err != nil {
		t.Fatal(err)
	}

	if want, got := request, mockDlp.reqs[0]; !proto.Equal(want, got) {
		t.Errorf("wrong request %q, want %q", got, want)
	}

	if want, got := expectedResponse, resp; !proto.Equal(want, got) {
		t.Errorf("wrong response %q, want %q", got, want)
	}
}

func TestDlpServiceAnalyzeDataSourceRiskError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockDlp.err = nil
	mockDlp.resps = append(mockDlp.resps[:0], &longrunningpb.Operation{
		Name: "longrunning-test",
		Done: true,
		Result: &longrunningpb.Operation_Error{
			Error: &status.Status{
				Code:    int32(errCode),
				Message: "test error",
			},
		},
	})

	var privacyMetric *dlppb.PrivacyMetric = &dlppb.PrivacyMetric{}
	var sourceTable *dlppb.BigQueryTable = &dlppb.BigQueryTable{}
	var request = &dlppb.AnalyzeDataSourceRiskRequest{
		PrivacyMetric: privacyMetric,
		SourceTable:   sourceTable,
	}

	c, err := NewClient(context.Background(), clientOpt)
	if err != nil {
		t.Fatal(err)
	}

	respLRO, err := c.AnalyzeDataSourceRisk(context.Background(), request)
	if err != nil {
		t.Fatal(err)
	}
	resp, err := respLRO.Wait(context.Background())

	if st, ok := gstatus.FromError(err); !ok {
		t.Errorf("got error %v, expected grpc error", err)
	} else if c := st.Code(); c != errCode {
		t.Errorf("got error code %q, want %q", c, errCode)
	}
	_ = resp
}
func TestDlpServiceCreateInspectOperation(t *testing.T) {
	var name string = "name3373707"
	var name2 string = "name2-1052831874"
	var expectedResponse = &dlppb.InspectOperationResult{
		Name: name,
		Name: name2,
	}

	mockDlp.err = nil

@@ -305,8 +535,26 @@ func TestDlpServiceCreateInspectOperation(t *testing.T) {
		Result: &longrunningpb.Operation_Response{Response: any},
	})

	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
	var storageConfig *dlppb.StorageConfig = &dlppb.StorageConfig{}
	var name string = "EMAIL_ADDRESS"
	var infoTypesElement = &dlppb.InfoType{
		Name: name,
	}
	var infoTypes = []*dlppb.InfoType{infoTypesElement}
	var inspectConfig = &dlppb.InspectConfig{
		InfoTypes: infoTypes,
	}
	var url string = "gs://example_bucket/example_file.png"
	var fileSet = &dlppb.CloudStorageOptions_FileSet{
		Url: url,
	}
	var cloudStorageOptions = &dlppb.CloudStorageOptions{
		FileSet: fileSet,
	}
	var storageConfig = &dlppb.StorageConfig{
		Type: &dlppb.StorageConfig_CloudStorageOptions{
			CloudStorageOptions: cloudStorageOptions,
		},
	}
	var outputConfig *dlppb.OutputStorageConfig = &dlppb.OutputStorageConfig{}
	var request = &dlppb.CreateInspectOperationRequest{
		InspectConfig: inspectConfig,

@@ -352,8 +600,26 @@ func TestDlpServiceCreateInspectOperationError(t *testing.T) {
		},
	})

	var inspectConfig *dlppb.InspectConfig = &dlppb.InspectConfig{}
	var storageConfig *dlppb.StorageConfig = &dlppb.StorageConfig{}
	var name string = "EMAIL_ADDRESS"
	var infoTypesElement = &dlppb.InfoType{
		Name: name,
	}
	var infoTypes = []*dlppb.InfoType{infoTypesElement}
	var inspectConfig = &dlppb.InspectConfig{
		InfoTypes: infoTypes,
	}
	var url string = "gs://example_bucket/example_file.png"
	var fileSet = &dlppb.CloudStorageOptions_FileSet{
		Url: url,
	}
	var cloudStorageOptions = &dlppb.CloudStorageOptions{
		FileSet: fileSet,
	}
	var storageConfig = &dlppb.StorageConfig{
		Type: &dlppb.StorageConfig_CloudStorageOptions{
			CloudStorageOptions: cloudStorageOptions,
		},
	}
	var outputConfig *dlppb.OutputStorageConfig = &dlppb.OutputStorageConfig{}
	var request = &dlppb.CreateInspectOperationRequest{
		InspectConfig: inspectConfig,

@@ -446,8 +712,8 @@ func TestDlpServiceListInfoTypes(t *testing.T) {

	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)

	var category string = "category50511102"
	var languageCode string = "languageCode-412800396"
	var category string = "PII"
	var languageCode string = "en"
	var request = &dlppb.ListInfoTypesRequest{
		Category:     category,
		LanguageCode: languageCode,

@@ -477,8 +743,8 @@ func TestDlpServiceListInfoTypesError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockDlp.err = gstatus.Error(errCode, "test error")

	var category string = "category50511102"
	var languageCode string = "languageCode-412800396"
	var category string = "PII"
	var languageCode string = "en"
	var request = &dlppb.ListInfoTypesRequest{
		Category:     category,
		LanguageCode: languageCode,

@@ -506,7 +772,7 @@ func TestDlpServiceListRootCategories(t *testing.T) {

	mockDlp.resps = append(mockDlp.resps[:0], expectedResponse)

	var languageCode string = "languageCode-412800396"
	var languageCode string = "en"
	var request = &dlppb.ListRootCategoriesRequest{
		LanguageCode: languageCode,
	}

@@ -535,7 +801,7 @@ func TestDlpServiceListRootCategoriesError(t *testing.T) {
	errCode := codes.PermissionDenied
	mockDlp.err = gstatus.Error(errCode, "test error")

	var languageCode string = "languageCode-412800396"
	var languageCode string = "en"
	var request = &dlppb.ListRootCategoriesRequest{
		LanguageCode: languageCode,
	}
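The long-running tests above stub the server by packing the expected result into longrunningpb.Operation's Response field as a protobuf Any, which op.Wait later unpacks into the typed response. A standalone sketch of that Any round trip using ptypes (the Duration message here is just a stand-in for RiskAnalysisOperationResult):

package main

import (
	"fmt"
	"log"

	"github.com/golang/protobuf/ptypes"
	durpb "github.com/golang/protobuf/ptypes/duration"
)

func main() {
	// Pack a concrete proto message into an Any, as the mock packs the
	// expected response into Operation_Response.
	msg := &durpb.Duration{Seconds: 45} // stand-in message
	any, err := ptypes.MarshalAny(msg)
	if err != nil {
		log.Fatal(err)
	}

	// Unpack it back into the concrete type, which is what the generated
	// Wait helper does before returning the typed result.
	var out durpb.Duration
	if err := ptypes.UnmarshalAny(any, &out); err != nil {
		log.Fatal(err)
	}
	fmt.Println(out.Seconds) // 45
}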
2
vendor/cloud.google.com/go/errorreporting/apiv1beta1/ReportErrorEvent_smoke_test.go
generated
vendored
@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

@@ -14,9 +14,11 @@

// AUTO-GENERATED CODE. DO NOT EDIT.

// Package errorreporting is an experimental, auto-generated package for the
// Package errorreporting is an auto-generated package for the
// Stackdriver Error Reporting API.
//
// NOTE: This package is in alpha. It is not stable, and is likely to change.
//
// Stackdriver Error Reporting groups and counts similar errors from cloud
// services. The Stackdriver Error Reporting API provides a way to report new
// errors and read access to error groups and their associated errors.

@@ -29,11 +31,15 @@ import (
	"google.golang.org/grpc/metadata"
)

func insertXGoog(ctx context.Context, val []string) context.Context {
	md, _ := metadata.FromOutgoingContext(ctx)
	md = md.Copy()
	md["x-goog-api-client"] = val
	return metadata.NewOutgoingContext(ctx, md)
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
	out, _ := metadata.FromOutgoingContext(ctx)
	out = out.Copy()
	for _, md := range mds {
		for k, v := range md {
			out[k] = append(out[k], v...)
		}
	}
	return metadata.NewOutgoingContext(ctx, out)
}

// DefaultAuthScopes reports the default set of authentication scopes to use with this package.
@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

@@ -27,6 +27,7 @@ import (
	clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
)

// ErrorGroupCallOptions contains the retry settings for each method of ErrorGroupClient.

@@ -74,8 +75,8 @@ type ErrorGroupClient struct {
	// The call options for this service.
	CallOptions *ErrorGroupCallOptions

	// The metadata to be sent with each request.
	xGoogHeader []string
	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}

// NewErrorGroupClient creates a new error group service client.

@@ -113,7 +114,7 @@ func (c *ErrorGroupClient) Close() error {
func (c *ErrorGroupClient) SetGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// ErrorGroupGroupPath returns the path for the group resource.

@@ -128,7 +129,7 @@ func ErrorGroupGroupPath(project, group string) string {

// GetGroup gets the specified group.
func (c *ErrorGroupClient) GetGroup(ctx context.Context, req *clouderrorreportingpb.GetGroupRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ErrorGroup, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.GetGroup[0:len(c.CallOptions.GetGroup):len(c.CallOptions.GetGroup)], opts...)
	var resp *clouderrorreportingpb.ErrorGroup
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {

@@ -145,7 +146,7 @@ func (c *ErrorGroupClient) GetGroup(ctx context.Context, req *clouderrorreportin
// UpdateGroup replaces the data for the specified group.
// Fails if the group does not exist.
func (c *ErrorGroupClient) UpdateGroup(ctx context.Context, req *clouderrorreportingpb.UpdateGroupRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ErrorGroup, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.UpdateGroup[0:len(c.CallOptions.UpdateGroup):len(c.CallOptions.UpdateGroup)], opts...)
	var resp *clouderrorreportingpb.ErrorGroup
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
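A pattern worth calling out in these generated clients: before appending caller-supplied options, the CallOptions slices are re-sliced with a full slice expression, s[0:len(s):len(s)], so the append can never write into the shared defaults' backing array. A self-contained demonstration of why the capacity index matters (the values are illustrative):

package main

import "fmt"

func main() {
	defaults := []string{"retry-default", "idempotent", "spare"}
	shared := defaults[:2] // len 2, cap 3: room to grow into defaults[2]

	// Plain append reuses the backing array and clobbers "spare".
	_ = append(shared, "caller-opt")
	fmt.Println(defaults[2]) // caller-opt

	// The generated clients' form: capping the slice at its own length
	// forces append to allocate a fresh array, leaving defaults intact.
	defaults[2] = "spare"
	safe := append(shared[0:len(shared):len(shared)], "caller-opt")
	fmt.Println(defaults[2], safe) // spare [retry-default idempotent caller-opt]
}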
@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

@@ -29,6 +29,7 @@ import (
	clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/metadata"
)

// ErrorStatsCallOptions contains the retry settings for each method of ErrorStatsClient.

@@ -78,8 +79,8 @@ type ErrorStatsClient struct {
	// The call options for this service.
	CallOptions *ErrorStatsCallOptions

	// The metadata to be sent with each request.
	xGoogHeader []string
	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}

// NewErrorStatsClient creates a new error stats service client.

@@ -118,7 +119,7 @@ func (c *ErrorStatsClient) Close() error {
func (c *ErrorStatsClient) SetGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// ErrorStatsProjectPath returns the path for the project resource.

@@ -131,7 +132,7 @@ func ErrorStatsProjectPath(project string) string {

// ListGroupStats lists the specified groups.
func (c *ErrorStatsClient) ListGroupStats(ctx context.Context, req *clouderrorreportingpb.ListGroupStatsRequest, opts ...gax.CallOption) *ErrorGroupStatsIterator {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ListGroupStats[0:len(c.CallOptions.ListGroupStats):len(c.CallOptions.ListGroupStats)], opts...)
	it := &ErrorGroupStatsIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorGroupStats, string, error) {

@@ -166,7 +167,7 @@ func (c *ErrorStatsClient) ListGroupStats(ctx context.Context, req *clouderrorre

// ListEvents lists the specified events.
func (c *ErrorStatsClient) ListEvents(ctx context.Context, req *clouderrorreportingpb.ListEventsRequest, opts ...gax.CallOption) *ErrorEventIterator {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ListEvents[0:len(c.CallOptions.ListEvents):len(c.CallOptions.ListEvents)], opts...)
	it := &ErrorEventIterator{}
	it.InternalFetch = func(pageSize int, pageToken string) ([]*clouderrorreportingpb.ErrorEvent, string, error) {

@@ -201,7 +202,7 @@ func (c *ErrorStatsClient) ListEvents(ctx context.Context, req *clouderrorreport

// DeleteEvents deletes all error events of a given project.
func (c *ErrorStatsClient) DeleteEvents(ctx context.Context, req *clouderrorreportingpb.DeleteEventsRequest, opts ...gax.CallOption) (*clouderrorreportingpb.DeleteEventsResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.DeleteEvents[0:len(c.CallOptions.DeleteEvents):len(c.CallOptions.DeleteEvents)], opts...)
	var resp *clouderrorreportingpb.DeleteEventsResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
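ListGroupStats and ListEvents hand back iterators rather than single responses. Consumption follows the usual GAPIC iterator contract; the google.golang.org/api/iterator sentinel is not shown in this diff, so treat the Next/Done details below as an assumption. A rough sketch with a hypothetical project name:

package main

import (
	"fmt"
	"log"

	errorreporting "cloud.google.com/go/errorreporting/apiv1beta1"
	"golang.org/x/net/context"
	"google.golang.org/api/iterator"
	clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
)

func main() {
	ctx := context.Background()
	c, err := errorreporting.NewErrorStatsClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	it := c.ListGroupStats(ctx, &clouderrorreportingpb.ListGroupStatsRequest{
		ProjectName: "projects/my-project", // hypothetical project
	})
	for {
		stats, err := it.Next()
		if err == iterator.Done { // assumed sentinel from the GAPIC pattern
			break
		}
		if err != nil {
			log.Fatal(err)
		}
		fmt.Println(stats.Count)
	}
}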
@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.

@@ -24,6 +24,7 @@ import (
	"google.golang.org/api/transport"
	clouderrorreportingpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
	"google.golang.org/grpc"
	"google.golang.org/grpc/metadata"
)

// ReportErrorsCallOptions contains the retry settings for each method of ReportErrorsClient.

@@ -56,8 +57,8 @@ type ReportErrorsClient struct {
	// The call options for this service.
	CallOptions *ReportErrorsCallOptions

	// The metadata to be sent with each request.
	xGoogHeader []string
	// The x-goog-* metadata to be sent with each request.
	xGoogMetadata metadata.MD
}

// NewReportErrorsClient creates a new report errors service client.

@@ -95,7 +96,7 @@ func (c *ReportErrorsClient) Close() error {
func (c *ReportErrorsClient) SetGoogleClientInfo(keyval ...string) {
	kv := append([]string{"gl-go", version.Go()}, keyval...)
	kv = append(kv, "gapic", version.Repo, "gax", gax.Version, "grpc", grpc.Version)
	c.xGoogHeader = []string{gax.XGoogHeader(kv...)}
	c.xGoogMetadata = metadata.Pairs("x-goog-api-client", gax.XGoogHeader(kv...))
}

// ReportErrorsProjectPath returns the path for the project resource.

@@ -114,7 +115,7 @@ func ReportErrorsProjectPath(project string) string {
// for authentication. To use an API key, append it to the URL as the value of
// a key parameter. For example:<pre>POST https://clouderrorreporting.googleapis.com/v1beta1/projects/example-project/events:report?key=123ABC456</pre>
func (c *ReportErrorsClient) ReportErrorEvent(ctx context.Context, req *clouderrorreportingpb.ReportErrorEventRequest, opts ...gax.CallOption) (*clouderrorreportingpb.ReportErrorEventResponse, error) {
	ctx = insertXGoog(ctx, c.xGoogHeader)
	ctx = insertMetadata(ctx, c.xGoogMetadata)
	opts = append(c.CallOptions.ReportErrorEvent[0:len(c.CallOptions.ReportErrorEvent):len(c.CallOptions.ReportErrorEvent)], opts...)
	var resp *clouderrorreportingpb.ReportErrorEventResponse
	err := gax.Invoke(ctx, func(ctx context.Context, settings gax.CallSettings) error {
@@ -1,4 +1,4 @@
// Copyright 2017, Google Inc. All rights reserved.
// Copyright 2017, Google LLC All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@@ -1,215 +0,0 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package errorreporting

import (
	"bytes"
	"errors"
	"log"
	"strings"
	"testing"

	"cloud.google.com/go/logging"
	"golang.org/x/net/context"
	"google.golang.org/api/option"
)

type fakeLogger struct {
	entry *logging.Entry
	fail  bool
}

func (c *fakeLogger) LogSync(ctx context.Context, e logging.Entry) error {
	if c.fail {
		return errors.New("request failed")
	}
	c.entry = &e
	return nil
}

func (c *fakeLogger) Close() error {
	return nil
}

func newTestClientUsingLogging(c *fakeLogger) *Client {
	newLoggerInterface = func(ctx context.Context, project string, opts ...option.ClientOption) (loggerInterface, error) {
		return c, nil
	}
	t, err := NewClient(context.Background(), testProjectID, "myservice", "v1.000", true)
	if err != nil {
		panic(err)
	}
	t.RepanicDefault = false
	return t
}

func TestCatchNothingUsingLogging(t *testing.T) {
	fl := &fakeLogger{}
	c := newTestClientUsingLogging(fl)
	defer func() {
		e := fl.entry
		if e != nil {
			t.Errorf("got error report, expected none")
		}
	}()
	defer c.Catch(ctx)
}

func entryMessage(e *logging.Entry) string {
	return e.Payload.(map[string]interface{})["message"].(string)
}

func commonLoggingChecks(t *testing.T, e *logging.Entry, panickingFunction string) {
	if e.Payload.(map[string]interface{})["serviceContext"].(map[string]string)["service"] != "myservice" {
		t.Errorf("error report didn't contain service name")
	}
	if e.Payload.(map[string]interface{})["serviceContext"].(map[string]string)["version"] != "v1.000" {
		t.Errorf("error report didn't contain version name")
	}
	if !strings.Contains(entryMessage(e), "hello, error") {
		t.Errorf("error report didn't contain message")
	}
	if !strings.Contains(entryMessage(e), panickingFunction) {
		t.Errorf("error report didn't contain stack trace")
	}
}

func TestCatchPanicUsingLogging(t *testing.T) {
	fl := &fakeLogger{}
	c := newTestClientUsingLogging(fl)
	defer func() {
		e := fl.entry
		if e == nil {
			t.Fatalf("got no error report, expected one")
		}
		commonLoggingChecks(t, e, "TestCatchPanic")
		if !strings.Contains(entryMessage(e), "divide by zero") {
			t.Errorf("error report didn't contain recovered value")
		}
	}()
	defer c.Catch(ctx, WithMessage("hello, error"))
	var x int
	x = x / x
}

func TestCatchPanicNilClientUsingLogging(t *testing.T) {
	buf := new(bytes.Buffer)
	log.SetOutput(buf)
	defer func() {
		recover()
		body := buf.String()
		if !strings.Contains(body, "divide by zero") {
			t.Errorf("error report didn't contain recovered value")
		}
		if !strings.Contains(body, "hello, error") {
			t.Errorf("error report didn't contain message")
		}
		if !strings.Contains(body, "TestCatchPanicNilClient") {
			t.Errorf("error report didn't contain recovered value")
		}
	}()
	var c *Client
	defer c.Catch(ctx, WithMessage("hello, error"))
	var x int
	x = x / x
}

func TestLogFailedReportsUsingLogging(t *testing.T) {
	fl := &fakeLogger{fail: true}
	c := newTestClientUsingLogging(fl)
	buf := new(bytes.Buffer)
	log.SetOutput(buf)
	defer func() {
		recover()
		body := buf.String()
		if !strings.Contains(body, "hello, error") {
			t.Errorf("error report didn't contain message")
		}
		if !strings.Contains(body, "errorreporting.TestLogFailedReports") {
			t.Errorf("error report didn't contain stack trace")
		}
		if !strings.Contains(body, "divide by zero") {
			t.Errorf("error report didn't contain recovered value")
		}
	}()
	defer c.Catch(ctx, WithMessage("hello, error"))
	var x int
	x = x / x
}

func TestCatchNilPanicUsingLogging(t *testing.T) {
	fl := &fakeLogger{}
	c := newTestClientUsingLogging(fl)
	defer func() {
		e := fl.entry
		if e == nil {
			t.Fatalf("got no error report, expected one")
		}
		commonLoggingChecks(t, e, "TestCatchNilPanic")
		if !strings.Contains(entryMessage(e), "nil") {
			t.Errorf("error report didn't contain recovered value")
		}
	}()
	b := true
	defer c.Catch(ctx, WithMessage("hello, error"), PanicFlag(&b))
	panic(nil)
}

func TestNotCatchNilPanicUsingLogging(t *testing.T) {
	fl := &fakeLogger{}
	c := newTestClientUsingLogging(fl)
	defer func() {
		e := fl.entry
		if e != nil {
			t.Errorf("got error report, expected none")
		}
	}()
	defer c.Catch(ctx, WithMessage("hello, error"))
	panic(nil)
}

func TestReportUsingLogging(t *testing.T) {
	fl := &fakeLogger{}
	c := newTestClientUsingLogging(fl)
	c.Report(ctx, nil, "hello, ", "error")
	e := fl.entry
	if e == nil {
		t.Fatalf("got no error report, expected one")
	}
	commonLoggingChecks(t, e, "TestReport")
}

func TestReportfUsingLogging(t *testing.T) {
	fl := &fakeLogger{}
	c := newTestClientUsingLogging(fl)
	c.Reportf(ctx, nil, "hello, error 2+%d=%d", 2, 2+2)
	e := fl.entry
	if e == nil {
		t.Fatalf("got no error report, expected one")
	}
	commonLoggingChecks(t, e, "TestReportf")
	if !strings.Contains(entryMessage(e), "2+2=4") {
		t.Errorf("error report didn't contain formatted message")
	}
}

func TestCloseUsingLogging(t *testing.T) {
	fl := &fakeLogger{}
	c := newTestClientUsingLogging(fl)
	err := c.Close()
	if err != nil {
		t.Fatal(err)
	}
}
@@ -17,58 +17,6 @@
// This package is still experimental and subject to change.
//
// See https://cloud.google.com/error-reporting/ for more information.
//
// To initialize a client, use the NewClient function.
//
//	import "cloud.google.com/go/errorreporting"
//	...
//	errorsClient, err = errorreporting.NewClient(ctx, projectID, "myservice", "v1.0", true)
//
// The client can recover panics in your program and report them as errors.
// To use this functionality, defer its Catch method, as you would any other
// function for recovering panics.
//
//	func foo(ctx context.Context, ...) {
//		defer errorsClient.Catch(ctx)
//		...
//	}
//
// Catch writes an error report containing the recovered value and a stack trace
// to Stackdriver Error Reporting.
//
// There are various options you can add to the call to Catch that modify how
// panics are handled.
//
// WithMessage and WithMessagef add a custom message after the recovered value,
// using fmt.Sprint and fmt.Sprintf respectively.
//
//	defer errorsClient.Catch(ctx, errorreporting.WithMessagef("x=%d", x))
//
// WithRequest fills in various fields in the error report with information
// about an http.Request that's being handled.
//
//	defer errorsClient.Catch(ctx, errorreporting.WithRequest(httpReq))
//
// By default, after recovering a panic, Catch will panic again with the
// recovered value. You can turn off this behavior with the Repanic option.
//
//	defer errorsClient.Catch(ctx, errorreporting.Repanic(false))
//
// You can also change the default behavior for the client by changing the
// RepanicDefault field.
//
//	errorsClient.RepanicDefault = false
//
// It is also possible to write an error report directly without recovering a
// panic, using Report or Reportf.
//
//	if err != nil {
//		errorsClient.Reportf(ctx, r, "unexpected error %v", err)
//	}
//
// If you try to write an error report with a nil client, or if the client
// fails to write the report to the server, the error report is logged using
// log.Println.
package errorreporting // import "cloud.google.com/go/errorreporting"

import (

@@ -77,16 +25,15 @@ import (
	"log"
	"net/http"
	"runtime"
	"strings"
	"time"

	api "cloud.google.com/go/errorreporting/apiv1beta1"
	"cloud.google.com/go/internal/version"
	"cloud.google.com/go/logging"
	"github.com/golang/protobuf/ptypes/timestamp"
	"github.com/golang/protobuf/ptypes"
	gax "github.com/googleapis/gax-go"
	"golang.org/x/net/context"
	"google.golang.org/api/option"
	"google.golang.org/api/support/bundler"
	erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
)

@@ -94,12 +41,40 @@ const (
	userAgent = `gcloud-golang-errorreporting/20160701`
)

type apiInterface interface {
	ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, opts ...gax.CallOption) (*erpb.ReportErrorEventResponse, error)
	Close() error
// Config is additional configuration for Client.
type Config struct {
	// ServiceName identifies the running program and is included in the error reports.
	// Optional.
	ServiceName string

	// ServiceVersion identifies the version of the running program and is
	// included in the error reports.
	// Optional.
	ServiceVersion string

	// OnError is the function to call if any background
	// tasks errored. By default, errors are logged.
	OnError func(err error)
}

var newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) {
// Entry holds information about the reported error.
type Entry struct {
	Error error
	Req   *http.Request // if error is associated with a request.
	Stack []byte        // if user does not provide a stack trace, runtime.Stack will be called
}

// Client represents a Google Cloud Error Reporting client.
type Client struct {
	projectID      string
	apiClient      client
	serviceContext erpb.ServiceContext
	bundler        *bundler.Bundler

	onErrorFn func(err error)
}

var newClient = func(ctx context.Context, opts ...option.ClientOption) (client, error) {
	client, err := api.NewReportErrorsClient(ctx, opts...)
	if err != nil {
		return nil, err

@@ -108,289 +83,99 @@ var newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (ap
	return client, nil
}

type loggerInterface interface {
	LogSync(ctx context.Context, e logging.Entry) error
	Close() error
}

type logger struct {
	*logging.Logger
	c *logging.Client
}

func (l logger) Close() error {
	return l.c.Close()
}

var newLoggerInterface = func(ctx context.Context, projectID string, opts ...option.ClientOption) (loggerInterface, error) {
	lc, err := logging.NewClient(ctx, projectID, opts...)
	if err != nil {
		return nil, fmt.Errorf("creating Logging client: %v", err)
	}
	l := lc.Logger("errorreports")
	return logger{l, lc}, nil
}

type sender interface {
	send(ctx context.Context, r *http.Request, message string)
	close() error
}

// errorApiSender sends error reports using the Stackdriver Error Reporting API.
type errorApiSender struct {
	apiClient      apiInterface
	projectID      string
	serviceContext erpb.ServiceContext
}

// loggingSender sends error reports using the Stackdriver Logging API.
type loggingSender struct {
	logger         loggerInterface
	projectID      string
	serviceContext map[string]string
}

// Client represents a Google Cloud Error Reporting client.
type Client struct {
	sender
	// RepanicDefault determines whether Catch will re-panic after recovering a
	// panic. This behavior can be overridden for an individual call to Catch using
	// the Repanic option.
	RepanicDefault bool
}

// NewClient returns a new error reporting client. Generally you will want
// to create a client on program initialization and use it through the lifetime
// of the process.
//
// The service name and version string identify the running program, and are
// included in error reports. The version string can be left empty.
//
// Set useLogging to report errors also using Stackdriver Logging,
// which will result in errors appearing in both the logs and the error
// dashboard. This is useful if you are already a user of Stackdriver Logging.
func NewClient(ctx context.Context, projectID, serviceName, serviceVersion string, useLogging bool, opts ...option.ClientOption) (*Client, error) {
	if useLogging {
		l, err := newLoggerInterface(ctx, projectID, opts...)
		if err != nil {
			return nil, fmt.Errorf("creating Logging client: %v", err)
		}
		sender := &loggingSender{
			logger:    l,
			projectID: projectID,
			serviceContext: map[string]string{
				"service": serviceName,
			},
		}
		if serviceVersion != "" {
			sender.serviceContext["version"] = serviceVersion
		}
		c := &Client{
			sender:         sender,
			RepanicDefault: true,
		}
		return c, nil
	} else {
		a, err := newApiInterface(ctx, opts...)
		if err != nil {
			return nil, fmt.Errorf("creating Error Reporting client: %v", err)
		}
		c := &Client{
			sender: &errorApiSender{
				apiClient: a,
				projectID: "projects/" + projectID,
				serviceContext: erpb.ServiceContext{
					Service: serviceName,
					Version: serviceVersion,
				},
			},
			RepanicDefault: true,
		}
		return c, nil
func NewClient(ctx context.Context, projectID string, cfg Config, opts ...option.ClientOption) (*Client, error) {
	if cfg.ServiceName == "" {
		cfg.ServiceName = "goapp"
	}
	c, err := newClient(ctx, opts...)
	if err != nil {
		return nil, fmt.Errorf("creating client: %v", err)
	}

	client := &Client{
		apiClient: c,
		projectID: "projects/" + projectID,
		serviceContext: erpb.ServiceContext{
			Service: cfg.ServiceName,
			Version: cfg.ServiceVersion,
		},
	}
	bundler := bundler.NewBundler((*erpb.ReportErrorEventRequest)(nil), func(bundle interface{}) {
		reqs := bundle.([]*erpb.ReportErrorEventRequest)
		for _, req := range reqs {
			_, err = client.apiClient.ReportErrorEvent(ctx, req)
			if err != nil {
				client.onError(fmt.Errorf("failed to upload: %v", err))
			}
		}
	})
	// TODO(jbd): Optimize bundler limits.
	bundler.DelayThreshold = 2 * time.Second
	bundler.BundleCountThreshold = 100
	bundler.BundleByteThreshold = 1000
	bundler.BundleByteLimit = 1000
	bundler.BufferedByteLimit = 10000
	client.bundler = bundler
	return client, nil
}
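The rewritten NewClient above wires reports through google.golang.org/api/support/bundler, so Report is buffered and non-blocking while ReportSync stays synchronous. A standalone sketch of the same batching behavior, with thresholds copied from the diff and a string payload standing in for *erpb.ReportErrorEventRequest:

package main

import (
	"fmt"
	"time"

	"google.golang.org/api/support/bundler"
)

func main() {
	// The handler fires once per bundle, like the upload loop in NewClient.
	b := bundler.NewBundler((*string)(nil), func(bundle interface{}) {
		for _, s := range bundle.([]*string) {
			fmt.Println("uploading:", *s)
		}
	})
	// Same knobs the diff tunes (note its TODO about optimizing them).
	b.DelayThreshold = 2 * time.Second
	b.BundleCountThreshold = 100
	b.BundleByteLimit = 1000
	b.BufferedByteLimit = 10000

	msg := "report-1" // stand-in payload
	b.Add(&msg, 1)    // buffered; does not block on the network
	b.Flush()         // force delivery, as (*Client).Flush does
}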
|
||||
|
||||
func (c *Client) onError(err error) {
|
||||
if c.onErrorFn != nil {
|
||||
c.onErrorFn(err)
|
||||
return
|
||||
}
|
||||
log.Println(err)
|
||||
}

// Close closes any resources held by the client.
// Close should be called when the client is no longer needed.
// It need not be called at program exit.
func (c *Client) Close() error {
	err := c.sender.close()
	c.sender = nil
	return c.apiClient.Close()
}

// Report writes an error report. It doesn't block. Errors in
// writing the error report can be handled via Client.OnError.
func (c *Client) Report(e Entry) {
	var stack string
	if e.Stack != nil {
		stack = string(e.Stack)
	}
	req := c.makeReportErrorEventRequest(e.Req, e.Error.Error(), stack)
	c.bundler.Add(req, 1)
}
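A sketch of the new asynchronous path (ec is a *Client as returned by the new NewClient; doWork and r are illustrative):

	if err := doWork(); err != nil {
		ec.Report(errorreporting.Entry{
			Error: err,
			Req:   r, // optional *http.Request; included in the report if set
		})
		// Report returns immediately; the bundler uploads in the background.
	}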

// ReportSync writes an error report. It blocks until the entry is written.
func (c *Client) ReportSync(ctx context.Context, e Entry) error {
	var stack string
	if e.Stack != nil {
		stack = string(e.Stack)
	}
	req := c.makeReportErrorEventRequest(e.Req, e.Error.Error(), stack)
	_, err := c.apiClient.ReportErrorEvent(ctx, req)
	return err
}
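Because ReportSync bypasses the bundler and blocks until the event is written, it suits last-chance reporting on fatal paths. A sketch (fatalStartupCheck is illustrative):

	if err := fatalStartupCheck(); err != nil {
		if rerr := ec.ReportSync(ctx, errorreporting.Entry{Error: err}); rerr != nil {
			log.Printf("could not file error report: %v", rerr)
		}
		log.Fatal(err)
	}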

// An Option is an optional argument to Catch.
type Option interface {
	isOption()
}

// PanicFlag returns an Option that can inform Catch that a panic has occurred.
// If *p is true when Catch is called, an error report is made even if recover
// returns nil. This allows Catch to report an error for panic(nil).
// If p is nil, the option is ignored.
//
// Here is an example of how to use PanicFlag:
//
//	func foo(ctx context.Context, ...) {
//		hasPanicked := true
//		defer errorsClient.Catch(ctx, errorreporting.PanicFlag(&hasPanicked))
//		...
//		...
//		// We have reached the end of the function, so we're not panicking.
//		hasPanicked = false
//	}
func PanicFlag(p *bool) Option { return panicFlag{p} }

type panicFlag struct {
	*bool
}

func (h panicFlag) isOption() {}

// Flush blocks until all currently buffered error reports are sent.
// If any errors occurred since the last call to Flush, or the
// creation of the client if this is the first call, then Flush reports the
// error via the (*Client).OnError handler.
func (c *Client) Flush() {
	c.bundler.Flush()
}
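Since Report only enqueues work, buffered entries can be lost on a fast exit; a sketch of draining the bundler at shutdown (assuming an ec client as above):

	defer ec.Close()
	defer ec.Flush() // deferred last, so it runs first: drain, then close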

// Repanic returns an Option that determines whether Catch will re-panic after
// it reports an error. This overrides the default in the client.
func Repanic(r bool) Option { return repanic(r) }

type repanic bool

func (r repanic) isOption() {}
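A sketch of overriding the client-wide default for a single call (legacy API being removed in this diff):

	// Report the panic but keep this goroutine alive.
	defer errorsClient.Catch(ctx, errorreporting.Repanic(false))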

// WithRequest returns an Option that informs Catch or Report of an http.Request
// that is being handled. Information from the Request is included in the error
// report, if one is made.
func WithRequest(r *http.Request) Option { return withRequest{r} }

type withRequest struct {
	*http.Request
}

func (w withRequest) isOption() {}
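A sketch of attaching the in-flight request to whatever Catch reports (legacy API; the handler wiring is illustrative):

	http.HandleFunc("/work", func(w http.ResponseWriter, r *http.Request) {
		defer errorsClient.Catch(r.Context(), errorreporting.WithRequest(r))
		// ... handler body that may panic ...
	})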

// WithMessage returns an Option that sets a message to be included in the error
// report, if one is made. v is converted to a string with fmt.Sprint.
func WithMessage(v ...interface{}) Option { return message(v) }

type message []interface{}

func (m message) isOption() {}

// WithMessagef returns an Option that sets a message to be included in the error
// report, if one is made. format and v are converted to a string with fmt.Sprintf.
func WithMessagef(format string, v ...interface{}) Option { return messagef{format, v} }

type messagef struct {
	format string
	v      []interface{}
}

func (m messagef) isOption() {}
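The message options compose on a single Catch call; a sketch (jobID is illustrative):

	defer errorsClient.Catch(ctx,
		errorreporting.WithMessage("processing job"),
		errorreporting.WithMessagef("job id: %d", jobID),
	)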

// Catch tries to recover a panic; if it succeeds, it writes an error report.
// It should be called by deferring it, like any other function for recovering
// panics.
//
// Catch can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Catch(ctx context.Context, opt ...Option) {
	panicked := false
	for _, o := range opt {
		switch o := o.(type) {
		case panicFlag:
			panicked = panicked || o.bool != nil && *o.bool
		}
	}
	x := recover()
	if x == nil && !panicked {
		return
	}
	var (
		r             *http.Request
		shouldRepanic = true
		messages      = []string{fmt.Sprint(x)}
	)
	if c != nil {
		shouldRepanic = c.RepanicDefault
	}
	for _, o := range opt {
		switch o := o.(type) {
		case repanic:
			shouldRepanic = bool(o)
		case withRequest:
			r = o.Request
		case message:
			messages = append(messages, fmt.Sprint(o...))
		case messagef:
			messages = append(messages, fmt.Sprintf(o.format, o.v...))
		}
	}
	c.logInternal(ctx, r, true, strings.Join(messages, " "))
	if shouldRepanic {
		panic(x)
	}
}

func (c *Client) makeReportErrorEventRequest(r *http.Request, msg string, stack string) *erpb.ReportErrorEventRequest {
	if stack == "" {
		// limit the stack trace to 16k.
		var buf [16 * 1024]byte
		stack = chopStack(buf[0:runtime.Stack(buf[:], false)])
	}
	message := msg + "\n" + stack
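Stepping back to the legacy Catch defined above, its canonical use was a bare deferred call, relying on RepanicDefault to rethrow (sketch; mightPanic is illustrative):

	func handle(ctx context.Context) {
		defer errorsClient.Catch(ctx) // recover, report, then re-panic by default
		mightPanic()
	}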

// Report writes an error report unconditionally, instead of only when a panic
// occurs.
// If r is non-nil, information from the Request is included in the error report.
//
// Report can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Report(ctx context.Context, r *http.Request, v ...interface{}) {
	c.logInternal(ctx, r, false, fmt.Sprint(v...))
}

// Reportf writes an error report unconditionally, instead of only when a panic
// occurs.
// If r is non-nil, information from the Request is included in the error report.
//
// Reportf can be called concurrently with other calls to Catch, Report or Reportf.
func (c *Client) Reportf(ctx context.Context, r *http.Request, format string, v ...interface{}) {
	c.logInternal(ctx, r, false, fmt.Sprintf(format, v...))
}

func (c *Client) logInternal(ctx context.Context, r *http.Request, isPanic bool, msg string) {
	// limit the stack trace to 16k.
	var buf [16384]byte
	stack := buf[0:runtime.Stack(buf[:], false)]
	message := msg + "\n" + chopStack(stack, isPanic)
	if c == nil {
		log.Println("Error report used nil client:", message)
		return
	}
	c.send(ctx, r, message)
}

func (s *loggingSender) send(ctx context.Context, r *http.Request, message string) {
	payload := map[string]interface{}{
		"eventTime":      time.Now().In(time.UTC).Format(time.RFC3339Nano),
		"message":        message,
		"serviceContext": s.serviceContext,
	}
	if r != nil {
		payload["context"] = map[string]interface{}{
			"httpRequest": map[string]interface{}{
				"method":    r.Method,
				"url":       r.Host + r.RequestURI,
				"userAgent": r.UserAgent(),
				"referrer":  r.Referer(),
				"remoteIp":  r.RemoteAddr,
			},
		}
	}
	e := logging.Entry{
		Severity: logging.Error,
		Payload:  payload,
	}
	err := s.logger.LogSync(ctx, e)
	if err != nil {
		log.Println("Error writing error report:", err, "report:", payload)
	}
}

func (s *loggingSender) close() error {
	return s.logger.Close()
}

func (s *errorApiSender) send(ctx context.Context, r *http.Request, message string) {
	time := time.Now()
	var errorContext *erpb.ErrorContext
	if r != nil {
		errorContext = &erpb.ErrorContext{
@@ -403,37 +188,21 @@ func (s *errorApiSender) send(ctx context.Context, r *http.Request, message string) {
			},
		}
	}
	req := erpb.ReportErrorEventRequest{
		ProjectName: s.projectID,
	return &erpb.ReportErrorEventRequest{
		ProjectName: c.projectID,
		Event: &erpb.ReportedErrorEvent{
			EventTime: &timestamp.Timestamp{
				Seconds: time.Unix(),
				Nanos:   int32(time.Nanosecond()),
			},
			ServiceContext: &s.serviceContext,
			EventTime:      ptypes.TimestampNow(),
			ServiceContext: &c.serviceContext,
			Message:        message,
			Context:        errorContext,
		},
	}
	_, err := s.apiClient.ReportErrorEvent(ctx, &req)
	if err != nil {
		log.Println("Error writing error report:", err, "report:", message)
	}
}

func (s *errorApiSender) close() error {
	return s.apiClient.Close()
}

// chopStack trims a stack trace so that the function which panics or calls
// Report is first.
func chopStack(s []byte, isPanic bool) string {
	var f []byte
	if isPanic {
		f = []byte("panic(")
	} else {
		f = []byte("cloud.google.com/go/errorreporting.(*Client).Report")
	}
func chopStack(s []byte) string {
	f := []byte("cloud.google.com/go/errorreporting.(*Client).Report")

	lfFirst := bytes.IndexByte(s, '\n')
	if lfFirst == -1 {
@@ -454,3 +223,8 @@ func chopStack(s []byte, isPanic bool) string {
	}
	return string(s[:lfFirst+1]) + string(stack)
}

type client interface {
	ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, opts ...gax.CallOption) (*erpb.ReportErrorEventResponse, error)
	Close() error
}

@@ -15,12 +15,12 @@
package errorreporting

import (
	"bytes"
	"errors"
	"log"
	"strings"
	"testing"

	"cloud.google.com/go/internal/testutil"

	gax "github.com/googleapis/gax-go"

	"golang.org/x/net/context"
@@ -28,14 +28,16 @@ import (
	erpb "google.golang.org/genproto/googleapis/devtools/clouderrorreporting/v1beta1"
)

const testProjectID = "testproject"

type fakeReportErrorsClient struct {
	req  *erpb.ReportErrorEventRequest
	fail bool
	req    *erpb.ReportErrorEventRequest
	fail   bool
	doneCh chan struct{}
}

func (c *fakeReportErrorsClient) ReportErrorEvent(ctx context.Context, req *erpb.ReportErrorEventRequest, _ ...gax.CallOption) (*erpb.ReportErrorEventResponse, error) {
	defer func() {
		close(c.doneCh)
	}()
	if c.fail {
		return nil, errors.New("request failed")
	}
@@ -47,166 +49,65 @@ func (c *fakeReportErrorsClient) Close() error {
	return nil
}

func newFakeReportErrorsClient() *fakeReportErrorsClient {
	c := &fakeReportErrorsClient{}
	c.doneCh = make(chan struct{})
	return c
}

func newTestClient(c *fakeReportErrorsClient) *Client {
	newApiInterface = func(ctx context.Context, opts ...option.ClientOption) (apiInterface, error) {
	newClient = func(ctx context.Context, opts ...option.ClientOption) (client, error) {
		return c, nil
	}
	t, err := NewClient(context.Background(), testProjectID, "myservice", "v1.000", false)
	t, err := NewClient(context.Background(), testutil.ProjID(), Config{
		ServiceName:    "myservice",
		ServiceVersion: "v1.0",
	})
	if err != nil {
		panic(err)
	}
	t.RepanicDefault = false
	return t
}

var ctx context.Context

func init() {
	ctx = context.Background()
}

func TestCatchNothing(t *testing.T) {
	fc := &fakeReportErrorsClient{}
	c := newTestClient(fc)
	defer func() {
		r := fc.req
		if r != nil {
			t.Errorf("got error report, expected none")
		}
	}()
	defer c.Catch(ctx)
}

func commonChecks(t *testing.T, req *erpb.ReportErrorEventRequest, panickingFunction string) {
func commonChecks(t *testing.T, req *erpb.ReportErrorEventRequest, fn string) {
	if req.Event.ServiceContext.Service != "myservice" {
		t.Errorf("error report didn't contain service name")
	}
	if req.Event.ServiceContext.Version != "v1.000" {
	if req.Event.ServiceContext.Version != "v1.0" {
		t.Errorf("error report didn't contain version name")
	}
	if !strings.Contains(req.Event.Message, "hello, error") {
	if !strings.Contains(req.Event.Message, "error") {
		t.Errorf("error report didn't contain message")
	}
	if !strings.Contains(req.Event.Message, panickingFunction) {
	if !strings.Contains(req.Event.Message, fn) {
		t.Errorf("error report didn't contain stack trace")
	}
}

func TestCatchPanic(t *testing.T) {
	fc := &fakeReportErrorsClient{}
	c := newTestClient(fc)
	defer func() {
		r := fc.req
		if r == nil {
			t.Fatalf("got no error report, expected one")
		}
		commonChecks(t, r, "errorreporting.TestCatchPanic")
		if !strings.Contains(r.Event.Message, "divide by zero") {
			t.Errorf("error report didn't contain recovered value")
		}
	}()
	defer c.Catch(ctx, WithMessage("hello, error"))
	var x int
	x = x / x
}

func TestCatchPanicNilClient(t *testing.T) {
	buf := new(bytes.Buffer)
	log.SetOutput(buf)
	defer func() {
		recover()
		body := buf.String()
		if !strings.Contains(body, "divide by zero") {
			t.Errorf("error report didn't contain recovered value")
		}
		if !strings.Contains(body, "hello, error") {
			t.Errorf("error report didn't contain message")
		}
		if !strings.Contains(body, "TestCatchPanicNilClient") {
			t.Errorf("error report didn't contain stack trace")
		}
	}()
	var c *Client
	defer c.Catch(ctx, WithMessage("hello, error"))
	var x int
	x = x / x
}

func TestLogFailedReports(t *testing.T) {
	fc := &fakeReportErrorsClient{fail: true}
	c := newTestClient(fc)
	buf := new(bytes.Buffer)
	log.SetOutput(buf)
	defer func() {
		recover()
		body := buf.String()
		if !strings.Contains(body, "hello, error") {
			t.Errorf("error report didn't contain message")
		}
		if !strings.Contains(body, "errorreporting.TestLogFailedReports") {
			t.Errorf("error report didn't contain stack trace")
		}
		if !strings.Contains(body, "divide by zero") {
			t.Errorf("error report didn't contain recovered value")
		}
	}()
	defer c.Catch(ctx, WithMessage("hello, error"))
	var x int
	x = x / x
}

func TestCatchNilPanic(t *testing.T) {
	fc := &fakeReportErrorsClient{}
	c := newTestClient(fc)
	defer func() {
		r := fc.req
		if r == nil {
			t.Fatalf("got no error report, expected one")
		}
		commonChecks(t, r, "errorreporting.TestCatchNilPanic")
		if !strings.Contains(r.Event.Message, "nil") {
			t.Errorf("error report didn't contain recovered value")
		}
	}()
	b := true
	defer c.Catch(ctx, WithMessage("hello, error"), PanicFlag(&b))
	panic(nil)
}

func TestNotCatchNilPanic(t *testing.T) {
	fc := &fakeReportErrorsClient{}
	c := newTestClient(fc)
	defer func() {
		r := fc.req
		if r != nil {
			t.Errorf("got error report, expected none")
		}
	}()
	defer c.Catch(ctx, WithMessage("hello, error"))
	panic(nil)
}

func TestReport(t *testing.T) {
	fc := &fakeReportErrorsClient{}
	fc := newFakeReportErrorsClient()
	c := newTestClient(fc)
	c.Report(ctx, nil, "hello, ", "error")
	c.Report(Entry{Error: errors.New("error")})

	<-fc.doneCh
	r := fc.req
	if r == nil {
		t.Fatalf("got no error report, expected one")
	}
	commonChecks(t, r, "errorreporting.TestReport")
}

func TestReportf(t *testing.T) {
	fc := &fakeReportErrorsClient{}
func TestReportSync(t *testing.T) {
	ctx := context.Background()
	fc := newFakeReportErrorsClient()
	c := newTestClient(fc)
	c.Reportf(ctx, nil, "hello, error 2+%d=%d", 2, 2+2)
	if err := c.ReportSync(ctx, Entry{Error: errors.New("error")}); err != nil {
		t.Fatalf("cannot upload errors: %v", err)
	}

	<-fc.doneCh
	r := fc.req
	if r == nil {
		t.Fatalf("got no error report, expected one")
	}
	commonChecks(t, r, "errorreporting.TestReportf")
	if !strings.Contains(r.Event.Message, "2+2=4") {
		t.Errorf("error report didn't contain formatted message")
	}
	commonChecks(t, r, "errorreporting.TestReport")
}
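The doneCh pattern in these tests is worth noting: the new Report path uploads through the bundler on a background goroutine, so the fake client signals completion by closing a channel and the test blocks on <-fc.doneCh before asserting. A minimal standalone sketch of the same idea (names illustrative):

	done := make(chan struct{})
	go func() {
		defer close(done) // signal that the asynchronous upload finished
		// ... perform the upload ...
	}()
	<-done // safe to assert on captured state now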

@@ -0,0 +1,49 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

package errorreporting_test

import (
	"errors"
	"log"

	"cloud.google.com/go/errorreporting"
	"golang.org/x/net/context"
)

func Example() {
	// Create the client.
	ctx := context.Background()
	ec, err := errorreporting.NewClient(ctx, "my-gcp-project", errorreporting.Config{
		ServiceName:    "myservice",
		ServiceVersion: "v1.0",
	})
	if err != nil {
		// TODO: handle error.
	}
	defer func() {
		if err := ec.Close(); err != nil {
			log.Printf("failed to report errors to Stackdriver: %v", err)
		}
	}()

	// Report an error.
	err = doSomething()
	if err != nil {
		ec.Report(errorreporting.Entry{
			Error: err,
		})
	}
}

func doSomething() error {
	return errors.New("something went wrong")
}
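Building on the example above, a sketch of wiring the new client into an HTTP handler so panics become reports that carry request context (assumes fmt and net/http imports; the middleware name is illustrative, and Entry.Req is the field shown in this diff):

	func withErrorReporting(ec *errorreporting.Client, next http.HandlerFunc) http.HandlerFunc {
		return func(w http.ResponseWriter, r *http.Request) {
			defer func() {
				if x := recover(); x != nil {
					ec.Report(errorreporting.Entry{
						Error: fmt.Errorf("panic: %v", x),
						Req:   r, // attach the in-flight request to the report
					})
					http.Error(w, "internal error", http.StatusInternalServerError)
				}
			}()
			next(w, r)
		}
	}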