GCP: support specifying Cloud KMS key name for backup storage locations (#1879)

GCP: support specifying Cloud KMS key for backup storage location

Signed-off-by: Steve Kriss <krisss@vmware.com>
pull/1884/head
Authored by Steve Kriss on 2019-09-17 14:35:28 -06:00; committed by Nolan Brubaker
parent b7d53d201b
commit fdd04b4d90
43 changed files with 12636 additions and 1960 deletions

Gopkg.lock (generated)

@ -2,19 +2,20 @@
[[projects]]
digest = "1:769af0c7dbdc19798e013900cfa855af9a7fda89912e019330a1dbd80a1e9a8c"
digest = "1:44d9970a8855e319dcc6f8d8bfa0d7b3a50d1a94f76bdfaac86d131892f7ba69"
name = "cloud.google.com/go"
packages = [
"compute/metadata",
"iam",
"internal",
"internal/optional",
"internal/trace",
"internal/version",
"storage",
]
pruneopts = "NUT"
revision = "44bcd0b2078ba5e7fedbeb36808d1ed893534750"
version = "v0.11.0"
revision = "6e28f1c34522dae46e9c37119b78c54471b13ac8"
version = "v0.46.2"
[[projects]]
digest = "1:623dad7b6ddc6b93f983e9852a0785ed606f804d3541fa4b6178d7055b361306"
@ -203,12 +204,12 @@
revision = "24818f796faf91cd76ec7bddd72458fbced7a6c1"
[[projects]]
branch = "master"
digest = "1:139e03a0b4ef05098c2acb7c081b2d84d9478cae11ac777f7c1f6d550efab1ca"
digest = "1:4b76f3e067eed897a45242383a2aa4d0a2fdbf73a8d00c03167dba80c43630b1"
name = "github.com/googleapis/gax-go"
packages = ["."]
packages = ["v2"]
pruneopts = "NUT"
revision = "84ed26760e7f6f80887a2fbfb50db3cc415d2cea"
revision = "bd5b16380fd03dc758d11cef74ba2e3bc8b0e8c2"
version = "v2.0.5"
[[projects]]
digest = "1:3d7c1446fc5c710351b246c0dc6700fae843ca27f5294d0bd9f68bab2a810c44"
@ -583,7 +584,7 @@
revision = "26559e0f760e39c24d730d3224364aef164ee23f"
[[projects]]
digest = "1:cb42335b4b5606082a8e93f437a8962702e7875d157ccb03fba8cdd1ca70e8c3"
digest = "1:352a8b8a41fd11320b8b2327a4c2c8f6967578a2d54886b64748c0a46d4a8b5e"
name = "google.golang.org/api"
packages = [
"compute/v1",
@ -599,8 +600,8 @@
"transport/http/internal/propagation",
]
pruneopts = "NUT"
revision = "0cbcb99a9ea0c8023c794b2693cbe1def82ed4d7"
version = "v0.3.2"
revision = "feb0267beb8644f5088a03be4d5ec3f8c7020152"
version = "v0.9.0"
[[projects]]
digest = "1:7206d98ec77c90c72ec2c405181a1dcf86965803b6dbc4f98ceab7a5047c37a9"
@ -623,11 +624,12 @@
[[projects]]
branch = "master"
digest = "1:a2059631b54cdc40db08f8c4dfb39d3c5ec442003506327df2c675a9384b7115"
digest = "1:56feb4ebffcd6c42edcff74f2d9cc1e36bee7a382eeb0740a31c66585ed804c0"
name = "google.golang.org/genproto"
packages = [
"googleapis/api/annotations",
"googleapis/iam/v1",
"googleapis/rpc/code",
"googleapis/rpc/status",
]
pruneopts = "NUT"


@ -72,11 +72,11 @@
[[constraint]]
name = "cloud.google.com/go"
version = "0.11.0"
version = "0.46.0"
[[constraint]]
name = "google.golang.org/api"
version = "~v0.3.2"
version = "~v0.9.0"
[[constraint]]
name = "golang.org/x/oauth2"


@ -0,0 +1 @@
GCP: add support for specifying a Cloud KMS key name to use for encrypting backups in a storage location.


@ -33,7 +33,10 @@ import (
"github.com/heptio/velero/pkg/cloudprovider"
)
const credentialsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS"
const (
credentialsEnvVar = "GOOGLE_APPLICATION_CREDENTIALS"
kmsKeyNameConfigKey = "kmsKeyName"
)
// bucketWriter wraps the GCP SDK functions for accessing object store so they can be faked for testing.
type bucketWriter interface {
@ -43,11 +46,15 @@ type bucketWriter interface {
}
type writer struct {
client *storage.Client
client *storage.Client
kmsKeyName string
}
func (w *writer) getWriteCloser(bucket, key string) io.WriteCloser {
return w.client.Bucket(bucket).Object(key).NewWriter(context.Background())
writer := w.client.Bucket(bucket).Object(key).NewWriter(context.Background())
writer.KMSKeyName = w.kmsKeyName
return writer
}
func (w *writer) getAttrs(bucket, key string) (*storage.ObjectAttrs, error) {
@ -67,7 +74,7 @@ func NewObjectStore(logger logrus.FieldLogger) *ObjectStore {
}
func (o *ObjectStore) Init(config map[string]string) error {
if err := cloudprovider.ValidateObjectStoreConfigKeys(config); err != nil {
if err := cloudprovider.ValidateObjectStoreConfigKeys(config, kmsKeyNameConfigKey); err != nil {
return err
}
@ -101,7 +108,10 @@ func (o *ObjectStore) Init(config map[string]string) error {
}
o.client = client
o.bucketWriter = &writer{client: o.client}
o.bucketWriter = &writer{
client: o.client,
kmsKeyName: config[kmsKeyNameConfigKey],
}
return nil
}
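For context, the Go storage SDK applies the key server-side when `Writer.KMSKeyName` is set before the first write, which is what the updated `getWriteCloser` above relies on. A minimal sketch, not part of this commit; the bucket, object, and key names are placeholders:

```go
package main

import (
	"context"
	"log"

	"cloud.google.com/go/storage"
)

func main() {
	ctx := context.Background()
	client, err := storage.NewClient(ctx)
	if err != nil {
		log.Fatal(err)
	}
	defer client.Close()

	// Same field the plugin's writer sets; an empty string means no
	// customer-managed key, i.e. Google-managed encryption.
	w := client.Bucket("example-bucket").Object("backups/example.tar.gz").NewWriter(ctx)
	w.KMSKeyName = "projects/P/locations/L/keyRings/R/cryptoKeys/K"

	if _, err := w.Write([]byte("payload")); err != nil {
		log.Fatal(err)
	}
	if err := w.Close(); err != nil {
		log.Fatal(err)
	}
}
```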


@ -67,7 +67,9 @@ The configurable parameters are as follows:
#### GCP
No parameters required.
| Key | Type | Default | Meaning |
| --- | --- | --- | --- |
| `kmsKeyName` | string | Empty | Name of the Cloud KMS key to use to encrypt backups stored in this location, in the form `projects/P/locations/L/keyRings/R/cryptoKeys/K`. See [customer-managed Cloud KMS keys](https://cloud.google.com/storage/docs/encryption/using-customer-managed-keys) for details. |
[0]: #aws
[1]: #gcp
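To illustrate how the new setting reaches the plugin: Velero hands the backup storage location's `config` map to the object store's `Init`, so `kmsKeyName` arrives as a plain string entry. A hedged sketch, not part of this commit, assuming the plugin package lives at `github.com/heptio/velero/pkg/cloudprovider/gcp`; running it also requires valid credentials in `GOOGLE_APPLICATION_CREDENTIALS`:

```go
package main

import (
	"log"

	"github.com/heptio/velero/pkg/cloudprovider/gcp"
	"github.com/sirupsen/logrus"
)

func main() {
	store := gcp.NewObjectStore(logrus.New())

	// kmsKeyName is the config key documented in the table above; the value
	// here is a placeholder Cloud KMS key resource name.
	cfg := map[string]string{
		"kmsKeyName": "projects/P/locations/L/keyRings/R/cryptoKeys/K",
	}
	if err := store.Init(cfg); err != nil {
		log.Fatalf("initializing GCP object store: %v", err)
	}
}
```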

vendor/cloud.google.com/go/AUTHORS (generated, vendored)

@ -1,15 +0,0 @@
# This is the official list of cloud authors for copyright purposes.
# This file is distinct from the CONTRIBUTORS files.
# See the latter for an explanation.
# Names should be added to this file as:
# Name or Organization <email address>
# The email address is not required for organizations.
Filippo Valsorda <hi@filippo.io>
Google Inc.
Ingo Oeser <nightlyone@googlemail.com>
Palm Stone Games, Inc.
Paweł Knap <pawelknap88@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Tyler Treat <ttreat31@gmail.com>


@ -1,37 +0,0 @@
# People who have agreed to one of the CLAs and can contribute patches.
# The AUTHORS file lists the copyright holders; this file
# lists people. For example, Google employees are listed here
# but not in AUTHORS, because Google holds the copyright.
#
# https://developers.google.com/open-source/cla/individual
# https://developers.google.com/open-source/cla/corporate
#
# Names should be added to this file as:
# Name <email address>
# Keep the list alphabetically sorted.
Alexis Hunt <lexer@google.com>
Andreas Litt <andreas.litt@gmail.com>
Andrew Gerrand <adg@golang.org>
Brad Fitzpatrick <bradfitz@golang.org>
Burcu Dogan <jbd@google.com>
Dave Day <djd@golang.org>
David Sansome <me@davidsansome.com>
David Symonds <dsymonds@golang.org>
Filippo Valsorda <hi@filippo.io>
Glenn Lewis <gmlewis@google.com>
Ingo Oeser <nightlyone@googlemail.com>
Johan Euphrosine <proppy@google.com>
Jonathan Amsterdam <jba@google.com>
Luna Duclos <luna.duclos@palmstonegames.com>
Magnus Hiie <magnus.hiie@gmail.com>
Michael McGreevy <mcgreevy@golang.org>
Omar Jarjur <ojarjur@google.com>
Paweł Knap <pawelknap88@gmail.com>
Péter Szilágyi <peterke@gmail.com>
Sarah Adams <shadams@google.com>
Thanatat Tamtan <acoshift@gmail.com>
Toby Burress <kurin@google.com>
Tuo Shan <shantuo@google.com>
Tyler Treat <ttreat31@gmail.com>

vendor/cloud.google.com/go/LICENSE (generated, vendored)

@ -187,7 +187,7 @@
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright 2014 Google Inc.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.


@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved.
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -20,6 +20,7 @@
package metadata // import "cloud.google.com/go/compute/metadata"
import (
"context"
"encoding/json"
"fmt"
"io/ioutil"
@ -31,9 +32,6 @@ import (
"strings"
"sync"
"time"
"golang.org/x/net/context"
"golang.org/x/net/context/ctxhttp"
)
const (
@ -64,7 +62,7 @@ var (
)
var (
metaClient = &http.Client{
defaultClient = &Client{hc: &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
@ -72,15 +70,15 @@ var (
}).Dial,
ResponseHeaderTimeout: 2 * time.Second,
},
}
subscribeClient = &http.Client{
}}
subscribeClient = &Client{hc: &http.Client{
Transport: &http.Transport{
Dial: (&net.Dialer{
Timeout: 2 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
},
}
}}
)
// NotDefinedError is returned when requested metadata is not defined.
@ -95,74 +93,16 @@ func (suffix NotDefinedError) Error() string {
return fmt.Sprintf("metadata: GCE metadata %q not defined", string(suffix))
}
// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func Get(suffix string) (string, error) {
val, _, err := getETag(metaClient, suffix)
return val, err
}
// getETag returns a value from the metadata service as well as the associated
// ETag using the provided client. This func is otherwise equivalent to Get.
func getETag(client *http.Client, suffix string) (value, etag string, err error) {
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
// variable GCE_METADATA_HOST is first inspected to decide where metadata
// requests shall go.
host := os.Getenv(metadataHostEnv)
if host == "" {
// Using 169.254.169.254 instead of "metadata" here because Go
// binaries built with the "netgo" tag and without cgo won't
// know the search suffix for "metadata" is
// ".google.internal", and this IP address is documented as
// being stable anyway.
host = metadataIP
}
url := "http://" + host + "/computeMetadata/v1/" + suffix
req, _ := http.NewRequest("GET", url, nil)
req.Header.Set("Metadata-Flavor", "Google")
req.Header.Set("User-Agent", userAgent)
res, err := client.Do(req)
if err != nil {
return "", "", err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
if res.StatusCode != 200 {
return "", "", fmt.Errorf("status code %d trying to fetch %s", res.StatusCode, url)
}
all, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", err
}
return string(all), res.Header.Get("Etag"), nil
}
func getTrimmed(suffix string) (s string, err error) {
s, err = Get(suffix)
s = strings.TrimSpace(s)
return
}
func (c *cachedValue) get() (v string, err error) {
func (c *cachedValue) get(cl *Client) (v string, err error) {
defer c.mu.Unlock()
c.mu.Lock()
if c.v != "" {
return c.v, nil
}
if c.trim {
v, err = getTrimmed(c.k)
v, err = cl.getTrimmed(c.k)
} else {
v, err = Get(c.k)
v, err = cl.Get(c.k)
}
if err == nil {
c.v = v
@ -197,11 +137,11 @@ func testOnGCE() bool {
resc := make(chan bool, 2)
// Try two strategies in parallel.
// See https://github.com/GoogleCloudPlatform/google-cloud-go/issues/194
// See https://github.com/googleapis/google-cloud-go/issues/194
go func() {
req, _ := http.NewRequest("GET", "http://"+metadataIP, nil)
req.Header.Set("User-Agent", userAgent)
res, err := ctxhttp.Do(ctx, metaClient, req)
res, err := defaultClient.hc.Do(req.WithContext(ctx))
if err != nil {
resc <- false
return
@ -266,6 +206,268 @@ func systemInfoSuggestsGCE() bool {
return name == "Google" || name == "Google Compute Engine"
}
// Subscribe calls Client.Subscribe on a client designed for subscribing (one with no
// ResponseHeaderTimeout).
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
return subscribeClient.Subscribe(suffix, fn)
}
// Get calls Client.Get on the default client.
func Get(suffix string) (string, error) { return defaultClient.Get(suffix) }
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return defaultClient.ProjectID() }
// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return defaultClient.NumericProjectID() }
// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) { return defaultClient.InternalIP() }
// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) { return defaultClient.ExternalIP() }
// Email calls Client.Email on the default client.
func Email(serviceAccount string) (string, error) { return defaultClient.Email(serviceAccount) }
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) { return defaultClient.Hostname() }
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) { return defaultClient.InstanceTags() }
// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) { return defaultClient.InstanceID() }
// InstanceName returns the current VM's instance ID string.
func InstanceName() (string, error) { return defaultClient.InstanceName() }
// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) { return defaultClient.Zone() }
// InstanceAttributes calls Client.InstanceAttributes on the default client.
func InstanceAttributes() ([]string, error) { return defaultClient.InstanceAttributes() }
// ProjectAttributes calls Client.ProjectAttributes on the default client.
func ProjectAttributes() ([]string, error) { return defaultClient.ProjectAttributes() }
// InstanceAttributeValue calls Client.InstanceAttributeValue on the default client.
func InstanceAttributeValue(attr string) (string, error) {
return defaultClient.InstanceAttributeValue(attr)
}
// ProjectAttributeValue calls Client.ProjectAttributeValue on the default client.
func ProjectAttributeValue(attr string) (string, error) {
return defaultClient.ProjectAttributeValue(attr)
}
// Scopes calls Client.Scopes on the default client.
func Scopes(serviceAccount string) ([]string, error) { return defaultClient.Scopes(serviceAccount) }
func strsContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
}
// A Client provides metadata.
type Client struct {
hc *http.Client
}
// NewClient returns a Client that can be used to fetch metadata. All HTTP requests
// will use the given http.Client instead of the default client.
func NewClient(c *http.Client) *Client {
return &Client{hc: c}
}
// getETag returns a value from the metadata service as well as the associated ETag.
// This func is otherwise equivalent to Get.
func (c *Client) getETag(suffix string) (value, etag string, err error) {
// Using a fixed IP makes it very difficult to spoof the metadata service in
// a container, which is an important use-case for local testing of cloud
// deployments. To enable spoofing of the metadata service, the environment
// variable GCE_METADATA_HOST is first inspected to decide where metadata
// requests shall go.
host := os.Getenv(metadataHostEnv)
if host == "" {
// Using 169.254.169.254 instead of "metadata" here because Go
// binaries built with the "netgo" tag and without cgo won't
// know the search suffix for "metadata" is
// ".google.internal", and this IP address is documented as
// being stable anyway.
host = metadataIP
}
u := "http://" + host + "/computeMetadata/v1/" + suffix
req, _ := http.NewRequest("GET", u, nil)
req.Header.Set("Metadata-Flavor", "Google")
req.Header.Set("User-Agent", userAgent)
res, err := c.hc.Do(req)
if err != nil {
return "", "", err
}
defer res.Body.Close()
if res.StatusCode == http.StatusNotFound {
return "", "", NotDefinedError(suffix)
}
all, err := ioutil.ReadAll(res.Body)
if err != nil {
return "", "", err
}
if res.StatusCode != 200 {
return "", "", &Error{Code: res.StatusCode, Message: string(all)}
}
return string(all), res.Header.Get("Etag"), nil
}
// Get returns a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
//
// If the GCE_METADATA_HOST environment variable is not defined, a default of
// 169.254.169.254 will be used instead.
//
// If the requested metadata is not defined, the returned error will
// be of type NotDefinedError.
func (c *Client) Get(suffix string) (string, error) {
val, _, err := c.getETag(suffix)
return val, err
}
func (c *Client) getTrimmed(suffix string) (s string, err error) {
s, err = c.Get(suffix)
s = strings.TrimSpace(s)
return
}
func (c *Client) lines(suffix string) ([]string, error) {
j, err := c.Get(suffix)
if err != nil {
return nil, err
}
s := strings.Split(strings.TrimSpace(j), "\n")
for i := range s {
s[i] = strings.TrimSpace(s[i])
}
return s, nil
}
// ProjectID returns the current instance's project ID string.
func (c *Client) ProjectID() (string, error) { return projID.get(c) }
// NumericProjectID returns the current instance's numeric project ID.
func (c *Client) NumericProjectID() (string, error) { return projNum.get(c) }
// InstanceID returns the current VM's numeric instance ID.
func (c *Client) InstanceID() (string, error) { return instID.get(c) }
// InternalIP returns the instance's primary internal IP address.
func (c *Client) InternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/ip")
}
// Email returns the email address associated with the service account.
// The account may be empty or the string "default" to use the instance's
// main account.
func (c *Client) Email(serviceAccount string) (string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return c.getTrimmed("instance/service-accounts/" + serviceAccount + "/email")
}
// ExternalIP returns the instance's primary external (public) IP address.
func (c *Client) ExternalIP() (string, error) {
return c.getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func (c *Client) Hostname() (string, error) {
return c.getTrimmed("instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func (c *Client) InstanceTags() ([]string, error) {
var s []string
j, err := c.Get("instance/tags")
if err != nil {
return nil, err
}
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
return nil, err
}
return s, nil
}
// InstanceName returns the current VM's instance ID string.
func (c *Client) InstanceName() (string, error) {
host, err := c.Hostname()
if err != nil {
return "", err
}
return strings.Split(host, ".")[0], nil
}
// Zone returns the current VM's zone, such as "us-central1-b".
func (c *Client) Zone() (string, error) {
zone, err := c.getTrimmed("instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
}
return zone[strings.LastIndex(zone, "/")+1:], nil
}
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func (c *Client) InstanceAttributes() ([]string, error) { return c.lines("instance/attributes/") }
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func (c *Client) ProjectAttributes() ([]string, error) { return c.lines("project/attributes/") }
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) InstanceAttributeValue(attr string) (string, error) {
return c.Get("instance/attributes/" + attr)
}
// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func (c *Client) ProjectAttributeValue(attr string) (string, error) {
return c.Get("project/attributes/" + attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func (c *Client) Scopes(serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return c.lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
// Subscribe subscribes to a value from the metadata service.
// The suffix is appended to "http://${GCE_METADATA_HOST}/computeMetadata/v1/".
// The suffix may contain query parameters.
@ -275,11 +477,11 @@ func systemInfoSuggestsGCE() bool {
// and ok false. Subscribe blocks until fn returns a non-nil error or the value
// is deleted. Subscribe returns the error value returned from the last call to
// fn, which may be nil when ok == false.
func Subscribe(suffix string, fn func(v string, ok bool) error) error {
func (c *Client) Subscribe(suffix string, fn func(v string, ok bool) error) error {
const failedSubscribeSleep = time.Second * 5
// First check to see if the metadata value exists at all.
val, lastETag, err := getETag(subscribeClient, suffix)
val, lastETag, err := c.getETag(suffix)
if err != nil {
return err
}
@ -295,7 +497,7 @@ func Subscribe(suffix string, fn func(v string, ok bool) error) error {
suffix += "?wait_for_change=true&last_etag="
}
for {
val, etag, err := getETag(subscribeClient, suffix+url.QueryEscape(lastETag))
val, etag, err := c.getETag(suffix + url.QueryEscape(lastETag))
if err != nil {
if _, deleted := err.(NotDefinedError); !deleted {
time.Sleep(failedSubscribeSleep)
@ -311,127 +513,14 @@ func Subscribe(suffix string, fn func(v string, ok bool) error) error {
}
}
// ProjectID returns the current instance's project ID string.
func ProjectID() (string, error) { return projID.get() }
// NumericProjectID returns the current instance's numeric project ID.
func NumericProjectID() (string, error) { return projNum.get() }
// InternalIP returns the instance's primary internal IP address.
func InternalIP() (string, error) {
return getTrimmed("instance/network-interfaces/0/ip")
// Error contains an error response from the server.
type Error struct {
// Code is the HTTP response status code.
Code int
// Message is the server response message.
Message string
}
// ExternalIP returns the instance's primary external (public) IP address.
func ExternalIP() (string, error) {
return getTrimmed("instance/network-interfaces/0/access-configs/0/external-ip")
}
// Hostname returns the instance's hostname. This will be of the form
// "<instanceID>.c.<projID>.internal".
func Hostname() (string, error) {
return getTrimmed("instance/hostname")
}
// InstanceTags returns the list of user-defined instance tags,
// assigned when initially creating a GCE instance.
func InstanceTags() ([]string, error) {
var s []string
j, err := Get("instance/tags")
if err != nil {
return nil, err
}
if err := json.NewDecoder(strings.NewReader(j)).Decode(&s); err != nil {
return nil, err
}
return s, nil
}
// InstanceID returns the current VM's numeric instance ID.
func InstanceID() (string, error) {
return instID.get()
}
// InstanceName returns the current VM's instance ID string.
func InstanceName() (string, error) {
host, err := Hostname()
if err != nil {
return "", err
}
return strings.Split(host, ".")[0], nil
}
// Zone returns the current VM's zone, such as "us-central1-b".
func Zone() (string, error) {
zone, err := getTrimmed("instance/zone")
// zone is of the form "projects/<projNum>/zones/<zoneName>".
if err != nil {
return "", err
}
return zone[strings.LastIndex(zone, "/")+1:], nil
}
// InstanceAttributes returns the list of user-defined attributes,
// assigned when initially creating a GCE VM instance. The value of an
// attribute can be obtained with InstanceAttributeValue.
func InstanceAttributes() ([]string, error) { return lines("instance/attributes/") }
// ProjectAttributes returns the list of user-defined attributes
// applying to the project as a whole, not just this VM. The value of
// an attribute can be obtained with ProjectAttributeValue.
func ProjectAttributes() ([]string, error) { return lines("project/attributes/") }
func lines(suffix string) ([]string, error) {
j, err := Get(suffix)
if err != nil {
return nil, err
}
s := strings.Split(strings.TrimSpace(j), "\n")
for i := range s {
s[i] = strings.TrimSpace(s[i])
}
return s, nil
}
// InstanceAttributeValue returns the value of the provided VM
// instance attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// InstanceAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func InstanceAttributeValue(attr string) (string, error) {
return Get("instance/attributes/" + attr)
}
// ProjectAttributeValue returns the value of the provided
// project attribute.
//
// If the requested attribute is not defined, the returned error will
// be of type NotDefinedError.
//
// ProjectAttributeValue may return ("", nil) if the attribute was
// defined to be the empty string.
func ProjectAttributeValue(attr string) (string, error) {
return Get("project/attributes/" + attr)
}
// Scopes returns the service account scopes for the given account.
// The account may be empty or the string "default" to use the instance's
// main account.
func Scopes(serviceAccount string) ([]string, error) {
if serviceAccount == "" {
serviceAccount = "default"
}
return lines("instance/service-accounts/" + serviceAccount + "/scopes")
}
func strsContains(ss []string, s string) bool {
for _, v := range ss {
if v == s {
return true
}
}
return false
func (e *Error) Error() string {
return fmt.Sprintf("compute: Received %d `%s`", e.Code, e.Message)
}
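The rewrite above replaces package-level helpers that used a shared `metaClient` with a `Client` type, with the package-level functions delegating to `defaultClient`. A small usage sketch, not part of this commit, using only the public API shown above (the timeout value is arbitrary):

```go
package main

import (
	"fmt"
	"log"
	"net/http"
	"time"

	"cloud.google.com/go/compute/metadata"
)

func main() {
	if !metadata.OnGCE() {
		log.Fatal("not running on GCE; the metadata server is unreachable")
	}

	// Callers can now supply their own http.Client instead of relying on the
	// package-level defaultClient.
	c := metadata.NewClient(&http.Client{Timeout: 3 * time.Second})

	projID, err := c.ProjectID()
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println("project:", projID)
}
```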


@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -22,9 +22,15 @@
package iam
import (
"golang.org/x/net/context"
"context"
"fmt"
"time"
gax "github.com/googleapis/gax-go/v2"
pb "google.golang.org/genproto/googleapis/iam/v1"
"google.golang.org/grpc"
"google.golang.org/grpc/codes"
"google.golang.org/grpc/metadata"
)
// client abstracts the IAMPolicy API to allow multiple implementations.
@ -39,26 +45,59 @@ type grpcClient struct {
c pb.IAMPolicyClient
}
var withRetry = gax.WithRetry(func() gax.Retryer {
return gax.OnCodes([]codes.Code{
codes.DeadlineExceeded,
codes.Unavailable,
}, gax.Backoff{
Initial: 100 * time.Millisecond,
Max: 60 * time.Second,
Multiplier: 1.3,
})
})
func (g *grpcClient) Get(ctx context.Context, resource string) (*pb.Policy, error) {
proto, err := g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource})
var proto *pb.Policy
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
ctx = insertMetadata(ctx, md)
err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
var err error
proto, err = g.c.GetIamPolicy(ctx, &pb.GetIamPolicyRequest{Resource: resource})
return err
}, withRetry)
if err != nil {
return nil, err
}
return proto, nil
}
func (g *grpcClient) Set(ctx context.Context, resource string, p *pb.Policy) error {
_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
Resource: resource,
Policy: p,
})
return err
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
ctx = insertMetadata(ctx, md)
return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
_, err := g.c.SetIamPolicy(ctx, &pb.SetIamPolicyRequest{
Resource: resource,
Policy: p,
})
return err
}, withRetry)
}
func (g *grpcClient) Test(ctx context.Context, resource string, perms []string) ([]string, error) {
res, err := g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
Resource: resource,
Permissions: perms,
})
var res *pb.TestIamPermissionsResponse
md := metadata.Pairs("x-goog-request-params", fmt.Sprintf("%s=%v", "resource", resource))
ctx = insertMetadata(ctx, md)
err := gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
var err error
res, err = g.c.TestIamPermissions(ctx, &pb.TestIamPermissionsRequest{
Resource: resource,
Permissions: perms,
})
return err
}, withRetry)
if err != nil {
return nil, err
}
@ -76,7 +115,15 @@ type Handle struct {
// InternalNewHandle returns a Handle for resource.
// The conn parameter refers to a server that must support the IAMPolicy service.
func InternalNewHandle(conn *grpc.ClientConn, resource string) *Handle {
return InternalNewHandleClient(&grpcClient{c: pb.NewIAMPolicyClient(conn)}, resource)
return InternalNewHandleGRPCClient(pb.NewIAMPolicyClient(conn), resource)
}
// InternalNewHandleGRPCClient is for use by the Google Cloud Libraries only.
//
// InternalNewHandleClient returns a Handle for resource using the given
// grpc service that implements IAM as a mixin
func InternalNewHandleGRPCClient(c pb.IAMPolicyClient, resource string) *Handle {
return InternalNewHandleClient(&grpcClient{c: c}, resource)
}
// InternalNewHandleClient is for use by the Google Cloud Libraries only.
@ -254,3 +301,15 @@ func memberIndex(m string, b *pb.Binding) int {
}
return -1
}
// insertMetadata inserts metadata into the given context
func insertMetadata(ctx context.Context, mds ...metadata.MD) context.Context {
out, _ := metadata.FromOutgoingContext(ctx)
out = out.Copy()
for _, md := range mds {
for k, v := range md {
out[k] = append(out[k], v...)
}
}
return metadata.NewOutgoingContext(ctx, out)
}
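The retry handling added above is the gax-go v2 pattern: build a `Retryer` from `gax.OnCodes` plus a `Backoff`, then wrap the RPC in `gax.Invoke`. A standalone sketch of that pattern, not part of this commit, with a fake RPC standing in for the `IAMPolicyClient` calls:

```go
package main

import (
	"context"
	"fmt"
	"time"

	gax "github.com/googleapis/gax-go/v2"
	"google.golang.org/grpc/codes"
	"google.golang.org/grpc/status"
)

// fakeRPC stands in for a gRPC call: it returns Unavailable twice so the
// retryer has something to retry, then succeeds.
func fakeRPC(attempts *int) error {
	*attempts++
	if *attempts < 3 {
		return status.Error(codes.Unavailable, "try again")
	}
	return nil
}

func main() {
	withRetry := gax.WithRetry(func() gax.Retryer {
		return gax.OnCodes([]codes.Code{
			codes.DeadlineExceeded,
			codes.Unavailable,
		}, gax.Backoff{
			Initial:    100 * time.Millisecond,
			Max:        time.Second,
			Multiplier: 1.3,
		})
	})

	var attempts int
	err := gax.Invoke(context.Background(), func(ctx context.Context, _ gax.CallSettings) error {
		return fakeRPC(&attempts)
	}, withRetry)
	if err != nil {
		fmt.Println("failed:", err)
		return
	}
	fmt.Printf("succeeded after %d attempts\n", attempts)
}
```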

vendor/cloud.google.com/go/internal/annotate.go (generated, vendored, new file)

@ -0,0 +1,54 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package internal
import (
"fmt"
"google.golang.org/api/googleapi"
"google.golang.org/grpc/status"
)
// Annotate prepends msg to the error message in err, attempting
// to preserve other information in err, like an error code.
//
// Annotate panics if err is nil.
//
// Annotate knows about these error types:
// - "google.golang.org/grpc/status".Status
// - "google.golang.org/api/googleapi".Error
// If the error is not one of these types, Annotate behaves
// like
// fmt.Errorf("%s: %v", msg, err)
func Annotate(err error, msg string) error {
if err == nil {
panic("Annotate called with nil")
}
if s, ok := status.FromError(err); ok {
p := s.Proto()
p.Message = msg + ": " + p.Message
return status.ErrorProto(p)
}
if g, ok := err.(*googleapi.Error); ok {
g.Message = msg + ": " + g.Message
return g
}
return fmt.Errorf("%s: %v", msg, err)
}
// Annotatef uses format and args to format a string, then calls Annotate.
func Annotatef(err error, format string, args ...interface{}) error {
return Annotate(err, fmt.Sprintf(format, args...))
}


@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -20,6 +20,7 @@ package optional
import (
"fmt"
"strings"
"time"
)
type (
@ -37,6 +38,9 @@ type (
// Float64 is either a float64 or nil.
Float64 interface{}
// Duration is either a time.Duration or nil.
Duration interface{}
)
// ToBool returns its argument as a bool.
@ -89,6 +93,16 @@ func ToFloat64(v Float64) float64 {
return x
}
// ToDuration returns its argument as a time.Duration.
// It panics if its argument is nil or not a time.Duration.
func ToDuration(v Duration) time.Duration {
x, ok := v.(time.Duration)
if !ok {
doPanic("Duration", v)
}
return x
}
func doPanic(capType string, v interface{}) {
panic(fmt.Sprintf("optional.%s value should be %s, got %T", capType, strings.ToLower(capType), v))
}


@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -15,12 +15,10 @@
package internal
import (
"fmt"
"context"
"time"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
gax "github.com/googleapis/gax-go/v2"
)
// Retry calls the supplied function f repeatedly according to the provided
@ -48,7 +46,7 @@ func retry(ctx context.Context, bo gax.Backoff, f func() (stop bool, err error),
p := bo.Pause()
if cerr := sleep(ctx, p); cerr != nil {
if lastErr != nil {
return fmt.Errorf("%v; last function err: %v", cerr, lastErr)
return Annotatef(lastErr, "retry failed with %v; last error", cerr)
}
return cerr
}

vendor/cloud.google.com/go/internal/trace/trace.go (generated, vendored, new file)

@ -0,0 +1,109 @@
// Copyright 2018 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package trace
import (
"context"
"fmt"
"go.opencensus.io/trace"
"google.golang.org/api/googleapi"
"google.golang.org/genproto/googleapis/rpc/code"
"google.golang.org/grpc/status"
)
// StartSpan adds a span to the trace with the given name.
func StartSpan(ctx context.Context, name string) context.Context {
ctx, _ = trace.StartSpan(ctx, name)
return ctx
}
// EndSpan ends a span with the given error.
func EndSpan(ctx context.Context, err error) {
span := trace.FromContext(ctx)
if err != nil {
span.SetStatus(toStatus(err))
}
span.End()
}
// toStatus interrogates an error and converts it to an appropriate
// OpenCensus status.
func toStatus(err error) trace.Status {
if err2, ok := err.(*googleapi.Error); ok {
return trace.Status{Code: httpStatusCodeToOCCode(err2.Code), Message: err2.Message}
} else if s, ok := status.FromError(err); ok {
return trace.Status{Code: int32(s.Code()), Message: s.Message()}
} else {
return trace.Status{Code: int32(code.Code_UNKNOWN), Message: err.Error()}
}
}
// TODO(deklerk): switch to using OpenCensus function when it becomes available.
// Reference: https://github.com/googleapis/googleapis/blob/26b634d2724ac5dd30ae0b0cbfb01f07f2e4050e/google/rpc/code.proto
func httpStatusCodeToOCCode(httpStatusCode int) int32 {
switch httpStatusCode {
case 200:
return int32(code.Code_OK)
case 499:
return int32(code.Code_CANCELLED)
case 500:
return int32(code.Code_UNKNOWN) // Could also be Code_INTERNAL, Code_DATA_LOSS
case 400:
return int32(code.Code_INVALID_ARGUMENT) // Could also be Code_OUT_OF_RANGE
case 504:
return int32(code.Code_DEADLINE_EXCEEDED)
case 404:
return int32(code.Code_NOT_FOUND)
case 409:
return int32(code.Code_ALREADY_EXISTS) // Could also be Code_ABORTED
case 403:
return int32(code.Code_PERMISSION_DENIED)
case 401:
return int32(code.Code_UNAUTHENTICATED)
case 429:
return int32(code.Code_RESOURCE_EXHAUSTED)
case 501:
return int32(code.Code_UNIMPLEMENTED)
case 503:
return int32(code.Code_UNAVAILABLE)
default:
return int32(code.Code_UNKNOWN)
}
}
// TODO: (odeke-em): perhaps just pass around spans due to the cost
// incurred from using trace.FromContext(ctx) yet we could avoid
// throwing away the work done by ctx, span := trace.StartSpan.
func TracePrintf(ctx context.Context, attrMap map[string]interface{}, format string, args ...interface{}) {
var attrs []trace.Attribute
for k, v := range attrMap {
var a trace.Attribute
switch v := v.(type) {
case string:
a = trace.StringAttribute(k, v)
case bool:
a = trace.BoolAttribute(k, v)
case int:
a = trace.Int64Attribute(k, int64(v))
case int64:
a = trace.Int64Attribute(k, v)
default:
a = trace.StringAttribute(k, fmt.Sprintf("%#v", v))
}
attrs = append(attrs, a)
}
trace.FromContext(ctx).Annotatef(attrs, format, args...)
}
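The `StartSpan`/`EndSpan` helpers above are thin wrappers over OpenCensus, and the storage diffs below use them with a named `err` return plus a deferred `EndSpan`. A sketch of that idiom, not part of this commit, written directly against `go.opencensus.io/trace` since the internal package cannot be imported from outside the module:

```go
package main

import (
	"context"
	"errors"
	"fmt"

	"go.opencensus.io/trace"
)

// doWork mirrors the idiom in the storage code below: start a span, capture
// the named return error, and record it on the span when the function exits.
func doWork(ctx context.Context) (err error) {
	ctx, span := trace.StartSpan(ctx, "example.doWork")
	defer func() {
		if err != nil {
			// Code 2 is UNKNOWN in the google.rpc.Code enum.
			span.SetStatus(trace.Status{Code: 2, Message: err.Error()})
		}
		span.End()
	}()

	_ = ctx // the real code passes ctx on to the API call
	return errors.New("simulated failure")
}

func main() {
	fmt.Println(doWork(context.Background()))
}
```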


@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -26,7 +26,7 @@ import (
// Repo is the current version of the client libraries in this
// repo. It should be a date in YYYYMMDD format.
const Repo = "20170621"
const Repo = "20190802"
// Go returns the Go runtime version. The returned string
// has no whitespace.
@ -67,5 +67,5 @@ func goVer(s string) string {
}
func notSemverRune(r rune) bool {
return strings.IndexRune("0123456789.", r) < 0
return !strings.ContainsRune("0123456789.", r)
}


@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved.
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -15,11 +15,11 @@
package storage
import (
"fmt"
"context"
"net/http"
"reflect"
"golang.org/x/net/context"
"cloud.google.com/go/internal/trace"
"google.golang.org/api/googleapi"
raw "google.golang.org/api/storage/v1"
)
@ -48,10 +48,21 @@ const (
AllAuthenticatedUsers ACLEntity = "allAuthenticatedUsers"
)
// ACLRule represents a grant for a role to an entity (user, group or team) for a Google Cloud Storage object or bucket.
// ACLRule represents a grant for a role to an entity (user, group or team) for a
// Google Cloud Storage object or bucket.
type ACLRule struct {
Entity ACLEntity
Role ACLRole
Entity ACLEntity
EntityID string
Role ACLRole
Domain string
Email string
ProjectTeam *ProjectTeam
}
// ProjectTeam is the project team associated with the entity, if any.
type ProjectTeam struct {
ProjectNumber string
Team string
}
// ACLHandle provides operations on an access control list for a Google Cloud Storage bucket or object.
@ -64,7 +75,10 @@ type ACLHandle struct {
}
// Delete permanently deletes the ACL entry for the given entity.
func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error {
func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Delete")
defer func() { trace.EndSpan(ctx, err) }()
if a.object != "" {
return a.objectDelete(ctx, entity)
}
@ -74,8 +88,11 @@ func (a *ACLHandle) Delete(ctx context.Context, entity ACLEntity) error {
return a.bucketDelete(ctx, entity)
}
// Set sets the permission level for the given entity.
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) error {
// Set sets the role for the given entity.
func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.Set")
defer func() { trace.EndSpan(ctx, err) }()
if a.object != "" {
return a.objectSet(ctx, entity, role, false)
}
@ -86,7 +103,10 @@ func (a *ACLHandle) Set(ctx context.Context, entity ACLEntity, role ACLRole) err
}
// List retrieves ACL entries.
func (a *ACLHandle) List(ctx context.Context) ([]ACLRule, error) {
func (a *ACLHandle) List(ctx context.Context) (rules []ACLRule, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.ACL.List")
defer func() { trace.EndSpan(ctx, err) }()
if a.object != "" {
return a.objectList(ctx)
}
@ -101,26 +121,22 @@ func (a *ACLHandle) bucketDefaultList(ctx context.Context) ([]ACLRule, error) {
var err error
err = runWithRetry(ctx, func() error {
req := a.c.raw.DefaultObjectAccessControls.List(a.bucket)
a.configureCall(req, ctx)
a.configureCall(ctx, req)
acls, err = req.Do()
return err
})
if err != nil {
return nil, fmt.Errorf("storage: error listing default object ACL for bucket %q: %v", a.bucket, err)
return nil, err
}
return toACLRules(acls.Items), nil
return toObjectACLRules(acls.Items), nil
}
func (a *ACLHandle) bucketDefaultDelete(ctx context.Context, entity ACLEntity) error {
err := runWithRetry(ctx, func() error {
return runWithRetry(ctx, func() error {
req := a.c.raw.DefaultObjectAccessControls.Delete(a.bucket, string(entity))
a.configureCall(req, ctx)
a.configureCall(ctx, req)
return req.Do()
})
if err != nil {
return fmt.Errorf("storage: error deleting default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
}
return nil
}
func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
@ -128,19 +144,14 @@ func (a *ACLHandle) bucketList(ctx context.Context) ([]ACLRule, error) {
var err error
err = runWithRetry(ctx, func() error {
req := a.c.raw.BucketAccessControls.List(a.bucket)
a.configureCall(req, ctx)
a.configureCall(ctx, req)
acls, err = req.Do()
return err
})
if err != nil {
return nil, fmt.Errorf("storage: error listing bucket ACL for bucket %q: %v", a.bucket, err)
return nil, err
}
r := make([]ACLRule, len(acls.Items))
for i, v := range acls.Items {
r[i].Entity = ACLEntity(v.Entity)
r[i].Role = ACLRole(v.Role)
}
return r, nil
return toBucketACLRules(acls.Items), nil
}
func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRole) error {
@ -151,26 +162,22 @@ func (a *ACLHandle) bucketSet(ctx context.Context, entity ACLEntity, role ACLRol
}
err := runWithRetry(ctx, func() error {
req := a.c.raw.BucketAccessControls.Update(a.bucket, string(entity), acl)
a.configureCall(req, ctx)
a.configureCall(ctx, req)
_, err := req.Do()
return err
})
if err != nil {
return fmt.Errorf("storage: error updating bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
return err
}
return nil
}
func (a *ACLHandle) bucketDelete(ctx context.Context, entity ACLEntity) error {
err := runWithRetry(ctx, func() error {
return runWithRetry(ctx, func() error {
req := a.c.raw.BucketAccessControls.Delete(a.bucket, string(entity))
a.configureCall(req, ctx)
a.configureCall(ctx, req)
return req.Do()
})
if err != nil {
return fmt.Errorf("storage: error deleting bucket ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
}
return nil
}
func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
@ -178,14 +185,14 @@ func (a *ACLHandle) objectList(ctx context.Context) ([]ACLRule, error) {
var err error
err = runWithRetry(ctx, func() error {
req := a.c.raw.ObjectAccessControls.List(a.bucket, a.object)
a.configureCall(req, ctx)
a.configureCall(ctx, req)
acls, err = req.Do()
return err
})
if err != nil {
return nil, fmt.Errorf("storage: error listing object ACL for bucket %q, file %q: %v", a.bucket, a.object, err)
return nil, err
}
return toACLRules(acls.Items), nil
return toObjectACLRules(acls.Items), nil
}
func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRole, isBucketDefault bool) error {
@ -205,36 +212,22 @@ func (a *ACLHandle) objectSet(ctx context.Context, entity ACLEntity, role ACLRol
} else {
req = a.c.raw.ObjectAccessControls.Update(a.bucket, a.object, string(entity), acl)
}
a.configureCall(req, ctx)
err := runWithRetry(ctx, func() error {
a.configureCall(ctx, req)
return runWithRetry(ctx, func() error {
_, err := req.Do()
return err
})
if err != nil {
if isBucketDefault {
return fmt.Errorf("storage: error updating default ACL entry for bucket %q, entity %q: %v", a.bucket, entity, err)
} else {
return fmt.Errorf("storage: error updating object ACL entry for bucket %q, object %q, entity %q: %v", a.bucket, a.object, entity, err)
}
}
return nil
}
func (a *ACLHandle) objectDelete(ctx context.Context, entity ACLEntity) error {
err := runWithRetry(ctx, func() error {
return runWithRetry(ctx, func() error {
req := a.c.raw.ObjectAccessControls.Delete(a.bucket, a.object, string(entity))
a.configureCall(req, ctx)
a.configureCall(ctx, req)
return req.Do()
})
if err != nil {
return fmt.Errorf("storage: error deleting object ACL entry for bucket %q, file %q, entity %q: %v", a.bucket, a.object, entity, err)
}
return nil
}
func (a *ACLHandle) configureCall(call interface {
Header() http.Header
}, ctx context.Context) {
func (a *ACLHandle) configureCall(ctx context.Context, call interface{ Header() http.Header }) {
vc := reflect.ValueOf(call)
vc.MethodByName("Context").Call([]reflect.Value{reflect.ValueOf(ctx)})
if a.userProject != "" {
@ -243,10 +236,100 @@ func (a *ACLHandle) configureCall(call interface {
setClientHeader(call.Header())
}
func toACLRules(items []*raw.ObjectAccessControl) []ACLRule {
r := make([]ACLRule, 0, len(items))
func toObjectACLRules(items []*raw.ObjectAccessControl) []ACLRule {
var rs []ACLRule
for _, item := range items {
r = append(r, ACLRule{Entity: ACLEntity(item.Entity), Role: ACLRole(item.Role)})
rs = append(rs, toObjectACLRule(item))
}
return rs
}
func toBucketACLRules(items []*raw.BucketAccessControl) []ACLRule {
var rs []ACLRule
for _, item := range items {
rs = append(rs, toBucketACLRule(item))
}
return rs
}
func toObjectACLRule(a *raw.ObjectAccessControl) ACLRule {
return ACLRule{
Entity: ACLEntity(a.Entity),
EntityID: a.EntityId,
Role: ACLRole(a.Role),
Domain: a.Domain,
Email: a.Email,
ProjectTeam: toObjectProjectTeam(a.ProjectTeam),
}
}
func toBucketACLRule(a *raw.BucketAccessControl) ACLRule {
return ACLRule{
Entity: ACLEntity(a.Entity),
EntityID: a.EntityId,
Role: ACLRole(a.Role),
Domain: a.Domain,
Email: a.Email,
ProjectTeam: toBucketProjectTeam(a.ProjectTeam),
}
}
func toRawObjectACL(rules []ACLRule) []*raw.ObjectAccessControl {
if len(rules) == 0 {
return nil
}
r := make([]*raw.ObjectAccessControl, 0, len(rules))
for _, rule := range rules {
r = append(r, rule.toRawObjectAccessControl("")) // bucket name unnecessary
}
return r
}
func toRawBucketACL(rules []ACLRule) []*raw.BucketAccessControl {
if len(rules) == 0 {
return nil
}
r := make([]*raw.BucketAccessControl, 0, len(rules))
for _, rule := range rules {
r = append(r, rule.toRawBucketAccessControl("")) // bucket name unnecessary
}
return r
}
func (r ACLRule) toRawBucketAccessControl(bucket string) *raw.BucketAccessControl {
return &raw.BucketAccessControl{
Bucket: bucket,
Entity: string(r.Entity),
Role: string(r.Role),
// The other fields are not settable.
}
}
func (r ACLRule) toRawObjectAccessControl(bucket string) *raw.ObjectAccessControl {
return &raw.ObjectAccessControl{
Bucket: bucket,
Entity: string(r.Entity),
Role: string(r.Role),
// The other fields are not settable.
}
}
func toBucketProjectTeam(p *raw.BucketAccessControlProjectTeam) *ProjectTeam {
if p == nil {
return nil
}
return &ProjectTeam{
ProjectNumber: p.ProjectNumber,
Team: p.Team,
}
}
func toObjectProjectTeam(p *raw.ObjectAccessControlProjectTeam) *ProjectTeam {
if p == nil {
return nil
}
return &ProjectTeam{
ProjectNumber: p.ProjectNumber,
Team: p.Team,
}
}


@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved.
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -15,13 +15,14 @@
package storage
import (
"context"
"fmt"
"net/http"
"reflect"
"time"
"cloud.google.com/go/internal/optional"
"golang.org/x/net/context"
"cloud.google.com/go/internal/trace"
"google.golang.org/api/googleapi"
"google.golang.org/api/iterator"
raw "google.golang.org/api/storage/v1"
@ -35,7 +36,7 @@ type BucketHandle struct {
acl ACLHandle
defaultObjectACL ACLHandle
conds *BucketConditions
userProject string // project for requester-pays buckets
userProject string // project for Requester Pays buckets
}
// Bucket returns a BucketHandle, which provides operations on the named bucket.
@ -63,7 +64,10 @@ func (c *Client) Bucket(name string) *BucketHandle {
// Create creates the Bucket in the project.
// If attrs is nil the API defaults will be used.
func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) error {
func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *BucketAttrs) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create")
defer func() { trace.EndSpan(ctx, err) }()
var bkt *raw.Bucket
if attrs != nil {
bkt = attrs.toRawBucket()
@ -71,13 +75,27 @@ func (b *BucketHandle) Create(ctx context.Context, projectID string, attrs *Buck
bkt = &raw.Bucket{}
}
bkt.Name = b.name
// If there is lifecycle information but no location, explicitly set
// the location. This is a GCS quirk/bug.
if bkt.Location == "" && bkt.Lifecycle != nil {
bkt.Location = "US"
}
req := b.c.raw.Buckets.Insert(projectID, bkt)
setClientHeader(req.Header())
if attrs != nil && attrs.PredefinedACL != "" {
req.PredefinedAcl(attrs.PredefinedACL)
}
if attrs != nil && attrs.PredefinedDefaultObjectACL != "" {
req.PredefinedDefaultObjectAcl(attrs.PredefinedDefaultObjectACL)
}
return runWithRetry(ctx, func() error { _, err := req.Context(ctx).Do(); return err })
}
// Delete deletes the Bucket.
func (b *BucketHandle) Delete(ctx context.Context) error {
func (b *BucketHandle) Delete(ctx context.Context) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Delete")
defer func() { trace.EndSpan(ctx, err) }()
req, err := b.newDeleteCall()
if err != nil {
return err
@ -134,7 +152,10 @@ func (b *BucketHandle) Object(name string) *ObjectHandle {
}
// Attrs returns the metadata for the bucket.
func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) {
func (b *BucketHandle) Attrs(ctx context.Context) (attrs *BucketAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Attrs")
defer func() { trace.EndSpan(ctx, err) }()
req, err := b.newGetCall()
if err != nil {
return nil, err
@ -150,7 +171,7 @@ func (b *BucketHandle) Attrs(ctx context.Context) (*BucketAttrs, error) {
if err != nil {
return nil, err
}
return newBucket(resp), nil
return newBucket(resp)
}
func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) {
@ -165,17 +186,27 @@ func (b *BucketHandle) newGetCall() (*raw.BucketsGetCall, error) {
return req, nil
}
func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (*BucketAttrs, error) {
// Update updates a bucket's attributes.
func (b *BucketHandle) Update(ctx context.Context, uattrs BucketAttrsToUpdate) (attrs *BucketAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Create")
defer func() { trace.EndSpan(ctx, err) }()
req, err := b.newPatchCall(&uattrs)
if err != nil {
return nil, err
}
if uattrs.PredefinedACL != "" {
req.PredefinedAcl(uattrs.PredefinedACL)
}
if uattrs.PredefinedDefaultObjectACL != "" {
req.PredefinedDefaultObjectAcl(uattrs.PredefinedDefaultObjectACL)
}
// TODO(jba): retry iff metagen is set?
rb, err := req.Context(ctx).Do()
if err != nil {
return nil, err
}
return newBucket(rb), nil
return newBucket(rb)
}
func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPatchCall, error) {
@ -192,92 +223,296 @@ func (b *BucketHandle) newPatchCall(uattrs *BucketAttrsToUpdate) (*raw.BucketsPa
}
// BucketAttrs represents the metadata for a Google Cloud Storage bucket.
// Read-only fields are ignored by BucketHandle.Create.
type BucketAttrs struct {
// Name is the name of the bucket.
// This field is read-only.
Name string
// ACL is the list of access control rules on the bucket.
ACL []ACLRule
// BucketPolicyOnly configures access checks to use only bucket-level IAM
// policies.
BucketPolicyOnly BucketPolicyOnly
// DefaultObjectACL is the list of access controls to
// apply to new objects when no object ACL is provided.
DefaultObjectACL []ACLRule
// DefaultEventBasedHold is the default value for event-based hold on
// newly created objects in this bucket. It defaults to false.
DefaultEventBasedHold bool
// If not empty, applies a predefined set of access controls. It should be set
// only when creating a bucket.
// It is always empty for BucketAttrs returned from the service.
// See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
// for valid values.
PredefinedACL string
// If not empty, applies a predefined set of default object access controls.
// It should be set only when creating a bucket.
// It is always empty for BucketAttrs returned from the service.
// See https://cloud.google.com/storage/docs/json_api/v1/buckets/insert
// for valid values.
PredefinedDefaultObjectACL string
// Location is the location of the bucket. It defaults to "US".
Location string
// MetaGeneration is the metadata generation of the bucket.
// This field is read-only.
MetaGeneration int64
// StorageClass is the default storage class of the bucket. This defines
// how objects in the bucket are stored and determines the SLA
// and the cost of storage. Typical values are "MULTI_REGIONAL",
// "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD" and
// "DURABLE_REDUCED_AVAILABILITY". Defaults to "STANDARD", which
// is equivalent to "MULTI_REGIONAL" or "REGIONAL" depending on
// the bucket's location settings.
// and the cost of storage. Typical values are "NEARLINE", "COLDLINE" and
// "STANDARD". Defaults to "STANDARD".
StorageClass string
// Created is the creation time of the bucket.
// This field is read-only.
Created time.Time
// VersioningEnabled reports whether this bucket has versioning enabled.
// This field is read-only.
VersioningEnabled bool
// Labels are the bucket's labels.
Labels map[string]string
// RequesterPays reports whether the bucket is a Requester Pays bucket.
// Clients performing operations on Requester Pays buckets must provide
// a user project (see BucketHandle.UserProject), which will be billed
// for the operations.
RequesterPays bool
// Lifecycle is the lifecycle configuration for objects in the bucket.
Lifecycle Lifecycle
// Retention policy enforces a minimum retention time for all objects
// contained in the bucket. A RetentionPolicy of nil implies the bucket
// has no minimum data retention.
//
// This feature is in private alpha release. It is not currently available to
// most customers. It might be changed in backwards-incompatible ways and is not
// subject to any SLA or deprecation policy.
RetentionPolicy *RetentionPolicy
// The bucket's Cross-Origin Resource Sharing (CORS) configuration.
CORS []CORS
// The encryption configuration used by default for newly inserted objects.
Encryption *BucketEncryption
// The logging configuration.
Logging *BucketLogging
// The website configuration.
Website *BucketWebsite
// Etag is the HTTP/1.1 Entity tag for the bucket.
// This field is read-only.
Etag string
// LocationType describes how data is stored and replicated.
// Typical values are "multi-region", "region" and "dual-region".
// This field is read-only.
LocationType string
}
func newBucket(b *raw.Bucket) *BucketAttrs {
// BucketPolicyOnly configures access checks to use only bucket-level IAM
// policies.
type BucketPolicyOnly struct {
// Enabled specifies whether access checks use only bucket-level IAM
// policies. Enabled may be disabled until the locked time.
Enabled bool
// LockedTime specifies the deadline for changing Enabled from true to
// false.
LockedTime time.Time
}
// Lifecycle is the lifecycle configuration for objects in the bucket.
type Lifecycle struct {
Rules []LifecycleRule
}
// RetentionPolicy enforces a minimum retention time for all objects
// contained in the bucket.
//
// Any attempt to overwrite or delete objects younger than the retention
// period will result in an error. An unlocked retention policy can be
// modified or removed from the bucket via the Update method. A
// locked retention policy cannot be removed or shortened in duration
// for the lifetime of the bucket.
//
// This feature is in private alpha release. It is not currently available to
// most customers. It might be changed in backwards-incompatible ways and is not
// subject to any SLA or deprecation policy.
type RetentionPolicy struct {
// RetentionPeriod specifies the duration that objects need to be
// retained. Retention duration must be greater than zero and less than
// 100 years. Note that enforcement of retention periods less than a day
// is not guaranteed. Such periods should only be used for testing
// purposes.
RetentionPeriod time.Duration
// EffectiveTime is the time from which the policy was enforced and
// effective. This field is read-only.
EffectiveTime time.Time
// IsLocked describes whether the bucket is locked. Once locked, an
// object retention policy cannot be modified.
// This field is read-only.
IsLocked bool
}
const (
// RFC3339 date with only the date segment, used for CreatedBefore in LifecycleRule.
rfc3339Date = "2006-01-02"
// DeleteAction is a lifecycle action that deletes a live and/or archived
// objects. Takes precedence over SetStorageClass actions.
DeleteAction = "Delete"
// SetStorageClassAction changes the storage class of live and/or archived
// objects.
SetStorageClassAction = "SetStorageClass"
)
// LifecycleRule is a lifecycle configuration rule.
//
// When all the configured conditions are met by an object in the bucket, the
// configured action will automatically be taken on that object.
type LifecycleRule struct {
// Action is the action to take when all of the associated conditions are
// met.
Action LifecycleAction
// Condition is the set of conditions that must be met for the associated
// action to be taken.
Condition LifecycleCondition
}
// LifecycleAction is a lifecycle configuration action.
type LifecycleAction struct {
// Type is the type of action to take on matching objects.
//
// Acceptable values are "Delete" to delete matching objects and
// "SetStorageClass" to set the storage class defined in StorageClass on
// matching objects.
Type string
// StorageClass is the storage class to set on matching objects if the Action
// is "SetStorageClass".
StorageClass string
}
// Liveness specifies whether the object is live or not.
type Liveness int
const (
// LiveAndArchived includes both live and archived objects.
LiveAndArchived Liveness = iota
// Live specifies that the object is still live.
Live
// Archived specifies that the object is archived.
Archived
)
// LifecycleCondition is a set of conditions used to match objects and take an
// action automatically.
//
// All configured conditions must be met for the associated action to be taken.
type LifecycleCondition struct {
// AgeInDays is the age of the object in days.
AgeInDays int64
// CreatedBefore is the time the object was created.
//
// This condition is satisfied when an object is created before midnight of
// the specified date in UTC.
CreatedBefore time.Time
// Liveness specifies the object's liveness. Relevant only for versioned objects
Liveness Liveness
// MatchesStorageClasses is the condition matching the object's storage
// class.
//
// Values include "NEARLINE", "COLDLINE" and "STANDARD".
MatchesStorageClasses []string
// NumNewerVersions is the condition matching objects with a number of newer versions.
//
// If the value is N, this condition is satisfied when there are at least N
// versions (including the live version) newer than this version of the
// object.
NumNewerVersions int64
}
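// Illustrative sketch (not part of the vendored source): a lifecycle configuration
// that moves live objects older than 30 days to COLDLINE and deletes archived
// versions once at least 3 newer versions exist. All names are placeholders.
//
//	lifecycle := storage.Lifecycle{Rules: []storage.LifecycleRule{
//		{
//			Action:    storage.LifecycleAction{Type: storage.SetStorageClassAction, StorageClass: "COLDLINE"},
//			Condition: storage.LifecycleCondition{AgeInDays: 30, Liveness: storage.Live},
//		},
//		{
//			Action:    storage.LifecycleAction{Type: storage.DeleteAction},
//			Condition: storage.LifecycleCondition{Liveness: storage.Archived, NumNewerVersions: 3},
//		},
//	}}
//	attrs, err := client.Bucket("my-bucket").Update(ctx, storage.BucketAttrsToUpdate{Lifecycle: &lifecycle})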
// BucketLogging holds the bucket's logging configuration, which defines the
// destination bucket and optional name prefix for the current bucket's
// logs.
type BucketLogging struct {
// The destination bucket where the current bucket's logs
// should be placed.
LogBucket string
// A prefix for log object names.
LogObjectPrefix string
}
// BucketWebsite holds the bucket's website configuration, controlling how the
// service behaves when accessing bucket contents as a web site. See
// https://cloud.google.com/storage/docs/static-website for more information.
type BucketWebsite struct {
// If the requested object path is missing, the service will ensure the path has
// a trailing '/', append this suffix, and attempt to retrieve the resulting
// object. This allows the creation of index.html objects to represent directory
// pages.
MainPageSuffix string
// If the requested object path is missing, and any mainPageSuffix object is
// missing, if applicable, the service will return the named object from this
// bucket as the content for a 404 Not Found result.
NotFoundPage string
}
func newBucket(b *raw.Bucket) (*BucketAttrs, error) {
if b == nil {
return nil, nil
}
rp, err := toRetentionPolicy(b.RetentionPolicy)
if err != nil {
return nil, err
}
return &BucketAttrs{
Name: b.Name,
Location: b.Location,
MetaGeneration: b.Metageneration,
DefaultEventBasedHold: b.DefaultEventBasedHold,
StorageClass: b.StorageClass,
Created: convertTime(b.TimeCreated),
VersioningEnabled: b.Versioning != nil && b.Versioning.Enabled,
ACL: toBucketACLRules(b.Acl),
DefaultObjectACL: toObjectACLRules(b.DefaultObjectAcl),
Labels: b.Labels,
RequesterPays: b.Billing != nil && b.Billing.RequesterPays,
Lifecycle: toLifecycle(b.Lifecycle),
RetentionPolicy: rp,
CORS: toCORS(b.Cors),
Encryption: toBucketEncryption(b.Encryption),
Logging: toBucketLogging(b.Logging),
Website: toBucketWebsite(b.Website),
BucketPolicyOnly: toBucketPolicyOnly(b.IamConfiguration),
Etag: b.Etag,
LocationType: b.LocationType,
}, nil
}
// toRawBucket copies the editable attribute from b to the raw library's Bucket type.
func (b *BucketAttrs) toRawBucket() *raw.Bucket {
// Copy label map.
var labels map[string]string
if len(b.Labels) > 0 {
@ -297,25 +532,114 @@ func (b *BucketAttrs) toRawBucket() *raw.Bucket {
if b.RequesterPays {
bb = &raw.BucketBilling{RequesterPays: true}
}
var bktIAM *raw.BucketIamConfiguration
if b.BucketPolicyOnly.Enabled {
bktIAM = &raw.BucketIamConfiguration{
BucketPolicyOnly: &raw.BucketIamConfigurationBucketPolicyOnly{
Enabled: true,
},
}
}
return &raw.Bucket{
Name: b.Name,
Location: b.Location,
StorageClass: b.StorageClass,
Acl: toRawBucketACL(b.ACL),
DefaultObjectAcl: toRawObjectACL(b.DefaultObjectACL),
Versioning: v,
Labels: labels,
Billing: bb,
Lifecycle: toRawLifecycle(b.Lifecycle),
RetentionPolicy: b.RetentionPolicy.toRawRetentionPolicy(),
Cors: toRawCORS(b.CORS),
Encryption: b.Encryption.toRawBucketEncryption(),
Logging: b.Logging.toRawBucketLogging(),
Website: b.Website.toRawBucketWebsite(),
IamConfiguration: bktIAM,
}
}
// CORS is the bucket's Cross-Origin Resource Sharing (CORS) configuration.
type CORS struct {
// MaxAge is the value to return in the Access-Control-Max-Age
// header used in preflight responses.
MaxAge time.Duration
// Methods is the list of HTTP methods on which to include CORS response
// headers, (GET, OPTIONS, POST, etc) Note: "*" is permitted in the list
// of methods, and means "any method".
Methods []string
// Origins is the list of Origins eligible to receive CORS response
// headers. Note: "*" is permitted in the list of origins, and means
// "any Origin".
Origins []string
// ResponseHeaders is the list of HTTP headers other than the simple
// response headers to give permission for the user-agent to share
// across domains.
ResponseHeaders []string
}
// BucketEncryption is a bucket's encryption configuration.
type BucketEncryption struct {
// A Cloud KMS key name, in the form
// projects/P/locations/L/keyRings/R/cryptoKeys/K, that will be used to encrypt
// objects inserted into this bucket, if no encryption method is specified.
// The key's location must be the same as the bucket's.
DefaultKMSKeyName string
}
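// Illustrative sketch (not part of the vendored source): creating a bucket whose
// newly written objects (for example Velero backups) are encrypted with a Cloud KMS
// key by default. The project, bucket, and key names are placeholders; the key's
// location must match the bucket's.
//
//	err := client.Bucket("velero-backups").Create(ctx, "my-project", &storage.BucketAttrs{
//		Location: "us-central1",
//		Encryption: &storage.BucketEncryption{
//			DefaultKMSKeyName: "projects/my-project/locations/us-central1/keyRings/velero/cryptoKeys/backups",
//		},
//	})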
// BucketAttrsToUpdate define the attributes to update during an Update call.
type BucketAttrsToUpdate struct {
// If set, updates whether the bucket uses versioning.
VersioningEnabled optional.Bool
// If set, updates whether the bucket is a Requester Pays bucket.
RequesterPays optional.Bool
// DefaultEventBasedHold is the default value for event-based hold on
// newly created objects in this bucket.
DefaultEventBasedHold optional.Bool
// BucketPolicyOnly configures access checks to use only bucket-level IAM
// policies.
BucketPolicyOnly *BucketPolicyOnly
// If set, updates the retention policy of the bucket. Using
// RetentionPolicy.RetentionPeriod = 0 will delete the existing policy.
//
// This feature is in private alpha release. It is not currently available to
// most customers. It might be changed in backwards-incompatible ways and is not
// subject to any SLA or deprecation policy.
RetentionPolicy *RetentionPolicy
// If set, replaces the CORS configuration with a new configuration.
// An empty (rather than nil) slice causes all CORS policies to be removed.
CORS []CORS
// If set, replaces the encryption configuration of the bucket. Using
// BucketEncryption.DefaultKMSKeyName = "" will delete the existing
// configuration.
Encryption *BucketEncryption
// If set, replaces the lifecycle configuration of the bucket.
Lifecycle *Lifecycle
// If set, replaces the logging configuration of the bucket.
Logging *BucketLogging
// If set, replaces the website configuration of the bucket.
Website *BucketWebsite
// If not empty, applies a predefined set of access controls.
// See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch.
PredefinedACL string
// If not empty, applies a predefined set of default object access controls.
// See https://cloud.google.com/storage/docs/json_api/v1/buckets/patch.
PredefinedDefaultObjectACL string
setLabels map[string]string
deleteLabels map[string]bool
}
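// Illustrative sketch (not part of the vendored source): a single Update call can
// change several attributes at once; setting Encryption with an empty
// DefaultKMSKeyName removes the bucket's default key. Names are placeholders.
//
//	uattrs := storage.BucketAttrsToUpdate{
//		VersioningEnabled: true,
//		Encryption:        &storage.BucketEncryption{DefaultKMSKeyName: ""}, // delete the default KMS key
//	}
//	uattrs.SetLabel("team", "backups")
//	attrs, err := client.Bucket("my-bucket").Update(ctx, uattrs)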
@ -340,6 +664,22 @@ func (ua *BucketAttrsToUpdate) DeleteLabel(name string) {
func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
rb := &raw.Bucket{}
if ua.CORS != nil {
rb.Cors = toRawCORS(ua.CORS)
rb.ForceSendFields = append(rb.ForceSendFields, "Cors")
}
if ua.DefaultEventBasedHold != nil {
rb.DefaultEventBasedHold = optional.ToBool(ua.DefaultEventBasedHold)
rb.ForceSendFields = append(rb.ForceSendFields, "DefaultEventBasedHold")
}
if ua.RetentionPolicy != nil {
if ua.RetentionPolicy.RetentionPeriod == 0 {
rb.NullFields = append(rb.NullFields, "RetentionPolicy")
rb.RetentionPolicy = nil
} else {
rb.RetentionPolicy = ua.RetentionPolicy.toRawRetentionPolicy()
}
}
if ua.VersioningEnabled != nil {
rb.Versioning = &raw.BucketVersioning{
Enabled: optional.ToBool(ua.VersioningEnabled),
@ -352,6 +692,50 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
ForceSendFields: []string{"RequesterPays"},
}
}
if ua.BucketPolicyOnly != nil {
rb.IamConfiguration = &raw.BucketIamConfiguration{
BucketPolicyOnly: &raw.BucketIamConfigurationBucketPolicyOnly{
Enabled: ua.BucketPolicyOnly.Enabled,
},
}
}
if ua.Encryption != nil {
if ua.Encryption.DefaultKMSKeyName == "" {
rb.NullFields = append(rb.NullFields, "Encryption")
rb.Encryption = nil
} else {
rb.Encryption = ua.Encryption.toRawBucketEncryption()
}
}
if ua.Lifecycle != nil {
rb.Lifecycle = toRawLifecycle(*ua.Lifecycle)
}
if ua.Logging != nil {
if *ua.Logging == (BucketLogging{}) {
rb.NullFields = append(rb.NullFields, "Logging")
rb.Logging = nil
} else {
rb.Logging = ua.Logging.toRawBucketLogging()
}
}
if ua.Website != nil {
if *ua.Website == (BucketWebsite{}) {
rb.NullFields = append(rb.NullFields, "Website")
rb.Website = nil
} else {
rb.Website = ua.Website.toRawBucketWebsite()
}
}
if ua.PredefinedACL != "" {
// Clear ACL or the call will fail.
rb.Acl = nil
rb.ForceSendFields = append(rb.ForceSendFields, "Acl")
}
if ua.PredefinedDefaultObjectACL != "" {
// Clear ACLs or the call will fail.
rb.DefaultObjectAcl = nil
rb.ForceSendFields = append(rb.ForceSendFields, "DefaultObjectAcl")
}
if ua.setLabels != nil || ua.deleteLabels != nil {
rb.Labels = map[string]string{}
for k, v := range ua.setLabels {
@ -369,7 +753,7 @@ func (ua *BucketAttrsToUpdate) toRawBucket() *raw.Bucket {
// If returns a new BucketHandle that applies a set of preconditions.
// Preconditions already set on the BucketHandle are ignored.
// Operations on the new handle will return an error if the preconditions are not
// satisfied. The only valid preconditions for buckets are MetagenerationMatch
// and MetagenerationNotMatch.
func (b *BucketHandle) If(conds BucketConditions) *BucketHandle {
@ -404,8 +788,10 @@ func (c *BucketConditions) validate(method string) error {
}
// UserProject returns a new BucketHandle that passes the project ID as the user
// project for all subsequent calls. Calls with a user project will be billed to that
// project rather than to the bucket's owning project.
//
// A user project is required for all operations on Requester Pays buckets.
func (b *BucketHandle) UserProject(projectID string) *BucketHandle {
b2 := *b
b2.userProject = projectID
@ -414,6 +800,25 @@ func (b *BucketHandle) UserProject(projectID string) *BucketHandle {
return &b2
}
// LockRetentionPolicy locks a bucket's retention policy until a previously-configured
// RetentionPeriod past the EffectiveTime. Note that if RetentionPeriod is set to less
// than a day, the retention policy is treated as a development configuration and locking
// will have no effect. The BucketHandle must have a metageneration condition that
// matches the bucket's metageneration. See BucketHandle.If.
//
// This feature is in private alpha release. It is not currently available to
// most customers. It might be changed in backwards-incompatible ways and is not
// subject to any SLA or deprecation policy.
func (b *BucketHandle) LockRetentionPolicy(ctx context.Context) error {
var metageneration int64
if b.conds != nil {
metageneration = b.conds.MetagenerationMatch
}
req := b.c.raw.Buckets.LockRetentionPolicy(b.name, metageneration)
_, err := req.Context(ctx).Do()
return err
}
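// Illustrative sketch (not part of the vendored source): locking a retention policy.
// The metageneration precondition is mandatory, so read the bucket's attributes first.
// Bucket name is a placeholder.
//
//	bkt := client.Bucket("my-bucket")
//	attrs, err := bkt.Attrs(ctx)
//	if err != nil {
//		// TODO: Handle error.
//	}
//	err = bkt.If(storage.BucketConditions{MetagenerationMatch: attrs.MetaGeneration}).LockRetentionPolicy(ctx)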
// applyBucketConds modifies the provided call using the conditions in conds.
// call is something that quacks like a *raw.WhateverCall.
func applyBucketConds(method string, conds *BucketConditions, call interface{}) error {
@ -437,6 +842,198 @@ func applyBucketConds(method string, conds *BucketConditions, call interface{})
return nil
}
func (rp *RetentionPolicy) toRawRetentionPolicy() *raw.BucketRetentionPolicy {
if rp == nil {
return nil
}
return &raw.BucketRetentionPolicy{
RetentionPeriod: int64(rp.RetentionPeriod / time.Second),
}
}
func toRetentionPolicy(rp *raw.BucketRetentionPolicy) (*RetentionPolicy, error) {
if rp == nil {
return nil, nil
}
t, err := time.Parse(time.RFC3339, rp.EffectiveTime)
if err != nil {
return nil, err
}
return &RetentionPolicy{
RetentionPeriod: time.Duration(rp.RetentionPeriod) * time.Second,
EffectiveTime: t,
IsLocked: rp.IsLocked,
}, nil
}
func toRawCORS(c []CORS) []*raw.BucketCors {
var out []*raw.BucketCors
for _, v := range c {
out = append(out, &raw.BucketCors{
MaxAgeSeconds: int64(v.MaxAge / time.Second),
Method: v.Methods,
Origin: v.Origins,
ResponseHeader: v.ResponseHeaders,
})
}
return out
}
func toCORS(rc []*raw.BucketCors) []CORS {
var out []CORS
for _, v := range rc {
out = append(out, CORS{
MaxAge: time.Duration(v.MaxAgeSeconds) * time.Second,
Methods: v.Method,
Origins: v.Origin,
ResponseHeaders: v.ResponseHeader,
})
}
return out
}
func toRawLifecycle(l Lifecycle) *raw.BucketLifecycle {
var rl raw.BucketLifecycle
if len(l.Rules) == 0 {
return nil
}
for _, r := range l.Rules {
rr := &raw.BucketLifecycleRule{
Action: &raw.BucketLifecycleRuleAction{
Type: r.Action.Type,
StorageClass: r.Action.StorageClass,
},
Condition: &raw.BucketLifecycleRuleCondition{
Age: r.Condition.AgeInDays,
MatchesStorageClass: r.Condition.MatchesStorageClasses,
NumNewerVersions: r.Condition.NumNewerVersions,
},
}
switch r.Condition.Liveness {
case LiveAndArchived:
rr.Condition.IsLive = nil
case Live:
rr.Condition.IsLive = googleapi.Bool(true)
case Archived:
rr.Condition.IsLive = googleapi.Bool(false)
}
if !r.Condition.CreatedBefore.IsZero() {
rr.Condition.CreatedBefore = r.Condition.CreatedBefore.Format(rfc3339Date)
}
rl.Rule = append(rl.Rule, rr)
}
return &rl
}
func toLifecycle(rl *raw.BucketLifecycle) Lifecycle {
var l Lifecycle
if rl == nil {
return l
}
for _, rr := range rl.Rule {
r := LifecycleRule{
Action: LifecycleAction{
Type: rr.Action.Type,
StorageClass: rr.Action.StorageClass,
},
Condition: LifecycleCondition{
AgeInDays: rr.Condition.Age,
MatchesStorageClasses: rr.Condition.MatchesStorageClass,
NumNewerVersions: rr.Condition.NumNewerVersions,
},
}
switch {
case rr.Condition.IsLive == nil:
r.Condition.Liveness = LiveAndArchived
case *rr.Condition.IsLive == true:
r.Condition.Liveness = Live
case *rr.Condition.IsLive == false:
r.Condition.Liveness = Archived
}
if rr.Condition.CreatedBefore != "" {
r.Condition.CreatedBefore, _ = time.Parse(rfc3339Date, rr.Condition.CreatedBefore)
}
l.Rules = append(l.Rules, r)
}
return l
}
func (e *BucketEncryption) toRawBucketEncryption() *raw.BucketEncryption {
if e == nil {
return nil
}
return &raw.BucketEncryption{
DefaultKmsKeyName: e.DefaultKMSKeyName,
}
}
func toBucketEncryption(e *raw.BucketEncryption) *BucketEncryption {
if e == nil {
return nil
}
return &BucketEncryption{DefaultKMSKeyName: e.DefaultKmsKeyName}
}
func (b *BucketLogging) toRawBucketLogging() *raw.BucketLogging {
if b == nil {
return nil
}
return &raw.BucketLogging{
LogBucket: b.LogBucket,
LogObjectPrefix: b.LogObjectPrefix,
}
}
func toBucketLogging(b *raw.BucketLogging) *BucketLogging {
if b == nil {
return nil
}
return &BucketLogging{
LogBucket: b.LogBucket,
LogObjectPrefix: b.LogObjectPrefix,
}
}
func (w *BucketWebsite) toRawBucketWebsite() *raw.BucketWebsite {
if w == nil {
return nil
}
return &raw.BucketWebsite{
MainPageSuffix: w.MainPageSuffix,
NotFoundPage: w.NotFoundPage,
}
}
func toBucketWebsite(w *raw.BucketWebsite) *BucketWebsite {
if w == nil {
return nil
}
return &BucketWebsite{
MainPageSuffix: w.MainPageSuffix,
NotFoundPage: w.NotFoundPage,
}
}
func toBucketPolicyOnly(b *raw.BucketIamConfiguration) BucketPolicyOnly {
if b == nil || b.BucketPolicyOnly == nil || !b.BucketPolicyOnly.Enabled {
return BucketPolicyOnly{}
}
lt, err := time.Parse(time.RFC3339, b.BucketPolicyOnly.LockedTime)
if err != nil {
return BucketPolicyOnly{
Enabled: true,
}
}
return BucketPolicyOnly{
Enabled: true,
LockedTime: lt,
}
}
// Objects returns an iterator over the objects in the bucket that match the Query q.
// If q is nil, no filtering is done.
func (b *BucketHandle) Objects(ctx context.Context, q *Query) *ObjectIterator {
@ -518,8 +1115,6 @@ func (it *ObjectIterator) fetch(pageSize int, pageToken string) (string, error)
return resp.NextPageToken, nil
}
// TODO(jbd): Add storage.buckets.update.
// Buckets returns an iterator over the buckets in the project. You may
// optionally set the iterator's Prefix field to restrict the list to buckets
// whose names begin with the prefix. By default, all buckets in the project
@ -565,7 +1160,7 @@ func (it *BucketIterator) Next() (*BucketAttrs, error) {
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
func (it *BucketIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
func (it *BucketIterator) fetch(pageSize int, pageToken string) (token string, err error) {
req := it.client.raw.Buckets.List(it.projectID)
setClientHeader(req.Header())
req.Projection("full")
@ -575,7 +1170,6 @@ func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error)
req.MaxResults(int64(pageSize))
}
var resp *raw.Buckets
err = runWithRetry(it.ctx, func() error {
resp, err = req.Context(it.ctx).Do()
return err
@ -584,7 +1178,11 @@ func (it *BucketIterator) fetch(pageSize int, pageToken string) (string, error)
return "", err
}
for _, item := range resp.Items {
b, err := newBucket(item)
if err != nil {
return "", err
}
it.buckets = append(it.buckets, b)
}
return resp.NextPageToken, nil
}


@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -15,10 +15,11 @@
package storage
import (
"context"
"errors"
"fmt"
"golang.org/x/net/context"
"cloud.google.com/go/internal/trace"
raw "google.golang.org/api/storage/v1"
)
@ -59,17 +60,32 @@ type Copier struct {
// ProgressFunc should return quickly without blocking.
ProgressFunc func(copiedBytes, totalBytes uint64)
// The Cloud KMS key, in the form projects/P/locations/L/keyRings/R/cryptoKeys/K,
// that will be used to encrypt the object. Overrides the object's KMSKeyName, if
// any.
//
// Providing both a DestinationKMSKeyName and a customer-supplied encryption key
// (via ObjectHandle.Key) on the destination object will result in an error when
// Run is called.
DestinationKMSKeyName string
dst, src *ObjectHandle
}
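// Illustrative sketch (not part of the vendored source): rewriting an object so the
// destination copy is encrypted with a specific Cloud KMS key. All names are placeholders.
//
//	src := client.Bucket("my-bucket").Object("backup.tar.gz")
//	dst := client.Bucket("my-bucket").Object("backup-kms.tar.gz")
//	copier := dst.CopierFrom(src)
//	copier.DestinationKMSKeyName = "projects/my-project/locations/us-central1/keyRings/velero/cryptoKeys/backups"
//	attrs, err := copier.Run(ctx)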
// Run performs the copy.
func (c *Copier) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Copier.Run")
defer func() { trace.EndSpan(ctx, err) }()
if err := c.src.validate(); err != nil {
return nil, err
}
if err := c.dst.validate(); err != nil {
return nil, err
}
if c.DestinationKMSKeyName != "" && c.dst.encryptionKey != nil {
return nil, errors.New("storage: cannot use DestinationKMSKeyName with a customer-supplied encryption key")
}
// Convert destination attributes to raw form, omitting the bucket.
// If the bucket is included but name or content-type aren't, the service
// returns a 400 with "Required" as the only message. Omitting the bucket
@ -96,6 +112,12 @@ func (c *Copier) callRewrite(ctx context.Context, rawObj *raw.Object) (*raw.Rewr
if c.RewriteToken != "" {
call.RewriteToken(c.RewriteToken)
}
if c.DestinationKMSKeyName != "" {
call.DestinationKmsKeyName(c.DestinationKMSKeyName)
}
if c.PredefinedACL != "" {
call.DestinationPredefinedAcl(c.PredefinedACL)
}
if err := applyConds("Copy destination", c.dst.gen, c.dst.conds, call); err != nil {
return nil, err
}
@ -149,7 +171,10 @@ type Composer struct {
}
// Run performs the compose operation.
func (c *Composer) Run(ctx context.Context) (attrs *ObjectAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Composer.Run")
defer func() { trace.EndSpan(ctx, err) }()
if err := c.dst.validate(); err != nil {
return nil, err
}
@ -187,11 +212,13 @@ func (c *Composer) Run(ctx context.Context) (*ObjectAttrs, error) {
if c.dst.userProject != "" {
call.UserProject(c.dst.userProject)
}
if c.PredefinedACL != "" {
call.DestinationPredefinedAcl(c.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), c.dst.encryptionKey, false); err != nil {
return nil, err
}
var obj *raw.Object
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
if err != nil {


@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -19,11 +19,14 @@ Google Cloud Storage stores data in named objects, which are grouped into bucket
More information about Google Cloud Storage is available at
https://cloud.google.com/storage/docs.
See https://godoc.org/cloud.google.com/go for authentication, timeouts,
connection pooling and similar aspects of this package.
All of the methods of this package use exponential backoff to retry calls that fail
with certain errors, as described in
https://cloud.google.com/storage/docs/exponential-backoff. Retrying continues
indefinitely unless the controlling context is canceled or the client is closed. See
context.WithTimeout and context.WithCancel.
Creating a Client
@ -36,6 +39,13 @@ To start working with this package, create a client:
// TODO: Handle error.
}
The client will use your default application credentials.
If you only wish to access public data, you can create
an unauthenticated client with
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
Buckets
A Google Cloud Storage bucket is a collection of objects. To work with a
@ -56,7 +66,7 @@ global across all projects.
Each bucket has associated metadata, represented in this package by
BucketAttrs. The third argument to BucketHandle.Create allows you to set
the initial BucketAttrs of a bucket. To retrieve a bucket's attributes, use
Attrs:
attrs, err := bkt.Attrs(ctx)
@ -69,15 +79,16 @@ Attrs:
Objects
An object holds arbitrary data as a sequence of bytes, like a file. You
refer to objects using a handle, just as with buckets, but unlike buckets
you don't explicitly create an object. Instead, the first time you write
to an object it will be created. You can use the standard Go io.Reader
and io.Writer interfaces to read and write object data:
obj := bkt.Object("data")
// Write something to obj.
// w implements io.Writer.
w := obj.NewWriter(ctx)
// Write some text to obj. This will either create the object or overwrite whatever is there already.
if _, err := fmt.Fprintf(w, "This object contains text.\n"); err != nil {
// TODO: Handle error.
}
@ -153,9 +164,13 @@ SignedURL for details.
}
fmt.Println(url)
Errors
Errors returned by this client are often of the type [`googleapi.Error`](https://godoc.org/google.golang.org/api/googleapi#Error).
These errors can be introspected for more information by type asserting to the richer `googleapi.Error` type. For example:
if e, ok := err.(*googleapi.Error); ok {
if e.Code == 409 { ... }
}
*/
package storage // import "cloud.google.com/go/storage"


@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -12,15 +12,21 @@
// See the License for the specific language governing permissions and
// limitations under the License.
// +build go1.10
package storage
import "google.golang.org/api/googleapi"
func shouldRetry(err error) bool {
switch e := err.(type) {
case *googleapi.Error:
// Retry on 429 and 5xx, according to
// https://cloud.google.com/storage/docs/exponential-backoff.
return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
case interface{ Temporary() bool }:
return e.Temporary()
default:
return false
}
}
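// Illustrative sketch (not part of the vendored source): runWithRetry in invoke.go
// consults shouldRetry before backing off, so a 503 from the service is retried while
// a 404 is surfaced immediately.
//
//	shouldRetry(&googleapi.Error{Code: 503}) // true
//	shouldRetry(&googleapi.Error{Code: 404}) // false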

vendor/cloud.google.com/go/storage/hmac.go generated vendored Normal file

@ -0,0 +1,387 @@
// Copyright 2019 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"errors"
"fmt"
"time"
"google.golang.org/api/iterator"
raw "google.golang.org/api/storage/v1"
)
// HMACState is the state of the HMAC key.
type HMACState string
const (
// Active is the status for an active key that can be used to sign
// requests.
Active HMACState = "ACTIVE"
// Inactive is the status for an inactive key; requests signed by
// this key will be denied.
Inactive HMACState = "INACTIVE"
// Deleted is the status for a key that is deleted.
// Once in this state the key cannot be recovered
// and does not count towards key limits. Deleted keys will be cleaned
// up later.
Deleted HMACState = "DELETED"
)
// HMACKey is the representation of a Google Cloud Storage HMAC key.
//
// HMAC keys are used to authenticate signed access to objects. To enable HMAC key
// authentication, please visit https://cloud.google.com/storage/docs/migrating.
//
// This type is EXPERIMENTAL and subject to change or removal without notice.
type HMACKey struct {
// The HMAC's secret key.
Secret string
// AccessID is the ID of the HMAC key.
AccessID string
// Etag is the HTTP/1.1 Entity tag.
Etag string
// ID is the ID of the HMAC key, including the ProjectID and AccessID.
ID string
// ProjectID is the ID of the project that owns the
// service account to which the key authenticates.
ProjectID string
// ServiceAccountEmail is the email address
// of the key's associated service account.
ServiceAccountEmail string
// CreatedTime is the creation time of the HMAC key.
CreatedTime time.Time
// UpdatedTime is the last modification time of the HMAC key metadata.
UpdatedTime time.Time
// State is the state of the HMAC key.
// It can be one of Active, Inactive or Deleted.
State HMACState
}
// HMACKeyHandle helps provide access and management for HMAC keys.
//
// This type is EXPERIMENTAL and subject to change or removal without notice.
type HMACKeyHandle struct {
projectID string
accessID string
raw *raw.ProjectsHmacKeysService
}
// HMACKeyHandle creates a handle that will be used for HMACKey operations.
//
// This method is EXPERIMENTAL and subject to change or removal without notice.
func (c *Client) HMACKeyHandle(projectID, accessID string) *HMACKeyHandle {
return &HMACKeyHandle{
projectID: projectID,
accessID: accessID,
raw: raw.NewProjectsHmacKeysService(c.raw),
}
}
// Get invokes an RPC to retrieve the HMAC key referenced by the
// HMACKeyHandle's accessID.
//
// This method is EXPERIMENTAL and subject to change or removal without notice.
func (hkh *HMACKeyHandle) Get(ctx context.Context) (*HMACKey, error) {
call := hkh.raw.Get(hkh.projectID, hkh.accessID)
setClientHeader(call.Header())
var metadata *raw.HmacKeyMetadata
var err error
err = runWithRetry(ctx, func() error {
metadata, err = call.Context(ctx).Do()
return err
})
if err != nil {
return nil, err
}
hkPb := &raw.HmacKey{
Metadata: metadata,
}
return pbHmacKeyToHMACKey(hkPb, false)
}
// Delete invokes an RPC to delete the key referenced by accessID, on Google Cloud Storage.
// Only inactive HMAC keys can be deleted.
// After deletion, a key cannot be used to authenticate requests.
//
// This method is EXPERIMENTAL and subject to change or removal without notice.
func (hkh *HMACKeyHandle) Delete(ctx context.Context) error {
delCall := hkh.raw.Delete(hkh.projectID, hkh.accessID)
setClientHeader(delCall.Header())
return runWithRetry(ctx, func() error {
return delCall.Context(ctx).Do()
})
}
func pbHmacKeyToHMACKey(pb *raw.HmacKey, updatedTimeCanBeNil bool) (*HMACKey, error) {
pbmd := pb.Metadata
if pbmd == nil {
return nil, errors.New("field Metadata cannot be nil")
}
createdTime, err := time.Parse(time.RFC3339, pbmd.TimeCreated)
if err != nil {
return nil, fmt.Errorf("field CreatedTime: %v", err)
}
updatedTime, err := time.Parse(time.RFC3339, pbmd.Updated)
if err != nil && !updatedTimeCanBeNil {
return nil, fmt.Errorf("field UpdatedTime: %v", err)
}
hmk := &HMACKey{
AccessID: pbmd.AccessId,
Secret: pb.Secret,
Etag: pbmd.Etag,
ID: pbmd.Id,
State: HMACState(pbmd.State),
ProjectID: pbmd.ProjectId,
CreatedTime: createdTime,
UpdatedTime: updatedTime,
ServiceAccountEmail: pbmd.ServiceAccountEmail,
}
return hmk, nil
}
// CreateHMACKey invokes an RPC for Google Cloud Storage to create a new HMACKey.
//
// This method is EXPERIMENTAL and subject to change or removal without notice.
func (c *Client) CreateHMACKey(ctx context.Context, projectID, serviceAccountEmail string) (*HMACKey, error) {
if projectID == "" {
return nil, errors.New("storage: expecting a non-blank projectID")
}
if serviceAccountEmail == "" {
return nil, errors.New("storage: expecting a non-blank service account email")
}
svc := raw.NewProjectsHmacKeysService(c.raw)
call := svc.Create(projectID, serviceAccountEmail)
setClientHeader(call.Header())
var hkPb *raw.HmacKey
var err error
err = runWithRetry(ctx, func() error {
hkPb, err = call.Context(ctx).Do()
return err
})
if err != nil {
return nil, err
}
return pbHmacKeyToHMACKey(hkPb, true)
}
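// Illustrative sketch (not part of the vendored source): creating an HMAC key for a
// service account. The secret is only returned at creation time, so store it
// immediately. Project and service account email are placeholders.
//
//	key, err := client.CreateHMACKey(ctx, "my-project", "velero@my-project.iam.gserviceaccount.com")
//	if err != nil {
//		// TODO: Handle error.
//	}
//	fmt.Println(key.AccessID, key.Secret)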
// HMACKeyAttrsToUpdate defines the attributes of an HMACKey that will be updated.
//
// This type is EXPERIMENTAL and subject to change or removal without notice.
type HMACKeyAttrsToUpdate struct {
// State is required and must be either StateActive or StateInactive.
State HMACState
// Etag is an optional field and it is the HTTP/1.1 Entity tag.
Etag string
}
// Update mutates the HMACKey referred to by accessID.
//
// This method is EXPERIMENTAL and subject to change or removal without notice.
func (h *HMACKeyHandle) Update(ctx context.Context, au HMACKeyAttrsToUpdate) (*HMACKey, error) {
if au.State != Active && au.State != Inactive {
return nil, fmt.Errorf("storage: invalid state %q for update, must be either %q or %q", au.State, Active, Inactive)
}
call := h.raw.Update(h.projectID, h.accessID, &raw.HmacKeyMetadata{
Etag: au.Etag,
State: string(au.State),
})
setClientHeader(call.Header())
var metadata *raw.HmacKeyMetadata
var err error
err = runWithRetry(ctx, func() error {
metadata, err = call.Context(ctx).Do()
return err
})
if err != nil {
return nil, err
}
hkPb := &raw.HmacKey{
Metadata: metadata,
}
return pbHmacKeyToHMACKey(hkPb, false)
}
// An HMACKeysIterator is an iterator over HMACKeys.
//
// This type is EXPERIMENTAL and subject to change or removal without notice.
type HMACKeysIterator struct {
ctx context.Context
raw *raw.ProjectsHmacKeysService
projectID string
hmacKeys []*HMACKey
pageInfo *iterator.PageInfo
nextFunc func() error
index int
desc hmacKeyDesc
}
// ListHMACKeys returns an iterator for listing HMACKeys.
//
// This method is EXPERIMENTAL and subject to change or removal without notice.
func (c *Client) ListHMACKeys(ctx context.Context, projectID string, opts ...HMACKeyOption) *HMACKeysIterator {
it := &HMACKeysIterator{
ctx: ctx,
raw: raw.NewProjectsHmacKeysService(c.raw),
projectID: projectID,
}
for _, opt := range opts {
opt.withHMACKeyDesc(&it.desc)
}
it.pageInfo, it.nextFunc = iterator.NewPageInfo(
it.fetch,
func() int { return len(it.hmacKeys) - it.index },
func() interface{} {
prev := it.hmacKeys
it.hmacKeys = it.hmacKeys[:0]
it.index = 0
return prev
})
return it
}
// Next returns the next result. Its second return value is iterator.Done if
// there are no more results. Once Next returns iterator.Done, all subsequent
// calls will return iterator.Done.
//
// This method is EXPERIMENTAL and subject to change or removal without notice.
func (it *HMACKeysIterator) Next() (*HMACKey, error) {
if err := it.nextFunc(); err != nil {
return nil, err
}
key := it.hmacKeys[it.index]
it.index++
return key, nil
}
// PageInfo supports pagination. See the google.golang.org/api/iterator package for details.
//
// This method is EXPERIMENTAL and subject to change or removal without notice.
func (it *HMACKeysIterator) PageInfo() *iterator.PageInfo { return it.pageInfo }
func (it *HMACKeysIterator) fetch(pageSize int, pageToken string) (token string, err error) {
call := it.raw.List(it.projectID)
setClientHeader(call.Header())
if pageToken != "" {
call = call.PageToken(pageToken)
}
if it.desc.showDeletedKeys {
call = call.ShowDeletedKeys(true)
}
if it.desc.userProjectID != "" {
call = call.UserProject(it.desc.userProjectID)
}
if it.desc.forServiceAccountEmail != "" {
call = call.ServiceAccountEmail(it.desc.forServiceAccountEmail)
}
if pageSize > 0 {
call = call.MaxResults(int64(pageSize))
}
ctx := it.ctx
var resp *raw.HmacKeysMetadata
err = runWithRetry(it.ctx, func() error {
resp, err = call.Context(ctx).Do()
return err
})
if err != nil {
return "", err
}
for _, metadata := range resp.Items {
hkPb := &raw.HmacKey{
Metadata: metadata,
}
hkey, err := pbHmacKeyToHMACKey(hkPb, true)
if err != nil {
return "", err
}
it.hmacKeys = append(it.hmacKeys, hkey)
}
return resp.NextPageToken, nil
}
type hmacKeyDesc struct {
forServiceAccountEmail string
showDeletedKeys bool
userProjectID string
}
// HMACKeyOption configures the behavior of HMACKey related methods and actions.
type HMACKeyOption interface {
withHMACKeyDesc(*hmacKeyDesc)
}
type hmacKeyDescFunc func(*hmacKeyDesc)
func (hkdf hmacKeyDescFunc) withHMACKeyDesc(hkd *hmacKeyDesc) {
hkdf(hkd)
}
// ForHMACKeyServiceAccountEmail returns HMAC Keys that are
// associated with the email address of a service account in the project.
//
// Only one service account email can be used as a filter, so if multiple
// of these options are applied, the last email to be set will be used.
func ForHMACKeyServiceAccountEmail(serviceAccountEmail string) HMACKeyOption {
return hmacKeyDescFunc(func(hkd *hmacKeyDesc) {
hkd.forServiceAccountEmail = serviceAccountEmail
})
}
// ShowDeletedHMACKeys will also list keys whose state is "DELETED".
func ShowDeletedHMACKeys() HMACKeyOption {
return hmacKeyDescFunc(func(hkd *hmacKeyDesc) {
hkd.showDeletedKeys = true
})
}
// HMACKeysForUserProject will bill the request against userProjectID.
//
// Note: This is a noop right now and only provided for API compatibility.
func HMACKeysForUserProject(userProjectID string) HMACKeyOption {
return hmacKeyDescFunc(func(hkd *hmacKeyDesc) {
hkd.userProjectID = userProjectID
})
}
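// Illustrative sketch (not part of the vendored source): listing the keys that belong
// to one service account. "iterator" refers to google.golang.org/api/iterator; all
// names are placeholders.
//
//	it := client.ListHMACKeys(ctx, "my-project",
//		storage.ForHMACKeyServiceAccountEmail("velero@my-project.iam.gserviceaccount.com"))
//	for {
//		key, err := it.Next()
//		if err == iterator.Done {
//			break
//		}
//		if err != nil {
//			// TODO: Handle error.
//		}
//		fmt.Println(key.AccessID, key.State)
//	}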


@ -1,4 +1,4 @@
// Copyright 2017 Google Inc. All Rights Reserved.
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -15,29 +15,40 @@
package storage
import (
"context"
"cloud.google.com/go/iam"
"golang.org/x/net/context"
"cloud.google.com/go/internal/trace"
raw "google.golang.org/api/storage/v1"
iampb "google.golang.org/genproto/googleapis/iam/v1"
)
// IAM provides access to IAM access control for the bucket.
func (b *BucketHandle) IAM() *iam.Handle {
return iam.InternalNewHandleClient(&iamClient{
raw: b.c.raw,
userProject: b.userProject,
}, b.name)
}
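// Illustrative sketch (not part of the vendored source): granting read access through
// the bucket's IAM handle. The iam package is cloud.google.com/go/iam; the member and
// bucket names are placeholders.
//
//	handle := client.Bucket("my-bucket").IAM()
//	policy, err := handle.Policy(ctx)
//	if err != nil {
//		// TODO: Handle error.
//	}
//	policy.Add("serviceAccount:velero@my-project.iam.gserviceaccount.com", "roles/storage.objectViewer")
//	if err := handle.SetPolicy(ctx, policy); err != nil {
//		// TODO: Handle error.
//	}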
// iamClient implements the iam.client interface.
type iamClient struct {
raw *raw.Service
userProject string
}
func (c *iamClient) Get(ctx context.Context, resource string) (p *iampb.Policy, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Get")
defer func() { trace.EndSpan(ctx, err) }()
call := c.raw.Buckets.GetIamPolicy(resource)
setClientHeader(call.Header())
if c.userProject != "" {
call.UserProject(c.userProject)
}
var rp *raw.Policy
err = runWithRetry(ctx, func() error {
rp, err = call.Context(ctx).Do()
return err
})
if err != nil {
@ -46,23 +57,34 @@ func (c *iamClient) Get(ctx context.Context, resource string) (*iampb.Policy, er
return iamFromStoragePolicy(rp), nil
}
func (c *iamClient) Set(ctx context.Context, resource string, p *iampb.Policy) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Set")
defer func() { trace.EndSpan(ctx, err) }()
rp := iamToStoragePolicy(p)
call := c.raw.Buckets.SetIamPolicy(resource, rp)
setClientHeader(call.Header())
if c.userProject != "" {
call.UserProject(c.userProject)
}
return runWithRetry(ctx, func() error {
_, err := call.Context(ctx).Do()
return err
})
}
func (c *iamClient) Test(ctx context.Context, resource string, perms []string) (permissions []string, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.IAM.Test")
defer func() { trace.EndSpan(ctx, err) }()
call := c.raw.Buckets.TestIamPermissions(resource, perms)
setClientHeader(call.Header())
if c.userProject != "" {
call.UserProject(c.userProject)
}
var res *raw.TestIamPermissionsResponse
err = runWithRetry(ctx, func() error {
res, err = call.Context(ctx).Do()
return err
})
if err != nil {


@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved.
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -15,10 +15,10 @@
package storage
import (
"context"
"cloud.google.com/go/internal"
gax "github.com/googleapis/gax-go"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
gax "github.com/googleapis/gax-go/v2"
)
// runWithRetry calls the function until it returns nil or a non-retryable error, or
@ -29,13 +29,7 @@ func runWithRetry(ctx context.Context, call func() error) error {
if err == nil {
return true, nil
}
if shouldRetry(err) {
return false, nil
}
return true, err

vendor/cloud.google.com/go/storage/not_go110.go generated vendored Normal file

@ -0,0 +1,42 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.10
package storage
import (
"net/url"
"strings"
"google.golang.org/api/googleapi"
)
func shouldRetry(err error) bool {
switch e := err.(type) {
case *googleapi.Error:
// Retry on 429 and 5xx, according to
// https://cloud.google.com/storage/docs/exponential-backoff.
return e.Code == 429 || (e.Code >= 500 && e.Code < 600)
case *url.Error:
// Retry on REFUSED_STREAM.
// Unfortunately the error type is unexported, so we resort to string
// matching.
return strings.Contains(e.Error(), "REFUSED_STREAM")
case interface{ Temporary() bool }:
return e.Temporary()
default:
return false
}
}


@ -1,26 +0,0 @@
// Copyright 2017 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// +build !go1.7
package storage
import (
"net/http"
)
func withContext(r *http.Request, _ interface{}) *http.Request {
// In Go 1.6 and below, ignore the context.
return r
}

vendor/cloud.google.com/go/storage/notifications.go generated vendored Normal file

@ -0,0 +1,188 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package storage
import (
"context"
"errors"
"fmt"
"regexp"
"cloud.google.com/go/internal/trace"
raw "google.golang.org/api/storage/v1"
)
// A Notification describes how to send Cloud PubSub messages when certain
// events occur in a bucket.
type Notification struct {
// The ID of the notification.
ID string
// The ID of the topic to which this subscription publishes.
TopicID string
// The ID of the project to which the topic belongs.
TopicProjectID string
// Only send notifications about listed event types. If empty, send notifications
// for all event types.
// See https://cloud.google.com/storage/docs/pubsub-notifications#events.
EventTypes []string
// If present, only apply this notification configuration to object names that
// begin with this prefix.
ObjectNamePrefix string
// An optional list of additional attributes to attach to each Cloud PubSub
// message published for this notification subscription.
CustomAttributes map[string]string
// The contents of the message payload.
// See https://cloud.google.com/storage/docs/pubsub-notifications#payload.
PayloadFormat string
}
// Values for Notification.PayloadFormat.
const (
// Send no payload with notification messages.
NoPayload = "NONE"
// Send object metadata as JSON with notification messages.
JSONPayload = "JSON_API_V1"
)
// Values for Notification.EventTypes.
const (
// Event that occurs when an object is successfully created.
ObjectFinalizeEvent = "OBJECT_FINALIZE"
// Event that occurs when the metadata of an existing object changes.
ObjectMetadataUpdateEvent = "OBJECT_METADATA_UPDATE"
// Event that occurs when an object is permanently deleted.
ObjectDeleteEvent = "OBJECT_DELETE"
// Event that occurs when the live version of an object becomes an
// archived version.
ObjectArchiveEvent = "OBJECT_ARCHIVE"
)
func toNotification(rn *raw.Notification) *Notification {
n := &Notification{
ID: rn.Id,
EventTypes: rn.EventTypes,
ObjectNamePrefix: rn.ObjectNamePrefix,
CustomAttributes: rn.CustomAttributes,
PayloadFormat: rn.PayloadFormat,
}
n.TopicProjectID, n.TopicID = parseNotificationTopic(rn.Topic)
return n
}
var topicRE = regexp.MustCompile("^//pubsub.googleapis.com/projects/([^/]+)/topics/([^/]+)")
// parseNotificationTopic extracts the project and topic IDs from the full
// resource name returned by the service. If the name is malformed, it returns
// "?" for both IDs.
func parseNotificationTopic(nt string) (projectID, topicID string) {
matches := topicRE.FindStringSubmatch(nt)
if matches == nil {
return "?", "?"
}
return matches[1], matches[2]
}
func toRawNotification(n *Notification) *raw.Notification {
return &raw.Notification{
Id: n.ID,
Topic: fmt.Sprintf("//pubsub.googleapis.com/projects/%s/topics/%s",
n.TopicProjectID, n.TopicID),
EventTypes: n.EventTypes,
ObjectNamePrefix: n.ObjectNamePrefix,
CustomAttributes: n.CustomAttributes,
PayloadFormat: string(n.PayloadFormat),
}
}
// AddNotification adds a notification to b. You must set n's TopicProjectID, TopicID
// and PayloadFormat, and must not set its ID. The other fields are all optional. The
// returned Notification's ID can be used to refer to it.
func (b *BucketHandle) AddNotification(ctx context.Context, n *Notification) (ret *Notification, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.AddNotification")
defer func() { trace.EndSpan(ctx, err) }()
if n.ID != "" {
return nil, errors.New("storage: AddNotification: ID must not be set")
}
if n.TopicProjectID == "" {
return nil, errors.New("storage: AddNotification: missing TopicProjectID")
}
if n.TopicID == "" {
return nil, errors.New("storage: AddNotification: missing TopicID")
}
call := b.c.raw.Notifications.Insert(b.name, toRawNotification(n))
setClientHeader(call.Header())
if b.userProject != "" {
call.UserProject(b.userProject)
}
rn, err := call.Context(ctx).Do()
if err != nil {
return nil, err
}
return toNotification(rn), nil
}
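// Illustrative sketch (not part of the vendored source): publishing OBJECT_FINALIZE
// events for newly written objects to a Cloud Pub/Sub topic. Topic and project IDs
// are placeholders.
//
//	n, err := client.Bucket("my-bucket").AddNotification(ctx, &storage.Notification{
//		TopicProjectID: "my-project",
//		TopicID:        "backup-events",
//		PayloadFormat:  storage.JSONPayload,
//		EventTypes:     []string{storage.ObjectFinalizeEvent},
//	})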
// Notifications returns all the Notifications configured for this bucket, as a map
// indexed by notification ID.
func (b *BucketHandle) Notifications(ctx context.Context) (n map[string]*Notification, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.Notifications")
defer func() { trace.EndSpan(ctx, err) }()
call := b.c.raw.Notifications.List(b.name)
setClientHeader(call.Header())
if b.userProject != "" {
call.UserProject(b.userProject)
}
var res *raw.Notifications
err = runWithRetry(ctx, func() error {
res, err = call.Context(ctx).Do()
return err
})
if err != nil {
return nil, err
}
return notificationsToMap(res.Items), nil
}
func notificationsToMap(rns []*raw.Notification) map[string]*Notification {
m := map[string]*Notification{}
for _, rn := range rns {
m[rn.Id] = toNotification(rn)
}
return m
}
// DeleteNotification deletes the notification with the given ID.
func (b *BucketHandle) DeleteNotification(ctx context.Context, id string) (err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Bucket.DeleteNotification")
defer func() { trace.EndSpan(ctx, err) }()
call := b.c.raw.Notifications.Delete(b.name, id)
setClientHeader(call.Header())
if b.userProject != "" {
call.UserProject(b.userProject)
}
return call.Context(ctx).Do()
}


@ -1,4 +1,4 @@
// Copyright 2016 Google Inc. All Rights Reserved.
// Copyright 2016 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -15,22 +15,295 @@
package storage
import (
"context"
"errors"
"fmt"
"hash/crc32"
"io"
"io/ioutil"
"net/http"
"net/url"
"reflect"
"strconv"
"strings"
"time"
"cloud.google.com/go/internal/trace"
"google.golang.org/api/googleapi"
)
var crc32cTable = crc32.MakeTable(crc32.Castagnoli)
// ReaderObjectAttrs are attributes about the object being read. These are populated
// during the New call. This struct only holds a subset of object attributes: to
// get the full set of attributes, use ObjectHandle.Attrs.
//
// Each field is read-only.
type ReaderObjectAttrs struct {
// Size is the length of the object's content.
Size int64
// StartOffset is the byte offset within the object
// from which reading begins.
// This value is only non-zero for range requests.
StartOffset int64
// ContentType is the MIME type of the object's content.
ContentType string
// ContentEncoding is the encoding of the object's content.
ContentEncoding string
// CacheControl specifies whether and for how long browser and Internet
// caches are allowed to cache your objects.
CacheControl string
// LastModified is the time that the object was last modified.
LastModified time.Time
// Generation is the generation number of the object's content.
Generation int64
// Metageneration is the version of the metadata for this object at
// this generation. This field is used for preconditions and for
// detecting changes in metadata. A metageneration number is only
// meaningful in the context of a particular generation of a
// particular object.
Metageneration int64
}
// NewReader creates a new Reader to read the contents of the
// object.
// ErrObjectNotExist will be returned if the object is not found.
//
// The caller must call Close on the returned Reader when done reading.
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
return o.NewRangeReader(ctx, 0, -1)
}
// NewRangeReader reads part of an object, reading at most length bytes
// starting at the given offset. If length is negative, the object is read
// until the end. If offset is negative, the object is read abs(offset) bytes
// from the end, and length must also be negative to indicate all remaining
// bytes will be read.
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (r *Reader, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.NewRangeReader")
defer func() { trace.EndSpan(ctx, err) }()
if err := o.validate(); err != nil {
return nil, err
}
if offset < 0 && length >= 0 {
return nil, fmt.Errorf("storage: invalid offset %d < 0 requires negative length", offset)
}
if o.conds != nil {
if err := o.conds.validate("NewRangeReader"); err != nil {
return nil, err
}
}
u := &url.URL{
Scheme: o.c.scheme,
Host: o.c.readHost,
Path: fmt.Sprintf("/%s/%s", o.bucket, o.object),
}
verb := "GET"
if length == 0 {
verb = "HEAD"
}
req, err := http.NewRequest(verb, u.String(), nil)
if err != nil {
return nil, err
}
req = req.WithContext(ctx)
if o.userProject != "" {
req.Header.Set("X-Goog-User-Project", o.userProject)
}
if o.readCompressed {
req.Header.Set("Accept-Encoding", "gzip")
}
if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil {
return nil, err
}
gen := o.gen
// Define a function that initiates a Read with offset and length, assuming we
// have already read seen bytes.
reopen := func(seen int64) (*http.Response, error) {
start := offset + seen
if length < 0 && start < 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d", start))
} else if length < 0 && start > 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", start))
} else if length > 0 {
// The end character isn't affected by how many bytes we've seen.
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", start, offset+length-1))
}
// We wait to assign conditions here because the generation number can change in between reopen() runs.
req.URL.RawQuery = conditionsQuery(gen, o.conds)
var res *http.Response
err = runWithRetry(ctx, func() error {
res, err = o.c.hc.Do(req)
if err != nil {
return err
}
if res.StatusCode == http.StatusNotFound {
res.Body.Close()
return ErrObjectNotExist
}
if res.StatusCode < 200 || res.StatusCode > 299 {
body, _ := ioutil.ReadAll(res.Body)
res.Body.Close()
return &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
Body: string(body),
}
}
if start > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
res.Body.Close()
return errors.New("storage: partial request not satisfied")
}
// If a generation hasn't been specified, and this is the first response we get, let's record the
// generation. In future requests we'll use this generation as a precondition to avoid data races.
if gen < 0 && res.Header.Get("X-Goog-Generation") != "" {
gen64, err := strconv.ParseInt(res.Header.Get("X-Goog-Generation"), 10, 64)
if err != nil {
return err
}
gen = gen64
}
return nil
})
if err != nil {
return nil, err
}
return res, nil
}
res, err := reopen(0)
if err != nil {
return nil, err
}
var (
size int64 // total size of object, even if a range was requested.
checkCRC bool
crc uint32
startOffset int64 // non-zero if range request.
)
if res.StatusCode == http.StatusPartialContent {
cr := strings.TrimSpace(res.Header.Get("Content-Range"))
if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
}
size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
if err != nil {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
}
dashIndex := strings.Index(cr, "-")
if dashIndex >= 0 {
startOffset, err = strconv.ParseInt(cr[len("bytes="):dashIndex], 10, 64)
if err != nil {
return nil, fmt.Errorf("storage: invalid Content-Range %q: %v", cr, err)
}
}
} else {
size = res.ContentLength
// Check the CRC iff all of the following hold:
// - We asked for content (length != 0).
// - We got all the content (status != PartialContent).
// - The server sent a CRC header.
// - The Go http stack did not uncompress the file.
// - We were not served compressed data that was uncompressed on download.
// The problem with the last two cases is that the CRC will not match -- GCS
// computes it on the compressed contents, but we compute it on the
// uncompressed contents.
if length != 0 && !res.Uncompressed && !uncompressedByServer(res) {
crc, checkCRC = parseCRC32c(res)
}
}
remain := res.ContentLength
body := res.Body
if length == 0 {
remain = 0
body.Close()
body = emptyBody
}
var metaGen int64
if res.Header.Get("X-Goog-Metageneration") != "" {
metaGen, err = strconv.ParseInt(res.Header.Get("X-Goog-Metageneration"), 10, 64)
if err != nil {
return nil, err
}
}
var lm time.Time
if res.Header.Get("Last-Modified") != "" {
lm, err = http.ParseTime(res.Header.Get("Last-Modified"))
if err != nil {
return nil, err
}
}
attrs := ReaderObjectAttrs{
Size: size,
ContentType: res.Header.Get("Content-Type"),
ContentEncoding: res.Header.Get("Content-Encoding"),
CacheControl: res.Header.Get("Cache-Control"),
LastModified: lm,
StartOffset: startOffset,
Generation: gen,
Metageneration: metaGen,
}
return &Reader{
Attrs: attrs,
body: body,
size: size,
remain: remain,
wantCRC: crc,
checkCRC: checkCRC,
reopen: reopen,
}, nil
}
func uncompressedByServer(res *http.Response) bool {
// If the data is stored as gzip but is not encoded as gzip, then it
// was uncompressed by the server.
return res.Header.Get("X-Goog-Stored-Content-Encoding") == "gzip" &&
res.Header.Get("Content-Encoding") != "gzip"
}
func parseCRC32c(res *http.Response) (uint32, bool) {
const prefix = "crc32c="
for _, spec := range res.Header["X-Goog-Hash"] {
if strings.HasPrefix(spec, prefix) {
c, err := decodeUint32(spec[len(prefix):])
if err == nil {
return c, true
}
}
}
return 0, false
}
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// Reader reads a Cloud Storage object.
// It implements io.Reader.
//
// Typically, a Reader computes the CRC of the downloaded content and compares it to
// the stored CRC, returning an error from Read if there is a mismatch. This integrity check
// is skipped if transcoding occurs. See https://cloud.google.com/storage/docs/transcoding.
type Reader struct {
body io.ReadCloser
remain, size int64
contentType string
checkCRC bool // should we check the CRC?
wantCRC uint32 // the CRC32c value the server sent in the header
gotCRC uint32 // running crc
Attrs ReaderObjectAttrs
body io.ReadCloser
seen, remain, size int64
checkCRC bool // should we check the CRC?
wantCRC uint32 // the CRC32c value the server sent in the header
gotCRC uint32 // running crc
reopen func(seen int64) (*http.Response, error)
}
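// Illustrative usage sketch added by this editor, not part of the vendored diff:
// reading an object with this package. The client, bucket and object names are
// placeholders; a CRC32C mismatch (described above) surfaces as an error from the
// reads driven by ioutil.ReadAll.
func exampleReadObject(ctx context.Context, client *Client) ([]byte, error) {
	r, err := client.Bucket("example-bucket").Object("backups/backup.tar.gz").NewReader(ctx)
	if err != nil {
		return nil, err // e.g. ErrObjectNotExist
	}
	defer r.Close()
	return ioutil.ReadAll(r)
}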
// Close closes the Reader. It must be called when done reading.
@ -39,7 +312,7 @@ func (r *Reader) Close() error {
}
func (r *Reader) Read(p []byte) (int, error) {
n, err := r.body.Read(p)
n, err := r.readWithRetry(p)
if r.remain != -1 {
r.remain -= int64(n)
}
@ -48,19 +321,52 @@ func (r *Reader) Read(p []byte) (int, error) {
// Check CRC here. It would be natural to check it in Close, but
// everybody defers Close on the assumption that it doesn't return
// anything worth looking at.
if r.remain == 0 && r.gotCRC != r.wantCRC {
return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
r.gotCRC, r.wantCRC)
if err == io.EOF {
if r.gotCRC != r.wantCRC {
return n, fmt.Errorf("storage: bad CRC on read: got %d, want %d",
r.gotCRC, r.wantCRC)
}
}
}
return n, err
}
func (r *Reader) readWithRetry(p []byte) (int, error) {
n := 0
for len(p[n:]) > 0 {
m, err := r.body.Read(p[n:])
n += m
r.seen += int64(m)
if !shouldRetryRead(err) {
return n, err
}
// Read failed, but we will try again. Send a ranged read request that takes
// into account the number of bytes we've already seen.
res, err := r.reopen(r.seen)
if err != nil {
// reopen already retries
return n, err
}
r.body.Close()
r.body = res.Body
}
return n, nil
}
func shouldRetryRead(err error) bool {
if err == nil {
return false
}
return strings.HasSuffix(err.Error(), "INTERNAL_ERROR") && strings.Contains(reflect.TypeOf(err).String(), "http2")
}
// Size returns the size of the object in bytes.
// The returned value is always the same and is not affected by
// calls to Read or Close.
//
// Deprecated: use Reader.Attrs.Size.
func (r *Reader) Size() int64 {
return r.size
return r.Attrs.Size
}
// Remain returns the number of bytes left to read, or -1 if unknown.
@ -69,6 +375,29 @@ func (r *Reader) Remain() int64 {
}
// ContentType returns the content type of the object.
//
// Deprecated: use Reader.Attrs.ContentType.
func (r *Reader) ContentType() string {
return r.contentType
return r.Attrs.ContentType
}
// ContentEncoding returns the content encoding of the object.
//
// Deprecated: use Reader.Attrs.ContentEncoding.
func (r *Reader) ContentEncoding() string {
return r.Attrs.ContentEncoding
}
// CacheControl returns the cache control of the object.
//
// Deprecated: use Reader.Attrs.CacheControl.
func (r *Reader) CacheControl() string {
return r.Attrs.CacheControl
}
// LastModified returns the value of the Last-Modified header.
//
// Deprecated: use Reader.Attrs.LastModified.
func (r *Reader) LastModified() (time.Time, error) {
return r.Attrs.LastModified, nil
}

@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved.
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -16,37 +16,41 @@ package storage
import (
"bytes"
"context"
"crypto"
"crypto/rand"
"crypto/rsa"
"crypto/sha256"
"crypto/x509"
"encoding/base64"
"encoding/hex"
"encoding/pem"
"errors"
"fmt"
"io"
"io/ioutil"
"net/http"
"net/url"
"os"
"reflect"
"regexp"
"sort"
"strconv"
"strings"
"time"
"unicode/utf8"
"google.golang.org/api/option"
htransport "google.golang.org/api/transport/http"
"cloud.google.com/go/internal/optional"
"cloud.google.com/go/internal/trace"
"cloud.google.com/go/internal/version"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
"google.golang.org/api/option"
raw "google.golang.org/api/storage/v1"
htransport "google.golang.org/api/transport/http"
)
var (
// ErrBucketNotExist indicates that the bucket does not exist.
ErrBucketNotExist = errors.New("storage: bucket doesn't exist")
// ErrObjectNotExist indicates that the object does not exist.
ErrObjectNotExist = errors.New("storage: object doesn't exist")
)
@ -79,6 +83,12 @@ func setClientHeader(headers http.Header) {
type Client struct {
hc *http.Client
raw *raw.Service
// Scheme describes the scheme under the current host.
scheme string
// EnvHost is the host set on the STORAGE_EMULATOR_HOST variable.
envHost string
// ReadHost is the default host used on the reader.
readHost string
}
// NewClient creates a new Google Cloud Storage client.
@ -100,9 +110,20 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
if ep != "" {
rawService.BasePath = ep
}
scheme := "https"
var host, readHost string
if host = os.Getenv("STORAGE_EMULATOR_HOST"); host != "" {
scheme = "http"
readHost = host
} else {
readHost = "storage.googleapis.com"
}
return &Client{
hc: hc,
raw: rawService,
hc: hc,
raw: rawService,
scheme: scheme,
envHost: host,
readHost: readHost,
}, nil
}
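// Illustrative sketch added by this editor, not part of the vendored diff: pointing
// the client at a local emulator via STORAGE_EMULATOR_HOST, per the logic above.
// The host value is a placeholder; in this version the env var switches the read
// host and scheme here, while Writer.open rewrites the raw service BasePath for uploads.
func exampleEmulatorClient(ctx context.Context) (*Client, error) {
	os.Setenv("STORAGE_EMULATOR_HOST", "localhost:9199")
	return NewClient(ctx, option.WithoutAuthentication())
}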
@ -110,10 +131,26 @@ func NewClient(ctx context.Context, opts ...option.ClientOption) (*Client, error
//
// Close need not be called at program exit.
func (c *Client) Close() error {
// Set fields to nil so that subsequent uses will panic.
c.hc = nil
c.raw = nil
return nil
}
// SigningScheme determines the API version to use when signing URLs.
type SigningScheme int
const (
// SigningSchemeDefault is presently V2 and will change to V4 in the future.
SigningSchemeDefault SigningScheme = iota
// SigningSchemeV2 uses the V2 scheme to sign URLs.
SigningSchemeV2
// SigningSchemeV4 uses the V4 scheme to sign URLs.
SigningSchemeV4
)
// SignedURLOptions allows you to restrict the access to the signed URL.
type SignedURLOptions struct {
// GoogleAccessID represents the authorizer of the signed URL generation.
@ -136,8 +173,9 @@ type SignedURLOptions struct {
// Exactly one of PrivateKey or SignBytes must be non-nil.
PrivateKey []byte
// SignBytes is a function for implementing custom signing.
// If your application is running on Google App Engine, you can use appengine's internal signing function:
// SignBytes is a function for implementing custom signing. For example, if
// your application is running on Google App Engine, you can use
// appengine's internal signing function:
// ctx := appengine.NewContext(request)
// acc, _ := appengine.ServiceAccount(ctx)
// url, err := SignedURL("bucket", "object", &SignedURLOptions{
@ -158,7 +196,8 @@ type SignedURLOptions struct {
Method string
// Expires is the expiration time on the signed URL. It must be
// a datetime in the future.
// a datetime in the future. For SigningSchemeV4, the expiration may be no
// more than seven days in the future.
// Required.
Expires time.Time
@ -167,7 +206,7 @@ type SignedURLOptions struct {
// Optional.
ContentType string
// Headers is a list of extention headers the client must provide
// Headers is a list of extension headers the client must provide
// in order to use the generated signed URL.
// Optional.
Headers []string
@ -177,6 +216,123 @@ type SignedURLOptions struct {
// header in order to use the signed URL.
// Optional.
MD5 string
// Scheme determines the version of URL signing to use. Default is
// SigningSchemeV2.
Scheme SigningScheme
}
var (
tabRegex = regexp.MustCompile(`[\t]+`)
// I was tempted to call this spacex. :)
spaceRegex = regexp.MustCompile(` +`)
canonicalHeaderRegexp = regexp.MustCompile(`(?i)^(x-goog-[^:]+):(.*)?$`)
excludedCanonicalHeaders = map[string]bool{
"x-goog-encryption-key": true,
"x-goog-encryption-key-sha256": true,
}
)
// v2SanitizeHeaders applies the specifications for canonical extension headers at
// https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers.
func v2SanitizeHeaders(hdrs []string) []string {
headerMap := map[string][]string{}
for _, hdr := range hdrs {
// No leading or trailing whitespaces.
sanitizedHeader := strings.TrimSpace(hdr)
var header, value string
// Only keep canonical headers, discard any others.
headerMatches := canonicalHeaderRegexp.FindStringSubmatch(sanitizedHeader)
if len(headerMatches) == 0 {
continue
}
header = headerMatches[1]
value = headerMatches[2]
header = strings.ToLower(strings.TrimSpace(header))
value = strings.TrimSpace(value)
if excludedCanonicalHeaders[header] {
// Do not keep any deliberately excluded canonical headers when signing.
continue
}
if len(value) > 0 {
// Remove duplicate headers by appending the values of duplicates
// in their order of appearance.
headerMap[header] = append(headerMap[header], value)
}
}
var sanitizedHeaders []string
for header, values := range headerMap {
// There should be no spaces around the colon separating the header name
// from the header value or around the values themselves. The values
// should be separated by commas.
//
// NOTE: The semantics for headers without a value are not clear.
// However from specifications these should be edge-cases anyway and we
// should assume that there will be no canonical headers using empty
// values. Any such headers are discarded at the regexp stage above.
sanitizedHeaders = append(sanitizedHeaders, fmt.Sprintf("%s:%s", header, strings.Join(values, ",")))
}
sort.Strings(sanitizedHeaders)
return sanitizedHeaders
}
// v4SanitizeHeaders applies the specifications for canonical extension headers
// at https://cloud.google.com/storage/docs/access-control/signed-urls#about-canonical-extension-headers.
//
// V4 does a couple things differently from V2:
// - Headers get sorted by key, instead of by key:value. We do this in
// signedURLV4.
// - There's no canonical regexp: we simply split headers on :.
// - We don't exclude canonical headers.
// - We replace leading and trailing spaces in header values, like v2, but also
// all intermediate space duplicates get stripped. That is, there's only ever
// a single consecutive space.
func v4SanitizeHeaders(hdrs []string) []string {
headerMap := map[string][]string{}
for _, hdr := range hdrs {
// No leading or trailing whitespaces.
sanitizedHeader := strings.TrimSpace(hdr)
var key, value string
headerMatches := strings.Split(sanitizedHeader, ":")
if len(headerMatches) < 2 {
continue
}
key = headerMatches[0]
value = headerMatches[1]
key = strings.ToLower(strings.TrimSpace(key))
value = strings.TrimSpace(value)
value = string(spaceRegex.ReplaceAll([]byte(value), []byte(" ")))
value = string(tabRegex.ReplaceAll([]byte(value), []byte("\t")))
if len(value) > 0 {
// Remove duplicate headers by appending the values of duplicates
// in their order of appearance.
headerMap[key] = append(headerMap[key], value)
}
}
var sanitizedHeaders []string
for header, values := range headerMap {
// There should be no spaces around the colon separating the header name
// from the header value or around the values themselves. The values
// should be separated by commas.
//
// NOTE: The semantics for headers without a value are not clear.
// However from specifications these should be edge-cases anyway and we
// should assume that there will be no canonical headers using empty
// values. Any such headers are discarded at the regexp stage above.
sanitizedHeaders = append(sanitizedHeaders, fmt.Sprintf("%s:%s", header, strings.Join(values, ",")))
}
return sanitizedHeaders
}
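// Illustrative sketch added by this editor, not part of the vendored diff: the
// sanitization rules above applied to one hypothetical extension header.
func exampleV4SanitizeHeaders() {
	in := []string{"  X-Goog-Meta-Foo :  bar   baz  "}
	fmt.Println(v4SanitizeHeaders(in)) // prints: [x-goog-meta-foo:bar baz]
}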
// SignedURL returns a URL for the specified object. Signed URLs allow
@ -184,28 +340,184 @@ type SignedURLOptions struct {
// Google account or signing in. For more information about the signed
// URLs, see https://cloud.google.com/storage/docs/accesscontrol#Signed-URLs.
func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) {
now := utcNow()
if err := validateOptions(opts, now); err != nil {
return "", err
}
switch opts.Scheme {
case SigningSchemeV2:
opts.Headers = v2SanitizeHeaders(opts.Headers)
return signedURLV2(bucket, name, opts)
case SigningSchemeV4:
opts.Headers = v4SanitizeHeaders(opts.Headers)
return signedURLV4(bucket, name, opts, now)
default: // SigningSchemeDefault
opts.Headers = v2SanitizeHeaders(opts.Headers)
return signedURLV2(bucket, name, opts)
}
}
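// Illustrative sketch added by this editor, not part of the vendored diff:
// generating a V4 signed URL. The bucket, object, service-account email and key
// bytes are placeholders; with SigningSchemeV4 the expiry must be within seven
// days (see validateOptions below).
func exampleSignedURLV4(pemKey []byte) (string, error) {
	return SignedURL("example-bucket", "backups/backup.tar.gz", &SignedURLOptions{
		GoogleAccessID: "velero@example-project.iam.gserviceaccount.com",
		PrivateKey:     pemKey,
		Method:         "GET",
		Expires:        time.Now().Add(24 * time.Hour),
		Scheme:         SigningSchemeV4,
	})
}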
func validateOptions(opts *SignedURLOptions, now time.Time) error {
if opts == nil {
return "", errors.New("storage: missing required SignedURLOptions")
return errors.New("storage: missing required SignedURLOptions")
}
if opts.GoogleAccessID == "" {
return "", errors.New("storage: missing required GoogleAccessID")
return errors.New("storage: missing required GoogleAccessID")
}
if (opts.PrivateKey == nil) == (opts.SignBytes == nil) {
return "", errors.New("storage: exactly one of PrivateKey or SignedBytes must be set")
return errors.New("storage: exactly one of PrivateKey or SignedBytes must be set")
}
if opts.Method == "" {
return "", errors.New("storage: missing required method option")
return errors.New("storage: missing required method option")
}
if opts.Expires.IsZero() {
return "", errors.New("storage: missing required expires option")
return errors.New("storage: missing required expires option")
}
if opts.MD5 != "" {
md5, err := base64.StdEncoding.DecodeString(opts.MD5)
if err != nil || len(md5) != 16 {
return "", errors.New("storage: invalid MD5 checksum")
return errors.New("storage: invalid MD5 checksum")
}
}
if opts.Scheme == SigningSchemeV4 {
cutoff := now.Add(604801 * time.Second) // 7 days + 1 second
if !opts.Expires.Before(cutoff) {
return errors.New("storage: expires must be within seven days from now")
}
}
return nil
}
const (
iso8601 = "20060102T150405Z"
yearMonthDay = "20060102"
)
// utcNow returns the current time in UTC and is a variable to allow for
// reassignment in tests to provide deterministic signed URL values.
var utcNow = func() time.Time {
return time.Now().UTC()
}
// extractHeaderNames takes in a series of key:value headers and returns the
// header names only.
func extractHeaderNames(kvs []string) []string {
var res []string
for _, header := range kvs {
nameValue := strings.Split(header, ":")
res = append(res, nameValue[0])
}
return res
}
// signedURLV4 creates a signed URL using the sigV4 algorithm.
func signedURLV4(bucket, name string, opts *SignedURLOptions, now time.Time) (string, error) {
buf := &bytes.Buffer{}
fmt.Fprintf(buf, "%s\n", opts.Method)
u := &url.URL{Path: bucket}
if name != "" {
u.Path += "/" + name
}
// Note: we have to add a / here because GCS does so auto-magically, despite
// Go's EscapedPath not doing so (and we have to exactly match their
// canonical query).
fmt.Fprintf(buf, "/%s\n", u.EscapedPath())
headerNames := append(extractHeaderNames(opts.Headers), "host")
if opts.ContentType != "" {
headerNames = append(headerNames, "content-type")
}
if opts.MD5 != "" {
headerNames = append(headerNames, "content-md5")
}
sort.Strings(headerNames)
signedHeaders := strings.Join(headerNames, ";")
timestamp := now.Format(iso8601)
credentialScope := fmt.Sprintf("%s/auto/storage/goog4_request", now.Format(yearMonthDay))
canonicalQueryString := url.Values{
"X-Goog-Algorithm": {"GOOG4-RSA-SHA256"},
"X-Goog-Credential": {fmt.Sprintf("%s/%s", opts.GoogleAccessID, credentialScope)},
"X-Goog-Date": {timestamp},
"X-Goog-Expires": {fmt.Sprintf("%d", int(opts.Expires.Sub(now).Seconds()))},
"X-Goog-SignedHeaders": {signedHeaders},
}
fmt.Fprintf(buf, "%s\n", canonicalQueryString.Encode())
u.Host = "storage.googleapis.com"
var headersWithValue []string
headersWithValue = append(headersWithValue, "host:"+u.Host)
headersWithValue = append(headersWithValue, opts.Headers...)
if opts.ContentType != "" {
headersWithValue = append(headersWithValue, "content-type:"+strings.TrimSpace(opts.ContentType))
}
if opts.MD5 != "" {
headersWithValue = append(headersWithValue, "content-md5:"+strings.TrimSpace(opts.MD5))
}
canonicalHeaders := strings.Join(sortHeadersByKey(headersWithValue), "\n")
fmt.Fprintf(buf, "%s\n\n", canonicalHeaders)
fmt.Fprintf(buf, "%s\n", signedHeaders)
fmt.Fprint(buf, "UNSIGNED-PAYLOAD")
sum := sha256.Sum256(buf.Bytes())
hexDigest := hex.EncodeToString(sum[:])
signBuf := &bytes.Buffer{}
fmt.Fprint(signBuf, "GOOG4-RSA-SHA256\n")
fmt.Fprintf(signBuf, "%s\n", timestamp)
fmt.Fprintf(signBuf, "%s\n", credentialScope)
fmt.Fprintf(signBuf, "%s", hexDigest)
signBytes := opts.SignBytes
if opts.PrivateKey != nil {
key, err := parseKey(opts.PrivateKey)
if err != nil {
return "", err
}
signBytes = func(b []byte) ([]byte, error) {
sum := sha256.Sum256(b)
return rsa.SignPKCS1v15(
rand.Reader,
key,
crypto.SHA256,
sum[:],
)
}
}
b, err := signBytes(signBuf.Bytes())
if err != nil {
return "", err
}
signature := hex.EncodeToString(b)
canonicalQueryString.Set("X-Goog-Signature", string(signature))
u.Scheme = "https"
u.RawQuery = canonicalQueryString.Encode()
return u.String(), nil
}
// takes a list of headerKey:headervalue1,headervalue2,etc and sorts by header
// key.
func sortHeadersByKey(hdrs []string) []string {
headersMap := map[string]string{}
var headersKeys []string
for _, h := range hdrs {
parts := strings.Split(h, ":")
k := parts[0]
v := parts[1]
headersMap[k] = v
headersKeys = append(headersKeys, k)
}
sort.Strings(headersKeys)
var sorted []string
for _, k := range headersKeys {
v := headersMap[k]
sorted = append(sorted, fmt.Sprintf("%s:%s", k, v))
}
return sorted
}
func signedURLV2(bucket, name string, opts *SignedURLOptions) (string, error) {
signBytes := opts.SignBytes
if opts.PrivateKey != nil {
key, err := parseKey(opts.PrivateKey)
@ -255,14 +567,15 @@ func SignedURL(bucket, name string, opts *SignedURLOptions) (string, error) {
// ObjectHandle provides operations on an object in a Google Cloud Storage bucket.
// Use BucketHandle.Object to get a handle.
type ObjectHandle struct {
c *Client
bucket string
object string
acl ACLHandle
gen int64 // a negative value indicates latest
conds *Conditions
encryptionKey []byte // AES-256 key
userProject string // for requester-pays buckets
c *Client
bucket string
object string
acl ACLHandle
gen int64 // a negative value indicates latest
conds *Conditions
encryptionKey []byte // AES-256 key
userProject string // for requester-pays buckets
readCompressed bool // Accept-Encoding: gzip
}
// ACL provides access to the object's access control list.
@ -285,7 +598,7 @@ func (o *ObjectHandle) Generation(gen int64) *ObjectHandle {
// If returns a new ObjectHandle that applies a set of preconditions.
// Preconditions already set on the ObjectHandle are ignored.
// Operations on the new handle will only occur if the preconditions are
// Operations on the new handle will return an error if the preconditions are not
// satisfied. See https://cloud.google.com/storage/docs/generations-preconditions
// for more details.
func (o *ObjectHandle) If(conds Conditions) *ObjectHandle {
@ -307,7 +620,10 @@ func (o *ObjectHandle) Key(encryptionKey []byte) *ObjectHandle {
// Attrs returns meta information about the object.
// ErrObjectNotExist will be returned if the object is not found.
func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) {
func (o *ObjectHandle) Attrs(ctx context.Context) (attrs *ObjectAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Attrs")
defer func() { trace.EndSpan(ctx, err) }()
if err := o.validate(); err != nil {
return nil, err
}
@ -322,7 +638,6 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) {
return nil, err
}
var obj *raw.Object
var err error
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
@ -337,7 +652,10 @@ func (o *ObjectHandle) Attrs(ctx context.Context) (*ObjectAttrs, error) {
// Update updates an object with the provided attributes.
// All zero-value attributes are ignored.
// ErrObjectNotExist will be returned if the object is not found.
func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (*ObjectAttrs, error) {
func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (oa *ObjectAttrs, err error) {
ctx = trace.StartSpan(ctx, "cloud.google.com/go/storage.Object.Update")
defer func() { trace.EndSpan(ctx, err) }()
if err := o.validate(); err != nil {
return nil, err
}
@ -346,11 +664,17 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
var forceSendFields, nullFields []string
if uattrs.ContentType != nil {
attrs.ContentType = optional.ToString(uattrs.ContentType)
forceSendFields = append(forceSendFields, "ContentType")
// For ContentType, sending the empty string is a no-op.
// Instead we send a null.
if attrs.ContentType == "" {
nullFields = append(nullFields, "ContentType")
} else {
forceSendFields = append(forceSendFields, "ContentType")
}
}
if uattrs.ContentLanguage != nil {
attrs.ContentLanguage = optional.ToString(uattrs.ContentLanguage)
// For ContentLanguage It's an error to send the empty string.
// For ContentLanguage it's an error to send the empty string.
// Instead we send a null.
if attrs.ContentLanguage == "" {
nullFields = append(nullFields, "ContentLanguage")
@ -370,6 +694,14 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
attrs.CacheControl = optional.ToString(uattrs.CacheControl)
forceSendFields = append(forceSendFields, "CacheControl")
}
if uattrs.EventBasedHold != nil {
attrs.EventBasedHold = optional.ToBool(uattrs.EventBasedHold)
forceSendFields = append(forceSendFields, "EventBasedHold")
}
if uattrs.TemporaryHold != nil {
attrs.TemporaryHold = optional.ToBool(uattrs.TemporaryHold)
forceSendFields = append(forceSendFields, "TemporaryHold")
}
if uattrs.Metadata != nil {
attrs.Metadata = uattrs.Metadata
if len(attrs.Metadata) == 0 {
@ -395,11 +727,13 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
if o.userProject != "" {
call.UserProject(o.userProject)
}
if uattrs.PredefinedACL != "" {
call.PredefinedAcl(uattrs.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), o.encryptionKey, false); err != nil {
return nil, err
}
var obj *raw.Object
var err error
setClientHeader(call.Header())
err = runWithRetry(ctx, func() error { obj, err = call.Do(); return err })
if e, ok := err.(*googleapi.Error); ok && e.Code == http.StatusNotFound {
@ -411,6 +745,16 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
return newObject(obj), nil
}
// BucketName returns the name of the bucket.
func (o *ObjectHandle) BucketName() string {
return o.bucket
}
// ObjectName returns the name of the object.
func (o *ObjectHandle) ObjectName() string {
return o.object
}
// ObjectAttrsToUpdate is used to update the attributes of an object.
// Only fields set to non-nil values will be updated.
// Set a field to its zero value to delete it.
@ -423,6 +767,8 @@ func (o *ObjectHandle) Update(ctx context.Context, uattrs ObjectAttrsToUpdate) (
// Metadata: map[string]string{},
// }
type ObjectAttrsToUpdate struct {
EventBasedHold optional.Bool
TemporaryHold optional.Bool
ContentType optional.String
ContentLanguage optional.String
ContentEncoding optional.String
@ -430,6 +776,10 @@ type ObjectAttrsToUpdate struct {
CacheControl optional.String
Metadata map[string]string // set to map[string]string{} to delete
ACL []ACLRule
// If not empty, applies a predefined set of access controls. ACL must be nil.
// See https://cloud.google.com/storage/docs/json_api/v1/objects/patch.
PredefinedACL string
}
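// Illustrative sketch added by this editor, not part of the vendored diff: using
// the new hold fields. The handle is assumed to come from
// client.Bucket(...).Object(...); fields left at their zero value are not sent.
func exampleSetTemporaryHold(ctx context.Context, obj *ObjectHandle) (*ObjectAttrs, error) {
	return obj.Update(ctx, ObjectAttrsToUpdate{TemporaryHold: true})
}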
// Delete deletes the single specified object.
@ -458,139 +808,13 @@ func (o *ObjectHandle) Delete(ctx context.Context) error {
return err
}
// NewReader creates a new Reader to read the contents of the
// object.
// ErrObjectNotExist will be returned if the object is not found.
//
// The caller must call Close on the returned Reader when done reading.
func (o *ObjectHandle) NewReader(ctx context.Context) (*Reader, error) {
return o.NewRangeReader(ctx, 0, -1)
// ReadCompressed when true causes the read to happen without decompressing.
func (o *ObjectHandle) ReadCompressed(compressed bool) *ObjectHandle {
o2 := *o
o2.readCompressed = compressed
return &o2
}
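// Illustrative sketch added by this editor, not part of the vendored diff: reading
// a gzip-encoded object as stored, without decompressive transcoding. Names are
// placeholders.
func exampleReadCompressed(ctx context.Context, client *Client) (*Reader, error) {
	return client.Bucket("example-bucket").
		Object("logs/app.log.gz").
		ReadCompressed(true).
		NewReader(ctx)
}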
// NewRangeReader reads part of an object, reading at most length bytes
// starting at the given offset. If length is negative, the object is read
// until the end.
func (o *ObjectHandle) NewRangeReader(ctx context.Context, offset, length int64) (*Reader, error) {
if err := o.validate(); err != nil {
return nil, err
}
if offset < 0 {
return nil, fmt.Errorf("storage: invalid offset %d < 0", offset)
}
if o.conds != nil {
if err := o.conds.validate("NewRangeReader"); err != nil {
return nil, err
}
}
u := &url.URL{
Scheme: "https",
Host: "storage.googleapis.com",
Path: fmt.Sprintf("/%s/%s", o.bucket, o.object),
RawQuery: conditionsQuery(o.gen, o.conds),
}
verb := "GET"
if length == 0 {
verb = "HEAD"
}
req, err := http.NewRequest(verb, u.String(), nil)
if err != nil {
return nil, err
}
req = withContext(req, ctx)
if length < 0 && offset > 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-", offset))
} else if length > 0 {
req.Header.Set("Range", fmt.Sprintf("bytes=%d-%d", offset, offset+length-1))
}
if o.userProject != "" {
req.Header.Set("X-Goog-User-Project", o.userProject)
}
if err := setEncryptionHeaders(req.Header, o.encryptionKey, false); err != nil {
return nil, err
}
var res *http.Response
err = runWithRetry(ctx, func() error {
res, err = o.c.hc.Do(req)
if err != nil {
return err
}
if res.StatusCode == http.StatusNotFound {
res.Body.Close()
return ErrObjectNotExist
}
if res.StatusCode < 200 || res.StatusCode > 299 {
body, _ := ioutil.ReadAll(res.Body)
res.Body.Close()
return &googleapi.Error{
Code: res.StatusCode,
Header: res.Header,
Body: string(body),
}
}
if offset > 0 && length != 0 && res.StatusCode != http.StatusPartialContent {
res.Body.Close()
return errors.New("storage: partial request not satisfied")
}
return nil
})
if err != nil {
return nil, err
}
var size int64 // total size of object, even if a range was requested.
if res.StatusCode == http.StatusPartialContent {
cr := strings.TrimSpace(res.Header.Get("Content-Range"))
if !strings.HasPrefix(cr, "bytes ") || !strings.Contains(cr, "/") {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
}
size, err = strconv.ParseInt(cr[strings.LastIndex(cr, "/")+1:], 10, 64)
if err != nil {
return nil, fmt.Errorf("storage: invalid Content-Range %q", cr)
}
} else {
size = res.ContentLength
}
remain := res.ContentLength
body := res.Body
if length == 0 {
remain = 0
body.Close()
body = emptyBody
}
var (
checkCRC bool
crc uint32
)
// Even if there is a CRC header, we can't compute the hash on partial data.
if remain == size {
crc, checkCRC = parseCRC32c(res)
}
return &Reader{
body: body,
size: size,
remain: remain,
contentType: res.Header.Get("Content-Type"),
wantCRC: crc,
checkCRC: checkCRC,
}, nil
}
func parseCRC32c(res *http.Response) (uint32, bool) {
const prefix = "crc32c="
for _, spec := range res.Header["X-Goog-Hash"] {
if strings.HasPrefix(spec, prefix) {
c, err := decodeUint32(spec[len(prefix):])
if err == nil {
return c, true
}
}
}
return 0, false
}
var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// NewWriter returns a storage Writer that writes to the GCS object
// associated with this ObjectHandle.
//
@ -604,7 +828,8 @@ var emptyBody = ioutil.NopCloser(strings.NewReader(""))
// attribute is specified, the content type will be automatically sniffed
// using net/http.DetectContentType.
//
// It is the caller's responsibility to call Close when writing is done.
// It is the caller's responsibility to call Close when writing is done. To
// stop writing without saving the data, cancel the context.
func (o *ObjectHandle) NewWriter(ctx context.Context) *Writer {
return &Writer{
ctx: ctx,
@ -628,11 +853,10 @@ func (o *ObjectHandle) validate() error {
return nil
}
// parseKey converts the binary contents of a private key file
// to an *rsa.PrivateKey. It detects whether the private key is in a
// PEM container or not. If so, it extracts the the private key
// from PEM container before conversion. It only supports PEM
// containers with no passphrase.
// parseKey converts the binary contents of a private key file to an
// *rsa.PrivateKey. It detects whether the private key is in a PEM container or
// not. If so, it extracts the private key from PEM container before
// conversion. It only supports PEM containers with no passphrase.
func parseKey(key []byte) (*rsa.PrivateKey, error) {
if block, _ := pem.Decode(key); block != nil {
key = block.Bytes
@ -651,34 +875,26 @@ func parseKey(key []byte) (*rsa.PrivateKey, error) {
return parsed, nil
}
func toRawObjectACL(oldACL []ACLRule) []*raw.ObjectAccessControl {
var acl []*raw.ObjectAccessControl
if len(oldACL) > 0 {
acl = make([]*raw.ObjectAccessControl, len(oldACL))
for i, rule := range oldACL {
acl[i] = &raw.ObjectAccessControl{
Entity: string(rule.Entity),
Role: string(rule.Role),
}
}
}
return acl
}
// toRawObject copies the editable attributes from o to the raw library's Object type.
func (o *ObjectAttrs) toRawObject(bucket string) *raw.Object {
acl := toRawObjectACL(o.ACL)
var ret string
if !o.RetentionExpirationTime.IsZero() {
ret = o.RetentionExpirationTime.Format(time.RFC3339)
}
return &raw.Object{
Bucket: bucket,
Name: o.Name,
ContentType: o.ContentType,
ContentEncoding: o.ContentEncoding,
ContentLanguage: o.ContentLanguage,
CacheControl: o.CacheControl,
ContentDisposition: o.ContentDisposition,
StorageClass: o.StorageClass,
Acl: acl,
Metadata: o.Metadata,
Bucket: bucket,
Name: o.Name,
EventBasedHold: o.EventBasedHold,
TemporaryHold: o.TemporaryHold,
RetentionExpirationTime: ret,
ContentType: o.ContentType,
ContentEncoding: o.ContentEncoding,
ContentLanguage: o.ContentLanguage,
CacheControl: o.CacheControl,
ContentDisposition: o.ContentDisposition,
StorageClass: o.StorageClass,
Acl: toRawObjectACL(o.ACL),
Metadata: o.Metadata,
}
}
@ -702,9 +918,32 @@ type ObjectAttrs struct {
// headers when serving the object data.
CacheControl string
// EventBasedHold specifies whether an object is under event-based hold. New
// objects created in a bucket whose DefaultEventBasedHold is set will
// default to that value.
EventBasedHold bool
// TemporaryHold specifies whether an object is under temporary hold. While
// this flag is set to true, the object is protected against deletion and
// overwrites.
TemporaryHold bool
// RetentionExpirationTime is a server-determined value that specifies the
// earliest time that the object's retention period expires.
// This is a read-only field.
RetentionExpirationTime time.Time
// ACL is the list of access control rules for the object.
ACL []ACLRule
// If not empty, applies a predefined set of access controls. It should be set
// only when writing, copying or composing an object. When copying or composing,
// it acts as the destinationPredefinedAcl parameter.
// PredefinedACL is always empty for ObjectAttrs returned from the service.
// See https://cloud.google.com/storage/docs/json_api/v1/objects/insert
// for valid values.
PredefinedACL string
// Owner is the owner of the object. This field is read-only.
//
// If non-zero, it is in the form of "user-<userId>".
@ -720,11 +959,16 @@ type ObjectAttrs struct {
// sent in the response headers.
ContentDisposition string
// MD5 is the MD5 hash of the object's content. This field is read-only.
// MD5 is the MD5 hash of the object's content. This field is read-only,
// except when used from a Writer. If set on a Writer, the uploaded
// data is rejected if its MD5 hash does not match this field.
MD5 []byte
// CRC32C is the CRC32 checksum of the object's content using
// the Castagnoli93 polynomial. This field is read-only.
// the Castagnoli93 polynomial. This field is read-only, except when
// used from a Writer. If set on a Writer and Writer.SendCRC32C
// is true, the uploaded data is rejected if its CRC32c hash does not
// match this field.
CRC32C uint32
// MediaLink is an URL to the object's content. This field is read-only.
@ -748,10 +992,8 @@ type ObjectAttrs struct {
// StorageClass is the storage class of the object.
// This value defines how objects in the bucket are stored and
// determines the SLA and the cost of storage. Typical values are
// "MULTI_REGIONAL", "REGIONAL", "NEARLINE", "COLDLINE", "STANDARD"
// and "DURABLE_REDUCED_AVAILABILITY".
// It defaults to "STANDARD", which is equivalent to "MULTI_REGIONAL"
// or "REGIONAL" depending on the bucket's location settings.
// "NEARLINE", "COLDLINE" and "STANDARD".
// It defaults to "STANDARD".
StorageClass string
// Created is the time the object was created. This field is read-only.
@ -773,11 +1015,23 @@ type ObjectAttrs struct {
// encryption in Google Cloud Storage.
CustomerKeySHA256 string
// Cloud KMS key name, in the form
// projects/P/locations/L/keyRings/R/cryptoKeys/K, used to encrypt this object,
// if the object is encrypted by such a key.
//
// Providing both a KMSKeyName and a customer-supplied encryption key (via
// ObjectHandle.Key) will result in an error when writing an object.
KMSKeyName string
// Prefix is set only for ObjectAttrs which represent synthetic "directory
// entries" when iterating over buckets using Query.Delimiter. See
// ObjectIterator.Next. When set, no other fields in ObjectAttrs will be
// populated.
Prefix string
// Etag is the HTTP/1.1 Entity tag for the object.
// This field is read-only.
Etag string
}
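// Illustrative sketch added by this editor, not part of the vendored diff: writing
// an object encrypted with a Cloud KMS key by setting KMSKeyName on the Writer's
// embedded ObjectAttrs, which is the mechanism the Velero GCP object store's
// kmsKeyName config relies on. The key resource name, bucket and object are
// placeholders; KMSKeyName cannot be combined with a customer-supplied key
// (ObjectHandle.Key).
func exampleWriteWithKMSKey(ctx context.Context, client *Client, data []byte) error {
	w := client.Bucket("example-backup-bucket").Object("backups/backup.tar.gz").NewWriter(ctx)
	w.KMSKeyName = "projects/example-project/locations/us-central1/keyRings/velero/cryptoKeys/backups"
	if _, err := w.Write(data); err != nil {
		w.Close()
		return err
	}
	// The upload is only committed once Close returns nil.
	return w.Close()
}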
// convertTime converts a time in RFC3339 format to time.Time.
@ -794,13 +1048,6 @@ func newObject(o *raw.Object) *ObjectAttrs {
if o == nil {
return nil
}
acl := make([]ACLRule, len(o.Acl))
for i, rule := range o.Acl {
acl[i] = ACLRule{
Entity: ACLEntity(rule.Entity),
Role: ACLRole(rule.Role),
}
}
owner := ""
if o.Owner != nil {
owner = o.Owner.Entity
@ -812,27 +1059,32 @@ func newObject(o *raw.Object) *ObjectAttrs {
sha256 = o.CustomerEncryption.KeySha256
}
return &ObjectAttrs{
Bucket: o.Bucket,
Name: o.Name,
ContentType: o.ContentType,
ContentLanguage: o.ContentLanguage,
CacheControl: o.CacheControl,
ACL: acl,
Owner: owner,
ContentEncoding: o.ContentEncoding,
ContentDisposition: o.ContentDisposition,
Size: int64(o.Size),
MD5: md5,
CRC32C: crc32c,
MediaLink: o.MediaLink,
Metadata: o.Metadata,
Generation: o.Generation,
Metageneration: o.Metageneration,
StorageClass: o.StorageClass,
CustomerKeySHA256: sha256,
Created: convertTime(o.TimeCreated),
Deleted: convertTime(o.TimeDeleted),
Updated: convertTime(o.Updated),
Bucket: o.Bucket,
Name: o.Name,
ContentType: o.ContentType,
ContentLanguage: o.ContentLanguage,
CacheControl: o.CacheControl,
EventBasedHold: o.EventBasedHold,
TemporaryHold: o.TemporaryHold,
RetentionExpirationTime: convertTime(o.RetentionExpirationTime),
ACL: toObjectACLRules(o.Acl),
Owner: owner,
ContentEncoding: o.ContentEncoding,
ContentDisposition: o.ContentDisposition,
Size: int64(o.Size),
MD5: md5,
CRC32C: crc32c,
MediaLink: o.MediaLink,
Metadata: o.Metadata,
Generation: o.Generation,
Metageneration: o.Metageneration,
StorageClass: o.StorageClass,
CustomerKeySHA256: sha256,
KMSKeyName: o.KmsKeyName,
Created: convertTime(o.TimeCreated),
Deleted: convertTime(o.TimeDeleted),
Updated: convertTime(o.Updated),
Etag: o.Etag,
}
}
@ -875,17 +1127,6 @@ type Query struct {
Versions bool
}
// contentTyper implements ContentTyper to enable an
// io.ReadCloser to specify its MIME type.
type contentTyper struct {
io.Reader
t string
}
func (c *contentTyper) ContentType() string {
return c.t
}
// Conditions constrain methods to act on specific generations of
// objects.
//
@ -1115,4 +1356,12 @@ func setEncryptionHeaders(headers http.Header, key []byte, copySource bool) erro
return nil
}
// TODO(jbd): Add storage.objects.watch.
// ServiceAccount fetches the email address of the given project's Google Cloud Storage service account.
func (c *Client) ServiceAccount(ctx context.Context, projectID string) (string, error) {
r := c.raw.Projects.ServiceAccount.Get(projectID)
res, err := r.Context(ctx).Do()
if err != nil {
return "", err
}
return res.EmailAddress, nil
}

@ -1,4 +1,4 @@
// Copyright 2014 Google Inc. All Rights Reserved.
// Copyright 2014 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
@ -15,13 +15,14 @@
package storage
import (
"context"
"encoding/base64"
"errors"
"fmt"
"io"
"sync"
"unicode/utf8"
"golang.org/x/net/context"
"google.golang.org/api/googleapi"
raw "google.golang.org/api/storage/v1"
)
@ -36,6 +37,8 @@ type Writer struct {
// SendCRC specifies whether to transmit a CRC32C field. It should be set
// to true in addition to setting the Writer's CRC32C field, because zero
// is a valid CRC and normally a zero would not be transmitted.
// If a CRC32C is sent, and the data written does not match the checksum,
// the write will be rejected.
SendCRC32C bool
// ChunkSize controls the maximum number of bytes of the object that the
@ -45,8 +48,11 @@ type Writer struct {
// to the nearest multiple of 256K. If zero, chunking will be disabled and
// the object will be uploaded in a single request.
//
// ChunkSize will default to a reasonable value. Any custom configuration
// must be done before the first Write call.
// ChunkSize will default to a reasonable value. If you perform many concurrent
// writes of small objects, you may wish set ChunkSize to a value that matches
// your objects' sizes to avoid consuming large amounts of memory.
//
// ChunkSize must be set before the first Write call.
ChunkSize int
// ProgressFunc can be used to monitor the progress of a large write.
@ -66,8 +72,10 @@ type Writer struct {
pw *io.PipeWriter
donec chan struct{} // closed after err and obj are set.
err error
obj *ObjectAttrs
mu sync.Mutex
err error
}
func (w *Writer) open() error {
@ -80,12 +88,17 @@ func (w *Writer) open() error {
if !utf8.ValidString(attrs.Name) {
return fmt.Errorf("storage: object name %q is not valid UTF-8", attrs.Name)
}
if attrs.KMSKeyName != "" && w.o.encryptionKey != nil {
return errors.New("storage: cannot use KMSKeyName with a customer-supplied encryption key")
}
pr, pw := io.Pipe()
w.pw = pw
w.opened = true
go w.monitorCancel()
if w.ChunkSize < 0 {
return errors.New("storage: Writer.ChunkSize must non-negative")
return errors.New("storage: Writer.ChunkSize must be non-negative")
}
mediaOpts := []googleapi.MediaOption{
googleapi.ChunkSize(w.ChunkSize),
@ -104,16 +117,28 @@ func (w *Writer) open() error {
if w.MD5 != nil {
rawObj.Md5Hash = base64.StdEncoding.EncodeToString(w.MD5)
}
if w.o.c.envHost != "" {
w.o.c.raw.BasePath = fmt.Sprintf("%s://%s", w.o.c.scheme, w.o.c.envHost)
}
call := w.o.c.raw.Objects.Insert(w.o.bucket, rawObj).
Media(pr, mediaOpts...).
Projection("full").
Context(w.ctx)
if w.ProgressFunc != nil {
call.ProgressUpdater(func(n, _ int64) { w.ProgressFunc(n) })
}
if attrs.KMSKeyName != "" {
call.KmsKeyName(attrs.KMSKeyName)
}
if attrs.PredefinedACL != "" {
call.PredefinedAcl(attrs.PredefinedACL)
}
if err := setEncryptionHeaders(call.Header(), w.o.encryptionKey, false); err != nil {
w.mu.Lock()
w.err = err
pr.CloseWithError(w.err)
w.mu.Unlock()
pr.CloseWithError(err)
return
}
var resp *raw.Object
@ -123,18 +148,22 @@ func (w *Writer) open() error {
call.UserProject(w.o.userProject)
}
setClientHeader(call.Header())
// We will only retry here if the initial POST, which obtains a URI for
// the resumable upload, fails with a retryable error. The upload itself
// has its own retry logic.
err = runWithRetry(w.ctx, func() error {
var err2 error
resp, err2 = call.Do()
return err2
})
// The internals that perform call.Do automatically retry
// uploading chunks, hence no need to add retries here.
// See issue https://github.com/googleapis/google-cloud-go/issues/1507.
//
// However, since this whole call's internals involve making the initial
// resumable upload session, the first HTTP request is not retried.
// TODO: Follow-up with google.golang.org/gensupport to solve
// https://github.com/googleapis/google-api-go-client/issues/392.
resp, err = call.Do()
}
if err != nil {
w.mu.Lock()
w.err = err
pr.CloseWithError(w.err)
w.mu.Unlock()
pr.CloseWithError(err)
return
}
w.obj = newObject(resp)
@ -149,15 +178,30 @@ func (w *Writer) open() error {
// use the error returned from Writer.Close to determine if
// the upload was successful.
func (w *Writer) Write(p []byte) (n int, err error) {
if w.err != nil {
return 0, w.err
w.mu.Lock()
werr := w.err
w.mu.Unlock()
if werr != nil {
return 0, werr
}
if !w.opened {
if err := w.open(); err != nil {
return 0, err
}
}
return w.pw.Write(p)
n, err = w.pw.Write(p)
if err != nil {
w.mu.Lock()
werr := w.err
w.mu.Unlock()
// Preserve existing functionality that when context is canceled, Write will return
// context.Canceled instead of "io: read/write on closed pipe". This hides the
// pipe implementation detail from users and makes Write seem as though it's an RPC.
if werr == context.Canceled || werr == context.DeadlineExceeded {
return n, werr
}
}
return n, err
}
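// Illustrative sketch added by this editor, not part of the vendored diff: aborting
// an in-progress upload by canceling the context passed to NewWriter, matching the
// behavior described above. The object handle and source reader are assumed to exist.
func exampleAbortableUpload(ctx context.Context, obj *ObjectHandle, src io.Reader) error {
	ctx, cancel := context.WithCancel(ctx)
	defer cancel()
	w := obj.NewWriter(ctx)
	if _, err := io.Copy(w, src); err != nil {
		cancel()  // abandon the upload; nothing is saved
		w.Close() // typically reports the cancellation error
		return err
	}
	return w.Close()
}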
// Close completes the write operation and flushes any buffered data.
@ -169,15 +213,39 @@ func (w *Writer) Close() error {
return err
}
}
// Closing either the read or write causes the entire pipe to close.
if err := w.pw.Close(); err != nil {
return err
}
<-w.donec
w.mu.Lock()
defer w.mu.Unlock()
return w.err
}
// monitorCancel is intended to be used as a background goroutine. It monitors the
// context, and when it observes that the context has been canceled, it manually
// closes things that do not take a context.
func (w *Writer) monitorCancel() {
select {
case <-w.ctx.Done():
w.mu.Lock()
werr := w.ctx.Err()
w.err = werr
w.mu.Unlock()
// Closing either the read or write causes the entire pipe to close.
w.CloseWithError(werr)
case <-w.donec:
}
}
// CloseWithError aborts the write operation with the provided error.
// CloseWithError always returns nil.
//
// Deprecated: cancel the context passed to NewWriter instead.
func (w *Writer) CloseWithError(err error) error {
if !w.opened {
return nil

@ -1,24 +0,0 @@
package gax
import "bytes"
// XGoogHeader is for use by the Google Cloud Libraries only.
//
// XGoogHeader formats key-value pairs.
// The resulting string is suitable for x-goog-api-client header.
func XGoogHeader(keyval ...string) string {
if len(keyval) == 0 {
return ""
}
if len(keyval)%2 != 0 {
panic("gax.Header: odd argument count")
}
var buf bytes.Buffer
for i := 0; i < len(keyval); i += 2 {
buf.WriteByte(' ')
buf.WriteString(keyval[i])
buf.WriteByte('/')
buf.WriteString(keyval[i+1])
}
return buf.String()[1:]
}

@ -1,176 +0,0 @@
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package gax
import (
"errors"
"fmt"
"strings"
)
type matcher interface {
match([]string) (int, error)
String() string
}
type segment struct {
matcher
name string
}
type labelMatcher string
func (ls labelMatcher) match(segments []string) (int, error) {
if len(segments) == 0 {
return 0, fmt.Errorf("expected %s but no more segments found", ls)
}
if segments[0] != string(ls) {
return 0, fmt.Errorf("expected %s but got %s", ls, segments[0])
}
return 1, nil
}
func (ls labelMatcher) String() string {
return string(ls)
}
type wildcardMatcher int
func (wm wildcardMatcher) match(segments []string) (int, error) {
if len(segments) == 0 {
return 0, errors.New("no more segments found")
}
return 1, nil
}
func (wm wildcardMatcher) String() string {
return "*"
}
type pathWildcardMatcher int
func (pwm pathWildcardMatcher) match(segments []string) (int, error) {
length := len(segments) - int(pwm)
if length <= 0 {
return 0, errors.New("not sufficient segments are supplied for path wildcard")
}
return length, nil
}
func (pwm pathWildcardMatcher) String() string {
return "**"
}
type ParseError struct {
Pos int
Template string
Message string
}
func (pe ParseError) Error() string {
return fmt.Sprintf("at %d of template '%s', %s", pe.Pos, pe.Template, pe.Message)
}
// PathTemplate manages the template to build and match with paths used
// by API services. It holds a template and variable names in it, and
// it can extract matched patterns from a path string or build a path
// string from a binding.
//
// See http.proto in github.com/googleapis/googleapis/ for the details of
// the template syntax.
type PathTemplate struct {
segments []segment
}
// NewPathTemplate parses a path template, and returns a PathTemplate
// instance if successful.
func NewPathTemplate(template string) (*PathTemplate, error) {
return parsePathTemplate(template)
}
// MustCompilePathTemplate is like NewPathTemplate but panics if the
// expression cannot be parsed. It simplifies safe initialization of
// global variables holding compiled regular expressions.
func MustCompilePathTemplate(template string) *PathTemplate {
pt, err := NewPathTemplate(template)
if err != nil {
panic(err)
}
return pt
}
// Match attempts to match the given path with the template, and returns
// the mapping of the variable name to the matched pattern string.
func (pt *PathTemplate) Match(path string) (map[string]string, error) {
paths := strings.Split(path, "/")
values := map[string]string{}
for _, segment := range pt.segments {
length, err := segment.match(paths)
if err != nil {
return nil, err
}
if segment.name != "" {
value := strings.Join(paths[:length], "/")
if oldValue, ok := values[segment.name]; ok {
values[segment.name] = oldValue + "/" + value
} else {
values[segment.name] = value
}
}
paths = paths[length:]
}
if len(paths) != 0 {
return nil, fmt.Errorf("Trailing path %s remains after the matching", strings.Join(paths, "/"))
}
return values, nil
}
// Render creates a path string from its template and the binding from
// the variable name to the value.
func (pt *PathTemplate) Render(binding map[string]string) (string, error) {
result := make([]string, 0, len(pt.segments))
var lastVariableName string
for _, segment := range pt.segments {
name := segment.name
if lastVariableName != "" && name == lastVariableName {
continue
}
lastVariableName = name
if name == "" {
result = append(result, segment.String())
} else if value, ok := binding[name]; ok {
result = append(result, value)
} else {
return "", fmt.Errorf("%s is not found", name)
}
}
built := strings.Join(result, "/")
return built, nil
}

@ -1,227 +0,0 @@
// Copyright 2016, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package gax
import (
"fmt"
"io"
"strings"
)
// This parser follows the syntax of path templates, from
// https://github.com/googleapis/googleapis/blob/master/google/api/http.proto.
// The differences are that there is no custom verb, we allow the initial slash
// to be absent, and that we are not strict as
// https://tools.ietf.org/html/rfc6570 about the characters in identifiers and
// literals.
type pathTemplateParser struct {
r *strings.Reader
runeCount int // the number of the current rune in the original string
nextVar int // the number to use for the next unnamed variable
seenName map[string]bool // names we've seen already
seenPathWildcard bool // have we seen "**" already?
}
func parsePathTemplate(template string) (pt *PathTemplate, err error) {
p := &pathTemplateParser{
r: strings.NewReader(template),
seenName: map[string]bool{},
}
// Handle panics with strings like errors.
// See pathTemplateParser.error, below.
defer func() {
if x := recover(); x != nil {
errmsg, ok := x.(errString)
if !ok {
panic(x)
}
pt = nil
err = ParseError{p.runeCount, template, string(errmsg)}
}
}()
segs := p.template()
// If there is a path wildcard, set its length. We can't do this
// until we know how many segments we've got all together.
for i, seg := range segs {
if _, ok := seg.matcher.(pathWildcardMatcher); ok {
segs[i].matcher = pathWildcardMatcher(len(segs) - i - 1)
break
}
}
return &PathTemplate{segments: segs}, nil
}
// Used to indicate errors "thrown" by this parser. We don't use string because
// many parts of the standard library panic with strings.
type errString string
// Terminates parsing immediately with an error.
func (p *pathTemplateParser) error(msg string) {
panic(errString(msg))
}
// Template = [ "/" ] Segments
func (p *pathTemplateParser) template() []segment {
var segs []segment
if p.consume('/') {
// Initial '/' needs an initial empty matcher.
segs = append(segs, segment{matcher: labelMatcher("")})
}
return append(segs, p.segments("")...)
}
// Segments = Segment { "/" Segment }
func (p *pathTemplateParser) segments(name string) []segment {
var segs []segment
for {
subsegs := p.segment(name)
segs = append(segs, subsegs...)
if !p.consume('/') {
break
}
}
return segs
}
// Segment = "*" | "**" | LITERAL | Variable
func (p *pathTemplateParser) segment(name string) []segment {
if p.consume('*') {
if name == "" {
name = fmt.Sprintf("$%d", p.nextVar)
p.nextVar++
}
if p.consume('*') {
if p.seenPathWildcard {
p.error("multiple '**' disallowed")
}
p.seenPathWildcard = true
// We'll change 0 to the right number at the end.
return []segment{{name: name, matcher: pathWildcardMatcher(0)}}
}
return []segment{{name: name, matcher: wildcardMatcher(0)}}
}
if p.consume('{') {
if name != "" {
p.error("recursive named bindings are not allowed")
}
return p.variable()
}
return []segment{{name: name, matcher: labelMatcher(p.literal())}}
}
// Variable = "{" FieldPath [ "=" Segments ] "}"
// "{" is already consumed.
func (p *pathTemplateParser) variable() []segment {
// Simplification: treat FieldPath as LITERAL, instead of IDENT { '.' IDENT }
name := p.literal()
if p.seenName[name] {
p.error(name + " appears multiple times")
}
p.seenName[name] = true
var segs []segment
if p.consume('=') {
segs = p.segments(name)
} else {
// "{var}" is equivalent to "{var=*}"
segs = []segment{{name: name, matcher: wildcardMatcher(0)}}
}
if !p.consume('}') {
p.error("expected '}'")
}
return segs
}
// A literal is any sequence of characters other than a few special ones.
// The list of stop characters is not quite the same as in the template RFC.
func (p *pathTemplateParser) literal() string {
lit := p.consumeUntil("/*}{=")
if lit == "" {
p.error("empty literal")
}
return lit
}
// Read runes until EOF or one of the runes in stopRunes is encountered.
// If the latter, unread the stop rune. Return the accumulated runes as a string.
func (p *pathTemplateParser) consumeUntil(stopRunes string) string {
var runes []rune
for {
r, ok := p.readRune()
if !ok {
break
}
if strings.IndexRune(stopRunes, r) >= 0 {
p.unreadRune()
break
}
runes = append(runes, r)
}
return string(runes)
}
// If the next rune is r, consume it and return true.
// Otherwise, leave the input unchanged and return false.
func (p *pathTemplateParser) consume(r rune) bool {
rr, ok := p.readRune()
if !ok {
return false
}
if r == rr {
return true
}
p.unreadRune()
return false
}
// Read the next rune from the input. Return it.
// The second return value is false at EOF.
func (p *pathTemplateParser) readRune() (rune, bool) {
r, _, err := p.r.ReadRune()
if err == io.EOF {
return r, false
}
if err != nil {
p.error(err.Error())
}
p.runeCount++
return r, true
}
// Put the last rune that was read back on the input.
func (p *pathTemplateParser) unreadRune() {
if err := p.r.UnreadRune(); err != nil {
p.error(err.Error())
}
p.runeCount--
}

@ -113,6 +113,7 @@ type Backoff struct {
cur time.Duration
}
// Pause returns the next time.Duration that the caller should use to backoff.
func (bo *Backoff) Pause() time.Duration {
if bo.Initial == 0 {
bo.Initial = time.Second
@ -126,10 +127,11 @@ func (bo *Backoff) Pause() time.Duration {
if bo.Multiplier < 1 {
bo.Multiplier = 2
}
// Select a duration between zero and the current max. It might seem counterintuitive to
// have so much jitter, but https://www.awsarchitectureblog.com/2015/03/backoff.html
// argues that that is the best strategy.
d := time.Duration(rand.Int63n(int64(bo.cur)))
// Select a duration between 1ns and the current max. It might seem
// counterintuitive to have so much jitter, but
// https://www.awsarchitectureblog.com/2015/03/backoff.html argues that
// that is the best strategy.
d := time.Duration(1 + rand.Int63n(int64(bo.cur)))
bo.cur = time.Duration(float64(bo.cur) * bo.Multiplier)
if bo.cur > bo.Max {
bo.cur = bo.Max
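
A minimal sketch of driving a retry loop with gax.Backoff as amended above; doRequest and isTransient are hypothetical placeholders for a real RPC and its transient-error check:

package main

import (
    "context"
    "errors"
    "fmt"
    "time"

    gax "github.com/googleapis/gax-go/v2"
)

// retryWithBackoff retries doRequest until it succeeds, returns a
// non-transient error, or the context is cancelled.
func retryWithBackoff(ctx context.Context, doRequest func(context.Context) error, isTransient func(error) bool) error {
    bo := gax.Backoff{Initial: 100 * time.Millisecond, Max: 2 * time.Second, Multiplier: 2}
    for {
        err := doRequest(ctx)
        if err == nil || !isTransient(err) {
            return err
        }
        // With the change above, Pause always returns at least 1ns, so the
        // jitter can never produce a zero-length sleep.
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-time.After(bo.Pause()):
        }
    }
}

func main() {
    attempts := 0
    err := retryWithBackoff(context.Background(), func(context.Context) error {
        attempts++
        if attempts < 3 {
            return errors.New("transient")
        }
        return nil
    }, func(error) bool { return true })
    fmt.Println(attempts, err) // 3 <nil>
}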
@ -143,10 +145,12 @@ func (o grpcOpt) Resolve(s *CallSettings) {
s.GRPC = o
}
// WithGRPCOptions allows passing gRPC call options during client creation.
func WithGRPCOptions(opt ...grpc.CallOption) CallOption {
return grpcOpt(append([]grpc.CallOption(nil), opt...))
}
// CallSettings allow fine-grained control over how calls are made.
type CallSettings struct {
// Retry returns a Retryer to be used to control retry logic of a method call.
// If Retry is nil or the returned Retryer is nil, the call will not be retried.

View File

@ -33,8 +33,7 @@
// Application code will rarely need to use this library directly.
// However, code generated automatically from API definition files can use it
// to simplify code generation and to provide more convenient and idiomatic API surfaces.
//
// This project is currently experimental and not supported.
package gax
const Version = "0.1.0"
// Version specifies the gax-go version being used.
const Version = "2.0.4"

53
vendor/github.com/googleapis/gax-go/v2/header.go generated vendored Normal file
View File

@ -0,0 +1,53 @@
// Copyright 2018, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
package gax
import "bytes"
// XGoogHeader is for use by the Google Cloud Libraries only.
//
// XGoogHeader formats key-value pairs.
// The resulting string is suitable for x-goog-api-client header.
func XGoogHeader(keyval ...string) string {
if len(keyval) == 0 {
return ""
}
if len(keyval)%2 != 0 {
panic("gax.Header: odd argument count")
}
var buf bytes.Buffer
for i := 0; i < len(keyval); i += 2 {
buf.WriteByte(' ')
buf.WriteString(keyval[i])
buf.WriteByte('/')
buf.WriteString(keyval[i+1])
}
return buf.String()[1:]
}
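
A small illustrative use of the new helper; the key/value pairs shown are only examples of the kind generated clients pass:

package main

import (
    "fmt"
    "net/http"

    gax "github.com/googleapis/gax-go/v2"
)

func main() {
    // Pairs are joined as "key/value" and space-separated, per the loop above.
    v := gax.XGoogHeader("gl-go", "1.12.9", "gax", gax.Version)
    fmt.Println(v) // gl-go/1.12.9 gax/2.0.4

    // Generated clients set the result on the x-goog-api-client header.
    req, _ := http.NewRequest("GET", "https://storage.googleapis.com/storage/v1/b", nil)
    req.Header.Set("x-goog-api-client", v)
}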

View File

@ -30,12 +30,12 @@
package gax
import (
"context"
"strings"
"time"
"golang.org/x/net/context"
)
// A user defined call stub.
// APICall is a user defined call stub.
type APICall func(context.Context, CallSettings) error
// Invoke calls the given APICall,
@ -74,6 +74,15 @@ func invoke(ctx context.Context, call APICall, settings CallSettings, sp sleeper
if settings.Retry == nil {
return err
}
// Never retry permanent certificate errors (e.g. if ca-certificates
// are not installed). We should only make very few, targeted
// exceptions: many (other) status=Unavailable should be retried, such
// as if there's a network hiccup, or the internet goes out for a
// minute. This is also why here we are doing string parsing instead of
// simply making Unavailable a non-retried code elsewhere.
if strings.Contains(err.Error(), "x509: certificate signed by unknown authority") {
return err
}
if retryer == nil {
if r := settings.Retry(); r != nil {
retryer = r
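
A hedged sketch of how a generated client might wire retries through gax.Invoke so that the new certificate check matters (assuming gax's exported WithRetry and OnCodes options, as in upstream gax-go/v2); doRPC is a hypothetical stand-in for the real gRPC call:

package main

import (
    "context"
    "time"

    gax "github.com/googleapis/gax-go/v2"
    "google.golang.org/grpc/codes"
)

// callWithRetry retries Unavailable responses with jittered exponential
// backoff. A missing CA bundle also surfaces as Unavailable, but the string
// check added to invoke() above returns that error immediately instead of
// retrying it.
func callWithRetry(ctx context.Context, doRPC func(context.Context) error) error {
    return gax.Invoke(ctx, func(ctx context.Context, _ gax.CallSettings) error {
        return doRPC(ctx)
    }, gax.WithRetry(func() gax.Retryer {
        return gax.OnCodes([]codes.Code{codes.Unavailable}, gax.Backoff{
            Initial:    100 * time.Millisecond,
            Max:        5 * time.Second,
            Multiplier: 2,
        })
    }))
}

func main() {
    _ = callWithRetry(context.Background(), func(context.Context) error { return nil })
}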

File diff suppressed because it is too large

View File

@ -1,51 +0,0 @@
// Copyright 2016 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gensupport
import (
"math/rand"
"time"
)
// BackoffStrategy defines the set of functions that a backoff-er must
// implement.
type BackoffStrategy interface {
// Pause returns the duration of the next pause and true if the operation should be
// retried, or false if no further retries should be attempted.
Pause() (time.Duration, bool)
// Reset restores the strategy to its initial state.
Reset()
}
// ExponentialBackoff performs exponential backoff as per https://en.wikipedia.org/wiki/Exponential_backoff.
// The initial pause time is given by Base.
// Once the total pause time exceeds Max, Pause will indicate no further retries.
type ExponentialBackoff struct {
Base time.Duration
Max time.Duration
total time.Duration
n uint
}
// Pause returns the amount of time the caller should wait.
func (eb *ExponentialBackoff) Pause() (time.Duration, bool) {
if eb.total > eb.Max {
return 0, false
}
// The next pause is selected randomly from [0, 2^n * Base).
d := time.Duration(rand.Int63n((1 << eb.n) * int64(eb.Base)))
eb.total += d
eb.n++
return d, true
}
// Reset resets the backoff strategy such that the next Pause call will begin
// counting from the start. It is not safe to call concurrently with Pause.
func (eb *ExponentialBackoff) Reset() {
eb.n = 0
eb.total = 0
}

View File

@ -1,22 +0,0 @@
// Copyright 2017 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package gensupport
import (
"fmt"
"runtime"
"strings"
)
// GoogleClientHeader returns the value to use for the x-goog-api-client
// header, which is used internally by Google.
func GoogleClientHeader(generatorVersion, clientElement string) string {
elts := []string{"gl-go/" + strings.Replace(runtime.Version(), " ", "_", -1)}
if clientElement != "" {
elts = append(elts, clientElement)
}
elts = append(elts, fmt.Sprintf("gdcl/%s", generatorVersion))
return strings.Join(elts, " ")
}

View File

@ -12,6 +12,22 @@ import (
"net/http"
"sync"
"time"
gax "github.com/googleapis/gax-go/v2"
)
// Backoff is an interface around gax.Backoff's Pause method, allowing tests to provide their
// own implementation.
type Backoff interface {
Pause() time.Duration
}
// These are declared as global variables so that tests can overwrite them.
var (
retryDeadline = 32 * time.Second
backoff = func() Backoff {
return &gax.Backoff{Initial: 100 * time.Millisecond}
}
)
const (
@ -39,9 +55,6 @@ type ResumableUpload struct {
// Callback is an optional function that will be periodically called with the cumulative number of bytes uploaded.
Callback func(int64)
// If not specified, a default exponential backoff strategy will be used.
Backoff BackoffStrategy
}
// Progress returns the number of bytes uploaded at this point.
@ -138,15 +151,6 @@ func (rx *ResumableUpload) transferChunk(ctx context.Context) (*http.Response, e
return res, nil
}
func contextDone(ctx context.Context) bool {
select {
case <-ctx.Done():
return true
default:
return false
}
}
// Upload starts the process of a resumable upload with a cancellable context.
// It retries using the provided back off strategy until cancelled or the
// strategy indicates to stop retrying.
@ -156,61 +160,82 @@ func contextDone(ctx context.Context) bool {
// rx is private to the auto-generated API code.
// Exactly one of resp or err will be nil. If resp is non-nil, the caller must call resp.Body.Close.
func (rx *ResumableUpload) Upload(ctx context.Context) (resp *http.Response, err error) {
var pause time.Duration
backoff := rx.Backoff
if backoff == nil {
backoff = DefaultBackoffStrategy()
var shouldRetry = func(status int, err error) bool {
if 500 <= status && status <= 599 {
return true
}
if status == statusTooManyRequests {
return true
}
if err == io.ErrUnexpectedEOF {
return true
}
if err, ok := err.(interface{ Temporary() bool }); ok {
return err.Temporary()
}
return false
}
for {
// Ensure that we return in the case of cancelled context, even if pause is 0.
if contextDone(ctx) {
return nil, ctx.Err()
}
select {
case <-ctx.Done():
return nil, ctx.Err()
case <-time.After(pause):
}
resp, err = rx.transferChunk(ctx)
var status int
if resp != nil {
status = resp.StatusCode
}
// Check if we should retry the request.
if shouldRetry(status, err) {
var retry bool
pause, retry = backoff.Pause()
if retry {
if resp != nil && resp.Body != nil {
resp.Body.Close()
}
continue
}
}
// If the chunk was uploaded successfully, but there's still
// more to go, upload the next chunk without any delay.
if statusResumeIncomplete(resp) {
pause = 0
backoff.Reset()
resp.Body.Close()
continue
}
// It's possible for err and resp to both be non-nil here, but we expose a simpler
// contract to our callers: exactly one of resp and err will be non-nil. This means
// that any response body must be closed here before returning a non-nil error.
// There are a couple of cases where it's possible for err and resp to both
// be non-nil. However, we expose a simpler contract to our callers: exactly
// one of resp and err will be non-nil. This means that any response body
// must be closed here before returning a non-nil error.
var prepareReturn = func(resp *http.Response, err error) (*http.Response, error) {
if err != nil {
if resp != nil && resp.Body != nil {
resp.Body.Close()
}
return nil, err
}
return resp, nil
}
// Send all chunks.
for {
var pause time.Duration
// Each chunk gets its own initialized-at-zero retry.
bo := backoff()
quitAfter := time.After(retryDeadline)
// Retry loop for a single chunk.
for {
select {
case <-ctx.Done():
if err == nil {
err = ctx.Err()
}
return prepareReturn(resp, err)
case <-time.After(pause):
case <-quitAfter:
return prepareReturn(resp, err)
}
resp, err = rx.transferChunk(ctx)
var status int
if resp != nil {
status = resp.StatusCode
}
// Check if we should retry the request.
if !shouldRetry(status, err) {
break
}
pause = bo.Pause()
if resp != nil && resp.Body != nil {
resp.Body.Close()
}
}
// If the chunk was uploaded successfully, but there's still
// more to go, upload the next chunk without any delay.
if statusResumeIncomplete(resp) {
resp.Body.Close()
continue
}
return prepareReturn(resp, err)
}
}
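
A sketch of how a test inside the gensupport package could use the new hooks (the backoff and retryDeadline package variables introduced above, which exist precisely so tests can overwrite them) to make per-chunk retries deterministic; the test body itself is only an outline:

package gensupport

import (
    "testing"
    "time"
)

// fakeBackoff satisfies the Backoff interface introduced above and removes
// jitter so chunk-retry behaviour is deterministic in tests.
type fakeBackoff struct{ d time.Duration }

func (f fakeBackoff) Pause() time.Duration { return f.d }

func TestChunkRetryUsesInjectedBackoff(t *testing.T) {
    oldBackoff, oldDeadline := backoff, retryDeadline
    defer func() { backoff, retryDeadline = oldBackoff, oldDeadline }()

    backoff = func() Backoff { return fakeBackoff{d: time.Millisecond} }
    retryDeadline = 50 * time.Millisecond

    // ... drive a ResumableUpload against a test server here and assert that
    // 5xx responses are retried per chunk until retryDeadline elapses.
}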

View File

@ -1,84 +0,0 @@
// Copyright 2017 Google LLC
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package gensupport
import (
"context"
"io"
"net"
"net/http"
"time"
)
// Retry invokes the given function, retrying it multiple times if the connection failed or
// the HTTP status response indicates the request should be attempted again. ctx may be nil.
func Retry(ctx context.Context, f func() (*http.Response, error), backoff BackoffStrategy) (*http.Response, error) {
for {
resp, err := f()
var status int
if resp != nil {
status = resp.StatusCode
}
// Return if we shouldn't retry.
pause, retry := backoff.Pause()
if !shouldRetry(status, err) || !retry {
return resp, err
}
// Ensure the response body is closed, if any.
if resp != nil && resp.Body != nil {
resp.Body.Close()
}
// Pause, but still listen to ctx.Done if context is not nil.
var done <-chan struct{}
if ctx != nil {
done = ctx.Done()
}
select {
case <-done:
return nil, ctx.Err()
case <-time.After(pause):
}
}
}
// DefaultBackoffStrategy returns a default strategy to use for retrying failed upload requests.
func DefaultBackoffStrategy() BackoffStrategy {
return &ExponentialBackoff{
Base: 250 * time.Millisecond,
Max: 16 * time.Second,
}
}
// shouldRetry returns true if the HTTP response / error indicates that the
// request should be attempted again.
func shouldRetry(status int, err error) bool {
if 500 <= status && status <= 599 {
return true
}
if status == statusTooManyRequests {
return true
}
if err == io.ErrUnexpectedEOF {
return true
}
if err, ok := err.(net.Error); ok {
return err.Temporary()
}
return false
}

File diff suppressed because it is too large

View File

@ -0,0 +1,246 @@
// Code generated by protoc-gen-go. DO NOT EDIT.
// source: google/rpc/code.proto
/*
Package code is a generated protocol buffer package.
It is generated from these files:
google/rpc/code.proto
It has these top-level messages:
*/
package code
import proto "github.com/golang/protobuf/proto"
import fmt "fmt"
import math "math"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
var _ = math.Inf
// This is a compile-time assertion to ensure that this generated file
// is compatible with the proto package it is being compiled against.
// A compilation error at this line likely means your copy of the
// proto package needs to be updated.
const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
// The canonical error codes for Google APIs.
//
//
// Sometimes multiple error codes may apply. Services should return
// the most specific error code that applies. For example, prefer
// `OUT_OF_RANGE` over `FAILED_PRECONDITION` if both codes apply.
// Similarly prefer `NOT_FOUND` or `ALREADY_EXISTS` over `FAILED_PRECONDITION`.
type Code int32
const (
// Not an error; returned on success
//
// HTTP Mapping: 200 OK
Code_OK Code = 0
// The operation was cancelled, typically by the caller.
//
// HTTP Mapping: 499 Client Closed Request
Code_CANCELLED Code = 1
// Unknown error. For example, this error may be returned when
// a `Status` value received from another address space belongs to
// an error space that is not known in this address space. Also
// errors raised by APIs that do not return enough error information
// may be converted to this error.
//
// HTTP Mapping: 500 Internal Server Error
Code_UNKNOWN Code = 2
// The client specified an invalid argument. Note that this differs
// from `FAILED_PRECONDITION`. `INVALID_ARGUMENT` indicates arguments
// that are problematic regardless of the state of the system
// (e.g., a malformed file name).
//
// HTTP Mapping: 400 Bad Request
Code_INVALID_ARGUMENT Code = 3
// The deadline expired before the operation could complete. For operations
// that change the state of the system, this error may be returned
// even if the operation has completed successfully. For example, a
// successful response from a server could have been delayed long
// enough for the deadline to expire.
//
// HTTP Mapping: 504 Gateway Timeout
Code_DEADLINE_EXCEEDED Code = 4
// Some requested entity (e.g., file or directory) was not found.
// For privacy reasons, this code *may* be returned when the client
// does not have the access rights to the entity, though such usage is
// discouraged.
//
// HTTP Mapping: 404 Not Found
Code_NOT_FOUND Code = 5
// The entity that a client attempted to create (e.g., file or directory)
// already exists.
//
// HTTP Mapping: 409 Conflict
Code_ALREADY_EXISTS Code = 6
// The caller does not have permission to execute the specified
// operation. `PERMISSION_DENIED` must not be used for rejections
// caused by exhausting some resource (use `RESOURCE_EXHAUSTED`
// instead for those errors). `PERMISSION_DENIED` must not be
// used if the caller can not be identified (use `UNAUTHENTICATED`
// instead for those errors).
//
// HTTP Mapping: 403 Forbidden
Code_PERMISSION_DENIED Code = 7
// The request does not have valid authentication credentials for the
// operation.
//
// HTTP Mapping: 401 Unauthorized
Code_UNAUTHENTICATED Code = 16
// Some resource has been exhausted, perhaps a per-user quota, or
// perhaps the entire file system is out of space.
//
// HTTP Mapping: 429 Too Many Requests
Code_RESOURCE_EXHAUSTED Code = 8
// The operation was rejected because the system is not in a state
// required for the operation's execution. For example, the directory
// to be deleted is non-empty, an rmdir operation is applied to
// a non-directory, etc.
//
// Service implementors can use the following guidelines to decide
// between `FAILED_PRECONDITION`, `ABORTED`, and `UNAVAILABLE`:
// (a) Use `UNAVAILABLE` if the client can retry just the failing call.
// (b) Use `ABORTED` if the client should retry at a higher level
// (e.g., restarting a read-modify-write sequence).
// (c) Use `FAILED_PRECONDITION` if the client should not retry until
// the system state has been explicitly fixed. E.g., if an "rmdir"
// fails because the directory is non-empty, `FAILED_PRECONDITION`
// should be returned since the client should not retry unless
// the files are deleted from the directory.
//
// HTTP Mapping: 400 Bad Request
Code_FAILED_PRECONDITION Code = 9
// The operation was aborted, typically due to a concurrency issue such as
// a sequencer check failure or transaction abort.
//
// See the guidelines above for deciding between `FAILED_PRECONDITION`,
// `ABORTED`, and `UNAVAILABLE`.
//
// HTTP Mapping: 409 Conflict
Code_ABORTED Code = 10
// The operation was attempted past the valid range. E.g., seeking or
// reading past end-of-file.
//
// Unlike `INVALID_ARGUMENT`, this error indicates a problem that may
// be fixed if the system state changes. For example, a 32-bit file
// system will generate `INVALID_ARGUMENT` if asked to read at an
// offset that is not in the range [0,2^32-1], but it will generate
// `OUT_OF_RANGE` if asked to read from an offset past the current
// file size.
//
// There is a fair bit of overlap between `FAILED_PRECONDITION` and
// `OUT_OF_RANGE`. We recommend using `OUT_OF_RANGE` (the more specific
// error) when it applies so that callers who are iterating through
// a space can easily look for an `OUT_OF_RANGE` error to detect when
// they are done.
//
// HTTP Mapping: 400 Bad Request
Code_OUT_OF_RANGE Code = 11
// The operation is not implemented or is not supported/enabled in this
// service.
//
// HTTP Mapping: 501 Not Implemented
Code_UNIMPLEMENTED Code = 12
// Internal errors. This means that some invariants expected by the
// underlying system have been broken. This error code is reserved
// for serious errors.
//
// HTTP Mapping: 500 Internal Server Error
Code_INTERNAL Code = 13
// The service is currently unavailable. This is most likely a
// transient condition, which can be corrected by retrying with
// a backoff.
//
// See the guidelines above for deciding between `FAILED_PRECONDITION`,
// `ABORTED`, and `UNAVAILABLE`.
//
// HTTP Mapping: 503 Service Unavailable
Code_UNAVAILABLE Code = 14
// Unrecoverable data loss or corruption.
//
// HTTP Mapping: 500 Internal Server Error
Code_DATA_LOSS Code = 15
)
var Code_name = map[int32]string{
0: "OK",
1: "CANCELLED",
2: "UNKNOWN",
3: "INVALID_ARGUMENT",
4: "DEADLINE_EXCEEDED",
5: "NOT_FOUND",
6: "ALREADY_EXISTS",
7: "PERMISSION_DENIED",
16: "UNAUTHENTICATED",
8: "RESOURCE_EXHAUSTED",
9: "FAILED_PRECONDITION",
10: "ABORTED",
11: "OUT_OF_RANGE",
12: "UNIMPLEMENTED",
13: "INTERNAL",
14: "UNAVAILABLE",
15: "DATA_LOSS",
}
var Code_value = map[string]int32{
"OK": 0,
"CANCELLED": 1,
"UNKNOWN": 2,
"INVALID_ARGUMENT": 3,
"DEADLINE_EXCEEDED": 4,
"NOT_FOUND": 5,
"ALREADY_EXISTS": 6,
"PERMISSION_DENIED": 7,
"UNAUTHENTICATED": 16,
"RESOURCE_EXHAUSTED": 8,
"FAILED_PRECONDITION": 9,
"ABORTED": 10,
"OUT_OF_RANGE": 11,
"UNIMPLEMENTED": 12,
"INTERNAL": 13,
"UNAVAILABLE": 14,
"DATA_LOSS": 15,
}
func (x Code) String() string {
return proto.EnumName(Code_name, int32(x))
}
func (Code) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{0} }
func init() {
proto.RegisterEnum("google.rpc.Code", Code_name, Code_value)
}
func init() { proto.RegisterFile("google/rpc/code.proto", fileDescriptor0) }
var fileDescriptor0 = []byte{
// 362 bytes of a gzipped FileDescriptorProto
0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x44, 0x51, 0xcd, 0x6e, 0x93, 0x31,
0x10, 0xa4, 0x69, 0x49, 0x9b, 0xcd, 0xdf, 0xd6, 0xa5, 0xf0, 0x0e, 0x1c, 0x92, 0x43, 0x8f, 0x9c,
0x36, 0x9f, 0x37, 0xad, 0x55, 0x67, 0xfd, 0xc9, 0x3f, 0x25, 0x70, 0xb1, 0x4a, 0x1a, 0x7d, 0x42,
0x2a, 0x75, 0xf4, 0xc1, 0x13, 0xf1, 0x12, 0xbc, 0x1e, 0x72, 0x8b, 0xe8, 0xc5, 0x87, 0x99, 0xf1,
0xee, 0xce, 0x0c, 0x5c, 0x76, 0xa5, 0x74, 0x8f, 0xfb, 0x65, 0x7f, 0xd8, 0x2d, 0x77, 0xe5, 0x61,
0xbf, 0x38, 0xf4, 0xe5, 0x57, 0x51, 0xf0, 0x02, 0x2f, 0xfa, 0xc3, 0xee, 0xe3, 0x9f, 0x01, 0x9c,
0x34, 0xe5, 0x61, 0xaf, 0x86, 0x30, 0x70, 0xb7, 0xf8, 0x46, 0x4d, 0x61, 0xd4, 0x90, 0x34, 0x6c,
0x2d, 0x6b, 0x3c, 0x52, 0x63, 0x38, 0x4d, 0x72, 0x2b, 0xee, 0xb3, 0xe0, 0x40, 0xbd, 0x03, 0x34,
0x72, 0x47, 0xd6, 0xe8, 0x4c, 0xfe, 0x3a, 0x6d, 0x58, 0x22, 0x1e, 0xab, 0x4b, 0x38, 0xd7, 0x4c,
0xda, 0x1a, 0xe1, 0xcc, 0xdb, 0x86, 0x59, 0xb3, 0xc6, 0x93, 0x3a, 0x48, 0x5c, 0xcc, 0x6b, 0x97,
0x44, 0xe3, 0x5b, 0xa5, 0x60, 0x46, 0xd6, 0x33, 0xe9, 0x2f, 0x99, 0xb7, 0x26, 0xc4, 0x80, 0xc3,
0xfa, 0xb3, 0x65, 0xbf, 0x31, 0x21, 0x18, 0x27, 0x59, 0xb3, 0x18, 0xd6, 0x78, 0xaa, 0x2e, 0x60,
0x9e, 0x84, 0x52, 0xbc, 0x61, 0x89, 0xa6, 0xa1, 0xc8, 0x1a, 0x51, 0xbd, 0x07, 0xe5, 0x39, 0xb8,
0xe4, 0x9b, 0xba, 0xe5, 0x86, 0x52, 0xa8, 0xf8, 0x99, 0xfa, 0x00, 0x17, 0x6b, 0x32, 0x96, 0x75,
0x6e, 0x3d, 0x37, 0x4e, 0xb4, 0x89, 0xc6, 0x09, 0x8e, 0xea, 0xe5, 0xb4, 0x72, 0xbe, 0xaa, 0x40,
0x21, 0x4c, 0x5c, 0x8a, 0xd9, 0xad, 0xb3, 0x27, 0xb9, 0x66, 0x1c, 0xab, 0x73, 0x98, 0x26, 0x31,
0x9b, 0xd6, 0x72, 0xb5, 0xc1, 0x1a, 0x27, 0x6a, 0x02, 0x67, 0x46, 0x22, 0x7b, 0x21, 0x8b, 0x53,
0x35, 0x87, 0x71, 0x12, 0xba, 0x23, 0x63, 0x69, 0x65, 0x19, 0x67, 0xd5, 0x90, 0xa6, 0x48, 0xd9,
0xba, 0x10, 0x70, 0xbe, 0xda, 0xc2, 0x6c, 0x57, 0x7e, 0x2c, 0x5e, 0xb3, 0x5c, 0x8d, 0x6a, 0x90,
0x6d, 0x8d, 0xb8, 0x3d, 0xfa, 0x7a, 0xf5, 0x8f, 0xe8, 0xca, 0xe3, 0xfd, 0x53, 0xb7, 0x28, 0x7d,
0xb7, 0xec, 0xf6, 0x4f, 0xcf, 0x05, 0x2c, 0x5f, 0xa8, 0xfb, 0xc3, 0xf7, 0x9f, 0xff, 0xab, 0xf9,
0x54, 0x9f, 0xdf, 0x83, 0x63, 0xdf, 0x36, 0xdf, 0x86, 0xcf, 0xaa, 0xab, 0xbf, 0x01, 0x00, 0x00,
0xff, 0xff, 0x8e, 0x97, 0x77, 0xc2, 0xbf, 0x01, 0x00, 0x00,
}
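
A short illustrative lookup through the generated maps (import path assumed to be google.golang.org/genproto/googleapis/rpc/code, matching the Gopkg.lock entry above):

package main

import (
    "fmt"

    "google.golang.org/genproto/googleapis/rpc/code"
)

func main() {
    // String() resolves through the generated Code_name map.
    c := code.Code_UNAVAILABLE
    fmt.Println(int32(c), c.String()) // 14 UNAVAILABLE

    // Code_value is the reverse lookup; converting back to Code restores the
    // Stringer behaviour.
    fmt.Println(code.Code(code.Code_value["NOT_FOUND"])) // NOT_FOUND
}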