From 5c9558bbafa0a0998658d1403e070cc16770c0cf Mon Sep 17 00:00:00 2001 From: Igor Komlew Date: Fri, 15 Dec 2017 17:37:01 +0100 Subject: [PATCH 1/6] move slack related bot code to a sub module --- bot/bot.go | 384 ++++------------------ bot/{ => slack}/approvals.go | 4 +- bot/{ => slack}/deployments.go | 2 +- bot/slack/slack.go | 401 +++++++++++++++++++++++ bot/{bot_test.go => slack/slack_test.go} | 2 +- cmd/keel/main.go | 50 +-- 6 files changed, 467 insertions(+), 376 deletions(-) rename bot/{ => slack}/approvals.go (99%) rename bot/{ => slack}/deployments.go (99%) create mode 100644 bot/slack/slack.go rename bot/{bot_test.go => slack/slack_test.go} (99%) diff --git a/bot/bot.go b/bot/bot.go index c6e08ae2..bd51b6dd 100644 --- a/bot/bot.go +++ b/bot/bot.go @@ -1,359 +1,89 @@ package bot import ( - "context" - "encoding/json" - "errors" - "fmt" - "strconv" - "strings" - "time" - - "github.com/nlopes/slack" + "sync" "github.com/keel-hq/keel/approvals" "github.com/keel-hq/keel/provider/kubernetes" - "github.com/keel-hq/keel/types" log "github.com/Sirupsen/logrus" ) -const ( - removeApprovalPrefix = "rm approval" -) +type Bot interface { + Run(k8sImplementer kubernetes.Implementer, approvalsManager approvals.Manager) (teardown func(), err error) +} + +type BotFactory func(k8sImplementer kubernetes.Implementer, approvalsManager approvals.Manager) (teardown func(), err error) +type teardown func() + +// type Teardown func() var ( - botEventTextToResponse = map[string][]string{ - "help": { - `Here's a list of supported commands`, - `- "get deployments" -> get a list of all deployments`, - `- "get approvals" -> get a list of approvals`, - `- "rm approval " -> remove approval`, - `- "approve " -> approve update request`, - `- "reject " -> reject update request`, - // `- "get deployments all" -> get a list of all deployments`, - // `- "describe deployment " -> get details for specified deployment`, - }, - } - - // static bot commands can be used straight away - staticBotCommands = map[string]bool{ - "get deployments": true, - "get approvals": true, - } - - // dynamic bot command prefixes have to be matched - dynamicBotCommandPrefixes = []string{removeApprovalPrefix} - - approvalResponseKeyword = "approve" - rejectResponseKeyword = "reject" + botsM sync.RWMutex + bots = make(map[string]BotFactory) + teardowns = make(map[string]teardown) ) -// SlackImplementer - implementes slack HTTP functionality, used to -// send messages with attachments -type SlackImplementer interface { - PostMessage(channel, text string, params slack.PostMessageParameters) (string, string, error) -} - -// approvalResponse - used to track approvals once vote begins -type approvalResponse struct { - User string - Status types.ApprovalStatus - Text string -} - -// Bot - main slack bot container -type Bot struct { - id string // bot id - name string // bot name - - users map[string]string - - msgPrefix string - - slackClient *slack.Client - slackRTM *slack.RTM - - slackHTTPClient SlackImplementer - - approvalsRespCh chan *approvalResponse - - approvalsManager approvals.Manager - approvalsChannel string // slack approvals channel name - - k8sImplementer kubernetes.Implementer - - ctx context.Context -} - -// New - create new bot instance -func New(name, token, approvalsChannel string, k8sImplementer kubernetes.Implementer, approvalsManager approvals.Manager) *Bot { - client := slack.New(token) - - bot := &Bot{ - slackClient: client, - slackHTTPClient: client, - k8sImplementer: k8sImplementer, - name: name, - approvalsManager: 
approvalsManager, - approvalsChannel: approvalsChannel, - approvalsRespCh: make(chan *approvalResponse), // don't add buffer to make it blocking +func RegisterBot(name string, b BotFactory) { + log.Debug("bot.RegisterBot") + if name == "" { + panic("bot: could not register a BotFactory with an empty name") } - return bot -} - -// Start - start bot -func (b *Bot) Start(ctx context.Context) error { - - // setting root context - b.ctx = ctx - - users, err := b.slackClient.GetUsers() - if err != nil { - return err + if b == nil { + panic("bot: could not register a nil BotFactory") } - b.users = map[string]string{} + botsM.Lock() + defer botsM.Unlock() - for _, user := range users { - switch user.Name { - case b.name: - if user.IsBot { - b.id = user.ID - } - default: - continue - } - } - if b.id == "" { - return errors.New("could not find bot in the list of names, check if the bot is called \"" + b.name + "\" ") - } - - b.msgPrefix = strings.ToLower("<@" + b.id + ">") - - // processing messages coming from slack RTM client - go b.startInternal() - - // processing slack approval responses - go b.processApprovalResponses() - - // subscribing for approval requests - go b.subscribeForApprovals() - - return nil -} - -func (b *Bot) startInternal() error { - b.slackRTM = b.slackClient.NewRTM() - - go b.slackRTM.ManageConnection() - - for { - select { - case <-b.ctx.Done(): - return nil - - case msg := <-b.slackRTM.IncomingEvents: - switch ev := msg.Data.(type) { - case *slack.HelloEvent: - // Ignore hello - - case *slack.ConnectedEvent: - // fmt.Println("Infos:", ev.Info) - // fmt.Println("Connection counter:", ev.ConnectionCount) - // Replace #general with your Channel ID - // b.slackRTM.SendMessage(b.slackRTM.NewOutgoingMessage("Hello world", "#general")) - - case *slack.MessageEvent: - b.handleMessage(ev) - case *slack.PresenceChangeEvent: - // fmt.Printf("Presence Change: %v\n", ev) - - // case *slack.LatencyReport: - // fmt.Printf("Current latency: %v\n", ev.Value) - - case *slack.RTMError: - fmt.Printf("Error: %s\n", ev.Error()) - - case *slack.InvalidAuthEvent: - fmt.Printf("Invalid credentials") - return fmt.Errorf("invalid credentials") - - default: - - // Ignore other events.. - // fmt.Printf("Unexpected: %v\n", msg.Data) - } - } - } -} - -func (b *Bot) postMessage(title, message, color string, fields []slack.AttachmentField) error { - params := slack.NewPostMessageParameters() - params.Username = b.name - - params.Attachments = []slack.Attachment{ - slack.Attachment{ - Fallback: message, - Color: color, - Fields: fields, - Footer: "https://keel.sh", - Ts: json.Number(strconv.Itoa(int(time.Now().Unix()))), - }, - } - - _, _, err := b.slackHTTPClient.PostMessage(b.approvalsChannel, "", params) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - }).Error("bot.postMessage: failed to send message") - } - return err -} - -func (b *Bot) isApproval(event *slack.MessageEvent, eventText string) (resp *approvalResponse, ok bool) { - if strings.HasPrefix(strings.ToLower(eventText), approvalResponseKeyword) { - return &approvalResponse{ - User: event.User, - Status: types.ApprovalStatusApproved, - Text: eventText, - }, true - } - - if strings.HasPrefix(strings.ToLower(eventText), rejectResponseKeyword) { - return &approvalResponse{ - User: event.User, - Status: types.ApprovalStatusRejected, - Text: eventText, - }, true - } - - return nil, false -} - -// TODO(k): cache results in a map or get this info on startup. 
Although -// if channel was then recreated (unlikely), we would miss results -func (b *Bot) isApprovalsChannel(event *slack.MessageEvent) bool { - for _, ch := range b.slackRTM.GetInfo().Channels { - if ch.ID == event.Channel && ch.Name == b.approvalsChannel { - return true - } - } - return false -} - -func (b *Bot) handleMessage(event *slack.MessageEvent) { - if event.BotID != "" || event.User == "" || event.SubType == "bot_message" { - log.WithFields(log.Fields{ - "event_bot_ID": event.BotID, - "event_user": event.User, - "event_subtype": event.SubType, - }).Info("handleMessage: ignoring message") - return - } - - eventText := strings.Trim(strings.ToLower(event.Text), " \n\r") - - if !b.isBotMessage(event, eventText) { - return - } - - eventText = b.trimBot(eventText) - - // only accepting approvals from approvals channel - if b.isApprovalsChannel(event) { - approval, ok := b.isApproval(event, eventText) - if ok { - b.approvalsRespCh <- approval - return - } - } - - // Responses that are just a canned string response - if responseLines, ok := botEventTextToResponse[eventText]; ok { - response := strings.Join(responseLines, "\n") - b.respond(event, formatAsSnippet(response)) - return - } - - if b.isCommand(event, eventText) { - b.handleCommand(event, eventText) - return + if _, dup := bots[name]; dup { + panic("bot: RegisterBot called twice for " + name) } log.WithFields(log.Fields{ - "name": b.name, - "bot_id": b.id, - "command": eventText, - "untrimmed": strings.Trim(strings.ToLower(event.Text), " \n\r"), - }).Debug("handleMessage: bot couldn't recognise command") + "name": name, + }).Info("bot: registered") + + bots[name] = b } -func (b *Bot) isCommand(event *slack.MessageEvent, eventText string) bool { - if staticBotCommands[eventText] { - return true - } +type DefaultBot struct { +} - for _, prefix := range dynamicBotCommandPrefixes { - if strings.HasPrefix(eventText, prefix) { - return true +func Run(k8sImplementer kubernetes.Implementer, approvalsManager approvals.Manager) { + log.Debugf("bot.Run(): %#v\n", bots) + for botName, runner := range bots { + log.Debugf("bot.Run(): run bot %s\n", botName) + teardownBot, err := runner(k8sImplementer, approvalsManager) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + }).Fatalf("main: failed to setup %s bot\n", botName) + } else { + teardowns[botName] = teardownBot } } - - return false + // return teardowns } -func (b *Bot) handleCommand(event *slack.MessageEvent, eventText string) { - switch eventText { - case "get deployments": - log.Info("getting deployments") - response := b.deploymentsResponse(Filter{}) - b.respond(event, formatAsSnippet(response)) - return - case "get approvals": - response := b.approvalsResponse() - b.respond(event, formatAsSnippet(response)) - return +func Stop() { + log.Debug("bot.Stop()") + for botName, teardown := range teardowns { + log.Debugf("Teardown %s bot\n", botName) + teardown() + } +} + +// Senders returns the list of the registered Senders. 
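+// Note: despite the sender-flavoured wording above, this returns the
+// registered bot factories; a copy of the map is handed back so callers
+// cannot mutate the internal registry.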
+func Bots() map[string]BotFactory { + botsM.RLock() + defer botsM.RUnlock() + // bots = make(map[string]BotFactory) + ret := make(map[string]BotFactory) + for k, v := range bots { + ret[k] = v } - // handle dynamic commands - if strings.HasPrefix(eventText, removeApprovalPrefix) { - b.respond(event, formatAsSnippet(b.removeApprovalHandler(strings.TrimSpace(strings.TrimPrefix(eventText, removeApprovalPrefix))))) - return - } - - log.Info("command not found") -} - -func (b *Bot) respond(event *slack.MessageEvent, response string) { - b.slackRTM.SendMessage(b.slackRTM.NewOutgoingMessage(response, event.Channel)) -} - -func (b *Bot) isBotMessage(event *slack.MessageEvent, eventText string) bool { - prefixes := []string{ - b.msgPrefix, - "keel", - } - - for _, p := range prefixes { - if strings.HasPrefix(eventText, p) { - return true - } - } - - // Direct message channels always starts with 'D' - return strings.HasPrefix(event.Channel, "D") -} - -func (b *Bot) trimBot(msg string) string { - msg = strings.Replace(msg, strings.ToLower(b.msgPrefix), "", 1) - msg = strings.TrimPrefix(msg, b.name) - msg = strings.Trim(msg, " :\n") - - return msg -} - -func formatAsSnippet(response string) string { - return "```" + response + "```" + return ret } diff --git a/bot/approvals.go b/bot/slack/approvals.go similarity index 99% rename from bot/approvals.go rename to bot/slack/approvals.go index 96095be8..e8eef06c 100644 --- a/bot/approvals.go +++ b/bot/slack/approvals.go @@ -1,13 +1,13 @@ -package bot +package slack import ( "bytes" "fmt" "strings" - "github.com/nlopes/slack" "github.com/keel-hq/keel/bot/formatter" "github.com/keel-hq/keel/types" + "github.com/nlopes/slack" log "github.com/Sirupsen/logrus" ) diff --git a/bot/deployments.go b/bot/slack/deployments.go similarity index 99% rename from bot/deployments.go rename to bot/slack/deployments.go index b18a75ee..5f8e544c 100644 --- a/bot/deployments.go +++ b/bot/slack/deployments.go @@ -1,4 +1,4 @@ -package bot +package slack import ( "bytes" diff --git a/bot/slack/slack.go b/bot/slack/slack.go new file mode 100644 index 00000000..656a320b --- /dev/null +++ b/bot/slack/slack.go @@ -0,0 +1,401 @@ +package slack + +import ( + "context" + "encoding/json" + "errors" + "fmt" + "os" + "strconv" + "strings" + "time" + + "github.com/nlopes/slack" + + "github.com/keel-hq/keel/approvals" + "github.com/keel-hq/keel/bot" + "github.com/keel-hq/keel/constants" + "github.com/keel-hq/keel/provider/kubernetes" + "github.com/keel-hq/keel/types" + + log "github.com/Sirupsen/logrus" +) + +const ( + removeApprovalPrefix = "rm approval" +) + +var ( + botEventTextToResponse = map[string][]string{ + "help": { + `Here's a list of supported commands`, + `- "get deployments" -> get a list of all deployments`, + `- "get approvals" -> get a list of approvals`, + `- "rm approval " -> remove approval`, + `- "approve " -> approve update request`, + `- "reject " -> reject update request`, + // `- "get deployments all" -> get a list of all deployments`, + // `- "describe deployment " -> get details for specified deployment`, + }, + } + + // static bot commands can be used straight away + staticBotCommands = map[string]bool{ + "get deployments": true, + "get approvals": true, + } + + // dynamic bot command prefixes have to be matched + dynamicBotCommandPrefixes = []string{removeApprovalPrefix} + + approvalResponseKeyword = "approve" + rejectResponseKeyword = "reject" +) + +// SlackImplementer - implementes slack HTTP functionality, used to +// send messages with attachments +type 
SlackImplementer interface { + PostMessage(channel, text string, params slack.PostMessageParameters) (string, string, error) +} + +// approvalResponse - used to track approvals once vote begins +type approvalResponse struct { + User string + Status types.ApprovalStatus + Text string +} + +// Bot - main slack bot container +type Bot struct { + id string // bot id + name string // bot name + + users map[string]string + + msgPrefix string + + slackClient *slack.Client + slackRTM *slack.RTM + + slackHTTPClient SlackImplementer + + approvalsRespCh chan *approvalResponse + + approvalsManager approvals.Manager + approvalsChannel string // slack approvals channel name + + k8sImplementer kubernetes.Implementer + + ctx context.Context +} + +func init() { + bot.RegisterBot("slack", Run) +} + +func Run(k8sImplementer kubernetes.Implementer, approvalsManager approvals.Manager) (teardown func(), err error) { + if os.Getenv(constants.EnvSlackToken) != "" { + botName := "keel" + + if os.Getenv(constants.EnvSlackBotName) != "" { + botName = os.Getenv(constants.EnvSlackBotName) + } + + token := os.Getenv(constants.EnvSlackToken) + + approvalsChannel := "general" + if os.Getenv(constants.EnvSlackApprovalsChannel) != "" { + approvalsChannel = os.Getenv(constants.EnvSlackApprovalsChannel) + } + + slackBot := New(botName, token, approvalsChannel, k8sImplementer, approvalsManager) + + ctx, cancel := context.WithCancel(context.Background()) + + err := slackBot.Start(ctx) + if err != nil { + cancel() + return nil, err + } + + teardown := func() { + // cancelling context + cancel() + } + + return teardown, nil + } + + return func() {}, nil +} + +// New - create new bot instance +func New(name, token, approvalsChannel string, k8sImplementer kubernetes.Implementer, approvalsManager approvals.Manager) *Bot { + client := slack.New(token) + + bot := &Bot{ + slackClient: client, + slackHTTPClient: client, + k8sImplementer: k8sImplementer, + name: name, + approvalsManager: approvalsManager, + approvalsChannel: approvalsChannel, + approvalsRespCh: make(chan *approvalResponse), // don't add buffer to make it blocking + } + + return bot +} + +// Start - start bot +func (b *Bot) Start(ctx context.Context) error { + // setting root context + b.ctx = ctx + + users, err := b.slackClient.GetUsers() + if err != nil { + return err + } + + b.users = map[string]string{} + + for _, user := range users { + switch user.Name { + case b.name: + if user.IsBot { + b.id = user.ID + } + default: + continue + } + } + if b.id == "" { + return errors.New("could not find bot in the list of names, check if the bot is called \"" + b.name + "\" ") + } + + b.msgPrefix = strings.ToLower("<@" + b.id + ">") + + // processing messages coming from slack RTM client + go b.startInternal() + + // processing slack approval responses + go b.processApprovalResponses() + + // subscribing for approval requests + go b.subscribeForApprovals() + + return nil +} + +func (b *Bot) startInternal() error { + b.slackRTM = b.slackClient.NewRTM() + + go b.slackRTM.ManageConnection() + + for { + select { + case <-b.ctx.Done(): + return nil + + case msg := <-b.slackRTM.IncomingEvents: + switch ev := msg.Data.(type) { + case *slack.HelloEvent: + // Ignore hello + + case *slack.ConnectedEvent: + // fmt.Println("Infos:", ev.Info) + // fmt.Println("Connection counter:", ev.ConnectionCount) + // Replace #general with your Channel ID + // b.slackRTM.SendMessage(b.slackRTM.NewOutgoingMessage("Hello world", "#general")) + + case *slack.MessageEvent: + b.handleMessage(ev) + case 
*slack.PresenceChangeEvent: + // fmt.Printf("Presence Change: %v\n", ev) + + // case *slack.LatencyReport: + // fmt.Printf("Current latency: %v\n", ev.Value) + + case *slack.RTMError: + fmt.Printf("Error: %s\n", ev.Error()) + + case *slack.InvalidAuthEvent: + fmt.Printf("Invalid credentials") + return fmt.Errorf("invalid credentials") + + default: + + // Ignore other events.. + // fmt.Printf("Unexpected: %v\n", msg.Data) + } + } + } +} + +func (b *Bot) postMessage(title, message, color string, fields []slack.AttachmentField) error { + params := slack.NewPostMessageParameters() + params.Username = b.name + + params.Attachments = []slack.Attachment{ + slack.Attachment{ + Fallback: message, + Color: color, + Fields: fields, + Footer: "https://keel.sh", + Ts: json.Number(strconv.Itoa(int(time.Now().Unix()))), + }, + } + + _, _, err := b.slackHTTPClient.PostMessage(b.approvalsChannel, "", params) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + }).Error("bot.postMessage: failed to send message") + } + return err +} + +func (b *Bot) isApproval(event *slack.MessageEvent, eventText string) (resp *approvalResponse, ok bool) { + if strings.HasPrefix(strings.ToLower(eventText), approvalResponseKeyword) { + return &approvalResponse{ + User: event.User, + Status: types.ApprovalStatusApproved, + Text: eventText, + }, true + } + + if strings.HasPrefix(strings.ToLower(eventText), rejectResponseKeyword) { + return &approvalResponse{ + User: event.User, + Status: types.ApprovalStatusRejected, + Text: eventText, + }, true + } + + return nil, false +} + +// TODO(k): cache results in a map or get this info on startup. Although +// if channel was then recreated (unlikely), we would miss results +func (b *Bot) isApprovalsChannel(event *slack.MessageEvent) bool { + for _, ch := range b.slackRTM.GetInfo().Channels { + if ch.ID == event.Channel && ch.Name == b.approvalsChannel { + return true + } + } + return false +} + +func (b *Bot) handleMessage(event *slack.MessageEvent) { + if event.BotID != "" || event.User == "" || event.SubType == "bot_message" { + log.WithFields(log.Fields{ + "event_bot_ID": event.BotID, + "event_user": event.User, + "event_subtype": event.SubType, + }).Info("handleMessage: ignoring message") + return + } + + eventText := strings.Trim(strings.ToLower(event.Text), " \n\r") + + if !b.isBotMessage(event, eventText) { + return + } + + eventText = b.trimBot(eventText) + + // only accepting approvals from approvals channel + if b.isApprovalsChannel(event) { + approval, ok := b.isApproval(event, eventText) + if ok { + b.approvalsRespCh <- approval + return + } + } + + // Responses that are just a canned string response + if responseLines, ok := botEventTextToResponse[eventText]; ok { + response := strings.Join(responseLines, "\n") + b.respond(event, formatAsSnippet(response)) + return + } + + if b.isCommand(event, eventText) { + b.handleCommand(event, eventText) + return + } + + log.WithFields(log.Fields{ + "name": b.name, + "bot_id": b.id, + "command": eventText, + "untrimmed": strings.Trim(strings.ToLower(event.Text), " \n\r"), + }).Debug("handleMessage: bot couldn't recognise command") +} + +func (b *Bot) isCommand(event *slack.MessageEvent, eventText string) bool { + if staticBotCommands[eventText] { + return true + } + + for _, prefix := range dynamicBotCommandPrefixes { + if strings.HasPrefix(eventText, prefix) { + return true + } + } + + return false +} + +func (b *Bot) handleCommand(event *slack.MessageEvent, eventText string) { + switch eventText { + case "get 
deployments": + log.Info("getting deployments") + response := b.deploymentsResponse(Filter{}) + b.respond(event, formatAsSnippet(response)) + return + case "get approvals": + response := b.approvalsResponse() + b.respond(event, formatAsSnippet(response)) + return + } + + // handle dynamic commands + if strings.HasPrefix(eventText, removeApprovalPrefix) { + b.respond(event, formatAsSnippet(b.removeApprovalHandler(strings.TrimSpace(strings.TrimPrefix(eventText, removeApprovalPrefix))))) + return + } + + log.Info("command not found") +} + +func (b *Bot) respond(event *slack.MessageEvent, response string) { + b.slackRTM.SendMessage(b.slackRTM.NewOutgoingMessage(response, event.Channel)) +} + +func (b *Bot) isBotMessage(event *slack.MessageEvent, eventText string) bool { + prefixes := []string{ + b.msgPrefix, + "keel", + } + + for _, p := range prefixes { + if strings.HasPrefix(eventText, p) { + return true + } + } + + // Direct message channels always starts with 'D' + return strings.HasPrefix(event.Channel, "D") +} + +func (b *Bot) trimBot(msg string) string { + msg = strings.Replace(msg, strings.ToLower(b.msgPrefix), "", 1) + msg = strings.TrimPrefix(msg, b.name) + msg = strings.Trim(msg, " :\n") + + return msg +} + +func formatAsSnippet(response string) string { + return "```" + response + "```" +} diff --git a/bot/bot_test.go b/bot/slack/slack_test.go similarity index 99% rename from bot/bot_test.go rename to bot/slack/slack_test.go index 7813e5d1..94dbe54d 100644 --- a/bot/bot_test.go +++ b/bot/slack/slack_test.go @@ -1,4 +1,4 @@ -package bot +package slack import ( "context" diff --git a/cmd/keel/main.go b/cmd/keel/main.go index 31d3f717..1594ea37 100644 --- a/cmd/keel/main.go +++ b/cmd/keel/main.go @@ -34,6 +34,9 @@ import ( _ "github.com/keel-hq/keel/extension/notification/slack" _ "github.com/keel-hq/keel/extension/notification/webhook" + // bots + _ "github.com/keel-hq/keel/bot/slack" + log "github.com/Sirupsen/logrus" ) @@ -58,7 +61,6 @@ const ( const EnvDebug = "DEBUG" func main() { - ver := version.GetKeelVersion() inCluster := kingpin.Flag("incluster", "use in cluster configuration (defaults to 'true'), use '--no-incluster' if running outside of the cluster").Default("true").Bool() @@ -155,12 +157,7 @@ func main() { teardownTriggers := setupTriggers(ctx, providers, secretsGetter, approvalsManager) - teardownBot, err := setupBot(implementer, approvalsManager) - if err != nil { - log.WithFields(log.Fields{ - "error": err, - }).Fatal("main: failed to setup slack bot") - } + bot.Run(implementer, approvalsManager) signalChan := make(chan os.Signal, 1) cleanupDone := make(chan bool) @@ -183,7 +180,7 @@ func main() { // teardownProviders() providers.Stop() teardownTriggers() - teardownBot() + bot.Stop() cleanupDone <- true } @@ -221,43 +218,6 @@ func setupProviders(k8sImplementer kubernetes.Implementer, sender notification.S return providers } -func setupBot(k8sImplementer kubernetes.Implementer, approvalsManager approvals.Manager) (teardown func(), err error) { - - if os.Getenv(constants.EnvSlackToken) != "" { - botName := "keel" - - if os.Getenv(constants.EnvSlackBotName) != "" { - botName = os.Getenv(constants.EnvSlackBotName) - } - - token := os.Getenv(constants.EnvSlackToken) - - approvalsChannel := "general" - if os.Getenv(constants.EnvSlackApprovalsChannel) != "" { - approvalsChannel = os.Getenv(constants.EnvSlackApprovalsChannel) - } - - slackBot := bot.New(botName, token, approvalsChannel, k8sImplementer, approvalsManager) - - ctx, cancel := 
context.WithCancel(context.Background()) - - err := slackBot.Start(ctx) - if err != nil { - cancel() - return nil, err - } - - teardown := func() { - // cancelling context - cancel() - } - - return teardown, nil - } - - return func() {}, nil -} - // setupTriggers - setting up triggers. New triggers should be added to this function. Each trigger // should go through all providers (or not if there is a reason) and submit events) func setupTriggers(ctx context.Context, providers provider.Providers, secretsGetter secrets.Getter, approvalsManager approvals.Manager) (teardown func()) { From 914a497570fd2d37567fb03f83c856f84d698513 Mon Sep 17 00:00:00 2001 From: Igor Komlew Date: Fri, 15 Dec 2017 18:37:11 +0100 Subject: [PATCH 2/6] remove debug and unused code --- bot/bot.go | 25 +------------------------ 1 file changed, 1 insertion(+), 24 deletions(-) diff --git a/bot/bot.go b/bot/bot.go index bd51b6dd..006d3c79 100644 --- a/bot/bot.go +++ b/bot/bot.go @@ -16,8 +16,6 @@ type Bot interface { type BotFactory func(k8sImplementer kubernetes.Implementer, approvalsManager approvals.Manager) (teardown func(), err error) type teardown func() -// type Teardown func() - var ( botsM sync.RWMutex bots = make(map[string]BotFactory) @@ -25,7 +23,6 @@ var ( ) func RegisterBot(name string, b BotFactory) { - log.Debug("bot.RegisterBot") if name == "" { panic("bot: could not register a BotFactory with an empty name") } @@ -48,13 +45,8 @@ func RegisterBot(name string, b BotFactory) { bots[name] = b } -type DefaultBot struct { -} - func Run(k8sImplementer kubernetes.Implementer, approvalsManager approvals.Manager) { - log.Debugf("bot.Run(): %#v\n", bots) for botName, runner := range bots { - log.Debugf("bot.Run(): run bot %s\n", botName) teardownBot, err := runner(k8sImplementer, approvalsManager) if err != nil { log.WithFields(log.Fields{ @@ -64,26 +56,11 @@ func Run(k8sImplementer kubernetes.Implementer, approvalsManager approvals.Manag teardowns[botName] = teardownBot } } - // return teardowns } func Stop() { - log.Debug("bot.Stop()") for botName, teardown := range teardowns { - log.Debugf("Teardown %s bot\n", botName) + log.Infof("Teardown %s bot\n", botName) teardown() } } - -// Senders returns the list of the registered Senders. 
-func Bots() map[string]BotFactory { - botsM.RLock() - defer botsM.RUnlock() - // bots = make(map[string]BotFactory) - ret := make(map[string]BotFactory) - for k, v := range bots { - ret[k] = v - } - - return ret -} From 96b6ed9393a67c214c278306221a66e26acba402 Mon Sep 17 00:00:00 2001 From: Igor Komlew Date: Wed, 20 Dec 2017 11:54:15 +0100 Subject: [PATCH 3/6] refactoring for approval bot --- bot/bot.go | 40 ++++++++++++++++++++++++++ bot/slack/approvals.go | 9 +++--- bot/slack/slack.go | 62 ++++++++--------------------------------- bot/slack/slack_test.go | 9 +++--- 4 files changed, 62 insertions(+), 58 deletions(-) diff --git a/bot/bot.go b/bot/bot.go index 006d3c79..b92082f8 100644 --- a/bot/bot.go +++ b/bot/bot.go @@ -5,10 +5,42 @@ import ( "github.com/keel-hq/keel/approvals" "github.com/keel-hq/keel/provider/kubernetes" + "github.com/keel-hq/keel/types" log "github.com/Sirupsen/logrus" ) +const ( + RemoveApprovalPrefix = "rm approval" +) + +var ( + BotEventTextToResponse = map[string][]string{ + "help": { + `Here's a list of supported commands`, + `- "get deployments" -> get a list of all deployments`, + `- "get approvals" -> get a list of approvals`, + `- "rm approval " -> remove approval`, + `- "approve " -> approve update request`, + `- "reject " -> reject update request`, + // `- "get deployments all" -> get a list of all deployments`, + // `- "describe deployment " -> get details for specified deployment`, + }, + } + + // static bot commands can be used straight away + StaticBotCommands = map[string]bool{ + "get deployments": true, + "get approvals": true, + } + + // dynamic bot command prefixes have to be matched + DynamicBotCommandPrefixes = []string{RemoveApprovalPrefix} + + ApprovalResponseKeyword = "approve" + RejectResponseKeyword = "reject" +) + type Bot interface { Run(k8sImplementer kubernetes.Implementer, approvalsManager approvals.Manager) (teardown func(), err error) } @@ -22,6 +54,14 @@ var ( teardowns = make(map[string]teardown) ) +// ApprovalResponse - used to track approvals once vote begins +type ApprovalResponse struct { + User string + Status types.ApprovalStatus + Text string +} + +// RegisterBot makes a BotRunner available by the provided name. 
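+// Bot implementations are expected to call it from their package init
+// function; a minimal sketch, mirroring the slack package in this series
+// (names are illustrative for any other bot):
+//
+//	func init() {
+//		bot.RegisterBot("slack", Run)
+//	}
+//
+// The package is then compiled in through a blank import in cmd/keel/main.go
+// (e.g. _ "github.com/keel-hq/keel/bot/slack") and started by bot.Run.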
func RegisterBot(name string, b BotFactory) { if name == "" { panic("bot: could not register a BotFactory with an empty name") diff --git a/bot/slack/approvals.go b/bot/slack/approvals.go index e8eef06c..f78cb49f 100644 --- a/bot/slack/approvals.go +++ b/bot/slack/approvals.go @@ -5,6 +5,7 @@ import ( "fmt" "strings" + "github.com/keel-hq/keel/bot" "github.com/keel-hq/keel/bot/formatter" "github.com/keel-hq/keel/types" "github.com/nlopes/slack" @@ -99,8 +100,8 @@ func (b *Bot) processApprovalResponses() error { } } -func (b *Bot) processApprovedResponse(approvalResponse *approvalResponse) error { - trimmed := strings.TrimPrefix(approvalResponse.Text, approvalResponseKeyword) +func (b *Bot) processApprovedResponse(approvalResponse *bot.ApprovalResponse) error { + trimmed := strings.TrimPrefix(approvalResponse.Text, bot.ApprovalResponseKeyword) identifiers := strings.Split(trimmed, " ") if len(identifiers) == 0 { return nil @@ -131,8 +132,8 @@ func (b *Bot) processApprovedResponse(approvalResponse *approvalResponse) error return nil } -func (b *Bot) processRejectedResponse(approvalResponse *approvalResponse) error { - trimmed := strings.TrimPrefix(approvalResponse.Text, rejectResponseKeyword) +func (b *Bot) processRejectedResponse(approvalResponse *bot.ApprovalResponse) error { + trimmed := strings.TrimPrefix(approvalResponse.Text, bot.RejectResponseKeyword) identifiers := strings.Split(trimmed, " ") if len(identifiers) == 0 { return nil diff --git a/bot/slack/slack.go b/bot/slack/slack.go index 656a320b..511e9b38 100644 --- a/bot/slack/slack.go +++ b/bot/slack/slack.go @@ -21,50 +21,12 @@ import ( log "github.com/Sirupsen/logrus" ) -const ( - removeApprovalPrefix = "rm approval" -) - -var ( - botEventTextToResponse = map[string][]string{ - "help": { - `Here's a list of supported commands`, - `- "get deployments" -> get a list of all deployments`, - `- "get approvals" -> get a list of approvals`, - `- "rm approval " -> remove approval`, - `- "approve " -> approve update request`, - `- "reject " -> reject update request`, - // `- "get deployments all" -> get a list of all deployments`, - // `- "describe deployment " -> get details for specified deployment`, - }, - } - - // static bot commands can be used straight away - staticBotCommands = map[string]bool{ - "get deployments": true, - "get approvals": true, - } - - // dynamic bot command prefixes have to be matched - dynamicBotCommandPrefixes = []string{removeApprovalPrefix} - - approvalResponseKeyword = "approve" - rejectResponseKeyword = "reject" -) - // SlackImplementer - implementes slack HTTP functionality, used to // send messages with attachments type SlackImplementer interface { PostMessage(channel, text string, params slack.PostMessageParameters) (string, string, error) } -// approvalResponse - used to track approvals once vote begins -type approvalResponse struct { - User string - Status types.ApprovalStatus - Text string -} - // Bot - main slack bot container type Bot struct { id string // bot id @@ -79,7 +41,7 @@ type Bot struct { slackHTTPClient SlackImplementer - approvalsRespCh chan *approvalResponse + approvalsRespCh chan *bot.ApprovalResponse approvalsManager approvals.Manager approvalsChannel string // slack approvals channel name @@ -140,7 +102,7 @@ func New(name, token, approvalsChannel string, k8sImplementer kubernetes.Impleme name: name, approvalsManager: approvalsManager, approvalsChannel: approvalsChannel, - approvalsRespCh: make(chan *approvalResponse), // don't add buffer to make it blocking + approvalsRespCh: 
make(chan *bot.ApprovalResponse), // don't add buffer to make it blocking } return bot @@ -254,17 +216,17 @@ func (b *Bot) postMessage(title, message, color string, fields []slack.Attachmen return err } -func (b *Bot) isApproval(event *slack.MessageEvent, eventText string) (resp *approvalResponse, ok bool) { - if strings.HasPrefix(strings.ToLower(eventText), approvalResponseKeyword) { - return &approvalResponse{ +func (b *Bot) isApproval(event *slack.MessageEvent, eventText string) (resp *bot.ApprovalResponse, ok bool) { + if strings.HasPrefix(strings.ToLower(eventText), bot.ApprovalResponseKeyword) { + return &bot.ApprovalResponse{ User: event.User, Status: types.ApprovalStatusApproved, Text: eventText, }, true } - if strings.HasPrefix(strings.ToLower(eventText), rejectResponseKeyword) { - return &approvalResponse{ + if strings.HasPrefix(strings.ToLower(eventText), bot.RejectResponseKeyword) { + return &bot.ApprovalResponse{ User: event.User, Status: types.ApprovalStatusRejected, Text: eventText, @@ -313,7 +275,7 @@ func (b *Bot) handleMessage(event *slack.MessageEvent) { } // Responses that are just a canned string response - if responseLines, ok := botEventTextToResponse[eventText]; ok { + if responseLines, ok := bot.BotEventTextToResponse[eventText]; ok { response := strings.Join(responseLines, "\n") b.respond(event, formatAsSnippet(response)) return @@ -333,11 +295,11 @@ func (b *Bot) handleMessage(event *slack.MessageEvent) { } func (b *Bot) isCommand(event *slack.MessageEvent, eventText string) bool { - if staticBotCommands[eventText] { + if bot.StaticBotCommands[eventText] { return true } - for _, prefix := range dynamicBotCommandPrefixes { + for _, prefix := range bot.DynamicBotCommandPrefixes { if strings.HasPrefix(eventText, prefix) { return true } @@ -360,8 +322,8 @@ func (b *Bot) handleCommand(event *slack.MessageEvent, eventText string) { } // handle dynamic commands - if strings.HasPrefix(eventText, removeApprovalPrefix) { - b.respond(event, formatAsSnippet(b.removeApprovalHandler(strings.TrimSpace(strings.TrimPrefix(eventText, removeApprovalPrefix))))) + if strings.HasPrefix(eventText, bot.RemoveApprovalPrefix) { + b.respond(event, formatAsSnippet(b.removeApprovalHandler(strings.TrimSpace(strings.TrimPrefix(eventText, bot.RemoveApprovalPrefix))))) return } diff --git a/bot/slack/slack_test.go b/bot/slack/slack_test.go index 94dbe54d..4571925e 100644 --- a/bot/slack/slack_test.go +++ b/bot/slack/slack_test.go @@ -9,6 +9,7 @@ import ( "github.com/nlopes/slack" "github.com/keel-hq/keel/approvals" + b "github.com/keel-hq/keel/bot" "github.com/keel-hq/keel/cache/memory" "github.com/keel-hq/keel/constants" "github.com/keel-hq/keel/extension/approval" @@ -209,10 +210,10 @@ func TestProcessApprovalReply(t *testing.T) { time.Sleep(1 * time.Second) // approval resp - bot.approvalsRespCh <- &approvalResponse{ + bot.approvalsRespCh <- &b.ApprovalResponse{ User: "123", Status: types.ApprovalStatusApproved, - Text: fmt.Sprintf("%s %s", approvalResponseKeyword, identifier), + Text: fmt.Sprintf("%s %s", bot.ApprovalResponseKeyword, identifier), } time.Sleep(1 * time.Second) @@ -284,10 +285,10 @@ func TestProcessRejectedReply(t *testing.T) { time.Sleep(1 * time.Second) // approval resp - bot.approvalsRespCh <- &approvalResponse{ + bot.approvalsRespCh <- &b.ApprovalResponse{ User: "123", Status: types.ApprovalStatusRejected, - Text: fmt.Sprintf("%s %s", rejectResponseKeyword, identifier), + Text: fmt.Sprintf("%s %s", bot.RejectResponseKeyword, identifier), } time.Sleep(1 * time.Second) From 
04261f80f2aa66876e78b6ca4608c7541ccfdebe Mon Sep 17 00:00:00 2001 From: Igor Komlew Date: Wed, 20 Dec 2017 11:54:22 +0100 Subject: [PATCH 4/6] WIP: first step for hipchat integration --- bot/hipchat/hipchat.go | 332 ++++++++++++++++++++++++++++++++++++ bot/hipchat/hipchat_test.go | 58 +++++++ constants/constants.go | 5 + 3 files changed, 395 insertions(+) create mode 100644 bot/hipchat/hipchat.go create mode 100644 bot/hipchat/hipchat_test.go diff --git a/bot/hipchat/hipchat.go b/bot/hipchat/hipchat.go new file mode 100644 index 00000000..75307786 --- /dev/null +++ b/bot/hipchat/hipchat.go @@ -0,0 +1,332 @@ +package hipchat + +import ( + "context" + "errors" + "fmt" + "os" + "regexp" + "strings" + "time" + + "github.com/keel-hq/keel/approvals" + "github.com/keel-hq/keel/bot" + "github.com/keel-hq/keel/constants" + "github.com/keel-hq/keel/provider/kubernetes" + "github.com/keel-hq/keel/types" + + log "github.com/Sirupsen/logrus" + "github.com/daneharrigan/hipchat" +) + +// Bot - main hipchat bot container +type Bot struct { + id string // bot id + name string // bot name + mentionName string + + userName string // bot user name + password string // bot user password + + users map[string]string + + msgPrefix string + + hipchatClient *hipchat.Client + + approvalsRespCh chan *bot.ApprovalResponse + + approvalsManager approvals.Manager + approvalsChannel string // hipchat approvals channel name + + k8sImplementer kubernetes.Implementer + + ctx context.Context +} + +func init() { + bot.RegisterBot("hipchat", Run) +} + +// Run ... +func Run(k8sImplementer kubernetes.Implementer, approvalsManager approvals.Manager) (teardown func(), err error) { + if os.Getenv(constants.EnvHipchatApprovalsPasswort) != "" { + botName := "keel" + if os.Getenv(constants.EnvHipchatApprovalsBotName) != "" { + botName = os.Getenv(constants.EnvHipchatApprovalsBotName) + } + + botUserName := "" + if os.Getenv(constants.EnvHipchatApprovalsUserName) != "" { // need this!!!! 
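+		// Assumption, based on the test setup in this patch: this is the XMPP
+		// account the bot signs in with (the numeric JID local part such as
+		// 701032_4966430), not the bot's display name.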
+ botUserName = os.Getenv(constants.EnvHipchatApprovalsUserName) + } + + pass := os.Getenv(constants.EnvHipchatApprovalsPasswort) + + approvalsChannel := "general" + if os.Getenv(constants.EnvHipchatApprovalsChannel) != "" { + approvalsChannel = os.Getenv(constants.EnvHipchatApprovalsChannel) + } + + bot := new(botName, botUserName, pass, approvalsChannel, k8sImplementer, approvalsManager) + + ctx, cancel := context.WithCancel(context.Background()) + + err := bot.Start(ctx) + if err != nil { + cancel() + return nil, err + } + + teardown := func() { + // cancelling context + cancel() + } + + return teardown, nil + } + + return func() {}, nil +} + +//--------------------- ------------------------------------- + +func connect(username, password string) *hipchat.Client { + fmt.Printf("NewClient(): user=%s, pass=%s\n", username, password) + + attempts := 10 + for { + fmt.Println(">>> try to connect to hipchat") + client, err := hipchat.NewClient(username, password, "bot", "plain") + // could not authenticate + if err != nil { + fmt.Printf("bot.hipchat.connect: Error=%s\n", err) + if err.Error() == "could not authenticate" { + return nil + } + } + if attempts == 0 { + return nil + } + if client != nil && err == nil { + return client + } + fmt.Println("wait fo 30 seconds") + time.Sleep(30 * time.Second) + attempts-- + } +} + +//--------------------- ------------------------------------- + +func new(name, username, pass, approvalsChannel string, k8sImplementer kubernetes.Implementer, approvalsManager approvals.Manager) *Bot { + + client := connect(username, pass) + + bot := &Bot{ + hipchatClient: client, + k8sImplementer: k8sImplementer, + name: name, + mentionName: "@" + strings.Replace(name, " ", "", -1), + approvalsManager: approvalsManager, + approvalsChannel: approvalsChannel, // roomJid + approvalsRespCh: make(chan *bot.ApprovalResponse), // don't add buffer to make it blocking + } + + return bot +} + +// Start the bot +func (b *Bot) Start(ctx context.Context) error { + fmt.Println("bot.hipchat.Start()") + + if b.hipchatClient == nil { + return errors.New("could not conect to hipchat server") + } + + // setting root context + b.ctx = ctx + + // processing messages coming from slack RTM client + go b.startInternal() + + // processing slack approval responses + go b.processApprovalResponses() + + // subscribing for approval requests + go b.subscribeForApprovals() + + return nil +} + +func (b *Bot) startInternal() error { + client := b.hipchatClient + fmt.Printf("startInternal(): channel=%s, userName=%s\n", b.approvalsChannel, b.name) + client.Status("chat") // chat, away or idle + client.Join(b.approvalsChannel, b.name) + go client.KeepAlive() + go func() { + for { + select { + case message := <-client.Messages(): + b.handleMessage(message) + } + } + }() + + return nil +} + +// // A Message represents a message received from HipChat. 
+// type Message struct { +// From string +// To string +// Body string +// MentionName string +// } +// Body:"@IgorKomlew release notification from keel" +// hipchat.handleMessage(): &hipchat.Message{From:"701032_keel-bot@conf.hipchat.com", To:"701032_4966430@chat.hipchat.com/bot", Body:"release notification from keel", MentionName:""} +func (b *Bot) handleMessage(message *hipchat.Message) { + msg := b.trimXMPPMessage(message) + fmt.Printf("hipchat.handleMessage(): %#v // %#v\n", message, msg) + if msg.From == "" || msg.To == "" { + fmt.Println("hipchat.handleMessage(): ignore") + return + } + + if !b.isBotMessage(msg) { + fmt.Printf("hipchat.handleMessage(): is not a bot message") + return + } + + approval, ok := b.isApproval(msg) + if ok { + b.approvalsRespCh <- approval + return + } + + if responseLines, ok := bot.BotEventTextToResponse[msg.Body]; ok { + response := strings.Join(responseLines, "\n") + fmt.Println(">>> " + response) + b.respond(response) + return + } + + if b.isCommand(msg) { + b.handleCommand(msg) + return + } + + log.WithFields(log.Fields{ + "name": b.name, + "bot_id": b.id, + "command": msg.Body, + "untrimmed": message.Body, + }).Debug("handleMessage: bot couldn't recognise command") +} + +func (b *Bot) respond(response string) { + b.hipchatClient.Say(b.approvalsChannel, b.name, response) + // b.slackRTM.SendMessage(b.slackRTM.NewOutgoingMessage(response, event.Channel)) +} + +func (b *Bot) handleCommand(message *hipchat.Message) { + fmt.Printf("bot.hipchat.handleCommand() %v\n", message) +} + +func (b *Bot) isCommand(message *hipchat.Message) bool { + fmt.Printf("bot.hipchat.isCommand=%s\n", message.Body) + + if bot.StaticBotCommands[message.Body] { + return true + } + + for _, prefix := range bot.DynamicBotCommandPrefixes { + if strings.HasPrefix(message.Body, prefix) { + return true + } + } + + return false +} + +func (b *Bot) trimXMPPMessage(message *hipchat.Message) *hipchat.Message { + msg := hipchat.Message{} + msg.MentionName = trimMentionName(message.Body) + msg.Body = b.trimBot(message.Body) + msg.From = b.trimUser(message.From) + msg.To = b.trimUser(message.To) + + return &msg +} + +func trimMentionName(message string) string { + re := regexp.MustCompile(`^(@\w+)`) + match := re.FindStringSubmatch(message) + if match == nil { + return "" + } + if len(match) != 0 { + return match[1] + } + return "" +} + +func (b *Bot) trimUser(user string) string { + re := regexp.MustCompile("/(.*?)$") + match := re.FindStringSubmatch(user) + if match == nil { + return "" + } + if len(match) != 0 { + return match[1] + } + return "" +} + +func (b *Bot) trimBot(msg string) string { + // msg = strings.Replace(msg, strings.ToLower(b.msgPrefix), "", 1) + msg = strings.TrimPrefix(msg, b.mentionName) + msg = strings.Trim(msg, "\n") + msg = strings.TrimSpace(msg) + return strings.ToLower(msg) +} + +func (b *Bot) isApproval(message *hipchat.Message) (resp *bot.ApprovalResponse, ok bool) { + + if strings.HasPrefix(message.Body, bot.ApprovalResponseKeyword) { + return &bot.ApprovalResponse{ + User: message.From, + Status: types.ApprovalStatusApproved, + Text: message.Body, + }, true + } + + if strings.HasPrefix(message.Body, bot.RejectResponseKeyword) { + return &bot.ApprovalResponse{ + User: message.From, + Status: types.ApprovalStatusRejected, + Text: message.Body, + }, true + } + + return nil, false +} + +func (b *Bot) isBotMessage(message *hipchat.Message) bool { + if message.MentionName == b.mentionName { + return true + } + return false +} + +// +func (b *Bot) 
processApprovalResponses() error { + return nil +} + +func (b *Bot) subscribeForApprovals() error { + return nil +} + +// diff --git a/bot/hipchat/hipchat_test.go b/bot/hipchat/hipchat_test.go new file mode 100644 index 00000000..ef4a81ab --- /dev/null +++ b/bot/hipchat/hipchat_test.go @@ -0,0 +1,58 @@ +package hipchat + +import ( + "os" + "testing" + "time" + + "github.com/glower/keel/approvals" + "github.com/glower/keel/cache/memory" + "github.com/glower/keel/types" + "github.com/glower/keel/util/codecs" + testutil "github.com/keel-hq/keel/util/testing" +) + +type fakeSlackImplementer struct { + postedMessages []postedMessage +} + +type fakeProvider struct { + submitted []types.Event + images []*types.TrackedImage +} + +func (p *fakeProvider) TrackedImages() ([]*types.TrackedImage, error) { + return p.images, nil +} + +func (p *fakeProvider) List() []string { + return []string{"fakeprovider"} +} +func (p *fakeProvider) Stop() { + return +} +func (p *fakeProvider) GetName() string { + return "fp" +} + +type postedMessage struct { + channel string + text string + // params slack.PostMessageParameters +} + +func TestBot(t *testing.T) { + k8sImplementer := &testutil.FakeK8sImplementer{} + + mem := memory.NewMemoryCache(100*time.Second, 100*time.Second, 10*time.Second) + + os.Setenv("HIPCHAT_APPROVALS_CHANNEL", "701032_keel-bot@conf.hipchat.com") + os.Setenv("HIPCHAT_APPROVALS_BOT_NAME", "Igor Komlew") + os.Setenv("HIPCHAT_APPROVALS_USER_NAME", "701032_4966430") + os.Setenv("HIPCHAT_APPROVALS_PASSWORT", "B10nadeL!tschi22") + + approvalsManager := approvals.New(mem, codecs.DefaultSerializer()) + + Run(k8sImplementer, approvalsManager) + select {} +} diff --git a/constants/constants.go b/constants/constants.go index e64caffa..8a0fcbc5 100644 --- a/constants/constants.go +++ b/constants/constants.go @@ -19,6 +19,11 @@ const ( EnvHipchatToken = "HIPCHAT_TOKEN" EnvHipchatBotName = "HIPCHAT_BOT_NAME" EnvHipchatChannels = "HIPCHAT_CHANNELS" + + EnvHipchatApprovalsChannel = "HIPCHAT_APPROVALS_CHANNEL" + EnvHipchatApprovalsUserName = "HIPCHAT_APPROVALS_USER_NAME" + EnvHipchatApprovalsBotName = "HIPCHAT_APPROVALS_BOT_NAME" + EnvHipchatApprovalsPasswort = "HIPCHAT_APPROVALS_PASSWORT" ) // EnvNotificationLevel - minimum level for notifications, defaults to info From abaea36c7b46b1a2331bc4d6a56b9e4c92a43154 Mon Sep 17 00:00:00 2001 From: Igor Komlew Date: Wed, 20 Dec 2017 20:04:12 +0100 Subject: [PATCH 5/6] WIP send message to the hip chat over XMPP --- bot/bot.go | 1 + bot/hipchat/approvals.go | 267 ++++++++++++++++++++++++++++++++++++ bot/hipchat/hipchat.go | 37 +++-- bot/hipchat/hipchat_test.go | 58 -------- cmd/keel/main.go | 1 + 5 files changed, 285 insertions(+), 79 deletions(-) create mode 100644 bot/hipchat/approvals.go delete mode 100644 bot/hipchat/hipchat_test.go diff --git a/bot/bot.go b/bot/bot.go index b92082f8..9d940ebc 100644 --- a/bot/bot.go +++ b/bot/bot.go @@ -93,6 +93,7 @@ func Run(k8sImplementer kubernetes.Implementer, approvalsManager approvals.Manag "error": err, }).Fatalf("main: failed to setup %s bot\n", botName) } else { + log.Debugf(">>> Run [%s] bot", botName) teardowns[botName] = teardownBot } } diff --git a/bot/hipchat/approvals.go b/bot/hipchat/approvals.go new file mode 100644 index 00000000..9e69c864 --- /dev/null +++ b/bot/hipchat/approvals.go @@ -0,0 +1,267 @@ +package hipchat + +import ( + "bytes" + "fmt" + "strings" + + "github.com/keel-hq/keel/bot" + "github.com/keel-hq/keel/bot/formatter" + "github.com/keel-hq/keel/types" + + log "github.com/Sirupsen/logrus" +) + 
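+
+// The approval flow below mirrors bot/slack/approvals.go: subscribeForApprovals
+// receives pending approvals from the approvals manager and posts them to the
+// HipChat room, while processApprovalResponses drains b.approvalsRespCh (fed by
+// handleMessage in hipchat.go) and applies each vote through the manager's
+// Approve/Reject calls. A room member votes by mentioning the bot, for example
+// (identifier shown is illustrative):
+//
+//	@keel approve <approval identifier>
+//	@keel reject <approval identifier>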
+func (b *Bot) subscribeForApprovals() error { + log.Debugf(">>> hipchat.subscribeForApprovals()\n") + + approvalsCh, err := b.approvalsManager.Subscribe(b.ctx) + if err != nil { + log.Debugf(">>> [ERROR] hipchat.subscribeForApprovals(): %s\n", err.Error()) + return err + } + + for { + select { + case <-b.ctx.Done(): + return nil + case a := <-approvalsCh: + err = b.requestApproval(a) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "approval": a.Identifier, + }).Error("bot.subscribeForApprovals: approval request failed") + } + } + } +} + +// Request - request approval +func (b *Bot) requestApproval(req *types.Approval) error { + msg := fmt.Sprintf(`Approval required! + %s + To vote for change type '%s approve %s' + To reject it: '%s reject %s' + Votes: %d/%d + Delta: %s + Identifier: %s + Provider: %s`, + req.Message, b.mentionName, req.Identifier, b.mentionName, req.Identifier, + req.VotesReceived, req.VotesRequired, req.Delta(), req.Identifier, + req.Provider.String()) + return b.postMessage(msg) +} + +func (b *Bot) processApprovalResponses() error { + for { + select { + case <-b.ctx.Done(): + return nil + case resp := <-b.approvalsRespCh: + switch resp.Status { + case types.ApprovalStatusApproved: + err := b.processApprovedResponse(resp) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + }).Error("bot.processApprovalResponses: failed to process approval response message") + } + case types.ApprovalStatusRejected: + err := b.processRejectedResponse(resp) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + }).Error("bot.processApprovalResponses: failed to process approval reject response message") + } + } + + } + } +} + +func (b *Bot) processApprovedResponse(approvalResponse *bot.ApprovalResponse) error { + trimmed := strings.TrimPrefix(approvalResponse.Text, bot.ApprovalResponseKeyword) + identifiers := strings.Split(trimmed, " ") + if len(identifiers) == 0 { + return nil + } + + for _, identifier := range identifiers { + if identifier == "" { + continue + } + approval, err := b.approvalsManager.Approve(identifier, approvalResponse.User) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "identifier": identifier, + }).Error("bot.processApprovedResponse: failed to approve") + continue + } + + err = b.replyToApproval(approval) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "identifier": identifier, + }).Error("bot.processApprovedResponse: got error while replying after processing approved approval") + } + + } + return nil +} + +func (b *Bot) processRejectedResponse(approvalResponse *bot.ApprovalResponse) error { + trimmed := strings.TrimPrefix(approvalResponse.Text, bot.RejectResponseKeyword) + identifiers := strings.Split(trimmed, " ") + if len(identifiers) == 0 { + return nil + } + + for _, identifier := range identifiers { + approval, err := b.approvalsManager.Reject(identifier) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "identifier": identifier, + }).Error("bot.processApprovedResponse: failed to reject") + continue + } + + err = b.replyToApproval(approval) + if err != nil { + log.WithFields(log.Fields{ + "error": err, + "identifier": identifier, + }).Error("bot.processApprovedResponse: got error while replying after processing rejected approval") + } + + } + return nil +} + +func (b *Bot) replyToApproval(approval *types.Approval) error { + switch approval.Status() { + case types.ApprovalStatusPending: + b.postMessage("Vote received") + // "Vote received", + // "All approvals received, 
thanks for voting!", + // types.LevelInfo.Color(), + // []slack.AttachmentField{ + // slack.AttachmentField{ + // Title: "vote received!", + // Value: "Waiting for remaining votes.", + // Short: false, + // }, + // slack.AttachmentField{ + // Title: "Votes", + // Value: fmt.Sprintf("%d/%d", approval.VotesReceived, approval.VotesRequired), + // Short: true, + // }, + // slack.AttachmentField{ + // Title: "Delta", + // Value: approval.Delta(), + // Short: true, + // }, + // slack.AttachmentField{ + // Title: "Identifier", + // Value: approval.Identifier, + // Short: true, + // }, + // }) + case types.ApprovalStatusRejected: + b.postMessage("Change rejected") + // "Change rejected", + // "Change was rejected", + // types.LevelWarn.Color(), + // []slack.AttachmentField{ + // slack.AttachmentField{ + // Title: "change rejected", + // Value: "Change was rejected.", + // Short: false, + // }, + // slack.AttachmentField{ + // Title: "Status", + // Value: approval.Status().String(), + // Short: true, + // }, + // slack.AttachmentField{ + // Title: "Votes", + // Value: fmt.Sprintf("%d/%d", approval.VotesReceived, approval.VotesRequired), + // Short: true, + // }, + // slack.AttachmentField{ + // Title: "Delta", + // Value: approval.Delta(), + // Short: true, + // }, + // slack.AttachmentField{ + // Title: "Identifier", + // Value: approval.Identifier, + // Short: true, + // }, + // }) + case types.ApprovalStatusApproved: + b.postMessage("approval received") + // "approval received", + // "All approvals received, thanks for voting!", + // types.LevelSuccess.Color(), + // []slack.AttachmentField{ + // slack.AttachmentField{ + // Title: "update approved!", + // Value: "All approvals received, thanks for voting!", + // Short: false, + // }, + // slack.AttachmentField{ + // Title: "Votes", + // Value: fmt.Sprintf("%d/%d", approval.VotesReceived, approval.VotesRequired), + // Short: true, + // }, + // slack.AttachmentField{ + // Title: "Delta", + // Value: approval.Delta(), + // Short: true, + // }, + // slack.AttachmentField{ + // Title: "Identifier", + // Value: approval.Identifier, + // Short: true, + // }, + // }) + } + return nil +} + +func (b *Bot) approvalsResponse() string { + approvals, err := b.approvalsManager.List() + if err != nil { + return fmt.Sprintf("got error while fetching approvals: %s", err) + } + + if len(approvals) == 0 { + return fmt.Sprintf("there are currently no request waiting to be approved.") + } + + buf := &bytes.Buffer{} + + approvalCtx := formatter.Context{ + Output: buf, + Format: formatter.NewApprovalsFormat(formatter.TableFormatKey, false), + } + err = formatter.ApprovalWrite(approvalCtx, approvals) + + if err != nil { + return fmt.Sprintf("got error while formatting approvals: %s", err) + } + + return buf.String() +} + +func (b *Bot) removeApprovalHandler(identifier string) string { + err := b.approvalsManager.Delete(identifier) + if err != nil { + return fmt.Sprintf("failed to remove '%s' approval: %s.", identifier, err) + } + return fmt.Sprintf("approval '%s' removed.", identifier) +} diff --git a/bot/hipchat/hipchat.go b/bot/hipchat/hipchat.go index 75307786..a8ef30f2 100644 --- a/bot/hipchat/hipchat.go +++ b/bot/hipchat/hipchat.go @@ -92,15 +92,15 @@ func Run(k8sImplementer kubernetes.Implementer, approvalsManager approvals.Manag //--------------------- ------------------------------------- func connect(username, password string) *hipchat.Client { - fmt.Printf("NewClient(): user=%s, pass=%s\n", username, password) + log.Debugf(">>> bot.hipchat.NewClient(): user=%s, 
pass=%s\n", username, password) attempts := 10 for { - fmt.Println(">>> try to connect to hipchat") + log.Debugf(">>> try to connect to hipchat") client, err := hipchat.NewClient(username, password, "bot", "plain") // could not authenticate if err != nil { - fmt.Printf("bot.hipchat.connect: Error=%s\n", err) + log.Errorf("bot.hipchat.connect: Error=%s\n", err) if err.Error() == "could not authenticate" { return nil } @@ -111,7 +111,7 @@ func connect(username, password string) *hipchat.Client { if client != nil && err == nil { return client } - fmt.Println("wait fo 30 seconds") + log.Debugf("wait fo 30 seconds") time.Sleep(30 * time.Second) attempts-- } @@ -138,7 +138,7 @@ func new(name, username, pass, approvalsChannel string, k8sImplementer kubernete // Start the bot func (b *Bot) Start(ctx context.Context) error { - fmt.Println("bot.hipchat.Start()") + log.Debugln(">>> bot.hipchat.Start()") if b.hipchatClient == nil { return errors.New("could not conect to hipchat server") @@ -161,7 +161,7 @@ func (b *Bot) Start(ctx context.Context) error { func (b *Bot) startInternal() error { client := b.hipchatClient - fmt.Printf("startInternal(): channel=%s, userName=%s\n", b.approvalsChannel, b.name) + log.Debugf("startInternal(): channel=%s, userName=%s\n", b.approvalsChannel, b.name) client.Status("chat") // chat, away or idle client.Join(b.approvalsChannel, b.name) go client.KeepAlive() @@ -188,14 +188,14 @@ func (b *Bot) startInternal() error { // hipchat.handleMessage(): &hipchat.Message{From:"701032_keel-bot@conf.hipchat.com", To:"701032_4966430@chat.hipchat.com/bot", Body:"release notification from keel", MentionName:""} func (b *Bot) handleMessage(message *hipchat.Message) { msg := b.trimXMPPMessage(message) - fmt.Printf("hipchat.handleMessage(): %#v // %#v\n", message, msg) + log.Debugf("hipchat.handleMessage(): %#v // %#v\n", message, msg) if msg.From == "" || msg.To == "" { - fmt.Println("hipchat.handleMessage(): ignore") + log.Debugf("hipchat.handleMessage(): ignore") return } if !b.isBotMessage(msg) { - fmt.Printf("hipchat.handleMessage(): is not a bot message") + log.Debugf("hipchat.handleMessage(): is not a bot message") return } @@ -227,7 +227,6 @@ func (b *Bot) handleMessage(message *hipchat.Message) { func (b *Bot) respond(response string) { b.hipchatClient.Say(b.approvalsChannel, b.name, response) - // b.slackRTM.SendMessage(b.slackRTM.NewOutgoingMessage(response, event.Channel)) } func (b *Bot) handleCommand(message *hipchat.Message) { @@ -284,6 +283,13 @@ func (b *Bot) trimUser(user string) string { return "" } +func (b *Bot) postMessage(msg string) error { + log.Debugf(">>> bot.hipchat.postMessage: %s\n", msg) + b.hipchatClient.Say(b.approvalsChannel, b.name, msg) + // b.respond(msg) + return nil +} + func (b *Bot) trimBot(msg string) string { // msg = strings.Replace(msg, strings.ToLower(b.msgPrefix), "", 1) msg = strings.TrimPrefix(msg, b.mentionName) @@ -319,14 +325,3 @@ func (b *Bot) isBotMessage(message *hipchat.Message) bool { } return false } - -// -func (b *Bot) processApprovalResponses() error { - return nil -} - -func (b *Bot) subscribeForApprovals() error { - return nil -} - -// diff --git a/bot/hipchat/hipchat_test.go b/bot/hipchat/hipchat_test.go deleted file mode 100644 index ef4a81ab..00000000 --- a/bot/hipchat/hipchat_test.go +++ /dev/null @@ -1,58 +0,0 @@ -package hipchat - -import ( - "os" - "testing" - "time" - - "github.com/glower/keel/approvals" - "github.com/glower/keel/cache/memory" - "github.com/glower/keel/types" - "github.com/glower/keel/util/codecs" - 
testutil "github.com/keel-hq/keel/util/testing" -) - -type fakeSlackImplementer struct { - postedMessages []postedMessage -} - -type fakeProvider struct { - submitted []types.Event - images []*types.TrackedImage -} - -func (p *fakeProvider) TrackedImages() ([]*types.TrackedImage, error) { - return p.images, nil -} - -func (p *fakeProvider) List() []string { - return []string{"fakeprovider"} -} -func (p *fakeProvider) Stop() { - return -} -func (p *fakeProvider) GetName() string { - return "fp" -} - -type postedMessage struct { - channel string - text string - // params slack.PostMessageParameters -} - -func TestBot(t *testing.T) { - k8sImplementer := &testutil.FakeK8sImplementer{} - - mem := memory.NewMemoryCache(100*time.Second, 100*time.Second, 10*time.Second) - - os.Setenv("HIPCHAT_APPROVALS_CHANNEL", "701032_keel-bot@conf.hipchat.com") - os.Setenv("HIPCHAT_APPROVALS_BOT_NAME", "Igor Komlew") - os.Setenv("HIPCHAT_APPROVALS_USER_NAME", "701032_4966430") - os.Setenv("HIPCHAT_APPROVALS_PASSWORT", "B10nadeL!tschi22") - - approvalsManager := approvals.New(mem, codecs.DefaultSerializer()) - - Run(k8sImplementer, approvalsManager) - select {} -} diff --git a/cmd/keel/main.go b/cmd/keel/main.go index 1594ea37..eeab1ffa 100644 --- a/cmd/keel/main.go +++ b/cmd/keel/main.go @@ -35,6 +35,7 @@ import ( _ "github.com/keel-hq/keel/extension/notification/webhook" // bots + _ "github.com/keel-hq/keel/bot/hipchat" _ "github.com/keel-hq/keel/bot/slack" log "github.com/Sirupsen/logrus" From 1ab904c9494b7d10a7239cbf5626b3ef3b32feec Mon Sep 17 00:00:00 2001 From: Igor Komlew Date: Wed, 20 Dec 2017 20:11:58 +0100 Subject: [PATCH 6/6] WIP added vendor --- glide.lock | 22 +- glide.yaml | 14 +- hack/deployment.sample.yml | 8 + .../github.com/daneharrigan/hipchat/LICENSE | 27 + .../github.com/daneharrigan/hipchat/Readme.md | 43 + .../hipchat/example/discover_self.go | 31 + .../daneharrigan/hipchat/example/hello.go | 26 + .../daneharrigan/hipchat/example/reply.go | 40 + .../daneharrigan/hipchat/hipchat.go | 279 ++ .../daneharrigan/hipchat/xmpp/xmpp.go | 198 ++ .../tbruyelle/hipchat-go/.travis.yml | 6 +- .../github.com/tbruyelle/hipchat-go/Makefile | 29 +- .../github.com/tbruyelle/hipchat-go/checks.mk | 18 - .../hipchat-go/examples/hipfile/.gitignore | 1 - .../hipchat-go/examples/hiptail/.gitignore | 1 - .../hipchat-go/examples/hiptail/main.go | 2 +- .../hipchat-go/examples/hipwebhooks/main.go | 2 +- .../tbruyelle/hipchat-go/hipchat/hipchat.go | 184 +- .../tbruyelle/hipchat-go/hipchat/oauth.go | 4 +- .../tbruyelle/hipchat-go/hipchat/room.go | 49 +- .../tbruyelle/hipchat-go/hipchat/room_test.go | 104 +- .../tbruyelle/hipchat-go/hipchat/user.go | 3 +- .../tbruyelle/hipchat-go/hipchat/user_test.go | 4 +- vendor/golang.org/x/crypto/argon2/argon2.go | 219 ++ .../golang.org/x/crypto/argon2/argon2_test.go | 113 + vendor/golang.org/x/crypto/argon2/blake2b.go | 53 + .../x/crypto/argon2/blamka_amd64.go | 59 + .../golang.org/x/crypto/argon2/blamka_amd64.s | 252 ++ .../x/crypto/argon2/blamka_generic.go | 163 ++ .../golang.org/x/crypto/argon2/blamka_ref.go | 15 + vendor/golang.org/x/crypto/blake2b/blake2b.go | 16 +- .../golang.org/x/crypto/pbkdf2/pbkdf2_test.go | 19 + vendor/golang.org/x/crypto/ssh/certs_test.go | 113 + vendor/golang.org/x/crypto/ssh/server.go | 2 +- .../golang.org/x/crypto/ssh/testdata/keys.go | 21 + .../v1alpha2/embedded_assistant.pb.go | 1196 ++++++++ .../cloud/dataproc/v1/clusters.pb.go | 660 +++-- .../googleapis/cloud/dataproc/v1/jobs.pb.go | 785 +++-- .../cloud/dataproc/v1/operations.pb.go | 105 +- 
.../cloud/dataproc/v1beta2/clusters.pb.go | 1775 ++++++++++++ .../cloud/dataproc/v1beta2/jobs.pb.go | 2573 +++++++++++++++++ .../cloud/dataproc/v1beta2/operations.pb.go | 221 ++ .../dataproc/v1beta2/workflow_templates.pb.go | 1526 ++++++++++ .../cloud/iot/v1/device_manager.pb.go | 1327 +++++++++ .../googleapis/cloud/iot/v1/resources.pb.go | 1026 +++++++ .../googleapis/monitoring/v3/common.pb.go | 12 + .../googleapis/monitoring/v3/uptime.pb.go | 748 +++++ .../monitoring/v3/uptime_service.pb.go | 591 ++++ .../protobuf/field_mask/field_mask.pb.go | 6 - .../gopkg.in/alecthomas/kingpin.v2/README.md | 4 +- .../kingpin.v2/_examples/completion/main.go | 2 +- vendor/gopkg.in/alecthomas/kingpin.v2/app.go | 3 + .../alecthomas/kingpin.v2/app_test.go | 2 +- .../alecthomas/kingpin.v2/args_test.go | 2 +- .../alecthomas/kingpin.v2/cmd_test.go | 2 +- .../alecthomas/kingpin.v2/completions_test.go | 2 +- .../alecthomas/kingpin.v2/flags_test.go | 2 +- .../gopkg.in/alecthomas/kingpin.v2/parser.go | 18 +- .../alecthomas/kingpin.v2/parser_test.go | 2 +- .../alecthomas/kingpin.v2/parsers_test.go | 2 +- .../alecthomas/kingpin.v2/usage_test.go | 2 +- .../alecthomas/kingpin.v2/values_test.go | 2 +- 62 files changed, 13927 insertions(+), 809 deletions(-) create mode 100644 vendor/github.com/daneharrigan/hipchat/LICENSE create mode 100644 vendor/github.com/daneharrigan/hipchat/Readme.md create mode 100644 vendor/github.com/daneharrigan/hipchat/example/discover_self.go create mode 100644 vendor/github.com/daneharrigan/hipchat/example/hello.go create mode 100644 vendor/github.com/daneharrigan/hipchat/example/reply.go create mode 100644 vendor/github.com/daneharrigan/hipchat/hipchat.go create mode 100644 vendor/github.com/daneharrigan/hipchat/xmpp/xmpp.go delete mode 100644 vendor/github.com/tbruyelle/hipchat-go/checks.mk delete mode 100644 vendor/github.com/tbruyelle/hipchat-go/examples/hipfile/.gitignore delete mode 100644 vendor/github.com/tbruyelle/hipchat-go/examples/hiptail/.gitignore create mode 100644 vendor/golang.org/x/crypto/argon2/argon2.go create mode 100644 vendor/golang.org/x/crypto/argon2/argon2_test.go create mode 100644 vendor/golang.org/x/crypto/argon2/blake2b.go create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_amd64.go create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_amd64.s create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_generic.go create mode 100644 vendor/golang.org/x/crypto/argon2/blamka_ref.go create mode 100644 vendor/google.golang.org/genproto/googleapis/assistant/embedded/v1alpha2/embedded_assistant.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2/clusters.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2/jobs.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2/operations.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2/workflow_templates.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/iot/v1/device_manager.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/cloud/iot/v1/resources.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go create mode 100644 vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go diff --git a/glide.lock b/glide.lock index 7cd1e920..de2ecd6f 100644 --- a/glide.lock +++ b/glide.lock @@ -1,5 +1,5 @@ -hash: 
b4faf989713037ee4eb401a0af5ea985065140cf7ffdcaab025c62969d5803ea -updated: 2017-12-11T21:33:40.88196567Z +hash: 2b3a6924fbd5b637b899b9bd08c66b36690f23b72ffe140eb14a2016f16770ae +updated: 2017-12-20T20:08:27.365624741+01:00 imports: - name: cloud.google.com/go version: 050b16d2314d5fc3d4c9a51e4cd5c7468e77f162 @@ -18,6 +18,10 @@ imports: version: 2efee857e7cfd4f3d0138cc3cbb1b4966962b93a - name: github.com/BurntSushi/toml version: b26d9c308763d68093482582cea63d69be07a0f0 +- name: github.com/daneharrigan/hipchat + version: 835dc879394a24080bf1c01e199c4cda001b6c46 + subpackages: + - xmpp - name: github.com/davecgh/go-spew version: 5215b55f46b2b919f50a1df0eaa5886afe4e3b3d subpackages: @@ -76,6 +80,10 @@ imports: - ptypes/duration - ptypes/empty - ptypes/timestamp +- name: github.com/google/go-querystring + version: 53e6ce116135b80d037921a7fdd5138cf32d7a8a + subpackages: + - query - name: github.com/google/gofuzz version: 44d81051d367757e1c7c6a5a86423ece9afcf63c - name: github.com/googleapis/gax-go @@ -120,6 +128,10 @@ imports: version: d682213848ed68c0a260ca37d6dd5ace8423f5ba - name: github.com/spf13/pflag version: 9ff6c6923cfffbcd502984b8e0c80539a94968b7 +- name: github.com/tbruyelle/hipchat-go + version: 749fb9e14beb9995f677c101a754393cecb64b0f + subpackages: + - hipchat - name: github.com/ugorji/go version: ded73eae5db7e7a0ef6f55aace87a2873c5d2b74 subpackages: @@ -127,7 +139,7 @@ imports: - name: github.com/urfave/negroni version: 5dbbc83f748fc3ad38585842b0aedab546d0ea1e - name: golang.org/x/crypto - version: 94eea52f7b742c7cbe0b03b22f0c4c8631ece122 + version: d585fd2cc9195196078f516b69daff6744ef5e84 subpackages: - ssh/terminal - name: golang.org/x/net @@ -200,7 +212,7 @@ imports: - socket - urlfetch - name: google.golang.org/genproto - version: 73cb5d0be5af113b42057925bd6c93e3cd9f60fd + version: a8101f21cf983e773d0c1133ebc5424792003214 subpackages: - googleapis/api/annotations - googleapis/iam/v1 @@ -225,7 +237,7 @@ imports: - tap - transport - name: gopkg.in/alecthomas/kingpin.v2 - version: 1087e65c9441605df944fb12c33f0fe7072d18ca + version: 947dcec5ba9c011838740e680966fd7087a71d0d - name: gopkg.in/inf.v0 version: 3887ee99ecf07df5b447e9b00d9c0b2adaa9f3e4 - name: gopkg.in/yaml.v2 diff --git a/glide.yaml b/glide.yaml index 94f5bf53..913b145b 100644 --- a/glide.yaml +++ b/glide.yaml @@ -1,9 +1,5 @@ package: github.com/keel-hq/keel import: -# - package: cloud.google.com/go -# version: ^0.10.0 -# subpackages: -# - pubsub - package: cloud.google.com/go version: ^0.11.0 subpackages: @@ -14,7 +10,6 @@ import: version: ^1.3.1 - package: github.com/Sirupsen/logrus - package: github.com/docker/distribution - # version: 5db89f0ca68677abc5eefce8f2a0a772c98ba52d subpackages: - digest - reference @@ -42,14 +37,10 @@ import: version: 07c182904dbd53199946ba614a412c61d3c548f5 subpackages: - unix - - windows -# - package: google.golang.org/api -# subpackages: -# - option + - windows - package: github.com/golang/protobuf version: 0a4f71a498b7c4812f64969510bcb4eca251e33a - package: google.golang.org/api - # version: 324744a33f1f37e63dd1695cfb3ec9a3e4a1cb05 version: 295e4bb0ade057ae2cfb9876ab0b54635dbfcea4 subpackages: - iterator @@ -57,7 +48,7 @@ import: - support - transport - package: google.golang.org/grpc - version: v1.5.0 + version: v1.5.0 - package: k8s.io/apimachinery subpackages: - status @@ -78,3 +69,4 @@ import: - pkg/proto/hapi/chart - pkg/proto/hapi/services - pkg/strvals +- package: github.com/daneharrigan/hipchat diff --git a/hack/deployment.sample.yml b/hack/deployment.sample.yml index 
569ec13d..903d836e 100644 --- a/hack/deployment.sample.yml +++ b/hack/deployment.sample.yml @@ -40,6 +40,14 @@ spec: # value: your-token-here # - name: HIPCHAT_CHANNELS # value: keel-bot + # - name: HIPCHAT_APPROVALS_CHANNEL + # value: "111111_keel-approvals@conf.hipchat.com" + # - name: HIPCHAT_APPROVALS_BOT_NAME + # value: "Mr Bor" + # - name: HIPCHAT_APPROVALS_USER_NAME + # value: "111111_2222222" + # - name: HIPCHAT_APPROVALS_PASSWORT + # value: "pass" name: keel command: ["/bin/keel"] ports: diff --git a/vendor/github.com/daneharrigan/hipchat/LICENSE b/vendor/github.com/daneharrigan/hipchat/LICENSE new file mode 100644 index 00000000..09adfce0 --- /dev/null +++ b/vendor/github.com/daneharrigan/hipchat/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2015, Dane Harrigan +All rights reserved. + +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are met: + +* Redistributions of source code must retain the above copyright notice, this + list of conditions and the following disclaimer. + +* Redistributions in binary form must reproduce the above copyright notice, + this list of conditions and the following disclaimer in the documentation + and/or other materials provided with the distribution. + +* Neither the name of [project] nor the names of its + contributors may be used to endorse or promote products derived from + this software without specific prior written permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" +AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE +IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE +DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE +FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL +DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR +SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER +CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, +OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE +OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/daneharrigan/hipchat/Readme.md b/vendor/github.com/daneharrigan/hipchat/Readme.md new file mode 100644 index 00000000..f35fbfbf --- /dev/null +++ b/vendor/github.com/daneharrigan/hipchat/Readme.md @@ -0,0 +1,43 @@ +# hipchat + +This is a abstraction in golang to Hipchat's implementation of XMPP. It communicates over +TLS and requires zero knowledge of XML or the XMPP protocol. + +* Examples [available here][1] +* Documentation [available here][2] + +### bot building + +Hipchat treats the "bot" resource differently from any other resource connected to their service. When connecting to Hipchat with a resource of "bot", a chat history will not be sent. Any other resource will receive a chat history. 
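
Note for readers of this vendored Readme: the bundled examples (including the one below) still call `hipchat.NewClient` with three arguments, while the `hipchat.go` vendored in this patch declares `NewClient(user, pass, resource, authType string)`; keel's own `connect()` passes `"plain"` as the auth type. A minimal sketch of the four-argument form, using placeholder credentials and room JID:

```go
package main

import (
	"fmt"

	"github.com/daneharrigan/hipchat"
)

func main() {
	// Placeholder credentials and room JID; the "bot" resource suppresses chat history.
	user := "11111_22222"
	pass := "secret"
	roomJid := "11111_room_name@conf.hipchat.com"
	fullName := "Some Bot"

	// This vendored revision takes the auth type ("plain" or "oauth") as a fourth argument.
	client, err := hipchat.NewClient(user, pass, "bot", "plain")
	if err != nil {
		fmt.Printf("client error: %s\n", err)
		return
	}

	client.Status("chat")                  // mark the client as available
	client.Join(roomJid, fullName)         // join the room
	go client.KeepAlive()                  // keep the XMPP connection from idling
	client.Say(roomJid, fullName, "Hello") // send a group-chat message
	select {}                              // block so the connection stays up
}
```
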
+ +### example/hello.go + +```go +package main + +import ( + "github.com/daneharrigan/hipchat" +) + +func main() { + user := "11111_22222" + pass := "secret" + resource := "bot" + roomJid := "11111_room_name@conf.hipchat.com" + fullName := "Some Bot" + + client, err := hipchat.NewClient(user, pass, resource) + if err != nil { + fmt.Printf("client error: %s\n", err) + return + } + + client.Status("chat") + client.Join(roomJid, fullName) + client.Say(roomJid, fullName, "Hello") + select {} +} +``` + +[1]: https://github.com/daneharrigan/hipchat/tree/master/example +[2]: http://godoc.org/github.com/daneharrigan/hipchat diff --git a/vendor/github.com/daneharrigan/hipchat/example/discover_self.go b/vendor/github.com/daneharrigan/hipchat/example/discover_self.go new file mode 100644 index 00000000..41b86112 --- /dev/null +++ b/vendor/github.com/daneharrigan/hipchat/example/discover_self.go @@ -0,0 +1,31 @@ +package main + +import ( + "fmt" + + "github.com/daneharrigan/hipchat" +) + +func main() { + user := "11111_22222" + pass := "secret" + resource := "bot" + + client, err := hipchat.NewClient(user, pass, resource) + if err != nil { + fmt.Printf("client error: %s\n", err) + return + } + + client.RequestUsers() + + select { + case users := <-client.Users(): + for _, user := range users { + if user.Id == client.Id { + fmt.Printf("name: %s\n", user.Name) + fmt.Printf("mention: %s\n", user.MentionName) + } + } + } +} diff --git a/vendor/github.com/daneharrigan/hipchat/example/hello.go b/vendor/github.com/daneharrigan/hipchat/example/hello.go new file mode 100644 index 00000000..339b2047 --- /dev/null +++ b/vendor/github.com/daneharrigan/hipchat/example/hello.go @@ -0,0 +1,26 @@ +package main + +import ( + "fmt" + + "github.com/daneharrigan/hipchat" +) + +func main() { + user := "11111_22222" + pass := "secret" + resource := "bot" + roomJid := "11111_room_name@conf.hipchat.com" + fullName := "Some Bot" + + client, err := hipchat.NewClient(user, pass, resource) + if err != nil { + fmt.Printf("client error: %s\n", err) + return + } + + client.Status("chat") + client.Join(roomJid, fullName) + client.Say(roomJid, fullName, "Hello") + select {} +} diff --git a/vendor/github.com/daneharrigan/hipchat/example/reply.go b/vendor/github.com/daneharrigan/hipchat/example/reply.go new file mode 100644 index 00000000..d4b80f41 --- /dev/null +++ b/vendor/github.com/daneharrigan/hipchat/example/reply.go @@ -0,0 +1,40 @@ +package main + +import ( + "fmt" + "strings" + + "github.com/daneharrigan/hipchat" +) + +func main() { + user := "11111_22222" + pass := "secret" + resource := "bot" + roomJid := "11111_room_name@conf.hipchat.com" + fullName := "Some Bot" + mentionName := "SomeBot" + + client, err := hipchat.NewClient(user, pass, resource) + if err != nil { + fmt.Printf("client error: %s\n", err) + return + } + + client.Status("chat") + client.Join(roomJid, fullName) + + go client.KeepAlive() + + go func() { + for { + select { + case message := <-client.Messages(): + if strings.HasPrefix(message.Body, "@"+mentionName) { + client.Say(roomJid, fullName, "Hello") + } + } + } + }() + select {} +} diff --git a/vendor/github.com/daneharrigan/hipchat/hipchat.go b/vendor/github.com/daneharrigan/hipchat/hipchat.go new file mode 100644 index 00000000..b354df8e --- /dev/null +++ b/vendor/github.com/daneharrigan/hipchat/hipchat.go @@ -0,0 +1,279 @@ +package hipchat + +import ( + "bytes" + "encoding/xml" + "errors" + "time" + + "github.com/daneharrigan/hipchat/xmpp" +) + +const ( + defaultAuthType = "plain" // or "oauth" + 
defaultConf = "conf.hipchat.com" + defaultDomain = "chat.hipchat.com" + defaultHost = "chat.hipchat.com" +) + +// A Client represents the connection between the application to the HipChat +// service. +type Client struct { + AuthType string + Username string + Password string + Resource string + Id string + + // private + mentionNames map[string]string + connection *xmpp.Conn + receivedUsers chan []*User + receivedRooms chan []*Room + receivedMessage chan *Message + host string + domain string + conf string +} + +// A Message represents a message received from HipChat. +type Message struct { + From string + To string + Body string + MentionName string +} + +// A User represents a member of the HipChat service. +type User struct { + Email string + Id string + Name string + MentionName string +} + +// A Room represents a room in HipChat the Client can join to communicate with +// other members.. +type Room struct { + Id string + LastActive string + Name string + NumParticipants string + Owner string + Privacy string + RoomId string + Topic string +} + +// NewClient creates a new Client connection from the user name, password and +// resource passed to it. It uses default host URL and conf URL. +func NewClient(user, pass, resource, authType string) (*Client, error) { + return NewClientWithServerInfo(user, pass, resource, authType, defaultHost, defaultDomain, defaultConf) +} + +// NewClientWithServerInfo creates a new Client connection from the user name, password, +// resource, host URL and conf URL passed to it. +func NewClientWithServerInfo(user, pass, resource, authType, host, domain, conf string) (*Client, error) { + connection, err := xmpp.Dial(host) + + var b bytes.Buffer + if err := xml.EscapeText(&b, []byte(pass)); err != nil { + return nil, err + } + + c := &Client{ + AuthType: authType, + Username: user, + Password: b.String(), + Resource: resource, + Id: user + "@" + domain, + + // private + connection: connection, + mentionNames: make(map[string]string), + receivedUsers: make(chan []*User), + receivedRooms: make(chan []*Room), + receivedMessage: make(chan *Message), + host: host, + domain: domain, + conf: conf, + } + + if err != nil { + return c, err + } + + err = c.authenticate() + if err != nil { + return c, err + } + + go c.listen() + return c, nil +} + +// Messages returns a read-only channel of Message structs. After joining a +// room, messages will be sent on the channel. +func (c *Client) Messages() <-chan *Message { + return c.receivedMessage +} + +// Rooms returns a channel of Room slices +func (c *Client) Rooms() <-chan []*Room { + return c.receivedRooms +} + +// Users returns a channel of User slices +func (c *Client) Users() <-chan []*User { + return c.receivedUsers +} + +// Status sends a string to HipChat to indicate whether the client is available +// to chat, away or idle. +func (c *Client) Status(s string) { + c.connection.Presence(c.Id, s) +} + +// Join accepts the room id and the name used to display the client in the +// room. +func (c *Client) Join(roomId, resource string) { + c.connection.MUCPresence(roomId+"/"+resource, c.Id) +} + +// Part accepts the room id to part. +func (c *Client) Part(roomId, name string) { + c.connection.MUCPart(roomId + "/" + name) +} + +// Say accepts a room id, the name of the client in the room, and the message +// body and sends the message to the HipChat room. 
+func (c *Client) Say(roomId, name, body string) { + c.connection.MUCSend("groupchat", roomId, c.Id+"/"+name, body) +} + +// PrivSay accepts a client id, the name of the client, and the message +// body and sends the private message to the HipChat +// user. +func (c *Client) PrivSay(user, name, body string) { + c.connection.MUCSend("chat", user, c.Id+"/"+name, body) +} + +// KeepAlive is meant to run as a goroutine. It sends a single whitespace +// character to HipChat every 60 seconds. This keeps the connection from +// idling after 150 seconds. +func (c *Client) KeepAlive() { + for _ = range time.Tick(60 * time.Second) { + c.connection.KeepAlive() + } +} + +// RequestRooms will send an outgoing request to get +// the room information for all rooms +func (c *Client) RequestRooms() { + c.connection.Discover(c.Id, c.conf) +} + +// RequestUsers will send an outgoing request to get +// the user information for all users +func (c *Client) RequestUsers() { + c.connection.Roster(c.Id, c.domain) +} + +func (c *Client) authenticate() error { + var errStr string + c.connection.Stream(c.Id, c.host) + for { + element, err := c.connection.Next() + if err != nil { + return err + } + + switch element.Name.Local + element.Name.Space { + case "stream" + xmpp.NsStream: + features := c.connection.Features() + if features.StartTLS != nil { + c.connection.StartTLS() + } else { + for _, m := range features.Mechanisms { + if m == "PLAIN" && c.AuthType == "plain" { + c.connection.Auth(c.Username, c.Password, c.Resource) + } else if m == "X-HIPCHAT-OAUTH2" && c.AuthType == "oauth" { + c.connection.Oauth(c.Password, c.Resource) + } + } + } + case "proceed" + xmpp.NsTLS: + c.connection.UseTLS(c.host) + c.connection.Stream(c.Id, c.host) + case "iq" + xmpp.NsJabberClient: + for _, attr := range element.Attr { + if attr.Name.Local == "type" && attr.Value == "result" { + return nil // authenticated + } + } + return errors.New("could not authenticate") + + // oauth: + case "failure" + xmpp.NsSASL: + errStr = "Unable to authenticate:" + case "invalid-authzid" + xmpp.NsSASL: + errStr += " no identity provided" + case "not-authorized" + xmpp.NsSASL: + errStr += " token not authorized" + return errors.New(errStr) + case "success" + xmpp.NsSASL: + return nil + } + } + + return errors.New("unexpectedly ended auth loop") +} + +func (c *Client) listen() { + for { + element, err := c.connection.Next() + if err != nil { + return + } + + switch element.Name.Local + element.Name.Space { + case "iq" + xmpp.NsJabberClient: // rooms and rosters + query := c.connection.Query() + switch query.XMLName.Space { + case xmpp.NsDisco: + items := make([]*Room, len(query.Items)) + for i, item := range query.Items { + items[i] = &Room{Id: item.Jid, + LastActive: item.LastActive, + NumParticipants: item.NumParticipants, + Name: item.Name, + Owner: item.Owner, + Privacy: item.Privacy, + RoomId: item.RoomId, + Topic: item.Topic, + } + } + c.receivedRooms <- items + case xmpp.NsIqRoster: + items := make([]*User, len(query.Items)) + for i, item := range query.Items { + items[i] = &User{Email: item.Email, + Id: item.Jid, + Name: item.Name, + MentionName: item.MentionName, + } + } + c.receivedUsers <- items + } + case "message" + xmpp.NsJabberClient: + attr := xmpp.ToMap(element.Attr) + + c.receivedMessage <- &Message{ + From: attr["from"], + To: attr["to"], + Body: c.connection.Body(), + } + } + } + panic("unreachable") +} diff --git a/vendor/github.com/daneharrigan/hipchat/xmpp/xmpp.go b/vendor/github.com/daneharrigan/hipchat/xmpp/xmpp.go new file 
mode 100644 index 00000000..32ce2fd6 --- /dev/null +++ b/vendor/github.com/daneharrigan/hipchat/xmpp/xmpp.go @@ -0,0 +1,198 @@ +package xmpp + +import ( + "crypto/rand" + "crypto/tls" + "encoding/base64" + "encoding/xml" + "errors" + "fmt" + "html" + "io" + "net" +) + +const ( + NsJabberClient = "jabber:client" + NsStream = "http://etherx.jabber.org/streams" + NsIqAuth = "jabber:iq:auth" + NsIqRoster = "jabber:iq:roster" + NsSASL = "urn:ietf:params:xml:ns:xmpp-sasl" + NsTLS = "urn:ietf:params:xml:ns:xmpp-tls" + NsDisco = "http://jabber.org/protocol/disco#items" + NsMuc = "http://jabber.org/protocol/muc" + + xmlStream = "" + xmlStartTLS = "" + xmlIqSet = "%s%s%s" + xmlIqGet = "" + xmlOauth = "%s" + xmlPresence = "%s" + xmlMUCPart = "" + xmlMUCPresence = "" + xmlMUCMessage = "%s" +) + +type required struct{} + +type features struct { + Auth xml.Name `xml:"auth"` + XMLName xml.Name `xml:"features"` + StartTLS *required `xml:"starttls>required"` + Mechanisms []string `xml:"mechanisms>mechanism"` +} + +type item struct { + Email string `xml:"email,attr"` + Jid string `xml:"jid,attr"` + LastActive string `xml:"x>last_active"` + MentionName string `xml:"mention_name,attr"` + Name string `xml:"name,attr"` + NumParticipants string `xml:"x>num_participants"` + Owner string `xml:"x>owner"` + Privacy string `xml:"x>privacy"` + RoomId string `xml:"x>id"` + Topic string `xml:"x>topic"` +} + +type query struct { + XMLName xml.Name `xml:"query"` + Items []*item `xml:"item"` +} + +type body struct { + Body string `xml:",innerxml"` +} + +type Conn struct { + incoming *xml.Decoder + outgoing net.Conn +} + +type Message struct { + Jid string + MentionName string + Body string +} + +func (c *Conn) Stream(jid, host string) { + fmt.Fprintf(c.outgoing, xmlStream, jid, host, NsJabberClient, NsStream) +} + +func (c *Conn) StartTLS() { + fmt.Fprintf(c.outgoing, xmlStartTLS, NsTLS) +} + +func (c *Conn) UseTLS(host string) { + c.outgoing = tls.Client(c.outgoing, &tls.Config{ServerName: host}) + c.incoming = xml.NewDecoder(c.outgoing) +} + +func (c *Conn) Auth(user, pass, resource string) { + fmt.Fprintf(c.outgoing, xmlIqSet, id(), NsIqAuth, user, pass, resource) +} + +func (c *Conn) Oauth(token, resource string) { + msg := "\x00" + token + "\x00" + resource + b64 := base64.StdEncoding.EncodeToString([]byte(msg)) + fmt.Fprintf(c.outgoing, xmlOauth, b64) +} + +func (c *Conn) Features() *features { + var f features + c.incoming.DecodeElement(&f, nil) + return &f +} + +func (c *Conn) Next() (xml.StartElement, error) { + for { + var element xml.StartElement + + var err error + var t xml.Token + t, err = c.incoming.Token() + if err != nil { + return element, err + } + + switch t := t.(type) { + case xml.StartElement: + element = t + if element.Name.Local == "" { + return element, errors.New("invalid xml response") + } + + return element, nil + } + } + panic("unreachable") +} + +func (c *Conn) Discover(from, to string) { + fmt.Fprintf(c.outgoing, xmlIqGet, from, to, id(), NsDisco) +} + +func (c *Conn) Body() string { + b := new(body) + c.incoming.DecodeElement(b, nil) + return b.Body +} + +func (c *Conn) Query() *query { + q := new(query) + c.incoming.DecodeElement(q, nil) + return q +} + +func (c *Conn) Presence(jid, pres string) { + fmt.Fprintf(c.outgoing, xmlPresence, jid, pres) +} + +func (c *Conn) MUCPart(roomId string) { + fmt.Fprintf(c.outgoing, xmlMUCPart, roomId) +} + +func (c *Conn) MUCPresence(roomId, jid string) { + fmt.Fprintf(c.outgoing, xmlMUCPresence, id(), roomId, jid, NsMuc) +} + +func (c *Conn) 
MUCSend(mtype, to, from, body string) { + fmt.Fprintf(c.outgoing, xmlMUCMessage, from, id(), to, mtype, html.EscapeString(body)) +} + +func (c *Conn) Roster(from, to string) { + fmt.Fprintf(c.outgoing, xmlIqGet, from, to, id(), NsIqRoster) +} + +func (c *Conn) KeepAlive() { + fmt.Fprintf(c.outgoing, " ") +} + +func Dial(host string) (*Conn, error) { + c := new(Conn) + outgoing, err := net.Dial("tcp", host+":5222") + + if err != nil { + return c, err + } + + c.outgoing = outgoing + c.incoming = xml.NewDecoder(outgoing) + + return c, nil +} + +func ToMap(attr []xml.Attr) map[string]string { + m := make(map[string]string) + for _, a := range attr { + m[a.Name.Local] = a.Value + } + + return m +} + +func id() string { + b := make([]byte, 8) + io.ReadFull(rand.Reader, b) + return fmt.Sprintf("%x", b) +} diff --git a/vendor/github.com/tbruyelle/hipchat-go/.travis.yml b/vendor/github.com/tbruyelle/hipchat-go/.travis.yml index e3ea49fc..74f6dce0 100644 --- a/vendor/github.com/tbruyelle/hipchat-go/.travis.yml +++ b/vendor/github.com/tbruyelle/hipchat-go/.travis.yml @@ -1,17 +1,17 @@ language: go sudo: false go: + - 1.5 - 1.6 - 1.7 - - 1.8 install: go get -v ./hipchat script: - go get -u github.com/golang/lint/golint - golint ./... - test `gofmt -l . | wc -l` = 0 - - make + - make all matrix: allow_failures: - go: tip + - go: tip diff --git a/vendor/github.com/tbruyelle/hipchat-go/Makefile b/vendor/github.com/tbruyelle/hipchat-go/Makefile index d2a34c6a..a6df1fc4 100644 --- a/vendor/github.com/tbruyelle/hipchat-go/Makefile +++ b/vendor/github.com/tbruyelle/hipchat-go/Makefile @@ -1,9 +1,30 @@ -SRC_DIR=./hipchat +TEST?=./hipchat +VETARGS?=-asmdecl -atomic -bool -buildtags -copylocks -methods -nilfunc -printf -rangeloops -shift -structtags -unsafeptr -include checks.mk +all: test testrace vet -default: test checks +default: test # test runs the unit tests and vets the code test: - go test -v $(SRC_DIR) $(TESTARGS) -timeout=30s -parallel=4 + TF_ACC= go test -v $(TEST) $(TESTARGS) -timeout=30s -parallel=4 + +# testrace runs the race checker +testrace: + TF_ACC= go test -race $(TEST) $(TESTARGS) + +# vet runs the Go source code static analysis tool `vet` to find +# any common errors +vet: + @go tool vet 2>/dev/null ; if [ $$? -eq 3 ]; then \ + go get golang.org/x/tools/cmd/vet; \ + fi + @echo "go tool vet $(VETARGS) $(TEST) " + @go tool vet $(VETARGS) $(TEST) ; if [ $$? -eq 1 ]; then \ + echo ""; \ + echo "Vet found suspicious constructs. Please check the reported constructs"; \ + echo "and fix them if necessary before submitting the code for review."; \ + exit 1; \ + fi + +.PHONY: default test updatedeps vet diff --git a/vendor/github.com/tbruyelle/hipchat-go/checks.mk b/vendor/github.com/tbruyelle/hipchat-go/checks.mk deleted file mode 100644 index 40af798b..00000000 --- a/vendor/github.com/tbruyelle/hipchat-go/checks.mk +++ /dev/null @@ -1,18 +0,0 @@ -checks: - go test -race $(SRC_DIR) - @$(call checkbin,go tool vet,golang.org/x/tools/cms/vet) - go tool vet $(SRC_DIR) - @$(call checkbin,golint,github.com/golang/lint/golint) - golint -set_exit_status $(SRC_DIR) - @$(call checkbin,errcheck,github.com/kisielk/errcheck) - errcheck -ignore 'Close' -ignoretests $(SRC_DIR) - @$(call checkbin,structcheck,github.com/opennota/check/cmd/structcheck) - structcheck $(SRC_DIR) - @$(call checkbin,varcheck,github.com/opennota/check/cmd/varcheck) - varcheck $(SRC_DIR) - -checkbin = $1 2> /dev/null; if [ $$? 
-eq 127 ]; then\ - echo "Retrieving missing tool $1...";\ - go get $2; \ - fi; - diff --git a/vendor/github.com/tbruyelle/hipchat-go/examples/hipfile/.gitignore b/vendor/github.com/tbruyelle/hipchat-go/examples/hipfile/.gitignore deleted file mode 100644 index a8292681..00000000 --- a/vendor/github.com/tbruyelle/hipchat-go/examples/hipfile/.gitignore +++ /dev/null @@ -1 +0,0 @@ -hipfile diff --git a/vendor/github.com/tbruyelle/hipchat-go/examples/hiptail/.gitignore b/vendor/github.com/tbruyelle/hipchat-go/examples/hiptail/.gitignore deleted file mode 100644 index 5ec375c7..00000000 --- a/vendor/github.com/tbruyelle/hipchat-go/examples/hiptail/.gitignore +++ /dev/null @@ -1 +0,0 @@ -hiptail diff --git a/vendor/github.com/tbruyelle/hipchat-go/examples/hiptail/main.go b/vendor/github.com/tbruyelle/hipchat-go/examples/hiptail/main.go index dd6eebed..8be65498 100644 --- a/vendor/github.com/tbruyelle/hipchat-go/examples/hiptail/main.go +++ b/vendor/github.com/tbruyelle/hipchat-go/examples/hiptail/main.go @@ -42,7 +42,7 @@ func main() { } msg := m.Message if len(m.Message) > (maxMsgLen - len(moreString)) { - msg = fmt.Sprintf("%s%s", strings.Replace(m.Message[:len(m.Message)], "\n", " - ", -1), moreString) + msg = fmt.Sprintf("%s%s", strings.Replace(m.Message[:maxMsgLen], "\n", " - ", -1), moreString) } fmt.Printf("%s [%s]: %s\n", from, m.Date, msg) } diff --git a/vendor/github.com/tbruyelle/hipchat-go/examples/hipwebhooks/main.go b/vendor/github.com/tbruyelle/hipchat-go/examples/hipwebhooks/main.go index 6a612e85..96804dda 100644 --- a/vendor/github.com/tbruyelle/hipchat-go/examples/hipwebhooks/main.go +++ b/vendor/github.com/tbruyelle/hipchat-go/examples/hipwebhooks/main.go @@ -35,7 +35,7 @@ func main() { if *action == "" { if *roomId == "" { // If no room is given, look up all rooms and all of their webhooks - rooms, resp, err := c.Room.List(&hipchat.RoomsListOptions{}) + rooms, resp, err := c.Room.List() handleRequestError(resp, err) for _, room := range rooms.Items { diff --git a/vendor/github.com/tbruyelle/hipchat-go/hipchat/hipchat.go b/vendor/github.com/tbruyelle/hipchat-go/hipchat/hipchat.go index bd1288ff..d475a2fe 100644 --- a/vendor/github.com/tbruyelle/hipchat-go/hipchat/hipchat.go +++ b/vendor/github.com/tbruyelle/hipchat-go/hipchat/hipchat.go @@ -9,7 +9,6 @@ import ( "fmt" "io" "io/ioutil" - "math/rand" "mime" "net/http" "net/url" @@ -19,7 +18,6 @@ import ( "reflect" "strconv" "strings" - "time" "github.com/google/go-querystring/query" ) @@ -34,33 +32,17 @@ type HTTPClient interface { Do(req *http.Request) (res *http.Response, err error) } -// LimitData contains the latest Rate Limit or Flood Control data sent with every API call response. -// -// Limit is the number of API calls per period of time -// Remaining is the current number of API calls that can be done before the ResetTime -// ResetTime is the UTC time in Unix epoch format for when the full Limit of API calls will be restored. -type LimitData struct { - Limit int - Remaining int - ResetTime int -} - // Client manages the communication with the HipChat API. -// -// LatestFloodControl contains the response from the latest API call's response headers X-Floodcontrol-{Limit, Remaining, ResetTime} -// LatestRateLimit contains the response from the latest API call's response headers X-Ratelimit-{Limit, Remaining, ResetTime} -// Room gives access to the /room part of the API. -// User gives access to the /user part of the API. -// Emoticon gives access to the /emoticon part of the API. 
type Client struct { - authToken string - BaseURL *url.URL - client HTTPClient - LatestFloodControl LimitData - LatestRateLimit LimitData - Room *RoomService - User *UserService - Emoticon *EmoticonService + authToken string + BaseURL *url.URL + client HTTPClient + // Room gives access to the /room part of the API. + Room *RoomService + // User gives access to the /user part of the API. + User *UserService + // Emoticon gives access to the /emoticon part of the API. + Emoticon *EmoticonService } // Links represents the HipChat default links. @@ -81,41 +63,23 @@ type ID struct { ID string `json:"id"` } -// ListOptions specifies the optional parameters to various List methods that +// ListOptions specifies the optional parameters to various List methods that // support pagination. -// -// For paginated results, StartIndex represents the first page to display. -// For paginated results, MaxResults reprensents the number of items per page. Default value is 100. Maximum value is 1000. type ListOptions struct { + // For paginated results, represents the first page to display. StartIndex int `url:"start-index,omitempty"` + // For paginated results, reprensents the number of items per page. MaxResults int `url:"max-results,omitempty"` } -// ExpandOptions specifies which Hipchat collections to automatically expand. -// This functionality is primarily used to reduce the total time to receive the data. -// It also reduces the sheer number of API calls from 1+N, to 1. -// -// cf: https://developer.atlassian.com/hipchat/guide/hipchat-rest-api/api-title-expansion -type ExpandOptions struct { - Expand string `url:"expand,omitempty"` -} - -// Color is set of hard-coded string values for the HipChat API for notifications. -// cf: https://www.hipchat.com/docs/apiv2/method/send_room_notification type Color string const ( - // ColorYellow is the color yellow ColorYellow Color = "yellow" - // ColorGreen is the color green - ColorGreen Color = "green" - // ColorRed is the color red - ColorRed Color = "red" - // ColorPurple is the color purple + ColorGreen Color = "green" + ColorRed Color = "red" ColorPurple Color = "purple" - // ColorGray is the color gray - ColorGray Color = "gray" - // ColorRandom is the random "surprise me!" color + ColorGray Color = "gray" ColorRandom Color = "random" ) @@ -128,46 +92,6 @@ var AuthTest = false // API calls if AuthTest=true. var AuthTestResponse = map[string]interface{}{} -// RetryOnRateLimit can be set to true to automatically retry the API call until it succeeds, -// subject to the RateLimitRetryPolicy settings. This behavior is only active when the API -// call returns 429 (StatusTooManyRequests). -var RetryOnRateLimit = false - -// RetryPolicy defines a RetryPolicy. -// -// MaxRetries is the maximum number of attempts to make before returning an error -// MinDelay is the initial delay between attempts. This value is multiplied by the current attempt number. -// MaxDelay is the largest delay between attempts. -// JitterDelay is the amount of random jitter to add to the delay. -// JitterBias is the amount of jitter to remove from the delay. -// -// The use of Jitter avoids inadvertant and undesirable synchronization of network -// operations between otherwise unrelated clients. 
-// cf: https://brooker.co.za/blog/2015/03/21/backoff.html and https://www.awsarchitectureblog.com/2015/03/backoff.html -// -// Using the values of JitterDelay = 250 milliseconds and a JitterBias of negative 125 milliseconds, -// would result in a uniformly distributed Jitter between -125 and +125 milliseconds, centered -// around the current trial Delay (between MinDelay and MaxDelay). -// -// -type RetryPolicy struct { - MaxRetries int - MinDelay time.Duration - MaxDelay time.Duration - JitterDelay time.Duration - JitterBias time.Duration -} - -// NoRateLimitRetryPolicy defines the "never retry an API call" policy's values. -var NoRateLimitRetryPolicy = RetryPolicy{0, 1 * time.Second, 1 * time.Second, 500 * time.Millisecond, 0 * time.Millisecond} - -// DefaultRateLimitRetryPolicy defines the "up to 300 times, 1 second apart, randomly adding an additional up-to-500 milliseconds of delay" policy. -var DefaultRateLimitRetryPolicy = RetryPolicy{300, 1 * time.Second, 1 * time.Second, 500 * time.Millisecond, 0 * time.Millisecond} - -// RateLimitRetryPolicy can be set to a custom RetryPolicy's values, -// or to one of the two predefined ones: NoRateLimitRetryPolicy or DefaultRateLimitRetryPolicy -var RateLimitRetryPolicy = DefaultRateLimitRetryPolicy - // NewClient returns a new HipChat API client. You must provide a valid // AuthToken retrieved from your HipChat account. func NewClient(authToken string) *Client { @@ -296,10 +220,7 @@ func (c *Client) NewFileUploadRequest(method, urlStr string, v interface{}) (*ht "--hipfileboundary\n" b := &bytes.Buffer{} - _, err = b.Write([]byte(body)) - if err != nil { - return nil, err - } + b.Write([]byte(body)) req, err := http.NewRequest(method, u.String(), b) if err != nil { @@ -314,15 +235,10 @@ func (c *Client) NewFileUploadRequest(method, urlStr string, v interface{}) (*ht // Do performs the request, the json received in the response is decoded // and stored in the value pointed by v. -// Do can be used to perform the request created with NewRequest, which -// should be used only for API requests not implemented in this library. +// Do can be used to perform the request created with NewRequest, as the latter +// it should be used only for API requests not implemented in this library. 
func (c *Client) Do(req *http.Request, v interface{}) (*http.Response, error) { - var policy = NoRateLimitRetryPolicy - if RetryOnRateLimit { - policy = RateLimitRetryPolicy - } - - resp, err := c.doWithRetryPolicy(req, policy) + resp, err := c.client.Do(req) if err != nil { return nil, err } @@ -339,7 +255,7 @@ func (c *Client) Do(req *http.Request, v interface{}) (*http.Response, error) { if v != nil { defer resp.Body.Close() if w, ok := v.(io.Writer); ok { - _, err = io.Copy(w, resp.Body) + io.Copy(w, resp.Body) } else { err = json.NewDecoder(resp.Body).Decode(v) } @@ -348,66 +264,6 @@ func (c *Client) Do(req *http.Request, v interface{}) (*http.Response, error) { return resp, err } -func (c *Client) doWithRetryPolicy(req *http.Request, policy RetryPolicy) (*http.Response, error) { - currentTry := 0 - - for willContinue(currentTry, policy) { - currentTry = currentTry + 1 - resp, err := c.client.Do(req) - if err != nil { - return nil, err - } - c.captureRateLimits(resp) - if http.StatusTooManyRequests == resp.StatusCode { - resp.Body.Close() - if willContinue(currentTry, policy) { - sleep(currentTry, policy) - } - } else { - return resp, nil - } - } - return nil, fmt.Errorf("max retries exceeded (%d)", policy.MaxRetries) -} - -func willContinue(currentTry int, policy RetryPolicy) bool { - return currentTry <= policy.MaxRetries -} - -func sleep(currentTry int, policy RetryPolicy) { - jitter := time.Duration(rand.Int63n(2*int64(policy.JitterDelay))) - policy.JitterBias - linearDelay := time.Duration(currentTry)*policy.MinDelay + jitter - if linearDelay > policy.MaxDelay { - linearDelay = policy.MaxDelay - } - time.Sleep(time.Duration(linearDelay)) -} - -func setIfPresent(src string, dest *int) { - if len(src) > 0 { - v, err := strconv.Atoi(src) - if err != nil { - *dest = v - } - } -} - -func (c *Client) captureRateLimits(resp *http.Response) { - // BY DESIGN: - // if and only if the HTTP Response headers contain the header are the values updated. - // The Floodcontrol limits are orthogonal to the API limits. - // API Limits are consumed for each and every API call. - // The default value for API limits are 500 (app token) or 100 (user token). - // Flood Control limits are consumed only when a user message, room message, or room notification is sent. - // The default value for Flood Control limits is 30 per minute per user token. - setIfPresent(resp.Header.Get("X-Ratelimit-Limit"), &c.LatestRateLimit.Limit) - setIfPresent(resp.Header.Get("X-Ratelimit-Remaining"), &c.LatestRateLimit.Remaining) - setIfPresent(resp.Header.Get("X-Ratelimit-Reset"), &c.LatestRateLimit.ResetTime) - setIfPresent(resp.Header.Get("X-Floodcontrol-Limit"), &c.LatestFloodControl.Limit) - setIfPresent(resp.Header.Get("X-Floodcontrol-Remaining"), &c.LatestFloodControl.Remaining) - setIfPresent(resp.Header.Get("X-Floodcontrol-Reset"), &c.LatestFloodControl.ResetTime) -} - // addOptions adds the parameters in opt as URL query parameters to s. opt // must be a struct whose fields may contain "url" tags. 
func addOptions(s string, opt interface{}) (*url.URL, error) { diff --git a/vendor/github.com/tbruyelle/hipchat-go/hipchat/oauth.go b/vendor/github.com/tbruyelle/hipchat-go/hipchat/oauth.go index 7211ba80..9beb51d5 100644 --- a/vendor/github.com/tbruyelle/hipchat-go/hipchat/oauth.go +++ b/vendor/github.com/tbruyelle/hipchat-go/hipchat/oauth.go @@ -72,9 +72,9 @@ func (c *Client) GenerateToken(credentials ClientCredentials, scopes []string) ( content, err := ioutil.ReadAll(resp.Body) var token OAuthAccessToken - err = json.Unmarshal(content, &token) + json.Unmarshal(content, &token) - return &token, resp, err + return &token, resp, nil } const ( diff --git a/vendor/github.com/tbruyelle/hipchat-go/hipchat/room.go b/vendor/github.com/tbruyelle/hipchat-go/hipchat/room.go index 50e1a6d6..a5e2f702 100644 --- a/vendor/github.com/tbruyelle/hipchat-go/hipchat/room.go +++ b/vendor/github.com/tbruyelle/hipchat-go/hipchat/room.go @@ -25,7 +25,7 @@ type Room struct { ID int `json:"id"` Links RoomLinks `json:"links"` Name string `json:"name"` - XMPPJid string `json:"xmpp_jid"` + XmppJid string `json:"xmpp_jid"` Statistics RoomStatistics `json:"statistics"` Created string `json:"created"` IsArchived bool `json:"is_archived"` @@ -93,7 +93,7 @@ type Card struct { Format string `json:"format,omitempty"` URL string `json:"url,omitempty"` Title string `json:"title"` - Thumbnail *Thumbnail `json:"thumbnail,omitempty"` + Thumbnail *Icon `json:"thumbnail,omitempty"` Activity *Activity `json:"activity,omitempty"` Attributes []Attribute `json:"attributes,omitempty"` ID string `json:"id,omitempty"` @@ -182,7 +182,7 @@ type Thumbnail struct { URL string `json:"url"` URL2x string `json:"url@2x,omitempty"` Width uint `json:"width,omitempty"` - Height uint `json:"height,omitempty"` + Height uint `json:"url,omitempty"` } // Attribute represents an attribute on a Card @@ -243,17 +243,12 @@ type InviteRequest struct { Reason string `json:"reason"` } -// AddMemberRequest represents a HipChat add member request -type AddMemberRequest struct { - Roles []string `json:"roles,omitempty"` -} - // GlanceRequest represents a HipChat room ui glance type GlanceRequest struct { Key string `json:"key"` Name GlanceName `json:"name"` Target string `json:"target"` - QueryURL string `json:"queryUrl,omitempty"` + QueryURL string `json:"queryUrl"` Icon Icon `json:"icon"` Conditions []*GlanceCondition `json:"conditions,omitempty"` } @@ -284,7 +279,7 @@ type GlanceUpdate struct { // GlanceContent is a component of a Glance type GlanceContent struct { - Status *GlanceStatus `json:"status,omitempty"` + Status GlanceStatus `json:"status"` Metadata interface{} `json:"metadata,omitempty"` Label AttributeValue `json:"label"` // AttributeValue{Type, Label} } @@ -365,7 +360,6 @@ func (c *Card) AddAttribute(mainLabel, subLabel, url, iconURL string) { // method. type RoomsListOptions struct { ListOptions - ExpandOptions // Include private rooms in the result, API defaults to true IncludePrivate bool `url:"include-private,omitempty"` @@ -505,7 +499,6 @@ func (r *RoomService) Update(id string, roomReq *UpdateRoomRequest) (*http.Respo // HistoryOptions represents a HipChat room chat history request. type HistoryOptions struct { ListOptions - ExpandOptions // Either the latest date to fetch history for in ISO-8601 format, or 'recent' to fetch // the latest 75 messages. Paging isn't supported for 'recent', however they are real-time @@ -518,14 +511,6 @@ type HistoryOptions struct { // Reverse the output such that the oldest message is first. 
// For consistent paging, set to 'false'. Reverse bool `url:"reverse,omitempty"` - - // Either the earliest date to fetch history for the ISO-8601 format string, - // or leave blank to disable this filter. - // to be effective, the API call requires Date also be filled in with an ISO-8601 format string. - EndDate string `url:"end-date,omitempty"` - - // Include records about deleted messages into results (body of a message isn't returned). Set to 'true'. - IncludeDeleted bool `url:"include_deleted,omitempty"` } // History fetches a room's chat history. @@ -633,27 +618,3 @@ func (r *RoomService) UpdateGlance(id string, glanceUpdateReq *GlanceUpdateReque return r.client.Do(req, nil) } - -// AddMember adds a member to a private room and sends member's unavailable presence to all room members asynchronously. -// -// HipChat API docs: https://www.hipchat.com/docs/apiv2/method/add_member -func (r *RoomService) AddMember(roomID string, userID string, addMemberReq *AddMemberRequest) (*http.Response, error) { - req, err := r.client.NewRequest("PUT", fmt.Sprintf("room/%s/member/%s", roomID, userID), nil, addMemberReq) - if err != nil { - return nil, err - } - - return r.client.Do(req, nil) -} - -// RemoveMember removes a member from a private room -// -// HipChat API docs: https://www.hipchat.com/docs/apiv2/method/remove_member -func (r *RoomService) RemoveMember(roomID string, userID string) (*http.Response, error) { - req, err := r.client.NewRequest("DELETE", fmt.Sprintf("room/%s/member/%s", roomID, userID), nil, nil) - if err != nil { - return nil, err - } - - return r.client.Do(req, nil) -} diff --git a/vendor/github.com/tbruyelle/hipchat-go/hipchat/room_test.go b/vendor/github.com/tbruyelle/hipchat-go/hipchat/room_test.go index 59bcf1ef..c4227c70 100644 --- a/vendor/github.com/tbruyelle/hipchat-go/hipchat/room_test.go +++ b/vendor/github.com/tbruyelle/hipchat-go/hipchat/room_test.go @@ -54,7 +54,6 @@ func TestRoomList(t *testing.T) { testFormValues(t, r, values{ "start-index": "1", "max-results": "10", - "expand": "expansion", "include-private": "true", "include-archived": "true", }) @@ -67,7 +66,7 @@ func TestRoomList(t *testing.T) { }`) }) want := &Rooms{Items: []Room{{ID: 1, Name: "n"}}, StartIndex: 1, MaxResults: 1, Links: PageLinks{Links: Links{Self: "s"}}} - opt := &RoomsListOptions{ListOptions{1, 10}, ExpandOptions{"expansion"}, true, true} + opt := &RoomsListOptions{ListOptions{1, 10}, true, true} rooms, _, err := client.Room.List(opt) if err != nil { t.Fatalf("Room.List returns an error %v", err) @@ -126,32 +125,6 @@ func TestRoomNotification(t *testing.T) { } } -func TestRoomNotificationCardWithThumbnail(t *testing.T) { - setup() - defer teardown() - - thumbnail := &Thumbnail{URL: "http://foo.com", URL2x: "http://foo2x.com", Width: 1, Height: 2} - description := CardDescription{Format: "format", Value: "value"} - card := &Card{Style: "style", Description: description, Title: "title", Thumbnail: thumbnail} - args := &NotificationRequest{Card: card} - - mux.HandleFunc("/room/2/notification", func(w http.ResponseWriter, r *http.Request) { - testMethod(t, r, "POST") - v := new(NotificationRequest) - json.NewDecoder(r.Body).Decode(v) - - if !reflect.DeepEqual(v, args) { - t.Errorf("Request body %+v, want %+v", v, args) - } - w.WriteHeader(http.StatusNoContent) - }) - - _, err := client.Room.Notification("2", args) - if err != nil { - t.Fatalf("Room.Notification returns an error %v", err) - } -} - func TestRoomMessage(t *testing.T) { setup() defer teardown() @@ -282,14 +255,11 @@ func 
TestRoomHistory(t *testing.T) { mux.HandleFunc("/room/1/history", func(w http.ResponseWriter, r *http.Request) { testMethod(t, r, "GET") testFormValues(t, r, values{ - "start-index": "1", - "max-results": "100", - "expand": "expansion", - "date": "date", - "timezone": "tz", - "reverse": "true", - "end-date": "end-date", - "include_deleted": "true", + "start-index": "1", + "max-results": "100", + "date": "date", + "timezone": "tz", + "reverse": "true", }) fmt.Fprintf(w, ` { @@ -313,7 +283,7 @@ func TestRoomHistory(t *testing.T) { }) opt := &HistoryOptions{ - ListOptions{1, 100}, ExpandOptions{"expansion"}, "date", "tz", true, "end-date", true, + ListOptions{1, 100}, "date", "tz", true, } hist, _, err := client.Room.History("1", opt) if err != nil { @@ -424,7 +394,7 @@ func TestRoomGlanceUpdate(t *testing.T) { &GlanceUpdate{ Key: "abc", Content: GlanceContent{ - Status: &GlanceStatus{Type: "lozenge", Value: AttributeValue{Type: "default", Label: "something"}}, + Status: GlanceStatus{Type: "lozenge", Value: AttributeValue{Type: "default", Label: "something"}}, Label: AttributeValue{Type: "html", Value: "hello"}, }, }, @@ -565,7 +535,7 @@ func TestGlanceUpdateRequestJSONEncodeWithString(t *testing.T) { &GlanceUpdate{ Key: "abc", Content: GlanceContent{ - Status: &GlanceStatus{Type: "lozenge", Value: AttributeValue{Type: "default", Label: "something"}}, + Status: GlanceStatus{Type: "lozenge", Value: AttributeValue{Type: "default", Label: "something"}}, Label: AttributeValue{Type: "html", Value: "hello"}, }, }, @@ -590,7 +560,7 @@ func TestGlanceContentJSONEncodeWithString(t *testing.T) { }{ { GlanceContent{ - Status: &GlanceStatus{Type: "lozenge", Value: AttributeValue{Type: "default", Label: "something"}}, + Status: GlanceStatus{Type: "lozenge", Value: AttributeValue{Type: "default", Label: "something"}}, Label: AttributeValue{Type: "html", Value: "hello"}, }, `{"status":{"type":"lozenge","value":{"type":"default","label":"something"}},"label":{"type":"html","value":"hello"}}`, @@ -616,7 +586,7 @@ func TestGlanceContentJSONDecodeWithObject(t *testing.T) { }{ { GlanceContent{ - Status: &GlanceStatus{Type: "lozenge", Value: AttributeValue{Type: "default", Label: "something"}}, + Status: GlanceStatus{Type: "lozenge", Value: AttributeValue{Type: "default", Label: "something"}}, Label: AttributeValue{Type: "html", Value: "hello"}, }, `{"status":{"type":"lozenge","value":{"type":"default","label":"something"}},"label":{"type":"html","value":"hello"}}`, @@ -631,8 +601,8 @@ func TestGlanceContentJSONDecodeWithObject(t *testing.T) { t.Errorf("Decoding of GlanceContent failed: %v", err) } - if !reflect.DeepEqual(actual.Status, tt.gc.Status) { - t.Fatalf("Unexpected GlanceContent.Status: %+v, want %+v", actual.Status, tt.gc.Status) + if actual.Status != tt.gc.Status { + t.Fatalf("Unexpected GlanceContent.Status: %v", actual.Status) } if actual.Label != tt.gc.Label { @@ -647,12 +617,12 @@ func TestGlanceContentJSONDecodeWithObject(t *testing.T) { func TestGlanceStatusJSONEncodeWithString(t *testing.T) { gsTests := []struct { - gs *GlanceStatus + gs GlanceStatus expected string }{ - {&GlanceStatus{Type: "lozenge", Value: AttributeValue{Type: "default", Label: "something"}}, + {GlanceStatus{Type: "lozenge", Value: AttributeValue{Type: "default", Label: "something"}}, `{"type":"lozenge","value":{"type":"default","label":"something"}}`}, - {&GlanceStatus{Type: "icon", Value: Icon{URL: "z", URL2x: "x"}}, + {GlanceStatus{Type: "icon", Value: Icon{URL: "z", URL2x: "x"}}, 
`{"type":"icon","value":{"url":"z","url@2x":"x"}}`}, } @@ -670,12 +640,12 @@ func TestGlanceStatusJSONEncodeWithString(t *testing.T) { func TestGlanceStatusJSONDecodeWithObject(t *testing.T) { gsTests := []struct { - gs *GlanceStatus + gs GlanceStatus encoded string }{ - {&GlanceStatus{Type: "lozenge", Value: AttributeValue{Type: "default", Label: "something"}}, + {GlanceStatus{Type: "lozenge", Value: AttributeValue{Type: "default", Label: "something"}}, `{"type":"lozenge","value":{"type":"default","label":"something"}}`}, - {&GlanceStatus{Type: "icon", Value: Icon{URL: "z", URL2x: "x"}}, + {GlanceStatus{Type: "icon", Value: Icon{URL: "z", URL2x: "x"}}, `{"type":"icon","value":{"url":"z","url@2x":"x"}}`}, } @@ -696,39 +666,3 @@ func TestGlanceStatusJSONDecodeWithObject(t *testing.T) { } } } - -func TestAddMember(t *testing.T) { - setup() - defer teardown() - - args := &AddMemberRequest{Roles: []string{"room_member"}} - - mux.HandleFunc("/room/1/member/user", func(w http.ResponseWriter, r *http.Request) { - testMethod(t, r, "PUT") - v := new(AddMemberRequest) - json.NewDecoder(r.Body).Decode(v) - - if !reflect.DeepEqual(v, args) { - t.Errorf("Request body %+v, want %+v", v, args) - } - }) - - _, err := client.Room.AddMember("1", "user", args) - if err != nil { - t.Fatalf("Room.AddMember returns an error %v", err) - } -} - -func TestRemoveMember(t *testing.T) { - setup() - defer teardown() - - mux.HandleFunc("/room/1/member/user", func(w http.ResponseWriter, r *http.Request) { - testMethod(t, r, "DELETE") - }) - - _, err := client.Room.RemoveMember("1", "user") - if err != nil { - t.Fatalf("Room.RemoveMember returns an error %v", err) - } -} diff --git a/vendor/github.com/tbruyelle/hipchat-go/hipchat/user.go b/vendor/github.com/tbruyelle/hipchat-go/hipchat/user.go index c45872ec..81fa1500 100644 --- a/vendor/github.com/tbruyelle/hipchat-go/hipchat/user.go +++ b/vendor/github.com/tbruyelle/hipchat-go/hipchat/user.go @@ -50,7 +50,7 @@ type UpdateUserPresenceRequest struct { // User represents the HipChat user. type User struct { - XMPPJid string `json:"xmpp_jid"` + XmppJid string `json:"xmpp_jid"` IsDeleted bool `json:"is_deleted"` Name string `json:"name"` LastActive string `json:"last_active"` @@ -121,7 +121,6 @@ func (u *UserService) Message(id string, msgReq *MessageRequest) (*http.Response // UserListOptions specified the parameters to the UserService.List method. type UserListOptions struct { ListOptions - ExpandOptions // Include active guest users in response. IncludeGuests bool `url:"include-guests,omitempty"` // Include deleted users in response. 
diff --git a/vendor/github.com/tbruyelle/hipchat-go/hipchat/user_test.go b/vendor/github.com/tbruyelle/hipchat-go/hipchat/user_test.go index 9e245673..bc4a7747 100644 --- a/vendor/github.com/tbruyelle/hipchat-go/hipchat/user_test.go +++ b/vendor/github.com/tbruyelle/hipchat-go/hipchat/user_test.go @@ -111,7 +111,7 @@ func TestUserView(t *testing.T) { "xmpp_jid": "1@chat.hipchat.com" }`) }) - want := &User{XMPPJid: "1@chat.hipchat.com", + want := &User{XmppJid: "1@chat.hipchat.com", IsDeleted: false, Name: "First Last", LastActive: "1421029691", @@ -145,7 +145,6 @@ func TestUserList(t *testing.T) { testFormValues(t, r, values{ "start-index": "1", "max-results": "100", - "expand": "expansion", "include-guests": "true", "include-deleted": "true", }) @@ -179,7 +178,6 @@ func TestUserList(t *testing.T) { opt := &UserListOptions{ ListOptions{StartIndex: 1, MaxResults: 100}, - ExpandOptions{"expansion"}, true, true, } diff --git a/vendor/golang.org/x/crypto/argon2/argon2.go b/vendor/golang.org/x/crypto/argon2/argon2.go new file mode 100644 index 00000000..61216e83 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/argon2.go @@ -0,0 +1,219 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// Package argon2 implements the key derivation function Argon2. +// Argon2 was selected as the winner of the Password Hashing Competition and can +// be used to derive cryptographic keys from passwords. +// Argon2 is specfifed at https://github.com/P-H-C/phc-winner-argon2/blob/master/argon2-specs.pdf +package argon2 + +import ( + "encoding/binary" + "sync" + + "golang.org/x/crypto/blake2b" +) + +// The Argon2 version implemented by this package. +const Version = 0x13 + +const ( + argon2d = iota + argon2i + argon2id +) + +// Key derives a key from the password, salt, and cost parameters using Argon2i +// returning a byte slice of length keyLen that can be used as cryptographic key. +// The CPU cost and parallism degree must be greater than zero. +// +// For example, you can get a derived key for e.g. AES-256 (which needs a 32-byte key) by doing: +// `key := argon2.Key([]byte("some password"), salt, 4, 32*1024, 4, 32)` +// +// The recommended parameters for interactive logins as of 2017 are time=4, memory=32*1024. +// The number of threads can be adjusted to the numbers of available CPUs. +// The time parameter specifies the number of passes over the memory and the memory +// parameter specifies the size of the memory in KiB. For example memory=32*1024 sets the +// memory cost to ~32 MB. +// The cost parameters should be increased as memory latency and CPU parallelism increases. +// Remember to get a good random salt. 
+func Key(password, salt []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + return deriveKey(argon2i, password, salt, nil, nil, time, memory, threads, keyLen) +} + +func deriveKey(mode int, password, salt, secret, data []byte, time, memory uint32, threads uint8, keyLen uint32) []byte { + if time < 1 { + panic("argon2: number of rounds too small") + } + if threads < 1 { + panic("argon2: paralisim degree too low") + } + mem := memory / (4 * uint32(threads)) * (4 * uint32(threads)) + if mem < 8*uint32(threads) { + mem = 8 * uint32(threads) + } + B := initBlocks(password, salt, secret, data, time, mem, uint32(threads), keyLen, mode) + processBlocks(B, time, mem, uint32(threads), mode) + return extractKey(B, mem, uint32(threads), keyLen) +} + +const blockLength = 128 + +type block [blockLength]uint64 + +func initBlocks(password, salt, key, data []byte, time, memory, threads, keyLen uint32, mode int) []block { + var ( + block0 [1024]byte + h0 [blake2b.Size + 8]byte + params [24]byte + tmp [4]byte + ) + + b2, _ := blake2b.New512(nil) + binary.LittleEndian.PutUint32(params[0:4], threads) + binary.LittleEndian.PutUint32(params[4:8], keyLen) + binary.LittleEndian.PutUint32(params[8:12], memory) + binary.LittleEndian.PutUint32(params[12:16], time) + binary.LittleEndian.PutUint32(params[16:20], uint32(Version)) + binary.LittleEndian.PutUint32(params[20:24], uint32(mode)) + b2.Write(params[:]) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(password))) + b2.Write(tmp[:]) + b2.Write(password) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(salt))) + b2.Write(tmp[:]) + b2.Write(salt) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(key))) + b2.Write(tmp[:]) + b2.Write(key) + binary.LittleEndian.PutUint32(tmp[:], uint32(len(data))) + b2.Write(tmp[:]) + b2.Write(data) + b2.Sum(h0[:0]) + + B := make([]block, memory) + for lane := uint32(0); lane < threads; lane++ { + j := lane * (memory / threads) + binary.LittleEndian.PutUint32(h0[blake2b.Size+4:], lane) + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 0) + blake2bHash(block0[:], h0[:]) + for i := range B[0] { + B[j+0][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + + binary.LittleEndian.PutUint32(h0[blake2b.Size:], 1) + blake2bHash(block0[:], h0[:]) + for i := range B[0] { + B[j+1][i] = binary.LittleEndian.Uint64(block0[i*8:]) + } + } + return B +} + +func processBlocks(B []block, time, memory, threads uint32, mode int) { + const syncPoints = 4 + lanes := memory / threads + segments := lanes / syncPoints + + processSegment := func(n, slice, lane uint32, wg *sync.WaitGroup) { + var addresses, in, zero block + if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { + in[0] = uint64(n) + in[1] = uint64(lane) + in[2] = uint64(slice) + in[3] = uint64(memory) + in[4] = uint64(time) + in[5] = uint64(mode) + } + + index := uint32(0) + if n == 0 && slice == 0 { + index = 2 // we have already generated the first two blocks + if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + } + + offset := lane*lanes + slice*segments + index + var random uint64 + for index < segments { + prev := offset - 1 + if index == 0 && slice == 0 { + prev = lane*lanes + lanes - 1 // last block in lane + } + if mode == argon2i || (mode == argon2id && n == 0 && slice < syncPoints/2) { + if index%blockLength == 0 { + in[6]++ + processBlock(&addresses, &in, &zero) + processBlock(&addresses, &addresses, &zero) + } + 
random = addresses[index%blockLength] + } else { + random = B[prev][0] + } + newOffset := indexAlpha(random, lanes, segments, threads, n, slice, lane, index) + processBlockXOR(&B[offset], &B[prev], &B[newOffset]) + index, offset = index+1, offset+1 + } + wg.Done() + } + + for n := uint32(0); n < time; n++ { + for slice := uint32(0); slice < syncPoints; slice++ { + var wg sync.WaitGroup + for lane := uint32(0); lane < threads; lane++ { + wg.Add(1) + go processSegment(n, slice, lane, &wg) + } + wg.Wait() + } + } + +} + +func extractKey(B []block, memory, threads, keyLen uint32) []byte { + lanes := memory / threads + for lane := uint32(0); lane < threads-1; lane++ { + for i, v := range B[(lane*lanes)+lanes-1] { + B[memory-1][i] ^= v + } + } + + var block [1024]byte + for i, v := range B[memory-1] { + binary.LittleEndian.PutUint64(block[i*8:], v) + } + key := make([]byte, keyLen) + blake2bHash(key, block[:]) + return key +} + +func indexAlpha(rand uint64, lanes, segments, threads, n, slice, lane, index uint32) uint32 { + refLane := uint32(rand>>32) % threads + + m, s := 3*segments, (slice+1)%4*segments + if lane == refLane { + m += index + } + if n == 0 { + m, s = slice*segments, 0 + if slice == 0 || lane == refLane { + m += index + } + } + if index == 0 || lane == refLane { + m-- + } + return phi(rand, uint64(m), uint64(s), refLane, lanes) +} + +func phi(rand, m, s uint64, lane, lanes uint32) uint32 { + p := rand & 0xFFFFFFFF + p = (p * p) >> 32 + p = (p * m) >> 32 + return lane*lanes + uint32((s+m-(p+1))%uint64(lanes)) +} diff --git a/vendor/golang.org/x/crypto/argon2/argon2_test.go b/vendor/golang.org/x/crypto/argon2/argon2_test.go new file mode 100644 index 00000000..3f72c756 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/argon2_test.go @@ -0,0 +1,113 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
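One detail of deriveKey is worth a worked example before the test vectors: the requested memory is rounded down to a multiple of 4*threads and clamped to at least 8*threads (8 blocks per lane), which is why the known-answer tests below use memory=32 with threads=4, exactly the minimum. A small illustration of that arithmetic, not part of the vendored file:

package main

import "fmt"

func main() {
	// Mirrors the rounding in deriveKey: memory is truncated to a multiple
	// of 4*threads and clamped to at least 8*threads.
	memory, threads := uint32(33), uint32(4)
	mem := memory / (4 * threads) * (4 * threads) // 33 -> 32
	if mem < 8*threads {
		mem = 8 * threads
	}
	fmt.Println(mem) // 32, i.e. 8 blocks per lane
}
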
+package argon2 + +import ( + "bytes" + "encoding/hex" + "testing" +) + +var ( + genKatPassword = []byte{ + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, 0x01, + } + genKatSalt = []byte{0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02, 0x02} + genKatSecret = []byte{0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03, 0x03} + genKatAAD = []byte{0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04, 0x04} +) + +func TestArgon2(t *testing.T) { + defer func(sse4 bool) { useSSE4 = sse4 }(useSSE4) + + if useSSE4 { + t.Log("SSE4.1 version") + testArgon2i(t) + testArgon2d(t) + testArgon2id(t) + useSSE4 = false + } + t.Log("generic version") + testArgon2i(t) + testArgon2d(t) + testArgon2id(t) +} + +func testArgon2d(t *testing.T) { + want := []byte{ + 0x51, 0x2b, 0x39, 0x1b, 0x6f, 0x11, 0x62, 0x97, + 0x53, 0x71, 0xd3, 0x09, 0x19, 0x73, 0x42, 0x94, + 0xf8, 0x68, 0xe3, 0xbe, 0x39, 0x84, 0xf3, 0xc1, + 0xa1, 0x3a, 0x4d, 0xb9, 0xfa, 0xbe, 0x4a, 0xcb, + } + hash := deriveKey(argon2d, genKatPassword, genKatSalt, genKatSecret, genKatAAD, 3, 32, 4, 32) + if !bytes.Equal(hash, want) { + t.Errorf("derived key does not match - got: %s , want: %s", hex.EncodeToString(hash), hex.EncodeToString(want)) + } +} + +func testArgon2i(t *testing.T) { + want := []byte{ + 0xc8, 0x14, 0xd9, 0xd1, 0xdc, 0x7f, 0x37, 0xaa, + 0x13, 0xf0, 0xd7, 0x7f, 0x24, 0x94, 0xbd, 0xa1, + 0xc8, 0xde, 0x6b, 0x01, 0x6d, 0xd3, 0x88, 0xd2, + 0x99, 0x52, 0xa4, 0xc4, 0x67, 0x2b, 0x6c, 0xe8, + } + hash := deriveKey(argon2i, genKatPassword, genKatSalt, genKatSecret, genKatAAD, 3, 32, 4, 32) + if !bytes.Equal(hash, want) { + t.Errorf("derived key does not match - got: %s , want: %s", hex.EncodeToString(hash), hex.EncodeToString(want)) + } +} + +func testArgon2id(t *testing.T) { + want := []byte{ + 0x0d, 0x64, 0x0d, 0xf5, 0x8d, 0x78, 0x76, 0x6c, + 0x08, 0xc0, 0x37, 0xa3, 0x4a, 0x8b, 0x53, 0xc9, + 0xd0, 0x1e, 0xf0, 0x45, 0x2d, 0x75, 0xb6, 0x5e, + 0xb5, 0x25, 0x20, 0xe9, 0x6b, 0x01, 0xe6, 0x59, + } + hash := deriveKey(argon2id, genKatPassword, genKatSalt, genKatSecret, genKatAAD, 3, 32, 4, 32) + if !bytes.Equal(hash, want) { + t.Errorf("derived key does not match - got: %s , want: %s", hex.EncodeToString(hash), hex.EncodeToString(want)) + } +} + +func benchmarkArgon2(mode int, time, memory uint32, threads uint8, keyLen uint32, b *testing.B) { + password := []byte("password") + salt := []byte("choosing random salts is hard") + b.ReportAllocs() + for i := 0; i < b.N; i++ { + deriveKey(mode, password, salt, nil, nil, time, memory, threads, keyLen) + } +} + +func BenchmarkArgon2i(b *testing.B) { + b.Run(" Time: 3 Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2i, 3, 32*1024, 1, 32, b) }) + b.Run(" Time: 4 Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2i, 4, 32*1024, 1, 32, b) }) + b.Run(" Time: 5 Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2i, 5, 32*1024, 1, 32, b) }) + b.Run(" Time: 3 Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2i, 3, 64*1024, 4, 32, b) }) + b.Run(" Time: 4 Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2i, 4, 64*1024, 4, 32, b) }) + b.Run(" Time: 5 Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2i, 5, 64*1024, 4, 32, b) }) +} + +func BenchmarkArgon2d(b *testing.B) { + b.Run(" Time: 3, Memory: 32 MB, Threads: 1", 
func(b *testing.B) { benchmarkArgon2(argon2d, 3, 32*1024, 1, 32, b) }) + b.Run(" Time: 4, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2d, 4, 32*1024, 1, 32, b) }) + b.Run(" Time: 5, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2d, 5, 32*1024, 1, 32, b) }) + b.Run(" Time: 3, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2d, 3, 64*1024, 4, 32, b) }) + b.Run(" Time: 4, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2d, 4, 64*1024, 4, 32, b) }) + b.Run(" Time: 5, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2d, 5, 64*1024, 4, 32, b) }) +} + +func BenchmarkArgon2id(b *testing.B) { + b.Run(" Time: 3, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2id, 3, 32*1024, 1, 32, b) }) + b.Run(" Time: 4, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2id, 4, 32*1024, 1, 32, b) }) + b.Run(" Time: 5, Memory: 32 MB, Threads: 1", func(b *testing.B) { benchmarkArgon2(argon2id, 5, 32*1024, 1, 32, b) }) + b.Run(" Time: 3, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2id, 3, 64*1024, 4, 32, b) }) + b.Run(" Time: 4, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2id, 4, 64*1024, 4, 32, b) }) + b.Run(" Time: 5, Memory: 64 MB, Threads: 4", func(b *testing.B) { benchmarkArgon2(argon2id, 5, 64*1024, 4, 32, b) }) +} diff --git a/vendor/golang.org/x/crypto/argon2/blake2b.go b/vendor/golang.org/x/crypto/argon2/blake2b.go new file mode 100644 index 00000000..10f46948 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blake2b.go @@ -0,0 +1,53 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package argon2 + +import ( + "encoding/binary" + "hash" + + "golang.org/x/crypto/blake2b" +) + +// blake2bHash computes an arbitrary long hash value of in +// and writes the hash to out. +func blake2bHash(out []byte, in []byte) { + var b2 hash.Hash + if n := len(out); n < blake2b.Size { + b2, _ = blake2b.New(n, nil) + } else { + b2, _ = blake2b.New512(nil) + } + + var buffer [blake2b.Size]byte + binary.LittleEndian.PutUint32(buffer[:4], uint32(len(out))) + b2.Write(buffer[:4]) + b2.Write(in) + + if len(out) <= blake2b.Size { + b2.Sum(out[:0]) + return + } + + outLen := len(out) + b2.Sum(buffer[:0]) + b2.Reset() + copy(out, buffer[:32]) + out = out[32:] + for len(out) > blake2b.Size { + b2.Write(buffer[:]) + b2.Sum(buffer[:0]) + copy(out, buffer[:32]) + out = out[32:] + b2.Reset() + } + + if outLen%blake2b.Size > 0 { // outLen > 64 + r := ((outLen + 31) / 32) - 2 // ⌈τ /32⌉-2 + b2, _ = blake2b.New(outLen-32*r, nil) + } + b2.Write(buffer[:]) + b2.Sum(out[:0]) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.go b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go new file mode 100644 index 00000000..583ac4be --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.go @@ -0,0 +1,59 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. 
+ +package argon2 + +func init() { + useSSE4 = supportsSSE4() +} + +//go:noescape +func supportsSSE4() bool + +//go:noescape +func mixBlocksSSE2(out, a, b, c *block) + +//go:noescape +func xorBlocksSSE2(out, a, b, c *block) + +//go:noescape +func blamkaSSE4(b *block) + +func processBlockSSE(out, in1, in2 *block, xor bool) { + var t block + mixBlocksSSE2(&t, in1, in2, &t) + if useSSE4 { + blamkaSSE4(&t) + } else { + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + } + if xor { + xorBlocksSSE2(out, in1, in2, &t) + } else { + mixBlocksSSE2(out, in1, in2, &t) + } +} + +func processBlock(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockSSE(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_amd64.s b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s new file mode 100644 index 00000000..8a83f7c7 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_amd64.s @@ -0,0 +1,252 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build amd64,!gccgo,!appengine + +#include "textflag.h" + +DATA ·c40<>+0x00(SB)/8, $0x0201000706050403 +DATA ·c40<>+0x08(SB)/8, $0x0a09080f0e0d0c0b +GLOBL ·c40<>(SB), (NOPTR+RODATA), $16 + +DATA ·c48<>+0x00(SB)/8, $0x0100070605040302 +DATA ·c48<>+0x08(SB)/8, $0x09080f0e0d0c0b0a +GLOBL ·c48<>(SB), (NOPTR+RODATA), $16 + +#define SHUFFLE(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v6, t1; \ + PUNPCKLQDQ v6, t2; \ + PUNPCKHQDQ v7, v6; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ v7, t2; \ + MOVO t1, v7; \ + MOVO v2, t1; \ + PUNPCKHQDQ t2, v7; \ + PUNPCKLQDQ v3, t2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v3 + +#define SHUFFLE_INV(v2, v3, v4, v5, v6, v7, t1, t2) \ + MOVO v4, t1; \ + MOVO v5, v4; \ + MOVO t1, v5; \ + MOVO v2, t1; \ + PUNPCKLQDQ v2, t2; \ + PUNPCKHQDQ v3, v2; \ + PUNPCKHQDQ t2, v2; \ + PUNPCKLQDQ v3, t2; \ + MOVO t1, v3; \ + MOVO v6, t1; \ + PUNPCKHQDQ t2, v3; \ + PUNPCKLQDQ v7, t2; \ + PUNPCKHQDQ t2, v6; \ + PUNPCKLQDQ t1, t2; \ + PUNPCKHQDQ t2, v7 + +#define HALF_ROUND(v0, v1, v2, v3, v4, v5, v6, v7, t0, c40, c48) \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFD $0xB1, v6, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + PSHUFB c40, v2; \ + MOVO v0, t0; \ + PMULULQ v2, t0; \ + PADDQ v2, v0; \ + PADDQ t0, v0; \ + PADDQ t0, v0; \ + PXOR v0, v6; \ + PSHUFB c48, v6; \ + MOVO v4, t0; \ + PMULULQ v6, t0; \ + PADDQ v6, v4; \ + PADDQ t0, v4; \ + PADDQ t0, v4; \ + PXOR v4, v2; \ + MOVO v2, t0; \ + PADDQ v2, t0; \ + PSRLQ $63, v2; \ + PXOR t0, v2; \ + MOVO v1, t0; \ + PMULULQ v3, t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR v1, v7; \ + PSHUFD $0xB1, v7, v7; \ + MOVO v5, t0; \ + PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + PSHUFB c40, v3; \ + MOVO v1, t0; \ + PMULULQ v3, 
t0; \ + PADDQ v3, v1; \ + PADDQ t0, v1; \ + PADDQ t0, v1; \ + PXOR v1, v7; \ + PSHUFB c48, v7; \ + MOVO v5, t0; \ + PMULULQ v7, t0; \ + PADDQ v7, v5; \ + PADDQ t0, v5; \ + PADDQ t0, v5; \ + PXOR v5, v3; \ + MOVO v3, t0; \ + PADDQ v3, t0; \ + PSRLQ $63, v3; \ + PXOR t0, v3 + +#define LOAD_MSG_0(block, off) \ + MOVOU 8*(off+0)(block), X0; \ + MOVOU 8*(off+2)(block), X1; \ + MOVOU 8*(off+4)(block), X2; \ + MOVOU 8*(off+6)(block), X3; \ + MOVOU 8*(off+8)(block), X4; \ + MOVOU 8*(off+10)(block), X5; \ + MOVOU 8*(off+12)(block), X6; \ + MOVOU 8*(off+14)(block), X7 + +#define STORE_MSG_0(block, off) \ + MOVOU X0, 8*(off+0)(block); \ + MOVOU X1, 8*(off+2)(block); \ + MOVOU X2, 8*(off+4)(block); \ + MOVOU X3, 8*(off+6)(block); \ + MOVOU X4, 8*(off+8)(block); \ + MOVOU X5, 8*(off+10)(block); \ + MOVOU X6, 8*(off+12)(block); \ + MOVOU X7, 8*(off+14)(block) + +#define LOAD_MSG_1(block, off) \ + MOVOU 8*off+0*8(block), X0; \ + MOVOU 8*off+16*8(block), X1; \ + MOVOU 8*off+32*8(block), X2; \ + MOVOU 8*off+48*8(block), X3; \ + MOVOU 8*off+64*8(block), X4; \ + MOVOU 8*off+80*8(block), X5; \ + MOVOU 8*off+96*8(block), X6; \ + MOVOU 8*off+112*8(block), X7 + +#define STORE_MSG_1(block, off) \ + MOVOU X0, 8*off+0*8(block); \ + MOVOU X1, 8*off+16*8(block); \ + MOVOU X2, 8*off+32*8(block); \ + MOVOU X3, 8*off+48*8(block); \ + MOVOU X4, 8*off+64*8(block); \ + MOVOU X5, 8*off+80*8(block); \ + MOVOU X6, 8*off+96*8(block); \ + MOVOU X7, 8*off+112*8(block) + +#define BLAMKA_ROUND_0(block, off, t0, t1, c40, c48) \ + LOAD_MSG_0(block, off); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ + STORE_MSG_0(block, off) + +#define BLAMKA_ROUND_1(block, off, t0, t1, c40, c48) \ + LOAD_MSG_1(block, off); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE(X2, X3, X4, X5, X6, X7, t0, t1); \ + HALF_ROUND(X0, X1, X2, X3, X4, X5, X6, X7, t0, c40, c48); \ + SHUFFLE_INV(X2, X3, X4, X5, X6, X7, t0, t1); \ + STORE_MSG_1(block, off) + +// func blamkaSSE4(b *block) +TEXT ·blamkaSSE4(SB), 4, $0-8 + MOVQ b+0(FP), AX + + MOVOU ·c40<>(SB), X10 + MOVOU ·c48<>(SB), X11 + + BLAMKA_ROUND_0(AX, 0, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 16, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 32, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 48, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 64, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 80, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 96, X8, X9, X10, X11) + BLAMKA_ROUND_0(AX, 112, X8, X9, X10, X11) + + BLAMKA_ROUND_1(AX, 0, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 2, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 4, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 6, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 8, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 10, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 12, X8, X9, X10, X11) + BLAMKA_ROUND_1(AX, 14, X8, X9, X10, X11) + RET + +// func mixBlocksSSE2(out, a, b, c *block) +TEXT ·mixBlocksSSE2(SB), 4, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ a+24(FP), CX + MOVQ $128, BP + +loop: + MOVOU 0(AX), X0 + MOVOU 0(BX), X1 + MOVOU 0(CX), X2 + PXOR X1, X0 + PXOR X2, X0 + MOVOU X0, 0(DX) + ADDQ $16, AX + ADDQ $16, BX + ADDQ $16, CX + ADDQ $16, DX + SUBQ $2, BP + JA loop + RET + +// func xorBlocksSSE2(out, a, b, c *block) +TEXT ·xorBlocksSSE2(SB), 4, $0-32 + MOVQ out+0(FP), DX + MOVQ a+8(FP), AX + MOVQ b+16(FP), BX + MOVQ a+24(FP), CX + MOVQ $128, BP + +loop: + MOVOU 0(AX), X0 + MOVOU 0(BX), X1 + MOVOU 0(CX), X2 
+ MOVOU 0(DX), X3 + PXOR X1, X0 + PXOR X2, X0 + PXOR X3, X0 + MOVOU X0, 0(DX) + ADDQ $16, AX + ADDQ $16, BX + ADDQ $16, CX + ADDQ $16, DX + SUBQ $2, BP + JA loop + RET + +// func supportsSSE4() bool +TEXT ·supportsSSE4(SB), 4, $0-1 + MOVL $1, AX + CPUID + SHRL $19, CX // Bit 19 indicates SSE4 support + ANDL $1, CX // CX != 0 if support SSE4 + MOVB CX, ret+0(FP) + RET diff --git a/vendor/golang.org/x/crypto/argon2/blamka_generic.go b/vendor/golang.org/x/crypto/argon2/blamka_generic.go new file mode 100644 index 00000000..a481b224 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_generic.go @@ -0,0 +1,163 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +package argon2 + +var useSSE4 bool + +func processBlockGeneric(out, in1, in2 *block, xor bool) { + var t block + for i := range t { + t[i] = in1[i] ^ in2[i] + } + for i := 0; i < blockLength; i += 16 { + blamkaGeneric( + &t[i+0], &t[i+1], &t[i+2], &t[i+3], + &t[i+4], &t[i+5], &t[i+6], &t[i+7], + &t[i+8], &t[i+9], &t[i+10], &t[i+11], + &t[i+12], &t[i+13], &t[i+14], &t[i+15], + ) + } + for i := 0; i < blockLength/8; i += 2 { + blamkaGeneric( + &t[i], &t[i+1], &t[16+i], &t[16+i+1], + &t[32+i], &t[32+i+1], &t[48+i], &t[48+i+1], + &t[64+i], &t[64+i+1], &t[80+i], &t[80+i+1], + &t[96+i], &t[96+i+1], &t[112+i], &t[112+i+1], + ) + } + if xor { + for i := range t { + out[i] ^= in1[i] ^ in2[i] ^ t[i] + } + } else { + for i := range t { + out[i] = in1[i] ^ in2[i] ^ t[i] + } + } +} + +func blamkaGeneric(t00, t01, t02, t03, t04, t05, t06, t07, t08, t09, t10, t11, t12, t13, t14, t15 *uint64) { + v00, v01, v02, v03 := *t00, *t01, *t02, *t03 + v04, v05, v06, v07 := *t04, *t05, *t06, *t07 + v08, v09, v10, v11 := *t08, *t09, *t10, *t11 + v12, v13, v14, v15 := *t12, *t13, *t14, *t15 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>32 | v12<<32 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>24 | v04<<40 + + v00 += v04 + 2*uint64(uint32(v00))*uint64(uint32(v04)) + v12 ^= v00 + v12 = v12>>16 | v12<<48 + v08 += v12 + 2*uint64(uint32(v08))*uint64(uint32(v12)) + v04 ^= v08 + v04 = v04>>63 | v04<<1 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>32 | v13<<32 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>24 | v05<<40 + + v01 += v05 + 2*uint64(uint32(v01))*uint64(uint32(v05)) + v13 ^= v01 + v13 = v13>>16 | v13<<48 + v09 += v13 + 2*uint64(uint32(v09))*uint64(uint32(v13)) + v05 ^= v09 + v05 = v05>>63 | v05<<1 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>32 | v14<<32 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>24 | v06<<40 + + v02 += v06 + 2*uint64(uint32(v02))*uint64(uint32(v06)) + v14 ^= v02 + v14 = v14>>16 | v14<<48 + v10 += v14 + 2*uint64(uint32(v10))*uint64(uint32(v14)) + v06 ^= v10 + v06 = v06>>63 | v06<<1 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>32 | v15<<32 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>24 | v07<<40 + + v03 += v07 + 2*uint64(uint32(v03))*uint64(uint32(v07)) + v15 ^= v03 + v15 = v15>>16 | v15<<48 + v11 += v15 + 2*uint64(uint32(v11))*uint64(uint32(v15)) + v07 ^= v11 + v07 = v07>>63 | v07<<1 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>32 | v15<<32 + v10 += v15 + 
2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>24 | v05<<40 + + v00 += v05 + 2*uint64(uint32(v00))*uint64(uint32(v05)) + v15 ^= v00 + v15 = v15>>16 | v15<<48 + v10 += v15 + 2*uint64(uint32(v10))*uint64(uint32(v15)) + v05 ^= v10 + v05 = v05>>63 | v05<<1 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>32 | v12<<32 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>24 | v06<<40 + + v01 += v06 + 2*uint64(uint32(v01))*uint64(uint32(v06)) + v12 ^= v01 + v12 = v12>>16 | v12<<48 + v11 += v12 + 2*uint64(uint32(v11))*uint64(uint32(v12)) + v06 ^= v11 + v06 = v06>>63 | v06<<1 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>32 | v13<<32 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>24 | v07<<40 + + v02 += v07 + 2*uint64(uint32(v02))*uint64(uint32(v07)) + v13 ^= v02 + v13 = v13>>16 | v13<<48 + v08 += v13 + 2*uint64(uint32(v08))*uint64(uint32(v13)) + v07 ^= v08 + v07 = v07>>63 | v07<<1 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>32 | v14<<32 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>24 | v04<<40 + + v03 += v04 + 2*uint64(uint32(v03))*uint64(uint32(v04)) + v14 ^= v03 + v14 = v14>>16 | v14<<48 + v09 += v14 + 2*uint64(uint32(v09))*uint64(uint32(v14)) + v04 ^= v09 + v04 = v04>>63 | v04<<1 + + *t00, *t01, *t02, *t03 = v00, v01, v02, v03 + *t04, *t05, *t06, *t07 = v04, v05, v06, v07 + *t08, *t09, *t10, *t11 = v08, v09, v10, v11 + *t12, *t13, *t14, *t15 = v12, v13, v14, v15 +} diff --git a/vendor/golang.org/x/crypto/argon2/blamka_ref.go b/vendor/golang.org/x/crypto/argon2/blamka_ref.go new file mode 100644 index 00000000..baf7b551 --- /dev/null +++ b/vendor/golang.org/x/crypto/argon2/blamka_ref.go @@ -0,0 +1,15 @@ +// Copyright 2017 The Go Authors. All rights reserved. +// Use of this source code is governed by a BSD-style +// license that can be found in the LICENSE file. + +// +build !amd64 appengine gccgo + +package argon2 + +func processBlock(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, false) +} + +func processBlockXOR(out, in1, in2 *block) { + processBlockGeneric(out, in1, in2, true) +} diff --git a/vendor/golang.org/x/crypto/blake2b/blake2b.go b/vendor/golang.org/x/crypto/blake2b/blake2b.go index 7f0a86e4..6dedb894 100644 --- a/vendor/golang.org/x/crypto/blake2b/blake2b.go +++ b/vendor/golang.org/x/crypto/blake2b/blake2b.go @@ -39,7 +39,10 @@ var ( useSSE4 bool ) -var errKeySize = errors.New("blake2b: invalid key size") +var ( + errKeySize = errors.New("blake2b: invalid key size") + errHashSize = errors.New("blake2b: invalid hash size") +) var iv = [8]uint64{ 0x6a09e667f3bcc908, 0xbb67ae8584caa73b, 0x3c6ef372fe94f82b, 0xa54ff53a5f1d36f1, @@ -83,7 +86,18 @@ func New384(key []byte) (hash.Hash, error) { return newDigest(Size384, key) } // key turns the hash into a MAC. The key must between zero and 64 bytes long. func New256(key []byte) (hash.Hash, error) { return newDigest(Size256, key) } +// New returns a new hash.Hash computing the BLAKE2b checksum with a custom length. +// A non-nil key turns the hash into a MAC. The key must between zero and 64 bytes long. +// The hash size can be a value between 1 and 64 but it is highly recommended to use +// values equal or greater than: +// - 32 if BLAKE2b is used as a hash function (The key is zero bytes long). +// - 16 if BLAKE2b is used as a MAC function (The key is at least 16 bytes long). 
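To illustrate the variable-length constructor introduced below: a small sketch that derives a 32-byte unkeyed digest and a 16-byte keyed MAC, following the size guidance in the comment above; the inputs and key are placeholders.

package main

import (
	"fmt"

	"golang.org/x/crypto/blake2b"
)

func main() {
	// Unkeyed hash with a 32-byte output, the suggested minimum for plain hashing.
	h, err := blake2b.New(32, nil)
	if err != nil {
		panic(err) // size outside 1..64 or key longer than 64 bytes
	}
	h.Write([]byte("hello"))
	fmt.Printf("hash: %x\n", h.Sum(nil))

	// Keyed use (MAC) with a 16-byte output; the key here is a placeholder.
	key := make([]byte, 32)
	m, err := blake2b.New(16, key)
	if err != nil {
		panic(err)
	}
	m.Write([]byte("message"))
	fmt.Printf("mac:  %x\n", m.Sum(nil))
}
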
+func New(size int, key []byte) (hash.Hash, error) { return newDigest(size, key) } + func newDigest(hashSize int, key []byte) (*digest, error) { + if hashSize < 1 || hashSize > Size { + return nil, errHashSize + } if len(key) > Size { return nil, errKeySize } diff --git a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2_test.go b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2_test.go index 13792406..f83cb692 100644 --- a/vendor/golang.org/x/crypto/pbkdf2/pbkdf2_test.go +++ b/vendor/golang.org/x/crypto/pbkdf2/pbkdf2_test.go @@ -155,3 +155,22 @@ func TestWithHMACSHA1(t *testing.T) { func TestWithHMACSHA256(t *testing.T) { testHash(t, sha256.New, "SHA256", sha256TestVectors) } + +var sink uint8 + +func benchmark(b *testing.B, h func() hash.Hash) { + password := make([]byte, h().Size()) + salt := make([]byte, 8) + for i := 0; i < b.N; i++ { + password = Key(password, salt, 4096, len(password), h) + } + sink += password[0] +} + +func BenchmarkHMACSHA1(b *testing.B) { + benchmark(b, sha1.New) +} + +func BenchmarkHMACSHA256(b *testing.B) { + benchmark(b, sha256.New) +} diff --git a/vendor/golang.org/x/crypto/ssh/certs_test.go b/vendor/golang.org/x/crypto/ssh/certs_test.go index 0200531f..c8e7cf58 100644 --- a/vendor/golang.org/x/crypto/ssh/certs_test.go +++ b/vendor/golang.org/x/crypto/ssh/certs_test.go @@ -6,10 +6,15 @@ package ssh import ( "bytes" + "crypto/ecdsa" + "crypto/elliptic" "crypto/rand" + "net" "reflect" "testing" "time" + + "golang.org/x/crypto/ssh/testdata" ) // Cert generated by ssh-keygen 6.0p1 Debian-4. @@ -220,3 +225,111 @@ func TestHostKeyCert(t *testing.T) { } } } + +func TestCertTypes(t *testing.T) { + var testVars = []struct { + name string + keys func() Signer + }{ + { + name: CertAlgoECDSA256v01, + keys: func() Signer { + s, _ := ParsePrivateKey(testdata.PEMBytes["ecdsap256"]) + return s + }, + }, + { + name: CertAlgoECDSA384v01, + keys: func() Signer { + s, _ := ParsePrivateKey(testdata.PEMBytes["ecdsap384"]) + return s + }, + }, + { + name: CertAlgoECDSA521v01, + keys: func() Signer { + s, _ := ParsePrivateKey(testdata.PEMBytes["ecdsap521"]) + return s + }, + }, + { + name: CertAlgoED25519v01, + keys: func() Signer { + s, _ := ParsePrivateKey(testdata.PEMBytes["ed25519"]) + return s + }, + }, + { + name: CertAlgoRSAv01, + keys: func() Signer { + s, _ := ParsePrivateKey(testdata.PEMBytes["rsa"]) + return s + }, + }, + { + name: CertAlgoDSAv01, + keys: func() Signer { + s, _ := ParsePrivateKey(testdata.PEMBytes["dsa"]) + return s + }, + }, + } + + k, err := ecdsa.GenerateKey(elliptic.P256(), rand.Reader) + if err != nil { + t.Fatalf("error generating host key: %v", err) + } + + signer, err := NewSignerFromKey(k) + if err != nil { + t.Fatalf("error generating signer for ssh listener: %v", err) + } + + conf := &ServerConfig{ + PublicKeyCallback: func(c ConnMetadata, k PublicKey) (*Permissions, error) { + return new(Permissions), nil + }, + } + conf.AddHostKey(signer) + + for _, m := range testVars { + t.Run(m.name, func(t *testing.T) { + + c1, c2, err := netPipe() + if err != nil { + t.Fatalf("netPipe: %v", err) + } + defer c1.Close() + defer c2.Close() + + go NewServerConn(c1, conf) + + priv := m.keys() + if err != nil { + t.Fatalf("error generating ssh pubkey: %v", err) + } + + cert := &Certificate{ + CertType: UserCert, + Key: priv.PublicKey(), + } + cert.SignCert(rand.Reader, priv) + + certSigner, err := NewCertSigner(cert, priv) + if err != nil { + t.Fatalf("error generating cert signer: %v", err) + } + + config := &ClientConfig{ + User: "user", + HostKeyCallback: func(h string, r 
net.Addr, k PublicKey) error { return nil }, + Auth: []AuthMethod{PublicKeys(certSigner)}, + } + + _, _, _, err = NewClientConn(c2, "", config) + if err != nil { + t.Fatalf("error connecting: %v", err) + } + }) + } +} diff --git a/vendor/golang.org/x/crypto/ssh/server.go b/vendor/golang.org/x/crypto/ssh/server.go index d4df9160..b83d4738 100644 --- a/vendor/golang.org/x/crypto/ssh/server.go +++ b/vendor/golang.org/x/crypto/ssh/server.go @@ -256,7 +256,7 @@ func (s *connection) serverHandshake(config *ServerConfig) (*Permissions, error) func isAcceptableAlgo(algo string) bool { switch algo { case KeyAlgoRSA, KeyAlgoDSA, KeyAlgoECDSA256, KeyAlgoECDSA384, KeyAlgoECDSA521, KeyAlgoED25519, - CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01: + CertAlgoRSAv01, CertAlgoDSAv01, CertAlgoECDSA256v01, CertAlgoECDSA384v01, CertAlgoECDSA521v01, CertAlgoED25519v01: return true } return false diff --git a/vendor/golang.org/x/crypto/ssh/testdata/keys.go b/vendor/golang.org/x/crypto/ssh/testdata/keys.go index 3b3d26c5..521b6be9 100644 --- a/vendor/golang.org/x/crypto/ssh/testdata/keys.go +++ b/vendor/golang.org/x/crypto/ssh/testdata/keys.go @@ -23,6 +23,27 @@ MHcCAQEEINGWx0zo6fhJ/0EAfrPzVFyFC9s18lBt3cRoEDhS3ARooAoGCCqGSM49 AwEHoUQDQgAEi9Hdw6KvZcWxfg2IDhA7UkpDtzzt6ZqJXSsFdLd+Kx4S3Sx4cVO+ 6/ZOXRnPmNAlLUqjShUsUBBngG0u2fqEqA== -----END EC PRIVATE KEY----- +`), + "ecdsap256": []byte(`-----BEGIN EC PRIVATE KEY----- +MHcCAQEEIAPCE25zK0PQSnsgVcEbM1mbKTASH4pqb5QJajplDwDZoAoGCCqGSM49 +AwEHoUQDQgAEWy8TxGcIHRh5XGpO4dFVfDjeNY+VkgubQrf/eyFJZHxAn1SKraXU +qJUjTKj1z622OxYtJ5P7s9CfAEVsTzLCzg== +-----END EC PRIVATE KEY----- +`), + "ecdsap384": []byte(`-----BEGIN EC PRIVATE KEY----- +MIGkAgEBBDBWfSnMuNKq8J9rQLzzEkx3KAoEohSXqhE/4CdjEYtoU2i22HW80DDS +qQhYNHRAduygBwYFK4EEACKhZANiAAQWaDMAd0HUd8ZiXCX7mYDDnC54gwH/nG43 +VhCUEYmF7HMZm/B9Yn3GjFk3qYEDEvuF/52+NvUKBKKaLbh32AWxMv0ibcoba4cz +hL9+hWYhUD9XIUlzMWiZ2y6eBE9PdRI= +-----END EC PRIVATE KEY----- +`), + "ecdsap521": []byte(`-----BEGIN EC PRIVATE KEY----- +MIHcAgEBBEIBrkYpQcy8KTVHNiAkjlFZwee90224Bu6wz94R4OBo+Ts0eoAQG7SF +iaygEDMUbx6kTgXTBcKZ0jrWPKakayNZ/kigBwYFK4EEACOhgYkDgYYABADFuvLV +UoaCDGHcw5uNfdRIsvaLKuWSpLsl48eWGZAwdNG432GDVKduO+pceuE+8XzcyJb+ +uMv+D2b11Q/LQUcHJwE6fqbm8m3EtDKPsoKs0u/XUJb0JsH4J8lkZzbUTjvGYamn +FFlRjzoB3Oxu8UQgb+MWPedtH9XYBbg9biz4jJLkXQ== +-----END EC PRIVATE KEY----- `), "rsa": []byte(`-----BEGIN RSA PRIVATE KEY----- MIICXAIBAAKBgQC8A6FGHDiWCSREAXCq6yBfNVr0xCVG2CzvktFNRpue+RXrGs/2 diff --git a/vendor/google.golang.org/genproto/googleapis/assistant/embedded/v1alpha2/embedded_assistant.pb.go b/vendor/google.golang.org/genproto/googleapis/assistant/embedded/v1alpha2/embedded_assistant.pb.go new file mode 100644 index 00000000..b48a959a --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/assistant/embedded/v1alpha2/embedded_assistant.pb.go @@ -0,0 +1,1196 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/assistant/embedded/v1alpha2/embedded_assistant.proto + +/* +Package embedded is a generated protocol buffer package. 
+ +It is generated from these files: + google/assistant/embedded/v1alpha2/embedded_assistant.proto + +It has these top-level messages: + AssistConfig + AudioInConfig + AudioOutConfig + DialogStateIn + AudioOut + DialogStateOut + AssistRequest + AssistResponse + SpeechRecognitionResult + DeviceConfig + DeviceAction + DeviceLocation +*/ +package embedded + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_type "google.golang.org/genproto/googleapis/type/latlng" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Audio encoding of the data sent in the audio message. +// Audio must be one-channel (mono). The only language supported is "en-US". +type AudioInConfig_Encoding int32 + +const ( + // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][]. + AudioInConfig_ENCODING_UNSPECIFIED AudioInConfig_Encoding = 0 + // Uncompressed 16-bit signed little-endian samples (Linear PCM). + // This encoding includes no header, only the raw audio bytes. + AudioInConfig_LINEAR16 AudioInConfig_Encoding = 1 + // [`FLAC`](https://xiph.org/flac/documentation.html) (Free Lossless Audio + // Codec) is the recommended encoding because it is + // lossless--therefore recognition is not compromised--and + // requires only about half the bandwidth of `LINEAR16`. This encoding + // includes the `FLAC` stream header followed by audio data. It supports + // 16-bit and 24-bit samples, however, not all fields in `STREAMINFO` are + // supported. + AudioInConfig_FLAC AudioInConfig_Encoding = 2 +) + +var AudioInConfig_Encoding_name = map[int32]string{ + 0: "ENCODING_UNSPECIFIED", + 1: "LINEAR16", + 2: "FLAC", +} +var AudioInConfig_Encoding_value = map[string]int32{ + "ENCODING_UNSPECIFIED": 0, + "LINEAR16": 1, + "FLAC": 2, +} + +func (x AudioInConfig_Encoding) String() string { + return proto.EnumName(AudioInConfig_Encoding_name, int32(x)) +} +func (AudioInConfig_Encoding) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{1, 0} } + +// Audio encoding of the data returned in the audio message. All encodings are +// raw audio bytes with no header, except as indicated below. +type AudioOutConfig_Encoding int32 + +const ( + // Not specified. Will return result [google.rpc.Code.INVALID_ARGUMENT][]. + AudioOutConfig_ENCODING_UNSPECIFIED AudioOutConfig_Encoding = 0 + // Uncompressed 16-bit signed little-endian samples (Linear PCM). + AudioOutConfig_LINEAR16 AudioOutConfig_Encoding = 1 + // MP3 audio encoding. The sample rate is encoded in the payload. + AudioOutConfig_MP3 AudioOutConfig_Encoding = 2 + // Opus-encoded audio wrapped in an ogg container. The result will be a + // file which can be played natively on Android and in some browsers (such + // as Chrome). The quality of the encoding is considerably higher than MP3 + // while using the same bitrate. The sample rate is encoded in the payload. 
+ AudioOutConfig_OPUS_IN_OGG AudioOutConfig_Encoding = 3 +) + +var AudioOutConfig_Encoding_name = map[int32]string{ + 0: "ENCODING_UNSPECIFIED", + 1: "LINEAR16", + 2: "MP3", + 3: "OPUS_IN_OGG", +} +var AudioOutConfig_Encoding_value = map[string]int32{ + "ENCODING_UNSPECIFIED": 0, + "LINEAR16": 1, + "MP3": 2, + "OPUS_IN_OGG": 3, +} + +func (x AudioOutConfig_Encoding) String() string { + return proto.EnumName(AudioOutConfig_Encoding_name, int32(x)) +} +func (AudioOutConfig_Encoding) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{2, 0} } + +// Possible states of the microphone after a `Assist` RPC completes. +type DialogStateOut_MicrophoneMode int32 + +const ( + // No mode specified. + DialogStateOut_MICROPHONE_MODE_UNSPECIFIED DialogStateOut_MicrophoneMode = 0 + // The service is not expecting a follow-on question from the user. + // The microphone should remain off until the user re-activates it. + DialogStateOut_CLOSE_MICROPHONE DialogStateOut_MicrophoneMode = 1 + // The service is expecting a follow-on question from the user. The + // microphone should be re-opened when the `AudioOut` playback completes + // (by starting a new `Assist` RPC call to send the new audio). + DialogStateOut_DIALOG_FOLLOW_ON DialogStateOut_MicrophoneMode = 2 +) + +var DialogStateOut_MicrophoneMode_name = map[int32]string{ + 0: "MICROPHONE_MODE_UNSPECIFIED", + 1: "CLOSE_MICROPHONE", + 2: "DIALOG_FOLLOW_ON", +} +var DialogStateOut_MicrophoneMode_value = map[string]int32{ + "MICROPHONE_MODE_UNSPECIFIED": 0, + "CLOSE_MICROPHONE": 1, + "DIALOG_FOLLOW_ON": 2, +} + +func (x DialogStateOut_MicrophoneMode) String() string { + return proto.EnumName(DialogStateOut_MicrophoneMode_name, int32(x)) +} +func (DialogStateOut_MicrophoneMode) EnumDescriptor() ([]byte, []int) { + return fileDescriptor0, []int{5, 0} +} + +// Indicates the type of event. +type AssistResponse_EventType int32 + +const ( + // No event specified. + AssistResponse_EVENT_TYPE_UNSPECIFIED AssistResponse_EventType = 0 + // This event indicates that the server has detected the end of the user's + // speech utterance and expects no additional speech. Therefore, the server + // will not process additional audio (although it may subsequently return + // additional results). The client should stop sending additional audio + // data, half-close the gRPC connection, and wait for any additional results + // until the server closes the gRPC connection. + AssistResponse_END_OF_UTTERANCE AssistResponse_EventType = 1 +) + +var AssistResponse_EventType_name = map[int32]string{ + 0: "EVENT_TYPE_UNSPECIFIED", + 1: "END_OF_UTTERANCE", +} +var AssistResponse_EventType_value = map[string]int32{ + "EVENT_TYPE_UNSPECIFIED": 0, + "END_OF_UTTERANCE": 1, +} + +func (x AssistResponse_EventType) String() string { + return proto.EnumName(AssistResponse_EventType_name, int32(x)) +} +func (AssistResponse_EventType) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} } + +// Specifies how to process the `AssistRequest` messages. +type AssistConfig struct { + // Types that are valid to be assigned to Type: + // *AssistConfig_AudioInConfig + // *AssistConfig_TextQuery + Type isAssistConfig_Type `protobuf_oneof:"type"` + // *Required* Specifies how to format the audio that will be returned. + AudioOutConfig *AudioOutConfig `protobuf:"bytes,2,opt,name=audio_out_config,json=audioOutConfig" json:"audio_out_config,omitempty"` + // *Required* Represents the current dialog state. 
+ DialogStateIn *DialogStateIn `protobuf:"bytes,3,opt,name=dialog_state_in,json=dialogStateIn" json:"dialog_state_in,omitempty"` + // Device configuration that uniquely identifies a specific device. + DeviceConfig *DeviceConfig `protobuf:"bytes,4,opt,name=device_config,json=deviceConfig" json:"device_config,omitempty"` +} + +func (m *AssistConfig) Reset() { *m = AssistConfig{} } +func (m *AssistConfig) String() string { return proto.CompactTextString(m) } +func (*AssistConfig) ProtoMessage() {} +func (*AssistConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +type isAssistConfig_Type interface { + isAssistConfig_Type() +} + +type AssistConfig_AudioInConfig struct { + AudioInConfig *AudioInConfig `protobuf:"bytes,1,opt,name=audio_in_config,json=audioInConfig,oneof"` +} +type AssistConfig_TextQuery struct { + TextQuery string `protobuf:"bytes,6,opt,name=text_query,json=textQuery,oneof"` +} + +func (*AssistConfig_AudioInConfig) isAssistConfig_Type() {} +func (*AssistConfig_TextQuery) isAssistConfig_Type() {} + +func (m *AssistConfig) GetType() isAssistConfig_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *AssistConfig) GetAudioInConfig() *AudioInConfig { + if x, ok := m.GetType().(*AssistConfig_AudioInConfig); ok { + return x.AudioInConfig + } + return nil +} + +func (m *AssistConfig) GetTextQuery() string { + if x, ok := m.GetType().(*AssistConfig_TextQuery); ok { + return x.TextQuery + } + return "" +} + +func (m *AssistConfig) GetAudioOutConfig() *AudioOutConfig { + if m != nil { + return m.AudioOutConfig + } + return nil +} + +func (m *AssistConfig) GetDialogStateIn() *DialogStateIn { + if m != nil { + return m.DialogStateIn + } + return nil +} + +func (m *AssistConfig) GetDeviceConfig() *DeviceConfig { + if m != nil { + return m.DeviceConfig + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*AssistConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AssistConfig_OneofMarshaler, _AssistConfig_OneofUnmarshaler, _AssistConfig_OneofSizer, []interface{}{ + (*AssistConfig_AudioInConfig)(nil), + (*AssistConfig_TextQuery)(nil), + } +} + +func _AssistConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AssistConfig) + // type + switch x := m.Type.(type) { + case *AssistConfig_AudioInConfig: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.AudioInConfig); err != nil { + return err + } + case *AssistConfig_TextQuery: + b.EncodeVarint(6<<3 | proto.WireBytes) + b.EncodeStringBytes(x.TextQuery) + case nil: + default: + return fmt.Errorf("AssistConfig.Type has unexpected type %T", x) + } + return nil +} + +func _AssistConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AssistConfig) + switch tag { + case 1: // type.audio_in_config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AudioInConfig) + err := b.DecodeMessage(msg) + m.Type = &AssistConfig_AudioInConfig{msg} + return true, err + case 6: // type.text_query + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Type = &AssistConfig_TextQuery{x} + return true, err + default: + return false, nil + } +} + +func _AssistConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AssistConfig) + // type + switch x := m.Type.(type) { + case *AssistConfig_AudioInConfig: + s := proto.Size(x.AudioInConfig) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AssistConfig_TextQuery: + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.TextQuery))) + n += len(x.TextQuery) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Specifies how to process the `audio_in` data that will be provided in +// subsequent requests. For recommended settings, see the Google Assistant SDK +// [best practices](https://developers.google.com/assistant/sdk/guides/service/python/best-practices/audio). +type AudioInConfig struct { + // *Required* Encoding of audio data sent in all `audio_in` messages. + Encoding AudioInConfig_Encoding `protobuf:"varint,1,opt,name=encoding,enum=google.assistant.embedded.v1alpha2.AudioInConfig_Encoding" json:"encoding,omitempty"` + // *Required* Sample rate (in Hertz) of the audio data sent in all `audio_in` + // messages. Valid values are from 16000-24000, but 16000 is optimal. + // For best results, set the sampling rate of the audio source to 16000 Hz. + // If that's not possible, use the native sample rate of the audio source + // (instead of re-sampling). 
+ SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz" json:"sample_rate_hertz,omitempty"` +} + +func (m *AudioInConfig) Reset() { *m = AudioInConfig{} } +func (m *AudioInConfig) String() string { return proto.CompactTextString(m) } +func (*AudioInConfig) ProtoMessage() {} +func (*AudioInConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *AudioInConfig) GetEncoding() AudioInConfig_Encoding { + if m != nil { + return m.Encoding + } + return AudioInConfig_ENCODING_UNSPECIFIED +} + +func (m *AudioInConfig) GetSampleRateHertz() int32 { + if m != nil { + return m.SampleRateHertz + } + return 0 +} + +// Specifies the desired format for the server to use when it returns +// `audio_out` messages. +type AudioOutConfig struct { + // *Required* The encoding of audio data to be returned in all `audio_out` + // messages. + Encoding AudioOutConfig_Encoding `protobuf:"varint,1,opt,name=encoding,enum=google.assistant.embedded.v1alpha2.AudioOutConfig_Encoding" json:"encoding,omitempty"` + // *Required* The sample rate in Hertz of the audio data returned in + // `audio_out` messages. Valid values are: 16000-24000. + SampleRateHertz int32 `protobuf:"varint,2,opt,name=sample_rate_hertz,json=sampleRateHertz" json:"sample_rate_hertz,omitempty"` + // *Required* Current volume setting of the device's audio output. + // Valid values are 1 to 100 (corresponding to 1% to 100%). + VolumePercentage int32 `protobuf:"varint,3,opt,name=volume_percentage,json=volumePercentage" json:"volume_percentage,omitempty"` +} + +func (m *AudioOutConfig) Reset() { *m = AudioOutConfig{} } +func (m *AudioOutConfig) String() string { return proto.CompactTextString(m) } +func (*AudioOutConfig) ProtoMessage() {} +func (*AudioOutConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *AudioOutConfig) GetEncoding() AudioOutConfig_Encoding { + if m != nil { + return m.Encoding + } + return AudioOutConfig_ENCODING_UNSPECIFIED +} + +func (m *AudioOutConfig) GetSampleRateHertz() int32 { + if m != nil { + return m.SampleRateHertz + } + return 0 +} + +func (m *AudioOutConfig) GetVolumePercentage() int32 { + if m != nil { + return m.VolumePercentage + } + return 0 +} + +// Provides information about the current dialog state. +type DialogStateIn struct { + // *Required* This field must always be set to the + // [DialogStateOut.conversation_state][google.assistant.embedded.v1alpha2.DialogStateOut.conversation_state] value that was returned in the prior + // `Assist` RPC. It should only be omitted (field not set) if there was no + // prior `Assist` RPC because this is the first `Assist` RPC made by this + // device after it was first setup and/or a factory-default reset. + ConversationState []byte `protobuf:"bytes,1,opt,name=conversation_state,json=conversationState,proto3" json:"conversation_state,omitempty"` + // *Required* Language of the request in + // [IETF BCP 47 syntax](https://tools.ietf.org/html/bcp47). For example: + // "en-US". If you have selected a language for this `device_id` using the + // [Settings](https://developers.google.com/assistant/sdk/guides/assistant-settings) + // menu in your phone's Google Assistant app, that selection will override + // this value. + LanguageCode string `protobuf:"bytes,2,opt,name=language_code,json=languageCode" json:"language_code,omitempty"` + // *Optional* Location of the device where the query originated. 
+ DeviceLocation *DeviceLocation `protobuf:"bytes,5,opt,name=device_location,json=deviceLocation" json:"device_location,omitempty"` +} + +func (m *DialogStateIn) Reset() { *m = DialogStateIn{} } +func (m *DialogStateIn) String() string { return proto.CompactTextString(m) } +func (*DialogStateIn) ProtoMessage() {} +func (*DialogStateIn) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *DialogStateIn) GetConversationState() []byte { + if m != nil { + return m.ConversationState + } + return nil +} + +func (m *DialogStateIn) GetLanguageCode() string { + if m != nil { + return m.LanguageCode + } + return "" +} + +func (m *DialogStateIn) GetDeviceLocation() *DeviceLocation { + if m != nil { + return m.DeviceLocation + } + return nil +} + +// The audio containing the Assistant's response to the query. Sequential chunks +// of audio data are received in sequential `AssistResponse` messages. +type AudioOut struct { + // *Output-only* The audio data containing the Assistant's response to the + // query. Sequential chunks of audio data are received in sequential + // `AssistResponse` messages. + AudioData []byte `protobuf:"bytes,1,opt,name=audio_data,json=audioData,proto3" json:"audio_data,omitempty"` +} + +func (m *AudioOut) Reset() { *m = AudioOut{} } +func (m *AudioOut) String() string { return proto.CompactTextString(m) } +func (*AudioOut) ProtoMessage() {} +func (*AudioOut) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *AudioOut) GetAudioData() []byte { + if m != nil { + return m.AudioData + } + return nil +} + +// The dialog state resulting from the user's query. Multiple of these messages +// may be received. +type DialogStateOut struct { + // *Output-only* Supplemental display text from the Assistant. This could be + // the same as the speech spoken in `AssistResponse.audio_out` or it could + // be some additional information which aids the user's understanding. + SupplementalDisplayText string `protobuf:"bytes,1,opt,name=supplemental_display_text,json=supplementalDisplayText" json:"supplemental_display_text,omitempty"` + // *Output-only* State information for the subsequent `Assist` RPC. This + // value should be saved in the client and returned in the + // [`DialogStateIn.conversation_state`](#dialogstatein) field with the next + // `Assist` RPC. (The client does not need to interpret or otherwise use this + // value.) This information should be saved across device reboots. However, + // this value should be cleared (not saved in the client) during a + // factory-default reset. + ConversationState []byte `protobuf:"bytes,2,opt,name=conversation_state,json=conversationState,proto3" json:"conversation_state,omitempty"` + // *Output-only* Specifies the mode of the microphone after this `Assist` + // RPC is processed. + MicrophoneMode DialogStateOut_MicrophoneMode `protobuf:"varint,3,opt,name=microphone_mode,json=microphoneMode,enum=google.assistant.embedded.v1alpha2.DialogStateOut_MicrophoneMode" json:"microphone_mode,omitempty"` + // *Output-only* Updated volume level. The value will be 0 or omitted + // (indicating no change) unless a voice command such as *Increase the volume* + // or *Set volume level 4* was recognized, in which case the value will be + // between 1 and 100 (corresponding to the new volume level of 1% to 100%). 
+ // Typically, a client should use this volume level when playing the + // `audio_out` data, and retain this value as the current volume level and + // supply it in the `AudioOutConfig` of the next `AssistRequest`. (Some + // clients may also implement other ways to allow the current volume level to + // be changed, for example, by providing a knob that the user can turn.) + VolumePercentage int32 `protobuf:"varint,4,opt,name=volume_percentage,json=volumePercentage" json:"volume_percentage,omitempty"` +} + +func (m *DialogStateOut) Reset() { *m = DialogStateOut{} } +func (m *DialogStateOut) String() string { return proto.CompactTextString(m) } +func (*DialogStateOut) ProtoMessage() {} +func (*DialogStateOut) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *DialogStateOut) GetSupplementalDisplayText() string { + if m != nil { + return m.SupplementalDisplayText + } + return "" +} + +func (m *DialogStateOut) GetConversationState() []byte { + if m != nil { + return m.ConversationState + } + return nil +} + +func (m *DialogStateOut) GetMicrophoneMode() DialogStateOut_MicrophoneMode { + if m != nil { + return m.MicrophoneMode + } + return DialogStateOut_MICROPHONE_MODE_UNSPECIFIED +} + +func (m *DialogStateOut) GetVolumePercentage() int32 { + if m != nil { + return m.VolumePercentage + } + return 0 +} + +// The top-level message sent by the client. Clients must send at least two, and +// typically numerous `AssistRequest` messages. The first message must +// contain a `config` message and must not contain `audio_in` data. All +// subsequent messages must contain `audio_in` data and must not contain a +// `config` message. +type AssistRequest struct { + // Exactly one of these fields must be specified in each `AssistRequest`. + // + // Types that are valid to be assigned to Type: + // *AssistRequest_Config + // *AssistRequest_AudioIn + Type isAssistRequest_Type `protobuf_oneof:"type"` +} + +func (m *AssistRequest) Reset() { *m = AssistRequest{} } +func (m *AssistRequest) String() string { return proto.CompactTextString(m) } +func (*AssistRequest) ProtoMessage() {} +func (*AssistRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +type isAssistRequest_Type interface { + isAssistRequest_Type() +} + +type AssistRequest_Config struct { + Config *AssistConfig `protobuf:"bytes,1,opt,name=config,oneof"` +} +type AssistRequest_AudioIn struct { + AudioIn []byte `protobuf:"bytes,2,opt,name=audio_in,json=audioIn,proto3,oneof"` +} + +func (*AssistRequest_Config) isAssistRequest_Type() {} +func (*AssistRequest_AudioIn) isAssistRequest_Type() {} + +func (m *AssistRequest) GetType() isAssistRequest_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *AssistRequest) GetConfig() *AssistConfig { + if x, ok := m.GetType().(*AssistRequest_Config); ok { + return x.Config + } + return nil +} + +func (m *AssistRequest) GetAudioIn() []byte { + if x, ok := m.GetType().(*AssistRequest_AudioIn); ok { + return x.AudioIn + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*AssistRequest) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _AssistRequest_OneofMarshaler, _AssistRequest_OneofUnmarshaler, _AssistRequest_OneofSizer, []interface{}{ + (*AssistRequest_Config)(nil), + (*AssistRequest_AudioIn)(nil), + } +} + +func _AssistRequest_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*AssistRequest) + // type + switch x := m.Type.(type) { + case *AssistRequest_Config: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Config); err != nil { + return err + } + case *AssistRequest_AudioIn: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeRawBytes(x.AudioIn) + case nil: + default: + return fmt.Errorf("AssistRequest.Type has unexpected type %T", x) + } + return nil +} + +func _AssistRequest_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*AssistRequest) + switch tag { + case 1: // type.config + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(AssistConfig) + err := b.DecodeMessage(msg) + m.Type = &AssistRequest_Config{msg} + return true, err + case 2: // type.audio_in + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeRawBytes(true) + m.Type = &AssistRequest_AudioIn{x} + return true, err + default: + return false, nil + } +} + +func _AssistRequest_OneofSizer(msg proto.Message) (n int) { + m := msg.(*AssistRequest) + // type + switch x := m.Type.(type) { + case *AssistRequest_Config: + s := proto.Size(x.Config) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *AssistRequest_AudioIn: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.AudioIn))) + n += len(x.AudioIn) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The top-level message received by the client. A series of one or more +// `AssistResponse` messages are streamed back to the client. +type AssistResponse struct { + // *Output-only* Indicates the type of event. + EventType AssistResponse_EventType `protobuf:"varint,1,opt,name=event_type,json=eventType,enum=google.assistant.embedded.v1alpha2.AssistResponse_EventType" json:"event_type,omitempty"` + // *Output-only* The audio containing the Assistant's response to the query. + AudioOut *AudioOut `protobuf:"bytes,3,opt,name=audio_out,json=audioOut" json:"audio_out,omitempty"` + // *Output-only* Contains the action triggered by the query with the + // appropriate payloads and semantic parsing. + DeviceAction *DeviceAction `protobuf:"bytes,6,opt,name=device_action,json=deviceAction" json:"device_action,omitempty"` + // *Output-only* This repeated list contains zero or more speech recognition + // results that correspond to consecutive portions of the audio currently + // being processed, starting with the portion corresponding to the earliest + // audio (and most stable portion) to the portion corresponding to the most + // recent audio. The strings can be concatenated to view the full + // in-progress response. When the speech recognition completes, this list + // will contain one item with `stability` of `1.0`. 
+ SpeechResults []*SpeechRecognitionResult `protobuf:"bytes,2,rep,name=speech_results,json=speechResults" json:"speech_results,omitempty"` + // *Output-only* Contains output related to the user's query. + DialogStateOut *DialogStateOut `protobuf:"bytes,5,opt,name=dialog_state_out,json=dialogStateOut" json:"dialog_state_out,omitempty"` +} + +func (m *AssistResponse) Reset() { *m = AssistResponse{} } +func (m *AssistResponse) String() string { return proto.CompactTextString(m) } +func (*AssistResponse) ProtoMessage() {} +func (*AssistResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *AssistResponse) GetEventType() AssistResponse_EventType { + if m != nil { + return m.EventType + } + return AssistResponse_EVENT_TYPE_UNSPECIFIED +} + +func (m *AssistResponse) GetAudioOut() *AudioOut { + if m != nil { + return m.AudioOut + } + return nil +} + +func (m *AssistResponse) GetDeviceAction() *DeviceAction { + if m != nil { + return m.DeviceAction + } + return nil +} + +func (m *AssistResponse) GetSpeechResults() []*SpeechRecognitionResult { + if m != nil { + return m.SpeechResults + } + return nil +} + +func (m *AssistResponse) GetDialogStateOut() *DialogStateOut { + if m != nil { + return m.DialogStateOut + } + return nil +} + +// The estimated transcription of a phrase the user has spoken. This could be +// a single segment or the full guess of the user's spoken query. +type SpeechRecognitionResult struct { + // *Output-only* Transcript text representing the words that the user spoke. + Transcript string `protobuf:"bytes,1,opt,name=transcript" json:"transcript,omitempty"` + // *Output-only* An estimate of the likelihood that the Assistant will not + // change its guess about this result. Values range from 0.0 (completely + // unstable) to 1.0 (completely stable and final). The default of 0.0 is a + // sentinel value indicating `stability` was not set. + Stability float32 `protobuf:"fixed32,2,opt,name=stability" json:"stability,omitempty"` +} + +func (m *SpeechRecognitionResult) Reset() { *m = SpeechRecognitionResult{} } +func (m *SpeechRecognitionResult) String() string { return proto.CompactTextString(m) } +func (*SpeechRecognitionResult) ProtoMessage() {} +func (*SpeechRecognitionResult) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *SpeechRecognitionResult) GetTranscript() string { + if m != nil { + return m.Transcript + } + return "" +} + +func (m *SpeechRecognitionResult) GetStability() float32 { + if m != nil { + return m.Stability + } + return 0 +} + +// *Required* Fields that identify the device to the Assistant. +// +// See also: +// +// * [Register a Device - REST API](https://developers.google.com/assistant/sdk/reference/device-registration/register-device-manual) +// * [Device Model and Instance Schemas](https://developers.google.com/assistant/sdk/reference/device-registration/model-and-instance-schemas) +// * [Device Proto](https://developers.google.com/assistant/sdk/reference/rpc/google.assistant.devices.v1alpha2#device) +type DeviceConfig struct { + // *Required* Unique identifier for the device. The id length must be 128 + // characters or less. Example: DBCDW098234. This MUST match the device_id + // returned from device registration. This device_id is used to match against + // the user's registered devices to lookup the supported traits and + // capabilities of this device. This information should not change across + // device reboots. However, it should not be saved across + // factory-default resets. 
+ DeviceId string `protobuf:"bytes,1,opt,name=device_id,json=deviceId" json:"device_id,omitempty"` + // *Required* Unique identifier for the device model. The combination of + // device_model_id and device_id must have been previously associated through + // device registration. + DeviceModelId string `protobuf:"bytes,3,opt,name=device_model_id,json=deviceModelId" json:"device_model_id,omitempty"` +} + +func (m *DeviceConfig) Reset() { *m = DeviceConfig{} } +func (m *DeviceConfig) String() string { return proto.CompactTextString(m) } +func (*DeviceConfig) ProtoMessage() {} +func (*DeviceConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *DeviceConfig) GetDeviceId() string { + if m != nil { + return m.DeviceId + } + return "" +} + +func (m *DeviceConfig) GetDeviceModelId() string { + if m != nil { + return m.DeviceModelId + } + return "" +} + +// The response returned to the device if the user has triggered a Device +// Action. For example, a device which supports the query *Turn on the light* +// would receive a `DeviceAction` with a JSON payload containing the semantics +// of the request. +type DeviceAction struct { + // JSON containing the device command response generated from the triggered + // Device Action grammar. The format is given by the + // `action.devices.EXECUTE` intent for a given + // [trait](https://developers.google.com/assistant/sdk/reference/traits/). + DeviceRequestJson string `protobuf:"bytes,1,opt,name=device_request_json,json=deviceRequestJson" json:"device_request_json,omitempty"` +} + +func (m *DeviceAction) Reset() { *m = DeviceAction{} } +func (m *DeviceAction) String() string { return proto.CompactTextString(m) } +func (*DeviceAction) ProtoMessage() {} +func (*DeviceAction) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *DeviceAction) GetDeviceRequestJson() string { + if m != nil { + return m.DeviceRequestJson + } + return "" +} + +// There are three sources of locations. They are used with this precedence: +// +// 1. This `DeviceLocation`, which is primarily used for mobile devices with +// GPS . +// 2. Location specified by the user during device setup; this is per-user, per +// device. This location is used if `DeviceLocation` is not specified. +// 3. Inferred location based on IP address. This is used only if neither of the +// above are specified. +type DeviceLocation struct { + // Types that are valid to be assigned to Type: + // *DeviceLocation_Coordinates + Type isDeviceLocation_Type `protobuf_oneof:"type"` +} + +func (m *DeviceLocation) Reset() { *m = DeviceLocation{} } +func (m *DeviceLocation) String() string { return proto.CompactTextString(m) } +func (*DeviceLocation) ProtoMessage() {} +func (*DeviceLocation) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +type isDeviceLocation_Type interface { + isDeviceLocation_Type() +} + +type DeviceLocation_Coordinates struct { + Coordinates *google_type.LatLng `protobuf:"bytes,1,opt,name=coordinates,oneof"` +} + +func (*DeviceLocation_Coordinates) isDeviceLocation_Type() {} + +func (m *DeviceLocation) GetType() isDeviceLocation_Type { + if m != nil { + return m.Type + } + return nil +} + +func (m *DeviceLocation) GetCoordinates() *google_type.LatLng { + if x, ok := m.GetType().(*DeviceLocation_Coordinates); ok { + return x.Coordinates + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
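
A minimal sketch (not part of the generated file) of the highest-precedence location source described above: an explicit coordinate supplied through the `DeviceLocation` oneof. The latitude/longitude values are arbitrary examples.

// Sketch only: a DeviceLocation carrying explicit GPS coordinates, which take
// precedence over the setup-time and IP-inferred locations described above.
func exampleDeviceLocation() *DeviceLocation {
	return &DeviceLocation{
		Type: &DeviceLocation_Coordinates{
			Coordinates: &google_type.LatLng{Latitude: 37.422, Longitude: -122.084},
		},
	}
}
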
+func (*DeviceLocation) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _DeviceLocation_OneofMarshaler, _DeviceLocation_OneofUnmarshaler, _DeviceLocation_OneofSizer, []interface{}{ + (*DeviceLocation_Coordinates)(nil), + } +} + +func _DeviceLocation_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*DeviceLocation) + // type + switch x := m.Type.(type) { + case *DeviceLocation_Coordinates: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.Coordinates); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("DeviceLocation.Type has unexpected type %T", x) + } + return nil +} + +func _DeviceLocation_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*DeviceLocation) + switch tag { + case 1: // type.coordinates + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_type.LatLng) + err := b.DecodeMessage(msg) + m.Type = &DeviceLocation_Coordinates{msg} + return true, err + default: + return false, nil + } +} + +func _DeviceLocation_OneofSizer(msg proto.Message) (n int) { + m := msg.(*DeviceLocation) + // type + switch x := m.Type.(type) { + case *DeviceLocation_Coordinates: + s := proto.Size(x.Coordinates) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +func init() { + proto.RegisterType((*AssistConfig)(nil), "google.assistant.embedded.v1alpha2.AssistConfig") + proto.RegisterType((*AudioInConfig)(nil), "google.assistant.embedded.v1alpha2.AudioInConfig") + proto.RegisterType((*AudioOutConfig)(nil), "google.assistant.embedded.v1alpha2.AudioOutConfig") + proto.RegisterType((*DialogStateIn)(nil), "google.assistant.embedded.v1alpha2.DialogStateIn") + proto.RegisterType((*AudioOut)(nil), "google.assistant.embedded.v1alpha2.AudioOut") + proto.RegisterType((*DialogStateOut)(nil), "google.assistant.embedded.v1alpha2.DialogStateOut") + proto.RegisterType((*AssistRequest)(nil), "google.assistant.embedded.v1alpha2.AssistRequest") + proto.RegisterType((*AssistResponse)(nil), "google.assistant.embedded.v1alpha2.AssistResponse") + proto.RegisterType((*SpeechRecognitionResult)(nil), "google.assistant.embedded.v1alpha2.SpeechRecognitionResult") + proto.RegisterType((*DeviceConfig)(nil), "google.assistant.embedded.v1alpha2.DeviceConfig") + proto.RegisterType((*DeviceAction)(nil), "google.assistant.embedded.v1alpha2.DeviceAction") + proto.RegisterType((*DeviceLocation)(nil), "google.assistant.embedded.v1alpha2.DeviceLocation") + proto.RegisterEnum("google.assistant.embedded.v1alpha2.AudioInConfig_Encoding", AudioInConfig_Encoding_name, AudioInConfig_Encoding_value) + proto.RegisterEnum("google.assistant.embedded.v1alpha2.AudioOutConfig_Encoding", AudioOutConfig_Encoding_name, AudioOutConfig_Encoding_value) + proto.RegisterEnum("google.assistant.embedded.v1alpha2.DialogStateOut_MicrophoneMode", DialogStateOut_MicrophoneMode_name, DialogStateOut_MicrophoneMode_value) + proto.RegisterEnum("google.assistant.embedded.v1alpha2.AssistResponse_EventType", AssistResponse_EventType_name, AssistResponse_EventType_value) +} + +// Reference imports to suppress errors if they are not otherwise used. 
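
To make the request ordering documented above concrete, here is a standalone client sketch (not part of the generated file; it assumes its own `context`, `io`, and `grpc` imports and a caller-supplied `AssistConfig` and audio source). It uses the `EmbeddedAssistantClient` generated just below: one request carrying the config, then audio-only requests, while responses are read until the server closes the stream.

// Sketch only: drives EmbeddedAssistantClient.Assist in the documented order.
func streamAssist(ctx context.Context, conn *grpc.ClientConn, cfg *AssistConfig, chunks <-chan []byte) error {
	stream, err := NewEmbeddedAssistantClient(conn).Assist(ctx)
	if err != nil {
		return err
	}
	// The first AssistRequest must carry the config and no audio.
	if err := stream.Send(&AssistRequest{Type: &AssistRequest_Config{Config: cfg}}); err != nil {
		return err
	}
	// All subsequent AssistRequests carry audio_in only.
	go func() {
		for chunk := range chunks {
			if stream.Send(&AssistRequest{Type: &AssistRequest_AudioIn{AudioIn: chunk}}) != nil {
				return
			}
		}
		stream.CloseSend()
	}()
	// Read AssistResponses (END_OF_UTTERANCE, speech_results, audio_out, ...)
	// until the server ends the stream with io.EOF.
	for {
		resp, err := stream.Recv()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		_ = resp.GetAudioOut() // play audio, inspect resp.GetEventType(), etc.
	}
}
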
+var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for EmbeddedAssistant service + +type EmbeddedAssistantClient interface { + // Initiates or continues a conversation with the embedded Assistant Service. + // Each call performs one round-trip, sending an audio request to the service + // and receiving the audio response. Uses bidirectional streaming to receive + // results, such as the `END_OF_UTTERANCE` event, while sending audio. + // + // A conversation is one or more gRPC connections, each consisting of several + // streamed requests and responses. + // For example, the user says *Add to my shopping list* and the Assistant + // responds *What do you want to add?*. The sequence of streamed requests and + // responses in the first gRPC message could be: + // + // * AssistRequest.config + // * AssistRequest.audio_in + // * AssistRequest.audio_in + // * AssistRequest.audio_in + // * AssistRequest.audio_in + // * AssistResponse.event_type.END_OF_UTTERANCE + // * AssistResponse.speech_results.transcript "add to my shopping list" + // * AssistResponse.dialog_state_out.microphone_mode.DIALOG_FOLLOW_ON + // * AssistResponse.audio_out + // * AssistResponse.audio_out + // * AssistResponse.audio_out + // + // + // The user then says *bagels* and the Assistant responds + // *OK, I've added bagels to your shopping list*. This is sent as another gRPC + // connection call to the `Assist` method, again with streamed requests and + // responses, such as: + // + // * AssistRequest.config + // * AssistRequest.audio_in + // * AssistRequest.audio_in + // * AssistRequest.audio_in + // * AssistResponse.event_type.END_OF_UTTERANCE + // * AssistResponse.dialog_state_out.microphone_mode.CLOSE_MICROPHONE + // * AssistResponse.audio_out + // * AssistResponse.audio_out + // * AssistResponse.audio_out + // * AssistResponse.audio_out + // + // Although the precise order of responses is not guaranteed, sequential + // `AssistResponse.audio_out` messages will always contain sequential portions + // of audio. + Assist(ctx context.Context, opts ...grpc.CallOption) (EmbeddedAssistant_AssistClient, error) +} + +type embeddedAssistantClient struct { + cc *grpc.ClientConn +} + +func NewEmbeddedAssistantClient(cc *grpc.ClientConn) EmbeddedAssistantClient { + return &embeddedAssistantClient{cc} +} + +func (c *embeddedAssistantClient) Assist(ctx context.Context, opts ...grpc.CallOption) (EmbeddedAssistant_AssistClient, error) { + stream, err := grpc.NewClientStream(ctx, &_EmbeddedAssistant_serviceDesc.Streams[0], c.cc, "/google.assistant.embedded.v1alpha2.EmbeddedAssistant/Assist", opts...) 
+ if err != nil { + return nil, err + } + x := &embeddedAssistantAssistClient{stream} + return x, nil +} + +type EmbeddedAssistant_AssistClient interface { + Send(*AssistRequest) error + Recv() (*AssistResponse, error) + grpc.ClientStream +} + +type embeddedAssistantAssistClient struct { + grpc.ClientStream +} + +func (x *embeddedAssistantAssistClient) Send(m *AssistRequest) error { + return x.ClientStream.SendMsg(m) +} + +func (x *embeddedAssistantAssistClient) Recv() (*AssistResponse, error) { + m := new(AssistResponse) + if err := x.ClientStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +// Server API for EmbeddedAssistant service + +type EmbeddedAssistantServer interface { + // Initiates or continues a conversation with the embedded Assistant Service. + // Each call performs one round-trip, sending an audio request to the service + // and receiving the audio response. Uses bidirectional streaming to receive + // results, such as the `END_OF_UTTERANCE` event, while sending audio. + // + // A conversation is one or more gRPC connections, each consisting of several + // streamed requests and responses. + // For example, the user says *Add to my shopping list* and the Assistant + // responds *What do you want to add?*. The sequence of streamed requests and + // responses in the first gRPC message could be: + // + // * AssistRequest.config + // * AssistRequest.audio_in + // * AssistRequest.audio_in + // * AssistRequest.audio_in + // * AssistRequest.audio_in + // * AssistResponse.event_type.END_OF_UTTERANCE + // * AssistResponse.speech_results.transcript "add to my shopping list" + // * AssistResponse.dialog_state_out.microphone_mode.DIALOG_FOLLOW_ON + // * AssistResponse.audio_out + // * AssistResponse.audio_out + // * AssistResponse.audio_out + // + // + // The user then says *bagels* and the Assistant responds + // *OK, I've added bagels to your shopping list*. This is sent as another gRPC + // connection call to the `Assist` method, again with streamed requests and + // responses, such as: + // + // * AssistRequest.config + // * AssistRequest.audio_in + // * AssistRequest.audio_in + // * AssistRequest.audio_in + // * AssistResponse.event_type.END_OF_UTTERANCE + // * AssistResponse.dialog_state_out.microphone_mode.CLOSE_MICROPHONE + // * AssistResponse.audio_out + // * AssistResponse.audio_out + // * AssistResponse.audio_out + // * AssistResponse.audio_out + // + // Although the precise order of responses is not guaranteed, sequential + // `AssistResponse.audio_out` messages will always contain sequential portions + // of audio. 
+ Assist(EmbeddedAssistant_AssistServer) error +} + +func RegisterEmbeddedAssistantServer(s *grpc.Server, srv EmbeddedAssistantServer) { + s.RegisterService(&_EmbeddedAssistant_serviceDesc, srv) +} + +func _EmbeddedAssistant_Assist_Handler(srv interface{}, stream grpc.ServerStream) error { + return srv.(EmbeddedAssistantServer).Assist(&embeddedAssistantAssistServer{stream}) +} + +type EmbeddedAssistant_AssistServer interface { + Send(*AssistResponse) error + Recv() (*AssistRequest, error) + grpc.ServerStream +} + +type embeddedAssistantAssistServer struct { + grpc.ServerStream +} + +func (x *embeddedAssistantAssistServer) Send(m *AssistResponse) error { + return x.ServerStream.SendMsg(m) +} + +func (x *embeddedAssistantAssistServer) Recv() (*AssistRequest, error) { + m := new(AssistRequest) + if err := x.ServerStream.RecvMsg(m); err != nil { + return nil, err + } + return m, nil +} + +var _EmbeddedAssistant_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.assistant.embedded.v1alpha2.EmbeddedAssistant", + HandlerType: (*EmbeddedAssistantServer)(nil), + Methods: []grpc.MethodDesc{}, + Streams: []grpc.StreamDesc{ + { + StreamName: "Assist", + Handler: _EmbeddedAssistant_Assist_Handler, + ServerStreams: true, + ClientStreams: true, + }, + }, + Metadata: "google/assistant/embedded/v1alpha2/embedded_assistant.proto", +} + +func init() { + proto.RegisterFile("google/assistant/embedded/v1alpha2/embedded_assistant.proto", fileDescriptor0) +} + +var fileDescriptor0 = []byte{ + // 1141 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdd, 0x6e, 0xdb, 0x36, + 0x14, 0x8e, 0xec, 0x34, 0xb5, 0x4f, 0x62, 0xc5, 0x61, 0x8b, 0xd5, 0x4b, 0xbb, 0xb5, 0xd0, 0x80, + 0x22, 0xfb, 0xb3, 0x9b, 0x14, 0xd8, 0x80, 0xb6, 0x1b, 0xe0, 0xda, 0x4a, 0xa2, 0xc2, 0xb1, 0x5c, + 0xc6, 0x69, 0xd1, 0xfd, 0x80, 0x60, 0x24, 0x4e, 0x51, 0x21, 0x93, 0xaa, 0x44, 0x07, 0xcd, 0xae, + 0x76, 0x35, 0xec, 0xb2, 0x0f, 0xb1, 0x07, 0xd9, 0x2b, 0xec, 0x59, 0xf6, 0x02, 0x03, 0x29, 0xc9, + 0xb5, 0xb6, 0x66, 0xb3, 0xb7, 0x3b, 0xf1, 0x1c, 0x9e, 0x8f, 0xe4, 0x39, 0xe7, 0xfb, 0x8e, 0xe0, + 0x61, 0x20, 0x44, 0x10, 0xb1, 0x0e, 0x4d, 0xd3, 0x30, 0x95, 0x94, 0xcb, 0x0e, 0x9b, 0x9c, 0x32, + 0xdf, 0x67, 0x7e, 0xe7, 0x7c, 0x97, 0x46, 0xf1, 0x19, 0xdd, 0x9b, 0x59, 0xc8, 0x6c, 0x53, 0x3b, + 0x4e, 0x84, 0x14, 0xc8, 0xca, 0x82, 0xdb, 0x6f, 0xed, 0xc5, 0xd6, 0x76, 0x11, 0xbc, 0x7d, 0xab, + 0x38, 0x20, 0x0e, 0x3b, 0x94, 0x73, 0x21, 0xa9, 0x0c, 0x05, 0x4f, 0x33, 0x84, 0xed, 0x56, 0xee, + 0x95, 0x17, 0x31, 0xeb, 0x44, 0x54, 0x46, 0x3c, 0xc8, 0x3c, 0xd6, 0xaf, 0x55, 0xd8, 0xe8, 0x6a, + 0xdc, 0x9e, 0xe0, 0x3f, 0x84, 0x01, 0xfa, 0x16, 0x36, 0xe9, 0xd4, 0x0f, 0x05, 0x09, 0x39, 0xf1, + 0xb4, 0xa9, 0x65, 0xdc, 0x31, 0x76, 0xd6, 0xf7, 0x76, 0xdb, 0xff, 0x7e, 0x8d, 0x76, 0x57, 0x85, + 0x3a, 0x3c, 0xc3, 0x3a, 0x5c, 0xc1, 0x0d, 0x3a, 0x6f, 0x40, 0xb7, 0x01, 0x24, 0x7b, 0x2d, 0xc9, + 0xab, 0x29, 0x4b, 0x2e, 0x5a, 0x6b, 0x77, 0x8c, 0x9d, 0xfa, 0xe1, 0x0a, 0xae, 0x2b, 0xdb, 0x53, + 0x65, 0x42, 0xdf, 0x41, 0x33, 0x3b, 0x5d, 0x4c, 0x65, 0x71, 0x7c, 0x45, 0x1f, 0xbf, 0xb7, 0xf0, + 0xf1, 0xee, 0x34, 0x7f, 0x0b, 0x36, 0x69, 0x69, 0x8d, 0x5e, 0xc0, 0xa6, 0x1f, 0xd2, 0x48, 0x04, + 0x24, 0x95, 0x54, 0x32, 0x12, 0xf2, 0x56, 0x75, 0xf1, 0xb7, 0xf5, 0x75, 0xe8, 0xb1, 0x8a, 0x74, + 0x38, 0x6e, 0xf8, 0xf3, 0x4b, 0x74, 0x02, 0x0d, 0x9f, 0x9d, 0x87, 0x1e, 0x2b, 0x6e, 0xbd, 0xaa, + 0x81, 0xef, 0x2d, 0x04, 0xac, 0x03, 0xf3, 0x3b, 0x6f, 0xf8, 0x73, 0xab, 0xc7, 0x6b, 0xb0, 0xaa, + 0x6a, 0x66, 0xfd, 0x6e, 0x40, 0xa3, 0x94, 0x5b, 
0xf4, 0x0c, 0x6a, 0x8c, 0x7b, 0xc2, 0x0f, 0x79, + 0x56, 0x20, 0x73, 0xef, 0xc1, 0xd2, 0x05, 0x6a, 0xdb, 0x39, 0x02, 0x9e, 0x61, 0xa1, 0x4f, 0x60, + 0x2b, 0xa5, 0x93, 0x38, 0x62, 0x24, 0x51, 0x29, 0x3a, 0x63, 0x89, 0xfc, 0x51, 0x97, 0xe0, 0x0a, + 0xde, 0xcc, 0x1c, 0x98, 0x4a, 0x76, 0xa8, 0xcc, 0xd6, 0x23, 0xa8, 0x15, 0x08, 0xa8, 0x05, 0xd7, + 0xed, 0x61, 0xcf, 0xed, 0x3b, 0xc3, 0x03, 0x72, 0x32, 0x3c, 0x1e, 0xd9, 0x3d, 0x67, 0xdf, 0xb1, + 0xfb, 0xcd, 0x15, 0xb4, 0x01, 0xb5, 0x81, 0x33, 0xb4, 0xbb, 0x78, 0xf7, 0x8b, 0xa6, 0x81, 0x6a, + 0xb0, 0xba, 0x3f, 0xe8, 0xf6, 0x9a, 0x15, 0xeb, 0x4d, 0x05, 0xcc, 0x72, 0xc1, 0xd0, 0xf3, 0xbf, + 0x3d, 0xea, 0xe1, 0xf2, 0x65, 0xff, 0x9f, 0xaf, 0x42, 0x9f, 0xc2, 0xd6, 0xb9, 0x88, 0xa6, 0x13, + 0x46, 0x62, 0x96, 0x78, 0x8c, 0x4b, 0x1a, 0x30, 0xdd, 0x27, 0x57, 0x70, 0x33, 0x73, 0x8c, 0x66, + 0x76, 0x6b, 0xf0, 0x1f, 0x52, 0x70, 0x15, 0xaa, 0x47, 0xa3, 0xfb, 0xcd, 0x0a, 0xda, 0x84, 0x75, + 0x77, 0x74, 0x72, 0x4c, 0x9c, 0x21, 0x71, 0x0f, 0x0e, 0x9a, 0x55, 0xeb, 0x37, 0x03, 0x1a, 0xa5, + 0x36, 0x43, 0x9f, 0x03, 0xf2, 0x04, 0x3f, 0x67, 0x49, 0xaa, 0x09, 0x9d, 0x35, 0xae, 0xce, 0xcd, + 0x06, 0xde, 0x9a, 0xf7, 0xe8, 0x00, 0xf4, 0x11, 0x34, 0x22, 0xca, 0x83, 0x29, 0x0d, 0x54, 0x23, + 0xfa, 0x4c, 0xbf, 0xb1, 0x8e, 0x37, 0x0a, 0x63, 0x4f, 0xf8, 0x4c, 0x51, 0x3c, 0xef, 0xd5, 0x48, + 0x78, 0x3a, 0xb8, 0x75, 0x65, 0x71, 0x8e, 0x65, 0xdd, 0x3a, 0xc8, 0x23, 0xb1, 0xe9, 0x97, 0xd6, + 0xd6, 0xc7, 0x50, 0x2b, 0xca, 0x81, 0x3e, 0x00, 0xc8, 0xd8, 0xec, 0x53, 0x49, 0xf3, 0x4b, 0xd7, + 0xb5, 0xa5, 0x4f, 0x25, 0xb5, 0xfe, 0xa8, 0x80, 0x39, 0xf7, 0x5a, 0x15, 0xf1, 0x00, 0xde, 0x4f, + 0xa7, 0x71, 0x1c, 0xb1, 0x89, 0xca, 0x6f, 0x44, 0xfc, 0x30, 0x8d, 0x23, 0x7a, 0x41, 0x94, 0x42, + 0x68, 0x80, 0x3a, 0xbe, 0x31, 0xbf, 0xa1, 0x9f, 0xf9, 0xc7, 0xec, 0xb5, 0xbc, 0x24, 0x55, 0x95, + 0xcb, 0x52, 0xf5, 0x12, 0x36, 0x27, 0xa1, 0x97, 0x88, 0xf8, 0x4c, 0x70, 0x46, 0x26, 0x2a, 0x59, + 0x55, 0xdd, 0x72, 0xdd, 0x25, 0xc5, 0xc0, 0x9d, 0xca, 0xf6, 0xd1, 0x0c, 0xe9, 0x48, 0xf8, 0x0c, + 0x9b, 0x93, 0xd2, 0xfa, 0xdd, 0x2d, 0xb5, 0x7a, 0x49, 0x4b, 0x7d, 0x0f, 0x66, 0x19, 0x0e, 0xdd, + 0x86, 0x9b, 0x47, 0x4e, 0x0f, 0xbb, 0xa3, 0x43, 0x77, 0x68, 0x93, 0x23, 0xb7, 0x6f, 0xff, 0xa5, + 0xbf, 0xae, 0x43, 0xb3, 0x37, 0x70, 0x8f, 0x6d, 0xf2, 0x76, 0x5b, 0xd3, 0x50, 0xd6, 0xbe, 0xd3, + 0x1d, 0xb8, 0x07, 0x64, 0xdf, 0x1d, 0x0c, 0xdc, 0xe7, 0xc4, 0x1d, 0x36, 0x2b, 0xd6, 0x4f, 0x4a, + 0x4a, 0xf4, 0xcb, 0x30, 0x7b, 0x35, 0x65, 0xa9, 0x44, 0x4f, 0x60, 0xad, 0xa4, 0xf4, 0x0b, 0x89, + 0xd6, 0xfc, 0xd0, 0x38, 0x5c, 0xc1, 0x39, 0x02, 0xba, 0x09, 0xb5, 0x62, 0x7c, 0x64, 0xa9, 0x3f, + 0x5c, 0xc1, 0x57, 0xf3, 0x21, 0x30, 0x53, 0xb3, 0x37, 0xab, 0x60, 0x16, 0x57, 0x48, 0x63, 0xc1, + 0x53, 0xd5, 0x93, 0xc0, 0xce, 0x19, 0x97, 0x44, 0x6d, 0xc8, 0xb9, 0xff, 0x68, 0xf1, 0x7b, 0x14, + 0x38, 0x6d, 0x5b, 0x81, 0x8c, 0x2f, 0x62, 0x86, 0xeb, 0xac, 0xf8, 0x44, 0x0e, 0xd4, 0x67, 0x53, + 0x25, 0x57, 0xfc, 0xcf, 0x96, 0xd1, 0x15, 0x5c, 0x2b, 0x06, 0xc9, 0x9c, 0xce, 0x53, 0x4f, 0x33, + 0x67, 0x6d, 0x59, 0x9d, 0xef, 0xea, 0xb8, 0x42, 0xe7, 0xb3, 0x15, 0x3a, 0x05, 0x33, 0x8d, 0x19, + 0xf3, 0xce, 0x48, 0xc2, 0xd2, 0x69, 0x24, 0xd3, 0x56, 0xe5, 0x4e, 0x75, 0x67, 0x7d, 0x31, 0xf9, + 0x3b, 0xd6, 0x91, 0x98, 0x79, 0x22, 0xe0, 0xa1, 0x06, 0xd7, 0x18, 0xb8, 0x91, 0xe6, 0x0e, 0x8d, + 0xa8, 0x66, 0x6b, 0x69, 0xfa, 0xa9, 0x64, 0x2c, 0xc3, 0xfb, 0x52, 0xc7, 0x63, 0xd3, 0x2f, 0xad, + 0xad, 0xaf, 0xa0, 0x3e, 0xcb, 0x3d, 0xda, 0x86, 0xf7, 0xec, 0x67, 0xf6, 0x70, 0x4c, 0xc6, 0x2f, + 0x46, 0xef, 0xe8, 0x55, 0x7b, 0xd8, 0x27, 0xee, 0x3e, 0x39, 0x19, 0x8f, 
0x6d, 0xdc, 0x1d, 0xf6, + 0xec, 0xa6, 0x61, 0x3d, 0x87, 0x1b, 0x97, 0x3c, 0x03, 0x7d, 0x08, 0x20, 0x13, 0xca, 0x53, 0x2f, + 0x09, 0xe3, 0x42, 0x04, 0xe6, 0x2c, 0xe8, 0x16, 0xd4, 0x53, 0x49, 0x4f, 0xc3, 0x28, 0x94, 0x17, + 0xba, 0xe7, 0x2a, 0xf8, 0xad, 0xc1, 0x3a, 0x86, 0x8d, 0xf9, 0xf9, 0x8a, 0x6e, 0x42, 0x3d, 0x2f, + 0x60, 0xe8, 0xe7, 0x60, 0xb5, 0xcc, 0xe0, 0xf8, 0xe8, 0xee, 0x4c, 0x19, 0x95, 0x1e, 0x44, 0x6a, + 0x4b, 0x55, 0x6f, 0xc9, 0x8b, 0xae, 0xd8, 0x18, 0x39, 0xbe, 0xf5, 0x75, 0x01, 0x9a, 0x97, 0xaf, + 0x0d, 0xd7, 0xf2, 0xb8, 0x24, 0xe3, 0x14, 0x79, 0x99, 0x0a, 0x9e, 0xc3, 0x6f, 0x65, 0xae, 0x9c, + 0x6d, 0x4f, 0x52, 0xc1, 0xad, 0xa7, 0x60, 0x96, 0x65, 0x14, 0x7d, 0x09, 0xeb, 0x9e, 0x10, 0x89, + 0x1f, 0x72, 0x2a, 0x59, 0x9a, 0x13, 0xf1, 0x5a, 0x51, 0x17, 0x45, 0x8a, 0xf6, 0x80, 0xca, 0x01, + 0x57, 0x5c, 0x9b, 0xdf, 0x59, 0x70, 0x6a, 0xef, 0x17, 0x03, 0xb6, 0xec, 0xbc, 0x68, 0xdd, 0xa2, + 0x8c, 0x28, 0x85, 0xb5, 0x6c, 0x81, 0x76, 0x97, 0x21, 0x93, 0xbe, 0xe9, 0xf6, 0xde, 0xf2, 0xfc, + 0xdb, 0x31, 0xee, 0x19, 0x8f, 0x7f, 0x36, 0xe0, 0xae, 0x27, 0x26, 0x0b, 0x44, 0x3f, 0x36, 0x67, + 0x57, 0x1d, 0xa9, 0xdf, 0xd1, 0x91, 0xf1, 0xcd, 0x93, 0x3c, 0x2a, 0x10, 0x6a, 0x66, 0xb5, 0x45, + 0x12, 0x74, 0x02, 0xc6, 0xf5, 0xcf, 0x6a, 0x27, 0x73, 0xd1, 0x38, 0x4c, 0xff, 0xe9, 0x47, 0xfa, + 0x61, 0x61, 0x39, 0x5d, 0xd3, 0x61, 0xf7, 0xff, 0x0c, 0x00, 0x00, 0xff, 0xff, 0x3d, 0x7a, 0x00, + 0x97, 0x7e, 0x0b, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/clusters.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/clusters.pb.go index a8f3d589..c3ef596c 100644 --- a/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/clusters.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/clusters.pb.go @@ -15,10 +15,12 @@ It has these top-level messages: GceClusterConfig InstanceGroupConfig ManagedGroupConfig + AcceleratorConfig DiskConfig NodeInitializationAction ClusterStatus SoftwareConfig + ClusterMetrics CreateClusterRequest UpdateClusterRequest DeleteClusterRequest @@ -38,10 +40,13 @@ It has these top-level messages: JobPlacement JobStatus JobReference + YarnApplication Job + JobScheduling SubmitJobRequest GetJobRequest ListJobsRequest + UpdateJobRequest ListJobsResponse CancelJobRequest DeleteJobRequest @@ -113,26 +118,71 @@ var ClusterStatus_State_value = map[string]int32{ func (x ClusterStatus_State) String() string { return proto.EnumName(ClusterStatus_State_name, int32(x)) } -func (ClusterStatus_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{7, 0} } +func (ClusterStatus_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{8, 0} } + +type ClusterStatus_Substate int32 + +const ( + ClusterStatus_UNSPECIFIED ClusterStatus_Substate = 0 + // The cluster is known to be in an unhealthy state + // (for example, critical daemons are not running or HDFS capacity is + // exhausted). + // + // Applies to RUNNING state. + ClusterStatus_UNHEALTHY ClusterStatus_Substate = 1 + // The agent-reported status is out of date (may occur if + // Cloud Dataproc loses communication with Agent). + // + // Applies to RUNNING state. 
+ ClusterStatus_STALE_STATUS ClusterStatus_Substate = 2 +) + +var ClusterStatus_Substate_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "UNHEALTHY", + 2: "STALE_STATUS", +} +var ClusterStatus_Substate_value = map[string]int32{ + "UNSPECIFIED": 0, + "UNHEALTHY": 1, + "STALE_STATUS": 2, +} + +func (x ClusterStatus_Substate) String() string { + return proto.EnumName(ClusterStatus_Substate_name, int32(x)) +} +func (ClusterStatus_Substate) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{8, 1} } // Describes the identifying information, config, and status of // a cluster of Google Compute Engine instances. type Cluster struct { - // [Required] The Google Cloud Platform project ID that the cluster belongs to. + // Required. The Google Cloud Platform project ID that the cluster belongs to. ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` - // [Required] The cluster name. Cluster names within a project must be + // Required. The cluster name. Cluster names within a project must be // unique. Names of deleted clusters can be reused. ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` - // [Required] The cluster config. Note that Cloud Dataproc may set + // Required. The cluster config. Note that Cloud Dataproc may set // default values, and values may change when clusters are updated. Config *ClusterConfig `protobuf:"bytes,3,opt,name=config" json:"config,omitempty"` - // [Output-only] Cluster status. + // Optional. The labels to associate with this cluster. + // Label **keys** must contain 1 to 63 characters, and must conform to + // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + // Label **values** may be empty, but, if present, must contain 1 to 63 + // characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + // No more than 32 labels can be associated with a cluster. + Labels map[string]string `protobuf:"bytes,8,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Output-only. Cluster status. Status *ClusterStatus `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"` - // [Output-only] The previous cluster status. + // Output-only. The previous cluster status. StatusHistory []*ClusterStatus `protobuf:"bytes,7,rep,name=status_history,json=statusHistory" json:"status_history,omitempty"` - // [Output-only] A cluster UUID (Unique Universal Identifier). Cloud Dataproc + // Output-only. A cluster UUID (Unique Universal Identifier). Cloud Dataproc // generates this value when it creates the cluster. ClusterUuid string `protobuf:"bytes,6,opt,name=cluster_uuid,json=clusterUuid" json:"cluster_uuid,omitempty"` + // Contains cluster daemon metrics such as HDFS and YARN stats. + // + // **Beta Feature**: This report is available for testing purposes only. It may + // be changed before final release. 
+ Metrics *ClusterMetrics `protobuf:"bytes,9,opt,name=metrics" json:"metrics,omitempty"` } func (m *Cluster) Reset() { *m = Cluster{} } @@ -161,6 +211,13 @@ func (m *Cluster) GetConfig() *ClusterConfig { return nil } +func (m *Cluster) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + func (m *Cluster) GetStatus() *ClusterStatus { if m != nil { return m.Status @@ -182,32 +239,39 @@ func (m *Cluster) GetClusterUuid() string { return "" } +func (m *Cluster) GetMetrics() *ClusterMetrics { + if m != nil { + return m.Metrics + } + return nil +} + // The cluster config. type ClusterConfig struct { - // [Optional] A Google Cloud Storage staging bucket used for sharing generated + // Optional. A Google Cloud Storage staging bucket used for sharing generated // SSH keys and config. If you do not specify a staging bucket, Cloud // Dataproc will determine an appropriate Cloud Storage location (US, // ASIA, or EU) for your cluster's staging bucket according to the Google // Compute Engine zone where your cluster is deployed, and then it will create // and manage this project-level, per-location bucket for you. ConfigBucket string `protobuf:"bytes,1,opt,name=config_bucket,json=configBucket" json:"config_bucket,omitempty"` - // [Required] The shared Google Compute Engine config settings for + // Required. The shared Google Compute Engine config settings for // all instances in a cluster. GceClusterConfig *GceClusterConfig `protobuf:"bytes,8,opt,name=gce_cluster_config,json=gceClusterConfig" json:"gce_cluster_config,omitempty"` - // [Optional] The Google Compute Engine config settings for + // Optional. The Google Compute Engine config settings for // the master instance in a cluster. MasterConfig *InstanceGroupConfig `protobuf:"bytes,9,opt,name=master_config,json=masterConfig" json:"master_config,omitempty"` - // [Optional] The Google Compute Engine config settings for + // Optional. The Google Compute Engine config settings for // worker instances in a cluster. WorkerConfig *InstanceGroupConfig `protobuf:"bytes,10,opt,name=worker_config,json=workerConfig" json:"worker_config,omitempty"` - // [Optional] The Google Compute Engine config settings for + // Optional. The Google Compute Engine config settings for // additional worker instances in a cluster. SecondaryWorkerConfig *InstanceGroupConfig `protobuf:"bytes,12,opt,name=secondary_worker_config,json=secondaryWorkerConfig" json:"secondary_worker_config,omitempty"` - // [Optional] The config settings for software inside the cluster. + // Optional. The config settings for software inside the cluster. SoftwareConfig *SoftwareConfig `protobuf:"bytes,13,opt,name=software_config,json=softwareConfig" json:"software_config,omitempty"` - // [Optional] Commands to execute on each node after config is + // Optional. Commands to execute on each node after config is // completed. By default, executables are run on master and all worker nodes. - // You can test a node's role metadata to run an executable on + // You can test a node's `role` metadata to run an executable on // a master or worker node, as shown below using `curl` (you can also use `wget`): // // ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1/instance/attributes/dataproc-role) @@ -276,28 +340,58 @@ func (m *ClusterConfig) GetInitializationActions() []*NodeInitializationAction { // Common config settings for resources of Google Compute Engine cluster // instances, applicable to all instances in the cluster. 
type GceClusterConfig struct { - // [Required] The zone where the Google Compute Engine cluster will be located. - // Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]`. + // Optional. The zone where the Google Compute Engine cluster will be located. + // On a create request, it is required in the "global" region. If omitted + // in a non-global Cloud Dataproc region, the service will pick a zone in the + // corresponding Compute Engine region. On a get request, zone will + // always be present. + // + // A full URL, partial URI, or short name are valid. Examples: + // + // * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]` + // * `projects/[project_id]/zones/[zone]` + // * `us-central1-f` ZoneUri string `protobuf:"bytes,1,opt,name=zone_uri,json=zoneUri" json:"zone_uri,omitempty"` - // [Optional] The Google Compute Engine network to be used for machine + // Optional. The Google Compute Engine network to be used for machine // communications. Cannot be specified with subnetwork_uri. If neither // `network_uri` nor `subnetwork_uri` is specified, the "default" network of // the project is used, if it exists. Cannot be a "Custom Subnet Network" (see // [Using Subnetworks](/compute/docs/subnetworks) for more information). - // Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default`. + // + // A full URL, partial URI, or short name are valid. Examples: + // + // * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default` + // * `projects/[project_id]/regions/global/default` + // * `default` NetworkUri string `protobuf:"bytes,2,opt,name=network_uri,json=networkUri" json:"network_uri,omitempty"` - // [Optional] The Google Compute Engine subnetwork to be used for machine + // Optional. The Google Compute Engine subnetwork to be used for machine // communications. Cannot be specified with network_uri. - // Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0`. + // + // A full URL, partial URI, or short name are valid. Examples: + // + // * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0` + // * `projects/[project_id]/regions/us-east1/sub0` + // * `sub0` SubnetworkUri string `protobuf:"bytes,6,opt,name=subnetwork_uri,json=subnetworkUri" json:"subnetwork_uri,omitempty"` - // [Optional] If true, all instances in the cluster will only have internal IP + // Optional. If true, all instances in the cluster will only have internal IP // addresses. By default, clusters are not restricted to internal IP addresses, // and will have ephemeral external IP addresses assigned to each instance. // This `internal_ip_only` restriction can only be enabled for subnetwork // enabled networks, and all off-cluster dependencies must be configured to be // accessible without external IP addresses. InternalIpOnly bool `protobuf:"varint,7,opt,name=internal_ip_only,json=internalIpOnly" json:"internal_ip_only,omitempty"` - // [Optional] The URIs of service account scopes to be included in Google + // Optional. The service account of the instances. Defaults to the default + // Google Compute Engine service account. Custom service accounts need + // permissions equivalent to the folloing IAM roles: + // + // * roles/logging.logWriter + // * roles/storage.objectAdmin + // + // (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts + // for more information). 
+ // Example: `[account_id]@[project_id].iam.gserviceaccount.com` + ServiceAccount string `protobuf:"bytes,8,opt,name=service_account,json=serviceAccount" json:"service_account,omitempty"` + // Optional. The URIs of service account scopes to be included in Google // Compute Engine instances. The following base set of scopes is always // included: // @@ -313,7 +407,7 @@ type GceClusterConfig struct { // * https://www.googleapis.com/auth/devstorage.full_control ServiceAccountScopes []string `protobuf:"bytes,3,rep,name=service_account_scopes,json=serviceAccountScopes" json:"service_account_scopes,omitempty"` // The Google Compute Engine tags to add to all instances (see - // [Labeling instances](/compute/docs/label-or-tag-resources#labeling_instances)). + // [Tagging instances](/compute/docs/label-or-tag-resources#tags)). Tags []string `protobuf:"bytes,4,rep,name=tags" json:"tags,omitempty"` // The Google Compute Engine metadata entries to add to all instances (see // [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). @@ -353,6 +447,13 @@ func (m *GceClusterConfig) GetInternalIpOnly() bool { return false } +func (m *GceClusterConfig) GetServiceAccount() string { + if m != nil { + return m.ServiceAccount + } + return "" +} + func (m *GceClusterConfig) GetServiceAccountScopes() []string { if m != nil { return m.ServiceAccountScopes @@ -374,30 +475,41 @@ func (m *GceClusterConfig) GetMetadata() map[string]string { return nil } -// [Optional] The config settings for Google Compute Engine resources in +// Optional. The config settings for Google Compute Engine resources in // an instance group, such as a master or worker group. type InstanceGroupConfig struct { - // [Required] The number of VM instances in the instance group. + // Optional. The number of VM instances in the instance group. // For master instance groups, must be set to 1. NumInstances int32 `protobuf:"varint,1,opt,name=num_instances,json=numInstances" json:"num_instances,omitempty"` - // [Optional] The list of instance names. Cloud Dataproc derives the names from + // Optional. The list of instance names. Cloud Dataproc derives the names from // `cluster_name`, `num_instances`, and the instance group if not set by user // (recommended practice is to let Cloud Dataproc derive the name). InstanceNames []string `protobuf:"bytes,2,rep,name=instance_names,json=instanceNames" json:"instance_names,omitempty"` - // [Output-only] The Google Compute Engine image resource used for cluster + // Output-only. The Google Compute Engine image resource used for cluster // instances. Inferred from `SoftwareConfig.image_version`. ImageUri string `protobuf:"bytes,3,opt,name=image_uri,json=imageUri" json:"image_uri,omitempty"` - // [Required] The Google Compute Engine machine type used for cluster instances. - // Example: `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2`. + // Optional. The Google Compute Engine machine type used for cluster instances. + // + // A full URL, partial URI, or short name are valid. Examples: + // + // * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` + // * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` + // * `n1-standard-2` MachineTypeUri string `protobuf:"bytes,4,opt,name=machine_type_uri,json=machineTypeUri" json:"machine_type_uri,omitempty"` - // [Optional] Disk option config settings. + // Optional. 
Disk option config settings. DiskConfig *DiskConfig `protobuf:"bytes,5,opt,name=disk_config,json=diskConfig" json:"disk_config,omitempty"` - // [Optional] Specifies that this instance group contains preemptible instances. + // Optional. Specifies that this instance group contains preemptible instances. IsPreemptible bool `protobuf:"varint,6,opt,name=is_preemptible,json=isPreemptible" json:"is_preemptible,omitempty"` - // [Output-only] The config for Google Compute Engine Instance Group + // Output-only. The config for Google Compute Engine Instance Group // Manager that manages this group. // This is only used for preemptible instance groups. ManagedGroupConfig *ManagedGroupConfig `protobuf:"bytes,7,opt,name=managed_group_config,json=managedGroupConfig" json:"managed_group_config,omitempty"` + // Optional. The Google Compute Engine accelerator configuration for these + // instances. + // + // **Beta Feature**: This feature is still under development. It may be + // changed before final release. + Accelerators []*AcceleratorConfig `protobuf:"bytes,8,rep,name=accelerators" json:"accelerators,omitempty"` } func (m *InstanceGroupConfig) Reset() { *m = InstanceGroupConfig{} } @@ -454,12 +566,19 @@ func (m *InstanceGroupConfig) GetManagedGroupConfig() *ManagedGroupConfig { return nil } +func (m *InstanceGroupConfig) GetAccelerators() []*AcceleratorConfig { + if m != nil { + return m.Accelerators + } + return nil +} + // Specifies the resources used to actively manage an instance group. type ManagedGroupConfig struct { - // [Output-only] The name of the Instance Template used for the Managed + // Output-only. The name of the Instance Template used for the Managed // Instance Group. InstanceTemplateName string `protobuf:"bytes,1,opt,name=instance_template_name,json=instanceTemplateName" json:"instance_template_name,omitempty"` - // [Output-only] The name of the Instance Group Manager for this group. + // Output-only. The name of the Instance Group Manager for this group. InstanceGroupManagerName string `protobuf:"bytes,2,opt,name=instance_group_manager_name,json=instanceGroupManagerName" json:"instance_group_manager_name,omitempty"` } @@ -482,11 +601,46 @@ func (m *ManagedGroupConfig) GetInstanceGroupManagerName() string { return "" } +// Specifies the type and number of accelerator cards attached to the instances +// of an instance group (see [GPUs on Compute Engine](/compute/docs/gpus/)). +type AcceleratorConfig struct { + // Full URL, partial URI, or short name of the accelerator type resource to + // expose to this instance. See [Google Compute Engine AcceleratorTypes]( + // /compute/docs/reference/beta/acceleratorTypes) + // + // Examples + // * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` + // * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` + // * `nvidia-tesla-k80` + AcceleratorTypeUri string `protobuf:"bytes,1,opt,name=accelerator_type_uri,json=acceleratorTypeUri" json:"accelerator_type_uri,omitempty"` + // The number of the accelerator cards of this type exposed to this instance. 
+ AcceleratorCount int32 `protobuf:"varint,2,opt,name=accelerator_count,json=acceleratorCount" json:"accelerator_count,omitempty"` +} + +func (m *AcceleratorConfig) Reset() { *m = AcceleratorConfig{} } +func (m *AcceleratorConfig) String() string { return proto.CompactTextString(m) } +func (*AcceleratorConfig) ProtoMessage() {} +func (*AcceleratorConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *AcceleratorConfig) GetAcceleratorTypeUri() string { + if m != nil { + return m.AcceleratorTypeUri + } + return "" +} + +func (m *AcceleratorConfig) GetAcceleratorCount() int32 { + if m != nil { + return m.AcceleratorCount + } + return 0 +} + // Specifies the config of disk options for a group of VM instances. type DiskConfig struct { - // [Optional] Size in GB of the boot disk (default is 500GB). + // Optional. Size in GB of the boot disk (default is 500GB). BootDiskSizeGb int32 `protobuf:"varint,1,opt,name=boot_disk_size_gb,json=bootDiskSizeGb" json:"boot_disk_size_gb,omitempty"` - // [Optional] Number of attached SSDs, from 0 to 4 (default is 0). + // Optional. Number of attached SSDs, from 0 to 4 (default is 0). // If SSDs are not attached, the boot disk is used to store runtime logs and // [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. // If one or more SSDs are attached, this runtime bulk @@ -498,7 +652,7 @@ type DiskConfig struct { func (m *DiskConfig) Reset() { *m = DiskConfig{} } func (m *DiskConfig) String() string { return proto.CompactTextString(m) } func (*DiskConfig) ProtoMessage() {} -func (*DiskConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } +func (*DiskConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } func (m *DiskConfig) GetBootDiskSizeGb() int32 { if m != nil { @@ -517,9 +671,9 @@ func (m *DiskConfig) GetNumLocalSsds() int32 { // Specifies an executable to run on a fully configured node and a // timeout period for executable completion. type NodeInitializationAction struct { - // [Required] Google Cloud Storage URI of executable file. + // Required. Google Cloud Storage URI of executable file. ExecutableFile string `protobuf:"bytes,1,opt,name=executable_file,json=executableFile" json:"executable_file,omitempty"` - // [Optional] Amount of time executable has to complete. Default is + // Optional. Amount of time executable has to complete. Default is // 10 minutes. Cluster creation fails with an explanatory error message (the // name of the executable that caused the error and the exceeded timeout // period) if the executable is not completed at end of the timeout period. @@ -529,7 +683,7 @@ type NodeInitializationAction struct { func (m *NodeInitializationAction) Reset() { *m = NodeInitializationAction{} } func (m *NodeInitializationAction) String() string { return proto.CompactTextString(m) } func (*NodeInitializationAction) ProtoMessage() {} -func (*NodeInitializationAction) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } +func (*NodeInitializationAction) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } func (m *NodeInitializationAction) GetExecutableFile() string { if m != nil { @@ -547,18 +701,21 @@ func (m *NodeInitializationAction) GetExecutionTimeout() *google_protobuf4.Durat // The status of a cluster and its instances. type ClusterStatus struct { - // [Output-only] The cluster's state. + // Output-only. The cluster's state. 
State ClusterStatus_State `protobuf:"varint,1,opt,name=state,enum=google.cloud.dataproc.v1.ClusterStatus_State" json:"state,omitempty"` - // [Output-only] Optional details of cluster's state. + // Output-only. Optional details of cluster's state. Detail string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"` - // [Output-only] Time when this state was entered. + // Output-only. Time when this state was entered. StateStartTime *google_protobuf3.Timestamp `protobuf:"bytes,3,opt,name=state_start_time,json=stateStartTime" json:"state_start_time,omitempty"` + // Output-only. Additional state information that includes + // status reported by the agent. + Substate ClusterStatus_Substate `protobuf:"varint,4,opt,name=substate,enum=google.cloud.dataproc.v1.ClusterStatus_Substate" json:"substate,omitempty"` } func (m *ClusterStatus) Reset() { *m = ClusterStatus{} } func (m *ClusterStatus) String() string { return proto.CompactTextString(m) } func (*ClusterStatus) ProtoMessage() {} -func (*ClusterStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } +func (*ClusterStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } func (m *ClusterStatus) GetState() ClusterStatus_State { if m != nil { @@ -581,32 +738,44 @@ func (m *ClusterStatus) GetStateStartTime() *google_protobuf3.Timestamp { return nil } +func (m *ClusterStatus) GetSubstate() ClusterStatus_Substate { + if m != nil { + return m.Substate + } + return ClusterStatus_UNSPECIFIED +} + // Specifies the selection and config of software inside the cluster. type SoftwareConfig struct { - // [Optional] The version of software inside the cluster. It must match the + // Optional. The version of software inside the cluster. It must match the // regular expression `[0-9]+\.[0-9]+`. If unspecified, it defaults to the // latest version (see [Cloud Dataproc Versioning](/dataproc/versioning)). ImageVersion string `protobuf:"bytes,1,opt,name=image_version,json=imageVersion" json:"image_version,omitempty"` - // [Optional] The properties to set on daemon config files. + // Optional. The properties to set on daemon config files. // // Property keys are specified in `prefix:property` format, such as // `core:fs.defaultFS`. The following are supported prefixes // and their mappings: // + // * capacity-scheduler: `capacity-scheduler.xml` // * core: `core-site.xml` + // * distcp: `distcp-default.xml` // * hdfs: `hdfs-site.xml` - // * mapred: `mapred-site.xml` - // * yarn: `yarn-site.xml` // * hive: `hive-site.xml` + // * mapred: `mapred-site.xml` // * pig: `pig.properties` // * spark: `spark-defaults.conf` + // * yarn: `yarn-site.xml` + // + // For more information, see + // [Cluster properties](/dataproc/docs/concepts/cluster-properties). Properties map[string]string `protobuf:"bytes,2,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` } func (m *SoftwareConfig) Reset() { *m = SoftwareConfig{} } func (m *SoftwareConfig) String() string { return proto.CompactTextString(m) } func (*SoftwareConfig) ProtoMessage() {} -func (*SoftwareConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } +func (*SoftwareConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } func (m *SoftwareConfig) GetImageVersion() string { if m != nil { @@ -622,21 +791,51 @@ func (m *SoftwareConfig) GetProperties() map[string]string { return nil } +// Contains cluster daemon metrics, such as HDFS and YARN stats. 
+// +// **Beta Feature**: This report is available for testing purposes only. It may +// be changed before final release. +type ClusterMetrics struct { + // The HDFS metrics. + HdfsMetrics map[string]int64 `protobuf:"bytes,1,rep,name=hdfs_metrics,json=hdfsMetrics" json:"hdfs_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + // The YARN metrics. + YarnMetrics map[string]int64 `protobuf:"bytes,2,rep,name=yarn_metrics,json=yarnMetrics" json:"yarn_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` +} + +func (m *ClusterMetrics) Reset() { *m = ClusterMetrics{} } +func (m *ClusterMetrics) String() string { return proto.CompactTextString(m) } +func (*ClusterMetrics) ProtoMessage() {} +func (*ClusterMetrics) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *ClusterMetrics) GetHdfsMetrics() map[string]int64 { + if m != nil { + return m.HdfsMetrics + } + return nil +} + +func (m *ClusterMetrics) GetYarnMetrics() map[string]int64 { + if m != nil { + return m.YarnMetrics + } + return nil +} + // A request to create a cluster. type CreateClusterRequest struct { - // [Required] The ID of the Google Cloud Platform project that the cluster + // Required. The ID of the Google Cloud Platform project that the cluster // belongs to. ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` - // [Required] The Cloud Dataproc region in which to handle the request. + // Required. The Cloud Dataproc region in which to handle the request. Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` - // [Required] The cluster to create. + // Required. The cluster to create. Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster" json:"cluster,omitempty"` } func (m *CreateClusterRequest) Reset() { *m = CreateClusterRequest{} } func (m *CreateClusterRequest) String() string { return proto.CompactTextString(m) } func (*CreateClusterRequest) ProtoMessage() {} -func (*CreateClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } +func (*CreateClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } func (m *CreateClusterRequest) GetProjectId() string { if m != nil { @@ -661,19 +860,19 @@ func (m *CreateClusterRequest) GetCluster() *Cluster { // A request to update a cluster. type UpdateClusterRequest struct { - // [Required] The ID of the Google Cloud Platform project the + // Required. The ID of the Google Cloud Platform project the // cluster belongs to. ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` - // [Required] The Cloud Dataproc region in which to handle the request. + // Required. The Cloud Dataproc region in which to handle the request. Region string `protobuf:"bytes,5,opt,name=region" json:"region,omitempty"` - // [Required] The cluster name. + // Required. The cluster name. ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` - // [Required] The changes to the cluster. + // Required. The changes to the cluster. Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster" json:"cluster,omitempty"` - // [Required] Specifies the path, relative to Cluster, of + // Required. Specifies the path, relative to `Cluster`, of // the field to update. 
For example, to change the number of workers - // in a cluster to 5, the update_mask parameter would be - // specified as config.worker_config.num_instances, + // in a cluster to 5, the `update_mask` parameter would be + // specified as `config.worker_config.num_instances`, // and the `PATCH` request body would specify the new value, as follows: // // { @@ -683,9 +882,10 @@ type UpdateClusterRequest struct { // } // } // } - // Similarly, to change the number of preemptible workers in a cluster to 5, the - // update_mask parameter would be config.secondary_worker_config.num_instances, - // and the `PATCH` request body would be set as follows: + // Similarly, to change the number of preemptible workers in a cluster to 5, + // the `update_mask` parameter would be + // `config.secondary_worker_config.num_instances`, and the `PATCH` request + // body would be set as follows: // // { // "config":{ @@ -694,16 +894,35 @@ type UpdateClusterRequest struct { // } // } // } - // Note: Currently, config.worker_config.num_instances - // and config.secondary_worker_config.num_instances are the only - // fields that can be updated. + // Note: Currently, only the following fields can be updated: + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + // + //
+	//   Mask                                           Purpose
+	//   labels                                         Update labels
+	//   config.worker_config.num_instances             Resize primary worker group
+	//   config.secondary_worker_config.num_instances   Resize secondary worker group
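
For illustration only (not part of the generated comment or the patch), a sketch of a request that applies the primary-worker mask from the table above; `google_protobuf5` is the `FieldMask` import already used by this file, and the project, region, and cluster name are placeholders.

// Sketch only: an UpdateClusterRequest that resizes the primary worker group
// to 5 instances, using the update_mask path documented above.
func exampleResizeRequest(projectID, region, clusterName string) *UpdateClusterRequest {
	return &UpdateClusterRequest{
		ProjectId:   projectID,
		Region:      region,
		ClusterName: clusterName,
		Cluster: &Cluster{
			Config: &ClusterConfig{
				WorkerConfig: &InstanceGroupConfig{NumInstances: 5},
			},
		},
		UpdateMask: &google_protobuf5.FieldMask{
			Paths: []string{"config.worker_config.num_instances"},
		},
	}
}
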
UpdateMask *google_protobuf5.FieldMask `protobuf:"bytes,4,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` } func (m *UpdateClusterRequest) Reset() { *m = UpdateClusterRequest{} } func (m *UpdateClusterRequest) String() string { return proto.CompactTextString(m) } func (*UpdateClusterRequest) ProtoMessage() {} -func (*UpdateClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } +func (*UpdateClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } func (m *UpdateClusterRequest) GetProjectId() string { if m != nil { @@ -742,19 +961,19 @@ func (m *UpdateClusterRequest) GetUpdateMask() *google_protobuf5.FieldMask { // A request to delete a cluster. type DeleteClusterRequest struct { - // [Required] The ID of the Google Cloud Platform project that the cluster + // Required. The ID of the Google Cloud Platform project that the cluster // belongs to. ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` - // [Required] The Cloud Dataproc region in which to handle the request. + // Required. The Cloud Dataproc region in which to handle the request. Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` - // [Required] The cluster name. + // Required. The cluster name. ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` } func (m *DeleteClusterRequest) Reset() { *m = DeleteClusterRequest{} } func (m *DeleteClusterRequest) String() string { return proto.CompactTextString(m) } func (*DeleteClusterRequest) ProtoMessage() {} -func (*DeleteClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } +func (*DeleteClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } func (m *DeleteClusterRequest) GetProjectId() string { if m != nil { @@ -779,19 +998,19 @@ func (m *DeleteClusterRequest) GetClusterName() string { // Request to get the resource representation for a cluster in a project. type GetClusterRequest struct { - // [Required] The ID of the Google Cloud Platform project that the cluster + // Required. The ID of the Google Cloud Platform project that the cluster // belongs to. ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` - // [Required] The Cloud Dataproc region in which to handle the request. + // Required. The Cloud Dataproc region in which to handle the request. Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` - // [Required] The cluster name. + // Required. The cluster name. ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` } func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} } func (m *GetClusterRequest) String() string { return proto.CompactTextString(m) } func (*GetClusterRequest) ProtoMessage() {} -func (*GetClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } +func (*GetClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } func (m *GetClusterRequest) GetProjectId() string { if m != nil { @@ -816,21 +1035,41 @@ func (m *GetClusterRequest) GetClusterName() string { // A request to list the clusters in a project. type ListClustersRequest struct { - // [Required] The ID of the Google Cloud Platform project that the cluster + // Required. The ID of the Google Cloud Platform project that the cluster // belongs to. 
ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` - // [Required] The Cloud Dataproc region in which to handle the request. + // Required. The Cloud Dataproc region in which to handle the request. Region string `protobuf:"bytes,4,opt,name=region" json:"region,omitempty"` - // [Optional] The standard List page size. + // Optional. A filter constraining the clusters to list. Filters are + // case-sensitive and have the following syntax: + // + // field = value [AND [field = value]] ... + // + // where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`, + // and `[KEY]` is a label key. **value** can be `*` to match all values. + // `status.state` can be one of the following: `ACTIVE`, `INACTIVE`, + // `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE` + // contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE` + // contains the `DELETING` and `ERROR` states. + // `clusterName` is the name of the cluster provided at creation time. + // Only the logical `AND` operator is supported; space-separated items are + // treated as having an implicit `AND` operator. + // + // Example filter: + // + // status.state = ACTIVE AND clusterName = mycluster + // AND labels.env = staging AND labels.starred = * + Filter string `protobuf:"bytes,5,opt,name=filter" json:"filter,omitempty"` + // Optional. The standard List page size. PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` - // [Optional] The standard List page token. + // Optional. The standard List page token. PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` } func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} } func (m *ListClustersRequest) String() string { return proto.CompactTextString(m) } func (*ListClustersRequest) ProtoMessage() {} -func (*ListClustersRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } +func (*ListClustersRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } func (m *ListClustersRequest) GetProjectId() string { if m != nil { @@ -846,6 +1085,13 @@ func (m *ListClustersRequest) GetRegion() string { return "" } +func (m *ListClustersRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + func (m *ListClustersRequest) GetPageSize() int32 { if m != nil { return m.PageSize @@ -862,18 +1108,18 @@ func (m *ListClustersRequest) GetPageToken() string { // The list of all clusters in a project. type ListClustersResponse struct { - // [Output-only] The clusters in the project. + // Output-only. The clusters in the project. Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters" json:"clusters,omitempty"` - // [Output-only] This token is included in the response if there are more + // Output-only. This token is included in the response if there are more // results to fetch. To fetch additional results, provide this value as the - // `page_token` in a subsequent ListClustersRequest. + // `page_token` in a subsequent `ListClustersRequest`. 
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` } func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} } func (m *ListClustersResponse) String() string { return proto.CompactTextString(m) } func (*ListClustersResponse) ProtoMessage() {} -func (*ListClustersResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } +func (*ListClustersResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } func (m *ListClustersResponse) GetClusters() []*Cluster { if m != nil { @@ -891,19 +1137,19 @@ func (m *ListClustersResponse) GetNextPageToken() string { // A request to collect cluster diagnostic information. type DiagnoseClusterRequest struct { - // [Required] The ID of the Google Cloud Platform project that the cluster + // Required. The ID of the Google Cloud Platform project that the cluster // belongs to. ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` - // [Required] The Cloud Dataproc region in which to handle the request. + // Required. The Cloud Dataproc region in which to handle the request. Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` - // [Required] The cluster name. + // Required. The cluster name. ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` } func (m *DiagnoseClusterRequest) Reset() { *m = DiagnoseClusterRequest{} } func (m *DiagnoseClusterRequest) String() string { return proto.CompactTextString(m) } func (*DiagnoseClusterRequest) ProtoMessage() {} -func (*DiagnoseClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } +func (*DiagnoseClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } func (m *DiagnoseClusterRequest) GetProjectId() string { if m != nil { @@ -928,7 +1174,7 @@ func (m *DiagnoseClusterRequest) GetClusterName() string { // The location of diagnostic output. type DiagnoseClusterResults struct { - // [Output-only] The Google Cloud Storage URI of the diagnostic output. + // Output-only. The Google Cloud Storage URI of the diagnostic output. // The output report is a plain text file with a summary of collected // diagnostics. 
OutputUri string `protobuf:"bytes,1,opt,name=output_uri,json=outputUri" json:"output_uri,omitempty"` @@ -937,7 +1183,7 @@ type DiagnoseClusterResults struct { func (m *DiagnoseClusterResults) Reset() { *m = DiagnoseClusterResults{} } func (m *DiagnoseClusterResults) String() string { return proto.CompactTextString(m) } func (*DiagnoseClusterResults) ProtoMessage() {} -func (*DiagnoseClusterResults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } +func (*DiagnoseClusterResults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } func (m *DiagnoseClusterResults) GetOutputUri() string { if m != nil { @@ -952,10 +1198,12 @@ func init() { proto.RegisterType((*GceClusterConfig)(nil), "google.cloud.dataproc.v1.GceClusterConfig") proto.RegisterType((*InstanceGroupConfig)(nil), "google.cloud.dataproc.v1.InstanceGroupConfig") proto.RegisterType((*ManagedGroupConfig)(nil), "google.cloud.dataproc.v1.ManagedGroupConfig") + proto.RegisterType((*AcceleratorConfig)(nil), "google.cloud.dataproc.v1.AcceleratorConfig") proto.RegisterType((*DiskConfig)(nil), "google.cloud.dataproc.v1.DiskConfig") proto.RegisterType((*NodeInitializationAction)(nil), "google.cloud.dataproc.v1.NodeInitializationAction") proto.RegisterType((*ClusterStatus)(nil), "google.cloud.dataproc.v1.ClusterStatus") proto.RegisterType((*SoftwareConfig)(nil), "google.cloud.dataproc.v1.SoftwareConfig") + proto.RegisterType((*ClusterMetrics)(nil), "google.cloud.dataproc.v1.ClusterMetrics") proto.RegisterType((*CreateClusterRequest)(nil), "google.cloud.dataproc.v1.CreateClusterRequest") proto.RegisterType((*UpdateClusterRequest)(nil), "google.cloud.dataproc.v1.UpdateClusterRequest") proto.RegisterType((*DeleteClusterRequest)(nil), "google.cloud.dataproc.v1.DeleteClusterRequest") @@ -965,6 +1213,7 @@ func init() { proto.RegisterType((*DiagnoseClusterRequest)(nil), "google.cloud.dataproc.v1.DiagnoseClusterRequest") proto.RegisterType((*DiagnoseClusterResults)(nil), "google.cloud.dataproc.v1.DiagnoseClusterResults") proto.RegisterEnum("google.cloud.dataproc.v1.ClusterStatus_State", ClusterStatus_State_name, ClusterStatus_State_value) + proto.RegisterEnum("google.cloud.dataproc.v1.ClusterStatus_Substate", ClusterStatus_Substate_name, ClusterStatus_Substate_value) } // Reference imports to suppress errors if they are not otherwise used. 
@@ -1223,110 +1472,127 @@ var _ClusterController_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("google/cloud/dataproc/v1/clusters.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 1667 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0x4f, 0x73, 0x23, 0x47, - 0x15, 0x67, 0x64, 0xcb, 0x96, 0x9f, 0x2c, 0x59, 0xdb, 0x51, 0x8c, 0xa2, 0x4d, 0x88, 0x33, 0x09, - 0xac, 0xb3, 0x80, 0x44, 0x1c, 0x28, 0x52, 0xeb, 0x0a, 0xb0, 0x6b, 0x79, 0x8d, 0xc9, 0xae, 0xd6, - 0x8c, 0xed, 0x4d, 0x8a, 0x2a, 0x98, 0x6a, 0xcd, 0xb4, 0x27, 0x8d, 0x66, 0xa6, 0x27, 0xd3, 0x3d, - 0x4e, 0xbc, 0x5b, 0x7b, 0xe1, 0x04, 0xe1, 0xc8, 0x57, 0xe0, 0x40, 0xe5, 0x08, 0x37, 0x4e, 0x7c, - 0x02, 0x2e, 0x1c, 0xb9, 0x72, 0xe2, 0x03, 0x70, 0xe2, 0x40, 0xf5, 0x9f, 0x91, 0x34, 0xb6, 0x24, - 0x7b, 0x17, 0xd7, 0x9e, 0xd4, 0xfd, 0xde, 0xef, 0xfd, 0xe9, 0xd7, 0xef, 0xbd, 0x7e, 0x23, 0xb8, - 0x15, 0x30, 0x16, 0x84, 0xa4, 0xeb, 0x85, 0x2c, 0xf3, 0xbb, 0x3e, 0x16, 0x38, 0x49, 0x99, 0xd7, - 0x3d, 0x7d, 0xaf, 0xeb, 0x85, 0x19, 0x17, 0x24, 0xe5, 0x9d, 0x24, 0x65, 0x82, 0xa1, 0x96, 0x06, - 0x76, 0x14, 0xb0, 0x93, 0x03, 0x3b, 0xa7, 0xef, 0xb5, 0x5f, 0x37, 0x2a, 0x70, 0x42, 0xbb, 0x38, - 0x8e, 0x99, 0xc0, 0x82, 0xb2, 0xd8, 0xc8, 0xb5, 0xdf, 0x9d, 0x69, 0x80, 0x25, 0x24, 0x2d, 0x40, - 0xdf, 0x36, 0xd0, 0x90, 0xc5, 0x41, 0x9a, 0xc5, 0x31, 0x8d, 0x83, 0x8b, 0xa0, 0x6f, 0x18, 0x90, - 0xda, 0x0d, 0xb2, 0x93, 0xae, 0x9f, 0x69, 0x80, 0xe1, 0x6f, 0x9c, 0xe7, 0x9f, 0x50, 0x12, 0xfa, - 0x6e, 0x84, 0xf9, 0xd0, 0x20, 0xde, 0x3c, 0x8f, 0x10, 0x34, 0x22, 0x5c, 0xe0, 0x28, 0xd1, 0x00, - 0xfb, 0x6f, 0x25, 0x58, 0xde, 0xd1, 0xa7, 0x47, 0x6f, 0x00, 0x24, 0x29, 0xfb, 0x35, 0xf1, 0x84, - 0x4b, 0xfd, 0x96, 0xb5, 0x61, 0x6d, 0xae, 0x38, 0x2b, 0x86, 0xb2, 0xef, 0xa3, 0xb7, 0x60, 0xd5, - 0xc4, 0xc9, 0x8d, 0x71, 0x44, 0x5a, 0x25, 0x05, 0xa8, 0x1a, 0x5a, 0x1f, 0x47, 0x04, 0xfd, 0x18, - 0x96, 0x3c, 0x16, 0x9f, 0xd0, 0xa0, 0xb5, 0xb0, 0x61, 0x6d, 0x56, 0xb7, 0x6e, 0x75, 0x66, 0x45, - 0xb2, 0x63, 0x8c, 0xee, 0x28, 0xb8, 0x63, 0xc4, 0xa4, 0x02, 0x2e, 0xb0, 0xc8, 0x78, 0x6b, 0xf1, - 0x8a, 0x0a, 0x0e, 0x15, 0xdc, 0x31, 0x62, 0xa8, 0x0f, 0x75, 0xbd, 0x72, 0x3f, 0xa5, 0x5c, 0xb0, - 0xf4, 0xac, 0xb5, 0xbc, 0xb1, 0xf0, 0x3c, 0x8a, 0x6a, 0x5a, 0xfc, 0xa7, 0x5a, 0x7a, 0xf2, 0xd0, - 0x59, 0x46, 0xfd, 0xd6, 0x52, 0xe1, 0xd0, 0xc7, 0x19, 0xf5, 0xed, 0x7f, 0x2e, 0x42, 0xad, 0x70, - 0x1a, 0xf4, 0x36, 0xd4, 0xf4, 0x79, 0xdc, 0x41, 0xe6, 0x0d, 0x89, 0x30, 0xb1, 0x5c, 0xd5, 0xc4, - 0x7b, 0x8a, 0x86, 0x3e, 0x01, 0x14, 0x78, 0xc4, 0xcd, 0xb5, 0x9b, 0xb8, 0x55, 0xd4, 0xb1, 0x6f, - 0xcf, 0xf6, 0x76, 0xcf, 0x23, 0xc5, 0xd0, 0x35, 0x82, 0x73, 0x14, 0xe4, 0x40, 0x2d, 0xc2, 0x93, - 0x4a, 0x57, 0x94, 0xd2, 0xef, 0xce, 0x56, 0xba, 0x1f, 0x73, 0x81, 0x63, 0x8f, 0xec, 0xa5, 0x2c, - 0x4b, 0x8c, 0xde, 0x55, 0xad, 0x63, 0xac, 0xf3, 0x73, 0x96, 0x0e, 0xc7, 0x3a, 0xe1, 0x85, 0x74, - 0x6a, 0x1d, 0x46, 0x27, 0x81, 0xaf, 0x73, 0xe2, 0xb1, 0xd8, 0xc7, 0xe9, 0x99, 0x5b, 0xd4, 0xbe, - 0xfa, 0x22, 0xda, 0x5f, 0x1d, 0x69, 0xfb, 0x78, 0xd2, 0xcc, 0xcf, 0x61, 0x8d, 0xb3, 0x13, 0xf1, - 0x39, 0x4e, 0x49, 0xae, 0xbe, 0xa6, 0xd4, 0x6f, 0xce, 0x56, 0x7f, 0x68, 0x04, 0x8c, 0xe6, 0x3a, - 0x2f, 0xec, 0x11, 0x85, 0x75, 0x1a, 0x53, 0x41, 0x71, 0x48, 0x9f, 0xa8, 0x82, 0x74, 0xb1, 0xa7, - 0x0a, 0xb7, 0x55, 0x55, 0xd9, 0xb6, 0x35, 0x5b, 0x73, 0x9f, 0xf9, 0x64, 0xbf, 0x20, 0x7b, 0x57, - 0x89, 0x3a, 0xaf, 0xd2, 0x29, 0x54, 0x6e, 0xff, 0xb7, 0x04, 0x8d, 0xf3, 0x77, 0x8e, 0x5e, 0x83, - 0xca, 0x13, 0x16, 0x13, 0x37, 0x4b, 0xa9, 0xc9, 0xad, 0x65, 
0xb9, 0x3f, 0x4e, 0x29, 0x7a, 0x13, - 0xaa, 0x31, 0x11, 0x32, 0x9a, 0x8a, 0xab, 0x8b, 0x14, 0x0c, 0x49, 0x02, 0xbe, 0x09, 0x75, 0x9e, - 0x0d, 0x26, 0x31, 0x3a, 0xa7, 0x6b, 0x63, 0xaa, 0x84, 0x6d, 0x42, 0x83, 0xc6, 0x82, 0xa4, 0x31, - 0x0e, 0x5d, 0x9a, 0xb8, 0x2c, 0x0e, 0x65, 0x29, 0x59, 0x9b, 0x15, 0xa7, 0x9e, 0xd3, 0xf7, 0x93, - 0x47, 0x71, 0x78, 0x86, 0xbe, 0x0f, 0xeb, 0x9c, 0xa4, 0xa7, 0xd4, 0x23, 0x2e, 0xf6, 0x3c, 0x96, - 0xc5, 0xc2, 0xe5, 0x1e, 0x4b, 0x08, 0x6f, 0x2d, 0x6c, 0x2c, 0x6c, 0xae, 0x38, 0x4d, 0xc3, 0xbd, - 0xab, 0x99, 0x87, 0x8a, 0x87, 0x10, 0x2c, 0x0a, 0x1c, 0xc8, 0x3a, 0x97, 0x18, 0xb5, 0x46, 0x47, - 0x50, 0x89, 0x88, 0xc0, 0x32, 0x5c, 0xad, 0xb2, 0x0a, 0xe4, 0x07, 0x57, 0x2f, 0x84, 0xce, 0x43, - 0x23, 0xba, 0x1b, 0x8b, 0xf4, 0xcc, 0x19, 0x69, 0x6a, 0x6f, 0x43, 0xad, 0xc0, 0x42, 0x0d, 0x58, - 0x18, 0x92, 0x33, 0x13, 0x38, 0xb9, 0x44, 0x4d, 0x28, 0x9f, 0xe2, 0x30, 0xcb, 0x7b, 0x9a, 0xde, - 0xdc, 0x29, 0x7d, 0x60, 0xd9, 0xff, 0x29, 0xc1, 0x2b, 0x53, 0x72, 0x4d, 0x96, 0x78, 0x9c, 0x45, - 0x2e, 0x35, 0x2c, 0xae, 0xb4, 0x95, 0x9d, 0xd5, 0x38, 0x8b, 0x72, 0x38, 0x97, 0xa1, 0xce, 0x01, - 0xaa, 0x65, 0xf2, 0x56, 0x49, 0x9d, 0xb6, 0x96, 0x53, 0x65, 0xd3, 0xe4, 0xe8, 0x26, 0xac, 0xd0, - 0x08, 0x07, 0xfa, 0x3a, 0x17, 0x94, 0x07, 0x15, 0x45, 0x30, 0xf7, 0x10, 0x61, 0xef, 0x53, 0x1a, - 0x13, 0x57, 0x9c, 0x25, 0x1a, 0xb3, 0xa8, 0x30, 0x75, 0x43, 0x3f, 0x3a, 0x4b, 0x14, 0x72, 0x17, - 0xaa, 0x3e, 0xe5, 0xc3, 0x3c, 0xc7, 0xcb, 0x2a, 0xc7, 0xdf, 0x99, 0x1d, 0xc0, 0x1e, 0xe5, 0x43, - 0x93, 0xdf, 0xe0, 0x8f, 0xd6, 0xca, 0x69, 0xee, 0x26, 0x29, 0x21, 0x51, 0x22, 0xe8, 0x20, 0x24, - 0x2a, 0x3f, 0x2a, 0x4e, 0x8d, 0xf2, 0x83, 0x31, 0x11, 0xfd, 0x0a, 0x9a, 0x11, 0x8e, 0x71, 0x40, - 0x7c, 0x37, 0x90, 0x71, 0xc9, 0xcd, 0x2e, 0x2b, 0xb3, 0xdf, 0x99, 0x6d, 0xf6, 0xa1, 0x96, 0x9a, - 0x2c, 0x5c, 0x14, 0x5d, 0xa0, 0xd9, 0xbf, 0xb3, 0x00, 0x5d, 0x84, 0xca, 0x64, 0x1b, 0x85, 0x54, - 0x90, 0x28, 0x09, 0xb1, 0xd0, 0xb1, 0x35, 0xd7, 0xd9, 0xcc, 0xb9, 0x47, 0x86, 0xa9, 0xde, 0xa5, - 0x0f, 0xe1, 0xe6, 0x48, 0x4a, 0x7b, 0xab, 0x2d, 0x16, 0x5e, 0xb2, 0x16, 0x9d, 0xbc, 0x67, 0x6d, - 0x5b, 0x3d, 0x6b, 0xf6, 0x2f, 0x01, 0xc6, 0xc1, 0x42, 0xef, 0xc2, 0x8d, 0x01, 0x63, 0xc2, 0x55, - 0xc1, 0xe6, 0xf4, 0x09, 0x71, 0x83, 0x81, 0xb9, 0xfe, 0xba, 0x64, 0x48, 0xe8, 0x21, 0x7d, 0x42, - 0xf6, 0x06, 0xe8, 0x1d, 0xa8, 0xcb, 0x2c, 0x09, 0x99, 0x87, 0x43, 0x97, 0x73, 0x9f, 0x2b, 0x53, - 0x3a, 0x4d, 0x1e, 0x48, 0xe2, 0x21, 0xf7, 0xb9, 0xfd, 0x7b, 0x0b, 0x5a, 0xb3, 0xda, 0x02, 0xba, - 0x05, 0x6b, 0xe4, 0x0b, 0xe2, 0x65, 0x02, 0x0f, 0x42, 0xe2, 0x9e, 0xd0, 0x30, 0x3f, 0x69, 0x7d, - 0x4c, 0xbe, 0x4f, 0x43, 0x82, 0xee, 0xc3, 0x0d, 0x4d, 0x91, 0xed, 0x48, 0x3e, 0xf3, 0x2c, 0x13, - 0xca, 0x5c, 0x75, 0xeb, 0xb5, 0xfc, 0x36, 0xf2, 0x31, 0xa0, 0xd3, 0x33, 0x83, 0x84, 0xd3, 0x18, - 0xc9, 0x1c, 0x69, 0x11, 0xfb, 0xcb, 0xd2, 0xe8, 0x39, 0xd3, 0x4f, 0x22, 0xda, 0x81, 0xb2, 0x7c, - 0x14, 0xb5, 0xe1, 0xfa, 0xbc, 0xae, 0x5c, 0x90, 0xeb, 0xc8, 0x1f, 0xe2, 0x68, 0x59, 0xb4, 0x0e, - 0x4b, 0x3e, 0x11, 0x98, 0x86, 0x26, 0xda, 0x66, 0x87, 0x7a, 0xd0, 0x50, 0x00, 0x97, 0x0b, 0x9c, - 0x0a, 0xe5, 0xb8, 0x19, 0x1e, 0xda, 0x17, 0xbc, 0x3e, 0xca, 0x87, 0x17, 0x47, 0x3d, 0xf2, 0xe4, - 0x50, 0x8a, 0x48, 0xa2, 0xfd, 0x18, 0xca, 0xca, 0x1a, 0xaa, 0xc2, 0xf2, 0x71, 0xff, 0xa3, 0xfe, - 0xa3, 0x8f, 0xfb, 0x8d, 0xaf, 0xa1, 0x55, 0xa8, 0xec, 0x38, 0xbb, 0x77, 0x8f, 0xf6, 0xfb, 0x7b, - 0x0d, 0x4b, 0xb2, 0x9c, 0xe3, 0x7e, 0x5f, 0x6e, 0x4a, 0x68, 0x05, 0xca, 0xbb, 0x8e, 0xf3, 0xc8, - 0x69, 0x2c, 0x48, 0x54, 0x6f, 0xf7, 0xc1, 0xae, 0x42, 0x2d, 0xca, 0xdd, 0xf1, 0x41, 
0x4f, 0xcb, - 0x94, 0xed, 0xbf, 0x5b, 0x50, 0x2f, 0xbe, 0x05, 0xb2, 0xf2, 0x75, 0xb5, 0x9e, 0x92, 0x94, 0x53, - 0x16, 0xe7, 0x8f, 0xbb, 0x22, 0x3e, 0xd6, 0x34, 0xf4, 0x89, 0x1a, 0xa5, 0x12, 0x92, 0x0a, 0x6a, - 0xaa, 0x7e, 0x6e, 0x2f, 0x2b, 0x9a, 0xe8, 0x1c, 0x8c, 0x44, 0x75, 0x2f, 0x9b, 0xd0, 0xd5, 0xfe, - 0x10, 0xd6, 0xce, 0xb1, 0x9f, 0xab, 0x9f, 0x7d, 0x69, 0x41, 0x73, 0x27, 0x25, 0x58, 0xe4, 0xcd, - 0xd3, 0x21, 0x9f, 0x65, 0x84, 0x8b, 0xcb, 0x86, 0xbf, 0x75, 0x58, 0x4a, 0x49, 0x20, 0x8f, 0xab, - 0x1b, 0x94, 0xd9, 0xa1, 0x6d, 0x58, 0x36, 0x13, 0x8c, 0xc9, 0xb5, 0xb7, 0x2e, 0xcd, 0x0e, 0x27, - 0x97, 0xb0, 0xff, 0x6d, 0x41, 0xf3, 0x38, 0xf1, 0xff, 0x0f, 0x67, 0xca, 0x05, 0x67, 0xae, 0x30, - 0xa1, 0x4e, 0xf8, 0xbb, 0xf0, 0xbc, 0xfe, 0xa2, 0x6d, 0xa8, 0x66, 0xca, 0x5d, 0x35, 0x62, 0x9b, - 0x11, 0xf5, 0x62, 0x9a, 0xde, 0x97, 0x53, 0xf8, 0x43, 0xcc, 0x87, 0x0e, 0x68, 0xb8, 0x5c, 0xdb, - 0x09, 0x34, 0x7b, 0x24, 0x24, 0xd7, 0x15, 0xf8, 0xcb, 0xcf, 0x6a, 0x47, 0x70, 0x63, 0x8f, 0x88, - 0x97, 0x66, 0xee, 0xb7, 0x16, 0xbc, 0xf2, 0x80, 0xf2, 0xdc, 0x20, 0x7f, 0x6e, 0x8b, 0x8b, 0x05, - 0x8b, 0x37, 0x61, 0x25, 0x91, 0x65, 0x26, 0x3b, 0xac, 0x69, 0x9b, 0x15, 0x49, 0x90, 0xad, 0x55, - 0xe9, 0x94, 0x4c, 0xc1, 0x86, 0x24, 0x77, 0x55, 0xc1, 0x8f, 0x24, 0xc1, 0x7e, 0x06, 0xcd, 0xa2, - 0x27, 0x3c, 0x61, 0x31, 0x97, 0xef, 0x40, 0x25, 0xff, 0xd4, 0x6b, 0x59, 0xaa, 0x28, 0xaf, 0x70, - 0xfd, 0x23, 0x11, 0xf4, 0x2d, 0x58, 0x8b, 0xc9, 0x17, 0xc2, 0x9d, 0x30, 0xad, 0xe3, 0x50, 0x93, - 0xe4, 0x83, 0x91, 0xf9, 0x14, 0xd6, 0x7b, 0x14, 0x07, 0x31, 0xe3, 0x2f, 0xef, 0xb2, 0x7f, 0x38, - 0xc5, 0x26, 0xcf, 0x42, 0xc1, 0xa5, 0x4d, 0x96, 0x89, 0x24, 0x13, 0x13, 0xe3, 0xe2, 0x8a, 0xa6, - 0x1c, 0xa7, 0x74, 0xeb, 0xcf, 0x15, 0xb8, 0x31, 0x1e, 0xa4, 0x44, 0xca, 0xc2, 0x90, 0xa4, 0xe8, - 0x8f, 0x16, 0xd4, 0x0a, 0x7d, 0x02, 0x75, 0xe6, 0x44, 0x6a, 0x4a, 0x43, 0x69, 0xbf, 0x91, 0xe3, - 0x27, 0x3e, 0x71, 0x3b, 0x8f, 0xf2, 0x4f, 0x5c, 0xbb, 0xf7, 0x9b, 0x7f, 0xfc, 0xeb, 0x0f, 0xa5, - 0x1f, 0xd9, 0xef, 0xcb, 0xcf, 0x63, 0x13, 0x01, 0xde, 0x7d, 0x3a, 0x8e, 0xce, 0xb3, 0xae, 0x3e, - 0x3c, 0xef, 0x3e, 0xd5, 0x8b, 0x67, 0xa3, 0xcf, 0xf4, 0x3b, 0xa3, 0x8a, 0xfc, 0x8b, 0x05, 0xb5, - 0x42, 0x07, 0x99, 0xe7, 0xe6, 0xb4, 0x56, 0x73, 0x99, 0x9b, 0x87, 0xca, 0xcd, 0x87, 0x5b, 0xf7, - 0x5e, 0xc0, 0xcd, 0xee, 0xd3, 0xc9, 0x4b, 0x7b, 0x36, 0xf6, 0xfa, 0x2b, 0x0b, 0x6a, 0x85, 0x5e, - 0x30, 0xcf, 0xeb, 0x69, 0x4d, 0xe3, 0x32, 0xaf, 0x7f, 0xa6, 0xbc, 0xee, 0xdd, 0xbe, 0x06, 0xaf, - 0xd1, 0x9f, 0x2c, 0x80, 0x71, 0x1b, 0x41, 0xdf, 0x9e, 0x33, 0x91, 0x9f, 0x6f, 0x36, 0xed, 0xcb, - 0xab, 0x2b, 0x77, 0x15, 0x5d, 0x87, 0xab, 0x5f, 0x59, 0xb0, 0x3a, 0x59, 0xf7, 0x68, 0xce, 0xa8, - 0x32, 0xa5, 0x53, 0xb5, 0x3b, 0x57, 0x85, 0xeb, 0x76, 0x62, 0x6f, 0x2b, 0xdf, 0x7f, 0x80, 0x5e, - 0x24, 0x87, 0xd1, 0x5f, 0x2d, 0x58, 0x3b, 0x57, 0xb1, 0xe8, 0x7b, 0xf3, 0xa6, 0xf5, 0x69, 0x0d, - 0xe5, 0xb2, 0x44, 0x78, 0xac, 0x3c, 0x3c, 0xb0, 0x3f, 0xba, 0x86, 0xf4, 0xf5, 0x8d, 0x07, 0x77, - 0xac, 0xdb, 0xf7, 0x3e, 0x83, 0xd7, 0x3d, 0x16, 0xcd, 0xf4, 0xf6, 0x5e, 0x3e, 0x41, 0xf2, 0x03, - 0xf9, 0x28, 0x1e, 0x58, 0xbf, 0xf8, 0x89, 0x81, 0x06, 0x2c, 0xc4, 0x71, 0xd0, 0x61, 0x69, 0xd0, - 0x0d, 0x48, 0xac, 0x9e, 0xcc, 0xae, 0x66, 0xe1, 0x84, 0xf2, 0x8b, 0xff, 0x9c, 0x6d, 0xe7, 0xeb, - 0xc1, 0x92, 0x02, 0xbf, 0xff, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x58, 0xab, 0x60, 0x26, 0xc6, - 0x13, 0x00, 0x00, + // 1944 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x58, 0xcd, 0x73, 0x23, 0x47, + 0x15, 0xcf, 0x58, 0xfe, 0x90, 0x9f, 0x3e, 
0x2c, 0x77, 0x1c, 0xa3, 0x28, 0x09, 0x71, 0x26, 0x81, + 0x75, 0x36, 0x20, 0xed, 0x3a, 0x50, 0x24, 0x6b, 0x12, 0xb0, 0x2d, 0xad, 0xd7, 0xc4, 0x96, 0xcd, + 0x48, 0xda, 0x24, 0x14, 0x30, 0xd5, 0x9a, 0x69, 0x6b, 0x1b, 0x8f, 0x66, 0x26, 0xd3, 0x3d, 0x4e, + 0xbc, 0x5b, 0x7b, 0xe1, 0x40, 0x15, 0x70, 0xa4, 0x8a, 0x33, 0x07, 0xa8, 0xa2, 0x72, 0x84, 0x1b, + 0xff, 0x00, 0x17, 0x8a, 0x0b, 0x47, 0xae, 0x9c, 0xf8, 0x2b, 0xa8, 0xfe, 0x18, 0x69, 0xc6, 0x5f, + 0x92, 0x97, 0xad, 0x9c, 0x34, 0xf3, 0xfa, 0xf7, 0xde, 0xfb, 0xf5, 0x7b, 0xaf, 0x5f, 0xbf, 0x11, + 0xdc, 0x1a, 0x04, 0xc1, 0xc0, 0x23, 0x0d, 0xc7, 0x0b, 0x62, 0xb7, 0xe1, 0x62, 0x8e, 0xc3, 0x28, + 0x70, 0x1a, 0xa7, 0x77, 0x1b, 0x8e, 0x17, 0x33, 0x4e, 0x22, 0x56, 0x0f, 0xa3, 0x80, 0x07, 0xa8, + 0xaa, 0x80, 0x75, 0x09, 0xac, 0x27, 0xc0, 0xfa, 0xe9, 0xdd, 0xda, 0xab, 0xda, 0x04, 0x0e, 0x69, + 0x03, 0xfb, 0x7e, 0xc0, 0x31, 0xa7, 0x81, 0xaf, 0xf5, 0x6a, 0x6f, 0x5f, 0xe9, 0x20, 0x08, 0x49, + 0x94, 0x81, 0xbe, 0xa9, 0xa1, 0x5e, 0xe0, 0x0f, 0xa2, 0xd8, 0xf7, 0xa9, 0x3f, 0xb8, 0x08, 0xfa, + 0xba, 0x06, 0xc9, 0xb7, 0x7e, 0x7c, 0xdc, 0x70, 0x63, 0x05, 0xd0, 0xeb, 0x6b, 0xe7, 0xd7, 0x8f, + 0x29, 0xf1, 0x5c, 0x7b, 0x88, 0xd9, 0x89, 0x46, 0xbc, 0x7e, 0x1e, 0xc1, 0xe9, 0x90, 0x30, 0x8e, + 0x87, 0xa1, 0x02, 0x98, 0xbf, 0x9a, 0x85, 0x85, 0x1d, 0xb5, 0x7b, 0xf4, 0x1a, 0x40, 0x18, 0x05, + 0xbf, 0x20, 0x0e, 0xb7, 0xa9, 0x5b, 0x35, 0xd6, 0x8c, 0xf5, 0x45, 0x6b, 0x51, 0x4b, 0xf6, 0x5c, + 0xf4, 0x06, 0x14, 0x75, 0x9c, 0x6c, 0x1f, 0x0f, 0x49, 0x75, 0x46, 0x02, 0x0a, 0x5a, 0xd6, 0xc6, + 0x43, 0x82, 0x7e, 0x00, 0xf3, 0x4e, 0xe0, 0x1f, 0xd3, 0x41, 0x35, 0xb7, 0x66, 0xac, 0x17, 0x36, + 0x6e, 0xd5, 0xaf, 0x8a, 0x64, 0x5d, 0x3b, 0xdd, 0x91, 0x70, 0x4b, 0xab, 0xa1, 0x16, 0xcc, 0x7b, + 0xb8, 0x4f, 0x3c, 0x56, 0xcd, 0xaf, 0xe5, 0xd6, 0x0b, 0x1b, 0xdf, 0x9e, 0x68, 0xa0, 0xbe, 0x2f, + 0xf1, 0x2d, 0x9f, 0x47, 0x67, 0x96, 0x56, 0x16, 0x3c, 0x18, 0xc7, 0x3c, 0x66, 0xd5, 0xd9, 0x29, + 0x79, 0x74, 0x24, 0xdc, 0xd2, 0x6a, 0xa8, 0x0d, 0x65, 0xf5, 0x64, 0x3f, 0xa2, 0x8c, 0x07, 0xd1, + 0x59, 0x75, 0x41, 0xf2, 0x99, 0xda, 0x50, 0x49, 0xa9, 0x3f, 0x50, 0xda, 0xe9, 0xd8, 0xc5, 0x31, + 0x75, 0xab, 0xf3, 0x99, 0xd8, 0xf5, 0x62, 0xea, 0xa2, 0x6d, 0x58, 0x18, 0x12, 0x1e, 0x51, 0x87, + 0x55, 0x17, 0x25, 0xe9, 0xf5, 0x89, 0xbe, 0x0e, 0x14, 0xde, 0x4a, 0x14, 0x6b, 0xef, 0x43, 0x21, + 0x15, 0x0e, 0x54, 0x81, 0xdc, 0x09, 0x39, 0xd3, 0x99, 0x14, 0x8f, 0x68, 0x05, 0xe6, 0x4e, 0xb1, + 0x17, 0x27, 0xc9, 0x53, 0x2f, 0xf7, 0x66, 0xde, 0x33, 0xcc, 0x7f, 0xcf, 0x42, 0x29, 0x93, 0x13, + 0xf4, 0x26, 0x94, 0x54, 0x56, 0xec, 0x7e, 0xec, 0x9c, 0x10, 0xae, 0xed, 0x14, 0x95, 0x70, 0x5b, + 0xca, 0xd0, 0x27, 0x80, 0x06, 0x0e, 0xb1, 0x93, 0xcd, 0xe9, 0xec, 0xe7, 0xe5, 0x06, 0x6e, 0x5f, + 0xbd, 0x81, 0x5d, 0x87, 0x64, 0x0b, 0xa0, 0x32, 0x38, 0x27, 0x41, 0x16, 0x94, 0x86, 0x38, 0x6d, + 0x54, 0x45, 0xe5, 0x9a, 0x8a, 0xd8, 0xf3, 0x19, 0xc7, 0xbe, 0x43, 0x76, 0xa3, 0x20, 0x0e, 0xb5, + 0xdd, 0xa2, 0xb2, 0x31, 0xb6, 0xf9, 0x79, 0x10, 0x9d, 0x8c, 0x6d, 0xc2, 0x33, 0xd9, 0x54, 0x36, + 0xb4, 0x4d, 0x02, 0x5f, 0x63, 0xc4, 0x09, 0x7c, 0x17, 0x47, 0x67, 0x76, 0xd6, 0x7a, 0xf1, 0x59, + 0xac, 0xbf, 0x34, 0xb2, 0xf6, 0x71, 0xda, 0xcd, 0x8f, 0x61, 0x89, 0x05, 0xc7, 0xfc, 0x73, 0x1c, + 0x91, 0xc4, 0x7c, 0x69, 0x52, 0x99, 0x74, 0xb4, 0x82, 0xb6, 0x5c, 0x66, 0x99, 0x77, 0x44, 0x61, + 0x95, 0xfa, 0x94, 0x53, 0xec, 0xd1, 0xc7, 0xb2, 0xad, 0xd8, 0xd8, 0x91, 0xed, 0xa7, 0x5a, 0x90, + 0xc5, 0xbe, 0x71, 0xb5, 0xe5, 0x76, 0xe0, 0x92, 0xbd, 0x8c, 0xee, 0x96, 0x54, 0xb5, 0x5e, 0xa2, + 0x97, 0x48, 0x99, 0xf9, 0xa7, 0x1c, 0x54, 0xce, 0xe7, 0x1c, 0xbd, 
0x0c, 0xf9, 0xc7, 0x81, 0x4f, + 0xec, 0x38, 0xa2, 0xba, 0xb6, 0x16, 0xc4, 0x7b, 0x2f, 0xa2, 0xe8, 0x75, 0x28, 0xf8, 0x84, 0x8b, + 0x68, 0xca, 0x55, 0x55, 0xad, 0xa0, 0x45, 0x02, 0xf0, 0x0d, 0x28, 0xb3, 0xb8, 0x9f, 0xc6, 0xa8, + 0x23, 0x55, 0x1a, 0x4b, 0x05, 0x6c, 0x1d, 0x2a, 0xd4, 0xe7, 0x24, 0xf2, 0xb1, 0x67, 0xd3, 0xd0, + 0x0e, 0x7c, 0x4f, 0x9c, 0x64, 0x63, 0x3d, 0x6f, 0x95, 0x13, 0xf9, 0x5e, 0x78, 0xe8, 0x7b, 0x67, + 0xe8, 0x16, 0x2c, 0x31, 0x12, 0x9d, 0x52, 0x87, 0xd8, 0xd8, 0x71, 0x82, 0xd8, 0xe7, 0xb2, 0x8a, + 0x17, 0xad, 0xb2, 0x16, 0x6f, 0x29, 0x29, 0xfa, 0x0e, 0xac, 0x9e, 0x03, 0xda, 0xcc, 0x09, 0x42, + 0xc2, 0xaa, 0xb9, 0xb5, 0xdc, 0xfa, 0xa2, 0xb5, 0x92, 0xc5, 0x77, 0xe4, 0x1a, 0x42, 0x30, 0xcb, + 0xf1, 0x40, 0xf4, 0x23, 0x81, 0x91, 0xcf, 0xa8, 0x0b, 0xf9, 0x21, 0xe1, 0x58, 0xc4, 0xb5, 0x3a, + 0x27, 0x23, 0xfe, 0xde, 0xf4, 0x27, 0xa6, 0x7e, 0xa0, 0x55, 0x55, 0xe7, 0x1b, 0x59, 0xaa, 0x6d, + 0x42, 0x29, 0xb3, 0x74, 0xa3, 0x2e, 0xf0, 0xf7, 0x1c, 0xbc, 0x78, 0x49, 0x51, 0x8a, 0x5e, 0xe0, + 0xc7, 0x43, 0x9b, 0xea, 0x25, 0x26, 0xad, 0xcd, 0x59, 0x45, 0x3f, 0x1e, 0x26, 0x70, 0x26, 0x72, + 0x92, 0x00, 0xe4, 0x0d, 0xc1, 0xaa, 0x33, 0x72, 0xb7, 0xa5, 0x44, 0x2a, 0xee, 0x08, 0x86, 0x5e, + 0x81, 0x45, 0x3a, 0xc4, 0x03, 0x95, 0xf7, 0x9c, 0x64, 0x90, 0x97, 0x02, 0x9d, 0xb0, 0x21, 0x76, + 0x1e, 0x51, 0x9f, 0xd8, 0xfc, 0x2c, 0x54, 0x98, 0x59, 0x95, 0x07, 0x2d, 0xef, 0x9e, 0x85, 0x12, + 0xd9, 0x82, 0x82, 0x4b, 0xd9, 0x49, 0x72, 0x18, 0xe6, 0xe4, 0x61, 0x78, 0xeb, 0xea, 0x00, 0x36, + 0x29, 0x3b, 0xd1, 0x07, 0x01, 0xdc, 0xd1, 0xb3, 0x24, 0xcd, 0xec, 0x30, 0x22, 0x64, 0x18, 0x72, + 0xda, 0xf7, 0x88, 0x2c, 0xa4, 0xbc, 0x55, 0xa2, 0xec, 0x68, 0x2c, 0x44, 0x3f, 0x87, 0x95, 0x21, + 0xf6, 0xf1, 0x80, 0xb8, 0xf6, 0x40, 0xc4, 0x25, 0x71, 0xbb, 0x20, 0xdd, 0x7e, 0xeb, 0x6a, 0xb7, + 0x07, 0x4a, 0x2b, 0x7d, 0xc2, 0xd1, 0xf0, 0x82, 0x0c, 0x1d, 0x42, 0x11, 0x3b, 0x0e, 0xf1, 0xc4, + 0x04, 0x10, 0x44, 0xc9, 0xf5, 0xf7, 0xce, 0xd5, 0x76, 0xb7, 0xc6, 0xe8, 0xa4, 0x2d, 0xa5, 0x0d, + 0x98, 0xbf, 0x36, 0x00, 0x5d, 0xf4, 0x2d, 0xaa, 0x77, 0x94, 0x23, 0x4e, 0x86, 0xa1, 0x87, 0xb9, + 0x4a, 0x96, 0xae, 0x8f, 0x95, 0x64, 0xb5, 0xab, 0x17, 0xe5, 0xbd, 0xfe, 0x01, 0xbc, 0x32, 0xd2, + 0x52, 0xdb, 0x57, 0x5b, 0xc8, 0x4c, 0x02, 0x55, 0x9a, 0x2e, 0x1c, 0xe5, 0x5b, 0x8e, 0x05, 0x66, + 0x04, 0xcb, 0x17, 0xe8, 0xa2, 0x3b, 0xb0, 0x92, 0x22, 0x3c, 0xce, 0xb6, 0xe2, 0x81, 0x52, 0x6b, + 0x49, 0xc6, 0xdf, 0x81, 0xe5, 0xb4, 0x86, 0x3a, 0xa4, 0x33, 0xb2, 0x10, 0x2b, 0x38, 0x6d, 0x3f, + 0xf6, 0xb9, 0xf9, 0x33, 0x80, 0x71, 0xc6, 0xd1, 0xdb, 0xb0, 0xdc, 0x0f, 0x02, 0x6e, 0xcb, 0x8a, + 0x61, 0xf4, 0x31, 0xb1, 0x07, 0x7d, 0x5d, 0xc3, 0x65, 0xb1, 0x20, 0xa0, 0x1d, 0xfa, 0x98, 0xec, + 0xf6, 0xd1, 0x5b, 0x50, 0x16, 0xa5, 0xee, 0x05, 0x0e, 0xf6, 0x6c, 0xc6, 0x5c, 0xa6, 0x5d, 0x88, + 0x5a, 0xdf, 0x17, 0xc2, 0x0e, 0x73, 0x99, 0xf9, 0x5b, 0x03, 0xaa, 0x57, 0x35, 0x41, 0xd1, 0x4b, + 0xc8, 0x17, 0xc4, 0x89, 0x39, 0xee, 0x7b, 0xc4, 0x3e, 0xa6, 0x5e, 0x12, 0xdd, 0xf2, 0x58, 0x7c, + 0x9f, 0x7a, 0x04, 0xdd, 0x87, 0x65, 0x25, 0x11, 0xcd, 0x57, 0x8c, 0x66, 0x41, 0xac, 0x76, 0x54, + 0xd8, 0x78, 0x39, 0x49, 0x7d, 0x32, 0xba, 0xd5, 0x9b, 0x7a, 0xf8, 0xb3, 0x2a, 0x23, 0x9d, 0xae, + 0x52, 0x31, 0x7f, 0x9f, 0x1b, 0x5d, 0xde, 0x6a, 0xfe, 0x40, 0x3b, 0x30, 0x27, 0x26, 0x10, 0xe5, + 0xb8, 0x3c, 0xc5, 0x1c, 0xa5, 0xf4, 0xea, 0xe2, 0x87, 0x58, 0x4a, 0x17, 0xad, 0xc2, 0xbc, 0x4b, + 0x38, 0xa6, 0x9e, 0xce, 0xb0, 0x7e, 0x43, 0x4d, 0xa8, 0x48, 0x80, 0xcd, 0x38, 0x8e, 0xb8, 0x24, + 0xae, 0x07, 0xbe, 0xda, 0x05, 0xd6, 0xdd, 0x64, 0xe0, 0xb4, 0xe4, 0x44, 0x45, 0x3a, 0x42, 
0x45, + 0x08, 0xd1, 0x3e, 0xe4, 0x59, 0xdc, 0x57, 0x2c, 0x67, 0x25, 0xcb, 0x3b, 0x53, 0xb3, 0xd4, 0x7a, + 0xd6, 0xc8, 0x82, 0xf9, 0x10, 0xe6, 0x24, 0x77, 0x54, 0x80, 0x85, 0x5e, 0xfb, 0xa3, 0xf6, 0xe1, + 0xc7, 0xed, 0xca, 0x0b, 0xa8, 0x08, 0xf9, 0x1d, 0xab, 0xb5, 0xd5, 0xdd, 0x6b, 0xef, 0x56, 0x0c, + 0xb1, 0x64, 0xf5, 0xda, 0x6d, 0xf1, 0x32, 0x83, 0x16, 0x61, 0xae, 0x65, 0x59, 0x87, 0x56, 0x25, + 0x27, 0x50, 0xcd, 0xd6, 0x7e, 0x4b, 0xa2, 0x66, 0xc5, 0x5b, 0xef, 0xa8, 0xa9, 0x74, 0xe6, 0xcc, + 0xef, 0x43, 0x3e, 0xf1, 0x86, 0x96, 0xa0, 0xd0, 0x6b, 0x77, 0x8e, 0x5a, 0x3b, 0x7b, 0xf7, 0xf7, + 0x5a, 0xcd, 0xca, 0x0b, 0xa8, 0x04, 0x8b, 0xbd, 0xf6, 0x83, 0xd6, 0xd6, 0x7e, 0xf7, 0xc1, 0xa7, + 0x15, 0x03, 0x55, 0xa0, 0xd8, 0xe9, 0x6e, 0xed, 0xb7, 0xec, 0x4e, 0x77, 0xab, 0xdb, 0xeb, 0x54, + 0x66, 0xcc, 0x7f, 0x1a, 0x50, 0xce, 0xde, 0xc2, 0xa2, 0x95, 0xaa, 0xf6, 0x77, 0x4a, 0x22, 0x46, + 0x03, 0x3f, 0x19, 0xab, 0xa4, 0xf0, 0xa1, 0x92, 0xa1, 0x4f, 0xe4, 0x28, 0x1e, 0x92, 0x88, 0x53, + 0xdd, 0x46, 0xaf, 0xbd, 0x1c, 0xb2, 0x2e, 0xea, 0x47, 0x23, 0x55, 0x75, 0x39, 0xa4, 0x6c, 0xd5, + 0x3e, 0x80, 0xa5, 0x73, 0xcb, 0x37, 0xba, 0x20, 0xfe, 0x31, 0x03, 0xe5, 0xec, 0xf4, 0x89, 0x7e, + 0x0a, 0xc5, 0x47, 0xee, 0x31, 0xb3, 0x93, 0xe9, 0xd5, 0x90, 0x6c, 0xdf, 0x9f, 0x76, 0x7a, 0xad, + 0x3f, 0x70, 0x8f, 0x99, 0x7e, 0x56, 0x74, 0x0b, 0x8f, 0xc6, 0x12, 0x61, 0xfd, 0x0c, 0x47, 0xfe, + 0xc8, 0xfa, 0xcc, 0x0d, 0xad, 0x7f, 0x8a, 0x23, 0x3f, 0x6b, 0xfd, 0x6c, 0x2c, 0xa9, 0x7d, 0x08, + 0x95, 0xf3, 0xee, 0x27, 0x85, 0x23, 0x97, 0x0a, 0x87, 0xd0, 0x3f, 0xef, 0xe0, 0x26, 0xfa, 0xe6, + 0x6f, 0x0c, 0x58, 0xd9, 0x89, 0x08, 0xe6, 0xc9, 0xe5, 0x6e, 0x91, 0xcf, 0x62, 0xc2, 0xf8, 0xa4, + 0x6f, 0xb1, 0x55, 0x98, 0x8f, 0xc8, 0x40, 0x54, 0x8f, 0xba, 0x40, 0xf5, 0x1b, 0xda, 0x84, 0x05, + 0x3d, 0x8a, 0xeb, 0x36, 0xf2, 0xc6, 0xc4, 0x40, 0x59, 0x89, 0x86, 0xf9, 0x5f, 0x03, 0x56, 0x7a, + 0xa1, 0xfb, 0x7f, 0x90, 0x99, 0xcb, 0x90, 0x99, 0xe2, 0x83, 0x31, 0xc5, 0x37, 0x77, 0x53, 0xbe, + 0x68, 0x13, 0x0a, 0xb1, 0xa4, 0x2b, 0xbf, 0x78, 0xf5, 0xa7, 0xde, 0xc5, 0x0e, 0x74, 0x5f, 0x7c, + 0x14, 0x1f, 0x60, 0x76, 0x62, 0x81, 0x82, 0x8b, 0x67, 0x33, 0x84, 0x95, 0x26, 0xf1, 0xc8, 0xf3, + 0x0a, 0xfc, 0xe4, 0xbd, 0x9a, 0x43, 0x58, 0xde, 0x25, 0xfc, 0x2b, 0x73, 0xf7, 0x07, 0x03, 0x5e, + 0xdc, 0xa7, 0x2c, 0x71, 0xc8, 0x6e, 0xec, 0x71, 0x36, 0xe3, 0x71, 0x15, 0xe6, 0x8f, 0xa9, 0x27, + 0x12, 0xa5, 0x93, 0xac, 0xde, 0xc4, 0x34, 0x17, 0x8a, 0x6e, 0x26, 0x2e, 0x55, 0x7d, 0x53, 0xe6, + 0x85, 0x40, 0xdc, 0xa6, 0xd2, 0x97, 0x58, 0xe4, 0xc1, 0x09, 0x49, 0xb6, 0x20, 0xe1, 0x5d, 0x21, + 0x30, 0x9f, 0xc2, 0x4a, 0x96, 0x21, 0x0b, 0x03, 0x9f, 0x89, 0x71, 0x23, 0x9f, 0xfc, 0x23, 0xa3, + 0xbb, 0xc9, 0x14, 0x65, 0x31, 0x52, 0x41, 0xdf, 0x84, 0x25, 0x9f, 0x7c, 0xc1, 0xed, 0x94, 0x6b, + 0x15, 0x9f, 0x92, 0x10, 0x1f, 0x8d, 0xdc, 0x47, 0xb0, 0xda, 0xa4, 0x78, 0xe0, 0x07, 0xec, 0xab, + 0x2b, 0x82, 0xef, 0x5d, 0xe2, 0x93, 0xc5, 0x1e, 0x67, 0xc2, 0x67, 0x10, 0xf3, 0x30, 0xe6, 0xa9, + 0x29, 0x68, 0x51, 0x49, 0x7a, 0x11, 0xdd, 0xf8, 0x4b, 0x1e, 0x96, 0xc7, 0x1f, 0x00, 0x3c, 0x0a, + 0x3c, 0x8f, 0x44, 0xe8, 0x8f, 0x06, 0x94, 0x32, 0xfd, 0x03, 0xd5, 0xaf, 0x89, 0xd4, 0x25, 0x8d, + 0xa6, 0xf6, 0x5a, 0x82, 0x4f, 0xfd, 0x13, 0x55, 0x3f, 0x4c, 0xfe, 0x89, 0x32, 0x9b, 0xbf, 0xfc, + 0xd7, 0x7f, 0x7e, 0x37, 0xf3, 0xa1, 0xf9, 0x6e, 0xe3, 0xf4, 0x6e, 0x43, 0x47, 0x80, 0x35, 0x9e, + 0x8c, 0xa3, 0xf3, 0xb4, 0xa1, 0x36, 0xcf, 0x1a, 0x4f, 0xd4, 0xc3, 0xd3, 0xd1, 0xbf, 0x69, 0xf7, + 0x46, 0x27, 0xf5, 0xaf, 0x06, 0x94, 0x32, 0x9d, 0xe5, 0x3a, 0x9a, 0x97, 0xb5, 0xa0, 0x49, 0x34, + 0x3b, 0x92, 0xe6, 
0xc1, 0xc6, 0xf6, 0x33, 0xd0, 0x6c, 0x3c, 0x49, 0x27, 0xed, 0xe9, 0x98, 0xf5, + 0x97, 0x06, 0x94, 0x32, 0x3d, 0xe2, 0x3a, 0xd6, 0x97, 0x35, 0x93, 0x49, 0xac, 0x7f, 0x24, 0x59, + 0x37, 0x6f, 0x3f, 0x07, 0xd6, 0xe8, 0xcf, 0x06, 0xc0, 0xb8, 0xbd, 0xa0, 0x6b, 0xbe, 0x1c, 0x2e, + 0x34, 0xa1, 0xda, 0xe4, 0xd3, 0x95, 0x50, 0x45, 0xcf, 0x83, 0xea, 0x97, 0x06, 0x14, 0xd3, 0xe7, + 0x1e, 0x5d, 0x33, 0x9d, 0x5e, 0xd2, 0xc1, 0x6a, 0xf5, 0x69, 0xe1, 0xaa, 0x9d, 0x98, 0x9b, 0x92, + 0xfb, 0x77, 0xd1, 0xb3, 0xd4, 0x30, 0xfa, 0x9b, 0x01, 0x4b, 0xe7, 0x4e, 0x2c, 0xba, 0x73, 0xdd, + 0x57, 0xe6, 0x65, 0x0d, 0x65, 0x52, 0x21, 0x3c, 0x94, 0x0c, 0x8f, 0xcc, 0x8f, 0x9e, 0x43, 0xf9, + 0xba, 0x9a, 0xc1, 0x3d, 0xe3, 0xf6, 0xf6, 0x67, 0xf0, 0xaa, 0x13, 0x0c, 0xaf, 0x64, 0xbb, 0x9d, + 0x7c, 0x34, 0xb0, 0x23, 0x71, 0x59, 0x1e, 0x19, 0x3f, 0xf9, 0xa1, 0x86, 0x0e, 0x02, 0x0f, 0xfb, + 0x83, 0x7a, 0x10, 0x0d, 0x1a, 0x03, 0xe2, 0xcb, 0xab, 0xb4, 0xa1, 0x96, 0x70, 0x48, 0xd9, 0xc5, + 0x3f, 0xb8, 0x37, 0x93, 0xe7, 0xfe, 0xbc, 0x04, 0xbf, 0xfb, 0xbf, 0x00, 0x00, 0x00, 0xff, 0xff, + 0xe5, 0xf5, 0x02, 0xd0, 0x6d, 0x17, 0x00, 0x00, } diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/jobs.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/jobs.pb.go index e84b749f..1b85f0d2 100644 --- a/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/jobs.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/jobs.pb.go @@ -8,6 +8,7 @@ import fmt "fmt" import math "math" import _ "google.golang.org/genproto/googleapis/api/annotations" import google_protobuf2 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf5 "google.golang.org/genproto/protobuf/field_mask" import google_protobuf3 "github.com/golang/protobuf/ptypes/timestamp" import ( @@ -98,6 +99,11 @@ const ( JobStatus_DONE JobStatus_State = 5 // The job has completed, but encountered an error. JobStatus_ERROR JobStatus_State = 6 + // Job attempt has failed. The detail field contains failure details for + // this attempt. + // + // Applies to restartable jobs only. + JobStatus_ATTEMPT_FAILURE JobStatus_State = 9 ) var JobStatus_State_name = map[int32]string{ @@ -110,6 +116,7 @@ var JobStatus_State_name = map[int32]string{ 4: "CANCELLED", 5: "DONE", 6: "ERROR", + 9: "ATTEMPT_FAILURE", } var JobStatus_State_value = map[string]int32{ "STATE_UNSPECIFIED": 0, @@ -121,6 +128,7 @@ var JobStatus_State_value = map[string]int32{ "CANCELLED": 4, "DONE": 5, "ERROR": 6, + "ATTEMPT_FAILURE": 9, } func (x JobStatus_State) String() string { @@ -128,6 +136,99 @@ func (x JobStatus_State) String() string { } func (JobStatus_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{9, 0} } +type JobStatus_Substate int32 + +const ( + JobStatus_UNSPECIFIED JobStatus_Substate = 0 + // The Job is submitted to the agent. + // + // Applies to RUNNING state. + JobStatus_SUBMITTED JobStatus_Substate = 1 + // The Job has been received and is awaiting execution (it may be waiting + // for a condition to be met). See the "details" field for the reason for + // the delay. + // + // Applies to RUNNING state. + JobStatus_QUEUED JobStatus_Substate = 2 + // The agent-reported status is out of date, which may be caused by a + // loss of communication between the agent and Cloud Dataproc. If the + // agent does not send a timely update, the job will fail. + // + // Applies to RUNNING state. 
+ JobStatus_STALE_STATUS JobStatus_Substate = 3 +) + +var JobStatus_Substate_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "SUBMITTED", + 2: "QUEUED", + 3: "STALE_STATUS", +} +var JobStatus_Substate_value = map[string]int32{ + "UNSPECIFIED": 0, + "SUBMITTED": 1, + "QUEUED": 2, + "STALE_STATUS": 3, +} + +func (x JobStatus_Substate) String() string { + return proto.EnumName(JobStatus_Substate_name, int32(x)) +} +func (JobStatus_Substate) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{9, 1} } + +// The application state, corresponding to +// YarnProtos.YarnApplicationStateProto. +type YarnApplication_State int32 + +const ( + // Status is unspecified. + YarnApplication_STATE_UNSPECIFIED YarnApplication_State = 0 + // Status is NEW. + YarnApplication_NEW YarnApplication_State = 1 + // Status is NEW_SAVING. + YarnApplication_NEW_SAVING YarnApplication_State = 2 + // Status is SUBMITTED. + YarnApplication_SUBMITTED YarnApplication_State = 3 + // Status is ACCEPTED. + YarnApplication_ACCEPTED YarnApplication_State = 4 + // Status is RUNNING. + YarnApplication_RUNNING YarnApplication_State = 5 + // Status is FINISHED. + YarnApplication_FINISHED YarnApplication_State = 6 + // Status is FAILED. + YarnApplication_FAILED YarnApplication_State = 7 + // Status is KILLED. + YarnApplication_KILLED YarnApplication_State = 8 +) + +var YarnApplication_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "NEW", + 2: "NEW_SAVING", + 3: "SUBMITTED", + 4: "ACCEPTED", + 5: "RUNNING", + 6: "FINISHED", + 7: "FAILED", + 8: "KILLED", +} +var YarnApplication_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "NEW": 1, + "NEW_SAVING": 2, + "SUBMITTED": 3, + "ACCEPTED": 4, + "RUNNING": 5, + "FINISHED": 6, + "FAILED": 7, + "KILLED": 8, +} + +func (x YarnApplication_State) String() string { + return proto.EnumName(YarnApplication_State_name, int32(x)) +} +func (YarnApplication_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{11, 0} } + // A matcher that specifies categories of job states. type ListJobsRequest_JobStateMatcher int32 @@ -156,7 +257,7 @@ func (x ListJobsRequest_JobStateMatcher) String() string { return proto.EnumName(ListJobsRequest_JobStateMatcher_name, int32(x)) } func (ListJobsRequest_JobStateMatcher) EnumDescriptor() ([]byte, []int) { - return fileDescriptor1, []int{14, 0} + return fileDescriptor1, []int{16, 0} } // The runtime logging config of the job. @@ -184,7 +285,7 @@ func (m *LoggingConfig) GetDriverLogLevels() map[string]LoggingConfig_Level { // [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) // jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). type HadoopJob struct { - // [Required] Indicates the location of the driver's main class. Specify + // Required. Indicates the location of the driver's main class. Specify // either the jar file that contains the main class or the main class name. // To specify both, add the jar file to `jar_file_uris`, and then specify // the main class name in this property. @@ -193,28 +294,28 @@ type HadoopJob struct { // *HadoopJob_MainJarFileUri // *HadoopJob_MainClass Driver isHadoopJob_Driver `protobuf_oneof:"driver"` - // [Optional] The arguments to pass to the driver. Do not + // Optional. The arguments to pass to the driver. 
Do not // include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job // properties, since a collision may occur that causes an incorrect job // submission. Args []string `protobuf:"bytes,3,rep,name=args" json:"args,omitempty"` - // [Optional] Jar file URIs to add to the CLASSPATHs of the + // Optional. Jar file URIs to add to the CLASSPATHs of the // Hadoop driver and tasks. JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"` - // [Optional] HCFS (Hadoop Compatible Filesystem) URIs of files to be copied + // Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied // to the working directory of Hadoop drivers and distributed tasks. Useful // for naively parallel tasks. FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris" json:"file_uris,omitempty"` - // [Optional] HCFS URIs of archives to be extracted in the working directory of + // Optional. HCFS URIs of archives to be extracted in the working directory of // Hadoop drivers and tasks. Supported file types: // .jar, .tar, .tar.gz, .tgz, or .zip. ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris" json:"archive_uris,omitempty"` - // [Optional] A mapping of property names to values, used to configure Hadoop. + // Optional. A mapping of property names to values, used to configure Hadoop. // Properties that conflict with values set by the Cloud Dataproc API may be // overwritten. Can include properties set in /etc/hadoop/conf/*-site and // classes in user code. Properties map[string]string `protobuf:"bytes,7,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // [Optional] The runtime log config for job execution. + // Optional. The runtime log config for job execution. LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"` } @@ -369,7 +470,7 @@ func _HadoopJob_OneofSizer(msg proto.Message) (n int) { // A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/) // applications on YARN. type SparkJob struct { - // [Required] The specification of the main method to call to drive the job. + // Required. The specification of the main method to call to drive the job. // Specify either the jar file that contains the main class or the main class // name. To pass both a main jar and a main class in that jar, add the jar to // `CommonJob.jar_file_uris`, and then specify the main class name in `main_class`. @@ -378,26 +479,26 @@ type SparkJob struct { // *SparkJob_MainJarFileUri // *SparkJob_MainClass Driver isSparkJob_Driver `protobuf_oneof:"driver"` - // [Optional] The arguments to pass to the driver. Do not include arguments, + // Optional. The arguments to pass to the driver. Do not include arguments, // such as `--conf`, that can be set as job properties, since a collision may // occur that causes an incorrect job submission. Args []string `protobuf:"bytes,3,rep,name=args" json:"args,omitempty"` - // [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the + // Optional. HCFS URIs of jar files to add to the CLASSPATHs of the // Spark driver and tasks. JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"` - // [Optional] HCFS URIs of files to be copied to the working directory of + // Optional. 
HCFS URIs of files to be copied to the working directory of // Spark drivers and distributed tasks. Useful for naively parallel tasks. FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris" json:"file_uris,omitempty"` - // [Optional] HCFS URIs of archives to be extracted in the working directory + // Optional. HCFS URIs of archives to be extracted in the working directory // of Spark drivers and tasks. Supported file types: // .jar, .tar, .tar.gz, .tgz, and .zip. ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris" json:"archive_uris,omitempty"` - // [Optional] A mapping of property names to values, used to configure Spark. + // Optional. A mapping of property names to values, used to configure Spark. // Properties that conflict with values set by the Cloud Dataproc API may be // overwritten. Can include properties set in // /etc/spark/conf/spark-defaults.conf and classes in user code. Properties map[string]string `protobuf:"bytes,7,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // [Optional] The runtime log config for job execution. + // Optional. The runtime log config for job execution. LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"` } @@ -553,31 +654,31 @@ func _SparkJob_OneofSizer(msg proto.Message) (n int) { // [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html) // applications on YARN. type PySparkJob struct { - // [Required] The HCFS URI of the main Python file to use as the driver. Must + // Required. The HCFS URI of the main Python file to use as the driver. Must // be a .py file. MainPythonFileUri string `protobuf:"bytes,1,opt,name=main_python_file_uri,json=mainPythonFileUri" json:"main_python_file_uri,omitempty"` - // [Optional] The arguments to pass to the driver. Do not include arguments, + // Optional. The arguments to pass to the driver. Do not include arguments, // such as `--conf`, that can be set as job properties, since a collision may // occur that causes an incorrect job submission. Args []string `protobuf:"bytes,2,rep,name=args" json:"args,omitempty"` - // [Optional] HCFS file URIs of Python files to pass to the PySpark + // Optional. HCFS file URIs of Python files to pass to the PySpark // framework. Supported file types: .py, .egg, and .zip. PythonFileUris []string `protobuf:"bytes,3,rep,name=python_file_uris,json=pythonFileUris" json:"python_file_uris,omitempty"` - // [Optional] HCFS URIs of jar files to add to the CLASSPATHs of the + // Optional. HCFS URIs of jar files to add to the CLASSPATHs of the // Python driver and tasks. JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"` - // [Optional] HCFS URIs of files to be copied to the working directory of + // Optional. HCFS URIs of files to be copied to the working directory of // Python drivers and distributed tasks. Useful for naively parallel tasks. FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris" json:"file_uris,omitempty"` - // [Optional] HCFS URIs of archives to be extracted in the working directory of + // Optional. HCFS URIs of archives to be extracted in the working directory of // .jar, .tar, .tar.gz, .tgz, and .zip. 
ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris" json:"archive_uris,omitempty"` - // [Optional] A mapping of property names to values, used to configure PySpark. + // Optional. A mapping of property names to values, used to configure PySpark. // Properties that conflict with values set by the Cloud Dataproc API may be // overwritten. Can include properties set in // /etc/spark/conf/spark-defaults.conf and classes in user code. Properties map[string]string `protobuf:"bytes,7,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // [Optional] The runtime log config for job execution. + // Optional. The runtime log config for job execution. LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"` } @@ -644,7 +745,7 @@ func (m *PySparkJob) GetLoggingConfig() *LoggingConfig { // A list of queries to run on a cluster. type QueryList struct { - // [Required] The queries to execute. You do not need to terminate a query + // Required. The queries to execute. You do not need to terminate a query // with a semicolon. Multiple queries can be specified in one string // by separating each with a semicolon. Here is an example of an Cloud // Dataproc API snippet that uses a QueryList to specify a HiveJob: @@ -676,26 +777,26 @@ func (m *QueryList) GetQueries() []string { // A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/) // queries on YARN. type HiveJob struct { - // [Required] The sequence of Hive queries to execute, specified as either + // Required. The sequence of Hive queries to execute, specified as either // an HCFS file URI or a list of queries. // // Types that are valid to be assigned to Queries: // *HiveJob_QueryFileUri // *HiveJob_QueryList Queries isHiveJob_Queries `protobuf_oneof:"queries"` - // [Optional] Whether to continue executing queries if a query fails. + // Optional. Whether to continue executing queries if a query fails. // The default value is `false`. Setting to `true` can be useful when executing // independent parallel queries. ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure" json:"continue_on_failure,omitempty"` - // [Optional] Mapping of query variable names to values (equivalent to the + // Optional. Mapping of query variable names to values (equivalent to the // Hive command: `SET name="value";`). ScriptVariables map[string]string `protobuf:"bytes,4,rep,name=script_variables,json=scriptVariables" json:"script_variables,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // [Optional] A mapping of property names and values, used to configure Hive. + // Optional. A mapping of property names and values, used to configure Hive. // Properties that conflict with values set by the Cloud Dataproc API may be // overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, // /etc/hive/conf/hive-site.xml, and classes in user code. Properties map[string]string `protobuf:"bytes,5,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // [Optional] HCFS URIs of jar files to add to the CLASSPATH of the + // Optional. HCFS URIs of jar files to add to the CLASSPATH of the // Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes // and UDFs. 
JarFileUris []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"` @@ -842,23 +943,23 @@ func _HiveJob_OneofSizer(msg proto.Message) (n int) { // A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/) // queries. type SparkSqlJob struct { - // [Required] The sequence of Spark SQL queries to execute, specified as + // Required. The sequence of Spark SQL queries to execute, specified as // either an HCFS file URI or as a list of queries. // // Types that are valid to be assigned to Queries: // *SparkSqlJob_QueryFileUri // *SparkSqlJob_QueryList Queries isSparkSqlJob_Queries `protobuf_oneof:"queries"` - // [Optional] Mapping of query variable names to values (equivalent to the + // Optional. Mapping of query variable names to values (equivalent to the // Spark SQL command: SET `name="value";`). ScriptVariables map[string]string `protobuf:"bytes,3,rep,name=script_variables,json=scriptVariables" json:"script_variables,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // [Optional] A mapping of property names to values, used to configure + // Optional. A mapping of property names to values, used to configure // Spark SQL's SparkConf. Properties that conflict with values set by the // Cloud Dataproc API may be overwritten. Properties map[string]string `protobuf:"bytes,4,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // [Optional] HCFS URIs of jar files to be added to the Spark CLASSPATH. + // Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. JarFileUris []string `protobuf:"bytes,56,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"` - // [Optional] The runtime log config for job execution. + // Optional. The runtime log config for job execution. LoggingConfig *LoggingConfig `protobuf:"bytes,6,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"` } @@ -1003,29 +1104,29 @@ func _SparkSqlJob_OneofSizer(msg proto.Message) (n int) { // A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/) // queries on YARN. type PigJob struct { - // [Required] The sequence of Pig queries to execute, specified as an HCFS + // Required. The sequence of Pig queries to execute, specified as an HCFS // file URI or a list of queries. // // Types that are valid to be assigned to Queries: // *PigJob_QueryFileUri // *PigJob_QueryList Queries isPigJob_Queries `protobuf_oneof:"queries"` - // [Optional] Whether to continue executing queries if a query fails. + // Optional. Whether to continue executing queries if a query fails. // The default value is `false`. Setting to `true` can be useful when executing // independent parallel queries. ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure" json:"continue_on_failure,omitempty"` - // [Optional] Mapping of query variable names to values (equivalent to the Pig + // Optional. Mapping of query variable names to values (equivalent to the Pig // command: `name=[value]`). ScriptVariables map[string]string `protobuf:"bytes,4,rep,name=script_variables,json=scriptVariables" json:"script_variables,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // [Optional] A mapping of property names to values, used to configure Pig. + // Optional. A mapping of property names to values, used to configure Pig. 
// Properties that conflict with values set by the Cloud Dataproc API may be // overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, // /etc/pig/conf/pig.properties, and classes in user code. Properties map[string]string `protobuf:"bytes,5,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` - // [Optional] HCFS URIs of jar files to add to the CLASSPATH of + // Optional. HCFS URIs of jar files to add to the CLASSPATH of // the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. JarFileUris []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"` - // [Optional] The runtime log config for job execution. + // Optional. The runtime log config for job execution. LoggingConfig *LoggingConfig `protobuf:"bytes,7,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"` } @@ -1176,9 +1277,9 @@ func _PigJob_OneofSizer(msg proto.Message) (n int) { // Cloud Dataproc job config. type JobPlacement struct { - // [Required] The name of the cluster where the job will be submitted. + // Required. The name of the cluster where the job will be submitted. ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` - // [Output-only] A cluster UUID generated by the Cloud Dataproc service when + // Output-only. A cluster UUID generated by the Cloud Dataproc service when // the job is submitted. ClusterUuid string `protobuf:"bytes,2,opt,name=cluster_uuid,json=clusterUuid" json:"cluster_uuid,omitempty"` } @@ -1204,13 +1305,16 @@ func (m *JobPlacement) GetClusterUuid() string { // Cloud Dataproc job status. type JobStatus struct { - // [Output-only] A state message specifying the overall job state. + // Output-only. A state message specifying the overall job state. State JobStatus_State `protobuf:"varint,1,opt,name=state,enum=google.cloud.dataproc.v1.JobStatus_State" json:"state,omitempty"` - // [Output-only] Optional job state details, such as an error + // Output-only. Optional job state details, such as an error // description if the state is ERROR. Details string `protobuf:"bytes,2,opt,name=details" json:"details,omitempty"` - // [Output-only] The time when this state was entered. + // Output-only. The time when this state was entered. StateStartTime *google_protobuf3.Timestamp `protobuf:"bytes,6,opt,name=state_start_time,json=stateStartTime" json:"state_start_time,omitempty"` + // Output-only. Additional state information, which includes + // status reported by the agent. + Substate JobStatus_Substate `protobuf:"varint,7,opt,name=substate,enum=google.cloud.dataproc.v1.JobStatus_Substate" json:"substate,omitempty"` } func (m *JobStatus) Reset() { *m = JobStatus{} } @@ -1239,16 +1343,23 @@ func (m *JobStatus) GetStateStartTime() *google_protobuf3.Timestamp { return nil } +func (m *JobStatus) GetSubstate() JobStatus_Substate { + if m != nil { + return m.Substate + } + return JobStatus_UNSPECIFIED +} + // Encapsulates the full scoping used to reference a job. type JobReference struct { - // [Required] The ID of the Google Cloud Platform project that the job + // Required. The ID of the Google Cloud Platform project that the job // belongs to. ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` - // [Optional] The job ID, which must be unique within the project. The job ID + // Optional. The job ID, which must be unique within the project. 
The job ID // is generated by the server upon job submission or provided by the user as a // means to perform retries without creating duplicate jobs. The ID must // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or - // hyphens (-). The maximum length is 512 characters. + // hyphens (-). The maximum length is 100 characters. JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"` } @@ -1271,17 +1382,69 @@ func (m *JobReference) GetJobId() string { return "" } +// A YARN application created by a job. Application information is a subset of +// org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. +// +// **Beta Feature**: This report is available for testing purposes only. It may +// be changed before final release. +type YarnApplication struct { + // Required. The application name. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Required. The application state. + State YarnApplication_State `protobuf:"varint,2,opt,name=state,enum=google.cloud.dataproc.v1.YarnApplication_State" json:"state,omitempty"` + // Required. The numerical progress of the application, from 1 to 100. + Progress float32 `protobuf:"fixed32,3,opt,name=progress" json:"progress,omitempty"` + // Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or + // TimelineServer that provides application-specific information. The URL uses + // the internal hostname, and requires a proxy server for resolution and, + // possibly, access. + TrackingUrl string `protobuf:"bytes,4,opt,name=tracking_url,json=trackingUrl" json:"tracking_url,omitempty"` +} + +func (m *YarnApplication) Reset() { *m = YarnApplication{} } +func (m *YarnApplication) String() string { return proto.CompactTextString(m) } +func (*YarnApplication) ProtoMessage() {} +func (*YarnApplication) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} } + +func (m *YarnApplication) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *YarnApplication) GetState() YarnApplication_State { + if m != nil { + return m.State + } + return YarnApplication_STATE_UNSPECIFIED +} + +func (m *YarnApplication) GetProgress() float32 { + if m != nil { + return m.Progress + } + return 0 +} + +func (m *YarnApplication) GetTrackingUrl() string { + if m != nil { + return m.TrackingUrl + } + return "" +} + // A Cloud Dataproc job resource. type Job struct { - // [Optional] The fully qualified reference to the job, which can be used to + // Optional. The fully qualified reference to the job, which can be used to // obtain the equivalent REST path of the job resource. If this property // is not specified when a job is created, the server generates a // job_id. Reference *JobReference `protobuf:"bytes,1,opt,name=reference" json:"reference,omitempty"` - // [Required] Job information, including how, when, and where to + // Required. Job information, including how, when, and where to // run the job. Placement *JobPlacement `protobuf:"bytes,2,opt,name=placement" json:"placement,omitempty"` - // [Required] The application/framework-specific portion of the job. + // Required. The application/framework-specific portion of the job. // // Types that are valid to be assigned to TypeJob: // *Job_HadoopJob @@ -1291,25 +1454,39 @@ type Job struct { // *Job_PigJob // *Job_SparkSqlJob TypeJob isJob_TypeJob `protobuf_oneof:"type_job"` - // [Output-only] The job status. Additional application-specific + // Output-only. The job status. 
Additional application-specific // status information may be contained in the type_job // and yarn_applications fields. Status *JobStatus `protobuf:"bytes,8,opt,name=status" json:"status,omitempty"` - // [Output-only] The previous job status. + // Output-only. The previous job status. StatusHistory []*JobStatus `protobuf:"bytes,13,rep,name=status_history,json=statusHistory" json:"status_history,omitempty"` - // [Output-only] A URI pointing to the location of the stdout of the job's + // Output-only. The collection of YARN applications spun up by this job. + // + // **Beta** Feature: This report is available for testing purposes only. It may + // be changed before final release. + YarnApplications []*YarnApplication `protobuf:"bytes,9,rep,name=yarn_applications,json=yarnApplications" json:"yarn_applications,omitempty"` + // Output-only. A URI pointing to the location of the stdout of the job's // driver program. DriverOutputResourceUri string `protobuf:"bytes,17,opt,name=driver_output_resource_uri,json=driverOutputResourceUri" json:"driver_output_resource_uri,omitempty"` - // [Output-only] If present, the location of miscellaneous control files + // Output-only. If present, the location of miscellaneous control files // which may be used as part of job setup and handling. If not present, // control files may be placed in the same location as `driver_output_uri`. DriverControlFilesUri string `protobuf:"bytes,15,opt,name=driver_control_files_uri,json=driverControlFilesUri" json:"driver_control_files_uri,omitempty"` + // Optional. The labels to associate with this job. + // Label **keys** must contain 1 to 63 characters, and must conform to + // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + // Label **values** may be empty, but, if present, must contain 1 to 63 + // characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + // No more than 32 labels can be associated with a job. + Labels map[string]string `protobuf:"bytes,18,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Optional. Job scheduling configuration. + Scheduling *JobScheduling `protobuf:"bytes,20,opt,name=scheduling" json:"scheduling,omitempty"` } func (m *Job) Reset() { *m = Job{} } func (m *Job) String() string { return proto.CompactTextString(m) } func (*Job) ProtoMessage() {} -func (*Job) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} } +func (*Job) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } type isJob_TypeJob interface { isJob_TypeJob() @@ -1418,6 +1595,13 @@ func (m *Job) GetStatusHistory() []*JobStatus { return nil } +func (m *Job) GetYarnApplications() []*YarnApplication { + if m != nil { + return m.YarnApplications + } + return nil +} + func (m *Job) GetDriverOutputResourceUri() string { if m != nil { return m.DriverOutputResourceUri @@ -1432,6 +1616,20 @@ func (m *Job) GetDriverControlFilesUri() string { return "" } +func (m *Job) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Job) GetScheduling() *JobScheduling { + if m != nil { + return m.Scheduling + } + return nil +} + // XXX_OneofFuncs is for the internal use of the proto package. 
func (*Job) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { return _Job_OneofMarshaler, _Job_OneofUnmarshaler, _Job_OneofSizer, []interface{}{ @@ -1582,21 +1780,49 @@ func _Job_OneofSizer(msg proto.Message) (n int) { return n } +// Job scheduling options. +// +// **Beta Feature**: These options are available for testing purposes only. +// They may be changed before final release. +type JobScheduling struct { + // Optional. Maximum number of times per hour a driver may be restarted as + // a result of driver terminating with non-zero code before job is + // reported failed. + // + // A job may be reported as thrashing if driver exits with non-zero code + // 4 times within 10 minute window. + // + // Maximum value is 10. + MaxFailuresPerHour int32 `protobuf:"varint,1,opt,name=max_failures_per_hour,json=maxFailuresPerHour" json:"max_failures_per_hour,omitempty"` +} + +func (m *JobScheduling) Reset() { *m = JobScheduling{} } +func (m *JobScheduling) String() string { return proto.CompactTextString(m) } +func (*JobScheduling) ProtoMessage() {} +func (*JobScheduling) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13} } + +func (m *JobScheduling) GetMaxFailuresPerHour() int32 { + if m != nil { + return m.MaxFailuresPerHour + } + return 0 +} + // A request to submit a job. type SubmitJobRequest struct { - // [Required] The ID of the Google Cloud Platform project that the job + // Required. The ID of the Google Cloud Platform project that the job // belongs to. ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` - // [Required] The Cloud Dataproc region in which to handle the request. + // Required. The Cloud Dataproc region in which to handle the request. Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` - // [Required] The job resource. + // Required. The job resource. Job *Job `protobuf:"bytes,2,opt,name=job" json:"job,omitempty"` } func (m *SubmitJobRequest) Reset() { *m = SubmitJobRequest{} } func (m *SubmitJobRequest) String() string { return proto.CompactTextString(m) } func (*SubmitJobRequest) ProtoMessage() {} -func (*SubmitJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } +func (*SubmitJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{14} } func (m *SubmitJobRequest) GetProjectId() string { if m != nil { @@ -1621,19 +1847,19 @@ func (m *SubmitJobRequest) GetJob() *Job { // A request to get the resource representation for a job in a project. type GetJobRequest struct { - // [Required] The ID of the Google Cloud Platform project that the job + // Required. The ID of the Google Cloud Platform project that the job // belongs to. ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` - // [Required] The Cloud Dataproc region in which to handle the request. + // Required. The Cloud Dataproc region in which to handle the request. Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` - // [Required] The job ID. + // Required. The job ID. 
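// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the generated file): the submit-then-poll
// pattern suggested by SubmitJobRequest and GetJobRequest. It assumes a
// JobControllerClient obtained from the generated NewJobControllerClient
// constructor, the standard "context" and "time" packages, and the terminal
// JobStatus states defined earlier in this file; IDs, region and the poll
// interval are placeholders.
//
//    func submitAndWait(ctx context.Context, client dataproc.JobControllerClient, job *dataproc.Job) (*dataproc.Job, error) {
//        submitted, err := client.SubmitJob(ctx, &dataproc.SubmitJobRequest{
//            ProjectId: "my-project",
//            Region:    "global",
//            Job:       job,
//        })
//        if err != nil {
//            return nil, err
//        }
//        for {
//            current, err := client.GetJob(ctx, &dataproc.GetJobRequest{
//                ProjectId: "my-project",
//                Region:    "global",
//                JobId:     submitted.GetReference().GetJobId(),
//            })
//            if err != nil {
//                return nil, err
//            }
//            switch current.GetStatus().GetState() {
//            case dataproc.JobStatus_DONE, dataproc.JobStatus_ERROR, dataproc.JobStatus_CANCELLED:
//                return current, nil
//            }
//            time.Sleep(10 * time.Second)
//        }
//    }
// ----------------------------------------------------------------------------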
JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"` } func (m *GetJobRequest) Reset() { *m = GetJobRequest{} } func (m *GetJobRequest) String() string { return proto.CompactTextString(m) } func (*GetJobRequest) ProtoMessage() {} -func (*GetJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13} } +func (*GetJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{15} } func (m *GetJobRequest) GetProjectId() string { if m != nil { @@ -1658,28 +1884,45 @@ func (m *GetJobRequest) GetJobId() string { // A request to list jobs in a project. type ListJobsRequest struct { - // [Required] The ID of the Google Cloud Platform project that the job + // Required. The ID of the Google Cloud Platform project that the job // belongs to. ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` - // [Required] The Cloud Dataproc region in which to handle the request. + // Required. The Cloud Dataproc region in which to handle the request. Region string `protobuf:"bytes,6,opt,name=region" json:"region,omitempty"` - // [Optional] The number of results to return in each response. + // Optional. The number of results to return in each response. PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` - // [Optional] The page token, returned by a previous call, to request the + // Optional. The page token, returned by a previous call, to request the // next page of results. PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` - // [Optional] If set, the returned jobs list includes only jobs that were + // Optional. If set, the returned jobs list includes only jobs that were // submitted to the named cluster. ClusterName string `protobuf:"bytes,4,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` - // [Optional] Specifies enumerated categories of jobs to list + // Optional. Specifies enumerated categories of jobs to list. // (default = match ALL jobs). + // + // If `filter` is provided, `jobStateMatcher` will be ignored. JobStateMatcher ListJobsRequest_JobStateMatcher `protobuf:"varint,5,opt,name=job_state_matcher,json=jobStateMatcher,enum=google.cloud.dataproc.v1.ListJobsRequest_JobStateMatcher" json:"job_state_matcher,omitempty"` + // Optional. A filter constraining the jobs to list. Filters are + // case-sensitive and have the following syntax: + // + // [field = value] AND [field [= value]] ... + // + // where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label + // key. **value** can be `*` to match all values. + // `status.state` can be either `ACTIVE` or `NON_ACTIVE`. + // Only the logical `AND` operator is supported; space-separated items are + // treated as having an implicit `AND` operator. 
+ // + // Example filter: + // + // status.state = ACTIVE AND labels.env = staging AND labels.starred = * + Filter string `protobuf:"bytes,7,opt,name=filter" json:"filter,omitempty"` } func (m *ListJobsRequest) Reset() { *m = ListJobsRequest{} } func (m *ListJobsRequest) String() string { return proto.CompactTextString(m) } func (*ListJobsRequest) ProtoMessage() {} -func (*ListJobsRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{14} } +func (*ListJobsRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{16} } func (m *ListJobsRequest) GetProjectId() string { if m != nil { @@ -1723,11 +1966,78 @@ func (m *ListJobsRequest) GetJobStateMatcher() ListJobsRequest_JobStateMatcher { return ListJobsRequest_ALL } +func (m *ListJobsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +// A request to update a job. +type UpdateJobRequest struct { + // Required. The ID of the Google Cloud Platform project that the job + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Required. The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,2,opt,name=region" json:"region,omitempty"` + // Required. The job ID. + JobId string `protobuf:"bytes,3,opt,name=job_id,json=jobId" json:"job_id,omitempty"` + // Required. The changes to the job. + Job *Job `protobuf:"bytes,4,opt,name=job" json:"job,omitempty"` + // Required. Specifies the path, relative to Job, of + // the field to update. For example, to update the labels of a Job the + // update_mask parameter would be specified as + // labels, and the `PATCH` request body would specify the new + // value. Note: Currently, labels is the only + // field that can be updated. + UpdateMask *google_protobuf5.FieldMask `protobuf:"bytes,5,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateJobRequest) Reset() { *m = UpdateJobRequest{} } +func (m *UpdateJobRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateJobRequest) ProtoMessage() {} +func (*UpdateJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{17} } + +func (m *UpdateJobRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *UpdateJobRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *UpdateJobRequest) GetJobId() string { + if m != nil { + return m.JobId + } + return "" +} + +func (m *UpdateJobRequest) GetJob() *Job { + if m != nil { + return m.Job + } + return nil +} + +func (m *UpdateJobRequest) GetUpdateMask() *google_protobuf5.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + // A list of jobs in a project. type ListJobsResponse struct { - // [Output-only] Jobs list. + // Output-only. Jobs list. Jobs []*Job `protobuf:"bytes,1,rep,name=jobs" json:"jobs,omitempty"` - // [Optional] This token is included in the response if there are more results + // Optional. This token is included in the response if there are more results // to fetch. To fetch additional results, provide this value as the // `page_token` in a subsequent ListJobsRequest. 
NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` @@ -1736,7 +2046,7 @@ type ListJobsResponse struct { func (m *ListJobsResponse) Reset() { *m = ListJobsResponse{} } func (m *ListJobsResponse) String() string { return proto.CompactTextString(m) } func (*ListJobsResponse) ProtoMessage() {} -func (*ListJobsResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{15} } +func (*ListJobsResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{18} } func (m *ListJobsResponse) GetJobs() []*Job { if m != nil { @@ -1754,19 +2064,19 @@ func (m *ListJobsResponse) GetNextPageToken() string { // A request to cancel a job. type CancelJobRequest struct { - // [Required] The ID of the Google Cloud Platform project that the job + // Required. The ID of the Google Cloud Platform project that the job // belongs to. ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` - // [Required] The Cloud Dataproc region in which to handle the request. + // Required. The Cloud Dataproc region in which to handle the request. Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` - // [Required] The job ID. + // Required. The job ID. JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"` } func (m *CancelJobRequest) Reset() { *m = CancelJobRequest{} } func (m *CancelJobRequest) String() string { return proto.CompactTextString(m) } func (*CancelJobRequest) ProtoMessage() {} -func (*CancelJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{16} } +func (*CancelJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{19} } func (m *CancelJobRequest) GetProjectId() string { if m != nil { @@ -1791,19 +2101,19 @@ func (m *CancelJobRequest) GetJobId() string { // A request to delete a job. type DeleteJobRequest struct { - // [Required] The ID of the Google Cloud Platform project that the job + // Required. The ID of the Google Cloud Platform project that the job // belongs to. ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` - // [Required] The Cloud Dataproc region in which to handle the request. + // Required. The Cloud Dataproc region in which to handle the request. Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` - // [Required] The job ID. + // Required. The job ID. 
JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"` } func (m *DeleteJobRequest) Reset() { *m = DeleteJobRequest{} } func (m *DeleteJobRequest) String() string { return proto.CompactTextString(m) } func (*DeleteJobRequest) ProtoMessage() {} -func (*DeleteJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{17} } +func (*DeleteJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{20} } func (m *DeleteJobRequest) GetProjectId() string { if m != nil { @@ -1838,15 +2148,20 @@ func init() { proto.RegisterType((*JobPlacement)(nil), "google.cloud.dataproc.v1.JobPlacement") proto.RegisterType((*JobStatus)(nil), "google.cloud.dataproc.v1.JobStatus") proto.RegisterType((*JobReference)(nil), "google.cloud.dataproc.v1.JobReference") + proto.RegisterType((*YarnApplication)(nil), "google.cloud.dataproc.v1.YarnApplication") proto.RegisterType((*Job)(nil), "google.cloud.dataproc.v1.Job") + proto.RegisterType((*JobScheduling)(nil), "google.cloud.dataproc.v1.JobScheduling") proto.RegisterType((*SubmitJobRequest)(nil), "google.cloud.dataproc.v1.SubmitJobRequest") proto.RegisterType((*GetJobRequest)(nil), "google.cloud.dataproc.v1.GetJobRequest") proto.RegisterType((*ListJobsRequest)(nil), "google.cloud.dataproc.v1.ListJobsRequest") + proto.RegisterType((*UpdateJobRequest)(nil), "google.cloud.dataproc.v1.UpdateJobRequest") proto.RegisterType((*ListJobsResponse)(nil), "google.cloud.dataproc.v1.ListJobsResponse") proto.RegisterType((*CancelJobRequest)(nil), "google.cloud.dataproc.v1.CancelJobRequest") proto.RegisterType((*DeleteJobRequest)(nil), "google.cloud.dataproc.v1.DeleteJobRequest") proto.RegisterEnum("google.cloud.dataproc.v1.LoggingConfig_Level", LoggingConfig_Level_name, LoggingConfig_Level_value) proto.RegisterEnum("google.cloud.dataproc.v1.JobStatus_State", JobStatus_State_name, JobStatus_State_value) + proto.RegisterEnum("google.cloud.dataproc.v1.JobStatus_Substate", JobStatus_Substate_name, JobStatus_Substate_value) + proto.RegisterEnum("google.cloud.dataproc.v1.YarnApplication_State", YarnApplication_State_name, YarnApplication_State_value) proto.RegisterEnum("google.cloud.dataproc.v1.ListJobsRequest_JobStateMatcher", ListJobsRequest_JobStateMatcher_name, ListJobsRequest_JobStateMatcher_value) } @@ -1867,10 +2182,12 @@ type JobControllerClient interface { GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*Job, error) // Lists regions/{region}/jobs in a project. ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error) + // Updates a job in a project. + UpdateJob(ctx context.Context, in *UpdateJobRequest, opts ...grpc.CallOption) (*Job, error) // Starts a job cancellation request. To access the job resource // after cancellation, call - // [regions/{region}/jobs.list](/dataproc/reference/rest/v1/projects.regions.jobs/list) or - // [regions/{region}/jobs.get](/dataproc/reference/rest/v1/projects.regions.jobs/get). + // [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or + // [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get). CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*Job, error) // Deletes the job from the project. If the job is active, the delete fails, // and the response returns `FAILED_PRECONDITION`. 
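// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the generated file): how the new Filter
// field and the new UpdateJob RPC might be driven through this client
// interface. It assumes `conn` is an already dialed, authenticated
// *grpc.ClientConn for the Cloud Dataproc endpoint; project, region, job IDs
// and label values are placeholders.
//
//    import (
//        "context"
//
//        dataproc "google.golang.org/genproto/googleapis/cloud/dataproc/v1"
//        field_mask "google.golang.org/genproto/protobuf/field_mask"
//        "google.golang.org/grpc"
//    )
//
//    func relabelActiveStagingJobs(ctx context.Context, conn *grpc.ClientConn) error {
//        client := dataproc.NewJobControllerClient(conn)
//
//        // List only active jobs in the staging environment, using the filter
//        // syntax documented on ListJobsRequest above.
//        resp, err := client.ListJobs(ctx, &dataproc.ListJobsRequest{
//            ProjectId: "my-project",
//            Region:    "global",
//            Filter:    "status.state = ACTIVE AND labels.env = staging",
//        })
//        if err != nil {
//            return err
//        }
//
//        // Patch each job's labels; per UpdateJobRequest, labels is currently
//        // the only field that can be updated, selected via update_mask.
//        for _, job := range resp.GetJobs() {
//            _, err := client.UpdateJob(ctx, &dataproc.UpdateJobRequest{
//                ProjectId:  "my-project",
//                Region:     "global",
//                JobId:      job.GetReference().GetJobId(),
//                Job:        &dataproc.Job{Labels: map[string]string{"env": "staging", "starred": "true"}},
//                UpdateMask: &field_mask.FieldMask{Paths: []string{"labels"}},
//            })
//            if err != nil {
//                return err
//            }
//        }
//        return nil
//    }
// ----------------------------------------------------------------------------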
@@ -1912,6 +2229,15 @@ func (c *jobControllerClient) ListJobs(ctx context.Context, in *ListJobsRequest, return out, nil } +func (c *jobControllerClient) UpdateJob(ctx context.Context, in *UpdateJobRequest, opts ...grpc.CallOption) (*Job, error) { + out := new(Job) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1.JobController/UpdateJob", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + func (c *jobControllerClient) CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*Job, error) { out := new(Job) err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1.JobController/CancelJob", in, out, c.cc, opts...) @@ -1939,10 +2265,12 @@ type JobControllerServer interface { GetJob(context.Context, *GetJobRequest) (*Job, error) // Lists regions/{region}/jobs in a project. ListJobs(context.Context, *ListJobsRequest) (*ListJobsResponse, error) + // Updates a job in a project. + UpdateJob(context.Context, *UpdateJobRequest) (*Job, error) // Starts a job cancellation request. To access the job resource // after cancellation, call - // [regions/{region}/jobs.list](/dataproc/reference/rest/v1/projects.regions.jobs/list) or - // [regions/{region}/jobs.get](/dataproc/reference/rest/v1/projects.regions.jobs/get). + // [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1/projects.regions.jobs/list) or + // [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1/projects.regions.jobs/get). CancelJob(context.Context, *CancelJobRequest) (*Job, error) // Deletes the job from the project. If the job is active, the delete fails, // and the response returns `FAILED_PRECONDITION`. @@ -2007,6 +2335,24 @@ func _JobController_ListJobs_Handler(srv interface{}, ctx context.Context, dec f return interceptor(ctx, in, info, handler) } +func _JobController_UpdateJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(JobControllerServer).UpdateJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1.JobController/UpdateJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(JobControllerServer).UpdateJob(ctx, req.(*UpdateJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + func _JobController_CancelJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { in := new(CancelJobRequest) if err := dec(in); err != nil { @@ -2059,6 +2405,10 @@ var _JobController_serviceDesc = grpc.ServiceDesc{ MethodName: "ListJobs", Handler: _JobController_ListJobs_Handler, }, + { + MethodName: "UpdateJob", + Handler: _JobController_UpdateJob_Handler, + }, { MethodName: "CancelJob", Handler: _JobController_CancelJob_Handler, @@ -2075,122 +2425,149 @@ var _JobController_serviceDesc = grpc.ServiceDesc{ func init() { proto.RegisterFile("google/cloud/dataproc/v1/jobs.proto", fileDescriptor1) } var fileDescriptor1 = []byte{ - // 1862 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0x5b, 0x6f, 0x23, 0x49, - 0x15, 0x8e, 0xef, 0xee, 0xe3, 0xb1, 0xd3, 0x29, 0x66, 0x17, 0xcb, 0xb3, 0xab, 0xcd, 0xf6, 0xb0, - 0x43, 0x76, 0x10, 0x36, 0xf1, 0xc2, 0xec, 0x90, 0x00, 0xbb, 0x8e, 0xed, 0x8c, 0x13, 0x8c, 0xe3, - 0x6d, 0x3b, 0x83, 
0x84, 0x84, 0x7a, 0xda, 0x76, 0xc5, 0x69, 0x4f, 0xbb, 0xab, 0xd3, 0xd5, 0x6d, - 0xe1, 0x19, 0xcd, 0x0b, 0x7f, 0x00, 0x71, 0x11, 0x12, 0x3c, 0xf2, 0x2b, 0x90, 0x10, 0xe2, 0x01, - 0xc4, 0x1f, 0xe0, 0x15, 0xf1, 0xc4, 0x0f, 0x41, 0x55, 0xd5, 0xed, 0xf8, 0x12, 0x5f, 0xb2, 0xc3, - 0xae, 0x76, 0xf7, 0x29, 0xd5, 0xe7, 0x56, 0xa7, 0xea, 0xfb, 0xce, 0xa9, 0x2a, 0x07, 0xee, 0xf7, - 0x09, 0xe9, 0x9b, 0xb8, 0xd0, 0x35, 0x89, 0xd7, 0x2b, 0xf4, 0x74, 0x57, 0xb7, 0x1d, 0xd2, 0x2d, - 0x8c, 0xf6, 0x0b, 0x03, 0xd2, 0xa1, 0x79, 0xdb, 0x21, 0x2e, 0x41, 0x59, 0x61, 0x94, 0xe7, 0x46, - 0xf9, 0xc0, 0x28, 0x3f, 0xda, 0xcf, 0xbd, 0xe5, 0xbb, 0xeb, 0xb6, 0x51, 0xd0, 0x2d, 0x8b, 0xb8, - 0xba, 0x6b, 0x10, 0xcb, 0xf7, 0xcb, 0xdd, 0xf3, 0xb5, 0xfc, 0xab, 0xe3, 0x5d, 0x14, 0xf0, 0xd0, - 0x76, 0xc7, 0xbe, 0xf2, 0x9d, 0x79, 0xa5, 0x6b, 0x0c, 0x31, 0x75, 0xf5, 0xa1, 0x2d, 0x0c, 0x94, - 0xff, 0x84, 0x21, 0x5d, 0x27, 0xfd, 0xbe, 0x61, 0xf5, 0xcb, 0xc4, 0xba, 0x30, 0xfa, 0xe8, 0x12, - 0x76, 0x7a, 0x8e, 0x31, 0xc2, 0x8e, 0x66, 0x92, 0xbe, 0x66, 0xe2, 0x11, 0x36, 0x69, 0x36, 0xbc, - 0x1b, 0xd9, 0x4b, 0x15, 0x7f, 0x90, 0x5f, 0x96, 0x63, 0x7e, 0x26, 0x46, 0xbe, 0xc2, 0x03, 0xd4, - 0x49, 0xbf, 0xce, 0xdd, 0xab, 0x96, 0xeb, 0x8c, 0xd5, 0xed, 0xde, 0xac, 0x34, 0x77, 0x05, 0x77, - 0x6f, 0x32, 0x44, 0x32, 0x44, 0x9e, 0xe3, 0x71, 0x36, 0xb4, 0x1b, 0xda, 0x93, 0x54, 0x36, 0x44, - 0x65, 0x88, 0x8d, 0x74, 0xd3, 0xc3, 0xd9, 0xf0, 0x6e, 0x68, 0x2f, 0x53, 0xfc, 0xf6, 0xa6, 0x79, - 0xf0, 0xa8, 0xaa, 0xf0, 0x3d, 0x08, 0x3f, 0x0e, 0x29, 0x36, 0xc4, 0xb8, 0x0c, 0xbd, 0x01, 0x3b, - 0xf5, 0xea, 0xd3, 0x6a, 0x5d, 0x3b, 0x6f, 0xb4, 0x9a, 0xd5, 0xf2, 0xc9, 0xf1, 0x49, 0xb5, 0x22, - 0x6f, 0xa1, 0x04, 0x44, 0x4a, 0xf5, 0xba, 0x1c, 0x42, 0x12, 0xc4, 0xda, 0x6a, 0xa9, 0x5c, 0x95, - 0xc3, 0x6c, 0x58, 0xa9, 0x1e, 0x9d, 0x3f, 0x91, 0x23, 0x28, 0x09, 0xd1, 0x93, 0xc6, 0xf1, 0x99, - 0x1c, 0x65, 0xa3, 0x9f, 0x96, 0xd4, 0x86, 0x1c, 0x63, 0xea, 0xaa, 0xaa, 0x9e, 0xa9, 0x72, 0x9c, - 0x0d, 0x8f, 0x4b, 0xed, 0x52, 0x5d, 0x4e, 0xb0, 0x40, 0x67, 0xc7, 0xc7, 0x72, 0x52, 0xf9, 0x5b, - 0x04, 0xa4, 0x9a, 0xde, 0x23, 0xc4, 0x3e, 0x25, 0x1d, 0xf4, 0x2d, 0xd8, 0x19, 0xea, 0x86, 0xa5, - 0x0d, 0x74, 0x47, 0xbb, 0x30, 0x4c, 0xac, 0x79, 0x8e, 0x21, 0x16, 0x5a, 0xdb, 0x52, 0x33, 0x4c, - 0x75, 0xaa, 0x3b, 0xc7, 0x86, 0x89, 0xcf, 0x1d, 0x03, 0xbd, 0x03, 0xc0, 0x8d, 0xbb, 0xa6, 0x4e, - 0x29, 0x5f, 0x3a, 0xb3, 0x92, 0x98, 0xac, 0xcc, 0x44, 0x08, 0x41, 0x54, 0x77, 0xfa, 0x34, 0x1b, - 0xd9, 0x8d, 0xec, 0x49, 0x2a, 0x1f, 0x23, 0x05, 0xd2, 0xd3, 0xc1, 0x69, 0x36, 0xca, 0x95, 0xa9, - 0xc1, 0x24, 0x2e, 0x45, 0xf7, 0x40, 0xba, 0xd6, 0xc7, 0xb8, 0x3e, 0x79, 0x11, 0x28, 0xdf, 0x85, - 0x3b, 0xba, 0xd3, 0xbd, 0x34, 0x46, 0xbe, 0x3e, 0x2e, 0xfc, 0x7d, 0x19, 0x37, 0x69, 0x01, 0xd8, - 0x0e, 0xb1, 0xb1, 0xe3, 0x1a, 0x98, 0x66, 0x13, 0x9c, 0x1b, 0x1f, 0x2c, 0xc7, 0x64, 0xb2, 0xfc, - 0x7c, 0x73, 0xe2, 0x25, 0x28, 0x31, 0x15, 0x06, 0x35, 0x20, 0x63, 0x0a, 0xf0, 0xb4, 0x2e, 0x47, - 0x2f, 0x9b, 0xdc, 0x0d, 0xed, 0xa5, 0x8a, 0xdf, 0xdc, 0x10, 0x6c, 0x35, 0x6d, 0x4e, 0x7f, 0xe6, - 0x7e, 0x08, 0xdb, 0x73, 0xd3, 0xdd, 0x40, 0xac, 0xbb, 0xd3, 0xc4, 0x92, 0xa6, 0x98, 0x72, 0x94, - 0x84, 0xb8, 0xe0, 0xab, 0xf2, 0xd7, 0x08, 0x24, 0x5b, 0xb6, 0xee, 0x3c, 0xff, 0xea, 0x00, 0xa8, - 0xde, 0x00, 0x60, 0x71, 0xf9, 0x3e, 0x07, 0xab, 0xff, 0x72, 0xe2, 0xf7, 0x8f, 0x08, 0x40, 0x73, - 0x3c, 0x41, 0xb0, 0x00, 0x77, 0x39, 0x28, 0xf6, 0xd8, 0xbd, 0x24, 0xd6, 0x1c, 0x88, 0x2a, 0x47, - 0xb7, 0xc9, 0x55, 0x01, 0x8a, 0x01, 0x48, 0xe1, 0x29, 0x90, 0xf6, 0x40, 0x9e, 0xf3, 0x0f, 0x40, - 0xcc, 0xd8, 0xd3, 0xce, 0x9f, 0x0f, 0x9c, 
0xed, 0x1b, 0xe0, 0xfc, 0xee, 0xf2, 0x6d, 0xbf, 0xde, - 0x8c, 0x2f, 0x11, 0xa0, 0xca, 0x7b, 0x20, 0x7d, 0xe2, 0x61, 0x67, 0x5c, 0x37, 0xa8, 0x8b, 0xb2, - 0x90, 0xb8, 0xf2, 0xb0, 0xc3, 0x96, 0x1b, 0xe2, 0xfb, 0x11, 0x7c, 0x2a, 0xbf, 0x8a, 0x42, 0xa2, - 0x66, 0x8c, 0x30, 0x83, 0xfa, 0x01, 0x64, 0x98, 0x78, 0xbc, 0x58, 0xa9, 0x77, 0xb8, 0x3c, 0x40, - 0xb8, 0x02, 0x20, 0xec, 0x4c, 0x83, 0xba, 0x7c, 0xe6, 0x54, 0xf1, 0xfe, 0xf2, 0x55, 0x4e, 0xd2, - 0x60, 0xc5, 0x7c, 0x35, 0xc9, 0x29, 0x0f, 0x5f, 0xeb, 0x12, 0xcb, 0x35, 0x2c, 0x0f, 0x6b, 0x8c, - 0x18, 0xba, 0x61, 0x7a, 0x0e, 0xce, 0x46, 0x76, 0x43, 0x7b, 0x49, 0x75, 0x27, 0x50, 0x9d, 0x59, - 0xc7, 0x42, 0x81, 0x74, 0x90, 0x69, 0xd7, 0x31, 0x6c, 0x57, 0x1b, 0xe9, 0x8e, 0xa1, 0x77, 0x4c, - 0x2c, 0xc8, 0x91, 0x2a, 0x3e, 0x5a, 0xd1, 0x4b, 0xc5, 0xd2, 0xf2, 0x2d, 0xee, 0xf9, 0x34, 0x70, - 0xf4, 0x4f, 0x58, 0x3a, 0x2b, 0x45, 0x9f, 0xcc, 0x10, 0x23, 0xc6, 0x83, 0xef, 0xaf, 0x0f, 0xbe, - 0x8a, 0x15, 0x0b, 0x7c, 0x8e, 0x2f, 0xf0, 0x39, 0x77, 0x04, 0x77, 0x6f, 0xca, 0xef, 0x36, 0x70, - 0xbf, 0x6e, 0xf9, 0x4b, 0x13, 0x82, 0x28, 0x7f, 0x89, 0x42, 0x8a, 0x13, 0xbe, 0x75, 0x65, 0x7e, - 0xfe, 0xac, 0xc0, 0x37, 0xa0, 0x1c, 0xe1, 0x40, 0x1c, 0xac, 0x69, 0xb8, 0x22, 0xdd, 0x0d, 0x91, - 0x3e, 0x9f, 0x41, 0x5a, 0xd0, 0xe8, 0x7b, 0x9b, 0x4d, 0x70, 0x2b, 0xb4, 0x1f, 0x2f, 0x76, 0xaf, - 0xc5, 0x3e, 0x11, 0x7f, 0xad, 0x3e, 0xf1, 0xc5, 0x62, 0xcf, 0xbf, 0xa3, 0x10, 0x6f, 0x1a, 0xfd, - 0x2f, 0x7e, 0x3b, 0x79, 0xb6, 0xb4, 0x9d, 0xac, 0xe0, 0x81, 0x58, 0xd9, 0x86, 0x1c, 0x6b, 0xde, - 0xd0, 0x4d, 0xbe, 0xb3, 0x36, 0xf6, 0x6b, 0x36, 0x93, 0x1b, 0xe8, 0x95, 0xf8, 0x0a, 0xd1, 0xab, - 0x0d, 0x77, 0x4e, 0x49, 0xa7, 0x69, 0xea, 0x5d, 0x3c, 0xc4, 0x96, 0xcb, 0x4e, 0xfb, 0xae, 0xe9, - 0x51, 0x17, 0x3b, 0x9a, 0xa5, 0x0f, 0xb1, 0x1f, 0x2f, 0xe5, 0xcb, 0x1a, 0xfa, 0x10, 0x4f, 0x9b, - 0x78, 0x9e, 0xd1, 0xf3, 0xc3, 0x07, 0x26, 0xe7, 0x9e, 0xd1, 0x53, 0xfe, 0x1e, 0x06, 0xe9, 0x94, - 0x74, 0x5a, 0xae, 0xee, 0x7a, 0x14, 0x7d, 0x04, 0x31, 0xea, 0xea, 0xae, 0x08, 0x96, 0x29, 0xbe, - 0xbf, 0x7c, 0xe3, 0x26, 0x3e, 0x79, 0xf6, 0x07, 0xab, 0xc2, 0x8f, 0x9d, 0xb6, 0x3d, 0xec, 0xea, - 0x86, 0xe9, 0x5f, 0x62, 0xd5, 0xe0, 0x13, 0x55, 0x40, 0xe6, 0x26, 0x1a, 0x75, 0x75, 0xc7, 0xd5, - 0xd8, 0xeb, 0xd2, 0xaf, 0xfe, 0x5c, 0x30, 0x4b, 0xf0, 0xf4, 0xcc, 0xb7, 0x83, 0xa7, 0xa7, 0x9a, - 0xe1, 0x3e, 0x2d, 0xe6, 0xc2, 0x84, 0xca, 0xef, 0x42, 0x10, 0xe3, 0x13, 0xb2, 0x67, 0x59, 0xab, - 0x5d, 0x6a, 0x57, 0xe7, 0x9e, 0x65, 0x29, 0x48, 0x34, 0xab, 0x8d, 0xca, 0x49, 0xe3, 0x89, 0x1c, - 0x42, 0x19, 0x80, 0x56, 0xb5, 0x7d, 0xde, 0xd4, 0x2a, 0x67, 0x8d, 0xaa, 0x9c, 0x64, 0x4a, 0xf5, - 0xbc, 0xd1, 0x60, 0xca, 0x30, 0x42, 0x90, 0x29, 0x97, 0x1a, 0xe5, 0x6a, 0x5d, 0x0b, 0x1c, 0x22, - 0x53, 0xb2, 0x56, 0xbb, 0xa4, 0xb6, 0xab, 0x15, 0x39, 0x81, 0xd2, 0x20, 0x09, 0x59, 0xbd, 0x5a, - 0x11, 0xcf, 0x39, 0x1e, 0x6d, 0xfa, 0x39, 0xa7, 0x54, 0x38, 0x36, 0x2a, 0xbe, 0xc0, 0x0e, 0xb6, - 0xba, 0x18, 0xbd, 0xcd, 0xf9, 0x3f, 0xc0, 0x5d, 0x57, 0x33, 0x7a, 0x3e, 0x32, 0x92, 0x2f, 0x39, - 0xe9, 0xa1, 0x37, 0x20, 0x3e, 0x20, 0x1d, 0x6d, 0x82, 0x48, 0x6c, 0x40, 0x3a, 0x27, 0x3d, 0xe5, - 0xcf, 0x71, 0x88, 0xb0, 0xee, 0x51, 0x01, 0xc9, 0x09, 0x42, 0x71, 0xe7, 0x54, 0xf1, 0xc1, 0x4a, - 0x24, 0x26, 0x13, 0xab, 0xd7, 0x8e, 0x2c, 0x8a, 0x1d, 0x90, 0xc5, 0x6f, 0x2d, 0xab, 0xa3, 0x4c, - 0xa8, 0xa5, 0x5e, 0x3b, 0xb2, 0x0e, 0x75, 0xc9, 0x1f, 0x65, 0xda, 0x80, 0x74, 0x78, 0x4b, 0x59, - 0xd9, 0xa1, 0x26, 0x0f, 0x38, 0xd6, 0xa1, 0x2e, 0x27, 0x8f, 0xd9, 0x12, 0x48, 0x94, 0x9d, 0x23, - 0x3c, 0x48, 0x94, 0x07, 0x51, 0xd6, 0x3f, 0x22, 0x6a, 0x5b, 0x6a, 
0x92, 0x06, 0x97, 0xf1, 0x27, - 0x90, 0xb2, 0xc7, 0xd7, 0x41, 0x62, 0x3c, 0xc8, 0x37, 0x36, 0xb9, 0xba, 0xd6, 0xb6, 0x54, 0xf0, - 0x5d, 0x59, 0xa0, 0x1f, 0x41, 0x92, 0x5f, 0x91, 0x59, 0x14, 0x41, 0xc0, 0x77, 0xd7, 0xde, 0x73, - 0x6a, 0x5b, 0x6a, 0xe2, 0xd2, 0xbf, 0x2a, 0x1e, 0x42, 0xc2, 0x36, 0xfa, 0xdc, 0x5d, 0xb4, 0x97, - 0xdd, 0x75, 0x8d, 0xad, 0xb6, 0xa5, 0xc6, 0x6d, 0x71, 0x30, 0xfc, 0x18, 0xd2, 0x62, 0x0d, 0xf4, - 0xca, 0xe4, 0x21, 0xee, 0xf0, 0x10, 0xef, 0x6d, 0x74, 0xfe, 0xd6, 0xb6, 0xd4, 0x14, 0x9d, 0xba, - 0x9e, 0x1c, 0x42, 0x9c, 0xf2, 0x1a, 0xf4, 0xaf, 0xdb, 0xf7, 0x37, 0x28, 0x57, 0xd5, 0x77, 0x41, - 0xa7, 0x90, 0x11, 0x23, 0xed, 0xd2, 0xa0, 0x2e, 0x71, 0xc6, 0xd9, 0x34, 0x6f, 0xd3, 0x1b, 0x05, - 0x49, 0x0b, 0xd7, 0x9a, 0xf0, 0x44, 0x87, 0x90, 0xf3, 0x7f, 0x08, 0x22, 0x9e, 0x6b, 0x7b, 0xae, - 0xe6, 0x60, 0x4a, 0x3c, 0xa7, 0x2b, 0x8e, 0xbe, 0x1d, 0xce, 0xf1, 0xaf, 0x0b, 0x8b, 0x33, 0x6e, - 0xa0, 0xfa, 0x7a, 0x76, 0x06, 0x7e, 0x08, 0x59, 0xdf, 0x99, 0x9d, 0x54, 0x0e, 0x31, 0x79, 0x93, - 0xa7, 0xdc, 0x75, 0x9b, 0xbb, 0xbe, 0x21, 0xf4, 0x65, 0xa1, 0x66, 0xed, 0x9e, 0x9e, 0x3b, 0xc6, - 0x11, 0x40, 0xd2, 0x1d, 0xdb, 0x1c, 0x48, 0xe5, 0x05, 0xc8, 0x2d, 0xaf, 0x33, 0x34, 0x5c, 0x5e, - 0x0d, 0x57, 0x1e, 0xa6, 0xee, 0xba, 0x22, 0x7c, 0x13, 0xe2, 0x0e, 0xee, 0x1b, 0xc4, 0xe2, 0xac, - 0x96, 0x54, 0xff, 0x0b, 0x15, 0x20, 0xc2, 0x80, 0x11, 0x15, 0xf3, 0xf6, 0xea, 0xba, 0x63, 0x96, - 0xca, 0xcf, 0x21, 0xfd, 0x04, 0xff, 0x1f, 0x26, 0x5e, 0xd2, 0x15, 0xfe, 0x19, 0x86, 0x6d, 0x76, - 0xcc, 0x9f, 0x92, 0x0e, 0xbd, 0xf5, 0x0c, 0xf1, 0x99, 0x19, 0xee, 0x81, 0x64, 0xeb, 0x7d, 0xac, - 0x51, 0xe3, 0x85, 0x38, 0x6b, 0x62, 0x6a, 0x92, 0x09, 0x5a, 0xc6, 0x0b, 0xd1, 0xb3, 0x98, 0xd2, - 0x25, 0xcf, 0x71, 0x90, 0x1a, 0x37, 0x6f, 0x33, 0xc1, 0xc2, 0x71, 0x13, 0x5d, 0x3c, 0x6e, 0x30, - 0xec, 0xb0, 0x05, 0x88, 0x36, 0x3f, 0xd4, 0xdd, 0xee, 0x25, 0x76, 0x78, 0xa1, 0x66, 0x8a, 0xdf, - 0x5f, 0x71, 0x04, 0xcf, 0xae, 0x2d, 0x60, 0x19, 0xfe, 0x89, 0x08, 0xa0, 0x6e, 0x0f, 0x66, 0x05, - 0xca, 0x23, 0xd8, 0x9e, 0xb3, 0x09, 0x7e, 0x8c, 0xdb, 0x42, 0x00, 0xf1, 0x52, 0xb9, 0x7d, 0xf2, - 0xb4, 0x2a, 0xba, 0x7f, 0xe3, 0xac, 0xa1, 0xf9, 0xdf, 0x61, 0x65, 0x08, 0xf2, 0xf5, 0x5c, 0xd4, - 0x26, 0x16, 0xc5, 0x68, 0x1f, 0xa2, 0x03, 0xd2, 0x11, 0x4f, 0xc3, 0xb5, 0x68, 0x73, 0x53, 0xf4, - 0x00, 0xb6, 0x2d, 0xfc, 0x0b, 0x57, 0x9b, 0xda, 0x2c, 0x81, 0x57, 0x9a, 0x89, 0x9b, 0xc1, 0x86, - 0x29, 0xcf, 0x40, 0x2e, 0xeb, 0x56, 0x17, 0x9b, 0x9f, 0x19, 0x33, 0x9e, 0x81, 0x5c, 0xc1, 0x26, - 0x76, 0xf1, 0x67, 0x35, 0x43, 0xf1, 0xf7, 0x71, 0x48, 0x9f, 0x92, 0x8e, 0x5f, 0x79, 0x26, 0x76, - 0xd0, 0x1f, 0x42, 0x20, 0x4d, 0x2a, 0x0d, 0x3d, 0x5c, 0xd1, 0xb7, 0xe6, 0xca, 0x31, 0xb7, 0x7a, - 0x73, 0x95, 0xd2, 0x2f, 0xff, 0xf5, 0xdf, 0xdf, 0x86, 0x0f, 0x95, 0x47, 0x85, 0xd1, 0x7e, 0xc1, - 0x4f, 0x98, 0x16, 0x5e, 0x5e, 0x2f, 0xe6, 0x55, 0x41, 0xe4, 0x4a, 0x0b, 0x2f, 0xc5, 0xe0, 0x15, - 0xff, 0x4d, 0xfc, 0x80, 0xf2, 0x89, 0x0e, 0x42, 0x0f, 0xd1, 0x6f, 0x42, 0x10, 0x17, 0x95, 0x88, - 0x56, 0x5c, 0xf9, 0x66, 0x6a, 0x75, 0x5d, 0x56, 0x1f, 0xf3, 0xac, 0x0e, 0xd0, 0xe3, 0x5b, 0x66, - 0x55, 0x78, 0x29, 0xb6, 0xf3, 0x15, 0xfa, 0x63, 0x08, 0x92, 0x01, 0xed, 0xd0, 0xfb, 0x1b, 0x97, - 0x41, 0xee, 0xe1, 0x26, 0xa6, 0x82, 0xc5, 0xca, 0x87, 0x3c, 0xcb, 0x7d, 0x54, 0xb8, 0x65, 0x96, - 0xe8, 0x4f, 0x21, 0x90, 0x26, 0x24, 0x5d, 0x85, 0xe6, 0x3c, 0x93, 0xd7, 0xed, 0xdb, 0x29, 0xcf, - 0xa8, 0xa2, 0x7c, 0xf4, 0x69, 0xf7, 0xed, 0xa0, 0xcb, 0x67, 0x64, 0xb0, 0xfe, 0x3a, 0x04, 0xd2, - 0x84, 0xe7, 0xab, 0x92, 0x9c, 0x2f, 0x86, 0xdc, 0x9b, 0x0b, 0x37, 0xcb, 0xea, 0xd0, 0x76, 
0xc7, - 0x01, 0xaa, 0x0f, 0x3f, 0x35, 0xaa, 0x47, 0x43, 0x78, 0xab, 0x4b, 0x86, 0x4b, 0x53, 0x39, 0x62, - 0x77, 0x6a, 0xda, 0x64, 0xb3, 0x36, 0x43, 0x3f, 0xfb, 0xd8, 0x37, 0xeb, 0x13, 0x53, 0xb7, 0xfa, - 0x79, 0xe2, 0xf4, 0x0b, 0x7d, 0x6c, 0xf1, 0x9c, 0x0a, 0x42, 0xa5, 0xdb, 0x06, 0x5d, 0xfc, 0x9f, - 0xcf, 0x61, 0x30, 0xee, 0xc4, 0xb9, 0xf1, 0x07, 0xff, 0x0b, 0x00, 0x00, 0xff, 0xff, 0xaf, 0x59, - 0x9b, 0xa1, 0x1f, 0x1a, 0x00, 0x00, + // 2290 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x59, 0xcf, 0x73, 0x1b, 0x49, + 0xf5, 0xb7, 0x7e, 0x6b, 0x9e, 0x6c, 0x79, 0xdc, 0x9b, 0xec, 0x57, 0xa5, 0xdd, 0xad, 0xf5, 0x4e, + 0xbe, 0x1b, 0x9c, 0x00, 0x12, 0xd6, 0x42, 0x36, 0x6b, 0x03, 0x59, 0x59, 0x1a, 0x47, 0xf2, 0x2a, + 0xb2, 0x32, 0x92, 0x92, 0x82, 0x2a, 0x6a, 0x32, 0x92, 0xda, 0xf2, 0xd8, 0xa3, 0x99, 0xf1, 0xf4, + 0x8c, 0x2b, 0x4a, 0x2a, 0x17, 0x2e, 0x1c, 0x29, 0xe0, 0x04, 0x55, 0x5c, 0xb8, 0xf1, 0x07, 0xc0, + 0x85, 0xa2, 0xb8, 0x70, 0xe6, 0xc2, 0x81, 0x0b, 0xb5, 0x27, 0x8e, 0xfc, 0x11, 0x54, 0x77, 0xcf, + 0xc8, 0x92, 0x6c, 0xfd, 0x70, 0x02, 0x5b, 0xbb, 0x7b, 0x72, 0x4f, 0xbf, 0x1f, 0xfd, 0xba, 0x3f, + 0x9f, 0x7e, 0xef, 0xb5, 0x0c, 0xb7, 0xfa, 0x96, 0xd5, 0x37, 0x70, 0xbe, 0x6b, 0x58, 0x5e, 0x2f, + 0xdf, 0xd3, 0x5c, 0xcd, 0x76, 0xac, 0x6e, 0xfe, 0x7c, 0x3b, 0x7f, 0x62, 0x75, 0x48, 0xce, 0x76, + 0x2c, 0xd7, 0x42, 0x19, 0xae, 0x94, 0x63, 0x4a, 0xb9, 0x40, 0x29, 0x77, 0xbe, 0x9d, 0x7d, 0xd7, + 0x37, 0xd7, 0x6c, 0x3d, 0xaf, 0x99, 0xa6, 0xe5, 0x6a, 0xae, 0x6e, 0x99, 0xbe, 0x5d, 0xf6, 0x1d, + 0x5f, 0xca, 0xbe, 0x3a, 0xde, 0x51, 0x1e, 0x0f, 0x6c, 0x77, 0xe8, 0x0b, 0x37, 0xa7, 0x85, 0x47, + 0x3a, 0x36, 0x7a, 0xea, 0x40, 0x23, 0xa7, 0xbe, 0xc6, 0xfb, 0xd3, 0x1a, 0xae, 0x3e, 0xc0, 0xc4, + 0xd5, 0x06, 0x36, 0x57, 0x90, 0x3e, 0x0f, 0xc3, 0x5a, 0xcd, 0xea, 0xf7, 0x75, 0xb3, 0x5f, 0xb2, + 0xcc, 0x23, 0xbd, 0x8f, 0x8e, 0x61, 0xa3, 0xe7, 0xe8, 0xe7, 0xd8, 0x51, 0x0d, 0xab, 0xaf, 0x1a, + 0xf8, 0x1c, 0x1b, 0x24, 0x13, 0xde, 0x8c, 0x6c, 0xa5, 0x0a, 0xdf, 0xcf, 0xcd, 0xda, 0x45, 0x6e, + 0xc2, 0x47, 0xae, 0xcc, 0x1c, 0xd4, 0xac, 0x7e, 0x8d, 0x99, 0xcb, 0xa6, 0xeb, 0x0c, 0x95, 0xf5, + 0xde, 0xe4, 0x6c, 0xf6, 0x0c, 0x6e, 0x5c, 0xa5, 0x88, 0x44, 0x88, 0x9c, 0xe2, 0x61, 0x26, 0xb4, + 0x19, 0xda, 0x12, 0x14, 0x3a, 0x44, 0x25, 0x88, 0x9d, 0x6b, 0x86, 0x87, 0x33, 0xe1, 0xcd, 0xd0, + 0x56, 0xba, 0xf0, 0xed, 0x65, 0xe3, 0x60, 0x5e, 0x15, 0x6e, 0xbb, 0x13, 0xbe, 0x1f, 0x92, 0x6c, + 0x88, 0xb1, 0x39, 0x74, 0x13, 0x36, 0x6a, 0xf2, 0x13, 0xb9, 0xa6, 0xb6, 0xeb, 0xcd, 0x86, 0x5c, + 0xaa, 0xee, 0x57, 0xe5, 0xb2, 0xb8, 0x82, 0x12, 0x10, 0x29, 0xd6, 0x6a, 0x62, 0x08, 0x09, 0x10, + 0x6b, 0x29, 0xc5, 0x92, 0x2c, 0x86, 0xe9, 0xb0, 0x2c, 0xef, 0xb5, 0x1f, 0x8a, 0x11, 0x94, 0x84, + 0x68, 0xb5, 0xbe, 0x7f, 0x28, 0x46, 0xe9, 0xe8, 0x69, 0x51, 0xa9, 0x8b, 0x31, 0x2a, 0x96, 0x15, + 0xe5, 0x50, 0x11, 0xe3, 0x74, 0xb8, 0x5f, 0x6c, 0x15, 0x6b, 0x62, 0x82, 0x3a, 0x3a, 0xdc, 0xdf, + 0x17, 0x93, 0xd2, 0x5f, 0x22, 0x20, 0x54, 0xb4, 0x9e, 0x65, 0xd9, 0x07, 0x56, 0x07, 0x7d, 0x13, + 0x36, 0x06, 0x9a, 0x6e, 0xaa, 0x27, 0x9a, 0xa3, 0x1e, 0xe9, 0x06, 0x56, 0x3d, 0x47, 0xe7, 0x1b, + 0xad, 0xac, 0x28, 0x69, 0x2a, 0x3a, 0xd0, 0x9c, 0x7d, 0xdd, 0xc0, 0x6d, 0x47, 0x47, 0xef, 0x03, + 0x30, 0xe5, 0xae, 0xa1, 0x11, 0xc2, 0xb6, 0x4e, 0xb5, 0x04, 0x3a, 0x57, 0xa2, 0x53, 0x08, 0x41, + 0x54, 0x73, 0xfa, 0x24, 0x13, 0xd9, 0x8c, 0x6c, 0x09, 0x0a, 0x1b, 0x23, 0x09, 0xd6, 0xc6, 0x9d, + 0x93, 0x4c, 0x94, 0x09, 0x53, 0x27, 0x23, 0xbf, 0x04, 0xbd, 0x03, 0xc2, 0x85, 0x3c, 0xc6, 0xe4, + 0xc9, 0xa3, 0x40, 0xf8, 0x01, 
0xac, 0x6a, 0x4e, 0xf7, 0x58, 0x3f, 0xf7, 0xe5, 0x71, 0x6e, 0xef, + 0xcf, 0x31, 0x95, 0x26, 0x80, 0xed, 0x58, 0x36, 0x76, 0x5c, 0x1d, 0x93, 0x4c, 0x82, 0x71, 0xe3, + 0xa3, 0xd9, 0x98, 0x8c, 0xb6, 0x9f, 0x6b, 0x8c, 0xac, 0x38, 0x25, 0xc6, 0xdc, 0xa0, 0x3a, 0xa4, + 0x0d, 0x0e, 0x9e, 0xda, 0x65, 0xe8, 0x65, 0x92, 0x9b, 0xa1, 0xad, 0x54, 0xe1, 0x1b, 0x4b, 0x82, + 0xad, 0xac, 0x19, 0xe3, 0x9f, 0xd9, 0x1f, 0xc0, 0xfa, 0xd4, 0x72, 0x57, 0x10, 0xeb, 0xc6, 0x38, + 0xb1, 0x84, 0x31, 0xa6, 0xec, 0x25, 0x21, 0xce, 0xf9, 0x2a, 0xfd, 0x39, 0x02, 0xc9, 0xa6, 0xad, + 0x39, 0xa7, 0x5f, 0x1f, 0x00, 0x95, 0x2b, 0x00, 0x2c, 0xcc, 0x3e, 0xe7, 0x60, 0xf7, 0x5f, 0x4d, + 0xfc, 0xfe, 0x1a, 0x01, 0x68, 0x0c, 0x47, 0x08, 0xe6, 0xe1, 0x06, 0x03, 0xc5, 0x1e, 0xba, 0xc7, + 0x96, 0x39, 0x05, 0xa2, 0xc2, 0xd0, 0x6d, 0x30, 0x51, 0x80, 0x62, 0x00, 0x52, 0x78, 0x0c, 0xa4, + 0x2d, 0x10, 0xa7, 0xec, 0x03, 0x10, 0xd3, 0xf6, 0xb8, 0xf1, 0x17, 0x03, 0x67, 0xeb, 0x0a, 0x38, + 0xbf, 0x3b, 0xfb, 0xd8, 0x2f, 0x0e, 0xe3, 0x2b, 0x04, 0xa8, 0xf4, 0x21, 0x08, 0x8f, 0x3d, 0xec, + 0x0c, 0x6b, 0x3a, 0x71, 0x51, 0x06, 0x12, 0x67, 0x1e, 0x76, 0xe8, 0x76, 0x43, 0xec, 0x3c, 0x82, + 0x4f, 0xe9, 0xe7, 0x51, 0x48, 0x54, 0xf4, 0x73, 0x4c, 0xa1, 0xbe, 0x0d, 0x69, 0x3a, 0x3d, 0xbc, + 0x7c, 0x53, 0x57, 0xd9, 0x7c, 0x80, 0x70, 0x19, 0x80, 0xeb, 0x19, 0x3a, 0x71, 0xd9, 0xca, 0xa9, + 0xc2, 0xad, 0xd9, 0xbb, 0x1c, 0x85, 0x41, 0x2f, 0xf3, 0xd9, 0x28, 0xa6, 0x1c, 0xbc, 0xd5, 0xb5, + 0x4c, 0x57, 0x37, 0x3d, 0xac, 0x52, 0x62, 0x68, 0xba, 0xe1, 0x39, 0x38, 0x13, 0xd9, 0x0c, 0x6d, + 0x25, 0x95, 0x8d, 0x40, 0x74, 0x68, 0xee, 0x73, 0x01, 0xd2, 0x40, 0x24, 0x5d, 0x47, 0xb7, 0x5d, + 0xf5, 0x5c, 0x73, 0x74, 0xad, 0x63, 0x60, 0x4e, 0x8e, 0x54, 0xe1, 0xde, 0x9c, 0x5c, 0xca, 0xb7, + 0x96, 0x6b, 0x32, 0xcb, 0x27, 0x81, 0xa1, 0x5f, 0x61, 0xc9, 0xe4, 0x2c, 0x7a, 0x3c, 0x41, 0x8c, + 0x18, 0x73, 0xbe, 0xbd, 0xd8, 0xf9, 0x3c, 0x56, 0x5c, 0xe2, 0x73, 0xfc, 0x12, 0x9f, 0xb3, 0x7b, + 0x70, 0xe3, 0xaa, 0xf8, 0xae, 0x03, 0xf7, 0x9b, 0x5e, 0x7f, 0x61, 0x44, 0x10, 0xe9, 0x4f, 0x51, + 0x48, 0x31, 0xc2, 0x37, 0xcf, 0x8c, 0x2f, 0x9e, 0x15, 0xf8, 0x0a, 0x94, 0x23, 0x0c, 0x88, 0x9d, + 0x05, 0x09, 0x97, 0x87, 0xbb, 0x24, 0xd2, 0xed, 0x09, 0xa4, 0x39, 0x8d, 0xbe, 0xb7, 0xdc, 0x02, + 0xd7, 0x42, 0xfb, 0xfe, 0xe5, 0xec, 0x75, 0x39, 0x4f, 0xc4, 0xdf, 0x28, 0x4f, 0x7c, 0xb9, 0xd8, + 0xf3, 0xcf, 0x28, 0xc4, 0x1b, 0x7a, 0xff, 0xcb, 0x9f, 0x4e, 0x9e, 0xcd, 0x4c, 0x27, 0x73, 0x78, + 0xc0, 0x77, 0xb6, 0x24, 0xc7, 0x1a, 0x57, 0x64, 0x93, 0xef, 0x2c, 0xf4, 0xfd, 0x86, 0xc9, 0xe4, + 0x0a, 0x7a, 0x25, 0xbe, 0x46, 0xf4, 0x6a, 0xc1, 0xea, 0x81, 0xd5, 0x69, 0x18, 0x5a, 0x17, 0x0f, + 0xb0, 0xe9, 0xd2, 0x6a, 0xdf, 0x35, 0x3c, 0xe2, 0x62, 0x47, 0x35, 0xb5, 0x01, 0xf6, 0xfd, 0xa5, + 0xfc, 0xb9, 0xba, 0x36, 0xc0, 0xe3, 0x2a, 0x9e, 0xa7, 0xf7, 0x7c, 0xf7, 0x81, 0x4a, 0xdb, 0xd3, + 0x7b, 0xd2, 0xbf, 0x23, 0x20, 0x1c, 0x58, 0x9d, 0xa6, 0xab, 0xb9, 0x1e, 0x41, 0x0f, 0x20, 0x46, + 0x5c, 0xcd, 0xe5, 0xce, 0xd2, 0x85, 0x3b, 0xb3, 0x0f, 0x6e, 0x64, 0x93, 0xa3, 0x7f, 0xb0, 0xc2, + 0xed, 0x68, 0xb5, 0xed, 0x61, 0x57, 0xd3, 0x0d, 0xbf, 0x89, 0x55, 0x82, 0x4f, 0x54, 0x06, 0x91, + 0xa9, 0xa8, 0xc4, 0xd5, 0x1c, 0x57, 0xa5, 0xaf, 0x4b, 0xff, 0xf6, 0x67, 0x83, 0x55, 0x82, 0xa7, + 0x67, 0xae, 0x15, 0x3c, 0x3d, 0x95, 0x34, 0xb3, 0x69, 0x52, 0x13, 0x3a, 0x89, 0x2a, 0x90, 0x24, + 0x5e, 0x87, 0xc7, 0x98, 0x60, 0x31, 0x7e, 0x6b, 0xa9, 0x18, 0x7d, 0x1b, 0x65, 0x64, 0x2d, 0xfd, + 0x3e, 0x04, 0x31, 0x16, 0x3a, 0x7d, 0xe0, 0x35, 0x5b, 0xc5, 0x96, 0x3c, 0xf5, 0xc0, 0x4b, 0x41, + 0xa2, 0x21, 0xd7, 0xcb, 0xd5, 0xfa, 0x43, 0x31, 0x84, 
0xd2, 0x00, 0x4d, 0xb9, 0xd5, 0x6e, 0xa8, + 0xe5, 0xc3, 0xba, 0x2c, 0x26, 0xa9, 0x50, 0x69, 0xd7, 0xeb, 0x54, 0x18, 0x46, 0x08, 0xd2, 0xa5, + 0x62, 0xbd, 0x24, 0xd7, 0xd4, 0xc0, 0x20, 0x32, 0x36, 0xd7, 0x6c, 0x15, 0x95, 0x96, 0x5c, 0x16, + 0x13, 0x68, 0x0d, 0x04, 0x3e, 0x57, 0x93, 0xcb, 0xfc, 0x61, 0xc8, 0xbc, 0x4d, 0x3c, 0x0c, 0xdf, + 0x82, 0xf5, 0x62, 0xab, 0x25, 0x3f, 0x6a, 0xb4, 0xd4, 0xfd, 0x62, 0xb5, 0xd6, 0x56, 0x64, 0x51, + 0x90, 0x2a, 0x90, 0x0c, 0x76, 0x80, 0xd6, 0x21, 0x35, 0x19, 0xe7, 0x1a, 0x08, 0xcd, 0xf6, 0xde, + 0xa3, 0x6a, 0x8b, 0x2e, 0x12, 0x42, 0x00, 0xf1, 0xc7, 0x6d, 0xb9, 0x2d, 0x97, 0xc5, 0x30, 0x12, + 0x61, 0xb5, 0xd9, 0x2a, 0xd6, 0x64, 0x1a, 0x43, 0xab, 0xdd, 0x14, 0x23, 0x52, 0x99, 0x91, 0x48, + 0xc1, 0x47, 0xd8, 0xc1, 0x66, 0x17, 0xa3, 0xf7, 0xd8, 0x45, 0x3d, 0xc1, 0x5d, 0x57, 0xd5, 0x7b, + 0x3e, 0x85, 0x04, 0x7f, 0xa6, 0xda, 0x43, 0x37, 0x21, 0x7e, 0x62, 0x75, 0xd4, 0x11, 0x75, 0x62, + 0x27, 0x56, 0xa7, 0xda, 0x93, 0xfe, 0x10, 0x86, 0xf5, 0x1f, 0x69, 0x8e, 0x59, 0xb4, 0x6d, 0x43, + 0xef, 0xb2, 0x5f, 0x21, 0x68, 0xef, 0x3b, 0x46, 0x43, 0x36, 0x46, 0x72, 0x40, 0x27, 0xfe, 0x18, + 0xcf, 0xcf, 0x86, 0x6a, 0xca, 0xdb, 0x24, 0xa9, 0xb2, 0x90, 0xb4, 0x1d, 0xab, 0xef, 0x60, 0x42, + 0x58, 0x52, 0x0b, 0x2b, 0xa3, 0x6f, 0x4a, 0x71, 0xd7, 0xd1, 0xba, 0xa7, 0xf4, 0xd2, 0x7b, 0x8e, + 0x91, 0x89, 0x72, 0x8a, 0x07, 0x73, 0x6d, 0xc7, 0x90, 0x7e, 0xb6, 0x08, 0xe9, 0x04, 0x44, 0xea, + 0xf2, 0x53, 0x8e, 0x72, 0x5d, 0x7e, 0xaa, 0x36, 0x8b, 0x4f, 0x38, 0xb0, 0x13, 0x47, 0x1b, 0x41, + 0xab, 0x90, 0x2c, 0x96, 0x4a, 0x72, 0xa3, 0xc5, 0xe0, 0x1b, 0xa3, 0x40, 0x8c, 0x8a, 0xf6, 0xab, + 0xf5, 0x6a, 0xb3, 0x22, 0x97, 0xc5, 0x38, 0xc5, 0x80, 0x82, 0xc7, 0x40, 0x07, 0x88, 0x7f, 0x56, + 0x65, 0x88, 0x27, 0xa5, 0x7f, 0x24, 0x21, 0x42, 0xcb, 0x43, 0x19, 0x04, 0x27, 0x80, 0x80, 0x1d, + 0x58, 0xaa, 0x70, 0x7b, 0x2e, 0x8d, 0x47, 0x80, 0x29, 0x17, 0x86, 0xd4, 0x8b, 0x1d, 0x64, 0x03, + 0xbf, 0x76, 0xcc, 0xf7, 0x32, 0xca, 0x1d, 0xca, 0x85, 0x21, 0x2d, 0x41, 0xc7, 0xec, 0xd5, 0xad, + 0x9e, 0x58, 0x1d, 0x76, 0xbc, 0x73, 0x4b, 0xd0, 0xe8, 0x85, 0x4e, 0x4b, 0xd0, 0xf1, 0xe8, 0xd7, + 0x8a, 0x22, 0x08, 0x84, 0x36, 0x0a, 0xcc, 0x49, 0x94, 0x39, 0x91, 0x16, 0xbf, 0x12, 0x2b, 0x2b, + 0x4a, 0x92, 0x04, 0xaf, 0xad, 0x87, 0x90, 0xb2, 0x87, 0x17, 0x4e, 0x62, 0xcc, 0xc9, 0xff, 0x2f, + 0xf3, 0x36, 0xa9, 0xac, 0x28, 0xe0, 0x9b, 0x52, 0x47, 0x3f, 0x84, 0x24, 0x7b, 0x03, 0x51, 0x2f, + 0x3c, 0xc3, 0x7c, 0xb0, 0xb0, 0x91, 0xad, 0xac, 0x28, 0x89, 0x63, 0xff, 0x2d, 0xb0, 0x0b, 0x09, + 0x5b, 0xef, 0x33, 0x73, 0x5e, 0x3f, 0x36, 0x17, 0x55, 0xae, 0xca, 0x8a, 0x12, 0xb7, 0x79, 0xe5, + 0xff, 0x0c, 0xd6, 0xf8, 0x1e, 0xc8, 0x99, 0xc1, 0x5c, 0xac, 0x32, 0x17, 0x1f, 0x2e, 0xd5, 0x60, + 0x55, 0x56, 0x94, 0x14, 0x19, 0xeb, 0x3f, 0x77, 0x21, 0x4e, 0x58, 0x02, 0xf3, 0xdf, 0x53, 0xb7, + 0x96, 0xc8, 0x75, 0x8a, 0x6f, 0x82, 0x0e, 0x20, 0xcd, 0x47, 0xea, 0xb1, 0x4e, 0x5c, 0xcb, 0x19, + 0x66, 0xd6, 0x58, 0x1d, 0x5e, 0xca, 0xc9, 0x1a, 0x37, 0xad, 0x70, 0x4b, 0xf4, 0x04, 0x36, 0x86, + 0x9a, 0x63, 0xaa, 0xda, 0xc5, 0x15, 0x25, 0x19, 0x81, 0xb9, 0xbb, 0xb3, 0xf4, 0xa5, 0x56, 0xc4, + 0xe1, 0xe4, 0x04, 0x41, 0xbb, 0x90, 0xf5, 0x7f, 0x41, 0xb4, 0x3c, 0xd7, 0xf6, 0x5c, 0xd5, 0xc1, + 0xc4, 0xf2, 0x9c, 0x2e, 0xef, 0x99, 0x36, 0xd8, 0x5d, 0xfe, 0x3f, 0xae, 0x71, 0xc8, 0x14, 0x14, + 0x5f, 0x4e, 0x9b, 0xa7, 0x8f, 0x21, 0xe3, 0x1b, 0xd3, 0x16, 0xc7, 0xb1, 0x0c, 0xd6, 0x1d, 0x10, + 0x66, 0xba, 0xce, 0x4c, 0x6f, 0x72, 0x79, 0x89, 0x8b, 0x69, 0x9f, 0x40, 0xa8, 0x61, 0x11, 0xe2, + 0x86, 0xd6, 0xc1, 0x06, 0xc9, 0xa0, 0x45, 0x5b, 0xa0, 0x6d, 0x49, 0x8d, 0xe9, 
0xf2, 0x96, 0xc4, + 0x37, 0x44, 0x0f, 0x01, 0x48, 0xf7, 0x18, 0xf7, 0x3c, 0x43, 0x37, 0xfb, 0x99, 0x1b, 0x8b, 0xda, + 0x0c, 0x7a, 0xb0, 0x23, 0x75, 0x65, 0xcc, 0x34, 0xfb, 0x09, 0xa4, 0xc6, 0xfc, 0x5f, 0xab, 0x37, + 0x00, 0x48, 0xba, 0x43, 0x9b, 0xf1, 0x5c, 0xda, 0x83, 0xb5, 0x89, 0x35, 0xd0, 0x36, 0xdc, 0x1c, + 0x68, 0xcf, 0x83, 0x5e, 0x90, 0xa8, 0x36, 0x76, 0xd4, 0x63, 0xcb, 0x73, 0x98, 0xeb, 0x98, 0x82, + 0x06, 0xda, 0x73, 0xbf, 0x1d, 0x24, 0x0d, 0xec, 0x54, 0x2c, 0xcf, 0x91, 0x5e, 0x80, 0xd8, 0xf4, + 0x3a, 0x03, 0xdd, 0x65, 0x09, 0xe7, 0xcc, 0xc3, 0xc4, 0x5d, 0x54, 0x1f, 0xde, 0x86, 0xb8, 0x83, + 0xfb, 0xba, 0x65, 0xb2, 0xc4, 0x21, 0x28, 0xfe, 0x17, 0xca, 0x43, 0x84, 0x72, 0x9f, 0x27, 0xa5, + 0xf7, 0xe6, 0xa7, 0x36, 0xaa, 0x29, 0xfd, 0x04, 0xd6, 0x1e, 0xe2, 0xff, 0xc2, 0xc2, 0x33, 0x0a, + 0xd6, 0xe7, 0x61, 0x58, 0xa7, 0xad, 0xf2, 0x81, 0xd5, 0x21, 0xd7, 0x5e, 0x21, 0x3e, 0xb1, 0xc2, + 0x3b, 0x20, 0xd8, 0x5a, 0x1f, 0xab, 0x44, 0x7f, 0xc1, 0x31, 0x89, 0x29, 0x49, 0x3a, 0xd1, 0xd4, + 0x5f, 0xf0, 0x72, 0x4a, 0x85, 0xae, 0x75, 0x8a, 0x83, 0xd0, 0x98, 0x7a, 0x8b, 0x4e, 0x5c, 0x6a, + 0xd9, 0xa2, 0x97, 0x5b, 0x36, 0x0c, 0x1b, 0x74, 0x03, 0xbc, 0x55, 0x1a, 0x68, 0x6e, 0xf7, 0x18, + 0x3b, 0x2c, 0x17, 0xa6, 0x0b, 0x9f, 0xcc, 0x69, 0x63, 0x27, 0xf7, 0x16, 0x5c, 0x64, 0xfc, 0x88, + 0x3b, 0x50, 0xd6, 0x4f, 0x26, 0x27, 0xe8, 0xee, 0x8e, 0x74, 0xc3, 0xc5, 0x0e, 0x4b, 0x71, 0x82, + 0xe2, 0x7f, 0x49, 0xf7, 0x60, 0x7d, 0xca, 0x36, 0xf8, 0xa1, 0x7b, 0x85, 0x56, 0xb2, 0x62, 0xa9, + 0x55, 0x7d, 0x22, 0xfb, 0x95, 0xf2, 0xb0, 0xae, 0xfa, 0xdf, 0x61, 0xe9, 0x6f, 0x21, 0x10, 0xdb, + 0x76, 0x4f, 0x73, 0xf1, 0xeb, 0x60, 0x18, 0x9e, 0x81, 0x61, 0x64, 0x0c, 0xc3, 0x80, 0x53, 0xd1, + 0x65, 0x39, 0x85, 0x76, 0x21, 0xe5, 0xb1, 0x90, 0xd8, 0xbf, 0x39, 0xfc, 0x82, 0x72, 0xb9, 0xd9, + 0xdc, 0xd7, 0xb1, 0xd1, 0x7b, 0xa4, 0x91, 0x53, 0x05, 0xb8, 0x3a, 0x1d, 0x4b, 0x03, 0x10, 0x2f, + 0x0e, 0x95, 0xd8, 0x96, 0x49, 0x30, 0xda, 0x86, 0xe8, 0x89, 0xd5, 0xe1, 0xbf, 0x23, 0x2d, 0x0c, + 0x81, 0xa9, 0xa2, 0xdb, 0xb0, 0x6e, 0xe2, 0xe7, 0xae, 0x3a, 0xc6, 0x0a, 0xbe, 0xd9, 0x35, 0x3a, + 0xdd, 0x08, 0x98, 0x21, 0x3d, 0x03, 0xb1, 0xa4, 0x99, 0x5d, 0x6c, 0xfc, 0xcf, 0xae, 0xc0, 0x33, + 0x10, 0xcb, 0xd8, 0xc0, 0xaf, 0x07, 0xd0, 0x32, 0x2b, 0x14, 0xfe, 0x98, 0x60, 0x49, 0xc8, 0xcf, + 0xb6, 0x06, 0x76, 0xd0, 0xaf, 0x43, 0x20, 0x8c, 0x52, 0x0a, 0xba, 0x3b, 0xa7, 0x06, 0x4e, 0xe5, + 0x9d, 0xec, 0xfc, 0xc3, 0x95, 0x8a, 0x3f, 0xfd, 0xfb, 0xbf, 0x7e, 0x15, 0xde, 0x95, 0xee, 0xe5, + 0xcf, 0xb7, 0xf3, 0x7e, 0xc0, 0x24, 0xff, 0xf2, 0x62, 0x33, 0xaf, 0xf2, 0x3c, 0x56, 0x92, 0x7f, + 0xc9, 0x07, 0xaf, 0xd8, 0xbf, 0xd8, 0x76, 0x08, 0x5b, 0x68, 0x27, 0x74, 0x17, 0xfd, 0x32, 0x04, + 0x71, 0x9e, 0x72, 0xd0, 0x9c, 0xc4, 0x3d, 0x91, 0x94, 0x16, 0x45, 0xf5, 0x29, 0x8b, 0x6a, 0x07, + 0xdd, 0xbf, 0x66, 0x54, 0xf9, 0x97, 0xfc, 0x38, 0x5f, 0xa1, 0xdf, 0x84, 0x20, 0x19, 0xd0, 0x0e, + 0xdd, 0x59, 0xfa, 0xbe, 0x67, 0xef, 0x2e, 0xa3, 0xca, 0x59, 0x2c, 0x7d, 0xcc, 0xa2, 0xdc, 0x46, + 0xf9, 0x6b, 0x46, 0x89, 0x7e, 0x1b, 0x02, 0x61, 0x74, 0xc7, 0xe7, 0xa1, 0x39, 0x9d, 0x08, 0x16, + 0x9d, 0x9b, 0xcc, 0x22, 0x7a, 0x50, 0x78, 0xed, 0x73, 0xdb, 0x61, 0xf7, 0xfd, 0x77, 0x21, 0x10, + 0x46, 0x97, 0x68, 0x5e, 0x7c, 0xd3, 0x37, 0x6d, 0x51, 0x7c, 0x07, 0x2c, 0xbe, 0xb2, 0xf4, 0xe0, + 0xb5, 0xe3, 0xeb, 0xb2, 0x15, 0x29, 0xed, 0x7e, 0x11, 0x02, 0x61, 0x74, 0x0f, 0xe7, 0x05, 0x39, + 0x7d, 0x59, 0xb3, 0x6f, 0x5f, 0xca, 0x5c, 0xf2, 0xc0, 0x76, 0x87, 0x01, 0xeb, 0xee, 0xbe, 0xf6, + 0xe9, 0xed, 0x0d, 0xe0, 0xdd, 0xae, 0x35, 0x98, 0x19, 0xca, 0x9e, 0x40, 0xf9, 0xd3, 0xa0, 0xab, + 0x36, 
0x42, 0x3f, 0xfe, 0xd4, 0x57, 0xeb, 0x5b, 0x86, 0x66, 0xf6, 0x73, 0x96, 0xd3, 0xcf, 0xf7, + 0xb1, 0xc9, 0x62, 0xca, 0x73, 0x91, 0x66, 0xeb, 0xe4, 0xf2, 0xbf, 0xb8, 0x77, 0x83, 0x71, 0x27, + 0xce, 0x94, 0x3f, 0xfa, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x02, 0xa8, 0x72, 0x7c, 0x0e, 0x1f, + 0x00, 0x00, } diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/operations.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/operations.pb.go index a66e72ad..ecdbdebc 100644 --- a/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/operations.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1/operations.pb.go @@ -52,13 +52,13 @@ func (ClusterOperationStatus_State) EnumDescriptor() ([]byte, []int) { // The status of the operation. type ClusterOperationStatus struct { - // [Output-only] A message containing the operation state. + // Output-only. A message containing the operation state. State ClusterOperationStatus_State `protobuf:"varint,1,opt,name=state,enum=google.cloud.dataproc.v1.ClusterOperationStatus_State" json:"state,omitempty"` - // [Output-only] A message containing the detailed operation state. + // Output-only. A message containing the detailed operation state. InnerState string `protobuf:"bytes,2,opt,name=inner_state,json=innerState" json:"inner_state,omitempty"` - // [Output-only]A message containing any operation metadata details. + // Output-only.A message containing any operation metadata details. Details string `protobuf:"bytes,3,opt,name=details" json:"details,omitempty"` - // [Output-only] The time this state was entered. + // Output-only. The time this state was entered. StateStartTime *google_protobuf3.Timestamp `protobuf:"bytes,4,opt,name=state_start_time,json=stateStartTime" json:"state_start_time,omitempty"` } @@ -97,18 +97,22 @@ func (m *ClusterOperationStatus) GetStateStartTime() *google_protobuf3.Timestamp // Metadata describing the operation. type ClusterOperationMetadata struct { - // [Output-only] Name of the cluster for the operation. + // Output-only. Name of the cluster for the operation. ClusterName string `protobuf:"bytes,7,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` - // [Output-only] Cluster UUID for the operation. + // Output-only. Cluster UUID for the operation. ClusterUuid string `protobuf:"bytes,8,opt,name=cluster_uuid,json=clusterUuid" json:"cluster_uuid,omitempty"` - // [Output-only] Current operation status. + // Output-only. Current operation status. Status *ClusterOperationStatus `protobuf:"bytes,9,opt,name=status" json:"status,omitempty"` - // [Output-only] The previous operation status. + // Output-only. The previous operation status. StatusHistory []*ClusterOperationStatus `protobuf:"bytes,10,rep,name=status_history,json=statusHistory" json:"status_history,omitempty"` - // [Output-only] The operation type. + // Output-only. The operation type. OperationType string `protobuf:"bytes,11,opt,name=operation_type,json=operationType" json:"operation_type,omitempty"` - // [Output-only] Short description of operation. + // Output-only. Short description of operation. Description string `protobuf:"bytes,12,opt,name=description" json:"description,omitempty"` + // Output-only. Labels associated with the operation + Labels map[string]string `protobuf:"bytes,13,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Output-only. Errors encountered during operation execution. 
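// ----------------------------------------------------------------------------
// Illustrative sketch (not part of the generated file): reading the new Labels
// and Warnings fields once a ClusterOperationMetadata value has been
// unmarshaled from a long-running operation's metadata. It assumes this
// package is imported as dataproc and uses the standard log package; how the
// metadata itself is obtained is outside the sketch.
//
//    import "log"
//
//    func logOperationDiagnostics(md *dataproc.ClusterOperationMetadata) {
//        for key, value := range md.GetLabels() {
//            log.Printf("operation label %s=%s", key, value)
//        }
//        for _, warning := range md.GetWarnings() {
//            log.Printf("operation warning: %s", warning)
//        }
//    }
// ----------------------------------------------------------------------------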
+ Warnings []string `protobuf:"bytes,14,rep,name=warnings" json:"warnings,omitempty"` } func (m *ClusterOperationMetadata) Reset() { *m = ClusterOperationMetadata{} } @@ -158,6 +162,20 @@ func (m *ClusterOperationMetadata) GetDescription() string { return "" } +func (m *ClusterOperationMetadata) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *ClusterOperationMetadata) GetWarnings() []string { + if m != nil { + return m.Warnings + } + return nil +} + func init() { proto.RegisterType((*ClusterOperationStatus)(nil), "google.cloud.dataproc.v1.ClusterOperationStatus") proto.RegisterType((*ClusterOperationMetadata)(nil), "google.cloud.dataproc.v1.ClusterOperationMetadata") @@ -167,35 +185,40 @@ func init() { func init() { proto.RegisterFile("google/cloud/dataproc/v1/operations.proto", fileDescriptor2) } var fileDescriptor2 = []byte{ - // 479 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x53, 0x4f, 0x6f, 0xd3, 0x30, - 0x14, 0x27, 0xed, 0xb6, 0x6e, 0xce, 0x56, 0x2a, 0x1f, 0x90, 0x55, 0x26, 0x2d, 0x14, 0x21, 0x95, - 0x4b, 0xc2, 0x86, 0x84, 0x90, 0xb8, 0xa0, 0xd1, 0x89, 0x21, 0xc0, 0xad, 0xb2, 0x55, 0x93, 0xb8, - 0x44, 0x5e, 0x62, 0x82, 0xa5, 0xc4, 0xb6, 0x6c, 0x67, 0x52, 0x3f, 0x0e, 0xdf, 0x87, 0x0f, 0x85, - 0x6c, 0x27, 0x55, 0xd9, 0xe8, 0x01, 0x4e, 0xf1, 0x7b, 0xbf, 0x3f, 0x79, 0xbf, 0xe7, 0x04, 0xbc, - 0x2c, 0x85, 0x28, 0x2b, 0x9a, 0xe4, 0x95, 0x68, 0x8a, 0xa4, 0x20, 0x86, 0x48, 0x25, 0xf2, 0xe4, - 0xee, 0x34, 0x11, 0x92, 0x2a, 0x62, 0x98, 0xe0, 0x3a, 0x96, 0x4a, 0x18, 0x01, 0x91, 0xa7, 0xc6, - 0x8e, 0x1a, 0x77, 0xd4, 0xf8, 0xee, 0x74, 0x7c, 0xdc, 0x9a, 0x10, 0xc9, 0x12, 0xc2, 0xb9, 0x30, - 0x9b, 0xba, 0xf1, 0xf3, 0x16, 0xad, 0x04, 0x2f, 0x55, 0xc3, 0x39, 0xe3, 0xe5, 0x03, 0xf3, 0xf1, - 0xd3, 0x96, 0xe4, 0xaa, 0xdb, 0xe6, 0x7b, 0x42, 0x6b, 0x69, 0x56, 0x2d, 0x78, 0x72, 0x1f, 0x34, - 0xac, 0xa6, 0xda, 0x90, 0x5a, 0x7a, 0xc2, 0xe4, 0x67, 0x0f, 0x3c, 0xf9, 0x50, 0x35, 0xda, 0x50, - 0x35, 0xef, 0x9c, 0xaf, 0x0c, 0x31, 0x8d, 0x86, 0x5f, 0xc0, 0xae, 0x36, 0xc4, 0x50, 0x14, 0x44, - 0xc1, 0x74, 0x78, 0xf6, 0x26, 0xde, 0x96, 0x22, 0xfe, 0xbb, 0x41, 0x6c, 0x1f, 0x34, 0xf5, 0x26, - 0xf0, 0x04, 0x84, 0x8c, 0x73, 0xaa, 0x32, 0xef, 0xd9, 0x8b, 0x82, 0xe9, 0x41, 0x0a, 0x5c, 0xcb, - 0xf1, 0x20, 0x02, 0x83, 0x82, 0x1a, 0xc2, 0x2a, 0x8d, 0xfa, 0x0e, 0xec, 0x4a, 0x38, 0x03, 0x23, - 0x27, 0xb2, 0x52, 0x65, 0x32, 0x1b, 0x01, 0xed, 0x44, 0xc1, 0x34, 0x3c, 0x1b, 0x77, 0x33, 0x75, - 0xf9, 0xe2, 0xeb, 0x2e, 0x5f, 0x3a, 0x74, 0x9a, 0x2b, 0x2b, 0xb1, 0xcd, 0xc9, 0x5b, 0xb0, 0xeb, - 0x5f, 0x14, 0x82, 0xc1, 0x12, 0x7f, 0xc6, 0xf3, 0x1b, 0x3c, 0x7a, 0x64, 0x8b, 0xc5, 0x05, 0x9e, - 0x7d, 0xc2, 0x1f, 0x47, 0x81, 0x2d, 0xd2, 0x25, 0xc6, 0xb6, 0xe8, 0xc1, 0x7d, 0xb0, 0x33, 0x9b, - 0xe3, 0x8b, 0x51, 0x7f, 0xf2, 0xab, 0x07, 0xd0, 0xfd, 0x88, 0x5f, 0xa9, 0x21, 0x76, 0x05, 0xf0, - 0x19, 0x38, 0xcc, 0x3d, 0x96, 0x71, 0x52, 0x53, 0x34, 0x70, 0xb3, 0x87, 0x6d, 0x0f, 0x93, 0x9a, - 0x6e, 0x52, 0x9a, 0x86, 0x15, 0x68, 0xff, 0x0f, 0xca, 0xb2, 0x61, 0x05, 0xbc, 0x04, 0x7b, 0xda, - 0x2d, 0x0d, 0x1d, 0xb8, 0x60, 0xaf, 0xfe, 0x75, 0xd9, 0x69, 0xab, 0x87, 0x37, 0x60, 0xe8, 0x4f, - 0xd9, 0x0f, 0xa6, 0x8d, 0x50, 0x2b, 0x04, 0xa2, 0xfe, 0x7f, 0x39, 0x1e, 0x79, 0x9f, 0x4b, 0x6f, - 0x03, 0x5f, 0x80, 0xe1, 0xfa, 0xdb, 0xcb, 0xcc, 0x4a, 0x52, 0x14, 0xba, 0x1c, 0x47, 0xeb, 0xee, - 0xf5, 0x4a, 0x52, 0x18, 0x81, 0xb0, 0xa0, 0x3a, 0x57, 0x4c, 0xda, 0x16, 0x3a, 0xf4, 0x59, 0x37, - 0x5a, 0xe7, 0x1a, 0x1c, 0xe7, 0xa2, 0xde, 0x3a, 0xce, 0xf9, 0xe3, 
0xf5, 0x20, 0x7a, 0x61, 0xaf, - 0x75, 0x11, 0x7c, 0x7b, 0xdf, 0x92, 0x4b, 0x51, 0x11, 0x5e, 0xc6, 0x42, 0x95, 0x49, 0x49, 0xb9, - 0xbb, 0xf4, 0xc4, 0x43, 0x44, 0x32, 0xfd, 0xf0, 0x57, 0x7c, 0xd7, 0x9d, 0x6f, 0xf7, 0x1c, 0xf9, - 0xf5, 0xef, 0x00, 0x00, 0x00, 0xff, 0xff, 0x2c, 0x8c, 0x74, 0x3e, 0xb6, 0x03, 0x00, 0x00, + // 553 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x61, 0x6b, 0xd3, 0x40, + 0x18, 0x36, 0xcd, 0xb6, 0xb6, 0x97, 0xb5, 0x96, 0x43, 0xe4, 0x88, 0x83, 0xc5, 0x8a, 0x50, 0xbf, + 0x24, 0x6e, 0x82, 0x4c, 0x05, 0x91, 0xd9, 0xe2, 0xc4, 0x99, 0x96, 0x6c, 0x75, 0xe0, 0x97, 0x72, + 0x6d, 0xce, 0x18, 0x4c, 0xee, 0xc2, 0xdd, 0xa5, 0x92, 0x9f, 0xe3, 0xef, 0xf4, 0x8b, 0xdc, 0x5d, + 0x52, 0xea, 0xe6, 0x40, 0xfd, 0xd4, 0x7b, 0xde, 0xf7, 0x79, 0x9e, 0xbe, 0xcf, 0xcb, 0xdb, 0x82, + 0x27, 0x09, 0x63, 0x49, 0x46, 0x82, 0x55, 0xc6, 0xca, 0x38, 0x88, 0xb1, 0xc4, 0x05, 0x67, 0xab, + 0x60, 0x7d, 0x14, 0xb0, 0x82, 0x70, 0x2c, 0x53, 0x46, 0x85, 0x5f, 0x70, 0x26, 0x19, 0x44, 0x86, + 0xea, 0x6b, 0xaa, 0xdf, 0x50, 0xfd, 0xf5, 0x91, 0x7b, 0x50, 0x9b, 0xe0, 0x22, 0x0d, 0x30, 0xa5, + 0x4c, 0x6e, 0xeb, 0xdc, 0x47, 0x75, 0x37, 0x63, 0x34, 0xe1, 0x25, 0xa5, 0x29, 0x4d, 0x6e, 0x98, + 0xbb, 0x0f, 0x6a, 0x92, 0x46, 0xcb, 0xf2, 0x4b, 0x40, 0xf2, 0x42, 0x56, 0x75, 0xf3, 0xf0, 0x7a, + 0x53, 0xa6, 0x39, 0x11, 0x12, 0xe7, 0x85, 0x21, 0x0c, 0x7f, 0xb4, 0xc0, 0xfd, 0xb7, 0x59, 0x29, + 0x24, 0xe1, 0xd3, 0xc6, 0xf9, 0x42, 0x62, 0x59, 0x0a, 0x78, 0x0e, 0x76, 0x85, 0xc4, 0x92, 0x20, + 0xcb, 0xb3, 0x46, 0xfd, 0xe3, 0xe7, 0xfe, 0x6d, 0x29, 0xfc, 0x3f, 0x1b, 0xf8, 0xea, 0x83, 0x44, + 0xc6, 0x04, 0x1e, 0x02, 0x27, 0xa5, 0x94, 0xf0, 0x85, 0xf1, 0x6c, 0x79, 0xd6, 0xa8, 0x1b, 0x01, + 0x5d, 0xd2, 0x3c, 0x88, 0x40, 0x3b, 0x26, 0x12, 0xa7, 0x99, 0x40, 0xb6, 0x6e, 0x36, 0x10, 0x8e, + 0xc1, 0x40, 0x8b, 0x94, 0x94, 0xcb, 0x85, 0x8a, 0x80, 0x76, 0x3c, 0x6b, 0xe4, 0x1c, 0xbb, 0xcd, + 0x4c, 0x4d, 0x3e, 0xff, 0xb2, 0xc9, 0x17, 0xf5, 0xb5, 0xe6, 0x42, 0x49, 0x54, 0x71, 0x78, 0x02, + 0x76, 0xcd, 0x17, 0x39, 0xa0, 0x3d, 0x0f, 0x3f, 0x84, 0xd3, 0xab, 0x70, 0x70, 0x47, 0x81, 0xd9, + 0x24, 0x1c, 0xbf, 0x0f, 0xdf, 0x0d, 0x2c, 0x05, 0xa2, 0x79, 0x18, 0x2a, 0xd0, 0x82, 0x1d, 0xb0, + 0x33, 0x9e, 0x86, 0x93, 0x81, 0x3d, 0xfc, 0x69, 0x03, 0x74, 0x3d, 0xe2, 0x47, 0x22, 0xb1, 0x5a, + 0x01, 0x7c, 0x08, 0xf6, 0x57, 0xa6, 0xb7, 0xa0, 0x38, 0x27, 0xa8, 0xad, 0x67, 0x77, 0xea, 0x5a, + 0x88, 0x73, 0xb2, 0x4d, 0x29, 0xcb, 0x34, 0x46, 0x9d, 0xdf, 0x28, 0xf3, 0x32, 0x8d, 0xe1, 0x19, + 0xd8, 0x13, 0x7a, 0x69, 0xa8, 0xab, 0x83, 0x3d, 0xfd, 0xd7, 0x65, 0x47, 0xb5, 0x1e, 0x5e, 0x81, + 0xbe, 0x79, 0x2d, 0xbe, 0xa6, 0x42, 0x32, 0x5e, 0x21, 0xe0, 0xd9, 0xff, 0xe5, 0xd8, 0x33, 0x3e, + 0x67, 0xc6, 0x06, 0x3e, 0x06, 0xfd, 0xcd, 0xed, 0x2d, 0x64, 0x55, 0x10, 0xe4, 0xe8, 0x1c, 0xbd, + 0x4d, 0xf5, 0xb2, 0x2a, 0x08, 0xf4, 0x80, 0x13, 0x13, 0xb1, 0xe2, 0x69, 0xa1, 0x4a, 0x68, 0xdf, + 0x64, 0xdd, 0x2a, 0xc1, 0x4f, 0x60, 0x2f, 0xc3, 0x4b, 0x92, 0x09, 0xd4, 0xd3, 0x93, 0xbd, 0xfe, + 0xfb, 0xc9, 0x9a, 0xad, 0xfb, 0xe7, 0xda, 0x60, 0x42, 0x25, 0xaf, 0xa2, 0xda, 0x0d, 0xba, 0xa0, + 0xf3, 0x1d, 0x73, 0xf5, 0x23, 0x11, 0xa8, 0xef, 0xd9, 0xa3, 0x6e, 0xb4, 0xc1, 0xee, 0x0b, 0xe0, + 0x6c, 0x49, 0xe0, 0x00, 0xd8, 0xdf, 0x48, 0xa5, 0x0f, 0xbb, 0x1b, 0xa9, 0x27, 0xbc, 0x07, 0x76, + 0xd7, 0x38, 0x2b, 0x9b, 0xc3, 0x34, 0xe0, 0x65, 0xeb, 0xc4, 0x3a, 0x15, 0xe0, 0x60, 0xc5, 0xf2, + 0x5b, 0x67, 0x3c, 0xbd, 0xbb, 0x99, 0x4e, 0xcc, 0xd4, 0x15, 0xce, 0xac, 0xcf, 0x6f, 0x6a, 0x72, + 0xc2, 0x32, 0x4c, 0x13, 0x9f, 0xf1, 0x24, 0x48, 
0x08, 0xd5, 0x37, 0x1a, 0x98, 0x16, 0x2e, 0x52, + 0x71, 0xf3, 0x9f, 0xe3, 0x55, 0xf3, 0x5e, 0xee, 0x69, 0xf2, 0xb3, 0x5f, 0x01, 0x00, 0x00, 0xff, + 0xff, 0x09, 0x4c, 0x3d, 0x35, 0x65, 0x04, 0x00, 0x00, } diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2/clusters.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2/clusters.pb.go new file mode 100644 index 00000000..0ba92da1 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2/clusters.pb.go @@ -0,0 +1,1775 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/dataproc/v1beta2/clusters.proto + +/* +Package dataproc is a generated protocol buffer package. + +It is generated from these files: + google/cloud/dataproc/v1beta2/clusters.proto + google/cloud/dataproc/v1beta2/jobs.proto + google/cloud/dataproc/v1beta2/operations.proto + google/cloud/dataproc/v1beta2/workflow_templates.proto + +It has these top-level messages: + Cluster + ClusterConfig + GceClusterConfig + InstanceGroupConfig + ManagedGroupConfig + AcceleratorConfig + DiskConfig + LifecycleConfig + NodeInitializationAction + ClusterStatus + SoftwareConfig + ClusterMetrics + CreateClusterRequest + UpdateClusterRequest + DeleteClusterRequest + GetClusterRequest + ListClustersRequest + ListClustersResponse + DiagnoseClusterRequest + DiagnoseClusterResults + LoggingConfig + HadoopJob + SparkJob + PySparkJob + QueryList + HiveJob + SparkSqlJob + PigJob + JobPlacement + JobStatus + JobReference + YarnApplication + Job + JobScheduling + SubmitJobRequest + GetJobRequest + ListJobsRequest + UpdateJobRequest + ListJobsResponse + CancelJobRequest + DeleteJobRequest + ClusterOperationStatus + ClusterOperationMetadata + WorkflowTemplate + WorkflowTemplatePlacement + ManagedCluster + ClusterSelector + OrderedJob + WorkflowMetadata + ClusterOperation + WorkflowGraph + WorkflowNode + CreateWorkflowTemplateRequest + GetWorkflowTemplateRequest + InstantiateWorkflowTemplateRequest + UpdateWorkflowTemplateRequest + ListWorkflowTemplatesRequest + ListWorkflowTemplatesResponse + DeleteWorkflowTemplateRequest +*/ +package dataproc + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf3 "github.com/golang/protobuf/ptypes/duration" +import google_protobuf4 "google.golang.org/genproto/protobuf/field_mask" +import google_protobuf5 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// The cluster state. +type ClusterStatus_State int32 + +const ( + // The cluster state is unknown. + ClusterStatus_UNKNOWN ClusterStatus_State = 0 + // The cluster is being created and set up. It is not ready for use. + ClusterStatus_CREATING ClusterStatus_State = 1 + // The cluster is currently running and healthy. It is ready for use. 
+ ClusterStatus_RUNNING ClusterStatus_State = 2 + // The cluster encountered an error. It is not ready for use. + ClusterStatus_ERROR ClusterStatus_State = 3 + // The cluster is being deleted. It cannot be used. + ClusterStatus_DELETING ClusterStatus_State = 4 + // The cluster is being updated. It continues to accept and process jobs. + ClusterStatus_UPDATING ClusterStatus_State = 5 +) + +var ClusterStatus_State_name = map[int32]string{ + 0: "UNKNOWN", + 1: "CREATING", + 2: "RUNNING", + 3: "ERROR", + 4: "DELETING", + 5: "UPDATING", +} +var ClusterStatus_State_value = map[string]int32{ + "UNKNOWN": 0, + "CREATING": 1, + "RUNNING": 2, + "ERROR": 3, + "DELETING": 4, + "UPDATING": 5, +} + +func (x ClusterStatus_State) String() string { + return proto.EnumName(ClusterStatus_State_name, int32(x)) +} +func (ClusterStatus_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 0} } + +type ClusterStatus_Substate int32 + +const ( + ClusterStatus_UNSPECIFIED ClusterStatus_Substate = 0 + // The cluster is known to be in an unhealthy state + // (for example, critical daemons are not running or HDFS capacity is + // exhausted). + // + // Applies to RUNNING state. + ClusterStatus_UNHEALTHY ClusterStatus_Substate = 1 + // The agent-reported status is out of date (may occur if + // Cloud Dataproc loses communication with Agent). + // + // Applies to RUNNING state. + ClusterStatus_STALE_STATUS ClusterStatus_Substate = 2 +) + +var ClusterStatus_Substate_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "UNHEALTHY", + 2: "STALE_STATUS", +} +var ClusterStatus_Substate_value = map[string]int32{ + "UNSPECIFIED": 0, + "UNHEALTHY": 1, + "STALE_STATUS": 2, +} + +func (x ClusterStatus_Substate) String() string { + return proto.EnumName(ClusterStatus_Substate_name, int32(x)) +} +func (ClusterStatus_Substate) EnumDescriptor() ([]byte, []int) { return fileDescriptor0, []int{9, 1} } + +// Describes the identifying information, config, and status of +// a cluster of Google Compute Engine instances. +type Cluster struct { + // Required. The Google Cloud Platform project ID that the cluster belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Required. The cluster name. Cluster names within a project must be + // unique. Names of deleted clusters can be reused. + ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` + // Required. The cluster config. Note that Cloud Dataproc may set + // default values, and values may change when clusters are updated. + Config *ClusterConfig `protobuf:"bytes,3,opt,name=config" json:"config,omitempty"` + // Optional. The labels to associate with this cluster. + // Label **keys** must contain 1 to 63 characters, and must conform to + // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + // Label **values** may be empty, but, if present, must contain 1 to 63 + // characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + // No more than 32 labels can be associated with a cluster. + Labels map[string]string `protobuf:"bytes,8,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Output-only. Cluster status. + Status *ClusterStatus `protobuf:"bytes,4,opt,name=status" json:"status,omitempty"` + // Output-only. The previous cluster status. 
+ StatusHistory []*ClusterStatus `protobuf:"bytes,7,rep,name=status_history,json=statusHistory" json:"status_history,omitempty"` + // Output-only. A cluster UUID (Unique Universal Identifier). Cloud Dataproc + // generates this value when it creates the cluster. + ClusterUuid string `protobuf:"bytes,6,opt,name=cluster_uuid,json=clusterUuid" json:"cluster_uuid,omitempty"` + // Contains cluster daemon metrics such as HDFS and YARN stats. + // + // **Beta Feature**: This report is available for testing purposes only. It may + // be changed before final release. + Metrics *ClusterMetrics `protobuf:"bytes,9,opt,name=metrics" json:"metrics,omitempty"` +} + +func (m *Cluster) Reset() { *m = Cluster{} } +func (m *Cluster) String() string { return proto.CompactTextString(m) } +func (*Cluster) ProtoMessage() {} +func (*Cluster) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *Cluster) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *Cluster) GetClusterName() string { + if m != nil { + return m.ClusterName + } + return "" +} + +func (m *Cluster) GetConfig() *ClusterConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *Cluster) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Cluster) GetStatus() *ClusterStatus { + if m != nil { + return m.Status + } + return nil +} + +func (m *Cluster) GetStatusHistory() []*ClusterStatus { + if m != nil { + return m.StatusHistory + } + return nil +} + +func (m *Cluster) GetClusterUuid() string { + if m != nil { + return m.ClusterUuid + } + return "" +} + +func (m *Cluster) GetMetrics() *ClusterMetrics { + if m != nil { + return m.Metrics + } + return nil +} + +// The cluster config. +type ClusterConfig struct { + // Optional. A Google Cloud Storage staging bucket used for sharing generated + // SSH keys and config. If you do not specify a staging bucket, Cloud + // Dataproc will determine an appropriate Cloud Storage location (US, + // ASIA, or EU) for your cluster's staging bucket according to the Google + // Compute Engine zone where your cluster is deployed, and then it will create + // and manage this project-level, per-location bucket for you. + ConfigBucket string `protobuf:"bytes,1,opt,name=config_bucket,json=configBucket" json:"config_bucket,omitempty"` + // Required. The shared Google Compute Engine config settings for + // all instances in a cluster. + GceClusterConfig *GceClusterConfig `protobuf:"bytes,8,opt,name=gce_cluster_config,json=gceClusterConfig" json:"gce_cluster_config,omitempty"` + // Optional. The Google Compute Engine config settings for + // the master instance in a cluster. + MasterConfig *InstanceGroupConfig `protobuf:"bytes,9,opt,name=master_config,json=masterConfig" json:"master_config,omitempty"` + // Optional. The Google Compute Engine config settings for + // worker instances in a cluster. + WorkerConfig *InstanceGroupConfig `protobuf:"bytes,10,opt,name=worker_config,json=workerConfig" json:"worker_config,omitempty"` + // Optional. The Google Compute Engine config settings for + // additional worker instances in a cluster. + SecondaryWorkerConfig *InstanceGroupConfig `protobuf:"bytes,12,opt,name=secondary_worker_config,json=secondaryWorkerConfig" json:"secondary_worker_config,omitempty"` + // Optional. The config settings for software inside the cluster. 
+ SoftwareConfig *SoftwareConfig `protobuf:"bytes,13,opt,name=software_config,json=softwareConfig" json:"software_config,omitempty"` + // Optional. The config setting for auto delete cluster schedule. + LifecycleConfig *LifecycleConfig `protobuf:"bytes,14,opt,name=lifecycle_config,json=lifecycleConfig" json:"lifecycle_config,omitempty"` + // Optional. Commands to execute on each node after config is + // completed. By default, executables are run on master and all worker nodes. + // You can test a node's role metadata to run an executable on + // a master or worker node, as shown below using `curl` (you can also use `wget`): + // + // ROLE=$(curl -H Metadata-Flavor:Google http://metadata/computeMetadata/v1beta2/instance/attributes/dataproc-role) + // if [[ "${ROLE}" == 'Master' ]]; then + // ... master specific actions ... + // else + // ... worker specific actions ... + // fi + InitializationActions []*NodeInitializationAction `protobuf:"bytes,11,rep,name=initialization_actions,json=initializationActions" json:"initialization_actions,omitempty"` +} + +func (m *ClusterConfig) Reset() { *m = ClusterConfig{} } +func (m *ClusterConfig) String() string { return proto.CompactTextString(m) } +func (*ClusterConfig) ProtoMessage() {} +func (*ClusterConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *ClusterConfig) GetConfigBucket() string { + if m != nil { + return m.ConfigBucket + } + return "" +} + +func (m *ClusterConfig) GetGceClusterConfig() *GceClusterConfig { + if m != nil { + return m.GceClusterConfig + } + return nil +} + +func (m *ClusterConfig) GetMasterConfig() *InstanceGroupConfig { + if m != nil { + return m.MasterConfig + } + return nil +} + +func (m *ClusterConfig) GetWorkerConfig() *InstanceGroupConfig { + if m != nil { + return m.WorkerConfig + } + return nil +} + +func (m *ClusterConfig) GetSecondaryWorkerConfig() *InstanceGroupConfig { + if m != nil { + return m.SecondaryWorkerConfig + } + return nil +} + +func (m *ClusterConfig) GetSoftwareConfig() *SoftwareConfig { + if m != nil { + return m.SoftwareConfig + } + return nil +} + +func (m *ClusterConfig) GetLifecycleConfig() *LifecycleConfig { + if m != nil { + return m.LifecycleConfig + } + return nil +} + +func (m *ClusterConfig) GetInitializationActions() []*NodeInitializationAction { + if m != nil { + return m.InitializationActions + } + return nil +} + +// Common config settings for resources of Google Compute Engine cluster +// instances, applicable to all instances in the cluster. +type GceClusterConfig struct { + // Optional. The zone where the Google Compute Engine cluster will be located. + // On a create request, it is required in the "global" region. If omitted + // in a non-global Cloud Dataproc region, the service will pick a zone in the + // corresponding Compute Engine region. On a get request, zone will always be + // present. + // + // A full URL, partial URI, or short name are valid. Examples: + // + // * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/[zone]` + // * `projects/[project_id]/zones/[zone]` + // * `us-central1-f` + ZoneUri string `protobuf:"bytes,1,opt,name=zone_uri,json=zoneUri" json:"zone_uri,omitempty"` + // Optional. The Google Compute Engine network to be used for machine + // communications. Cannot be specified with subnetwork_uri. If neither + // `network_uri` nor `subnetwork_uri` is specified, the "default" network of + // the project is used, if it exists. 
Cannot be a "Custom Subnet Network" (see + // [Using Subnetworks](/compute/docs/subnetworks) for more information). + // + // A full URL, partial URI, or short name are valid. Examples: + // + // * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/global/default` + // * `projects/[project_id]/regions/global/default` + // * `default` + NetworkUri string `protobuf:"bytes,2,opt,name=network_uri,json=networkUri" json:"network_uri,omitempty"` + // Optional. The Google Compute Engine subnetwork to be used for machine + // communications. Cannot be specified with network_uri. + // + // A full URL, partial URI, or short name are valid. Examples: + // + // * `https://www.googleapis.com/compute/v1/projects/[project_id]/regions/us-east1/sub0` + // * `projects/[project_id]/regions/us-east1/sub0` + // * `sub0` + SubnetworkUri string `protobuf:"bytes,6,opt,name=subnetwork_uri,json=subnetworkUri" json:"subnetwork_uri,omitempty"` + // Optional. If true, all instances in the cluster will only have internal IP + // addresses. By default, clusters are not restricted to internal IP addresses, + // and will have ephemeral external IP addresses assigned to each instance. + // This `internal_ip_only` restriction can only be enabled for subnetwork + // enabled networks, and all off-cluster dependencies must be configured to be + // accessible without external IP addresses. + InternalIpOnly bool `protobuf:"varint,7,opt,name=internal_ip_only,json=internalIpOnly" json:"internal_ip_only,omitempty"` + // Optional. The service account of the instances. Defaults to the default + // Google Compute Engine service account. Custom service accounts need + // permissions equivalent to the folloing IAM roles: + // + // * roles/logging.logWriter + // * roles/storage.objectAdmin + // + // (see https://cloud.google.com/compute/docs/access/service-accounts#custom_service_accounts + // for more information). + // Example: `[account_id]@[project_id].iam.gserviceaccount.com` + ServiceAccount string `protobuf:"bytes,8,opt,name=service_account,json=serviceAccount" json:"service_account,omitempty"` + // Optional. The URIs of service account scopes to be included in Google + // Compute Engine instances. The following base set of scopes is always + // included: + // + // * https://www.googleapis.com/auth/cloud.useraccounts.readonly + // * https://www.googleapis.com/auth/devstorage.read_write + // * https://www.googleapis.com/auth/logging.write + // + // If no scopes are specified, the following defaults are also provided: + // + // * https://www.googleapis.com/auth/bigquery + // * https://www.googleapis.com/auth/bigtable.admin.table + // * https://www.googleapis.com/auth/bigtable.data + // * https://www.googleapis.com/auth/devstorage.full_control + ServiceAccountScopes []string `protobuf:"bytes,3,rep,name=service_account_scopes,json=serviceAccountScopes" json:"service_account_scopes,omitempty"` + // The Google Compute Engine tags to add to all instances (see + // [Tagging instances](/compute/docs/label-or-tag-resources#tags)). + Tags []string `protobuf:"bytes,4,rep,name=tags" json:"tags,omitempty"` + // The Google Compute Engine metadata entries to add to all instances (see + // [Project and instance metadata](https://cloud.google.com/compute/docs/storing-retrieving-metadata#project_and_instance_metadata)). 
+ Metadata map[string]string `protobuf:"bytes,5,rep,name=metadata" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *GceClusterConfig) Reset() { *m = GceClusterConfig{} } +func (m *GceClusterConfig) String() string { return proto.CompactTextString(m) } +func (*GceClusterConfig) ProtoMessage() {} +func (*GceClusterConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *GceClusterConfig) GetZoneUri() string { + if m != nil { + return m.ZoneUri + } + return "" +} + +func (m *GceClusterConfig) GetNetworkUri() string { + if m != nil { + return m.NetworkUri + } + return "" +} + +func (m *GceClusterConfig) GetSubnetworkUri() string { + if m != nil { + return m.SubnetworkUri + } + return "" +} + +func (m *GceClusterConfig) GetInternalIpOnly() bool { + if m != nil { + return m.InternalIpOnly + } + return false +} + +func (m *GceClusterConfig) GetServiceAccount() string { + if m != nil { + return m.ServiceAccount + } + return "" +} + +func (m *GceClusterConfig) GetServiceAccountScopes() []string { + if m != nil { + return m.ServiceAccountScopes + } + return nil +} + +func (m *GceClusterConfig) GetTags() []string { + if m != nil { + return m.Tags + } + return nil +} + +func (m *GceClusterConfig) GetMetadata() map[string]string { + if m != nil { + return m.Metadata + } + return nil +} + +// Optional. The config settings for Google Compute Engine resources in +// an instance group, such as a master or worker group. +type InstanceGroupConfig struct { + // Optional. The number of VM instances in the instance group. + // For master instance groups, must be set to 1. + NumInstances int32 `protobuf:"varint,1,opt,name=num_instances,json=numInstances" json:"num_instances,omitempty"` + // Optional. The list of instance names. Cloud Dataproc derives the names from + // `cluster_name`, `num_instances`, and the instance group if not set by user + // (recommended practice is to let Cloud Dataproc derive the name). + InstanceNames []string `protobuf:"bytes,2,rep,name=instance_names,json=instanceNames" json:"instance_names,omitempty"` + // Output-only. The Google Compute Engine image resource used for cluster + // instances. Inferred from `SoftwareConfig.image_version`. + ImageUri string `protobuf:"bytes,3,opt,name=image_uri,json=imageUri" json:"image_uri,omitempty"` + // Optional. The Google Compute Engine machine type used for cluster instances. + // + // A full URL, partial URI, or short name are valid. Examples: + // + // * `https://www.googleapis.com/compute/v1/projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` + // * `projects/[project_id]/zones/us-east1-a/machineTypes/n1-standard-2` + // * `n1-standard-2` + MachineTypeUri string `protobuf:"bytes,4,opt,name=machine_type_uri,json=machineTypeUri" json:"machine_type_uri,omitempty"` + // Optional. Disk option config settings. + DiskConfig *DiskConfig `protobuf:"bytes,5,opt,name=disk_config,json=diskConfig" json:"disk_config,omitempty"` + // Optional. Specifies that this instance group contains preemptible instances. + IsPreemptible bool `protobuf:"varint,6,opt,name=is_preemptible,json=isPreemptible" json:"is_preemptible,omitempty"` + // Output-only. The config for Google Compute Engine Instance Group + // Manager that manages this group. + // This is only used for preemptible instance groups. 
+ ManagedGroupConfig *ManagedGroupConfig `protobuf:"bytes,7,opt,name=managed_group_config,json=managedGroupConfig" json:"managed_group_config,omitempty"` + // Optional. The Google Compute Engine accelerator configuration for these + // instances. + // + // **Beta Feature**: This feature is still under development. It may be + // changed before final release. + Accelerators []*AcceleratorConfig `protobuf:"bytes,8,rep,name=accelerators" json:"accelerators,omitempty"` +} + +func (m *InstanceGroupConfig) Reset() { *m = InstanceGroupConfig{} } +func (m *InstanceGroupConfig) String() string { return proto.CompactTextString(m) } +func (*InstanceGroupConfig) ProtoMessage() {} +func (*InstanceGroupConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *InstanceGroupConfig) GetNumInstances() int32 { + if m != nil { + return m.NumInstances + } + return 0 +} + +func (m *InstanceGroupConfig) GetInstanceNames() []string { + if m != nil { + return m.InstanceNames + } + return nil +} + +func (m *InstanceGroupConfig) GetImageUri() string { + if m != nil { + return m.ImageUri + } + return "" +} + +func (m *InstanceGroupConfig) GetMachineTypeUri() string { + if m != nil { + return m.MachineTypeUri + } + return "" +} + +func (m *InstanceGroupConfig) GetDiskConfig() *DiskConfig { + if m != nil { + return m.DiskConfig + } + return nil +} + +func (m *InstanceGroupConfig) GetIsPreemptible() bool { + if m != nil { + return m.IsPreemptible + } + return false +} + +func (m *InstanceGroupConfig) GetManagedGroupConfig() *ManagedGroupConfig { + if m != nil { + return m.ManagedGroupConfig + } + return nil +} + +func (m *InstanceGroupConfig) GetAccelerators() []*AcceleratorConfig { + if m != nil { + return m.Accelerators + } + return nil +} + +// Specifies the resources used to actively manage an instance group. +type ManagedGroupConfig struct { + // Output-only. The name of the Instance Template used for the Managed + // Instance Group. + InstanceTemplateName string `protobuf:"bytes,1,opt,name=instance_template_name,json=instanceTemplateName" json:"instance_template_name,omitempty"` + // Output-only. The name of the Instance Group Manager for this group. + InstanceGroupManagerName string `protobuf:"bytes,2,opt,name=instance_group_manager_name,json=instanceGroupManagerName" json:"instance_group_manager_name,omitempty"` +} + +func (m *ManagedGroupConfig) Reset() { *m = ManagedGroupConfig{} } +func (m *ManagedGroupConfig) String() string { return proto.CompactTextString(m) } +func (*ManagedGroupConfig) ProtoMessage() {} +func (*ManagedGroupConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ManagedGroupConfig) GetInstanceTemplateName() string { + if m != nil { + return m.InstanceTemplateName + } + return "" +} + +func (m *ManagedGroupConfig) GetInstanceGroupManagerName() string { + if m != nil { + return m.InstanceGroupManagerName + } + return "" +} + +// Specifies the type and number of accelerator cards attached to the instances +// of an instance group (see [GPUs on Compute Engine](/compute/docs/gpus/)). +type AcceleratorConfig struct { + // Full URL, partial URI, or short name of the accelerator type resource to + // expose to this instance. 
See [Google Compute Engine AcceleratorTypes]( + // /compute/docs/reference/beta/acceleratorTypes) + // + // Examples + // * `https://www.googleapis.com/compute/beta/projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` + // * `projects/[project_id]/zones/us-east1-a/acceleratorTypes/nvidia-tesla-k80` + // * `nvidia-tesla-k80` + AcceleratorTypeUri string `protobuf:"bytes,1,opt,name=accelerator_type_uri,json=acceleratorTypeUri" json:"accelerator_type_uri,omitempty"` + // The number of the accelerator cards of this type exposed to this instance. + AcceleratorCount int32 `protobuf:"varint,2,opt,name=accelerator_count,json=acceleratorCount" json:"accelerator_count,omitempty"` +} + +func (m *AcceleratorConfig) Reset() { *m = AcceleratorConfig{} } +func (m *AcceleratorConfig) String() string { return proto.CompactTextString(m) } +func (*AcceleratorConfig) ProtoMessage() {} +func (*AcceleratorConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *AcceleratorConfig) GetAcceleratorTypeUri() string { + if m != nil { + return m.AcceleratorTypeUri + } + return "" +} + +func (m *AcceleratorConfig) GetAcceleratorCount() int32 { + if m != nil { + return m.AcceleratorCount + } + return 0 +} + +// Specifies the config of disk options for a group of VM instances. +type DiskConfig struct { + // Optional. Size in GB of the boot disk (default is 500GB). + BootDiskSizeGb int32 `protobuf:"varint,1,opt,name=boot_disk_size_gb,json=bootDiskSizeGb" json:"boot_disk_size_gb,omitempty"` + // Optional. Number of attached SSDs, from 0 to 4 (default is 0). + // If SSDs are not attached, the boot disk is used to store runtime logs and + // [HDFS](https://hadoop.apache.org/docs/r1.2.1/hdfs_user_guide.html) data. + // If one or more SSDs are attached, this runtime bulk + // data is spread across them, and the boot disk contains only basic + // config and installed binaries. + NumLocalSsds int32 `protobuf:"varint,2,opt,name=num_local_ssds,json=numLocalSsds" json:"num_local_ssds,omitempty"` +} + +func (m *DiskConfig) Reset() { *m = DiskConfig{} } +func (m *DiskConfig) String() string { return proto.CompactTextString(m) } +func (*DiskConfig) ProtoMessage() {} +func (*DiskConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *DiskConfig) GetBootDiskSizeGb() int32 { + if m != nil { + return m.BootDiskSizeGb + } + return 0 +} + +func (m *DiskConfig) GetNumLocalSsds() int32 { + if m != nil { + return m.NumLocalSsds + } + return 0 +} + +// Specifies the cluster auto delete related schedule configuration. +type LifecycleConfig struct { + // Optional. The longest duration that cluster would keep alive while staying + // idle; passing this threshold will cause cluster to be auto-deleted. 
+ IdleDeleteTtl *google_protobuf3.Duration `protobuf:"bytes,1,opt,name=idle_delete_ttl,json=idleDeleteTtl" json:"idle_delete_ttl,omitempty"` + // Types that are valid to be assigned to Ttl: + // *LifecycleConfig_AutoDeleteTime + // *LifecycleConfig_AutoDeleteTtl + Ttl isLifecycleConfig_Ttl `protobuf_oneof:"ttl"` +} + +func (m *LifecycleConfig) Reset() { *m = LifecycleConfig{} } +func (m *LifecycleConfig) String() string { return proto.CompactTextString(m) } +func (*LifecycleConfig) ProtoMessage() {} +func (*LifecycleConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +type isLifecycleConfig_Ttl interface { + isLifecycleConfig_Ttl() +} + +type LifecycleConfig_AutoDeleteTime struct { + AutoDeleteTime *google_protobuf5.Timestamp `protobuf:"bytes,2,opt,name=auto_delete_time,json=autoDeleteTime,oneof"` +} +type LifecycleConfig_AutoDeleteTtl struct { + AutoDeleteTtl *google_protobuf3.Duration `protobuf:"bytes,3,opt,name=auto_delete_ttl,json=autoDeleteTtl,oneof"` +} + +func (*LifecycleConfig_AutoDeleteTime) isLifecycleConfig_Ttl() {} +func (*LifecycleConfig_AutoDeleteTtl) isLifecycleConfig_Ttl() {} + +func (m *LifecycleConfig) GetTtl() isLifecycleConfig_Ttl { + if m != nil { + return m.Ttl + } + return nil +} + +func (m *LifecycleConfig) GetIdleDeleteTtl() *google_protobuf3.Duration { + if m != nil { + return m.IdleDeleteTtl + } + return nil +} + +func (m *LifecycleConfig) GetAutoDeleteTime() *google_protobuf5.Timestamp { + if x, ok := m.GetTtl().(*LifecycleConfig_AutoDeleteTime); ok { + return x.AutoDeleteTime + } + return nil +} + +func (m *LifecycleConfig) GetAutoDeleteTtl() *google_protobuf3.Duration { + if x, ok := m.GetTtl().(*LifecycleConfig_AutoDeleteTtl); ok { + return x.AutoDeleteTtl + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*LifecycleConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _LifecycleConfig_OneofMarshaler, _LifecycleConfig_OneofUnmarshaler, _LifecycleConfig_OneofSizer, []interface{}{ + (*LifecycleConfig_AutoDeleteTime)(nil), + (*LifecycleConfig_AutoDeleteTtl)(nil), + } +} + +func _LifecycleConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*LifecycleConfig) + // ttl + switch x := m.Ttl.(type) { + case *LifecycleConfig_AutoDeleteTime: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.AutoDeleteTime); err != nil { + return err + } + case *LifecycleConfig_AutoDeleteTtl: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.AutoDeleteTtl); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("LifecycleConfig.Ttl has unexpected type %T", x) + } + return nil +} + +func _LifecycleConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*LifecycleConfig) + switch tag { + case 2: // ttl.auto_delete_time + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf5.Timestamp) + err := b.DecodeMessage(msg) + m.Ttl = &LifecycleConfig_AutoDeleteTime{msg} + return true, err + case 3: // ttl.auto_delete_ttl + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_protobuf3.Duration) + err := b.DecodeMessage(msg) + m.Ttl = &LifecycleConfig_AutoDeleteTtl{msg} + return true, err + default: + return false, nil + } +} + +func _LifecycleConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*LifecycleConfig) + // ttl + switch x := m.Ttl.(type) { + case *LifecycleConfig_AutoDeleteTime: + s := proto.Size(x.AutoDeleteTime) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *LifecycleConfig_AutoDeleteTtl: + s := proto.Size(x.AutoDeleteTtl) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Specifies an executable to run on a fully configured node and a +// timeout period for executable completion. +type NodeInitializationAction struct { + // Required. Google Cloud Storage URI of executable file. + ExecutableFile string `protobuf:"bytes,1,opt,name=executable_file,json=executableFile" json:"executable_file,omitempty"` + // Optional. Amount of time executable has to complete. Default is + // 10 minutes. Cluster creation fails with an explanatory error message (the + // name of the executable that caused the error and the exceeded timeout + // period) if the executable is not completed at end of the timeout period. 
+ ExecutionTimeout *google_protobuf3.Duration `protobuf:"bytes,2,opt,name=execution_timeout,json=executionTimeout" json:"execution_timeout,omitempty"` +} + +func (m *NodeInitializationAction) Reset() { *m = NodeInitializationAction{} } +func (m *NodeInitializationAction) String() string { return proto.CompactTextString(m) } +func (*NodeInitializationAction) ProtoMessage() {} +func (*NodeInitializationAction) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *NodeInitializationAction) GetExecutableFile() string { + if m != nil { + return m.ExecutableFile + } + return "" +} + +func (m *NodeInitializationAction) GetExecutionTimeout() *google_protobuf3.Duration { + if m != nil { + return m.ExecutionTimeout + } + return nil +} + +// The status of a cluster and its instances. +type ClusterStatus struct { + // Output-only. The cluster's state. + State ClusterStatus_State `protobuf:"varint,1,opt,name=state,enum=google.cloud.dataproc.v1beta2.ClusterStatus_State" json:"state,omitempty"` + // Output-only. Optional details of cluster's state. + Detail string `protobuf:"bytes,2,opt,name=detail" json:"detail,omitempty"` + // Output-only. Time when this state was entered. + StateStartTime *google_protobuf5.Timestamp `protobuf:"bytes,3,opt,name=state_start_time,json=stateStartTime" json:"state_start_time,omitempty"` + // Output-only. Additional state information that includes + // status reported by the agent. + Substate ClusterStatus_Substate `protobuf:"varint,4,opt,name=substate,enum=google.cloud.dataproc.v1beta2.ClusterStatus_Substate" json:"substate,omitempty"` +} + +func (m *ClusterStatus) Reset() { *m = ClusterStatus{} } +func (m *ClusterStatus) String() string { return proto.CompactTextString(m) } +func (*ClusterStatus) ProtoMessage() {} +func (*ClusterStatus) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *ClusterStatus) GetState() ClusterStatus_State { + if m != nil { + return m.State + } + return ClusterStatus_UNKNOWN +} + +func (m *ClusterStatus) GetDetail() string { + if m != nil { + return m.Detail + } + return "" +} + +func (m *ClusterStatus) GetStateStartTime() *google_protobuf5.Timestamp { + if m != nil { + return m.StateStartTime + } + return nil +} + +func (m *ClusterStatus) GetSubstate() ClusterStatus_Substate { + if m != nil { + return m.Substate + } + return ClusterStatus_UNSPECIFIED +} + +// Specifies the selection and config of software inside the cluster. +type SoftwareConfig struct { + // Optional. The version of software inside the cluster. It must match the + // regular expression `[0-9]+\.[0-9]+`. If unspecified, it defaults to the + // latest version (see [Cloud Dataproc Versioning](/dataproc/versioning)). + ImageVersion string `protobuf:"bytes,1,opt,name=image_version,json=imageVersion" json:"image_version,omitempty"` + // Optional. The properties to set on daemon config files. + // + // Property keys are specified in `prefix:property` format, such as + // `core:fs.defaultFS`. The following are supported prefixes + // and their mappings: + // + // * capacity-scheduler: `capacity-scheduler.xml` + // * core: `core-site.xml` + // * distcp: `distcp-default.xml` + // * hdfs: `hdfs-site.xml` + // * hive: `hive-site.xml` + // * mapred: `mapred-site.xml` + // * pig: `pig.properties` + // * spark: `spark-defaults.conf` + // * yarn: `yarn-site.xml` + // + // For more information, see + // [Cluster properties](/dataproc/docs/concepts/cluster-properties). 
+ Properties map[string]string `protobuf:"bytes,2,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *SoftwareConfig) Reset() { *m = SoftwareConfig{} } +func (m *SoftwareConfig) String() string { return proto.CompactTextString(m) } +func (*SoftwareConfig) ProtoMessage() {} +func (*SoftwareConfig) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *SoftwareConfig) GetImageVersion() string { + if m != nil { + return m.ImageVersion + } + return "" +} + +func (m *SoftwareConfig) GetProperties() map[string]string { + if m != nil { + return m.Properties + } + return nil +} + +// Contains cluster daemon metrics, such as HDFS and YARN stats. +// +// **Beta Feature**: This report is available for testing purposes only. It may +// be changed before final release. +type ClusterMetrics struct { + // The HDFS metrics. + HdfsMetrics map[string]int64 `protobuf:"bytes,1,rep,name=hdfs_metrics,json=hdfsMetrics" json:"hdfs_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` + // The YARN metrics. + YarnMetrics map[string]int64 `protobuf:"bytes,2,rep,name=yarn_metrics,json=yarnMetrics" json:"yarn_metrics,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value"` +} + +func (m *ClusterMetrics) Reset() { *m = ClusterMetrics{} } +func (m *ClusterMetrics) String() string { return proto.CompactTextString(m) } +func (*ClusterMetrics) ProtoMessage() {} +func (*ClusterMetrics) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *ClusterMetrics) GetHdfsMetrics() map[string]int64 { + if m != nil { + return m.HdfsMetrics + } + return nil +} + +func (m *ClusterMetrics) GetYarnMetrics() map[string]int64 { + if m != nil { + return m.YarnMetrics + } + return nil +} + +// A request to create a cluster. +type CreateClusterRequest struct { + // Required. The ID of the Google Cloud Platform project that the cluster + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Required. The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` + // Required. The cluster to create. + Cluster *Cluster `protobuf:"bytes,2,opt,name=cluster" json:"cluster,omitempty"` +} + +func (m *CreateClusterRequest) Reset() { *m = CreateClusterRequest{} } +func (m *CreateClusterRequest) String() string { return proto.CompactTextString(m) } +func (*CreateClusterRequest) ProtoMessage() {} +func (*CreateClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{12} } + +func (m *CreateClusterRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *CreateClusterRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *CreateClusterRequest) GetCluster() *Cluster { + if m != nil { + return m.Cluster + } + return nil +} + +// A request to update a cluster. +type UpdateClusterRequest struct { + // Required. The ID of the Google Cloud Platform project the + // cluster belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Required. The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,5,opt,name=region" json:"region,omitempty"` + // Required. The cluster name. 
+ ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` + // Required. The changes to the cluster. + Cluster *Cluster `protobuf:"bytes,3,opt,name=cluster" json:"cluster,omitempty"` + // Optional. Timeout for graceful YARN decomissioning. Graceful + // decommissioning allows removing nodes from the cluster without + // interrupting jobs in progress. Timeout specifies how long to wait for jobs + // in progress to finish before forcefully removing nodes (and potentially + // interrupting jobs). Default timeout is 0 (for forceful decommission), and + // the maximum allowed timeout is 1 day. + // + // Only supported on Dataproc image versions 1.2 and higher. + GracefulDecommissionTimeout *google_protobuf3.Duration `protobuf:"bytes,6,opt,name=graceful_decommission_timeout,json=gracefulDecommissionTimeout" json:"graceful_decommission_timeout,omitempty"` + // Required. Specifies the path, relative to Cluster, of + // the field to update. For example, to change the number of workers + // in a cluster to 5, the update_mask parameter would be + // specified as config.worker_config.num_instances, + // and the `PATCH` request body would specify the new value, as follows: + // + // { + // "config":{ + // "workerConfig":{ + // "numInstances":"5" + // } + // } + // } + // Similarly, to change the number of preemptible workers in a cluster to 5, the + // update_mask parameter would be config.secondary_worker_config.num_instances, + // and the `PATCH` request body would be set as follows: + // + // { + // "config":{ + // "secondaryWorkerConfig":{ + // "numInstances":"5" + // } + // } + // } + // Note: currently only some fields can be updated: + // |Mask|Purpose| + // |`labels`|Updates labels| + // |`config.worker_config.num_instances`|Resize primary worker group| + // |`config.secondary_worker_config.num_instances`|Resize secondary worker group| + UpdateMask *google_protobuf4.FieldMask `protobuf:"bytes,4,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateClusterRequest) Reset() { *m = UpdateClusterRequest{} } +func (m *UpdateClusterRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateClusterRequest) ProtoMessage() {} +func (*UpdateClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{13} } + +func (m *UpdateClusterRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *UpdateClusterRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *UpdateClusterRequest) GetClusterName() string { + if m != nil { + return m.ClusterName + } + return "" +} + +func (m *UpdateClusterRequest) GetCluster() *Cluster { + if m != nil { + return m.Cluster + } + return nil +} + +func (m *UpdateClusterRequest) GetGracefulDecommissionTimeout() *google_protobuf3.Duration { + if m != nil { + return m.GracefulDecommissionTimeout + } + return nil +} + +func (m *UpdateClusterRequest) GetUpdateMask() *google_protobuf4.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +// A request to delete a cluster. +type DeleteClusterRequest struct { + // Required. The ID of the Google Cloud Platform project that the cluster + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Required. The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` + // Required. 
The cluster name. + ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` + // Optional. Specifying the `cluster_uuid` means the RPC should fail + // (with error NOT_FOUND) if cluster with specified UUID does not exist. + ClusterUuid string `protobuf:"bytes,4,opt,name=cluster_uuid,json=clusterUuid" json:"cluster_uuid,omitempty"` +} + +func (m *DeleteClusterRequest) Reset() { *m = DeleteClusterRequest{} } +func (m *DeleteClusterRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteClusterRequest) ProtoMessage() {} +func (*DeleteClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{14} } + +func (m *DeleteClusterRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *DeleteClusterRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *DeleteClusterRequest) GetClusterName() string { + if m != nil { + return m.ClusterName + } + return "" +} + +func (m *DeleteClusterRequest) GetClusterUuid() string { + if m != nil { + return m.ClusterUuid + } + return "" +} + +// Request to get the resource representation for a cluster in a project. +type GetClusterRequest struct { + // Required. The ID of the Google Cloud Platform project that the cluster + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Required. The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` + // Required. The cluster name. + ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` +} + +func (m *GetClusterRequest) Reset() { *m = GetClusterRequest{} } +func (m *GetClusterRequest) String() string { return proto.CompactTextString(m) } +func (*GetClusterRequest) ProtoMessage() {} +func (*GetClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *GetClusterRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *GetClusterRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *GetClusterRequest) GetClusterName() string { + if m != nil { + return m.ClusterName + } + return "" +} + +// A request to list the clusters in a project. +type ListClustersRequest struct { + // Required. The ID of the Google Cloud Platform project that the cluster + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Required. The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,4,opt,name=region" json:"region,omitempty"` + // Optional. A filter constraining the clusters to list. Filters are + // case-sensitive and have the following syntax: + // + // field = value [AND [field = value]] ... + // + // where **field** is one of `status.state`, `clusterName`, or `labels.[KEY]`, + // and `[KEY]` is a label key. **value** can be `*` to match all values. + // `status.state` can be one of the following: `ACTIVE`, `INACTIVE`, + // `CREATING`, `RUNNING`, `ERROR`, `DELETING`, or `UPDATING`. `ACTIVE` + // contains the `CREATING`, `UPDATING`, and `RUNNING` states. `INACTIVE` + // contains the `DELETING` and `ERROR` states. + // `clusterName` is the name of the cluster provided at creation time. 
+ // Only the logical `AND` operator is supported; space-separated items are + // treated as having an implicit `AND` operator. + // + // Example filter: + // + // status.state = ACTIVE AND clusterName = mycluster + // AND labels.env = staging AND labels.starred = * + Filter string `protobuf:"bytes,5,opt,name=filter" json:"filter,omitempty"` + // Optional. The standard List page size. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Optional. The standard List page token. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListClustersRequest) Reset() { *m = ListClustersRequest{} } +func (m *ListClustersRequest) String() string { return proto.CompactTextString(m) } +func (*ListClustersRequest) ProtoMessage() {} +func (*ListClustersRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *ListClustersRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *ListClustersRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *ListClustersRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +func (m *ListClustersRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListClustersRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The list of all clusters in a project. +type ListClustersResponse struct { + // Output-only. The clusters in the project. + Clusters []*Cluster `protobuf:"bytes,1,rep,name=clusters" json:"clusters,omitempty"` + // Output-only. This token is included in the response if there are more + // results to fetch. To fetch additional results, provide this value as the + // `page_token` in a subsequent ListClustersRequest. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListClustersResponse) Reset() { *m = ListClustersResponse{} } +func (m *ListClustersResponse) String() string { return proto.CompactTextString(m) } +func (*ListClustersResponse) ProtoMessage() {} +func (*ListClustersResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{17} } + +func (m *ListClustersResponse) GetClusters() []*Cluster { + if m != nil { + return m.Clusters + } + return nil +} + +func (m *ListClustersResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// A request to collect cluster diagnostic information. +type DiagnoseClusterRequest struct { + // Required. The ID of the Google Cloud Platform project that the cluster + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Required. The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` + // Required. The cluster name. 
+ ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` +} + +func (m *DiagnoseClusterRequest) Reset() { *m = DiagnoseClusterRequest{} } +func (m *DiagnoseClusterRequest) String() string { return proto.CompactTextString(m) } +func (*DiagnoseClusterRequest) ProtoMessage() {} +func (*DiagnoseClusterRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{18} } + +func (m *DiagnoseClusterRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *DiagnoseClusterRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *DiagnoseClusterRequest) GetClusterName() string { + if m != nil { + return m.ClusterName + } + return "" +} + +// The location of diagnostic output. +type DiagnoseClusterResults struct { + // Output-only. The Google Cloud Storage URI of the diagnostic output. + // The output report is a plain text file with a summary of collected + // diagnostics. + OutputUri string `protobuf:"bytes,1,opt,name=output_uri,json=outputUri" json:"output_uri,omitempty"` +} + +func (m *DiagnoseClusterResults) Reset() { *m = DiagnoseClusterResults{} } +func (m *DiagnoseClusterResults) String() string { return proto.CompactTextString(m) } +func (*DiagnoseClusterResults) ProtoMessage() {} +func (*DiagnoseClusterResults) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{19} } + +func (m *DiagnoseClusterResults) GetOutputUri() string { + if m != nil { + return m.OutputUri + } + return "" +} + +func init() { + proto.RegisterType((*Cluster)(nil), "google.cloud.dataproc.v1beta2.Cluster") + proto.RegisterType((*ClusterConfig)(nil), "google.cloud.dataproc.v1beta2.ClusterConfig") + proto.RegisterType((*GceClusterConfig)(nil), "google.cloud.dataproc.v1beta2.GceClusterConfig") + proto.RegisterType((*InstanceGroupConfig)(nil), "google.cloud.dataproc.v1beta2.InstanceGroupConfig") + proto.RegisterType((*ManagedGroupConfig)(nil), "google.cloud.dataproc.v1beta2.ManagedGroupConfig") + proto.RegisterType((*AcceleratorConfig)(nil), "google.cloud.dataproc.v1beta2.AcceleratorConfig") + proto.RegisterType((*DiskConfig)(nil), "google.cloud.dataproc.v1beta2.DiskConfig") + proto.RegisterType((*LifecycleConfig)(nil), "google.cloud.dataproc.v1beta2.LifecycleConfig") + proto.RegisterType((*NodeInitializationAction)(nil), "google.cloud.dataproc.v1beta2.NodeInitializationAction") + proto.RegisterType((*ClusterStatus)(nil), "google.cloud.dataproc.v1beta2.ClusterStatus") + proto.RegisterType((*SoftwareConfig)(nil), "google.cloud.dataproc.v1beta2.SoftwareConfig") + proto.RegisterType((*ClusterMetrics)(nil), "google.cloud.dataproc.v1beta2.ClusterMetrics") + proto.RegisterType((*CreateClusterRequest)(nil), "google.cloud.dataproc.v1beta2.CreateClusterRequest") + proto.RegisterType((*UpdateClusterRequest)(nil), "google.cloud.dataproc.v1beta2.UpdateClusterRequest") + proto.RegisterType((*DeleteClusterRequest)(nil), "google.cloud.dataproc.v1beta2.DeleteClusterRequest") + proto.RegisterType((*GetClusterRequest)(nil), "google.cloud.dataproc.v1beta2.GetClusterRequest") + proto.RegisterType((*ListClustersRequest)(nil), "google.cloud.dataproc.v1beta2.ListClustersRequest") + proto.RegisterType((*ListClustersResponse)(nil), "google.cloud.dataproc.v1beta2.ListClustersResponse") + proto.RegisterType((*DiagnoseClusterRequest)(nil), "google.cloud.dataproc.v1beta2.DiagnoseClusterRequest") + proto.RegisterType((*DiagnoseClusterResults)(nil), "google.cloud.dataproc.v1beta2.DiagnoseClusterResults") + 
proto.RegisterEnum("google.cloud.dataproc.v1beta2.ClusterStatus_State", ClusterStatus_State_name, ClusterStatus_State_value) + proto.RegisterEnum("google.cloud.dataproc.v1beta2.ClusterStatus_Substate", ClusterStatus_Substate_name, ClusterStatus_Substate_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for ClusterController service + +type ClusterControllerClient interface { + // Creates a cluster in a project. + CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Updates a cluster in a project. + UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Deletes a cluster in a project. + DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Gets the resource representation for a cluster in a project. + GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) + // Lists all regions/{region}/clusters in a project. + ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) + // Gets cluster diagnostic information. + // After the operation completes, the Operation.response field + // contains `DiagnoseClusterOutputLocation`. + DiagnoseCluster(ctx context.Context, in *DiagnoseClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) +} + +type clusterControllerClient struct { + cc *grpc.ClientConn +} + +func NewClusterControllerClient(cc *grpc.ClientConn) ClusterControllerClient { + return &clusterControllerClient{cc} +} + +func (c *clusterControllerClient) CreateCluster(ctx context.Context, in *CreateClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.ClusterController/CreateCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterControllerClient) UpdateCluster(ctx context.Context, in *UpdateClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.ClusterController/UpdateCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterControllerClient) DeleteCluster(ctx context.Context, in *DeleteClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.ClusterController/DeleteCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterControllerClient) GetCluster(ctx context.Context, in *GetClusterRequest, opts ...grpc.CallOption) (*Cluster, error) { + out := new(Cluster) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.ClusterController/GetCluster", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterControllerClient) ListClusters(ctx context.Context, in *ListClustersRequest, opts ...grpc.CallOption) (*ListClustersResponse, error) { + out := new(ListClustersResponse) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.ClusterController/ListClusters", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *clusterControllerClient) DiagnoseCluster(ctx context.Context, in *DiagnoseClusterRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.ClusterController/DiagnoseCluster", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for ClusterController service + +type ClusterControllerServer interface { + // Creates a cluster in a project. + CreateCluster(context.Context, *CreateClusterRequest) (*google_longrunning.Operation, error) + // Updates a cluster in a project. + UpdateCluster(context.Context, *UpdateClusterRequest) (*google_longrunning.Operation, error) + // Deletes a cluster in a project. + DeleteCluster(context.Context, *DeleteClusterRequest) (*google_longrunning.Operation, error) + // Gets the resource representation for a cluster in a project. + GetCluster(context.Context, *GetClusterRequest) (*Cluster, error) + // Lists all regions/{region}/clusters in a project. + ListClusters(context.Context, *ListClustersRequest) (*ListClustersResponse, error) + // Gets cluster diagnostic information. + // After the operation completes, the Operation.response field + // contains `DiagnoseClusterOutputLocation`. + DiagnoseCluster(context.Context, *DiagnoseClusterRequest) (*google_longrunning.Operation, error) +} + +func RegisterClusterControllerServer(s *grpc.Server, srv ClusterControllerServer) { + s.RegisterService(&_ClusterController_serviceDesc, srv) +} + +func _ClusterController_CreateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterControllerServer).CreateCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1beta2.ClusterController/CreateCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterControllerServer).CreateCluster(ctx, req.(*CreateClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterController_UpdateCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterControllerServer).UpdateCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1beta2.ClusterController/UpdateCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterControllerServer).UpdateCluster(ctx, req.(*UpdateClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterController_DeleteCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor 
grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterControllerServer).DeleteCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1beta2.ClusterController/DeleteCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterControllerServer).DeleteCluster(ctx, req.(*DeleteClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterController_GetCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterControllerServer).GetCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1beta2.ClusterController/GetCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterControllerServer).GetCluster(ctx, req.(*GetClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterController_ListClusters_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListClustersRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterControllerServer).ListClusters(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1beta2.ClusterController/ListClusters", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterControllerServer).ListClusters(ctx, req.(*ListClustersRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _ClusterController_DiagnoseCluster_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DiagnoseClusterRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(ClusterControllerServer).DiagnoseCluster(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1beta2.ClusterController/DiagnoseCluster", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(ClusterControllerServer).DiagnoseCluster(ctx, req.(*DiagnoseClusterRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _ClusterController_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.dataproc.v1beta2.ClusterController", + HandlerType: (*ClusterControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateCluster", + Handler: _ClusterController_CreateCluster_Handler, + }, + { + MethodName: "UpdateCluster", + Handler: _ClusterController_UpdateCluster_Handler, + }, + { + MethodName: "DeleteCluster", + Handler: _ClusterController_DeleteCluster_Handler, + }, + { + MethodName: "GetCluster", + Handler: _ClusterController_GetCluster_Handler, + }, + { + MethodName: "ListClusters", + Handler: _ClusterController_ListClusters_Handler, + }, + { + MethodName: "DiagnoseCluster", + Handler: _ClusterController_DiagnoseCluster_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: 
"google/cloud/dataproc/v1beta2/clusters.proto", +} + +func init() { proto.RegisterFile("google/cloud/dataproc/v1beta2/clusters.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 2093 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x59, 0xcd, 0x72, 0x1b, 0xc7, + 0x11, 0xd6, 0x12, 0x04, 0x09, 0x36, 0x08, 0x10, 0x1c, 0xd3, 0x0c, 0x4c, 0x45, 0xb1, 0xbc, 0x4e, + 0x1c, 0xda, 0x71, 0x00, 0x9b, 0x8a, 0xcb, 0x8e, 0x14, 0xb9, 0x4c, 0x91, 0x14, 0x49, 0x87, 0xa2, + 0x98, 0x05, 0x20, 0x45, 0x49, 0xa9, 0xb6, 0x06, 0xbb, 0x43, 0x68, 0xcc, 0xfd, 0xcb, 0xce, 0xac, + 0x6c, 0x48, 0xa5, 0x8b, 0x6f, 0xa9, 0x1c, 0x72, 0xf0, 0x03, 0xc4, 0xe7, 0x1c, 0x72, 0x4a, 0x55, + 0x2a, 0x87, 0xdc, 0x72, 0xce, 0xc5, 0xa9, 0x3c, 0x41, 0x0e, 0x79, 0x84, 0x1c, 0x53, 0xf3, 0xb3, + 0xc0, 0x2e, 0x48, 0x69, 0x09, 0x46, 0xe5, 0x13, 0x76, 0x7a, 0xfa, 0xe7, 0x9b, 0xee, 0x9e, 0x9e, + 0x9e, 0x01, 0xbc, 0x3b, 0x08, 0xc3, 0x81, 0x47, 0xda, 0x8e, 0x17, 0x26, 0x6e, 0xdb, 0xc5, 0x1c, + 0x47, 0x71, 0xe8, 0xb4, 0x1f, 0xbf, 0xdf, 0x27, 0x1c, 0x6f, 0xb4, 0x1d, 0x2f, 0x61, 0x9c, 0xc4, + 0xac, 0x15, 0xc5, 0x21, 0x0f, 0xd1, 0x15, 0xc5, 0xdd, 0x92, 0xdc, 0xad, 0x94, 0xbb, 0xa5, 0xb9, + 0xd7, 0xbe, 0xab, 0x95, 0xe1, 0x88, 0xb6, 0x71, 0x10, 0x84, 0x1c, 0x73, 0x1a, 0x06, 0x5a, 0x78, + 0xed, 0x4d, 0x3d, 0xeb, 0x85, 0xc1, 0x20, 0x4e, 0x82, 0x80, 0x06, 0x83, 0x76, 0x18, 0x91, 0x38, + 0xc7, 0xf4, 0x3d, 0xcd, 0x24, 0x47, 0xfd, 0xe4, 0xb8, 0xed, 0x26, 0x8a, 0x41, 0xcf, 0x5f, 0x9d, + 0x9c, 0x3f, 0xa6, 0xc4, 0x73, 0x6d, 0x1f, 0xb3, 0x13, 0xcd, 0xf1, 0xfa, 0x24, 0x07, 0xa7, 0x3e, + 0x61, 0x1c, 0xfb, 0x91, 0x62, 0x30, 0xff, 0x30, 0x0b, 0xf3, 0x5b, 0x6a, 0x5d, 0xe8, 0x0a, 0x40, + 0x14, 0x87, 0x9f, 0x11, 0x87, 0xdb, 0xd4, 0x6d, 0x1a, 0x57, 0x8d, 0xf5, 0x05, 0x6b, 0x41, 0x53, + 0xf6, 0x5d, 0xf4, 0x06, 0x2c, 0x6a, 0x0f, 0xd8, 0x01, 0xf6, 0x49, 0x73, 0x46, 0x32, 0x54, 0x35, + 0xed, 0x10, 0xfb, 0x04, 0x6d, 0xc3, 0x9c, 0x13, 0x06, 0xc7, 0x74, 0xd0, 0x2c, 0x5d, 0x35, 0xd6, + 0xab, 0x1b, 0xef, 0xb6, 0x5e, 0xe8, 0xa3, 0x96, 0xb6, 0xbc, 0x25, 0x65, 0x2c, 0x2d, 0x8b, 0x3e, + 0x85, 0x39, 0x0f, 0xf7, 0x89, 0xc7, 0x9a, 0x95, 0xab, 0xa5, 0xf5, 0xea, 0xc6, 0xc6, 0xf9, 0xb4, + 0xb4, 0x0e, 0xa4, 0xd0, 0x4e, 0xc0, 0xe3, 0xa1, 0xa5, 0x35, 0x08, 0x44, 0x8c, 0x63, 0x9e, 0xb0, + 0xe6, 0xec, 0x34, 0x88, 0x3a, 0x52, 0xc6, 0xd2, 0xb2, 0xa8, 0x03, 0x75, 0xf5, 0x65, 0x3f, 0xa2, + 0x8c, 0x87, 0xf1, 0xb0, 0x39, 0x2f, 0x91, 0x4d, 0xa7, 0xad, 0xa6, 0x74, 0xec, 0x29, 0x15, 0x59, + 0x7f, 0x26, 0x09, 0x75, 0x9b, 0x73, 0x39, 0x7f, 0xf6, 0x12, 0xea, 0xa2, 0x5d, 0x98, 0xf7, 0x09, + 0x8f, 0xa9, 0xc3, 0x9a, 0x0b, 0x12, 0xfe, 0x8f, 0xcf, 0x67, 0xf0, 0x8e, 0x12, 0xb2, 0x52, 0xe9, + 0xb5, 0x9f, 0x42, 0x35, 0xe3, 0x1d, 0xd4, 0x80, 0xd2, 0x09, 0x19, 0xea, 0x10, 0x8b, 0x4f, 0xb4, + 0x02, 0xe5, 0xc7, 0xd8, 0x4b, 0xd2, 0xa8, 0xaa, 0xc1, 0xf5, 0x99, 0x8f, 0x0c, 0xf3, 0x9f, 0x65, + 0xa8, 0xe5, 0xe2, 0x84, 0xde, 0x84, 0x9a, 0x8a, 0x94, 0xdd, 0x4f, 0x9c, 0x13, 0xc2, 0xb5, 0x9e, + 0x45, 0x45, 0xbc, 0x25, 0x69, 0xe8, 0x21, 0xa0, 0x81, 0x43, 0xec, 0x74, 0x85, 0x3a, 0x2d, 0x2a, + 0x72, 0x15, 0xed, 0x82, 0x55, 0xec, 0x3a, 0x24, 0x9f, 0x19, 0x8d, 0xc1, 0x04, 0x05, 0xdd, 0x87, + 0x9a, 0x8f, 0xb3, 0x9a, 0x95, 0x7f, 0x8a, 0x52, 0x65, 0x3f, 0x60, 0x1c, 0x07, 0x0e, 0xd9, 0x8d, + 0xc3, 0x24, 0xd2, 0xca, 0x17, 0x95, 0xa2, 0xb1, 0xe2, 0xcf, 0xc3, 0xf8, 0x64, 0xac, 0x18, 0x2e, + 0xae, 0x58, 0x29, 0xd2, 0x8a, 0x3f, 0x83, 0xef, 0x30, 0xe2, 0x84, 0x81, 0x8b, 0xe3, 0xa1, 0x9d, + 0x37, 0xb1, 0x78, 0x61, 0x13, 0xaf, 0x8e, 0x54, 0xde, 0xcf, 0xda, 0xba, 0x07, 
0x4b, 0x2c, 0x3c, + 0xe6, 0x9f, 0xe3, 0x98, 0xa4, 0x36, 0x6a, 0xe7, 0xca, 0x9f, 0x8e, 0x96, 0xd2, 0xea, 0xeb, 0x2c, + 0x37, 0x46, 0x0f, 0xa0, 0xe1, 0xd1, 0x63, 0xe2, 0x0c, 0x1d, 0x6f, 0xa4, 0xb8, 0x2e, 0x15, 0xb7, + 0x0a, 0x14, 0x1f, 0xa4, 0x62, 0x5a, 0xf3, 0x92, 0x97, 0x27, 0xa0, 0x00, 0x56, 0x69, 0x40, 0x39, + 0xc5, 0x1e, 0x7d, 0x22, 0x6b, 0x9c, 0x8d, 0x1d, 0x59, 0x0b, 0x9b, 0x55, 0xb9, 0xd5, 0x3e, 0x2c, + 0x30, 0x70, 0x18, 0xba, 0x64, 0x3f, 0xa7, 0x60, 0x53, 0xca, 0x5b, 0xaf, 0xd2, 0x33, 0xa8, 0xcc, + 0xfc, 0x63, 0x09, 0x1a, 0x93, 0x79, 0x86, 0x5e, 0x83, 0xca, 0x93, 0x30, 0x20, 0x76, 0x12, 0x53, + 0x9d, 0xd4, 0xf3, 0x62, 0xdc, 0x8b, 0x29, 0x7a, 0x1d, 0xaa, 0x01, 0xe1, 0x22, 0x6e, 0x72, 0x56, + 0x6d, 0x13, 0xd0, 0x24, 0xc1, 0xf0, 0x03, 0xa8, 0xb3, 0xa4, 0x9f, 0xe5, 0x51, 0x1b, 0xba, 0x36, + 0xa6, 0x0a, 0xb6, 0x75, 0x68, 0xd0, 0x80, 0x93, 0x38, 0xc0, 0x9e, 0x4d, 0x23, 0x3b, 0x0c, 0x3c, + 0x51, 0x4c, 0x8c, 0xf5, 0x8a, 0x55, 0x4f, 0xe9, 0xfb, 0xd1, 0xdd, 0xc0, 0x1b, 0xa2, 0x1f, 0xc2, + 0x12, 0x23, 0xf1, 0x63, 0xea, 0x10, 0x1b, 0x3b, 0x4e, 0x98, 0x04, 0x5c, 0x6e, 0x9f, 0x05, 0xab, + 0xae, 0xc9, 0x9b, 0x8a, 0x8a, 0x7e, 0x02, 0xab, 0x13, 0x8c, 0x36, 0x73, 0xc2, 0x88, 0xb0, 0x66, + 0xe9, 0x6a, 0x69, 0x7d, 0xc1, 0x5a, 0xc9, 0xf3, 0x77, 0xe4, 0x1c, 0x42, 0x30, 0xcb, 0xf1, 0x40, + 0xd4, 0x45, 0xc1, 0x23, 0xbf, 0xd1, 0x03, 0xa8, 0xf8, 0x84, 0x63, 0xe1, 0xdc, 0x66, 0x59, 0xba, + 0xfd, 0xe6, 0x94, 0x5b, 0xb5, 0x75, 0x47, 0xcb, 0xab, 0x32, 0x3c, 0x52, 0xb7, 0x76, 0x03, 0x6a, + 0xb9, 0xa9, 0xa9, 0x6a, 0xd0, 0xbf, 0x4a, 0xf0, 0xca, 0x19, 0xe9, 0x2f, 0x2a, 0x51, 0x90, 0xf8, + 0x36, 0xd5, 0x53, 0x4c, 0x6a, 0x2b, 0x5b, 0x8b, 0x41, 0xe2, 0xa7, 0xec, 0x4c, 0x04, 0x26, 0x65, + 0x90, 0x07, 0x17, 0x6b, 0xce, 0xc8, 0x25, 0xd7, 0x52, 0xaa, 0x38, 0xba, 0x18, 0xba, 0x0c, 0x0b, + 0xd4, 0xc7, 0x03, 0x15, 0xfc, 0x92, 0x44, 0x50, 0x91, 0x04, 0x1d, 0x35, 0x1f, 0x3b, 0x8f, 0x68, + 0x40, 0x6c, 0x3e, 0x8c, 0x14, 0xcf, 0xac, 0x0a, 0x86, 0xa6, 0x77, 0x87, 0x91, 0xe4, 0xfc, 0x14, + 0xaa, 0x2e, 0x65, 0x27, 0xe9, 0xee, 0x28, 0xcb, 0xdd, 0xf1, 0x76, 0x81, 0x17, 0xb7, 0x29, 0x3b, + 0xd1, 0x1b, 0x03, 0xdc, 0xd1, 0xb7, 0x44, 0xce, 0xec, 0x28, 0x26, 0xc4, 0x8f, 0x38, 0xed, 0x7b, + 0x44, 0xa6, 0x54, 0xc5, 0xaa, 0x51, 0x76, 0x34, 0x26, 0x22, 0x07, 0x56, 0x7c, 0x1c, 0xe0, 0x01, + 0x71, 0xed, 0x81, 0x70, 0x4e, 0x6a, 0x7b, 0x5e, 0xda, 0x7e, 0xbf, 0xc0, 0xf6, 0x1d, 0x25, 0x9a, + 0xad, 0x2a, 0xc8, 0x3f, 0x45, 0x43, 0x5d, 0x58, 0xc4, 0x8e, 0x43, 0x3c, 0xd1, 0xa2, 0x84, 0x71, + 0x7a, 0x34, 0xbf, 0x57, 0xa0, 0x7c, 0x73, 0x2c, 0x92, 0x16, 0xc5, 0xac, 0x16, 0xf3, 0xb7, 0x06, + 0xa0, 0xd3, 0x00, 0x44, 0x46, 0x8f, 0x42, 0xc6, 0x89, 0x1f, 0x79, 0x98, 0xab, 0xd8, 0xe9, 0x74, + 0x59, 0x49, 0x67, 0xbb, 0x7a, 0x52, 0x76, 0x1f, 0x37, 0xe1, 0xf2, 0x48, 0x4a, 0x39, 0x42, 0xad, + 0x23, 0xd7, 0xaf, 0x34, 0x69, 0x36, 0x8f, 0x94, 0x6d, 0xd9, 0xbc, 0x98, 0x31, 0x2c, 0x9f, 0x82, + 0x8b, 0xde, 0x83, 0x95, 0x0c, 0xe0, 0x71, 0xf0, 0x15, 0x0e, 0x94, 0x99, 0x4b, 0x13, 0xe0, 0x47, + 0xb0, 0x9c, 0x95, 0x50, 0x1b, 0x77, 0x46, 0xe6, 0x65, 0x03, 0x67, 0xf5, 0x27, 0x01, 0x37, 0x1f, + 0x02, 0x8c, 0x63, 0x8f, 0xde, 0x86, 0xe5, 0x7e, 0x18, 0x72, 0x5b, 0x26, 0x10, 0xa3, 0x4f, 0x88, + 0x3d, 0xe8, 0xeb, 0x94, 0xae, 0x8b, 0x09, 0xc1, 0xda, 0xa1, 0x4f, 0xc8, 0x6e, 0x1f, 0x7d, 0x1f, + 0xea, 0x22, 0xf3, 0xbd, 0xd0, 0xc1, 0x9e, 0xcd, 0x98, 0xcb, 0xb4, 0x09, 0x91, 0xfa, 0x07, 0x82, + 0xd8, 0x61, 0x2e, 0x33, 0xff, 0x63, 0xc0, 0xd2, 0x44, 0xe5, 0x45, 0x9b, 0xb0, 0x44, 0x5d, 0x8f, + 0xd8, 0x2e, 0xf1, 0x08, 0x27, 0x36, 0xe7, 0x9e, 0x34, 0x51, 0xdd, 0x78, 0x2d, 0x8d, 0x65, 0xda, + 0x2c, 
0xb6, 0xb6, 0x75, 0xbb, 0x69, 0xd5, 0x84, 0xc4, 0xb6, 0x14, 0xe8, 0x72, 0x0f, 0xdd, 0x86, + 0x06, 0x4e, 0x78, 0x38, 0x52, 0x41, 0xb5, 0x77, 0xab, 0x1b, 0x6b, 0xa7, 0x74, 0x74, 0xd3, 0x86, + 0x73, 0xef, 0x92, 0x55, 0x17, 0x52, 0x5a, 0x0d, 0xf5, 0x09, 0xda, 0x82, 0xa5, 0x9c, 0x1e, 0xee, + 0xe9, 0xbe, 0xf1, 0xf9, 0x50, 0xf6, 0x2e, 0x59, 0xb5, 0x8c, 0x16, 0xee, 0xdd, 0x2a, 0x43, 0x89, + 0x73, 0xcf, 0xfc, 0x9d, 0x01, 0xcd, 0xe7, 0x9d, 0x01, 0xa2, 0x94, 0x92, 0x2f, 0x88, 0x93, 0x70, + 0xdc, 0xf7, 0x88, 0x7d, 0x4c, 0xbd, 0x34, 0x91, 0xea, 0x63, 0xf2, 0x6d, 0xea, 0x11, 0x74, 0x1b, + 0x96, 0x15, 0x45, 0x1c, 0x40, 0x62, 0x5d, 0x61, 0xc2, 0xf5, 0xd2, 0x5e, 0xe0, 0x9e, 0xc6, 0x48, + 0xa6, 0xab, 0x44, 0xcc, 0xaf, 0x4b, 0xa3, 0xa6, 0x49, 0x35, 0x7f, 0x68, 0x0f, 0xca, 0xa2, 0xfd, + 0x53, 0x86, 0xeb, 0xe7, 0xed, 0x69, 0x95, 0x70, 0x4b, 0xfc, 0x10, 0x4b, 0x29, 0x40, 0xab, 0x30, + 0xe7, 0x12, 0x8e, 0xa9, 0xa7, 0x33, 0x5a, 0x8f, 0xd0, 0x36, 0x34, 0x24, 0x83, 0xcd, 0x38, 0x8e, + 0xb9, 0x8a, 0x4a, 0xa9, 0x28, 0x2a, 0x96, 0x6c, 0x6c, 0x49, 0x47, 0x88, 0xc8, 0x98, 0xfc, 0x02, + 0x2a, 0x2c, 0xe9, 0x2b, 0xa8, 0xb3, 0x12, 0xea, 0x07, 0xd3, 0x41, 0xd5, 0xc2, 0xd6, 0x48, 0x8d, + 0x79, 0x0f, 0xca, 0x72, 0x01, 0xa8, 0x0a, 0xf3, 0xbd, 0xc3, 0x9f, 0x1f, 0xde, 0xbd, 0x7f, 0xd8, + 0xb8, 0x84, 0x16, 0xa1, 0xb2, 0x65, 0xed, 0x6c, 0x76, 0xf7, 0x0f, 0x77, 0x1b, 0x86, 0x98, 0xb2, + 0x7a, 0x87, 0x87, 0x62, 0x30, 0x83, 0x16, 0xa0, 0xbc, 0x63, 0x59, 0x77, 0xad, 0x46, 0x49, 0x70, + 0x6d, 0xef, 0x1c, 0xec, 0x48, 0xae, 0x59, 0x31, 0xea, 0x1d, 0x6d, 0x2b, 0x99, 0xb2, 0xf9, 0x33, + 0xa8, 0xa4, 0xd6, 0xd0, 0x12, 0x54, 0x7b, 0x87, 0x9d, 0xa3, 0x9d, 0xad, 0xfd, 0xdb, 0xfb, 0x3b, + 0xdb, 0x8d, 0x4b, 0xa8, 0x06, 0x0b, 0xbd, 0xc3, 0xbd, 0x9d, 0xcd, 0x83, 0xee, 0xde, 0x83, 0x86, + 0x81, 0x1a, 0xb0, 0xd8, 0xe9, 0x6e, 0x1e, 0xec, 0xd8, 0x9d, 0xee, 0x66, 0xb7, 0xd7, 0x69, 0xcc, + 0x98, 0xdf, 0x18, 0x50, 0xcf, 0xb7, 0x3b, 0xe2, 0x38, 0x51, 0x47, 0xc0, 0x63, 0x12, 0x33, 0x1a, + 0x06, 0x69, 0x63, 0x2b, 0x89, 0xf7, 0x14, 0x0d, 0x3d, 0x94, 0xb7, 0xa4, 0x88, 0xc4, 0x9c, 0xea, + 0xa3, 0xa4, 0xf8, 0x94, 0xcc, 0xdb, 0x69, 0x1d, 0x8d, 0xe4, 0xd5, 0x29, 0x99, 0x51, 0xb8, 0x76, + 0x13, 0x96, 0x26, 0xa6, 0xa7, 0x3b, 0x29, 0x67, 0xa0, 0x9e, 0xbf, 0x04, 0x20, 0x0c, 0x8b, 0x8f, + 0xdc, 0x63, 0x66, 0xa7, 0x37, 0x09, 0x43, 0x42, 0xfe, 0x78, 0xaa, 0x9b, 0x44, 0x6b, 0xcf, 0x3d, + 0x66, 0xfa, 0x5b, 0x61, 0xae, 0x3e, 0x1a, 0x53, 0x84, 0x89, 0x21, 0x8e, 0x83, 0x91, 0x89, 0x99, + 0x8b, 0x98, 0x78, 0x80, 0xe3, 0x20, 0x6f, 0x62, 0x38, 0xa6, 0xac, 0x7d, 0x0c, 0x8d, 0x49, 0x0c, + 0x45, 0x8e, 0x29, 0x65, 0x1c, 0x23, 0xe4, 0x27, 0x0d, 0x4c, 0x23, 0x6f, 0xfe, 0xde, 0x80, 0x95, + 0xad, 0x98, 0x60, 0x9e, 0xf6, 0x3b, 0x16, 0xf9, 0x4d, 0x42, 0x18, 0x2f, 0xba, 0x35, 0xaf, 0xc2, + 0x5c, 0x4c, 0x06, 0x22, 0x99, 0x54, 0x4f, 0xa1, 0x47, 0xe8, 0x13, 0x98, 0xd7, 0x77, 0x23, 0x5d, + 0x5f, 0xde, 0x3a, 0x9f, 0xb7, 0xac, 0x54, 0xcc, 0xfc, 0xc7, 0x0c, 0xac, 0xf4, 0x22, 0xf7, 0xff, + 0x40, 0x54, 0xce, 0x21, 0x3a, 0xc7, 0xfd, 0x3e, 0x03, 0xba, 0x74, 0x21, 0xd0, 0xe8, 0x21, 0x5c, + 0x19, 0xc4, 0xd8, 0x21, 0xc7, 0x89, 0x67, 0xbb, 0xc4, 0x09, 0x7d, 0x9f, 0x32, 0x96, 0x2d, 0xb6, + 0x73, 0x45, 0xc5, 0xf6, 0x72, 0x2a, 0xbf, 0x9d, 0x11, 0xd7, 0x75, 0x17, 0xdd, 0x80, 0x6a, 0x22, + 0x5d, 0x22, 0x1f, 0x41, 0xf4, 0x9d, 0xff, 0x74, 0xf9, 0xbb, 0x4d, 0x89, 0xe7, 0xde, 0xc1, 0xec, + 0xc4, 0x02, 0xc5, 0x2e, 0xbe, 0xcd, 0xaf, 0x0c, 0x58, 0x51, 0xe7, 0xca, 0xcb, 0x09, 0xf1, 0x39, + 0x1c, 0x3a, 0xf9, 0x06, 0x30, 0x7b, 0xea, 0x0d, 0xc0, 0xf4, 0x61, 0x79, 0x97, 0xf0, 0x6f, 0x0b, + 0x91, 0xf9, 0xb5, 0x01, 0xaf, 
0x1c, 0x50, 0x96, 0x1a, 0x64, 0x53, 0x5b, 0x9c, 0xcd, 0x59, 0x5c, + 0x85, 0xb9, 0x63, 0xea, 0x89, 0x84, 0xd1, 0xc9, 0xa6, 0x46, 0xa2, 0xdb, 0x8e, 0x44, 0xa5, 0x15, + 0x5d, 0x8e, 0x6e, 0x5d, 0x2a, 0x82, 0x20, 0xda, 0x1b, 0x69, 0x4b, 0x4c, 0xf2, 0xf0, 0x84, 0xa4, + 0x4b, 0x90, 0xec, 0x5d, 0x41, 0x30, 0xbf, 0x34, 0x60, 0x25, 0x0f, 0x91, 0x45, 0x61, 0xc0, 0x08, + 0xba, 0x05, 0x95, 0xf4, 0x8d, 0x4e, 0x57, 0xb9, 0xf3, 0xe6, 0xe7, 0x48, 0x0e, 0xbd, 0x05, 0x4b, + 0x01, 0xf9, 0x82, 0xdb, 0x19, 0x00, 0xca, 0x4b, 0x35, 0x41, 0x3e, 0x1a, 0x81, 0x88, 0x61, 0x75, + 0x9b, 0xe2, 0x41, 0x10, 0xb2, 0x6f, 0x2d, 0x5b, 0xcc, 0x0f, 0xcf, 0xb0, 0xc9, 0x12, 0x8f, 0x33, + 0x61, 0x33, 0x4c, 0x78, 0x94, 0xf0, 0x4c, 0x73, 0xba, 0xa0, 0x28, 0xbd, 0x98, 0x6e, 0xfc, 0xb7, + 0x02, 0xcb, 0xe3, 0x6b, 0x1a, 0x8f, 0x43, 0xcf, 0x23, 0x31, 0xfa, 0x93, 0x01, 0xb5, 0x5c, 0x49, + 0x43, 0xd7, 0x8a, 0xdc, 0x75, 0x46, 0x01, 0x5c, 0xbb, 0x92, 0x0a, 0x65, 0xde, 0x32, 0x5b, 0x77, + 0xd3, 0xb7, 0x4c, 0x73, 0xff, 0xcb, 0x6f, 0xfe, 0xfd, 0xd5, 0xcc, 0x96, 0xf9, 0xd1, 0xe8, 0x1d, + 0x55, 0xfb, 0x82, 0xb5, 0x9f, 0x8e, 0xfd, 0xf4, 0xac, 0xad, 0xdc, 0xc0, 0xda, 0x4f, 0xd5, 0xc7, + 0xb3, 0xd1, 0x73, 0xeb, 0xf5, 0x51, 0xf1, 0xf8, 0x9b, 0x01, 0xb5, 0x5c, 0xc5, 0x2b, 0x04, 0x7c, + 0x56, 0x7d, 0x2c, 0x02, 0xfc, 0x4b, 0x09, 0xd8, 0xda, 0xd8, 0xbd, 0x28, 0xe0, 0xf6, 0xd3, 0x6c, + 0x20, 0x9f, 0x8d, 0xf1, 0xff, 0xd9, 0x80, 0x5a, 0xae, 0xc0, 0x14, 0xe2, 0x3f, 0xab, 0x1c, 0x15, + 0xe1, 0xbf, 0x2b, 0xf1, 0xef, 0xbf, 0xf3, 0xb2, 0xf0, 0xa3, 0xbf, 0x18, 0x00, 0xe3, 0x12, 0x84, + 0x8a, 0xee, 0x7c, 0xa7, 0xaa, 0xd5, 0xda, 0x39, 0x77, 0x61, 0x8a, 0x1c, 0xbd, 0x34, 0xe4, 0x7f, + 0x35, 0x60, 0x31, 0x5b, 0x29, 0xd0, 0x46, 0xe1, 0x33, 0xd5, 0xa9, 0xca, 0xb7, 0x76, 0x6d, 0x2a, + 0x19, 0x55, 0x8a, 0xcc, 0x4f, 0xe4, 0x52, 0xae, 0xa3, 0x0b, 0x67, 0x3d, 0xfa, 0xbb, 0x01, 0x4b, + 0x13, 0xbb, 0x1d, 0x7d, 0x50, 0xf8, 0x8e, 0x70, 0x56, 0x45, 0x2a, 0x4a, 0x98, 0x5f, 0x4b, 0xac, + 0x3d, 0xf3, 0xe8, 0x65, 0x25, 0xbc, 0xab, 0x61, 0x5c, 0x37, 0xde, 0xb9, 0xf5, 0x14, 0xde, 0x70, + 0x42, 0xff, 0xc5, 0xb8, 0x6f, 0xa5, 0x77, 0x25, 0x76, 0x24, 0x4e, 0xe8, 0x23, 0xe3, 0x57, 0x3b, + 0x9a, 0x7f, 0x10, 0x7a, 0x38, 0x18, 0xb4, 0xc2, 0x78, 0xd0, 0x1e, 0x90, 0x40, 0x9e, 0xdf, 0x6d, + 0x35, 0x85, 0x23, 0xca, 0x9e, 0xf3, 0x47, 0xcd, 0x8d, 0x94, 0xd0, 0x9f, 0x93, 0x12, 0xd7, 0xfe, + 0x17, 0x00, 0x00, 0xff, 0xff, 0x82, 0x2f, 0x0d, 0xbd, 0xd9, 0x19, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2/jobs.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2/jobs.pb.go new file mode 100644 index 00000000..2a2e2945 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2/jobs.pb.go @@ -0,0 +1,2573 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/dataproc/v1beta2/jobs.proto + +package dataproc + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf2 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf4 "google.golang.org/genproto/protobuf/field_mask" +import google_protobuf5 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The Log4j level for job execution. 
When running an +// [Apache Hive](http://hive.apache.org/) job, Cloud +// Dataproc configures the Hive client to an equivalent verbosity level. +type LoggingConfig_Level int32 + +const ( + // Level is unspecified. Use default level for log4j. + LoggingConfig_LEVEL_UNSPECIFIED LoggingConfig_Level = 0 + // Use ALL level for log4j. + LoggingConfig_ALL LoggingConfig_Level = 1 + // Use TRACE level for log4j. + LoggingConfig_TRACE LoggingConfig_Level = 2 + // Use DEBUG level for log4j. + LoggingConfig_DEBUG LoggingConfig_Level = 3 + // Use INFO level for log4j. + LoggingConfig_INFO LoggingConfig_Level = 4 + // Use WARN level for log4j. + LoggingConfig_WARN LoggingConfig_Level = 5 + // Use ERROR level for log4j. + LoggingConfig_ERROR LoggingConfig_Level = 6 + // Use FATAL level for log4j. + LoggingConfig_FATAL LoggingConfig_Level = 7 + // Turn off log4j. + LoggingConfig_OFF LoggingConfig_Level = 8 +) + +var LoggingConfig_Level_name = map[int32]string{ + 0: "LEVEL_UNSPECIFIED", + 1: "ALL", + 2: "TRACE", + 3: "DEBUG", + 4: "INFO", + 5: "WARN", + 6: "ERROR", + 7: "FATAL", + 8: "OFF", +} +var LoggingConfig_Level_value = map[string]int32{ + "LEVEL_UNSPECIFIED": 0, + "ALL": 1, + "TRACE": 2, + "DEBUG": 3, + "INFO": 4, + "WARN": 5, + "ERROR": 6, + "FATAL": 7, + "OFF": 8, +} + +func (x LoggingConfig_Level) String() string { + return proto.EnumName(LoggingConfig_Level_name, int32(x)) +} +func (LoggingConfig_Level) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0, 0} } + +// The job state. +type JobStatus_State int32 + +const ( + // The job state is unknown. + JobStatus_STATE_UNSPECIFIED JobStatus_State = 0 + // The job is pending; it has been submitted, but is not yet running. + JobStatus_PENDING JobStatus_State = 1 + // Job has been received by the service and completed initial setup; + // it will soon be submitted to the cluster. + JobStatus_SETUP_DONE JobStatus_State = 8 + // The job is running on the cluster. + JobStatus_RUNNING JobStatus_State = 2 + // A CancelJob request has been received, but is pending. + JobStatus_CANCEL_PENDING JobStatus_State = 3 + // Transient in-flight resources have been canceled, and the request to + // cancel the running job has been issued to the cluster. + JobStatus_CANCEL_STARTED JobStatus_State = 7 + // The job cancellation was successful. + JobStatus_CANCELLED JobStatus_State = 4 + // The job has completed successfully. + JobStatus_DONE JobStatus_State = 5 + // The job has completed, but encountered an error. + JobStatus_ERROR JobStatus_State = 6 + // Job attempt has failed. The detail field contains failure details for + // this attempt. + // + // Applies to restartable jobs only. + JobStatus_ATTEMPT_FAILURE JobStatus_State = 9 +) + +var JobStatus_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "PENDING", + 8: "SETUP_DONE", + 2: "RUNNING", + 3: "CANCEL_PENDING", + 7: "CANCEL_STARTED", + 4: "CANCELLED", + 5: "DONE", + 6: "ERROR", + 9: "ATTEMPT_FAILURE", +} +var JobStatus_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "PENDING": 1, + "SETUP_DONE": 8, + "RUNNING": 2, + "CANCEL_PENDING": 3, + "CANCEL_STARTED": 7, + "CANCELLED": 4, + "DONE": 5, + "ERROR": 6, + "ATTEMPT_FAILURE": 9, +} + +func (x JobStatus_State) String() string { + return proto.EnumName(JobStatus_State_name, int32(x)) +} +func (JobStatus_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{9, 0} } + +type JobStatus_Substate int32 + +const ( + JobStatus_UNSPECIFIED JobStatus_Substate = 0 + // The Job is submitted to the agent. 
+ // + // Applies to RUNNING state. + JobStatus_SUBMITTED JobStatus_Substate = 1 + // The Job has been received and is awaiting execution (it may be waiting + // for a condition to be met). See the "details" field for the reason for + // the delay. + // + // Applies to RUNNING state. + JobStatus_QUEUED JobStatus_Substate = 2 + // The agent-reported status is out of date, which may be caused by a + // loss of communication between the agent and Cloud Dataproc. If the + // agent does not send a timely update, the job will fail. + // + // Applies to RUNNING state. + JobStatus_STALE_STATUS JobStatus_Substate = 3 +) + +var JobStatus_Substate_name = map[int32]string{ + 0: "UNSPECIFIED", + 1: "SUBMITTED", + 2: "QUEUED", + 3: "STALE_STATUS", +} +var JobStatus_Substate_value = map[string]int32{ + "UNSPECIFIED": 0, + "SUBMITTED": 1, + "QUEUED": 2, + "STALE_STATUS": 3, +} + +func (x JobStatus_Substate) String() string { + return proto.EnumName(JobStatus_Substate_name, int32(x)) +} +func (JobStatus_Substate) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{9, 1} } + +// The application state, corresponding to +// YarnProtos.YarnApplicationStateProto. +type YarnApplication_State int32 + +const ( + // Status is unspecified. + YarnApplication_STATE_UNSPECIFIED YarnApplication_State = 0 + // Status is NEW. + YarnApplication_NEW YarnApplication_State = 1 + // Status is NEW_SAVING. + YarnApplication_NEW_SAVING YarnApplication_State = 2 + // Status is SUBMITTED. + YarnApplication_SUBMITTED YarnApplication_State = 3 + // Status is ACCEPTED. + YarnApplication_ACCEPTED YarnApplication_State = 4 + // Status is RUNNING. + YarnApplication_RUNNING YarnApplication_State = 5 + // Status is FINISHED. + YarnApplication_FINISHED YarnApplication_State = 6 + // Status is FAILED. + YarnApplication_FAILED YarnApplication_State = 7 + // Status is KILLED. + YarnApplication_KILLED YarnApplication_State = 8 +) + +var YarnApplication_State_name = map[int32]string{ + 0: "STATE_UNSPECIFIED", + 1: "NEW", + 2: "NEW_SAVING", + 3: "SUBMITTED", + 4: "ACCEPTED", + 5: "RUNNING", + 6: "FINISHED", + 7: "FAILED", + 8: "KILLED", +} +var YarnApplication_State_value = map[string]int32{ + "STATE_UNSPECIFIED": 0, + "NEW": 1, + "NEW_SAVING": 2, + "SUBMITTED": 3, + "ACCEPTED": 4, + "RUNNING": 5, + "FINISHED": 6, + "FAILED": 7, + "KILLED": 8, +} + +func (x YarnApplication_State) String() string { + return proto.EnumName(YarnApplication_State_name, int32(x)) +} +func (YarnApplication_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{11, 0} } + +// A matcher that specifies categories of job states. +type ListJobsRequest_JobStateMatcher int32 + +const ( + // Match all jobs, regardless of state. + ListJobsRequest_ALL ListJobsRequest_JobStateMatcher = 0 + // Only match jobs in non-terminal states: PENDING, RUNNING, or + // CANCEL_PENDING. + ListJobsRequest_ACTIVE ListJobsRequest_JobStateMatcher = 1 + // Only match jobs in terminal states: CANCELLED, DONE, or ERROR. 
+ ListJobsRequest_NON_ACTIVE ListJobsRequest_JobStateMatcher = 2 +) + +var ListJobsRequest_JobStateMatcher_name = map[int32]string{ + 0: "ALL", + 1: "ACTIVE", + 2: "NON_ACTIVE", +} +var ListJobsRequest_JobStateMatcher_value = map[string]int32{ + "ALL": 0, + "ACTIVE": 1, + "NON_ACTIVE": 2, +} + +func (x ListJobsRequest_JobStateMatcher) String() string { + return proto.EnumName(ListJobsRequest_JobStateMatcher_name, int32(x)) +} +func (ListJobsRequest_JobStateMatcher) EnumDescriptor() ([]byte, []int) { + return fileDescriptor1, []int{16, 0} +} + +// The runtime logging config of the job. +type LoggingConfig struct { + // The per-package log levels for the driver. This may include + // "root" package name to configure rootLogger. + // Examples: + // 'com.google = FATAL', 'root = INFO', 'org.apache = DEBUG' + DriverLogLevels map[string]LoggingConfig_Level `protobuf:"bytes,2,rep,name=driver_log_levels,json=driverLogLevels" json:"driver_log_levels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"varint,2,opt,name=value,enum=google.cloud.dataproc.v1beta2.LoggingConfig_Level"` +} + +func (m *LoggingConfig) Reset() { *m = LoggingConfig{} } +func (m *LoggingConfig) String() string { return proto.CompactTextString(m) } +func (*LoggingConfig) ProtoMessage() {} +func (*LoggingConfig) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *LoggingConfig) GetDriverLogLevels() map[string]LoggingConfig_Level { + if m != nil { + return m.DriverLogLevels + } + return nil +} + +// A Cloud Dataproc job for running +// [Apache Hadoop MapReduce](https://hadoop.apache.org/docs/current/hadoop-mapreduce-client/hadoop-mapreduce-client-core/MapReduceTutorial.html) +// jobs on [Apache Hadoop YARN](https://hadoop.apache.org/docs/r2.7.1/hadoop-yarn/hadoop-yarn-site/YARN.html). +type HadoopJob struct { + // Required. Indicates the location of the driver's main class. Specify + // either the jar file that contains the main class or the main class name. + // To specify both, add the jar file to `jar_file_uris`, and then specify + // the main class name in this property. + // + // Types that are valid to be assigned to Driver: + // *HadoopJob_MainJarFileUri + // *HadoopJob_MainClass + Driver isHadoopJob_Driver `protobuf_oneof:"driver"` + // Optional. The arguments to pass to the driver. Do not + // include arguments, such as `-libjars` or `-Dfoo=bar`, that can be set as job + // properties, since a collision may occur that causes an incorrect job + // submission. + Args []string `protobuf:"bytes,3,rep,name=args" json:"args,omitempty"` + // Optional. Jar file URIs to add to the CLASSPATHs of the + // Hadoop driver and tasks. + JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"` + // Optional. HCFS (Hadoop Compatible Filesystem) URIs of files to be copied + // to the working directory of Hadoop drivers and distributed tasks. Useful + // for naively parallel tasks. + FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris" json:"file_uris,omitempty"` + // Optional. HCFS URIs of archives to be extracted in the working directory of + // Hadoop drivers and tasks. Supported file types: + // .jar, .tar, .tar.gz, .tgz, or .zip. + ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris" json:"archive_uris,omitempty"` + // Optional. A mapping of property names to values, used to configure Hadoop. + // Properties that conflict with values set by the Cloud Dataproc API may be + // overwritten. 
Can include properties set in /etc/hadoop/conf/*-site and + // classes in user code. + Properties map[string]string `protobuf:"bytes,7,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Optional. The runtime log config for job execution. + LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"` +} + +func (m *HadoopJob) Reset() { *m = HadoopJob{} } +func (m *HadoopJob) String() string { return proto.CompactTextString(m) } +func (*HadoopJob) ProtoMessage() {} +func (*HadoopJob) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +type isHadoopJob_Driver interface { + isHadoopJob_Driver() +} + +type HadoopJob_MainJarFileUri struct { + MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,oneof"` +} +type HadoopJob_MainClass struct { + MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,oneof"` +} + +func (*HadoopJob_MainJarFileUri) isHadoopJob_Driver() {} +func (*HadoopJob_MainClass) isHadoopJob_Driver() {} + +func (m *HadoopJob) GetDriver() isHadoopJob_Driver { + if m != nil { + return m.Driver + } + return nil +} + +func (m *HadoopJob) GetMainJarFileUri() string { + if x, ok := m.GetDriver().(*HadoopJob_MainJarFileUri); ok { + return x.MainJarFileUri + } + return "" +} + +func (m *HadoopJob) GetMainClass() string { + if x, ok := m.GetDriver().(*HadoopJob_MainClass); ok { + return x.MainClass + } + return "" +} + +func (m *HadoopJob) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +func (m *HadoopJob) GetJarFileUris() []string { + if m != nil { + return m.JarFileUris + } + return nil +} + +func (m *HadoopJob) GetFileUris() []string { + if m != nil { + return m.FileUris + } + return nil +} + +func (m *HadoopJob) GetArchiveUris() []string { + if m != nil { + return m.ArchiveUris + } + return nil +} + +func (m *HadoopJob) GetProperties() map[string]string { + if m != nil { + return m.Properties + } + return nil +} + +func (m *HadoopJob) GetLoggingConfig() *LoggingConfig { + if m != nil { + return m.LoggingConfig + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*HadoopJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _HadoopJob_OneofMarshaler, _HadoopJob_OneofUnmarshaler, _HadoopJob_OneofSizer, []interface{}{ + (*HadoopJob_MainJarFileUri)(nil), + (*HadoopJob_MainClass)(nil), + } +} + +func _HadoopJob_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*HadoopJob) + // driver + switch x := m.Driver.(type) { + case *HadoopJob_MainJarFileUri: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.MainJarFileUri) + case *HadoopJob_MainClass: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.MainClass) + case nil: + default: + return fmt.Errorf("HadoopJob.Driver has unexpected type %T", x) + } + return nil +} + +func _HadoopJob_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*HadoopJob) + switch tag { + case 1: // driver.main_jar_file_uri + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Driver = &HadoopJob_MainJarFileUri{x} + return true, err + case 2: // driver.main_class + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Driver = &HadoopJob_MainClass{x} + return true, err + default: + return false, nil + } +} + +func _HadoopJob_OneofSizer(msg proto.Message) (n int) { + m := msg.(*HadoopJob) + // driver + switch x := m.Driver.(type) { + case *HadoopJob_MainJarFileUri: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.MainJarFileUri))) + n += len(x.MainJarFileUri) + case *HadoopJob_MainClass: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.MainClass))) + n += len(x.MainClass) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A Cloud Dataproc job for running [Apache Spark](http://spark.apache.org/) +// applications on YARN. +type SparkJob struct { + // Required. The specification of the main method to call to drive the job. + // Specify either the jar file that contains the main class or the main class + // name. To pass both a main jar and a main class in that jar, add the jar to + // `CommonJob.jar_file_uris`, and then specify the main class name in `main_class`. + // + // Types that are valid to be assigned to Driver: + // *SparkJob_MainJarFileUri + // *SparkJob_MainClass + Driver isSparkJob_Driver `protobuf_oneof:"driver"` + // Optional. The arguments to pass to the driver. Do not include arguments, + // such as `--conf`, that can be set as job properties, since a collision may + // occur that causes an incorrect job submission. + Args []string `protobuf:"bytes,3,rep,name=args" json:"args,omitempty"` + // Optional. HCFS URIs of jar files to add to the CLASSPATHs of the + // Spark driver and tasks. + JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"` + // Optional. HCFS URIs of files to be copied to the working directory of + // Spark drivers and distributed tasks. Useful for naively parallel tasks. + FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris" json:"file_uris,omitempty"` + // Optional. HCFS URIs of archives to be extracted in the working directory + // of Spark drivers and tasks. Supported file types: + // .jar, .tar, .tar.gz, .tgz, and .zip. 
+ ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris" json:"archive_uris,omitempty"` + // Optional. A mapping of property names to values, used to configure Spark. + // Properties that conflict with values set by the Cloud Dataproc API may be + // overwritten. Can include properties set in + // /etc/spark/conf/spark-defaults.conf and classes in user code. + Properties map[string]string `protobuf:"bytes,7,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Optional. The runtime log config for job execution. + LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"` +} + +func (m *SparkJob) Reset() { *m = SparkJob{} } +func (m *SparkJob) String() string { return proto.CompactTextString(m) } +func (*SparkJob) ProtoMessage() {} +func (*SparkJob) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +type isSparkJob_Driver interface { + isSparkJob_Driver() +} + +type SparkJob_MainJarFileUri struct { + MainJarFileUri string `protobuf:"bytes,1,opt,name=main_jar_file_uri,json=mainJarFileUri,oneof"` +} +type SparkJob_MainClass struct { + MainClass string `protobuf:"bytes,2,opt,name=main_class,json=mainClass,oneof"` +} + +func (*SparkJob_MainJarFileUri) isSparkJob_Driver() {} +func (*SparkJob_MainClass) isSparkJob_Driver() {} + +func (m *SparkJob) GetDriver() isSparkJob_Driver { + if m != nil { + return m.Driver + } + return nil +} + +func (m *SparkJob) GetMainJarFileUri() string { + if x, ok := m.GetDriver().(*SparkJob_MainJarFileUri); ok { + return x.MainJarFileUri + } + return "" +} + +func (m *SparkJob) GetMainClass() string { + if x, ok := m.GetDriver().(*SparkJob_MainClass); ok { + return x.MainClass + } + return "" +} + +func (m *SparkJob) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +func (m *SparkJob) GetJarFileUris() []string { + if m != nil { + return m.JarFileUris + } + return nil +} + +func (m *SparkJob) GetFileUris() []string { + if m != nil { + return m.FileUris + } + return nil +} + +func (m *SparkJob) GetArchiveUris() []string { + if m != nil { + return m.ArchiveUris + } + return nil +} + +func (m *SparkJob) GetProperties() map[string]string { + if m != nil { + return m.Properties + } + return nil +} + +func (m *SparkJob) GetLoggingConfig() *LoggingConfig { + if m != nil { + return m.LoggingConfig + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*SparkJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SparkJob_OneofMarshaler, _SparkJob_OneofUnmarshaler, _SparkJob_OneofSizer, []interface{}{ + (*SparkJob_MainJarFileUri)(nil), + (*SparkJob_MainClass)(nil), + } +} + +func _SparkJob_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SparkJob) + // driver + switch x := m.Driver.(type) { + case *SparkJob_MainJarFileUri: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.MainJarFileUri) + case *SparkJob_MainClass: + b.EncodeVarint(2<<3 | proto.WireBytes) + b.EncodeStringBytes(x.MainClass) + case nil: + default: + return fmt.Errorf("SparkJob.Driver has unexpected type %T", x) + } + return nil +} + +func _SparkJob_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SparkJob) + switch tag { + case 1: // driver.main_jar_file_uri + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Driver = &SparkJob_MainJarFileUri{x} + return true, err + case 2: // driver.main_class + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Driver = &SparkJob_MainClass{x} + return true, err + default: + return false, nil + } +} + +func _SparkJob_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SparkJob) + // driver + switch x := m.Driver.(type) { + case *SparkJob_MainJarFileUri: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.MainJarFileUri))) + n += len(x.MainJarFileUri) + case *SparkJob_MainClass: + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.MainClass))) + n += len(x.MainClass) + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A Cloud Dataproc job for running +// [Apache PySpark](https://spark.apache.org/docs/0.9.0/python-programming-guide.html) +// applications on YARN. +type PySparkJob struct { + // Required. The HCFS URI of the main Python file to use as the driver. Must + // be a .py file. + MainPythonFileUri string `protobuf:"bytes,1,opt,name=main_python_file_uri,json=mainPythonFileUri" json:"main_python_file_uri,omitempty"` + // Optional. The arguments to pass to the driver. Do not include arguments, + // such as `--conf`, that can be set as job properties, since a collision may + // occur that causes an incorrect job submission. + Args []string `protobuf:"bytes,2,rep,name=args" json:"args,omitempty"` + // Optional. HCFS file URIs of Python files to pass to the PySpark + // framework. Supported file types: .py, .egg, and .zip. + PythonFileUris []string `protobuf:"bytes,3,rep,name=python_file_uris,json=pythonFileUris" json:"python_file_uris,omitempty"` + // Optional. HCFS URIs of jar files to add to the CLASSPATHs of the + // Python driver and tasks. + JarFileUris []string `protobuf:"bytes,4,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"` + // Optional. HCFS URIs of files to be copied to the working directory of + // Python drivers and distributed tasks. Useful for naively parallel tasks. + FileUris []string `protobuf:"bytes,5,rep,name=file_uris,json=fileUris" json:"file_uris,omitempty"` + // Optional. HCFS URIs of archives to be extracted in the working directory of + // .jar, .tar, .tar.gz, .tgz, and .zip. 
+ ArchiveUris []string `protobuf:"bytes,6,rep,name=archive_uris,json=archiveUris" json:"archive_uris,omitempty"` + // Optional. A mapping of property names to values, used to configure PySpark. + // Properties that conflict with values set by the Cloud Dataproc API may be + // overwritten. Can include properties set in + // /etc/spark/conf/spark-defaults.conf and classes in user code. + Properties map[string]string `protobuf:"bytes,7,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Optional. The runtime log config for job execution. + LoggingConfig *LoggingConfig `protobuf:"bytes,8,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"` +} + +func (m *PySparkJob) Reset() { *m = PySparkJob{} } +func (m *PySparkJob) String() string { return proto.CompactTextString(m) } +func (*PySparkJob) ProtoMessage() {} +func (*PySparkJob) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *PySparkJob) GetMainPythonFileUri() string { + if m != nil { + return m.MainPythonFileUri + } + return "" +} + +func (m *PySparkJob) GetArgs() []string { + if m != nil { + return m.Args + } + return nil +} + +func (m *PySparkJob) GetPythonFileUris() []string { + if m != nil { + return m.PythonFileUris + } + return nil +} + +func (m *PySparkJob) GetJarFileUris() []string { + if m != nil { + return m.JarFileUris + } + return nil +} + +func (m *PySparkJob) GetFileUris() []string { + if m != nil { + return m.FileUris + } + return nil +} + +func (m *PySparkJob) GetArchiveUris() []string { + if m != nil { + return m.ArchiveUris + } + return nil +} + +func (m *PySparkJob) GetProperties() map[string]string { + if m != nil { + return m.Properties + } + return nil +} + +func (m *PySparkJob) GetLoggingConfig() *LoggingConfig { + if m != nil { + return m.LoggingConfig + } + return nil +} + +// A list of queries to run on a cluster. +type QueryList struct { + // Required. The queries to execute. You do not need to terminate a query + // with a semicolon. Multiple queries can be specified in one string + // by separating each with a semicolon. Here is an example of an Cloud + // Dataproc API snippet that uses a QueryList to specify a HiveJob: + // + // "hiveJob": { + // "queryList": { + // "queries": [ + // "query1", + // "query2", + // "query3;query4", + // ] + // } + // } + Queries []string `protobuf:"bytes,1,rep,name=queries" json:"queries,omitempty"` +} + +func (m *QueryList) Reset() { *m = QueryList{} } +func (m *QueryList) String() string { return proto.CompactTextString(m) } +func (*QueryList) ProtoMessage() {} +func (*QueryList) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *QueryList) GetQueries() []string { + if m != nil { + return m.Queries + } + return nil +} + +// A Cloud Dataproc job for running [Apache Hive](https://hive.apache.org/) +// queries on YARN. +type HiveJob struct { + // Required. The sequence of Hive queries to execute, specified as either + // an HCFS file URI or a list of queries. + // + // Types that are valid to be assigned to Queries: + // *HiveJob_QueryFileUri + // *HiveJob_QueryList + Queries isHiveJob_Queries `protobuf_oneof:"queries"` + // Optional. Whether to continue executing queries if a query fails. + // The default value is `false`. Setting to `true` can be useful when executing + // independent parallel queries. 
+ ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure" json:"continue_on_failure,omitempty"` + // Optional. Mapping of query variable names to values (equivalent to the + // Hive command: `SET name="value";`). + ScriptVariables map[string]string `protobuf:"bytes,4,rep,name=script_variables,json=scriptVariables" json:"script_variables,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Optional. A mapping of property names and values, used to configure Hive. + // Properties that conflict with values set by the Cloud Dataproc API may be + // overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, + // /etc/hive/conf/hive-site.xml, and classes in user code. + Properties map[string]string `protobuf:"bytes,5,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Optional. HCFS URIs of jar files to add to the CLASSPATH of the + // Hive server and Hadoop MapReduce (MR) tasks. Can contain Hive SerDes + // and UDFs. + JarFileUris []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"` +} + +func (m *HiveJob) Reset() { *m = HiveJob{} } +func (m *HiveJob) String() string { return proto.CompactTextString(m) } +func (*HiveJob) ProtoMessage() {} +func (*HiveJob) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +type isHiveJob_Queries interface { + isHiveJob_Queries() +} + +type HiveJob_QueryFileUri struct { + QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,oneof"` +} +type HiveJob_QueryList struct { + QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,oneof"` +} + +func (*HiveJob_QueryFileUri) isHiveJob_Queries() {} +func (*HiveJob_QueryList) isHiveJob_Queries() {} + +func (m *HiveJob) GetQueries() isHiveJob_Queries { + if m != nil { + return m.Queries + } + return nil +} + +func (m *HiveJob) GetQueryFileUri() string { + if x, ok := m.GetQueries().(*HiveJob_QueryFileUri); ok { + return x.QueryFileUri + } + return "" +} + +func (m *HiveJob) GetQueryList() *QueryList { + if x, ok := m.GetQueries().(*HiveJob_QueryList); ok { + return x.QueryList + } + return nil +} + +func (m *HiveJob) GetContinueOnFailure() bool { + if m != nil { + return m.ContinueOnFailure + } + return false +} + +func (m *HiveJob) GetScriptVariables() map[string]string { + if m != nil { + return m.ScriptVariables + } + return nil +} + +func (m *HiveJob) GetProperties() map[string]string { + if m != nil { + return m.Properties + } + return nil +} + +func (m *HiveJob) GetJarFileUris() []string { + if m != nil { + return m.JarFileUris + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*HiveJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _HiveJob_OneofMarshaler, _HiveJob_OneofUnmarshaler, _HiveJob_OneofSizer, []interface{}{ + (*HiveJob_QueryFileUri)(nil), + (*HiveJob_QueryList)(nil), + } +} + +func _HiveJob_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*HiveJob) + // queries + switch x := m.Queries.(type) { + case *HiveJob_QueryFileUri: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.QueryFileUri) + case *HiveJob_QueryList: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.QueryList); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("HiveJob.Queries has unexpected type %T", x) + } + return nil +} + +func _HiveJob_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*HiveJob) + switch tag { + case 1: // queries.query_file_uri + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Queries = &HiveJob_QueryFileUri{x} + return true, err + case 2: // queries.query_list + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(QueryList) + err := b.DecodeMessage(msg) + m.Queries = &HiveJob_QueryList{msg} + return true, err + default: + return false, nil + } +} + +func _HiveJob_OneofSizer(msg proto.Message) (n int) { + m := msg.(*HiveJob) + // queries + switch x := m.Queries.(type) { + case *HiveJob_QueryFileUri: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.QueryFileUri))) + n += len(x.QueryFileUri) + case *HiveJob_QueryList: + s := proto.Size(x.QueryList) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A Cloud Dataproc job for running [Apache Spark SQL](http://spark.apache.org/sql/) +// queries. +type SparkSqlJob struct { + // Required. The sequence of Spark SQL queries to execute, specified as + // either an HCFS file URI or as a list of queries. + // + // Types that are valid to be assigned to Queries: + // *SparkSqlJob_QueryFileUri + // *SparkSqlJob_QueryList + Queries isSparkSqlJob_Queries `protobuf_oneof:"queries"` + // Optional. Mapping of query variable names to values (equivalent to the + // Spark SQL command: SET `name="value";`). + ScriptVariables map[string]string `protobuf:"bytes,3,rep,name=script_variables,json=scriptVariables" json:"script_variables,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Optional. A mapping of property names to values, used to configure + // Spark SQL's SparkConf. Properties that conflict with values set by the + // Cloud Dataproc API may be overwritten. + Properties map[string]string `protobuf:"bytes,4,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Optional. HCFS URIs of jar files to be added to the Spark CLASSPATH. + JarFileUris []string `protobuf:"bytes,56,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"` + // Optional. The runtime log config for job execution. 
+ LoggingConfig *LoggingConfig `protobuf:"bytes,6,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"` +} + +func (m *SparkSqlJob) Reset() { *m = SparkSqlJob{} } +func (m *SparkSqlJob) String() string { return proto.CompactTextString(m) } +func (*SparkSqlJob) ProtoMessage() {} +func (*SparkSqlJob) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +type isSparkSqlJob_Queries interface { + isSparkSqlJob_Queries() +} + +type SparkSqlJob_QueryFileUri struct { + QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,oneof"` +} +type SparkSqlJob_QueryList struct { + QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,oneof"` +} + +func (*SparkSqlJob_QueryFileUri) isSparkSqlJob_Queries() {} +func (*SparkSqlJob_QueryList) isSparkSqlJob_Queries() {} + +func (m *SparkSqlJob) GetQueries() isSparkSqlJob_Queries { + if m != nil { + return m.Queries + } + return nil +} + +func (m *SparkSqlJob) GetQueryFileUri() string { + if x, ok := m.GetQueries().(*SparkSqlJob_QueryFileUri); ok { + return x.QueryFileUri + } + return "" +} + +func (m *SparkSqlJob) GetQueryList() *QueryList { + if x, ok := m.GetQueries().(*SparkSqlJob_QueryList); ok { + return x.QueryList + } + return nil +} + +func (m *SparkSqlJob) GetScriptVariables() map[string]string { + if m != nil { + return m.ScriptVariables + } + return nil +} + +func (m *SparkSqlJob) GetProperties() map[string]string { + if m != nil { + return m.Properties + } + return nil +} + +func (m *SparkSqlJob) GetJarFileUris() []string { + if m != nil { + return m.JarFileUris + } + return nil +} + +func (m *SparkSqlJob) GetLoggingConfig() *LoggingConfig { + if m != nil { + return m.LoggingConfig + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*SparkSqlJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _SparkSqlJob_OneofMarshaler, _SparkSqlJob_OneofUnmarshaler, _SparkSqlJob_OneofSizer, []interface{}{ + (*SparkSqlJob_QueryFileUri)(nil), + (*SparkSqlJob_QueryList)(nil), + } +} + +func _SparkSqlJob_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*SparkSqlJob) + // queries + switch x := m.Queries.(type) { + case *SparkSqlJob_QueryFileUri: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.QueryFileUri) + case *SparkSqlJob_QueryList: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.QueryList); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("SparkSqlJob.Queries has unexpected type %T", x) + } + return nil +} + +func _SparkSqlJob_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*SparkSqlJob) + switch tag { + case 1: // queries.query_file_uri + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Queries = &SparkSqlJob_QueryFileUri{x} + return true, err + case 2: // queries.query_list + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(QueryList) + err := b.DecodeMessage(msg) + m.Queries = &SparkSqlJob_QueryList{msg} + return true, err + default: + return false, nil + } +} + +func _SparkSqlJob_OneofSizer(msg proto.Message) (n int) { + m := msg.(*SparkSqlJob) + // queries + switch x := m.Queries.(type) { + case *SparkSqlJob_QueryFileUri: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.QueryFileUri))) + n += len(x.QueryFileUri) + case *SparkSqlJob_QueryList: + s := proto.Size(x.QueryList) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A Cloud Dataproc job for running [Apache Pig](https://pig.apache.org/) +// queries on YARN. +type PigJob struct { + // Required. The sequence of Pig queries to execute, specified as an HCFS + // file URI or a list of queries. + // + // Types that are valid to be assigned to Queries: + // *PigJob_QueryFileUri + // *PigJob_QueryList + Queries isPigJob_Queries `protobuf_oneof:"queries"` + // Optional. Whether to continue executing queries if a query fails. + // The default value is `false`. Setting to `true` can be useful when executing + // independent parallel queries. + ContinueOnFailure bool `protobuf:"varint,3,opt,name=continue_on_failure,json=continueOnFailure" json:"continue_on_failure,omitempty"` + // Optional. Mapping of query variable names to values (equivalent to the Pig + // command: `name=[value]`). + ScriptVariables map[string]string `protobuf:"bytes,4,rep,name=script_variables,json=scriptVariables" json:"script_variables,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Optional. A mapping of property names to values, used to configure Pig. + // Properties that conflict with values set by the Cloud Dataproc API may be + // overwritten. Can include properties set in /etc/hadoop/conf/*-site.xml, + // /etc/pig/conf/pig.properties, and classes in user code. 
+ Properties map[string]string `protobuf:"bytes,5,rep,name=properties" json:"properties,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Optional. HCFS URIs of jar files to add to the CLASSPATH of + // the Pig Client and Hadoop MapReduce (MR) tasks. Can contain Pig UDFs. + JarFileUris []string `protobuf:"bytes,6,rep,name=jar_file_uris,json=jarFileUris" json:"jar_file_uris,omitempty"` + // Optional. The runtime log config for job execution. + LoggingConfig *LoggingConfig `protobuf:"bytes,7,opt,name=logging_config,json=loggingConfig" json:"logging_config,omitempty"` +} + +func (m *PigJob) Reset() { *m = PigJob{} } +func (m *PigJob) String() string { return proto.CompactTextString(m) } +func (*PigJob) ProtoMessage() {} +func (*PigJob) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +type isPigJob_Queries interface { + isPigJob_Queries() +} + +type PigJob_QueryFileUri struct { + QueryFileUri string `protobuf:"bytes,1,opt,name=query_file_uri,json=queryFileUri,oneof"` +} +type PigJob_QueryList struct { + QueryList *QueryList `protobuf:"bytes,2,opt,name=query_list,json=queryList,oneof"` +} + +func (*PigJob_QueryFileUri) isPigJob_Queries() {} +func (*PigJob_QueryList) isPigJob_Queries() {} + +func (m *PigJob) GetQueries() isPigJob_Queries { + if m != nil { + return m.Queries + } + return nil +} + +func (m *PigJob) GetQueryFileUri() string { + if x, ok := m.GetQueries().(*PigJob_QueryFileUri); ok { + return x.QueryFileUri + } + return "" +} + +func (m *PigJob) GetQueryList() *QueryList { + if x, ok := m.GetQueries().(*PigJob_QueryList); ok { + return x.QueryList + } + return nil +} + +func (m *PigJob) GetContinueOnFailure() bool { + if m != nil { + return m.ContinueOnFailure + } + return false +} + +func (m *PigJob) GetScriptVariables() map[string]string { + if m != nil { + return m.ScriptVariables + } + return nil +} + +func (m *PigJob) GetProperties() map[string]string { + if m != nil { + return m.Properties + } + return nil +} + +func (m *PigJob) GetJarFileUris() []string { + if m != nil { + return m.JarFileUris + } + return nil +} + +func (m *PigJob) GetLoggingConfig() *LoggingConfig { + if m != nil { + return m.LoggingConfig + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*PigJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _PigJob_OneofMarshaler, _PigJob_OneofUnmarshaler, _PigJob_OneofSizer, []interface{}{ + (*PigJob_QueryFileUri)(nil), + (*PigJob_QueryList)(nil), + } +} + +func _PigJob_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*PigJob) + // queries + switch x := m.Queries.(type) { + case *PigJob_QueryFileUri: + b.EncodeVarint(1<<3 | proto.WireBytes) + b.EncodeStringBytes(x.QueryFileUri) + case *PigJob_QueryList: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.QueryList); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("PigJob.Queries has unexpected type %T", x) + } + return nil +} + +func _PigJob_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*PigJob) + switch tag { + case 1: // queries.query_file_uri + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + x, err := b.DecodeStringBytes() + m.Queries = &PigJob_QueryFileUri{x} + return true, err + case 2: // queries.query_list + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(QueryList) + err := b.DecodeMessage(msg) + m.Queries = &PigJob_QueryList{msg} + return true, err + default: + return false, nil + } +} + +func _PigJob_OneofSizer(msg proto.Message) (n int) { + m := msg.(*PigJob) + // queries + switch x := m.Queries.(type) { + case *PigJob_QueryFileUri: + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(len(x.QueryFileUri))) + n += len(x.QueryFileUri) + case *PigJob_QueryList: + s := proto.Size(x.QueryList) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Cloud Dataproc job config. +type JobPlacement struct { + // Required. The name of the cluster where the job will be submitted. + ClusterName string `protobuf:"bytes,1,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` + // Output-only. A cluster UUID generated by the Cloud Dataproc service when + // the job is submitted. + ClusterUuid string `protobuf:"bytes,2,opt,name=cluster_uuid,json=clusterUuid" json:"cluster_uuid,omitempty"` +} + +func (m *JobPlacement) Reset() { *m = JobPlacement{} } +func (m *JobPlacement) String() string { return proto.CompactTextString(m) } +func (*JobPlacement) ProtoMessage() {} +func (*JobPlacement) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } + +func (m *JobPlacement) GetClusterName() string { + if m != nil { + return m.ClusterName + } + return "" +} + +func (m *JobPlacement) GetClusterUuid() string { + if m != nil { + return m.ClusterUuid + } + return "" +} + +// Cloud Dataproc job status. +type JobStatus struct { + // Output-only. A state message specifying the overall job state. + State JobStatus_State `protobuf:"varint,1,opt,name=state,enum=google.cloud.dataproc.v1beta2.JobStatus_State" json:"state,omitempty"` + // Output-only. Optional job state details, such as an error + // description if the state is ERROR. + Details string `protobuf:"bytes,2,opt,name=details" json:"details,omitempty"` + // Output-only. The time when this state was entered. 
+ StateStartTime *google_protobuf5.Timestamp `protobuf:"bytes,6,opt,name=state_start_time,json=stateStartTime" json:"state_start_time,omitempty"` + // Output-only. Additional state information, which includes + // status reported by the agent. + Substate JobStatus_Substate `protobuf:"varint,7,opt,name=substate,enum=google.cloud.dataproc.v1beta2.JobStatus_Substate" json:"substate,omitempty"` +} + +func (m *JobStatus) Reset() { *m = JobStatus{} } +func (m *JobStatus) String() string { return proto.CompactTextString(m) } +func (*JobStatus) ProtoMessage() {} +func (*JobStatus) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } + +func (m *JobStatus) GetState() JobStatus_State { + if m != nil { + return m.State + } + return JobStatus_STATE_UNSPECIFIED +} + +func (m *JobStatus) GetDetails() string { + if m != nil { + return m.Details + } + return "" +} + +func (m *JobStatus) GetStateStartTime() *google_protobuf5.Timestamp { + if m != nil { + return m.StateStartTime + } + return nil +} + +func (m *JobStatus) GetSubstate() JobStatus_Substate { + if m != nil { + return m.Substate + } + return JobStatus_UNSPECIFIED +} + +// Encapsulates the full scoping used to reference a job. +type JobReference struct { + // Required. The ID of the Google Cloud Platform project that the job + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Optional. The job ID, which must be unique within the project. The job ID + // is generated by the server upon job submission or provided by the user as a + // means to perform retries without creating duplicate jobs. The ID must + // contain only letters (a-z, A-Z), numbers (0-9), underscores (_), or + // hyphens (-). The maximum length is 100 characters. + JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"` +} + +func (m *JobReference) Reset() { *m = JobReference{} } +func (m *JobReference) String() string { return proto.CompactTextString(m) } +func (*JobReference) ProtoMessage() {} +func (*JobReference) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } + +func (m *JobReference) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *JobReference) GetJobId() string { + if m != nil { + return m.JobId + } + return "" +} + +// A YARN application created by a job. Application information is a subset of +// org.apache.hadoop.yarn.proto.YarnProtos.ApplicationReportProto. +// +// **Beta Feature**: This report is available for testing purposes only. It may +// be changed before final release. +type YarnApplication struct { + // Required. The application name. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Required. The application state. + State YarnApplication_State `protobuf:"varint,2,opt,name=state,enum=google.cloud.dataproc.v1beta2.YarnApplication_State" json:"state,omitempty"` + // Required. The numerical progress of the application, from 1 to 100. + Progress float32 `protobuf:"fixed32,3,opt,name=progress" json:"progress,omitempty"` + // Optional. The HTTP URL of the ApplicationMaster, HistoryServer, or + // TimelineServer that provides application-specific information. The URL uses + // the internal hostname, and requires a proxy server for resolution and, + // possibly, access. 
+ TrackingUrl string `protobuf:"bytes,4,opt,name=tracking_url,json=trackingUrl" json:"tracking_url,omitempty"` +} + +func (m *YarnApplication) Reset() { *m = YarnApplication{} } +func (m *YarnApplication) String() string { return proto.CompactTextString(m) } +func (*YarnApplication) ProtoMessage() {} +func (*YarnApplication) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} } + +func (m *YarnApplication) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *YarnApplication) GetState() YarnApplication_State { + if m != nil { + return m.State + } + return YarnApplication_STATE_UNSPECIFIED +} + +func (m *YarnApplication) GetProgress() float32 { + if m != nil { + return m.Progress + } + return 0 +} + +func (m *YarnApplication) GetTrackingUrl() string { + if m != nil { + return m.TrackingUrl + } + return "" +} + +// A Cloud Dataproc job resource. +type Job struct { + // Optional. The fully qualified reference to the job, which can be used to + // obtain the equivalent REST path of the job resource. If this property + // is not specified when a job is created, the server generates a + // job_id. + Reference *JobReference `protobuf:"bytes,1,opt,name=reference" json:"reference,omitempty"` + // Required. Job information, including how, when, and where to + // run the job. + Placement *JobPlacement `protobuf:"bytes,2,opt,name=placement" json:"placement,omitempty"` + // Required. The application/framework-specific portion of the job. + // + // Types that are valid to be assigned to TypeJob: + // *Job_HadoopJob + // *Job_SparkJob + // *Job_PysparkJob + // *Job_HiveJob + // *Job_PigJob + // *Job_SparkSqlJob + TypeJob isJob_TypeJob `protobuf_oneof:"type_job"` + // Output-only. The job status. Additional application-specific + // status information may be contained in the type_job + // and yarn_applications fields. + Status *JobStatus `protobuf:"bytes,8,opt,name=status" json:"status,omitempty"` + // Output-only. The previous job status. + StatusHistory []*JobStatus `protobuf:"bytes,13,rep,name=status_history,json=statusHistory" json:"status_history,omitempty"` + // Output-only. The collection of YARN applications spun up by this job. + // + // **Beta** Feature: This report is available for testing purposes only. It may + // be changed before final release. + YarnApplications []*YarnApplication `protobuf:"bytes,9,rep,name=yarn_applications,json=yarnApplications" json:"yarn_applications,omitempty"` + // Output-only. A URI pointing to the location of the stdout of the job's + // driver program. + DriverOutputResourceUri string `protobuf:"bytes,17,opt,name=driver_output_resource_uri,json=driverOutputResourceUri" json:"driver_output_resource_uri,omitempty"` + // Output-only. If present, the location of miscellaneous control files + // which may be used as part of job setup and handling. If not present, + // control files may be placed in the same location as `driver_output_uri`. + DriverControlFilesUri string `protobuf:"bytes,15,opt,name=driver_control_files_uri,json=driverControlFilesUri" json:"driver_control_files_uri,omitempty"` + // Optional. The labels to associate with this job. + // Label **keys** must contain 1 to 63 characters, and must conform to + // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + // Label **values** may be empty, but, if present, must contain 1 to 63 + // characters, and must conform to [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + // No more than 32 labels can be associated with a job. 
+ Labels map[string]string `protobuf:"bytes,18,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Optional. Job scheduling configuration. + Scheduling *JobScheduling `protobuf:"bytes,20,opt,name=scheduling" json:"scheduling,omitempty"` +} + +func (m *Job) Reset() { *m = Job{} } +func (m *Job) String() string { return proto.CompactTextString(m) } +func (*Job) ProtoMessage() {} +func (*Job) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } + +type isJob_TypeJob interface { + isJob_TypeJob() +} + +type Job_HadoopJob struct { + HadoopJob *HadoopJob `protobuf:"bytes,3,opt,name=hadoop_job,json=hadoopJob,oneof"` +} +type Job_SparkJob struct { + SparkJob *SparkJob `protobuf:"bytes,4,opt,name=spark_job,json=sparkJob,oneof"` +} +type Job_PysparkJob struct { + PysparkJob *PySparkJob `protobuf:"bytes,5,opt,name=pyspark_job,json=pysparkJob,oneof"` +} +type Job_HiveJob struct { + HiveJob *HiveJob `protobuf:"bytes,6,opt,name=hive_job,json=hiveJob,oneof"` +} +type Job_PigJob struct { + PigJob *PigJob `protobuf:"bytes,7,opt,name=pig_job,json=pigJob,oneof"` +} +type Job_SparkSqlJob struct { + SparkSqlJob *SparkSqlJob `protobuf:"bytes,12,opt,name=spark_sql_job,json=sparkSqlJob,oneof"` +} + +func (*Job_HadoopJob) isJob_TypeJob() {} +func (*Job_SparkJob) isJob_TypeJob() {} +func (*Job_PysparkJob) isJob_TypeJob() {} +func (*Job_HiveJob) isJob_TypeJob() {} +func (*Job_PigJob) isJob_TypeJob() {} +func (*Job_SparkSqlJob) isJob_TypeJob() {} + +func (m *Job) GetTypeJob() isJob_TypeJob { + if m != nil { + return m.TypeJob + } + return nil +} + +func (m *Job) GetReference() *JobReference { + if m != nil { + return m.Reference + } + return nil +} + +func (m *Job) GetPlacement() *JobPlacement { + if m != nil { + return m.Placement + } + return nil +} + +func (m *Job) GetHadoopJob() *HadoopJob { + if x, ok := m.GetTypeJob().(*Job_HadoopJob); ok { + return x.HadoopJob + } + return nil +} + +func (m *Job) GetSparkJob() *SparkJob { + if x, ok := m.GetTypeJob().(*Job_SparkJob); ok { + return x.SparkJob + } + return nil +} + +func (m *Job) GetPysparkJob() *PySparkJob { + if x, ok := m.GetTypeJob().(*Job_PysparkJob); ok { + return x.PysparkJob + } + return nil +} + +func (m *Job) GetHiveJob() *HiveJob { + if x, ok := m.GetTypeJob().(*Job_HiveJob); ok { + return x.HiveJob + } + return nil +} + +func (m *Job) GetPigJob() *PigJob { + if x, ok := m.GetTypeJob().(*Job_PigJob); ok { + return x.PigJob + } + return nil +} + +func (m *Job) GetSparkSqlJob() *SparkSqlJob { + if x, ok := m.GetTypeJob().(*Job_SparkSqlJob); ok { + return x.SparkSqlJob + } + return nil +} + +func (m *Job) GetStatus() *JobStatus { + if m != nil { + return m.Status + } + return nil +} + +func (m *Job) GetStatusHistory() []*JobStatus { + if m != nil { + return m.StatusHistory + } + return nil +} + +func (m *Job) GetYarnApplications() []*YarnApplication { + if m != nil { + return m.YarnApplications + } + return nil +} + +func (m *Job) GetDriverOutputResourceUri() string { + if m != nil { + return m.DriverOutputResourceUri + } + return "" +} + +func (m *Job) GetDriverControlFilesUri() string { + if m != nil { + return m.DriverControlFilesUri + } + return "" +} + +func (m *Job) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *Job) GetScheduling() *JobScheduling { + if m != nil { + return m.Scheduling + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*Job) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _Job_OneofMarshaler, _Job_OneofUnmarshaler, _Job_OneofSizer, []interface{}{ + (*Job_HadoopJob)(nil), + (*Job_SparkJob)(nil), + (*Job_PysparkJob)(nil), + (*Job_HiveJob)(nil), + (*Job_PigJob)(nil), + (*Job_SparkSqlJob)(nil), + } +} + +func _Job_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*Job) + // type_job + switch x := m.TypeJob.(type) { + case *Job_HadoopJob: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.HadoopJob); err != nil { + return err + } + case *Job_SparkJob: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SparkJob); err != nil { + return err + } + case *Job_PysparkJob: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PysparkJob); err != nil { + return err + } + case *Job_HiveJob: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.HiveJob); err != nil { + return err + } + case *Job_PigJob: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PigJob); err != nil { + return err + } + case *Job_SparkSqlJob: + b.EncodeVarint(12<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SparkSqlJob); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("Job.TypeJob has unexpected type %T", x) + } + return nil +} + +func _Job_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*Job) + switch tag { + case 3: // type_job.hadoop_job + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(HadoopJob) + err := b.DecodeMessage(msg) + m.TypeJob = &Job_HadoopJob{msg} + return true, err + case 4: // type_job.spark_job + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SparkJob) + err := b.DecodeMessage(msg) + m.TypeJob = &Job_SparkJob{msg} + return true, err + case 5: // type_job.pyspark_job + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PySparkJob) + err := b.DecodeMessage(msg) + m.TypeJob = &Job_PysparkJob{msg} + return true, err + case 6: // type_job.hive_job + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(HiveJob) + err := b.DecodeMessage(msg) + m.TypeJob = &Job_HiveJob{msg} + return true, err + case 7: // type_job.pig_job + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PigJob) + err := b.DecodeMessage(msg) + m.TypeJob = &Job_PigJob{msg} + return true, err + case 12: // type_job.spark_sql_job + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SparkSqlJob) + err := b.DecodeMessage(msg) + m.TypeJob = &Job_SparkSqlJob{msg} + return true, err + default: + return false, nil + } +} + +func _Job_OneofSizer(msg proto.Message) (n int) { + m := msg.(*Job) + // type_job + switch x := m.TypeJob.(type) { + case *Job_HadoopJob: + s := proto.Size(x.HadoopJob) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Job_SparkJob: + s := proto.Size(x.SparkJob) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Job_PysparkJob: + s := proto.Size(x.PysparkJob) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Job_HiveJob: + s := 
proto.Size(x.HiveJob) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Job_PigJob: + s := proto.Size(x.PigJob) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *Job_SparkSqlJob: + s := proto.Size(x.SparkSqlJob) + n += proto.SizeVarint(12<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Job scheduling options. +// +// **Beta Feature**: These options are available for testing purposes only. +// They may be changed before final release. +type JobScheduling struct { + // Optional. Maximum number of times per hour a driver may be restarted as + // a result of driver terminating with non-zero code before job is + // reported failed. + // + // A job may be reported as thrashing if driver exits with non-zero code + // 4 times within 10 minute window. + // + // Maximum value is 10. + MaxFailuresPerHour int32 `protobuf:"varint,1,opt,name=max_failures_per_hour,json=maxFailuresPerHour" json:"max_failures_per_hour,omitempty"` +} + +func (m *JobScheduling) Reset() { *m = JobScheduling{} } +func (m *JobScheduling) String() string { return proto.CompactTextString(m) } +func (*JobScheduling) ProtoMessage() {} +func (*JobScheduling) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{13} } + +func (m *JobScheduling) GetMaxFailuresPerHour() int32 { + if m != nil { + return m.MaxFailuresPerHour + } + return 0 +} + +// A request to submit a job. +type SubmitJobRequest struct { + // Required. The ID of the Google Cloud Platform project that the job + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Required. The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` + // Required. The job resource. + Job *Job `protobuf:"bytes,2,opt,name=job" json:"job,omitempty"` +} + +func (m *SubmitJobRequest) Reset() { *m = SubmitJobRequest{} } +func (m *SubmitJobRequest) String() string { return proto.CompactTextString(m) } +func (*SubmitJobRequest) ProtoMessage() {} +func (*SubmitJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{14} } + +func (m *SubmitJobRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *SubmitJobRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *SubmitJobRequest) GetJob() *Job { + if m != nil { + return m.Job + } + return nil +} + +// A request to get the resource representation for a job in a project. +type GetJobRequest struct { + // Required. The ID of the Google Cloud Platform project that the job + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Required. The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` + // Required. The job ID. 
+ JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"` +} + +func (m *GetJobRequest) Reset() { *m = GetJobRequest{} } +func (m *GetJobRequest) String() string { return proto.CompactTextString(m) } +func (*GetJobRequest) ProtoMessage() {} +func (*GetJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{15} } + +func (m *GetJobRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *GetJobRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *GetJobRequest) GetJobId() string { + if m != nil { + return m.JobId + } + return "" +} + +// A request to list jobs in a project. +type ListJobsRequest struct { + // Required. The ID of the Google Cloud Platform project that the job + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Required. The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,6,opt,name=region" json:"region,omitempty"` + // Optional. The number of results to return in each response. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Optional. The page token, returned by a previous call, to request the + // next page of results. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` + // Optional. If set, the returned jobs list includes only jobs that were + // submitted to the named cluster. + ClusterName string `protobuf:"bytes,4,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` + // Optional. Specifies enumerated categories of jobs to list. + // (default = match ALL jobs). + // + // If `filter` is provided, `jobStateMatcher` will be ignored. + JobStateMatcher ListJobsRequest_JobStateMatcher `protobuf:"varint,5,opt,name=job_state_matcher,json=jobStateMatcher,enum=google.cloud.dataproc.v1beta2.ListJobsRequest_JobStateMatcher" json:"job_state_matcher,omitempty"` + // Optional. A filter constraining the jobs to list. Filters are + // case-sensitive and have the following syntax: + // + // [field = value] AND [field [= value]] ... + // + // where **field** is `status.state` or `labels.[KEY]`, and `[KEY]` is a label + // key. **value** can be `*` to match all values. + // `status.state` can be either `ACTIVE` or `NON_ACTIVE`. + // Only the logical `AND` operator is supported; space-separated items are + // treated as having an implicit `AND` operator. 
+ // + // Example filter: + // + // status.state = ACTIVE AND labels.env = staging AND labels.starred = * + Filter string `protobuf:"bytes,7,opt,name=filter" json:"filter,omitempty"` +} + +func (m *ListJobsRequest) Reset() { *m = ListJobsRequest{} } +func (m *ListJobsRequest) String() string { return proto.CompactTextString(m) } +func (*ListJobsRequest) ProtoMessage() {} +func (*ListJobsRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{16} } + +func (m *ListJobsRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *ListJobsRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *ListJobsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListJobsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +func (m *ListJobsRequest) GetClusterName() string { + if m != nil { + return m.ClusterName + } + return "" +} + +func (m *ListJobsRequest) GetJobStateMatcher() ListJobsRequest_JobStateMatcher { + if m != nil { + return m.JobStateMatcher + } + return ListJobsRequest_ALL +} + +func (m *ListJobsRequest) GetFilter() string { + if m != nil { + return m.Filter + } + return "" +} + +// A request to update a job. +type UpdateJobRequest struct { + // Required. The ID of the Google Cloud Platform project that the job + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Required. The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,2,opt,name=region" json:"region,omitempty"` + // Required. The job ID. + JobId string `protobuf:"bytes,3,opt,name=job_id,json=jobId" json:"job_id,omitempty"` + // Required. The changes to the job. + Job *Job `protobuf:"bytes,4,opt,name=job" json:"job,omitempty"` + // Required. Specifies the path, relative to Job, of + // the field to update. For example, to update the labels of a Job the + // update_mask parameter would be specified as + // labels, and the `PATCH` request body would specify the new + // value. Note: Currently, labels is the only + // field that can be updated. + UpdateMask *google_protobuf4.FieldMask `protobuf:"bytes,5,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateJobRequest) Reset() { *m = UpdateJobRequest{} } +func (m *UpdateJobRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateJobRequest) ProtoMessage() {} +func (*UpdateJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{17} } + +func (m *UpdateJobRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *UpdateJobRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *UpdateJobRequest) GetJobId() string { + if m != nil { + return m.JobId + } + return "" +} + +func (m *UpdateJobRequest) GetJob() *Job { + if m != nil { + return m.Job + } + return nil +} + +func (m *UpdateJobRequest) GetUpdateMask() *google_protobuf4.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +// A list of jobs in a project. +type ListJobsResponse struct { + // Output-only. Jobs list. + Jobs []*Job `protobuf:"bytes,1,rep,name=jobs" json:"jobs,omitempty"` + // Optional. This token is included in the response if there are more results + // to fetch. To fetch additional results, provide this value as the + // `page_token` in a subsequent ListJobsRequest. 
+ NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListJobsResponse) Reset() { *m = ListJobsResponse{} } +func (m *ListJobsResponse) String() string { return proto.CompactTextString(m) } +func (*ListJobsResponse) ProtoMessage() {} +func (*ListJobsResponse) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{18} } + +func (m *ListJobsResponse) GetJobs() []*Job { + if m != nil { + return m.Jobs + } + return nil +} + +func (m *ListJobsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// A request to cancel a job. +type CancelJobRequest struct { + // Required. The ID of the Google Cloud Platform project that the job + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Required. The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` + // Required. The job ID. + JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"` +} + +func (m *CancelJobRequest) Reset() { *m = CancelJobRequest{} } +func (m *CancelJobRequest) String() string { return proto.CompactTextString(m) } +func (*CancelJobRequest) ProtoMessage() {} +func (*CancelJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{19} } + +func (m *CancelJobRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *CancelJobRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *CancelJobRequest) GetJobId() string { + if m != nil { + return m.JobId + } + return "" +} + +// A request to delete a job. +type DeleteJobRequest struct { + // Required. The ID of the Google Cloud Platform project that the job + // belongs to. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // Required. The Cloud Dataproc region in which to handle the request. + Region string `protobuf:"bytes,3,opt,name=region" json:"region,omitempty"` + // Required. The job ID. 
+ JobId string `protobuf:"bytes,2,opt,name=job_id,json=jobId" json:"job_id,omitempty"` +} + +func (m *DeleteJobRequest) Reset() { *m = DeleteJobRequest{} } +func (m *DeleteJobRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteJobRequest) ProtoMessage() {} +func (*DeleteJobRequest) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{20} } + +func (m *DeleteJobRequest) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *DeleteJobRequest) GetRegion() string { + if m != nil { + return m.Region + } + return "" +} + +func (m *DeleteJobRequest) GetJobId() string { + if m != nil { + return m.JobId + } + return "" +} + +func init() { + proto.RegisterType((*LoggingConfig)(nil), "google.cloud.dataproc.v1beta2.LoggingConfig") + proto.RegisterType((*HadoopJob)(nil), "google.cloud.dataproc.v1beta2.HadoopJob") + proto.RegisterType((*SparkJob)(nil), "google.cloud.dataproc.v1beta2.SparkJob") + proto.RegisterType((*PySparkJob)(nil), "google.cloud.dataproc.v1beta2.PySparkJob") + proto.RegisterType((*QueryList)(nil), "google.cloud.dataproc.v1beta2.QueryList") + proto.RegisterType((*HiveJob)(nil), "google.cloud.dataproc.v1beta2.HiveJob") + proto.RegisterType((*SparkSqlJob)(nil), "google.cloud.dataproc.v1beta2.SparkSqlJob") + proto.RegisterType((*PigJob)(nil), "google.cloud.dataproc.v1beta2.PigJob") + proto.RegisterType((*JobPlacement)(nil), "google.cloud.dataproc.v1beta2.JobPlacement") + proto.RegisterType((*JobStatus)(nil), "google.cloud.dataproc.v1beta2.JobStatus") + proto.RegisterType((*JobReference)(nil), "google.cloud.dataproc.v1beta2.JobReference") + proto.RegisterType((*YarnApplication)(nil), "google.cloud.dataproc.v1beta2.YarnApplication") + proto.RegisterType((*Job)(nil), "google.cloud.dataproc.v1beta2.Job") + proto.RegisterType((*JobScheduling)(nil), "google.cloud.dataproc.v1beta2.JobScheduling") + proto.RegisterType((*SubmitJobRequest)(nil), "google.cloud.dataproc.v1beta2.SubmitJobRequest") + proto.RegisterType((*GetJobRequest)(nil), "google.cloud.dataproc.v1beta2.GetJobRequest") + proto.RegisterType((*ListJobsRequest)(nil), "google.cloud.dataproc.v1beta2.ListJobsRequest") + proto.RegisterType((*UpdateJobRequest)(nil), "google.cloud.dataproc.v1beta2.UpdateJobRequest") + proto.RegisterType((*ListJobsResponse)(nil), "google.cloud.dataproc.v1beta2.ListJobsResponse") + proto.RegisterType((*CancelJobRequest)(nil), "google.cloud.dataproc.v1beta2.CancelJobRequest") + proto.RegisterType((*DeleteJobRequest)(nil), "google.cloud.dataproc.v1beta2.DeleteJobRequest") + proto.RegisterEnum("google.cloud.dataproc.v1beta2.LoggingConfig_Level", LoggingConfig_Level_name, LoggingConfig_Level_value) + proto.RegisterEnum("google.cloud.dataproc.v1beta2.JobStatus_State", JobStatus_State_name, JobStatus_State_value) + proto.RegisterEnum("google.cloud.dataproc.v1beta2.JobStatus_Substate", JobStatus_Substate_name, JobStatus_Substate_value) + proto.RegisterEnum("google.cloud.dataproc.v1beta2.YarnApplication_State", YarnApplication_State_name, YarnApplication_State_value) + proto.RegisterEnum("google.cloud.dataproc.v1beta2.ListJobsRequest_JobStateMatcher", ListJobsRequest_JobStateMatcher_name, ListJobsRequest_JobStateMatcher_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. 
+const _ = grpc.SupportPackageIsVersion4 + +// Client API for JobController service + +type JobControllerClient interface { + // Submits a job to a cluster. + SubmitJob(ctx context.Context, in *SubmitJobRequest, opts ...grpc.CallOption) (*Job, error) + // Gets the resource representation for a job in a project. + GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*Job, error) + // Lists regions/{region}/jobs in a project. + ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error) + // Updates a job in a project. + UpdateJob(ctx context.Context, in *UpdateJobRequest, opts ...grpc.CallOption) (*Job, error) + // Starts a job cancellation request. To access the job resource + // after cancellation, call + // [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or + // [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get). + CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*Job, error) + // Deletes the job from the project. If the job is active, the delete fails, + // and the response returns `FAILED_PRECONDITION`. + DeleteJob(ctx context.Context, in *DeleteJobRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) +} + +type jobControllerClient struct { + cc *grpc.ClientConn +} + +func NewJobControllerClient(cc *grpc.ClientConn) JobControllerClient { + return &jobControllerClient{cc} +} + +func (c *jobControllerClient) SubmitJob(ctx context.Context, in *SubmitJobRequest, opts ...grpc.CallOption) (*Job, error) { + out := new(Job) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.JobController/SubmitJob", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *jobControllerClient) GetJob(ctx context.Context, in *GetJobRequest, opts ...grpc.CallOption) (*Job, error) { + out := new(Job) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.JobController/GetJob", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *jobControllerClient) ListJobs(ctx context.Context, in *ListJobsRequest, opts ...grpc.CallOption) (*ListJobsResponse, error) { + out := new(ListJobsResponse) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.JobController/ListJobs", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *jobControllerClient) UpdateJob(ctx context.Context, in *UpdateJobRequest, opts ...grpc.CallOption) (*Job, error) { + out := new(Job) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.JobController/UpdateJob", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *jobControllerClient) CancelJob(ctx context.Context, in *CancelJobRequest, opts ...grpc.CallOption) (*Job, error) { + out := new(Job) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.JobController/CancelJob", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *jobControllerClient) DeleteJob(ctx context.Context, in *DeleteJobRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.JobController/DeleteJob", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for JobController service + +type JobControllerServer interface { + // Submits a job to a cluster. 
+ SubmitJob(context.Context, *SubmitJobRequest) (*Job, error) + // Gets the resource representation for a job in a project. + GetJob(context.Context, *GetJobRequest) (*Job, error) + // Lists regions/{region}/jobs in a project. + ListJobs(context.Context, *ListJobsRequest) (*ListJobsResponse, error) + // Updates a job in a project. + UpdateJob(context.Context, *UpdateJobRequest) (*Job, error) + // Starts a job cancellation request. To access the job resource + // after cancellation, call + // [regions/{region}/jobs.list](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/list) or + // [regions/{region}/jobs.get](/dataproc/docs/reference/rest/v1beta2/projects.regions.jobs/get). + CancelJob(context.Context, *CancelJobRequest) (*Job, error) + // Deletes the job from the project. If the job is active, the delete fails, + // and the response returns `FAILED_PRECONDITION`. + DeleteJob(context.Context, *DeleteJobRequest) (*google_protobuf2.Empty, error) +} + +func RegisterJobControllerServer(s *grpc.Server, srv JobControllerServer) { + s.RegisterService(&_JobController_serviceDesc, srv) +} + +func _JobController_SubmitJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(SubmitJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(JobControllerServer).SubmitJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1beta2.JobController/SubmitJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(JobControllerServer).SubmitJob(ctx, req.(*SubmitJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _JobController_GetJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(JobControllerServer).GetJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1beta2.JobController/GetJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(JobControllerServer).GetJob(ctx, req.(*GetJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _JobController_ListJobs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListJobsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(JobControllerServer).ListJobs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1beta2.JobController/ListJobs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(JobControllerServer).ListJobs(ctx, req.(*ListJobsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _JobController_UpdateJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(JobControllerServer).UpdateJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.cloud.dataproc.v1beta2.JobController/UpdateJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(JobControllerServer).UpdateJob(ctx, req.(*UpdateJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _JobController_CancelJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CancelJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(JobControllerServer).CancelJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1beta2.JobController/CancelJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(JobControllerServer).CancelJob(ctx, req.(*CancelJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _JobController_DeleteJob_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteJobRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(JobControllerServer).DeleteJob(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1beta2.JobController/DeleteJob", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(JobControllerServer).DeleteJob(ctx, req.(*DeleteJobRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _JobController_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.dataproc.v1beta2.JobController", + HandlerType: (*JobControllerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "SubmitJob", + Handler: _JobController_SubmitJob_Handler, + }, + { + MethodName: "GetJob", + Handler: _JobController_GetJob_Handler, + }, + { + MethodName: "ListJobs", + Handler: _JobController_ListJobs_Handler, + }, + { + MethodName: "UpdateJob", + Handler: _JobController_UpdateJob_Handler, + }, + { + MethodName: "CancelJob", + Handler: _JobController_CancelJob_Handler, + }, + { + MethodName: "DeleteJob", + Handler: _JobController_DeleteJob_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/cloud/dataproc/v1beta2/jobs.proto", +} + +func init() { proto.RegisterFile("google/cloud/dataproc/v1beta2/jobs.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 2294 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x5a, 0xcd, 0x6f, 0xdb, 0xc8, + 0x15, 0xb7, 0xbe, 0xc5, 0x27, 0x4b, 0xa6, 0x67, 0x93, 0xad, 0xa0, 0x74, 0xb1, 0x5e, 0x02, 0x9b, + 0xba, 0xd9, 0x42, 0x42, 0xd4, 0x34, 0x9b, 0x4d, 0xba, 0xdd, 0xc8, 0x12, 0x15, 0xc9, 0x55, 0x64, + 0x2d, 0x25, 0x25, 0xdd, 0x2d, 0x0a, 0x2e, 0x25, 0x8d, 0x65, 0xca, 0x14, 0xc9, 0x70, 0x48, 0x37, + 0xda, 0x20, 0x28, 0xd0, 0x4b, 0x0f, 0x3d, 0xf6, 0x52, 0xa0, 0x40, 0x81, 0xde, 0xba, 0x40, 0x2f, + 0xbd, 0xf6, 0x1f, 0x28, 0x7a, 0x69, 0x0f, 0xfb, 0x27, 0xb4, 0x87, 0x1e, 0x7b, 0xea, 0xb9, 0x98, + 0x19, 0x52, 0x96, 0x64, 0x27, 0xa2, 0xe3, 0x7e, 0x65, 0x4f, 0x26, 0xdf, 0xd7, 0xbc, 0x99, 0xdf, + 0x6f, 0xde, 0xbc, 0xa1, 0x0c, 0xbb, 0x63, 0xcb, 0x1a, 0x1b, 0xb8, 0x34, 0x34, 0x2c, 0x6f, 0x54, + 0x1a, 0x69, 0xae, 0x66, 0x3b, 0xd6, 0xb0, 0x74, 0x72, 0x73, 0x80, 0x5d, 0xad, 0x5c, 0x9a, 0x58, + 0x03, 0x52, 0xb4, 0x1d, 0xcb, 0xb5, 0xd0, 0x5b, 0xdc, 0xb2, 0xc8, 0x2c, 0x8b, 0x81, 0x65, 0xd1, + 0xb7, 0x2c, 
0x7c, 0xdd, 0x0f, 0xa4, 0xd9, 0x7a, 0x49, 0x33, 0x4d, 0xcb, 0xd5, 0x5c, 0xdd, 0x32, + 0x7d, 0xe7, 0xc2, 0x35, 0x5f, 0xcb, 0xde, 0x06, 0xde, 0x61, 0x09, 0x4f, 0x6d, 0x77, 0xe6, 0x2b, + 0x77, 0x56, 0x95, 0x87, 0x3a, 0x36, 0x46, 0xea, 0x54, 0x23, 0xc7, 0xbe, 0xc5, 0xdb, 0xab, 0x16, + 0xae, 0x3e, 0xc5, 0xc4, 0xd5, 0xa6, 0x36, 0x37, 0x90, 0xfe, 0x1e, 0x85, 0x6c, 0xcb, 0x1a, 0x8f, + 0x75, 0x73, 0x5c, 0xb5, 0xcc, 0x43, 0x7d, 0x8c, 0xa6, 0xb0, 0x3d, 0x72, 0xf4, 0x13, 0xec, 0xa8, + 0x86, 0x35, 0x56, 0x0d, 0x7c, 0x82, 0x0d, 0x92, 0x8f, 0xee, 0xc4, 0x76, 0x33, 0xe5, 0x4a, 0xf1, + 0xa5, 0x53, 0x29, 0x2e, 0x05, 0x2a, 0xd6, 0x58, 0x94, 0x96, 0x35, 0x6e, 0xb1, 0x18, 0xb2, 0xe9, + 0x3a, 0x33, 0x65, 0x6b, 0xb4, 0x2c, 0x2d, 0x9c, 0xc0, 0x95, 0xf3, 0x0c, 0x91, 0x08, 0xb1, 0x63, + 0x3c, 0xcb, 0x47, 0x76, 0x22, 0xbb, 0x82, 0x42, 0x1f, 0x51, 0x03, 0x12, 0x27, 0x9a, 0xe1, 0xe1, + 0x7c, 0x74, 0x27, 0xb2, 0x9b, 0x2b, 0x97, 0x2f, 0x94, 0x0c, 0x0b, 0xad, 0xf0, 0x00, 0x77, 0xa3, + 0x77, 0x22, 0x92, 0x0d, 0x09, 0x26, 0x43, 0x57, 0x61, 0xbb, 0x25, 0x3f, 0x92, 0x5b, 0x6a, 0xbf, + 0xdd, 0xed, 0xc8, 0xd5, 0x66, 0xbd, 0x29, 0xd7, 0xc4, 0x0d, 0x94, 0x82, 0x58, 0xa5, 0xd5, 0x12, + 0x23, 0x48, 0x80, 0x44, 0x4f, 0xa9, 0x54, 0x65, 0x31, 0x4a, 0x1f, 0x6b, 0xf2, 0x5e, 0xff, 0x81, + 0x18, 0x43, 0x69, 0x88, 0x37, 0xdb, 0xf5, 0x03, 0x31, 0x4e, 0x9f, 0x1e, 0x57, 0x94, 0xb6, 0x98, + 0xa0, 0x6a, 0x59, 0x51, 0x0e, 0x14, 0x31, 0x49, 0x1f, 0xeb, 0x95, 0x5e, 0xa5, 0x25, 0xa6, 0x68, + 0xa0, 0x83, 0x7a, 0x5d, 0x4c, 0x4b, 0x7f, 0x8a, 0x81, 0xd0, 0xd0, 0x46, 0x96, 0x65, 0xef, 0x5b, + 0x03, 0xf4, 0x1e, 0x6c, 0x4f, 0x35, 0xdd, 0x54, 0x27, 0x9a, 0xa3, 0x1e, 0xea, 0x06, 0x56, 0x3d, + 0x47, 0xe7, 0xb3, 0x6d, 0x6c, 0x28, 0x39, 0xaa, 0xda, 0xd7, 0x9c, 0xba, 0x6e, 0xe0, 0xbe, 0xa3, + 0xa3, 0xb7, 0x01, 0x98, 0xf1, 0xd0, 0xd0, 0x08, 0x61, 0xf3, 0xa7, 0x56, 0x02, 0x95, 0x55, 0xa9, + 0x08, 0x21, 0x88, 0x6b, 0xce, 0x98, 0xe4, 0x63, 0x3b, 0xb1, 0x5d, 0x41, 0x61, 0xcf, 0x48, 0x82, + 0xec, 0x62, 0x70, 0x92, 0x8f, 0x33, 0x65, 0x66, 0x32, 0x8f, 0x4b, 0xd0, 0x35, 0x10, 0x4e, 0xf5, + 0x09, 0xa6, 0x4f, 0x1f, 0x06, 0xca, 0x77, 0x60, 0x53, 0x73, 0x86, 0x47, 0xfa, 0x89, 0xaf, 0x4f, + 0x72, 0x7f, 0x5f, 0xc6, 0x4c, 0x7e, 0x00, 0x60, 0x3b, 0x96, 0x8d, 0x1d, 0x57, 0xc7, 0x24, 0x9f, + 0x62, 0x2c, 0xb9, 0xb3, 0x06, 0x98, 0xf9, 0x1a, 0x14, 0x3b, 0x73, 0x57, 0x4e, 0x8e, 0x85, 0x58, + 0xa8, 0x0b, 0x39, 0x83, 0x23, 0xa8, 0x0e, 0x19, 0x84, 0xf9, 0xf4, 0x4e, 0x64, 0x37, 0x53, 0xfe, + 0xd6, 0x45, 0x60, 0x57, 0xb2, 0xc6, 0xe2, 0x6b, 0xe1, 0x43, 0xd8, 0x5a, 0x19, 0xf3, 0x1c, 0x9e, + 0x5d, 0x59, 0xe4, 0x99, 0xb0, 0xc0, 0x99, 0xbd, 0x34, 0x24, 0x39, 0x7d, 0xa5, 0x3f, 0xc6, 0x20, + 0xdd, 0xb5, 0x35, 0xe7, 0xf8, 0xab, 0x03, 0xe5, 0xe3, 0x73, 0xa0, 0x7c, 0x7f, 0xcd, 0x62, 0x07, + 0x4b, 0xf0, 0x1a, 0x23, 0xf9, 0xe7, 0x18, 0x40, 0x67, 0x36, 0xc7, 0xb2, 0x04, 0x57, 0x18, 0x3c, + 0xf6, 0xcc, 0x3d, 0xb2, 0xcc, 0x15, 0x38, 0x15, 0x86, 0x73, 0x87, 0xa9, 0x02, 0x3c, 0x03, 0xb8, + 0xa2, 0x0b, 0x70, 0xed, 0x82, 0xb8, 0xe2, 0x1f, 0xc0, 0x99, 0xb3, 0x17, 0x9d, 0xff, 0x3b, 0xc0, + 0x7e, 0x72, 0x0e, 0xb0, 0x1f, 0xac, 0x59, 0xfb, 0xd3, 0x15, 0x79, 0xdd, 0xa0, 0x95, 0xde, 0x05, + 0xe1, 0x63, 0x0f, 0x3b, 0xb3, 0x96, 0x4e, 0x5c, 0x94, 0x87, 0xd4, 0x13, 0x0f, 0x3b, 0x74, 0xe2, + 0x11, 0xb6, 0x32, 0xc1, 0xab, 0xf4, 0xeb, 0x38, 0xa4, 0x1a, 0xfa, 0x09, 0xa6, 0xa0, 0x5f, 0x87, + 0x1c, 0x15, 0xcf, 0xce, 0xee, 0xde, 0x4d, 0x26, 0x0f, 0xb0, 0x6e, 0x02, 0x70, 0x3b, 0x43, 0x27, + 0x2e, 0x1b, 0x39, 0x53, 0xde, 0x5d, 0x33, 0xd5, 0x79, 0x2e, 0x74, 0x97, 0x3f, 0x99, 0x27, 0x56, + 0x84, 0x37, 0x86, 0x96, 0xe9, 0xea, 
0xa6, 0x87, 0x55, 0xca, 0x13, 0x4d, 0x37, 0x3c, 0x07, 0xe7, + 0x63, 0x3b, 0x91, 0xdd, 0xb4, 0xb2, 0x1d, 0xa8, 0x0e, 0xcc, 0x3a, 0x57, 0xa0, 0x43, 0x10, 0xc9, + 0xd0, 0xd1, 0x6d, 0x57, 0x3d, 0xd1, 0x1c, 0x5d, 0x1b, 0x18, 0x98, 0x73, 0x25, 0x53, 0xbe, 0xb7, + 0xae, 0xdc, 0xf2, 0x49, 0x16, 0xbb, 0xcc, 0xfd, 0x51, 0xe0, 0xed, 0x1f, 0xc7, 0x64, 0x59, 0x8a, + 0x1e, 0x2d, 0x91, 0x25, 0xc1, 0x46, 0xb8, 0x1d, 0x72, 0x84, 0x97, 0x31, 0xe5, 0x0c, 0xd1, 0x93, + 0x67, 0x88, 0x5e, 0xd8, 0x83, 0x2b, 0xe7, 0x25, 0x79, 0x11, 0xf4, 0x2f, 0x5b, 0x17, 0x84, 0x39, + 0x5f, 0xa4, 0xbf, 0xc4, 0x21, 0xc3, 0x36, 0x41, 0xf7, 0x89, 0xf1, 0x3f, 0x22, 0xc9, 0xe4, 0x1c, + 0xd0, 0x63, 0x0c, 0x92, 0x8f, 0xc2, 0x14, 0x66, 0x9e, 0x78, 0x48, 0xe0, 0x3f, 0x5d, 0x02, 0x9e, + 0x53, 0xeb, 0xee, 0x05, 0x46, 0xb9, 0x10, 0xf8, 0x77, 0xce, 0x56, 0xb9, 0xb3, 0xa5, 0x24, 0x79, + 0xf9, 0x52, 0xf2, 0xff, 0xc5, 0xa8, 0x7f, 0xc4, 0x21, 0xd9, 0xd1, 0xc7, 0xaf, 0x49, 0xc5, 0xc1, + 0x2f, 0xac, 0x38, 0xeb, 0x68, 0xc1, 0xe7, 0x18, 0x92, 0x77, 0xfd, 0x73, 0x0a, 0xce, 0x77, 0xc2, + 0x0d, 0x70, 0xc9, 0x7a, 0x73, 0x0e, 0xe5, 0x52, 0x5f, 0x35, 0xca, 0xf5, 0x60, 0x73, 0xdf, 0x1a, + 0x74, 0x0c, 0x6d, 0x88, 0xa7, 0xd8, 0x74, 0x69, 0xbb, 0x30, 0x34, 0x3c, 0xe2, 0x62, 0x47, 0x35, + 0xb5, 0x29, 0xf6, 0xe3, 0x65, 0x7c, 0x59, 0x5b, 0x9b, 0xe2, 0x45, 0x13, 0xcf, 0xd3, 0x47, 0x7e, + 0xf8, 0xc0, 0xa4, 0xef, 0xe9, 0x23, 0xe9, 0x9f, 0x31, 0x10, 0xf6, 0xad, 0x41, 0xd7, 0xd5, 0x5c, + 0x8f, 0xa0, 0x1a, 0x24, 0x88, 0xab, 0xb9, 0x3c, 0x58, 0xae, 0x5c, 0x5c, 0xb3, 0x7a, 0x73, 0xc7, + 0x22, 0xfd, 0x83, 0x15, 0xee, 0x4c, 0x4f, 0xea, 0x11, 0x76, 0x35, 0xdd, 0xf0, 0x9b, 0x62, 0x25, + 0x78, 0x45, 0x35, 0x10, 0x99, 0x89, 0x4a, 0x5c, 0xcd, 0x71, 0x55, 0x7a, 0x83, 0xf5, 0x6b, 0x43, + 0x21, 0x18, 0x2a, 0xb8, 0xde, 0x16, 0x7b, 0xc1, 0xf5, 0x56, 0xc9, 0x31, 0x9f, 0x2e, 0x75, 0xa1, + 0x42, 0xf4, 0x10, 0xd2, 0xc4, 0x1b, 0xf0, 0x44, 0x53, 0x2c, 0xd1, 0x9b, 0xe1, 0x13, 0xf5, 0x1d, + 0x95, 0x79, 0x08, 0xe9, 0x8b, 0x08, 0x24, 0x58, 0xfe, 0xf4, 0xfe, 0xd8, 0xed, 0x55, 0x7a, 0xf2, + 0xca, 0xfd, 0x31, 0x03, 0xa9, 0x8e, 0xdc, 0xae, 0x35, 0xdb, 0x0f, 0xc4, 0x08, 0xca, 0x01, 0x74, + 0xe5, 0x5e, 0xbf, 0xa3, 0xd6, 0x0e, 0xda, 0xb2, 0x98, 0xa6, 0x4a, 0xa5, 0xdf, 0x6e, 0x53, 0x65, + 0x14, 0x21, 0xc8, 0x55, 0x2b, 0xed, 0xaa, 0xdc, 0x52, 0x03, 0x87, 0xd8, 0x82, 0xac, 0xdb, 0xab, + 0x28, 0x3d, 0xb9, 0x26, 0xa6, 0x50, 0x16, 0x04, 0x2e, 0x6b, 0xc9, 0x35, 0x7e, 0xef, 0x64, 0xd1, + 0x96, 0xee, 0x9d, 0x6f, 0xc0, 0x56, 0xa5, 0xd7, 0x93, 0x1f, 0x76, 0x7a, 0x6a, 0xbd, 0xd2, 0x6c, + 0xf5, 0x15, 0x59, 0x14, 0xa4, 0x06, 0xa4, 0x83, 0x19, 0xa0, 0x2d, 0xc8, 0x2c, 0xe7, 0x99, 0x05, + 0xa1, 0xdb, 0xdf, 0x7b, 0xd8, 0xec, 0xd1, 0x41, 0x22, 0x08, 0x20, 0xf9, 0x71, 0x5f, 0xee, 0xcb, + 0x35, 0x31, 0x8a, 0x44, 0xd8, 0xec, 0xf6, 0x2a, 0x2d, 0x99, 0xe6, 0xd0, 0xeb, 0x77, 0xc5, 0x98, + 0x54, 0x63, 0x74, 0x52, 0xf0, 0x21, 0x76, 0xb0, 0x39, 0xc4, 0xe8, 0x2d, 0xb6, 0x79, 0x27, 0x78, + 0xe8, 0xaa, 0xfa, 0xc8, 0x27, 0x93, 0xe0, 0x4b, 0x9a, 0x23, 0x74, 0x15, 0x92, 0x13, 0x6b, 0xa0, + 0xce, 0x49, 0x94, 0x98, 0x58, 0x83, 0xe6, 0x48, 0xfa, 0x43, 0x14, 0xb6, 0x3e, 0xd1, 0x1c, 0xb3, + 0x62, 0xdb, 0x86, 0x3e, 0x64, 0x9f, 0x3b, 0x68, 0x1b, 0xbd, 0x40, 0x48, 0xf6, 0x8c, 0xf6, 0x03, + 0x62, 0xf1, 0x0b, 0xff, 0xad, 0x35, 0x78, 0xad, 0x84, 0x5c, 0xa6, 0x57, 0x01, 0xd2, 0xb6, 0x63, + 0x8d, 0x1d, 0x4c, 0x08, 0x2b, 0x79, 0x51, 0x65, 0xfe, 0x4e, 0x19, 0xef, 0x3a, 0xda, 0xf0, 0x98, + 0x16, 0x02, 0xcf, 0x31, 0xf2, 0x71, 0xce, 0xf8, 0x40, 0xd6, 0x77, 0x0c, 0xe9, 0x67, 0xeb, 0xe0, + 0x4e, 0x41, 0xac, 0x2d, 0x3f, 0xe6, 0x50, 0xb7, 0xe5, 0xc7, 
0x6a, 0xb7, 0xf2, 0x88, 0xa3, 0xbb, + 0xb4, 0xbe, 0x31, 0xb4, 0x09, 0xe9, 0x4a, 0xb5, 0x2a, 0x77, 0x7a, 0x0c, 0xc3, 0x05, 0x1e, 0x24, + 0xa8, 0xaa, 0xde, 0x6c, 0x37, 0xbb, 0x0d, 0xb9, 0x26, 0x26, 0x29, 0x10, 0x14, 0x41, 0x86, 0x3c, + 0x40, 0xf2, 0xfb, 0x4d, 0x06, 0x7b, 0x5a, 0xfa, 0xa5, 0x00, 0x31, 0x7a, 0x82, 0x34, 0x41, 0x70, + 0x02, 0x1c, 0xd8, 0xaa, 0x65, 0xca, 0xef, 0xad, 0x27, 0xf4, 0x1c, 0x3a, 0xe5, 0xd4, 0x9b, 0x86, + 0xb2, 0x83, 0x0a, 0xe1, 0x9f, 0x31, 0x21, 0x42, 0xcd, 0x8b, 0x8a, 0x72, 0xea, 0x4d, 0xcf, 0xab, + 0x23, 0x76, 0xbd, 0x57, 0x27, 0xd6, 0x80, 0x2d, 0xf4, 0xfa, 0xf3, 0x6a, 0xfe, 0x3d, 0x80, 0x9e, + 0x57, 0x47, 0xf3, 0x0f, 0x24, 0x75, 0x10, 0x08, 0xed, 0x2f, 0x58, 0xa4, 0x38, 0x8b, 0xf4, 0x8d, + 0x90, 0xd7, 0xd1, 0xc6, 0x86, 0x92, 0x26, 0xc1, 0x8d, 0xae, 0x05, 0x19, 0x7b, 0x76, 0x1a, 0x29, + 0xc1, 0x22, 0x7d, 0x33, 0xf4, 0xfd, 0xa7, 0xb1, 0xa1, 0x80, 0xef, 0x4f, 0xa3, 0x55, 0x21, 0xcd, + 0x2e, 0x5b, 0x34, 0x14, 0x2f, 0x42, 0xd7, 0xc3, 0x75, 0xc7, 0x8d, 0x0d, 0x25, 0x75, 0xe4, 0xdf, + 0x37, 0xee, 0x43, 0xca, 0xd6, 0xc7, 0x2c, 0x06, 0x3f, 0x71, 0xde, 0x0d, 0x75, 0xe0, 0x35, 0x36, + 0x94, 0xa4, 0xcd, 0xfb, 0x87, 0x0e, 0x64, 0xf9, 0x94, 0xc8, 0x13, 0x83, 0xc5, 0xd9, 0x64, 0x71, + 0x6e, 0x84, 0x6f, 0xd8, 0x1a, 0x1b, 0x4a, 0x86, 0x2c, 0xb4, 0xb7, 0xf7, 0x21, 0x49, 0x58, 0xb5, + 0xf3, 0xaf, 0x70, 0xbb, 0x61, 0xab, 0xa3, 0xe2, 0xfb, 0xa1, 0x03, 0xc8, 0xf1, 0x27, 0xf5, 0x48, + 0x27, 0xae, 0xe5, 0xcc, 0xf2, 0x59, 0x76, 0x9a, 0x87, 0x8f, 0x94, 0xe5, 0xfe, 0x0d, 0xee, 0x8e, + 0x7e, 0x08, 0xdb, 0x33, 0xcd, 0x31, 0x55, 0xed, 0x74, 0x53, 0x93, 0xbc, 0xc0, 0x62, 0x16, 0x2f, + 0x56, 0x0b, 0x14, 0x71, 0xb6, 0x2c, 0x20, 0xe8, 0x1e, 0x14, 0xfc, 0xcf, 0x9c, 0x96, 0xe7, 0xda, + 0x9e, 0xab, 0x3a, 0x98, 0x58, 0x9e, 0x33, 0xe4, 0xdd, 0xd8, 0x36, 0x2b, 0x01, 0x5f, 0xe3, 0x16, + 0x07, 0xcc, 0x40, 0xf1, 0xf5, 0xb4, 0x2d, 0x7b, 0x1f, 0xf2, 0xbe, 0x33, 0xed, 0x9b, 0x1c, 0xcb, + 0x60, 0x8d, 0x06, 0x61, 0xae, 0x5b, 0xcc, 0xf5, 0x2a, 0xd7, 0x57, 0xb9, 0x9a, 0xb6, 0x1c, 0x84, + 0x3a, 0xd6, 0x21, 0x69, 0x68, 0x03, 0x6c, 0x90, 0x3c, 0x0a, 0x35, 0x0f, 0xda, 0xe6, 0xb4, 0x98, + 0x03, 0x6f, 0x71, 0x7c, 0x6f, 0xd4, 0x02, 0x20, 0xc3, 0x23, 0x3c, 0xf2, 0x0c, 0xdd, 0x1c, 0xe7, + 0xaf, 0x84, 0x6a, 0x5b, 0xe8, 0x3a, 0xcf, 0x7d, 0x94, 0x05, 0xff, 0xc2, 0x07, 0x90, 0x59, 0x18, + 0xe4, 0x42, 0xbd, 0x06, 0x40, 0xda, 0x9d, 0xd9, 0x6c, 0x3f, 0x48, 0x7b, 0x90, 0x5d, 0x1a, 0x03, + 0xdd, 0x84, 0xab, 0x53, 0xed, 0x69, 0xd0, 0x6a, 0x12, 0xd5, 0xc6, 0x8e, 0x7a, 0x64, 0x79, 0x0e, + 0x0b, 0x9d, 0x50, 0xd0, 0x54, 0x7b, 0xea, 0x77, 0x9b, 0xa4, 0x83, 0x9d, 0x86, 0xe5, 0x39, 0xd2, + 0x4f, 0x40, 0xec, 0x7a, 0x83, 0xa9, 0xee, 0xb2, 0x62, 0xf5, 0xc4, 0xc3, 0xc4, 0x5d, 0x77, 0xca, + 0xbc, 0x09, 0x49, 0x07, 0x8f, 0x75, 0xcb, 0x64, 0xf5, 0x46, 0x50, 0xfc, 0x37, 0x74, 0x0b, 0x62, + 0x74, 0x67, 0xf0, 0x82, 0x26, 0x85, 0xa8, 0x8d, 0xd4, 0x5c, 0xfa, 0x11, 0x64, 0x1f, 0xe0, 0x7f, + 0xc3, 0xe8, 0x2f, 0x38, 0xfb, 0xfe, 0x1a, 0x85, 0x2d, 0xda, 0x8e, 0xef, 0x5b, 0x03, 0x72, 0xe1, + 0x11, 0x92, 0x4b, 0x23, 0x5c, 0x03, 0xc1, 0xd6, 0xc6, 0x58, 0x25, 0xfa, 0xe7, 0x1c, 0x98, 0x84, + 0x92, 0xa6, 0x82, 0xae, 0xfe, 0x39, 0x3f, 0x99, 0xa9, 0xd2, 0xb5, 0x8e, 0x71, 0x90, 0x1a, 0x33, + 0xef, 0x51, 0xc1, 0x99, 0x3e, 0x30, 0x7e, 0xb6, 0x0f, 0x9c, 0xc0, 0x36, 0x9d, 0x00, 0x6f, 0xbd, + 0xa6, 0x9a, 0x3b, 0x3c, 0xc2, 0x0e, 0xab, 0x9e, 0xb9, 0xf2, 0xf7, 0xd6, 0x35, 0xc8, 0xcb, 0x13, + 0x0c, 0x76, 0x38, 0x7e, 0xc8, 0xa3, 0x28, 0x5b, 0x93, 0x65, 0x01, 0x9d, 0xe2, 0xa1, 0x6e, 0xb8, + 0xd8, 0x61, 0xf5, 0x50, 0x50, 0xfc, 0x37, 0xe9, 0x36, 0x6c, 0xad, 0xf8, 0x06, 0xdf, 
0xe5, 0x37, + 0xe8, 0xa1, 0x58, 0xa9, 0xf6, 0x9a, 0x8f, 0x64, 0xff, 0xd0, 0x3d, 0x68, 0xab, 0xfe, 0x7b, 0x54, + 0xfa, 0x32, 0x02, 0x62, 0xdf, 0x1e, 0x69, 0x2e, 0x7e, 0x15, 0x20, 0xa3, 0x2f, 0x00, 0x32, 0xb6, + 0x00, 0x64, 0xc0, 0xae, 0xf8, 0x85, 0xd8, 0x85, 0xee, 0x41, 0xc6, 0x63, 0x79, 0xb1, 0x1f, 0x69, + 0xfc, 0xc3, 0xe8, 0x6c, 0x1b, 0x5b, 0xd7, 0xb1, 0x31, 0x7a, 0xa8, 0x91, 0x63, 0x05, 0xb8, 0x39, + 0x7d, 0x96, 0x1c, 0x10, 0x4f, 0x57, 0x96, 0xd8, 0x96, 0x49, 0x30, 0xba, 0x0d, 0xf1, 0x89, 0x35, + 0xe0, 0x5f, 0xb7, 0xc2, 0xe5, 0xc1, 0xec, 0xd1, 0x75, 0xd8, 0x32, 0xf1, 0x53, 0x57, 0x5d, 0x20, + 0x09, 0x9f, 0x76, 0x96, 0x8a, 0x3b, 0x01, 0x51, 0xa4, 0xcf, 0x40, 0xac, 0x6a, 0xe6, 0x10, 0x1b, + 0xff, 0xb1, 0x1d, 0xf1, 0x19, 0x88, 0x35, 0x6c, 0xe0, 0x57, 0x83, 0x2a, 0xcc, 0x08, 0xe5, 0x9f, + 0xa7, 0x59, 0x61, 0xf2, 0x6b, 0xb1, 0x81, 0x1d, 0xf4, 0xdb, 0x08, 0x08, 0xf3, 0x32, 0x83, 0x4a, + 0xeb, 0x4e, 0xcd, 0x95, 0x82, 0x54, 0x08, 0xb1, 0xcc, 0x52, 0xfd, 0xa7, 0x5f, 0xfe, 0xed, 0x17, + 0xd1, 0xfb, 0xd2, 0xbd, 0xf9, 0x4f, 0x82, 0x7e, 0xfe, 0xa4, 0xf4, 0xec, 0x74, 0x6e, 0xcf, 0x4b, + 0x3c, 0x75, 0x52, 0x7a, 0xc6, 0x1f, 0x9e, 0xb3, 0x5f, 0x0e, 0xef, 0x12, 0x36, 0xe4, 0xdd, 0xc8, + 0x0d, 0xf4, 0x9b, 0x08, 0x24, 0x79, 0x41, 0x42, 0xeb, 0x0a, 0xfc, 0x52, 0xdd, 0x0a, 0x95, 0xa4, + 0xcc, 0x92, 0xfc, 0x08, 0x7d, 0xf8, 0x2a, 0x49, 0x96, 0x9e, 0xf1, 0xc5, 0x7e, 0x8e, 0xbe, 0x88, + 0x40, 0x3a, 0x60, 0x26, 0x2a, 0x5e, 0xac, 0x38, 0x14, 0x4a, 0xa1, 0xed, 0x39, 0xe5, 0xa5, 0xef, + 0xb2, 0xa4, 0x6f, 0xa3, 0x5b, 0xaf, 0x92, 0x34, 0xfa, 0x5d, 0x04, 0x84, 0x79, 0x69, 0x58, 0x0b, + 0xfd, 0x6a, 0x11, 0x09, 0xb5, 0xaa, 0xfb, 0x2c, 0xc1, 0x5a, 0xf9, 0x72, 0xab, 0x7a, 0x97, 0x15, + 0x8c, 0xdf, 0x47, 0x40, 0x98, 0x6f, 0xc0, 0xb5, 0xe9, 0xae, 0x6e, 0xd5, 0x50, 0xe9, 0x1e, 0xb0, + 0x74, 0x9b, 0x52, 0xed, 0x72, 0xe9, 0x0e, 0xd9, 0xd8, 0x94, 0xb2, 0xbf, 0x8a, 0x80, 0x30, 0xdf, + 0xd2, 0x6b, 0x73, 0x5e, 0xdd, 0xfc, 0x85, 0x37, 0xcf, 0x94, 0x43, 0x79, 0x6a, 0xbb, 0xb3, 0x80, + 0xac, 0x37, 0x2e, 0xb7, 0xac, 0x7b, 0x3f, 0x86, 0x77, 0x86, 0xd6, 0xf4, 0xe5, 0x49, 0xed, 0x09, + 0x94, 0x71, 0x1d, 0x3a, 0x7e, 0x27, 0xf2, 0xa9, 0xec, 0xdb, 0x8e, 0x2d, 0x43, 0x33, 0xc7, 0x45, + 0xcb, 0x19, 0x97, 0xc6, 0xd8, 0x64, 0xd9, 0x95, 0xb8, 0x4a, 0xb3, 0x75, 0xf2, 0x82, 0xff, 0x04, + 0xb8, 0x17, 0x08, 0x06, 0x49, 0xe6, 0xf1, 0xed, 0x7f, 0x05, 0x00, 0x00, 0xff, 0xff, 0xc8, 0x83, + 0x17, 0x31, 0x3a, 0x20, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2/operations.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2/operations.pb.go new file mode 100644 index 00000000..55d5a6fa --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2/operations.pb.go @@ -0,0 +1,221 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/dataproc/v1beta2/operations.proto + +package dataproc + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf5 "github.com/golang/protobuf/ptypes/timestamp" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The operation state. +type ClusterOperationStatus_State int32 + +const ( + // Unused. + ClusterOperationStatus_UNKNOWN ClusterOperationStatus_State = 0 + // The operation has been created. + ClusterOperationStatus_PENDING ClusterOperationStatus_State = 1 + // The operation is running. 
+ ClusterOperationStatus_RUNNING ClusterOperationStatus_State = 2 + // The operation is done; either cancelled or completed. + ClusterOperationStatus_DONE ClusterOperationStatus_State = 3 +) + +var ClusterOperationStatus_State_name = map[int32]string{ + 0: "UNKNOWN", + 1: "PENDING", + 2: "RUNNING", + 3: "DONE", +} +var ClusterOperationStatus_State_value = map[string]int32{ + "UNKNOWN": 0, + "PENDING": 1, + "RUNNING": 2, + "DONE": 3, +} + +func (x ClusterOperationStatus_State) String() string { + return proto.EnumName(ClusterOperationStatus_State_name, int32(x)) +} +func (ClusterOperationStatus_State) EnumDescriptor() ([]byte, []int) { + return fileDescriptor2, []int{0, 0} +} + +// The status of the operation. +type ClusterOperationStatus struct { + // Output-only. A message containing the operation state. + State ClusterOperationStatus_State `protobuf:"varint,1,opt,name=state,enum=google.cloud.dataproc.v1beta2.ClusterOperationStatus_State" json:"state,omitempty"` + // Output-only. A message containing the detailed operation state. + InnerState string `protobuf:"bytes,2,opt,name=inner_state,json=innerState" json:"inner_state,omitempty"` + // Output-only.A message containing any operation metadata details. + Details string `protobuf:"bytes,3,opt,name=details" json:"details,omitempty"` + // Output-only. The time this state was entered. + StateStartTime *google_protobuf5.Timestamp `protobuf:"bytes,4,opt,name=state_start_time,json=stateStartTime" json:"state_start_time,omitempty"` +} + +func (m *ClusterOperationStatus) Reset() { *m = ClusterOperationStatus{} } +func (m *ClusterOperationStatus) String() string { return proto.CompactTextString(m) } +func (*ClusterOperationStatus) ProtoMessage() {} +func (*ClusterOperationStatus) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{0} } + +func (m *ClusterOperationStatus) GetState() ClusterOperationStatus_State { + if m != nil { + return m.State + } + return ClusterOperationStatus_UNKNOWN +} + +func (m *ClusterOperationStatus) GetInnerState() string { + if m != nil { + return m.InnerState + } + return "" +} + +func (m *ClusterOperationStatus) GetDetails() string { + if m != nil { + return m.Details + } + return "" +} + +func (m *ClusterOperationStatus) GetStateStartTime() *google_protobuf5.Timestamp { + if m != nil { + return m.StateStartTime + } + return nil +} + +// Metadata describing the operation. +type ClusterOperationMetadata struct { + // Output-only. Name of the cluster for the operation. + ClusterName string `protobuf:"bytes,7,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` + // Output-only. Cluster UUID for the operation. + ClusterUuid string `protobuf:"bytes,8,opt,name=cluster_uuid,json=clusterUuid" json:"cluster_uuid,omitempty"` + // Output-only. Current operation status. + Status *ClusterOperationStatus `protobuf:"bytes,9,opt,name=status" json:"status,omitempty"` + // Output-only. The previous operation status. + StatusHistory []*ClusterOperationStatus `protobuf:"bytes,10,rep,name=status_history,json=statusHistory" json:"status_history,omitempty"` + // Output-only. The operation type. + OperationType string `protobuf:"bytes,11,opt,name=operation_type,json=operationType" json:"operation_type,omitempty"` + // Output-only. Short description of operation. + Description string `protobuf:"bytes,12,opt,name=description" json:"description,omitempty"` + // Output-only. 
Labels associated with the operation + Labels map[string]string `protobuf:"bytes,13,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Output-only. Errors encountered during operation execution. + Warnings []string `protobuf:"bytes,14,rep,name=warnings" json:"warnings,omitempty"` +} + +func (m *ClusterOperationMetadata) Reset() { *m = ClusterOperationMetadata{} } +func (m *ClusterOperationMetadata) String() string { return proto.CompactTextString(m) } +func (*ClusterOperationMetadata) ProtoMessage() {} +func (*ClusterOperationMetadata) Descriptor() ([]byte, []int) { return fileDescriptor2, []int{1} } + +func (m *ClusterOperationMetadata) GetClusterName() string { + if m != nil { + return m.ClusterName + } + return "" +} + +func (m *ClusterOperationMetadata) GetClusterUuid() string { + if m != nil { + return m.ClusterUuid + } + return "" +} + +func (m *ClusterOperationMetadata) GetStatus() *ClusterOperationStatus { + if m != nil { + return m.Status + } + return nil +} + +func (m *ClusterOperationMetadata) GetStatusHistory() []*ClusterOperationStatus { + if m != nil { + return m.StatusHistory + } + return nil +} + +func (m *ClusterOperationMetadata) GetOperationType() string { + if m != nil { + return m.OperationType + } + return "" +} + +func (m *ClusterOperationMetadata) GetDescription() string { + if m != nil { + return m.Description + } + return "" +} + +func (m *ClusterOperationMetadata) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *ClusterOperationMetadata) GetWarnings() []string { + if m != nil { + return m.Warnings + } + return nil +} + +func init() { + proto.RegisterType((*ClusterOperationStatus)(nil), "google.cloud.dataproc.v1beta2.ClusterOperationStatus") + proto.RegisterType((*ClusterOperationMetadata)(nil), "google.cloud.dataproc.v1beta2.ClusterOperationMetadata") + proto.RegisterEnum("google.cloud.dataproc.v1beta2.ClusterOperationStatus_State", ClusterOperationStatus_State_name, ClusterOperationStatus_State_value) +} + +func init() { proto.RegisterFile("google/cloud/dataproc/v1beta2/operations.proto", fileDescriptor2) } + +var fileDescriptor2 = []byte{ + // 537 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x9c, 0x54, 0x5f, 0x8b, 0xd3, 0x4e, + 0x14, 0xfd, 0xa5, 0xff, 0x7b, 0xb3, 0xed, 0xaf, 0x0c, 0x22, 0x43, 0x51, 0x36, 0x5b, 0x10, 0xfa, + 0x94, 0x60, 0x45, 0x58, 0xdd, 0xb7, 0xdd, 0x16, 0x15, 0xdd, 0xb4, 0x66, 0xb7, 0x08, 0x2a, 0x94, + 0x69, 0x33, 0xc6, 0x60, 0x3a, 0x13, 0x66, 0x26, 0x2b, 0x7d, 0xf0, 0x0b, 0xf8, 0x41, 0xfc, 0x9c, + 0x32, 0x33, 0x49, 0xa9, 0xa2, 0x0b, 0xee, 0x53, 0xe7, 0xde, 0x7b, 0xce, 0x99, 0x73, 0xa6, 0x97, + 0x80, 0x9f, 0x70, 0x9e, 0x64, 0x34, 0xd8, 0x64, 0xbc, 0x88, 0x83, 0x98, 0x28, 0x92, 0x0b, 0xbe, + 0x09, 0x6e, 0x1e, 0xaf, 0xa9, 0x22, 0x93, 0x80, 0xe7, 0x54, 0x10, 0x95, 0x72, 0x26, 0xfd, 0x5c, + 0x70, 0xc5, 0xd1, 0x43, 0x8b, 0xf7, 0x0d, 0xde, 0xaf, 0xf0, 0x7e, 0x89, 0x1f, 0x3e, 0x28, 0xe5, + 0x48, 0x9e, 0x06, 0x84, 0x31, 0xae, 0x0e, 0xc9, 0xc3, 0xe3, 0x72, 0x6a, 0xaa, 0x75, 0xf1, 0x29, + 0x50, 0xe9, 0x96, 0x4a, 0x45, 0xb6, 0xb9, 0x05, 0x8c, 0x7e, 0xd4, 0xe0, 0xfe, 0x45, 0x56, 0x48, + 0x45, 0xc5, 0xbc, 0xba, 0xf9, 0x4a, 0x11, 0x55, 0x48, 0xf4, 0x16, 0x9a, 0x52, 0x11, 0x45, 0xb1, + 0xe3, 0x39, 0xe3, 0xfe, 0xe4, 0xcc, 0xbf, 0xd5, 0x88, 0xff, 0x67, 0x15, 0x5f, 0xff, 0xd0, 0xc8, + 0x2a, 0xa1, 0x63, 0x70, 0x53, 0xc6, 0xa8, 0x58, 0x59, 0xe1, 0x9a, 0xe7, 0x8c, 0xbb, 0x11, 0x98, + 0x96, 
0xc1, 0x21, 0x0c, 0xed, 0x98, 0x2a, 0x92, 0x66, 0x12, 0xd7, 0xcd, 0xb0, 0x2a, 0xd1, 0x14, + 0x06, 0x86, 0xa4, 0xa9, 0x42, 0xad, 0x74, 0x0e, 0xdc, 0xf0, 0x9c, 0xb1, 0x3b, 0x19, 0x56, 0xc6, + 0xaa, 0x90, 0xfe, 0x75, 0x15, 0x32, 0xea, 0x1b, 0xce, 0x95, 0xa6, 0xe8, 0xe6, 0xe8, 0x14, 0x9a, + 0xf6, 0x22, 0x17, 0xda, 0xcb, 0xf0, 0x75, 0x38, 0x7f, 0x17, 0x0e, 0xfe, 0xd3, 0xc5, 0x62, 0x16, + 0x4e, 0x5f, 0x85, 0x2f, 0x06, 0x8e, 0x2e, 0xa2, 0x65, 0x18, 0xea, 0xa2, 0x86, 0x3a, 0xd0, 0x98, + 0xce, 0xc3, 0xd9, 0xa0, 0x3e, 0xfa, 0xde, 0x00, 0xfc, 0x7b, 0xc4, 0x4b, 0xaa, 0x88, 0x7e, 0x07, + 0x74, 0x02, 0x47, 0x1b, 0x3b, 0x5b, 0x31, 0xb2, 0xa5, 0xb8, 0x6d, 0xbc, 0xbb, 0x65, 0x2f, 0x24, + 0x5b, 0x7a, 0x08, 0x29, 0x8a, 0x34, 0xc6, 0x9d, 0x5f, 0x20, 0xcb, 0x22, 0x8d, 0xd1, 0x25, 0xb4, + 0xa4, 0x79, 0x34, 0xdc, 0x35, 0xc1, 0x9e, 0xde, 0xe9, 0xc5, 0xa3, 0x52, 0x04, 0x7d, 0x84, 0xbe, + 0x3d, 0xad, 0x3e, 0xa7, 0x52, 0x71, 0xb1, 0xc3, 0xe0, 0xd5, 0xef, 0x2e, 0xdb, 0xb3, 0x62, 0x2f, + 0xad, 0x16, 0x7a, 0x04, 0xfd, 0xfd, 0xaa, 0xae, 0xd4, 0x2e, 0xa7, 0xd8, 0x35, 0x89, 0x7a, 0xfb, + 0xee, 0xf5, 0x2e, 0xa7, 0xc8, 0x03, 0x37, 0xa6, 0x72, 0x23, 0xd2, 0x5c, 0xb7, 0xf0, 0x91, 0x4d, + 0x7d, 0xd0, 0x42, 0x1f, 0xa0, 0x95, 0x91, 0x35, 0xcd, 0x24, 0xee, 0x19, 0x7b, 0x17, 0xff, 0x68, + 0xaf, 0xfa, 0x13, 0xfc, 0x37, 0x46, 0x65, 0xc6, 0x94, 0xd8, 0x45, 0xa5, 0x24, 0x1a, 0x42, 0xe7, + 0x2b, 0x11, 0x2c, 0x65, 0x89, 0xc4, 0x7d, 0xaf, 0x3e, 0xee, 0x46, 0xfb, 0x7a, 0xf8, 0x0c, 0xdc, + 0x03, 0x0a, 0x1a, 0x40, 0xfd, 0x0b, 0xdd, 0x99, 0x65, 0xef, 0x46, 0xfa, 0x88, 0xee, 0x41, 0xf3, + 0x86, 0x64, 0x45, 0xb5, 0xa7, 0xb6, 0x78, 0x5e, 0x3b, 0x75, 0xce, 0xbf, 0xc1, 0xc9, 0x86, 0x6f, + 0x6f, 0x37, 0x7a, 0xfe, 0xff, 0xde, 0xa2, 0x5c, 0xe8, 0xcd, 0x5c, 0x38, 0xef, 0x67, 0x25, 0x23, + 0xe1, 0x19, 0x61, 0x89, 0xcf, 0x45, 0x12, 0x24, 0x94, 0x99, 0xbd, 0x0d, 0xec, 0x88, 0xe4, 0xa9, + 0xfc, 0xcb, 0xa7, 0xe1, 0xac, 0x6a, 0xac, 0x5b, 0x86, 0xf1, 0xe4, 0x67, 0x00, 0x00, 0x00, 0xff, + 0xff, 0x83, 0x10, 0x95, 0x5e, 0x4b, 0x04, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2/workflow_templates.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2/workflow_templates.pb.go new file mode 100644 index 00000000..0ed6987a --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/dataproc/v1beta2/workflow_templates.pb.go @@ -0,0 +1,1526 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/dataproc/v1beta2/workflow_templates.proto + +package dataproc + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_longrunning "google.golang.org/genproto/googleapis/longrunning" +import google_protobuf2 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf5 "github.com/golang/protobuf/ptypes/timestamp" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The operation state. +type WorkflowMetadata_State int32 + +const ( + // Unused. + WorkflowMetadata_UNKNOWN WorkflowMetadata_State = 0 + // The operation has been created. + WorkflowMetadata_PENDING WorkflowMetadata_State = 1 + // The operation is running. + WorkflowMetadata_RUNNING WorkflowMetadata_State = 2 + // The operation is done; either cancelled or completed. 
+ WorkflowMetadata_DONE WorkflowMetadata_State = 3 +) + +var WorkflowMetadata_State_name = map[int32]string{ + 0: "UNKNOWN", + 1: "PENDING", + 2: "RUNNING", + 3: "DONE", +} +var WorkflowMetadata_State_value = map[string]int32{ + "UNKNOWN": 0, + "PENDING": 1, + "RUNNING": 2, + "DONE": 3, +} + +func (x WorkflowMetadata_State) String() string { + return proto.EnumName(WorkflowMetadata_State_name, int32(x)) +} +func (WorkflowMetadata_State) EnumDescriptor() ([]byte, []int) { return fileDescriptor3, []int{5, 0} } + +type WorkflowNode_NodeState int32 + +const ( + WorkflowNode_NODE_STATUS_UNSPECIFIED WorkflowNode_NodeState = 0 + // The node is awaiting prerequisite node to finish. + WorkflowNode_BLOCKED WorkflowNode_NodeState = 1 + // The node is runnable but not running. + WorkflowNode_RUNNABLE WorkflowNode_NodeState = 2 + // The node is running. + WorkflowNode_RUNNING WorkflowNode_NodeState = 3 + // The node completed successfully. + WorkflowNode_COMPLETED WorkflowNode_NodeState = 4 + // The node failed. A node can be marked FAILED because + // its ancestor or peer failed. + WorkflowNode_FAILED WorkflowNode_NodeState = 5 +) + +var WorkflowNode_NodeState_name = map[int32]string{ + 0: "NODE_STATUS_UNSPECIFIED", + 1: "BLOCKED", + 2: "RUNNABLE", + 3: "RUNNING", + 4: "COMPLETED", + 5: "FAILED", +} +var WorkflowNode_NodeState_value = map[string]int32{ + "NODE_STATUS_UNSPECIFIED": 0, + "BLOCKED": 1, + "RUNNABLE": 2, + "RUNNING": 3, + "COMPLETED": 4, + "FAILED": 5, +} + +func (x WorkflowNode_NodeState) String() string { + return proto.EnumName(WorkflowNode_NodeState_name, int32(x)) +} +func (WorkflowNode_NodeState) EnumDescriptor() ([]byte, []int) { return fileDescriptor3, []int{8, 0} } + +// A Cloud Dataproc workflow template resource. +type WorkflowTemplate struct { + // Required. The template id. + Id string `protobuf:"bytes,2,opt,name=id" json:"id,omitempty"` + // Output only. The "resource name" of the template, as described + // in https://cloud.google.com/apis/design/resource_names of the form + // `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Optional. Used to perform a consistent read-modify-write. + // + // This field should be left blank for a `CreateWorkflowTemplate` request. It + // is required for an `UpdateWorkflowTemplate` request, and must match the + // current server version. A typical update template flow would fetch the + // current template with a `GetWorkflowTemplate` request, which will return + // the current template with the `version` field filled in with the + // current server version. The user updates other fields in the template, + // then returns it as part of the `UpdateWorkflowTemplate` request. + Version int32 `protobuf:"varint,3,opt,name=version" json:"version,omitempty"` + // Output only. The time template was created. + CreateTime *google_protobuf5.Timestamp `protobuf:"bytes,4,opt,name=create_time,json=createTime" json:"create_time,omitempty"` + // Output only. The time template was last updated. + UpdateTime *google_protobuf5.Timestamp `protobuf:"bytes,5,opt,name=update_time,json=updateTime" json:"update_time,omitempty"` + // Optional. The labels to associate with this template. These labels + // will be propagated to all jobs and clusters created by the workflow + // instance. + // + // Label **keys** must contain 1 to 63 characters, and must conform to + // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). 
+ // + // Label **values** may be empty, but, if present, must contain 1 to 63 + // characters, and must conform to + // [RFC 1035](https://www.ietf.org/rfc/rfc1035.txt). + // + // No more than 32 labels can be associated with a template. + Labels map[string]string `protobuf:"bytes,6,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Required. WorkflowTemplate scheduling information. + Placement *WorkflowTemplatePlacement `protobuf:"bytes,7,opt,name=placement" json:"placement,omitempty"` + // Required. The Directed Acyclic Graph of Jobs to submit. + Jobs []*OrderedJob `protobuf:"bytes,8,rep,name=jobs" json:"jobs,omitempty"` +} + +func (m *WorkflowTemplate) Reset() { *m = WorkflowTemplate{} } +func (m *WorkflowTemplate) String() string { return proto.CompactTextString(m) } +func (*WorkflowTemplate) ProtoMessage() {} +func (*WorkflowTemplate) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{0} } + +func (m *WorkflowTemplate) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *WorkflowTemplate) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *WorkflowTemplate) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *WorkflowTemplate) GetCreateTime() *google_protobuf5.Timestamp { + if m != nil { + return m.CreateTime + } + return nil +} + +func (m *WorkflowTemplate) GetUpdateTime() *google_protobuf5.Timestamp { + if m != nil { + return m.UpdateTime + } + return nil +} + +func (m *WorkflowTemplate) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *WorkflowTemplate) GetPlacement() *WorkflowTemplatePlacement { + if m != nil { + return m.Placement + } + return nil +} + +func (m *WorkflowTemplate) GetJobs() []*OrderedJob { + if m != nil { + return m.Jobs + } + return nil +} + +// Specifies workflow execution target. +// +// Either `managed_cluster` or `cluster_selector` is required. 
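+//
+// As an illustrative sketch only (the cluster name below is a placeholder, and
+// a real ManagedCluster would also carry its required Config), a caller that
+// wants an ephemeral managed cluster might populate the oneof like this:
+//
+//	placement := &WorkflowTemplatePlacement{
+//		Placement: &WorkflowTemplatePlacement_ManagedCluster{
+//			ManagedCluster: &ManagedCluster{ClusterName: "ephemeral-cluster"},
+//		},
+//	}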
+type WorkflowTemplatePlacement struct { + // Types that are valid to be assigned to Placement: + // *WorkflowTemplatePlacement_ManagedCluster + // *WorkflowTemplatePlacement_ClusterSelector + Placement isWorkflowTemplatePlacement_Placement `protobuf_oneof:"placement"` +} + +func (m *WorkflowTemplatePlacement) Reset() { *m = WorkflowTemplatePlacement{} } +func (m *WorkflowTemplatePlacement) String() string { return proto.CompactTextString(m) } +func (*WorkflowTemplatePlacement) ProtoMessage() {} +func (*WorkflowTemplatePlacement) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{1} } + +type isWorkflowTemplatePlacement_Placement interface { + isWorkflowTemplatePlacement_Placement() +} + +type WorkflowTemplatePlacement_ManagedCluster struct { + ManagedCluster *ManagedCluster `protobuf:"bytes,1,opt,name=managed_cluster,json=managedCluster,oneof"` +} +type WorkflowTemplatePlacement_ClusterSelector struct { + ClusterSelector *ClusterSelector `protobuf:"bytes,2,opt,name=cluster_selector,json=clusterSelector,oneof"` +} + +func (*WorkflowTemplatePlacement_ManagedCluster) isWorkflowTemplatePlacement_Placement() {} +func (*WorkflowTemplatePlacement_ClusterSelector) isWorkflowTemplatePlacement_Placement() {} + +func (m *WorkflowTemplatePlacement) GetPlacement() isWorkflowTemplatePlacement_Placement { + if m != nil { + return m.Placement + } + return nil +} + +func (m *WorkflowTemplatePlacement) GetManagedCluster() *ManagedCluster { + if x, ok := m.GetPlacement().(*WorkflowTemplatePlacement_ManagedCluster); ok { + return x.ManagedCluster + } + return nil +} + +func (m *WorkflowTemplatePlacement) GetClusterSelector() *ClusterSelector { + if x, ok := m.GetPlacement().(*WorkflowTemplatePlacement_ClusterSelector); ok { + return x.ClusterSelector + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*WorkflowTemplatePlacement) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _WorkflowTemplatePlacement_OneofMarshaler, _WorkflowTemplatePlacement_OneofUnmarshaler, _WorkflowTemplatePlacement_OneofSizer, []interface{}{ + (*WorkflowTemplatePlacement_ManagedCluster)(nil), + (*WorkflowTemplatePlacement_ClusterSelector)(nil), + } +} + +func _WorkflowTemplatePlacement_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*WorkflowTemplatePlacement) + // placement + switch x := m.Placement.(type) { + case *WorkflowTemplatePlacement_ManagedCluster: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ManagedCluster); err != nil { + return err + } + case *WorkflowTemplatePlacement_ClusterSelector: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ClusterSelector); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("WorkflowTemplatePlacement.Placement has unexpected type %T", x) + } + return nil +} + +func _WorkflowTemplatePlacement_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*WorkflowTemplatePlacement) + switch tag { + case 1: // placement.managed_cluster + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ManagedCluster) + err := b.DecodeMessage(msg) + m.Placement = &WorkflowTemplatePlacement_ManagedCluster{msg} + return true, err + case 2: // placement.cluster_selector + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(ClusterSelector) + err := b.DecodeMessage(msg) + m.Placement = &WorkflowTemplatePlacement_ClusterSelector{msg} + return true, err + default: + return false, nil + } +} + +func _WorkflowTemplatePlacement_OneofSizer(msg proto.Message) (n int) { + m := msg.(*WorkflowTemplatePlacement) + // placement + switch x := m.Placement.(type) { + case *WorkflowTemplatePlacement_ManagedCluster: + s := proto.Size(x.ManagedCluster) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *WorkflowTemplatePlacement_ClusterSelector: + s := proto.Size(x.ClusterSelector) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Cluster that is managed by the workflow. +type ManagedCluster struct { + // Required. The cluster name. Cluster names within a project must be + // unique. Names from deleted clusters can be reused. + ClusterName string `protobuf:"bytes,2,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` + // Required. The cluster configuration. + Config *ClusterConfig `protobuf:"bytes,3,opt,name=config" json:"config,omitempty"` + // Optional. The labels to associate with this cluster. + // + // Label keys must be between 1 and 63 characters long, and must conform to + // the following PCRE regular expression: + // [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} + // + // Label values must be between 1 and 63 characters long, and must conform to + // the following PCRE regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} + // + // No more than 64 labels can be associated with a given cluster. 
+ Labels map[string]string `protobuf:"bytes,4,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *ManagedCluster) Reset() { *m = ManagedCluster{} } +func (m *ManagedCluster) String() string { return proto.CompactTextString(m) } +func (*ManagedCluster) ProtoMessage() {} +func (*ManagedCluster) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{2} } + +func (m *ManagedCluster) GetClusterName() string { + if m != nil { + return m.ClusterName + } + return "" +} + +func (m *ManagedCluster) GetConfig() *ClusterConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *ManagedCluster) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +// A selector that chooses target cluster for jobs based on metadata. +type ClusterSelector struct { + // Optional. The zone where workflow process executes. This parameter does not + // affect the selection of the cluster. + // + // If unspecified, the zone of the first cluster matching the selector + // is used. + Zone string `protobuf:"bytes,1,opt,name=zone" json:"zone,omitempty"` + // Required. The cluster labels. Cluster must have all labels + // to match. + ClusterLabels map[string]string `protobuf:"bytes,2,rep,name=cluster_labels,json=clusterLabels" json:"cluster_labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *ClusterSelector) Reset() { *m = ClusterSelector{} } +func (m *ClusterSelector) String() string { return proto.CompactTextString(m) } +func (*ClusterSelector) ProtoMessage() {} +func (*ClusterSelector) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{3} } + +func (m *ClusterSelector) GetZone() string { + if m != nil { + return m.Zone + } + return "" +} + +func (m *ClusterSelector) GetClusterLabels() map[string]string { + if m != nil { + return m.ClusterLabels + } + return nil +} + +type OrderedJob struct { + // Required. The step id. The id must be unique among all jobs + // within the template. + // + // The step id is used as prefix for job id, as job `workflow-step-id` label, + // and in prerequisite_step_ids field from other steps. + StepId string `protobuf:"bytes,1,opt,name=step_id,json=stepId" json:"step_id,omitempty"` + // Required. The job definition. + // + // Types that are valid to be assigned to JobType: + // *OrderedJob_HadoopJob + // *OrderedJob_SparkJob + // *OrderedJob_PysparkJob + // *OrderedJob_HiveJob + // *OrderedJob_PigJob + // *OrderedJob_SparkSqlJob + JobType isOrderedJob_JobType `protobuf_oneof:"job_type"` + // Optional. The labels to associate with this job. + // + // Label keys must be between 1 and 63 characters long, and must conform to + // the following regular expression: + // [\p{Ll}\p{Lo}][\p{Ll}\p{Lo}\p{N}_-]{0,62} + // + // Label values must be between 1 and 63 characters long, and must conform to + // the following regular expression: [\p{Ll}\p{Lo}\p{N}_-]{0,63} + // + // No more than 64 labels can be associated with a given job. + Labels map[string]string `protobuf:"bytes,8,rep,name=labels" json:"labels,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` + // Optional. Job scheduling configuration. + Scheduling *JobScheduling `protobuf:"bytes,9,opt,name=scheduling" json:"scheduling,omitempty"` + // Optional. The optional list of prerequisite job step_ids. + // If not specified, the job will start at the beginning of workflow. 
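+	// For example (sketch only; the step ids are placeholders and the empty
+	// SparkJob stands in for a fully configured job from this package):
+	//
+	//	job := &OrderedJob{
+	//		StepId:              "analyze",
+	//		PrerequisiteStepIds: []string{"ingest"},
+	//		JobType:             &OrderedJob_SparkJob{SparkJob: &SparkJob{}},
+	//	}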
+ PrerequisiteStepIds []string `protobuf:"bytes,10,rep,name=prerequisite_step_ids,json=prerequisiteStepIds" json:"prerequisite_step_ids,omitempty"` +} + +func (m *OrderedJob) Reset() { *m = OrderedJob{} } +func (m *OrderedJob) String() string { return proto.CompactTextString(m) } +func (*OrderedJob) ProtoMessage() {} +func (*OrderedJob) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{4} } + +type isOrderedJob_JobType interface { + isOrderedJob_JobType() +} + +type OrderedJob_HadoopJob struct { + HadoopJob *HadoopJob `protobuf:"bytes,2,opt,name=hadoop_job,json=hadoopJob,oneof"` +} +type OrderedJob_SparkJob struct { + SparkJob *SparkJob `protobuf:"bytes,3,opt,name=spark_job,json=sparkJob,oneof"` +} +type OrderedJob_PysparkJob struct { + PysparkJob *PySparkJob `protobuf:"bytes,4,opt,name=pyspark_job,json=pysparkJob,oneof"` +} +type OrderedJob_HiveJob struct { + HiveJob *HiveJob `protobuf:"bytes,5,opt,name=hive_job,json=hiveJob,oneof"` +} +type OrderedJob_PigJob struct { + PigJob *PigJob `protobuf:"bytes,6,opt,name=pig_job,json=pigJob,oneof"` +} +type OrderedJob_SparkSqlJob struct { + SparkSqlJob *SparkSqlJob `protobuf:"bytes,7,opt,name=spark_sql_job,json=sparkSqlJob,oneof"` +} + +func (*OrderedJob_HadoopJob) isOrderedJob_JobType() {} +func (*OrderedJob_SparkJob) isOrderedJob_JobType() {} +func (*OrderedJob_PysparkJob) isOrderedJob_JobType() {} +func (*OrderedJob_HiveJob) isOrderedJob_JobType() {} +func (*OrderedJob_PigJob) isOrderedJob_JobType() {} +func (*OrderedJob_SparkSqlJob) isOrderedJob_JobType() {} + +func (m *OrderedJob) GetJobType() isOrderedJob_JobType { + if m != nil { + return m.JobType + } + return nil +} + +func (m *OrderedJob) GetStepId() string { + if m != nil { + return m.StepId + } + return "" +} + +func (m *OrderedJob) GetHadoopJob() *HadoopJob { + if x, ok := m.GetJobType().(*OrderedJob_HadoopJob); ok { + return x.HadoopJob + } + return nil +} + +func (m *OrderedJob) GetSparkJob() *SparkJob { + if x, ok := m.GetJobType().(*OrderedJob_SparkJob); ok { + return x.SparkJob + } + return nil +} + +func (m *OrderedJob) GetPysparkJob() *PySparkJob { + if x, ok := m.GetJobType().(*OrderedJob_PysparkJob); ok { + return x.PysparkJob + } + return nil +} + +func (m *OrderedJob) GetHiveJob() *HiveJob { + if x, ok := m.GetJobType().(*OrderedJob_HiveJob); ok { + return x.HiveJob + } + return nil +} + +func (m *OrderedJob) GetPigJob() *PigJob { + if x, ok := m.GetJobType().(*OrderedJob_PigJob); ok { + return x.PigJob + } + return nil +} + +func (m *OrderedJob) GetSparkSqlJob() *SparkSqlJob { + if x, ok := m.GetJobType().(*OrderedJob_SparkSqlJob); ok { + return x.SparkSqlJob + } + return nil +} + +func (m *OrderedJob) GetLabels() map[string]string { + if m != nil { + return m.Labels + } + return nil +} + +func (m *OrderedJob) GetScheduling() *JobScheduling { + if m != nil { + return m.Scheduling + } + return nil +} + +func (m *OrderedJob) GetPrerequisiteStepIds() []string { + if m != nil { + return m.PrerequisiteStepIds + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. 
+func (*OrderedJob) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _OrderedJob_OneofMarshaler, _OrderedJob_OneofUnmarshaler, _OrderedJob_OneofSizer, []interface{}{ + (*OrderedJob_HadoopJob)(nil), + (*OrderedJob_SparkJob)(nil), + (*OrderedJob_PysparkJob)(nil), + (*OrderedJob_HiveJob)(nil), + (*OrderedJob_PigJob)(nil), + (*OrderedJob_SparkSqlJob)(nil), + } +} + +func _OrderedJob_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*OrderedJob) + // job_type + switch x := m.JobType.(type) { + case *OrderedJob_HadoopJob: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.HadoopJob); err != nil { + return err + } + case *OrderedJob_SparkJob: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SparkJob); err != nil { + return err + } + case *OrderedJob_PysparkJob: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PysparkJob); err != nil { + return err + } + case *OrderedJob_HiveJob: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.HiveJob); err != nil { + return err + } + case *OrderedJob_PigJob: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PigJob); err != nil { + return err + } + case *OrderedJob_SparkSqlJob: + b.EncodeVarint(7<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.SparkSqlJob); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("OrderedJob.JobType has unexpected type %T", x) + } + return nil +} + +func _OrderedJob_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*OrderedJob) + switch tag { + case 2: // job_type.hadoop_job + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(HadoopJob) + err := b.DecodeMessage(msg) + m.JobType = &OrderedJob_HadoopJob{msg} + return true, err + case 3: // job_type.spark_job + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SparkJob) + err := b.DecodeMessage(msg) + m.JobType = &OrderedJob_SparkJob{msg} + return true, err + case 4: // job_type.pyspark_job + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PySparkJob) + err := b.DecodeMessage(msg) + m.JobType = &OrderedJob_PysparkJob{msg} + return true, err + case 5: // job_type.hive_job + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(HiveJob) + err := b.DecodeMessage(msg) + m.JobType = &OrderedJob_HiveJob{msg} + return true, err + case 6: // job_type.pig_job + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PigJob) + err := b.DecodeMessage(msg) + m.JobType = &OrderedJob_PigJob{msg} + return true, err + case 7: // job_type.spark_sql_job + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(SparkSqlJob) + err := b.DecodeMessage(msg) + m.JobType = &OrderedJob_SparkSqlJob{msg} + return true, err + default: + return false, nil + } +} + +func _OrderedJob_OneofSizer(msg proto.Message) (n int) { + m := msg.(*OrderedJob) + // job_type + switch x := m.JobType.(type) { + case *OrderedJob_HadoopJob: + s := proto.Size(x.HadoopJob) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *OrderedJob_SparkJob: + s := proto.Size(x.SparkJob) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += 
proto.SizeVarint(uint64(s)) + n += s + case *OrderedJob_PysparkJob: + s := proto.Size(x.PysparkJob) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *OrderedJob_HiveJob: + s := proto.Size(x.HiveJob) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *OrderedJob_PigJob: + s := proto.Size(x.PigJob) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *OrderedJob_SparkSqlJob: + s := proto.Size(x.SparkSqlJob) + n += proto.SizeVarint(7<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A Cloud Dataproc workflow template resource. +type WorkflowMetadata struct { + // Output only. The "resource name" of the template. + Template string `protobuf:"bytes,1,opt,name=template" json:"template,omitempty"` + // Output only. The version of template at the time of + // workflow instantiation. + Version int32 `protobuf:"varint,2,opt,name=version" json:"version,omitempty"` + // Output only. The create cluster operation metadata. + CreateCluster *ClusterOperation `protobuf:"bytes,3,opt,name=create_cluster,json=createCluster" json:"create_cluster,omitempty"` + // Output only. The workflow graph. + Graph *WorkflowGraph `protobuf:"bytes,4,opt,name=graph" json:"graph,omitempty"` + // Output only. The delete cluster operation metadata. + DeleteCluster *ClusterOperation `protobuf:"bytes,5,opt,name=delete_cluster,json=deleteCluster" json:"delete_cluster,omitempty"` + // Output only. The workflow state. + State WorkflowMetadata_State `protobuf:"varint,6,opt,name=state,enum=google.cloud.dataproc.v1beta2.WorkflowMetadata_State" json:"state,omitempty"` + // Output only. The name of the managed cluster. + ClusterName string `protobuf:"bytes,7,opt,name=cluster_name,json=clusterName" json:"cluster_name,omitempty"` +} + +func (m *WorkflowMetadata) Reset() { *m = WorkflowMetadata{} } +func (m *WorkflowMetadata) String() string { return proto.CompactTextString(m) } +func (*WorkflowMetadata) ProtoMessage() {} +func (*WorkflowMetadata) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{5} } + +func (m *WorkflowMetadata) GetTemplate() string { + if m != nil { + return m.Template + } + return "" +} + +func (m *WorkflowMetadata) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *WorkflowMetadata) GetCreateCluster() *ClusterOperation { + if m != nil { + return m.CreateCluster + } + return nil +} + +func (m *WorkflowMetadata) GetGraph() *WorkflowGraph { + if m != nil { + return m.Graph + } + return nil +} + +func (m *WorkflowMetadata) GetDeleteCluster() *ClusterOperation { + if m != nil { + return m.DeleteCluster + } + return nil +} + +func (m *WorkflowMetadata) GetState() WorkflowMetadata_State { + if m != nil { + return m.State + } + return WorkflowMetadata_UNKNOWN +} + +func (m *WorkflowMetadata) GetClusterName() string { + if m != nil { + return m.ClusterName + } + return "" +} + +type ClusterOperation struct { + // Output only. The id of the cluster operation. + OperationId string `protobuf:"bytes,1,opt,name=operation_id,json=operationId" json:"operation_id,omitempty"` + // Output only. Error, if operation failed. + Error string `protobuf:"bytes,2,opt,name=error" json:"error,omitempty"` + // Output only. Indicates the operation is done. 
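+	// An illustrative check (sketch; op is assumed to be a *ClusterOperation
+	// taken from WorkflowMetadata, e.g. via GetCreateCluster):
+	//
+	//	if op.GetDone() && op.GetError() != "" {
+	//		// the cluster operation finished with an error; GetError holds the detail
+	//	}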
+ Done bool `protobuf:"varint,3,opt,name=done" json:"done,omitempty"` +} + +func (m *ClusterOperation) Reset() { *m = ClusterOperation{} } +func (m *ClusterOperation) String() string { return proto.CompactTextString(m) } +func (*ClusterOperation) ProtoMessage() {} +func (*ClusterOperation) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{6} } + +func (m *ClusterOperation) GetOperationId() string { + if m != nil { + return m.OperationId + } + return "" +} + +func (m *ClusterOperation) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +func (m *ClusterOperation) GetDone() bool { + if m != nil { + return m.Done + } + return false +} + +// The workflow graph. +type WorkflowGraph struct { + // Output only. The workflow nodes. + Nodes []*WorkflowNode `protobuf:"bytes,1,rep,name=nodes" json:"nodes,omitempty"` +} + +func (m *WorkflowGraph) Reset() { *m = WorkflowGraph{} } +func (m *WorkflowGraph) String() string { return proto.CompactTextString(m) } +func (*WorkflowGraph) ProtoMessage() {} +func (*WorkflowGraph) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{7} } + +func (m *WorkflowGraph) GetNodes() []*WorkflowNode { + if m != nil { + return m.Nodes + } + return nil +} + +// The workflow node. +type WorkflowNode struct { + // Output only. The name of the node. + StepId string `protobuf:"bytes,1,opt,name=step_id,json=stepId" json:"step_id,omitempty"` + // Output only. Node's prerequisite nodes. + PrerequisiteStepIds []string `protobuf:"bytes,2,rep,name=prerequisite_step_ids,json=prerequisiteStepIds" json:"prerequisite_step_ids,omitempty"` + // Output only. The job id; populated after the node enters RUNNING state. + JobId string `protobuf:"bytes,3,opt,name=job_id,json=jobId" json:"job_id,omitempty"` + // Output only. The node state. + State WorkflowNode_NodeState `protobuf:"varint,5,opt,name=state,enum=google.cloud.dataproc.v1beta2.WorkflowNode_NodeState" json:"state,omitempty"` + // Output only. The error detail. + Error string `protobuf:"bytes,6,opt,name=error" json:"error,omitempty"` +} + +func (m *WorkflowNode) Reset() { *m = WorkflowNode{} } +func (m *WorkflowNode) String() string { return proto.CompactTextString(m) } +func (*WorkflowNode) ProtoMessage() {} +func (*WorkflowNode) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{8} } + +func (m *WorkflowNode) GetStepId() string { + if m != nil { + return m.StepId + } + return "" +} + +func (m *WorkflowNode) GetPrerequisiteStepIds() []string { + if m != nil { + return m.PrerequisiteStepIds + } + return nil +} + +func (m *WorkflowNode) GetJobId() string { + if m != nil { + return m.JobId + } + return "" +} + +func (m *WorkflowNode) GetState() WorkflowNode_NodeState { + if m != nil { + return m.State + } + return WorkflowNode_NODE_STATUS_UNSPECIFIED +} + +func (m *WorkflowNode) GetError() string { + if m != nil { + return m.Error + } + return "" +} + +// A request to create a workflow template. +type CreateWorkflowTemplateRequest struct { + // Required. The "resource name" of the region, as described + // in https://cloud.google.com/apis/design/resource_names of the form + // `projects/{project_id}/regions/{region}` + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Required. The Dataproc workflow template to create. 
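+	// An illustrative request (sketch; the project and region are placeholders,
+	// and tmpl is assumed to be a *WorkflowTemplate assembled by the caller):
+	//
+	//	req := &CreateWorkflowTemplateRequest{
+	//		Parent:   "projects/my-project/regions/us-central1",
+	//		Template: tmpl,
+	//	}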
+ Template *WorkflowTemplate `protobuf:"bytes,2,opt,name=template" json:"template,omitempty"` +} + +func (m *CreateWorkflowTemplateRequest) Reset() { *m = CreateWorkflowTemplateRequest{} } +func (m *CreateWorkflowTemplateRequest) String() string { return proto.CompactTextString(m) } +func (*CreateWorkflowTemplateRequest) ProtoMessage() {} +func (*CreateWorkflowTemplateRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{9} } + +func (m *CreateWorkflowTemplateRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateWorkflowTemplateRequest) GetTemplate() *WorkflowTemplate { + if m != nil { + return m.Template + } + return nil +} + +// A request to fetch a workflow template. +type GetWorkflowTemplateRequest struct { + // Required. The "resource name" of the workflow template, as described + // in https://cloud.google.com/apis/design/resource_names of the form + // `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Optional. The version of workflow template to retrieve. Only previously + // instatiated versions can be retrieved. + // + // If unspecified, retrieves the current version. + Version int32 `protobuf:"varint,2,opt,name=version" json:"version,omitempty"` +} + +func (m *GetWorkflowTemplateRequest) Reset() { *m = GetWorkflowTemplateRequest{} } +func (m *GetWorkflowTemplateRequest) String() string { return proto.CompactTextString(m) } +func (*GetWorkflowTemplateRequest) ProtoMessage() {} +func (*GetWorkflowTemplateRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{10} } + +func (m *GetWorkflowTemplateRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetWorkflowTemplateRequest) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +// A request to instantiate a workflow template. +type InstantiateWorkflowTemplateRequest struct { + // Required. The "resource name" of the workflow template, as described + // in https://cloud.google.com/apis/design/resource_names of the form + // `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Optional. The version of workflow template to instantiate. If specified, + // the workflow will be instantiated only if the current version of + // the workflow template has the supplied version. + // + // This option cannot be used to instantiate a previous version of + // workflow template. + Version int32 `protobuf:"varint,2,opt,name=version" json:"version,omitempty"` + // Optional. A tag that prevents multiple concurrent workflow + // instances with the same tag from running. This mitigates risk of + // concurrent instances started due to retries. + // + // It is recommended to always set this value to a + // [UUID](https://en.wikipedia.org/wiki/Universally_unique_identifier). + // + // The tag must contain only letters (a-z, A-Z), numbers (0-9), + // underscores (_), and hyphens (-). The maximum length is 40 characters. 
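+	// For example (sketch; the resource name and instance id are placeholders,
+	// and in practice the instance id would be a freshly generated UUID):
+	//
+	//	req := &InstantiateWorkflowTemplateRequest{
+	//		Name:       "projects/my-project/regions/us-central1/workflowTemplates/my-template",
+	//		InstanceId: "3f5b2c64-0d4e-4a7e-9a3b-placeholder",
+	//	}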
+ InstanceId string `protobuf:"bytes,3,opt,name=instance_id,json=instanceId" json:"instance_id,omitempty"` +} + +func (m *InstantiateWorkflowTemplateRequest) Reset() { *m = InstantiateWorkflowTemplateRequest{} } +func (m *InstantiateWorkflowTemplateRequest) String() string { return proto.CompactTextString(m) } +func (*InstantiateWorkflowTemplateRequest) ProtoMessage() {} +func (*InstantiateWorkflowTemplateRequest) Descriptor() ([]byte, []int) { + return fileDescriptor3, []int{11} +} + +func (m *InstantiateWorkflowTemplateRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *InstantiateWorkflowTemplateRequest) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *InstantiateWorkflowTemplateRequest) GetInstanceId() string { + if m != nil { + return m.InstanceId + } + return "" +} + +// A request to update a workflow template. +type UpdateWorkflowTemplateRequest struct { + // Required. The updated workflow template. + // + // The `template.version` field must match the current version. + Template *WorkflowTemplate `protobuf:"bytes,1,opt,name=template" json:"template,omitempty"` +} + +func (m *UpdateWorkflowTemplateRequest) Reset() { *m = UpdateWorkflowTemplateRequest{} } +func (m *UpdateWorkflowTemplateRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateWorkflowTemplateRequest) ProtoMessage() {} +func (*UpdateWorkflowTemplateRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{12} } + +func (m *UpdateWorkflowTemplateRequest) GetTemplate() *WorkflowTemplate { + if m != nil { + return m.Template + } + return nil +} + +// A request to list workflow templates in a project. +type ListWorkflowTemplatesRequest struct { + // Required. The "resource name" of the region, as described + // in https://cloud.google.com/apis/design/resource_names of the form + // `projects/{project_id}/regions/{region}` + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // Optional. The maximum number of results to return in each response. + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // Optional. The page token, returned by a previous call, to request the + // next page of results. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListWorkflowTemplatesRequest) Reset() { *m = ListWorkflowTemplatesRequest{} } +func (m *ListWorkflowTemplatesRequest) String() string { return proto.CompactTextString(m) } +func (*ListWorkflowTemplatesRequest) ProtoMessage() {} +func (*ListWorkflowTemplatesRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{13} } + +func (m *ListWorkflowTemplatesRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListWorkflowTemplatesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListWorkflowTemplatesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// A response to a request to list workflow templates in a project. +type ListWorkflowTemplatesResponse struct { + // Output only. WorkflowTemplates list. + Templates []*WorkflowTemplate `protobuf:"bytes,1,rep,name=templates" json:"templates,omitempty"` + // Output only. This token is included in the response if there are more results + // to fetch. 
To fetch additional results, provide this value as the + // page_token in a subsequent ListWorkflowTemplatesRequest. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListWorkflowTemplatesResponse) Reset() { *m = ListWorkflowTemplatesResponse{} } +func (m *ListWorkflowTemplatesResponse) String() string { return proto.CompactTextString(m) } +func (*ListWorkflowTemplatesResponse) ProtoMessage() {} +func (*ListWorkflowTemplatesResponse) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{14} } + +func (m *ListWorkflowTemplatesResponse) GetTemplates() []*WorkflowTemplate { + if m != nil { + return m.Templates + } + return nil +} + +func (m *ListWorkflowTemplatesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// A request to delete a workflow template. +// +// Currently started workflows will remain running. +type DeleteWorkflowTemplateRequest struct { + // Required. The "resource name" of the workflow template, as described + // in https://cloud.google.com/apis/design/resource_names of the form + // `projects/{project_id}/regions/{region}/workflowTemplates/{template_id}` + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // Optional. The version of workflow template to delete. If specified, + // will only delete the template if the current server version matches + // specified version. + Version int32 `protobuf:"varint,2,opt,name=version" json:"version,omitempty"` +} + +func (m *DeleteWorkflowTemplateRequest) Reset() { *m = DeleteWorkflowTemplateRequest{} } +func (m *DeleteWorkflowTemplateRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteWorkflowTemplateRequest) ProtoMessage() {} +func (*DeleteWorkflowTemplateRequest) Descriptor() ([]byte, []int) { return fileDescriptor3, []int{15} } + +func (m *DeleteWorkflowTemplateRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DeleteWorkflowTemplateRequest) GetVersion() int32 { + if m != nil { + return m.Version + } + return 0 +} + +func init() { + proto.RegisterType((*WorkflowTemplate)(nil), "google.cloud.dataproc.v1beta2.WorkflowTemplate") + proto.RegisterType((*WorkflowTemplatePlacement)(nil), "google.cloud.dataproc.v1beta2.WorkflowTemplatePlacement") + proto.RegisterType((*ManagedCluster)(nil), "google.cloud.dataproc.v1beta2.ManagedCluster") + proto.RegisterType((*ClusterSelector)(nil), "google.cloud.dataproc.v1beta2.ClusterSelector") + proto.RegisterType((*OrderedJob)(nil), "google.cloud.dataproc.v1beta2.OrderedJob") + proto.RegisterType((*WorkflowMetadata)(nil), "google.cloud.dataproc.v1beta2.WorkflowMetadata") + proto.RegisterType((*ClusterOperation)(nil), "google.cloud.dataproc.v1beta2.ClusterOperation") + proto.RegisterType((*WorkflowGraph)(nil), "google.cloud.dataproc.v1beta2.WorkflowGraph") + proto.RegisterType((*WorkflowNode)(nil), "google.cloud.dataproc.v1beta2.WorkflowNode") + proto.RegisterType((*CreateWorkflowTemplateRequest)(nil), "google.cloud.dataproc.v1beta2.CreateWorkflowTemplateRequest") + proto.RegisterType((*GetWorkflowTemplateRequest)(nil), "google.cloud.dataproc.v1beta2.GetWorkflowTemplateRequest") + proto.RegisterType((*InstantiateWorkflowTemplateRequest)(nil), "google.cloud.dataproc.v1beta2.InstantiateWorkflowTemplateRequest") + proto.RegisterType((*UpdateWorkflowTemplateRequest)(nil), "google.cloud.dataproc.v1beta2.UpdateWorkflowTemplateRequest") + 
proto.RegisterType((*ListWorkflowTemplatesRequest)(nil), "google.cloud.dataproc.v1beta2.ListWorkflowTemplatesRequest") + proto.RegisterType((*ListWorkflowTemplatesResponse)(nil), "google.cloud.dataproc.v1beta2.ListWorkflowTemplatesResponse") + proto.RegisterType((*DeleteWorkflowTemplateRequest)(nil), "google.cloud.dataproc.v1beta2.DeleteWorkflowTemplateRequest") + proto.RegisterEnum("google.cloud.dataproc.v1beta2.WorkflowMetadata_State", WorkflowMetadata_State_name, WorkflowMetadata_State_value) + proto.RegisterEnum("google.cloud.dataproc.v1beta2.WorkflowNode_NodeState", WorkflowNode_NodeState_name, WorkflowNode_NodeState_value) +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for WorkflowTemplateService service + +type WorkflowTemplateServiceClient interface { + // Creates new workflow template. + CreateWorkflowTemplate(ctx context.Context, in *CreateWorkflowTemplateRequest, opts ...grpc.CallOption) (*WorkflowTemplate, error) + // Retrieves the latest workflow template. + // + // Can retrieve previously instantiated template by specifying optional + // version parameter. + GetWorkflowTemplate(ctx context.Context, in *GetWorkflowTemplateRequest, opts ...grpc.CallOption) (*WorkflowTemplate, error) + // Instantiates a template and begins execution. + // + // The returned Operation can be used to track execution of + // workflow by polling + // [google.cloud.dataproc.v1beta2.OperationService.GetOperation][]. + // The Operation will complete when entire workflow is finished. + // + // The running workflow can be aborted via + // [google.cloud.dataproc.v1beta2.OperationService.CancelOperation][]. + // + // The [google.cloud.dataproc.v1beta2.Operation.metadata][] will always be + // [google.cloud.dataproc.v1beta2.WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata]. + // + // The [google.cloud.dataproc.v1beta2.Operation.result][] will always be + // [google.protobuf.Empty][google.protobuf.Empty]. + InstantiateWorkflowTemplate(ctx context.Context, in *InstantiateWorkflowTemplateRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) + // Updates (replaces) workflow template. The updated template + // must contain version that matches the current server version. + UpdateWorkflowTemplate(ctx context.Context, in *UpdateWorkflowTemplateRequest, opts ...grpc.CallOption) (*WorkflowTemplate, error) + // Lists workflows that match the specified filter in the request. + ListWorkflowTemplates(ctx context.Context, in *ListWorkflowTemplatesRequest, opts ...grpc.CallOption) (*ListWorkflowTemplatesResponse, error) + // Deletes a workflow template. It does not cancel in-progress workflows. 
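+	//
+	// An illustrative call (sketch; ctx and conn are assumed, with conn being an
+	// already established and authenticated *grpc.ClientConn, and the resource
+	// name a placeholder):
+	//
+	//	client := NewWorkflowTemplateServiceClient(conn)
+	//	_, err := client.DeleteWorkflowTemplate(ctx, &DeleteWorkflowTemplateRequest{
+	//		Name: "projects/my-project/regions/us-central1/workflowTemplates/my-template",
+	//	})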
+ DeleteWorkflowTemplate(ctx context.Context, in *DeleteWorkflowTemplateRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) +} + +type workflowTemplateServiceClient struct { + cc *grpc.ClientConn +} + +func NewWorkflowTemplateServiceClient(cc *grpc.ClientConn) WorkflowTemplateServiceClient { + return &workflowTemplateServiceClient{cc} +} + +func (c *workflowTemplateServiceClient) CreateWorkflowTemplate(ctx context.Context, in *CreateWorkflowTemplateRequest, opts ...grpc.CallOption) (*WorkflowTemplate, error) { + out := new(WorkflowTemplate) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/CreateWorkflowTemplate", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowTemplateServiceClient) GetWorkflowTemplate(ctx context.Context, in *GetWorkflowTemplateRequest, opts ...grpc.CallOption) (*WorkflowTemplate, error) { + out := new(WorkflowTemplate) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/GetWorkflowTemplate", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowTemplateServiceClient) InstantiateWorkflowTemplate(ctx context.Context, in *InstantiateWorkflowTemplateRequest, opts ...grpc.CallOption) (*google_longrunning.Operation, error) { + out := new(google_longrunning.Operation) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateWorkflowTemplate", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowTemplateServiceClient) UpdateWorkflowTemplate(ctx context.Context, in *UpdateWorkflowTemplateRequest, opts ...grpc.CallOption) (*WorkflowTemplate, error) { + out := new(WorkflowTemplate) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/UpdateWorkflowTemplate", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowTemplateServiceClient) ListWorkflowTemplates(ctx context.Context, in *ListWorkflowTemplatesRequest, opts ...grpc.CallOption) (*ListWorkflowTemplatesResponse, error) { + out := new(ListWorkflowTemplatesResponse) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/ListWorkflowTemplates", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *workflowTemplateServiceClient) DeleteWorkflowTemplate(ctx context.Context, in *DeleteWorkflowTemplateRequest, opts ...grpc.CallOption) (*google_protobuf2.Empty, error) { + out := new(google_protobuf2.Empty) + err := grpc.Invoke(ctx, "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/DeleteWorkflowTemplate", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for WorkflowTemplateService service + +type WorkflowTemplateServiceServer interface { + // Creates new workflow template. + CreateWorkflowTemplate(context.Context, *CreateWorkflowTemplateRequest) (*WorkflowTemplate, error) + // Retrieves the latest workflow template. + // + // Can retrieve previously instantiated template by specifying optional + // version parameter. + GetWorkflowTemplate(context.Context, *GetWorkflowTemplateRequest) (*WorkflowTemplate, error) + // Instantiates a template and begins execution. + // + // The returned Operation can be used to track execution of + // workflow by polling + // [google.cloud.dataproc.v1beta2.OperationService.GetOperation][]. 
+ // The Operation will complete when entire workflow is finished. + // + // The running workflow can be aborted via + // [google.cloud.dataproc.v1beta2.OperationService.CancelOperation][]. + // + // The [google.cloud.dataproc.v1beta2.Operation.metadata][] will always be + // [google.cloud.dataproc.v1beta2.WorkflowMetadata][google.cloud.dataproc.v1beta2.WorkflowMetadata]. + // + // The [google.cloud.dataproc.v1beta2.Operation.result][] will always be + // [google.protobuf.Empty][google.protobuf.Empty]. + InstantiateWorkflowTemplate(context.Context, *InstantiateWorkflowTemplateRequest) (*google_longrunning.Operation, error) + // Updates (replaces) workflow template. The updated template + // must contain version that matches the current server version. + UpdateWorkflowTemplate(context.Context, *UpdateWorkflowTemplateRequest) (*WorkflowTemplate, error) + // Lists workflows that match the specified filter in the request. + ListWorkflowTemplates(context.Context, *ListWorkflowTemplatesRequest) (*ListWorkflowTemplatesResponse, error) + // Deletes a workflow template. It does not cancel in-progress workflows. + DeleteWorkflowTemplate(context.Context, *DeleteWorkflowTemplateRequest) (*google_protobuf2.Empty, error) +} + +func RegisterWorkflowTemplateServiceServer(s *grpc.Server, srv WorkflowTemplateServiceServer) { + s.RegisterService(&_WorkflowTemplateService_serviceDesc, srv) +} + +func _WorkflowTemplateService_CreateWorkflowTemplate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateWorkflowTemplateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowTemplateServiceServer).CreateWorkflowTemplate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/CreateWorkflowTemplate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowTemplateServiceServer).CreateWorkflowTemplate(ctx, req.(*CreateWorkflowTemplateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowTemplateService_GetWorkflowTemplate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetWorkflowTemplateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowTemplateServiceServer).GetWorkflowTemplate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/GetWorkflowTemplate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowTemplateServiceServer).GetWorkflowTemplate(ctx, req.(*GetWorkflowTemplateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowTemplateService_InstantiateWorkflowTemplate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(InstantiateWorkflowTemplateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowTemplateServiceServer).InstantiateWorkflowTemplate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/InstantiateWorkflowTemplate", + } + handler := func(ctx 
context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowTemplateServiceServer).InstantiateWorkflowTemplate(ctx, req.(*InstantiateWorkflowTemplateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowTemplateService_UpdateWorkflowTemplate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateWorkflowTemplateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowTemplateServiceServer).UpdateWorkflowTemplate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/UpdateWorkflowTemplate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowTemplateServiceServer).UpdateWorkflowTemplate(ctx, req.(*UpdateWorkflowTemplateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowTemplateService_ListWorkflowTemplates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListWorkflowTemplatesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowTemplateServiceServer).ListWorkflowTemplates(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/ListWorkflowTemplates", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowTemplateServiceServer).ListWorkflowTemplates(ctx, req.(*ListWorkflowTemplatesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _WorkflowTemplateService_DeleteWorkflowTemplate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteWorkflowTemplateRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(WorkflowTemplateServiceServer).DeleteWorkflowTemplate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.dataproc.v1beta2.WorkflowTemplateService/DeleteWorkflowTemplate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(WorkflowTemplateServiceServer).DeleteWorkflowTemplate(ctx, req.(*DeleteWorkflowTemplateRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _WorkflowTemplateService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.dataproc.v1beta2.WorkflowTemplateService", + HandlerType: (*WorkflowTemplateServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateWorkflowTemplate", + Handler: _WorkflowTemplateService_CreateWorkflowTemplate_Handler, + }, + { + MethodName: "GetWorkflowTemplate", + Handler: _WorkflowTemplateService_GetWorkflowTemplate_Handler, + }, + { + MethodName: "InstantiateWorkflowTemplate", + Handler: _WorkflowTemplateService_InstantiateWorkflowTemplate_Handler, + }, + { + MethodName: "UpdateWorkflowTemplate", + Handler: _WorkflowTemplateService_UpdateWorkflowTemplate_Handler, + }, + { + MethodName: "ListWorkflowTemplates", + Handler: _WorkflowTemplateService_ListWorkflowTemplates_Handler, + }, + { + MethodName: "DeleteWorkflowTemplate", + Handler: _WorkflowTemplateService_DeleteWorkflowTemplate_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, 
+ Metadata: "google/cloud/dataproc/v1beta2/workflow_templates.proto", +} + +func init() { + proto.RegisterFile("google/cloud/dataproc/v1beta2/workflow_templates.proto", fileDescriptor3) +} + +var fileDescriptor3 = []byte{ + // 1634 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xac, 0x58, 0x5d, 0x6f, 0xdb, 0x5c, + 0x1d, 0xaf, 0xd3, 0x26, 0x4d, 0xfe, 0x59, 0xdb, 0xe8, 0x3c, 0xac, 0x0b, 0xe9, 0x53, 0xad, 0x33, + 0x62, 0x94, 0x32, 0x12, 0x11, 0x04, 0xea, 0xba, 0x4d, 0x5a, 0xdb, 0x64, 0x6b, 0xda, 0x34, 0x09, + 0x4e, 0xbb, 0x21, 0xb8, 0x88, 0x9c, 0xf8, 0xcc, 0x75, 0xeb, 0xf8, 0xb8, 0xb6, 0xd3, 0xd1, 0xa1, + 0xdd, 0xa0, 0x49, 0x7c, 0x00, 0x24, 0x3e, 0x07, 0x9f, 0x01, 0xbe, 0x00, 0x68, 0x5c, 0x22, 0x71, + 0x83, 0xb8, 0xdf, 0x15, 0xb7, 0xe8, 0xbc, 0xd8, 0x71, 0x5e, 0x9d, 0x74, 0xcf, 0x4d, 0xe4, 0x73, + 0x72, 0x7e, 0xbf, 0xf3, 0xfb, 0xbf, 0x1e, 0x1f, 0xc3, 0x2f, 0x75, 0x42, 0x74, 0x13, 0x17, 0x3a, + 0x26, 0xe9, 0x69, 0x05, 0x4d, 0xf5, 0x54, 0xdb, 0x21, 0x9d, 0xc2, 0xcd, 0xcf, 0xda, 0xd8, 0x53, + 0x8b, 0x85, 0xf7, 0xc4, 0xb9, 0x7a, 0x67, 0x92, 0xf7, 0x2d, 0x0f, 0x77, 0x6d, 0x53, 0xf5, 0xb0, + 0x9b, 0xb7, 0x1d, 0xe2, 0x11, 0xb4, 0xc9, 0x71, 0x79, 0x86, 0xcb, 0xfb, 0xb8, 0xbc, 0xc0, 0xe5, + 0xbe, 0x15, 0xb4, 0xaa, 0x6d, 0x14, 0x54, 0xcb, 0x22, 0x9e, 0xea, 0x19, 0xc4, 0x12, 0xe0, 0xdc, + 0x93, 0xe9, 0x9b, 0x76, 0xcc, 0x9e, 0xeb, 0x61, 0xc7, 0x5f, 0xbd, 0x3d, 0x7d, 0xf5, 0x25, 0x69, + 0xfb, 0x2b, 0x7f, 0x20, 0x56, 0x9a, 0xc4, 0xd2, 0x9d, 0x9e, 0x65, 0x19, 0x96, 0x5e, 0x20, 0x36, + 0x76, 0x06, 0x36, 0xdf, 0x10, 0x8b, 0xd8, 0xa8, 0xdd, 0x7b, 0x57, 0xc0, 0x5d, 0xdb, 0xbb, 0x15, + 0x7f, 0x3e, 0x1c, 0xfe, 0xd3, 0x33, 0xba, 0xd8, 0xf5, 0xd4, 0xae, 0xcd, 0x17, 0xc8, 0x5f, 0x16, + 0x21, 0xf3, 0x56, 0x38, 0xe5, 0x4c, 0xf8, 0x04, 0xad, 0x42, 0xcc, 0xd0, 0xb2, 0xb1, 0x2d, 0x69, + 0x3b, 0xa5, 0xc4, 0x0c, 0x0d, 0x21, 0x58, 0xb2, 0xd4, 0x2e, 0xce, 0x4a, 0x6c, 0x86, 0x3d, 0xa3, + 0x2c, 0x2c, 0xdf, 0x60, 0xc7, 0x35, 0x88, 0x95, 0x5d, 0xdc, 0x92, 0xb6, 0xe3, 0x8a, 0x3f, 0x44, + 0xcf, 0x20, 0xdd, 0x71, 0xb0, 0xea, 0xe1, 0x16, 0xdd, 0x2c, 0xbb, 0xb4, 0x25, 0x6d, 0xa7, 0x8b, + 0xb9, 0xbc, 0x70, 0xb0, 0xaf, 0x24, 0x7f, 0xe6, 0x2b, 0x51, 0x80, 0x2f, 0xa7, 0x13, 0x14, 0xdc, + 0xb3, 0xb5, 0x00, 0x1c, 0x8f, 0x06, 0xf3, 0xe5, 0x0c, 0xdc, 0x84, 0x84, 0xa9, 0xb6, 0xb1, 0xe9, + 0x66, 0x13, 0x5b, 0x8b, 0xdb, 0xe9, 0xe2, 0xb3, 0xfc, 0xd4, 0xa8, 0xe6, 0x87, 0x0d, 0xcf, 0x57, + 0x19, 0xba, 0x6c, 0x79, 0xce, 0xad, 0x22, 0xa8, 0xd0, 0x1b, 0x48, 0xd9, 0xa6, 0xda, 0xc1, 0x5d, + 0x6c, 0x79, 0xd9, 0x65, 0xa6, 0x67, 0x77, 0x4e, 0xde, 0x86, 0x8f, 0x57, 0xfa, 0x54, 0xe8, 0x05, + 0x2c, 0xd1, 0x50, 0x67, 0x93, 0x4c, 0xea, 0x8f, 0x23, 0x28, 0xeb, 0x8e, 0x86, 0x1d, 0xac, 0x1d, + 0x93, 0xb6, 0xc2, 0x60, 0xb9, 0xa7, 0x90, 0x0e, 0xa9, 0x45, 0x19, 0x58, 0xbc, 0xc2, 0xb7, 0x22, + 0x42, 0xf4, 0x11, 0x7d, 0x0f, 0xe2, 0x37, 0xaa, 0xd9, 0xc3, 0x22, 0x8e, 0x7c, 0xb0, 0x17, 0xdb, + 0x95, 0xe4, 0x7f, 0x4b, 0xf0, 0xfd, 0x89, 0x12, 0xd1, 0xaf, 0x61, 0xad, 0xab, 0x5a, 0xaa, 0x8e, + 0xb5, 0x96, 0x48, 0x5c, 0xc6, 0x9a, 0x2e, 0xfe, 0x34, 0x42, 0xe2, 0x29, 0x47, 0x1d, 0x72, 0xd0, + 0xd1, 0x82, 0xb2, 0xda, 0x1d, 0x98, 0x41, 0xbf, 0x85, 0x8c, 0x60, 0x6c, 0xb9, 0xd8, 0xc4, 0x1d, + 0x8f, 0x38, 0x4c, 0x5c, 0xba, 0x98, 0x8f, 0xa0, 0x16, 0x0c, 0x4d, 0x81, 0x3a, 0x5a, 0x50, 0xd6, + 0x3a, 0x83, 0x53, 0x07, 0xe9, 0x50, 0x98, 0xe4, 0x3f, 0xc6, 0x60, 0x75, 0x50, 0x0e, 0x7a, 0x04, + 0xf7, 0xfc, 0xcd, 0x59, 0x2e, 0x73, 0xaf, 0xa4, 0xc5, 0x5c, 0x8d, 0xa6, 0x74, 0x09, 0x12, 0x1d, + 0x62, 0xbd, 0x33, 0x74, 0x96, 0xd1, 0xe9, 0xe2, 
0x93, 0xd9, 0x54, 0x1d, 0x32, 0x8c, 0x22, 0xb0, + 0xe8, 0x57, 0x41, 0x12, 0x2e, 0xb1, 0xc8, 0x3e, 0x9d, 0xcb, 0x6d, 0xe3, 0x52, 0xf0, 0x6b, 0x62, + 0xfd, 0x0f, 0x09, 0xd6, 0x86, 0xbc, 0x47, 0xcb, 0xf9, 0x03, 0xb1, 0x82, 0x72, 0xa6, 0xcf, 0xe8, + 0x02, 0x56, 0x7d, 0xf7, 0x08, 0xf5, 0x31, 0xa6, 0x7e, 0x7f, 0xbe, 0xc8, 0xf8, 0xe3, 0xb0, 0x15, + 0x2b, 0x9d, 0xf0, 0x5c, 0xee, 0x25, 0xa0, 0xd1, 0x45, 0x73, 0xd9, 0xf4, 0xbf, 0x38, 0x40, 0xbf, + 0x1e, 0xd0, 0x03, 0x58, 0x76, 0x3d, 0x6c, 0xb7, 0x0c, 0x4d, 0xc0, 0x13, 0x74, 0x58, 0xd1, 0x50, + 0x05, 0xe0, 0x42, 0xd5, 0x08, 0xb1, 0x5b, 0x97, 0xa4, 0x2d, 0x32, 0x6d, 0x3b, 0xc2, 0x9e, 0x23, + 0x06, 0x38, 0x26, 0xed, 0xa3, 0x05, 0x25, 0x75, 0xe1, 0x0f, 0xd0, 0x2b, 0x48, 0xb9, 0xb6, 0xea, + 0x5c, 0x31, 0x26, 0x9e, 0x1d, 0x3f, 0x8a, 0x60, 0x6a, 0xd2, 0xf5, 0x9c, 0x28, 0xe9, 0x8a, 0x67, + 0x54, 0x85, 0xb4, 0x7d, 0xdb, 0x67, 0xe2, 0xbd, 0x31, 0xaa, 0xf6, 0x1b, 0xb7, 0x21, 0x2e, 0x10, + 0x78, 0xca, 0x76, 0x08, 0xc9, 0x0b, 0xe3, 0x06, 0x33, 0x2a, 0xde, 0x29, 0x1f, 0x47, 0x99, 0x67, + 0xdc, 0x60, 0xce, 0xb3, 0x7c, 0xc1, 0x1f, 0xd1, 0x4b, 0x58, 0xb6, 0x0d, 0x9d, 0x71, 0x24, 0x18, + 0xc7, 0x0f, 0xa3, 0xe4, 0x18, 0x3a, 0xa7, 0x48, 0xd8, 0xec, 0x09, 0x35, 0x60, 0x85, 0x9b, 0xe4, + 0x5e, 0x9b, 0x8c, 0x87, 0x77, 0xc9, 0x9d, 0x59, 0x1c, 0xd4, 0xbc, 0x36, 0x39, 0x59, 0xda, 0xed, + 0x0f, 0xd1, 0x69, 0x50, 0x43, 0xbc, 0x3b, 0xfe, 0x62, 0xe6, 0xee, 0x38, 0xb6, 0x85, 0x57, 0x01, + 0xdc, 0xce, 0x05, 0xd6, 0x7a, 0xa6, 0x61, 0xe9, 0xd9, 0xd4, 0x4c, 0xc5, 0x7d, 0x4c, 0xda, 0xcd, + 0x00, 0xa3, 0x84, 0xf0, 0xa8, 0x08, 0xf7, 0x6d, 0x07, 0x3b, 0xf8, 0xba, 0x67, 0xb8, 0x86, 0x87, + 0x5b, 0x22, 0xf9, 0xdc, 0x2c, 0x6c, 0x2d, 0x6e, 0xa7, 0x94, 0x6f, 0xc2, 0x7f, 0x36, 0x59, 0x26, + 0x7e, 0x4d, 0x05, 0x1f, 0x00, 0x24, 0x2f, 0x49, 0xbb, 0xe5, 0xdd, 0xda, 0x58, 0xfe, 0x6f, 0xe8, + 0xb4, 0x3e, 0xc5, 0x9e, 0x4a, 0x55, 0xa3, 0x1c, 0x24, 0xfd, 0xb7, 0x19, 0xc1, 0x18, 0x8c, 0xc3, + 0xa7, 0x74, 0x6c, 0xf0, 0x94, 0x7e, 0x03, 0xab, 0xe2, 0x94, 0xf6, 0xbb, 0x3c, 0x4f, 0xeb, 0xc2, + 0x6c, 0x05, 0x5f, 0xf7, 0x5f, 0x43, 0x94, 0x15, 0x4e, 0xe3, 0xf7, 0xd9, 0x03, 0x88, 0xeb, 0x8e, + 0x6a, 0x5f, 0x88, 0xdc, 0x7e, 0x32, 0xe3, 0x51, 0xf9, 0x9a, 0x62, 0x14, 0x0e, 0xa5, 0xda, 0x34, + 0x6c, 0xe2, 0x90, 0xb6, 0xf8, 0x1d, 0xb5, 0x71, 0x1a, 0x5f, 0xdb, 0x09, 0xc4, 0x5d, 0x8f, 0xba, + 0x89, 0x26, 0xfa, 0x6a, 0x64, 0x56, 0x0d, 0x7b, 0x3a, 0xdf, 0xa4, 0x60, 0x85, 0x73, 0x8c, 0x1c, + 0x28, 0xcb, 0x23, 0x07, 0x8a, 0xbc, 0x0b, 0x71, 0x06, 0x41, 0x69, 0x58, 0x3e, 0xaf, 0x9d, 0xd4, + 0xea, 0x6f, 0x6b, 0x99, 0x05, 0x3a, 0x68, 0x94, 0x6b, 0xa5, 0x4a, 0xed, 0x75, 0x46, 0xa2, 0x03, + 0xe5, 0xbc, 0x56, 0xa3, 0x83, 0x18, 0x4a, 0xc2, 0x52, 0xa9, 0x5e, 0x2b, 0x67, 0x16, 0xe5, 0x16, + 0x64, 0x86, 0x8d, 0xa1, 0x1b, 0x06, 0x2f, 0x7f, 0xfd, 0x66, 0x97, 0x0e, 0xe6, 0x2a, 0x1a, 0xcd, + 0x22, 0xec, 0x38, 0xe2, 0x58, 0x4d, 0x29, 0x7c, 0x40, 0xfb, 0xbd, 0x46, 0xfb, 0x3d, 0x0d, 0x70, + 0x52, 0x61, 0xcf, 0xb2, 0x02, 0x2b, 0x03, 0xae, 0x47, 0xfb, 0x10, 0xb7, 0x88, 0x86, 0xdd, 0xac, + 0xc4, 0x2a, 0xee, 0x27, 0x33, 0xfa, 0xa6, 0x46, 0x34, 0xac, 0x70, 0xa4, 0xfc, 0xd7, 0x18, 0xdc, + 0x0b, 0xcf, 0x4f, 0xee, 0xcc, 0x13, 0x4b, 0x28, 0x36, 0xb1, 0x84, 0xd0, 0x7d, 0x48, 0xd0, 0x3a, + 0x30, 0x34, 0x66, 0x47, 0x4a, 0x89, 0x5f, 0x92, 0x76, 0x45, 0xeb, 0xc7, 0x34, 0x3e, 0x57, 0x4c, + 0xa9, 0xbe, 0x3c, 0xfd, 0x19, 0x88, 0x69, 0xe0, 0xbf, 0x44, 0xc8, 0x7f, 0xf2, 0x15, 0xa4, 0x82, + 0x95, 0x68, 0x03, 0x1e, 0xd4, 0xea, 0xa5, 0x72, 0xab, 0x79, 0xb6, 0x7f, 0x76, 0xde, 0x6c, 0x9d, + 0xd7, 0x9a, 0x8d, 0xf2, 0x61, 0xe5, 0x55, 0xa5, 0x5c, 0xe2, 0xa1, 0x3d, 
0xa8, 0xd6, 0x0f, 0x4f, + 0xca, 0xa5, 0x8c, 0x84, 0xee, 0x41, 0x92, 0x86, 0x76, 0xff, 0xa0, 0x5a, 0xce, 0xc4, 0xc2, 0x81, + 0x5e, 0x44, 0x2b, 0x90, 0x3a, 0xac, 0x9f, 0x36, 0xaa, 0xe5, 0xb3, 0x72, 0x29, 0xb3, 0x84, 0x00, + 0x12, 0xaf, 0xf6, 0x2b, 0xd5, 0x72, 0x29, 0x13, 0x97, 0x3f, 0x49, 0xb0, 0x79, 0xc8, 0x2a, 0x6a, + 0xf8, 0x15, 0x4d, 0xc1, 0xd7, 0x3d, 0xec, 0x7a, 0x68, 0x1d, 0x12, 0xb6, 0xea, 0xd0, 0xb7, 0x51, + 0xe1, 0x54, 0x3e, 0x42, 0x27, 0xa1, 0x3e, 0x10, 0x9b, 0xa9, 0x5e, 0x46, 0x76, 0x08, 0x08, 0xe4, + 0x63, 0xc8, 0xbd, 0xc6, 0xde, 0x24, 0x09, 0x11, 0x17, 0x82, 0xc1, 0x56, 0x23, 0xbb, 0x20, 0x57, + 0x2c, 0xd7, 0x53, 0x2d, 0xcf, 0x98, 0x62, 0xd6, 0x5c, 0x9c, 0xe8, 0x21, 0xa4, 0x0d, 0xc6, 0xd9, + 0xc1, 0xfd, 0x94, 0x00, 0x7f, 0xaa, 0xa2, 0xc9, 0x26, 0x6c, 0x9e, 0xb3, 0x9b, 0xc1, 0xa4, 0xfd, + 0x4e, 0x86, 0xda, 0xe6, 0x57, 0xb9, 0xcb, 0x81, 0x6f, 0xab, 0x86, 0x3b, 0xe2, 0x2f, 0x37, 0x2a, + 0x66, 0x1b, 0x90, 0xb2, 0x55, 0x1d, 0xb7, 0x5c, 0xe3, 0x03, 0x16, 0x26, 0x26, 0xe9, 0x44, 0xd3, + 0xf8, 0x80, 0xd1, 0x26, 0x00, 0xfb, 0xd3, 0x23, 0x57, 0xd8, 0x12, 0x26, 0xb2, 0xe5, 0x67, 0x74, + 0x42, 0xfe, 0xb3, 0x04, 0x9b, 0x13, 0x36, 0x75, 0x6d, 0x62, 0xb9, 0x18, 0x9d, 0x42, 0x2a, 0xb8, + 0xe7, 0x8a, 0xba, 0x9e, 0xdb, 0xc6, 0x3e, 0x03, 0x7a, 0x0c, 0x6b, 0x16, 0xfe, 0x9d, 0xd7, 0x0a, + 0x89, 0xe2, 0x7d, 0x66, 0x85, 0x4e, 0x37, 0x02, 0x61, 0xa7, 0xb0, 0x59, 0x62, 0x7d, 0xf7, 0x3b, + 0x09, 0x75, 0xf1, 0x4b, 0x0a, 0x1e, 0x0c, 0x33, 0x35, 0xb1, 0x73, 0x63, 0x74, 0x30, 0xfa, 0x2c, + 0xc1, 0xfa, 0xf8, 0x6a, 0x41, 0xcf, 0xa3, 0x0e, 0x8b, 0x69, 0x45, 0x96, 0x9b, 0xd7, 0x4f, 0xf2, + 0xf1, 0x1f, 0x3e, 0xff, 0xe7, 0x4f, 0xb1, 0x92, 0xbc, 0x1b, 0x5c, 0xe4, 0x7f, 0xcf, 0x63, 0xfc, + 0xc2, 0x76, 0xc8, 0x25, 0xee, 0x78, 0x6e, 0x61, 0xa7, 0xe0, 0x60, 0x9d, 0xde, 0xdc, 0x0b, 0x3b, + 0x1f, 0x83, 0x2f, 0x11, 0x41, 0xd4, 0xf6, 0xfa, 0xa7, 0xf6, 0xdf, 0x24, 0xf8, 0x66, 0x4c, 0xf5, + 0xa1, 0xa8, 0xab, 0xc4, 0xe4, 0x8a, 0x9d, 0xdf, 0x9e, 0x97, 0xcc, 0x9e, 0x3d, 0x14, 0xb2, 0x87, + 0xc6, 0x69, 0xac, 0x35, 0xa3, 0xc6, 0x14, 0x76, 0x3e, 0xa2, 0xbf, 0x4b, 0xb0, 0x31, 0xa5, 0xee, + 0x51, 0xd4, 0xd5, 0x22, 0xba, 0x67, 0xe4, 0x36, 0x7d, 0x8a, 0xd0, 0x17, 0x92, 0x7c, 0x70, 0x62, + 0xca, 0x75, 0x66, 0x43, 0x45, 0x2e, 0xdd, 0xd5, 0x86, 0x3d, 0xa3, 0xaf, 0x61, 0x4f, 0xda, 0x41, + 0xff, 0x92, 0x60, 0x7d, 0x7c, 0x57, 0x89, 0x4c, 0xb7, 0xa9, 0xcd, 0x68, 0xfe, 0xf0, 0x34, 0x99, + 0x69, 0xa7, 0xb9, 0xfd, 0xbe, 0x69, 0x7e, 0xfa, 0xe4, 0xe7, 0xb4, 0xb1, 0x9f, 0x77, 0xff, 0x94, + 0xe0, 0xfe, 0xd8, 0x8e, 0x82, 0xa2, 0xbe, 0xa4, 0x4c, 0x6b, 0x7e, 0xb9, 0xe7, 0x77, 0x03, 0xf3, + 0x26, 0x36, 0x2e, 0x11, 0xe7, 0x2b, 0x2c, 0xf4, 0x17, 0x09, 0xd6, 0xc7, 0x37, 0xa4, 0xc8, 0xa8, + 0x4d, 0xed, 0x63, 0xb9, 0xf5, 0x91, 0xef, 0x52, 0xe5, 0xae, 0xed, 0xdd, 0xfa, 0x92, 0x77, 0xee, + 0x5c, 0x3b, 0x07, 0x9f, 0x24, 0x78, 0xd4, 0x21, 0xdd, 0xe9, 0xea, 0x0e, 0xd6, 0x47, 0xbc, 0xd6, + 0xa0, 0x42, 0x1a, 0xd2, 0x6f, 0xca, 0x02, 0xa8, 0x13, 0x53, 0xb5, 0xf4, 0x3c, 0x71, 0xf4, 0x82, + 0x8e, 0x2d, 0x26, 0xb3, 0xc0, 0xff, 0x52, 0x6d, 0xc3, 0x9d, 0xf0, 0x09, 0xf2, 0x99, 0x3f, 0xd1, + 0x4e, 0x30, 0xc4, 0xcf, 0xff, 0x1f, 0x00, 0x00, 0xff, 0xff, 0x4a, 0x21, 0xc2, 0x3b, 0x56, 0x15, + 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/iot/v1/device_manager.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/iot/v1/device_manager.pb.go new file mode 100644 index 00000000..6fc71169 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/iot/v1/device_manager.pb.go @@ -0,0 +1,1327 @@ +// Code generated by protoc-gen-go. 
DO NOT EDIT. +// source: google/cloud/iot/v1/device_manager.proto + +/* +Package iot is a generated protocol buffer package. + +It is generated from these files: + google/cloud/iot/v1/device_manager.proto + google/cloud/iot/v1/resources.proto + +It has these top-level messages: + CreateDeviceRegistryRequest + GetDeviceRegistryRequest + DeleteDeviceRegistryRequest + UpdateDeviceRegistryRequest + ListDeviceRegistriesRequest + ListDeviceRegistriesResponse + CreateDeviceRequest + GetDeviceRequest + UpdateDeviceRequest + DeleteDeviceRequest + ListDevicesRequest + ListDevicesResponse + ModifyCloudToDeviceConfigRequest + ListDeviceConfigVersionsRequest + ListDeviceConfigVersionsResponse + ListDeviceStatesRequest + ListDeviceStatesResponse + Device + DeviceRegistry + MqttConfig + HttpConfig + EventNotificationConfig + StateNotificationConfig + RegistryCredential + X509CertificateDetails + PublicKeyCertificate + DeviceCredential + PublicKeyCredential + DeviceConfig + DeviceState +*/ +package iot + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_iam_v11 "google.golang.org/genproto/googleapis/iam/v1" +import google_iam_v1 "google.golang.org/genproto/googleapis/iam/v1" +import google_protobuf3 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf4 "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the proto package it is being compiled against. +// A compilation error at this line likely means your copy of the +// proto package needs to be updated. +const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package + +// Request for `CreateDeviceRegistry`. +type CreateDeviceRegistryRequest struct { + // The project and cloud region where this device registry must be created. + // For example, `projects/example-project/locations/us-central1`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The device registry. The field `name` must be empty. The server will + // generate that field from the device registry `id` provided and the + // `parent` field. + DeviceRegistry *DeviceRegistry `protobuf:"bytes,2,opt,name=device_registry,json=deviceRegistry" json:"device_registry,omitempty"` +} + +func (m *CreateDeviceRegistryRequest) Reset() { *m = CreateDeviceRegistryRequest{} } +func (m *CreateDeviceRegistryRequest) String() string { return proto.CompactTextString(m) } +func (*CreateDeviceRegistryRequest) ProtoMessage() {} +func (*CreateDeviceRegistryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{0} } + +func (m *CreateDeviceRegistryRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateDeviceRegistryRequest) GetDeviceRegistry() *DeviceRegistry { + if m != nil { + return m.DeviceRegistry + } + return nil +} + +// Request for `GetDeviceRegistry`. +type GetDeviceRegistryRequest struct { + // The name of the device registry. For example, + // `projects/example-project/locations/us-central1/registries/my-registry`. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetDeviceRegistryRequest) Reset() { *m = GetDeviceRegistryRequest{} } +func (m *GetDeviceRegistryRequest) String() string { return proto.CompactTextString(m) } +func (*GetDeviceRegistryRequest) ProtoMessage() {} +func (*GetDeviceRegistryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{1} } + +func (m *GetDeviceRegistryRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request for `DeleteDeviceRegistry`. +type DeleteDeviceRegistryRequest struct { + // The name of the device registry. For example, + // `projects/example-project/locations/us-central1/registries/my-registry`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteDeviceRegistryRequest) Reset() { *m = DeleteDeviceRegistryRequest{} } +func (m *DeleteDeviceRegistryRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteDeviceRegistryRequest) ProtoMessage() {} +func (*DeleteDeviceRegistryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *DeleteDeviceRegistryRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request for `UpdateDeviceRegistry`. +type UpdateDeviceRegistryRequest struct { + // The new values for the device registry. The `id` field must be empty, and + // the `name` field must indicate the path of the resource. For example, + // `projects/example-project/locations/us-central1/registries/my-registry`. + DeviceRegistry *DeviceRegistry `protobuf:"bytes,1,opt,name=device_registry,json=deviceRegistry" json:"device_registry,omitempty"` + // Only updates the `device_registry` fields indicated by this mask. + // The field mask must not be empty, and it must not contain fields that + // are immutable or only set by the server. + // Mutable top-level fields: `event_notification_config`, `http_config`, + // `mqtt_config`, and `state_notification_config`. + UpdateMask *google_protobuf4.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateDeviceRegistryRequest) Reset() { *m = UpdateDeviceRegistryRequest{} } +func (m *UpdateDeviceRegistryRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateDeviceRegistryRequest) ProtoMessage() {} +func (*UpdateDeviceRegistryRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } + +func (m *UpdateDeviceRegistryRequest) GetDeviceRegistry() *DeviceRegistry { + if m != nil { + return m.DeviceRegistry + } + return nil +} + +func (m *UpdateDeviceRegistryRequest) GetUpdateMask() *google_protobuf4.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +// Request for `ListDeviceRegistries`. +type ListDeviceRegistriesRequest struct { + // The project and cloud region path. For example, + // `projects/example-project/locations/us-central1`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The maximum number of registries to return in the response. If this value + // is zero, the service will select a default size. A call may return fewer + // objects than requested, but if there is a non-empty `page_token`, it + // indicates that more entries are available. 
+ PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // The value returned by the last `ListDeviceRegistriesResponse`; indicates + // that this is a continuation of a prior `ListDeviceRegistries` call, and + // that the system should return the next page of data. + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListDeviceRegistriesRequest) Reset() { *m = ListDeviceRegistriesRequest{} } +func (m *ListDeviceRegistriesRequest) String() string { return proto.CompactTextString(m) } +func (*ListDeviceRegistriesRequest) ProtoMessage() {} +func (*ListDeviceRegistriesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{4} } + +func (m *ListDeviceRegistriesRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListDeviceRegistriesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListDeviceRegistriesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Response for `ListDeviceRegistries`. +type ListDeviceRegistriesResponse struct { + // The registries that matched the query. + DeviceRegistries []*DeviceRegistry `protobuf:"bytes,1,rep,name=device_registries,json=deviceRegistries" json:"device_registries,omitempty"` + // If not empty, indicates that there may be more registries that match the + // request; this value should be passed in a new + // `ListDeviceRegistriesRequest`. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListDeviceRegistriesResponse) Reset() { *m = ListDeviceRegistriesResponse{} } +func (m *ListDeviceRegistriesResponse) String() string { return proto.CompactTextString(m) } +func (*ListDeviceRegistriesResponse) ProtoMessage() {} +func (*ListDeviceRegistriesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{5} } + +func (m *ListDeviceRegistriesResponse) GetDeviceRegistries() []*DeviceRegistry { + if m != nil { + return m.DeviceRegistries + } + return nil +} + +func (m *ListDeviceRegistriesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request for `CreateDevice`. +type CreateDeviceRequest struct { + // The name of the device registry where this device should be created. + // For example, + // `projects/example-project/locations/us-central1/registries/my-registry`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The device registration details. + Device *Device `protobuf:"bytes,2,opt,name=device" json:"device,omitempty"` +} + +func (m *CreateDeviceRequest) Reset() { *m = CreateDeviceRequest{} } +func (m *CreateDeviceRequest) String() string { return proto.CompactTextString(m) } +func (*CreateDeviceRequest) ProtoMessage() {} +func (*CreateDeviceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{6} } + +func (m *CreateDeviceRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateDeviceRequest) GetDevice() *Device { + if m != nil { + return m.Device + } + return nil +} + +// Request for `GetDevice`. +type GetDeviceRequest struct { + // The name of the device. For example, + // `projects/p0/locations/us-central1/registries/registry0/devices/device0` or + // `projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`. 
+ Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The fields of the `Device` resource to be returned in the response. If the + // field mask is unset or empty, all fields are returned. + FieldMask *google_protobuf4.FieldMask `protobuf:"bytes,2,opt,name=field_mask,json=fieldMask" json:"field_mask,omitempty"` +} + +func (m *GetDeviceRequest) Reset() { *m = GetDeviceRequest{} } +func (m *GetDeviceRequest) String() string { return proto.CompactTextString(m) } +func (*GetDeviceRequest) ProtoMessage() {} +func (*GetDeviceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{7} } + +func (m *GetDeviceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *GetDeviceRequest) GetFieldMask() *google_protobuf4.FieldMask { + if m != nil { + return m.FieldMask + } + return nil +} + +// Request for `UpdateDevice`. +type UpdateDeviceRequest struct { + // The new values for the device registry. The `id` and `num_id` fields must + // be empty, and the field `name` must specify the name path. For example, + // `projects/p0/locations/us-central1/registries/registry0/devices/device0`or + // `projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`. + Device *Device `protobuf:"bytes,2,opt,name=device" json:"device,omitempty"` + // Only updates the `device` fields indicated by this mask. + // The field mask must not be empty, and it must not contain fields that + // are immutable or only set by the server. + // Mutable top-level fields: `credentials`, `enabled_state`, and `metadata` + UpdateMask *google_protobuf4.FieldMask `protobuf:"bytes,3,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` +} + +func (m *UpdateDeviceRequest) Reset() { *m = UpdateDeviceRequest{} } +func (m *UpdateDeviceRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateDeviceRequest) ProtoMessage() {} +func (*UpdateDeviceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{8} } + +func (m *UpdateDeviceRequest) GetDevice() *Device { + if m != nil { + return m.Device + } + return nil +} + +func (m *UpdateDeviceRequest) GetUpdateMask() *google_protobuf4.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +// Request for `DeleteDevice`. +type DeleteDeviceRequest struct { + // The name of the device. For example, + // `projects/p0/locations/us-central1/registries/registry0/devices/device0` or + // `projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteDeviceRequest) Reset() { *m = DeleteDeviceRequest{} } +func (m *DeleteDeviceRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteDeviceRequest) ProtoMessage() {} +func (*DeleteDeviceRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{9} } + +func (m *DeleteDeviceRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// Request for `ListDevices`. +type ListDevicesRequest struct { + // The device registry path. Required. For example, + // `projects/my-project/locations/us-central1/registries/my-registry`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // A list of device numerical ids. If empty, it will ignore this field. This + // field cannot hold more than 10,000 entries. 
+ DeviceNumIds []uint64 `protobuf:"varint,2,rep,packed,name=device_num_ids,json=deviceNumIds" json:"device_num_ids,omitempty"` + // A list of device string identifiers. If empty, it will ignore this field. + // For example, `['device0', 'device12']`. This field cannot hold more than + // 10,000 entries. + DeviceIds []string `protobuf:"bytes,3,rep,name=device_ids,json=deviceIds" json:"device_ids,omitempty"` + // The fields of the `Device` resource to be returned in the response. The + // fields `id`, and `num_id` are always returned by default, along with any + // other fields specified. + FieldMask *google_protobuf4.FieldMask `protobuf:"bytes,4,opt,name=field_mask,json=fieldMask" json:"field_mask,omitempty"` + // The maximum number of devices to return in the response. If this value + // is zero, the service will select a default size. A call may return fewer + // objects than requested, but if there is a non-empty `page_token`, it + // indicates that more entries are available. + PageSize int32 `protobuf:"varint,100,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // The value returned by the last `ListDevicesResponse`; indicates + // that this is a continuation of a prior `ListDevices` call, and + // that the system should return the next page of data. + PageToken string `protobuf:"bytes,101,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListDevicesRequest) Reset() { *m = ListDevicesRequest{} } +func (m *ListDevicesRequest) String() string { return proto.CompactTextString(m) } +func (*ListDevicesRequest) ProtoMessage() {} +func (*ListDevicesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{10} } + +func (m *ListDevicesRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListDevicesRequest) GetDeviceNumIds() []uint64 { + if m != nil { + return m.DeviceNumIds + } + return nil +} + +func (m *ListDevicesRequest) GetDeviceIds() []string { + if m != nil { + return m.DeviceIds + } + return nil +} + +func (m *ListDevicesRequest) GetFieldMask() *google_protobuf4.FieldMask { + if m != nil { + return m.FieldMask + } + return nil +} + +func (m *ListDevicesRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListDevicesRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// Response for `ListDevices`. +type ListDevicesResponse struct { + // The devices that match the request. + Devices []*Device `protobuf:"bytes,1,rep,name=devices" json:"devices,omitempty"` + // If not empty, indicates that there may be more devices that match the + // request; this value should be passed in a new `ListDevicesRequest`. + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListDevicesResponse) Reset() { *m = ListDevicesResponse{} } +func (m *ListDevicesResponse) String() string { return proto.CompactTextString(m) } +func (*ListDevicesResponse) ProtoMessage() {} +func (*ListDevicesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{11} } + +func (m *ListDevicesResponse) GetDevices() []*Device { + if m != nil { + return m.Devices + } + return nil +} + +func (m *ListDevicesResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// Request for `ModifyCloudToDeviceConfig`. +type ModifyCloudToDeviceConfigRequest struct { + // The name of the device. 
For example, + // `projects/p0/locations/us-central1/registries/registry0/devices/device0` or + // `projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The version number to update. If this value is zero, it will not check the + // version number of the server and will always update the current version; + // otherwise, this update will fail if the version number found on the server + // does not match this version number. This is used to support multiple + // simultaneous updates without losing data. + VersionToUpdate int64 `protobuf:"varint,2,opt,name=version_to_update,json=versionToUpdate" json:"version_to_update,omitempty"` + // The configuration data for the device. + BinaryData []byte `protobuf:"bytes,3,opt,name=binary_data,json=binaryData,proto3" json:"binary_data,omitempty"` +} + +func (m *ModifyCloudToDeviceConfigRequest) Reset() { *m = ModifyCloudToDeviceConfigRequest{} } +func (m *ModifyCloudToDeviceConfigRequest) String() string { return proto.CompactTextString(m) } +func (*ModifyCloudToDeviceConfigRequest) ProtoMessage() {} +func (*ModifyCloudToDeviceConfigRequest) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{12} +} + +func (m *ModifyCloudToDeviceConfigRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ModifyCloudToDeviceConfigRequest) GetVersionToUpdate() int64 { + if m != nil { + return m.VersionToUpdate + } + return 0 +} + +func (m *ModifyCloudToDeviceConfigRequest) GetBinaryData() []byte { + if m != nil { + return m.BinaryData + } + return nil +} + +// Request for `ListDeviceConfigVersions`. +type ListDeviceConfigVersionsRequest struct { + // The name of the device. For example, + // `projects/p0/locations/us-central1/registries/registry0/devices/device0` or + // `projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The number of versions to list. Versions are listed in decreasing order of + // the version number. The maximum number of versions retained is 10. If this + // value is zero, it will return all the versions available. + NumVersions int32 `protobuf:"varint,2,opt,name=num_versions,json=numVersions" json:"num_versions,omitempty"` +} + +func (m *ListDeviceConfigVersionsRequest) Reset() { *m = ListDeviceConfigVersionsRequest{} } +func (m *ListDeviceConfigVersionsRequest) String() string { return proto.CompactTextString(m) } +func (*ListDeviceConfigVersionsRequest) ProtoMessage() {} +func (*ListDeviceConfigVersionsRequest) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{13} +} + +func (m *ListDeviceConfigVersionsRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListDeviceConfigVersionsRequest) GetNumVersions() int32 { + if m != nil { + return m.NumVersions + } + return 0 +} + +// Response for `ListDeviceConfigVersions`. +type ListDeviceConfigVersionsResponse struct { + // The device configuration for the last few versions. Versions are listed + // in decreasing order, starting from the most recent one. 
+ DeviceConfigs []*DeviceConfig `protobuf:"bytes,1,rep,name=device_configs,json=deviceConfigs" json:"device_configs,omitempty"` +} + +func (m *ListDeviceConfigVersionsResponse) Reset() { *m = ListDeviceConfigVersionsResponse{} } +func (m *ListDeviceConfigVersionsResponse) String() string { return proto.CompactTextString(m) } +func (*ListDeviceConfigVersionsResponse) ProtoMessage() {} +func (*ListDeviceConfigVersionsResponse) Descriptor() ([]byte, []int) { + return fileDescriptor0, []int{14} +} + +func (m *ListDeviceConfigVersionsResponse) GetDeviceConfigs() []*DeviceConfig { + if m != nil { + return m.DeviceConfigs + } + return nil +} + +// Request for `ListDeviceStates`. +type ListDeviceStatesRequest struct { + // The name of the device. For example, + // `projects/p0/locations/us-central1/registries/registry0/devices/device0` or + // `projects/p0/locations/us-central1/registries/registry0/devices/{num_id}`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // The number of states to list. States are listed in descending order of + // update time. The maximum number of states retained is 10. If this + // value is zero, it will return all the states available. + NumStates int32 `protobuf:"varint,2,opt,name=num_states,json=numStates" json:"num_states,omitempty"` +} + +func (m *ListDeviceStatesRequest) Reset() { *m = ListDeviceStatesRequest{} } +func (m *ListDeviceStatesRequest) String() string { return proto.CompactTextString(m) } +func (*ListDeviceStatesRequest) ProtoMessage() {} +func (*ListDeviceStatesRequest) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{15} } + +func (m *ListDeviceStatesRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *ListDeviceStatesRequest) GetNumStates() int32 { + if m != nil { + return m.NumStates + } + return 0 +} + +// Response for `ListDeviceStates`. +type ListDeviceStatesResponse struct { + // The last few device states. States are listed in descending order of server + // update time, starting from the most recent one. 
+ DeviceStates []*DeviceState `protobuf:"bytes,1,rep,name=device_states,json=deviceStates" json:"device_states,omitempty"` +} + +func (m *ListDeviceStatesResponse) Reset() { *m = ListDeviceStatesResponse{} } +func (m *ListDeviceStatesResponse) String() string { return proto.CompactTextString(m) } +func (*ListDeviceStatesResponse) ProtoMessage() {} +func (*ListDeviceStatesResponse) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{16} } + +func (m *ListDeviceStatesResponse) GetDeviceStates() []*DeviceState { + if m != nil { + return m.DeviceStates + } + return nil +} + +func init() { + proto.RegisterType((*CreateDeviceRegistryRequest)(nil), "google.cloud.iot.v1.CreateDeviceRegistryRequest") + proto.RegisterType((*GetDeviceRegistryRequest)(nil), "google.cloud.iot.v1.GetDeviceRegistryRequest") + proto.RegisterType((*DeleteDeviceRegistryRequest)(nil), "google.cloud.iot.v1.DeleteDeviceRegistryRequest") + proto.RegisterType((*UpdateDeviceRegistryRequest)(nil), "google.cloud.iot.v1.UpdateDeviceRegistryRequest") + proto.RegisterType((*ListDeviceRegistriesRequest)(nil), "google.cloud.iot.v1.ListDeviceRegistriesRequest") + proto.RegisterType((*ListDeviceRegistriesResponse)(nil), "google.cloud.iot.v1.ListDeviceRegistriesResponse") + proto.RegisterType((*CreateDeviceRequest)(nil), "google.cloud.iot.v1.CreateDeviceRequest") + proto.RegisterType((*GetDeviceRequest)(nil), "google.cloud.iot.v1.GetDeviceRequest") + proto.RegisterType((*UpdateDeviceRequest)(nil), "google.cloud.iot.v1.UpdateDeviceRequest") + proto.RegisterType((*DeleteDeviceRequest)(nil), "google.cloud.iot.v1.DeleteDeviceRequest") + proto.RegisterType((*ListDevicesRequest)(nil), "google.cloud.iot.v1.ListDevicesRequest") + proto.RegisterType((*ListDevicesResponse)(nil), "google.cloud.iot.v1.ListDevicesResponse") + proto.RegisterType((*ModifyCloudToDeviceConfigRequest)(nil), "google.cloud.iot.v1.ModifyCloudToDeviceConfigRequest") + proto.RegisterType((*ListDeviceConfigVersionsRequest)(nil), "google.cloud.iot.v1.ListDeviceConfigVersionsRequest") + proto.RegisterType((*ListDeviceConfigVersionsResponse)(nil), "google.cloud.iot.v1.ListDeviceConfigVersionsResponse") + proto.RegisterType((*ListDeviceStatesRequest)(nil), "google.cloud.iot.v1.ListDeviceStatesRequest") + proto.RegisterType((*ListDeviceStatesResponse)(nil), "google.cloud.iot.v1.ListDeviceStatesResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for DeviceManager service + +type DeviceManagerClient interface { + // Creates a device registry that contains devices. + CreateDeviceRegistry(ctx context.Context, in *CreateDeviceRegistryRequest, opts ...grpc.CallOption) (*DeviceRegistry, error) + // Gets a device registry configuration. + GetDeviceRegistry(ctx context.Context, in *GetDeviceRegistryRequest, opts ...grpc.CallOption) (*DeviceRegistry, error) + // Updates a device registry configuration. + UpdateDeviceRegistry(ctx context.Context, in *UpdateDeviceRegistryRequest, opts ...grpc.CallOption) (*DeviceRegistry, error) + // Deletes a device registry configuration. + DeleteDeviceRegistry(ctx context.Context, in *DeleteDeviceRegistryRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) + // Lists device registries. 
+ ListDeviceRegistries(ctx context.Context, in *ListDeviceRegistriesRequest, opts ...grpc.CallOption) (*ListDeviceRegistriesResponse, error) + // Creates a device in a device registry. + CreateDevice(ctx context.Context, in *CreateDeviceRequest, opts ...grpc.CallOption) (*Device, error) + // Gets details about a device. + GetDevice(ctx context.Context, in *GetDeviceRequest, opts ...grpc.CallOption) (*Device, error) + // Updates a device. + UpdateDevice(ctx context.Context, in *UpdateDeviceRequest, opts ...grpc.CallOption) (*Device, error) + // Deletes a device. + DeleteDevice(ctx context.Context, in *DeleteDeviceRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) + // List devices in a device registry. + ListDevices(ctx context.Context, in *ListDevicesRequest, opts ...grpc.CallOption) (*ListDevicesResponse, error) + // Modifies the configuration for the device, which is eventually sent from + // the Cloud IoT Core servers. Returns the modified configuration version and + // its metadata. + ModifyCloudToDeviceConfig(ctx context.Context, in *ModifyCloudToDeviceConfigRequest, opts ...grpc.CallOption) (*DeviceConfig, error) + // Lists the last few versions of the device configuration in descending + // order (i.e.: newest first). + ListDeviceConfigVersions(ctx context.Context, in *ListDeviceConfigVersionsRequest, opts ...grpc.CallOption) (*ListDeviceConfigVersionsResponse, error) + // Lists the last few versions of the device state in descending order (i.e.: + // newest first). + ListDeviceStates(ctx context.Context, in *ListDeviceStatesRequest, opts ...grpc.CallOption) (*ListDeviceStatesResponse, error) + // Sets the access control policy on the specified resource. Replaces any + // existing policy. + SetIamPolicy(ctx context.Context, in *google_iam_v11.SetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) + // Gets the access control policy for a resource. + // Returns an empty policy if the resource exists and does not have a policy + // set. + GetIamPolicy(ctx context.Context, in *google_iam_v11.GetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) + // Returns permissions that a caller has on the specified resource. + // If the resource does not exist, this will return an empty set of + // permissions, not a NOT_FOUND error. + TestIamPermissions(ctx context.Context, in *google_iam_v11.TestIamPermissionsRequest, opts ...grpc.CallOption) (*google_iam_v11.TestIamPermissionsResponse, error) +} + +type deviceManagerClient struct { + cc *grpc.ClientConn +} + +func NewDeviceManagerClient(cc *grpc.ClientConn) DeviceManagerClient { + return &deviceManagerClient{cc} +} + +func (c *deviceManagerClient) CreateDeviceRegistry(ctx context.Context, in *CreateDeviceRegistryRequest, opts ...grpc.CallOption) (*DeviceRegistry, error) { + out := new(DeviceRegistry) + err := grpc.Invoke(ctx, "/google.cloud.iot.v1.DeviceManager/CreateDeviceRegistry", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *deviceManagerClient) GetDeviceRegistry(ctx context.Context, in *GetDeviceRegistryRequest, opts ...grpc.CallOption) (*DeviceRegistry, error) { + out := new(DeviceRegistry) + err := grpc.Invoke(ctx, "/google.cloud.iot.v1.DeviceManager/GetDeviceRegistry", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *deviceManagerClient) UpdateDeviceRegistry(ctx context.Context, in *UpdateDeviceRegistryRequest, opts ...grpc.CallOption) (*DeviceRegistry, error) { + out := new(DeviceRegistry) + err := grpc.Invoke(ctx, "/google.cloud.iot.v1.DeviceManager/UpdateDeviceRegistry", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *deviceManagerClient) DeleteDeviceRegistry(ctx context.Context, in *DeleteDeviceRegistryRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) { + out := new(google_protobuf3.Empty) + err := grpc.Invoke(ctx, "/google.cloud.iot.v1.DeviceManager/DeleteDeviceRegistry", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *deviceManagerClient) ListDeviceRegistries(ctx context.Context, in *ListDeviceRegistriesRequest, opts ...grpc.CallOption) (*ListDeviceRegistriesResponse, error) { + out := new(ListDeviceRegistriesResponse) + err := grpc.Invoke(ctx, "/google.cloud.iot.v1.DeviceManager/ListDeviceRegistries", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *deviceManagerClient) CreateDevice(ctx context.Context, in *CreateDeviceRequest, opts ...grpc.CallOption) (*Device, error) { + out := new(Device) + err := grpc.Invoke(ctx, "/google.cloud.iot.v1.DeviceManager/CreateDevice", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *deviceManagerClient) GetDevice(ctx context.Context, in *GetDeviceRequest, opts ...grpc.CallOption) (*Device, error) { + out := new(Device) + err := grpc.Invoke(ctx, "/google.cloud.iot.v1.DeviceManager/GetDevice", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *deviceManagerClient) UpdateDevice(ctx context.Context, in *UpdateDeviceRequest, opts ...grpc.CallOption) (*Device, error) { + out := new(Device) + err := grpc.Invoke(ctx, "/google.cloud.iot.v1.DeviceManager/UpdateDevice", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *deviceManagerClient) DeleteDevice(ctx context.Context, in *DeleteDeviceRequest, opts ...grpc.CallOption) (*google_protobuf3.Empty, error) { + out := new(google_protobuf3.Empty) + err := grpc.Invoke(ctx, "/google.cloud.iot.v1.DeviceManager/DeleteDevice", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *deviceManagerClient) ListDevices(ctx context.Context, in *ListDevicesRequest, opts ...grpc.CallOption) (*ListDevicesResponse, error) { + out := new(ListDevicesResponse) + err := grpc.Invoke(ctx, "/google.cloud.iot.v1.DeviceManager/ListDevices", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *deviceManagerClient) ModifyCloudToDeviceConfig(ctx context.Context, in *ModifyCloudToDeviceConfigRequest, opts ...grpc.CallOption) (*DeviceConfig, error) { + out := new(DeviceConfig) + err := grpc.Invoke(ctx, "/google.cloud.iot.v1.DeviceManager/ModifyCloudToDeviceConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *deviceManagerClient) ListDeviceConfigVersions(ctx context.Context, in *ListDeviceConfigVersionsRequest, opts ...grpc.CallOption) (*ListDeviceConfigVersionsResponse, error) { + out := new(ListDeviceConfigVersionsResponse) + err := grpc.Invoke(ctx, "/google.cloud.iot.v1.DeviceManager/ListDeviceConfigVersions", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +func (c *deviceManagerClient) ListDeviceStates(ctx context.Context, in *ListDeviceStatesRequest, opts ...grpc.CallOption) (*ListDeviceStatesResponse, error) { + out := new(ListDeviceStatesResponse) + err := grpc.Invoke(ctx, "/google.cloud.iot.v1.DeviceManager/ListDeviceStates", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *deviceManagerClient) SetIamPolicy(ctx context.Context, in *google_iam_v11.SetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) { + out := new(google_iam_v1.Policy) + err := grpc.Invoke(ctx, "/google.cloud.iot.v1.DeviceManager/SetIamPolicy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *deviceManagerClient) GetIamPolicy(ctx context.Context, in *google_iam_v11.GetIamPolicyRequest, opts ...grpc.CallOption) (*google_iam_v1.Policy, error) { + out := new(google_iam_v1.Policy) + err := grpc.Invoke(ctx, "/google.cloud.iot.v1.DeviceManager/GetIamPolicy", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *deviceManagerClient) TestIamPermissions(ctx context.Context, in *google_iam_v11.TestIamPermissionsRequest, opts ...grpc.CallOption) (*google_iam_v11.TestIamPermissionsResponse, error) { + out := new(google_iam_v11.TestIamPermissionsResponse) + err := grpc.Invoke(ctx, "/google.cloud.iot.v1.DeviceManager/TestIamPermissions", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for DeviceManager service + +type DeviceManagerServer interface { + // Creates a device registry that contains devices. + CreateDeviceRegistry(context.Context, *CreateDeviceRegistryRequest) (*DeviceRegistry, error) + // Gets a device registry configuration. + GetDeviceRegistry(context.Context, *GetDeviceRegistryRequest) (*DeviceRegistry, error) + // Updates a device registry configuration. + UpdateDeviceRegistry(context.Context, *UpdateDeviceRegistryRequest) (*DeviceRegistry, error) + // Deletes a device registry configuration. + DeleteDeviceRegistry(context.Context, *DeleteDeviceRegistryRequest) (*google_protobuf3.Empty, error) + // Lists device registries. + ListDeviceRegistries(context.Context, *ListDeviceRegistriesRequest) (*ListDeviceRegistriesResponse, error) + // Creates a device in a device registry. + CreateDevice(context.Context, *CreateDeviceRequest) (*Device, error) + // Gets details about a device. + GetDevice(context.Context, *GetDeviceRequest) (*Device, error) + // Updates a device. + UpdateDevice(context.Context, *UpdateDeviceRequest) (*Device, error) + // Deletes a device. + DeleteDevice(context.Context, *DeleteDeviceRequest) (*google_protobuf3.Empty, error) + // List devices in a device registry. + ListDevices(context.Context, *ListDevicesRequest) (*ListDevicesResponse, error) + // Modifies the configuration for the device, which is eventually sent from + // the Cloud IoT Core servers. Returns the modified configuration version and + // its metadata. + ModifyCloudToDeviceConfig(context.Context, *ModifyCloudToDeviceConfigRequest) (*DeviceConfig, error) + // Lists the last few versions of the device configuration in descending + // order (i.e.: newest first). + ListDeviceConfigVersions(context.Context, *ListDeviceConfigVersionsRequest) (*ListDeviceConfigVersionsResponse, error) + // Lists the last few versions of the device state in descending order (i.e.: + // newest first). 
+ ListDeviceStates(context.Context, *ListDeviceStatesRequest) (*ListDeviceStatesResponse, error) + // Sets the access control policy on the specified resource. Replaces any + // existing policy. + SetIamPolicy(context.Context, *google_iam_v11.SetIamPolicyRequest) (*google_iam_v1.Policy, error) + // Gets the access control policy for a resource. + // Returns an empty policy if the resource exists and does not have a policy + // set. + GetIamPolicy(context.Context, *google_iam_v11.GetIamPolicyRequest) (*google_iam_v1.Policy, error) + // Returns permissions that a caller has on the specified resource. + // If the resource does not exist, this will return an empty set of + // permissions, not a NOT_FOUND error. + TestIamPermissions(context.Context, *google_iam_v11.TestIamPermissionsRequest) (*google_iam_v11.TestIamPermissionsResponse, error) +} + +func RegisterDeviceManagerServer(s *grpc.Server, srv DeviceManagerServer) { + s.RegisterService(&_DeviceManager_serviceDesc, srv) +} + +func _DeviceManager_CreateDeviceRegistry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateDeviceRegistryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DeviceManagerServer).CreateDeviceRegistry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.iot.v1.DeviceManager/CreateDeviceRegistry", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DeviceManagerServer).CreateDeviceRegistry(ctx, req.(*CreateDeviceRegistryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DeviceManager_GetDeviceRegistry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetDeviceRegistryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DeviceManagerServer).GetDeviceRegistry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.iot.v1.DeviceManager/GetDeviceRegistry", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DeviceManagerServer).GetDeviceRegistry(ctx, req.(*GetDeviceRegistryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DeviceManager_UpdateDeviceRegistry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateDeviceRegistryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DeviceManagerServer).UpdateDeviceRegistry(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.iot.v1.DeviceManager/UpdateDeviceRegistry", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DeviceManagerServer).UpdateDeviceRegistry(ctx, req.(*UpdateDeviceRegistryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DeviceManager_DeleteDeviceRegistry_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteDeviceRegistryRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DeviceManagerServer).DeleteDeviceRegistry(ctx, in) + } + info := 
&grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.iot.v1.DeviceManager/DeleteDeviceRegistry", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DeviceManagerServer).DeleteDeviceRegistry(ctx, req.(*DeleteDeviceRegistryRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DeviceManager_ListDeviceRegistries_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListDeviceRegistriesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DeviceManagerServer).ListDeviceRegistries(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.iot.v1.DeviceManager/ListDeviceRegistries", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DeviceManagerServer).ListDeviceRegistries(ctx, req.(*ListDeviceRegistriesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DeviceManager_CreateDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateDeviceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DeviceManagerServer).CreateDevice(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.iot.v1.DeviceManager/CreateDevice", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DeviceManagerServer).CreateDevice(ctx, req.(*CreateDeviceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DeviceManager_GetDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetDeviceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DeviceManagerServer).GetDevice(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.iot.v1.DeviceManager/GetDevice", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DeviceManagerServer).GetDevice(ctx, req.(*GetDeviceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DeviceManager_UpdateDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateDeviceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DeviceManagerServer).UpdateDevice(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.iot.v1.DeviceManager/UpdateDevice", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DeviceManagerServer).UpdateDevice(ctx, req.(*UpdateDeviceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DeviceManager_DeleteDevice_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteDeviceRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DeviceManagerServer).DeleteDevice(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: 
"/google.cloud.iot.v1.DeviceManager/DeleteDevice", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DeviceManagerServer).DeleteDevice(ctx, req.(*DeleteDeviceRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DeviceManager_ListDevices_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListDevicesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DeviceManagerServer).ListDevices(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.iot.v1.DeviceManager/ListDevices", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DeviceManagerServer).ListDevices(ctx, req.(*ListDevicesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DeviceManager_ModifyCloudToDeviceConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ModifyCloudToDeviceConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DeviceManagerServer).ModifyCloudToDeviceConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.iot.v1.DeviceManager/ModifyCloudToDeviceConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DeviceManagerServer).ModifyCloudToDeviceConfig(ctx, req.(*ModifyCloudToDeviceConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DeviceManager_ListDeviceConfigVersions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListDeviceConfigVersionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DeviceManagerServer).ListDeviceConfigVersions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.iot.v1.DeviceManager/ListDeviceConfigVersions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DeviceManagerServer).ListDeviceConfigVersions(ctx, req.(*ListDeviceConfigVersionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DeviceManager_ListDeviceStates_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListDeviceStatesRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DeviceManagerServer).ListDeviceStates(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.iot.v1.DeviceManager/ListDeviceStates", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DeviceManagerServer).ListDeviceStates(ctx, req.(*ListDeviceStatesRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DeviceManager_SetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.SetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DeviceManagerServer).SetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ 
+ Server: srv, + FullMethod: "/google.cloud.iot.v1.DeviceManager/SetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DeviceManagerServer).SetIamPolicy(ctx, req.(*google_iam_v11.SetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DeviceManager_GetIamPolicy_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.GetIamPolicyRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DeviceManagerServer).GetIamPolicy(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.iot.v1.DeviceManager/GetIamPolicy", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DeviceManagerServer).GetIamPolicy(ctx, req.(*google_iam_v11.GetIamPolicyRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _DeviceManager_TestIamPermissions_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(google_iam_v11.TestIamPermissionsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(DeviceManagerServer).TestIamPermissions(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.cloud.iot.v1.DeviceManager/TestIamPermissions", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(DeviceManagerServer).TestIamPermissions(ctx, req.(*google_iam_v11.TestIamPermissionsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _DeviceManager_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.cloud.iot.v1.DeviceManager", + HandlerType: (*DeviceManagerServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "CreateDeviceRegistry", + Handler: _DeviceManager_CreateDeviceRegistry_Handler, + }, + { + MethodName: "GetDeviceRegistry", + Handler: _DeviceManager_GetDeviceRegistry_Handler, + }, + { + MethodName: "UpdateDeviceRegistry", + Handler: _DeviceManager_UpdateDeviceRegistry_Handler, + }, + { + MethodName: "DeleteDeviceRegistry", + Handler: _DeviceManager_DeleteDeviceRegistry_Handler, + }, + { + MethodName: "ListDeviceRegistries", + Handler: _DeviceManager_ListDeviceRegistries_Handler, + }, + { + MethodName: "CreateDevice", + Handler: _DeviceManager_CreateDevice_Handler, + }, + { + MethodName: "GetDevice", + Handler: _DeviceManager_GetDevice_Handler, + }, + { + MethodName: "UpdateDevice", + Handler: _DeviceManager_UpdateDevice_Handler, + }, + { + MethodName: "DeleteDevice", + Handler: _DeviceManager_DeleteDevice_Handler, + }, + { + MethodName: "ListDevices", + Handler: _DeviceManager_ListDevices_Handler, + }, + { + MethodName: "ModifyCloudToDeviceConfig", + Handler: _DeviceManager_ModifyCloudToDeviceConfig_Handler, + }, + { + MethodName: "ListDeviceConfigVersions", + Handler: _DeviceManager_ListDeviceConfigVersions_Handler, + }, + { + MethodName: "ListDeviceStates", + Handler: _DeviceManager_ListDeviceStates_Handler, + }, + { + MethodName: "SetIamPolicy", + Handler: _DeviceManager_SetIamPolicy_Handler, + }, + { + MethodName: "GetIamPolicy", + Handler: _DeviceManager_GetIamPolicy_Handler, + }, + { + MethodName: "TestIamPermissions", + Handler: _DeviceManager_TestIamPermissions_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: 
"google/cloud/iot/v1/device_manager.proto", +} + +func init() { proto.RegisterFile("google/cloud/iot/v1/device_manager.proto", fileDescriptor0) } + +var fileDescriptor0 = []byte{ + // 1307 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xbc, 0x98, 0xcf, 0x8f, 0xdb, 0x44, + 0x14, 0xc7, 0x35, 0x9b, 0x52, 0xd8, 0xb7, 0x29, 0x6d, 0x67, 0xb7, 0x6d, 0x48, 0x5a, 0x9a, 0xba, + 0xfc, 0x48, 0x23, 0x6a, 0x77, 0xb7, 0xb4, 0x2a, 0xa9, 0xa0, 0xd0, 0x6e, 0xd9, 0xb6, 0x6a, 0x21, + 0x64, 0x17, 0x8a, 0x90, 0x50, 0x34, 0x1b, 0xcf, 0x46, 0xd3, 0x8d, 0x3d, 0xa9, 0xc7, 0x59, 0xb1, + 0x45, 0xbd, 0xd0, 0x03, 0x67, 0x40, 0x48, 0xbd, 0x21, 0x71, 0x40, 0xdc, 0x91, 0x10, 0x12, 0x77, + 0xfe, 0x01, 0x38, 0xf0, 0x07, 0x70, 0xe0, 0x4f, 0xe8, 0x11, 0x79, 0x66, 0xbc, 0x6b, 0x3b, 0xb6, + 0xe3, 0xe4, 0xc0, 0x2d, 0x9e, 0xf7, 0x66, 0xe6, 0x33, 0xdf, 0xf7, 0x9e, 0xe7, 0xc5, 0xd0, 0xe8, + 0x73, 0xde, 0x1f, 0x50, 0xab, 0x37, 0xe0, 0x23, 0xdb, 0x62, 0xdc, 0xb7, 0x76, 0x96, 0x2d, 0x9b, + 0xee, 0xb0, 0x1e, 0xed, 0x3a, 0xc4, 0x25, 0x7d, 0xea, 0x99, 0x43, 0x8f, 0xfb, 0x1c, 0x2f, 0x2a, + 0x4f, 0x53, 0x7a, 0x9a, 0x8c, 0xfb, 0xe6, 0xce, 0x72, 0xf5, 0xa4, 0x9e, 0x4e, 0x86, 0xcc, 0x22, + 0xae, 0xcb, 0x7d, 0xe2, 0x33, 0xee, 0x0a, 0x35, 0xa5, 0x7a, 0x36, 0x6d, 0x71, 0x8f, 0x0a, 0x3e, + 0xf2, 0x7a, 0x34, 0x74, 0x7a, 0x59, 0x3b, 0x31, 0xe2, 0x04, 0x66, 0x46, 0x9c, 0xee, 0x90, 0x0f, + 0x58, 0x6f, 0x57, 0xdb, 0xab, 0x71, 0x7b, 0xcc, 0x56, 0xd3, 0x36, 0xf9, 0xb4, 0x39, 0xda, 0xb2, + 0xa8, 0x33, 0xf4, 0x43, 0x63, 0x3d, 0x69, 0xdc, 0x62, 0x74, 0x60, 0x77, 0x1d, 0x22, 0xb6, 0x95, + 0x87, 0xf1, 0x04, 0x41, 0xed, 0x86, 0x47, 0x89, 0x4f, 0x57, 0xe5, 0x89, 0x3b, 0xb4, 0xcf, 0x84, + 0xef, 0xed, 0x76, 0xe8, 0xc3, 0x11, 0x15, 0x3e, 0x3e, 0x0e, 0x07, 0x87, 0xc4, 0xa3, 0xae, 0x5f, + 0x41, 0x75, 0xd4, 0x98, 0xef, 0xe8, 0x27, 0x7c, 0x17, 0x0e, 0x6b, 0x89, 0x3c, 0x3d, 0xa3, 0x32, + 0x57, 0x47, 0x8d, 0x85, 0x95, 0xb3, 0x66, 0x8a, 0x48, 0x66, 0x62, 0xf1, 0x17, 0xed, 0xd8, 0xb3, + 0x61, 0x42, 0x65, 0x8d, 0xfa, 0xe9, 0x04, 0x18, 0x0e, 0xb8, 0xc4, 0xa1, 0x7a, 0x7f, 0xf9, 0xdb, + 0x58, 0x86, 0xda, 0x2a, 0x1d, 0xd0, 0x2c, 0xe8, 0xb4, 0x29, 0x3f, 0x23, 0xa8, 0x7d, 0x3c, 0xb4, + 0x33, 0x0f, 0x9a, 0x72, 0x20, 0x34, 0xf3, 0x81, 0xf0, 0x55, 0x58, 0x18, 0xc9, 0xcd, 0xa4, 0xd6, + 0x5a, 0x9a, 0x6a, 0xb8, 0x52, 0x18, 0x0e, 0xf3, 0xfd, 0x20, 0x1c, 0xf7, 0x88, 0xd8, 0xee, 0x80, + 0x72, 0x0f, 0x7e, 0x1b, 0x0f, 0xa1, 0x76, 0x97, 0x89, 0xb8, 0x1c, 0x8c, 0x8a, 0x49, 0x21, 0xa9, + 0xc1, 0xfc, 0x90, 0xf4, 0x69, 0x57, 0xb0, 0x47, 0x54, 0xee, 0xf8, 0x5c, 0xe7, 0x85, 0x60, 0x60, + 0x9d, 0x3d, 0xa2, 0xf8, 0x14, 0x80, 0x34, 0xfa, 0x7c, 0x9b, 0xba, 0x95, 0x92, 0x9c, 0x28, 0xdd, + 0x37, 0x82, 0x01, 0xe3, 0x29, 0x82, 0x93, 0xe9, 0x7b, 0x8a, 0x21, 0x77, 0x05, 0xc5, 0x6d, 0x38, + 0x1a, 0x97, 0x87, 0x51, 0x51, 0x41, 0xf5, 0x52, 0x51, 0x81, 0x8e, 0xd8, 0x89, 0x95, 0xf1, 0x6b, + 0x70, 0xd8, 0xa5, 0x5f, 0xf8, 0xdd, 0x08, 0xd6, 0x9c, 0xc4, 0x3a, 0x14, 0x0c, 0xb7, 0xf7, 0xd0, + 0x36, 0x61, 0x31, 0x9e, 0xa0, 0xf9, 0x2a, 0x5c, 0x84, 0x83, 0x6a, 0x2b, 0x2d, 0x7a, 0x2d, 0x8f, + 0x4e, 0xbb, 0x1a, 0x04, 0x8e, 0x44, 0xf2, 0x2f, 0x33, 0x89, 0xf0, 0x5b, 0x00, 0xfb, 0x15, 0x54, + 0x20, 0xaa, 0xf3, 0x5b, 0xe1, 0x4f, 0xe3, 0x6b, 0x04, 0x8b, 0xf1, 0xfc, 0x53, 0xdb, 0xcc, 0xc2, + 0x9b, 0x4c, 0xaf, 0xd2, 0x54, 0xe9, 0x75, 0x0e, 0x16, 0xe3, 0xc5, 0x93, 0x5d, 0x34, 0xff, 0x22, + 0xc0, 0xfb, 0x69, 0x31, 0x31, 0x03, 0x5f, 0x01, 0x5d, 0x07, 0x5d, 0x77, 0xe4, 0x74, 0x99, 0x2d, + 0x2a, 0x73, 0xf5, 0x52, 0xe3, 0x40, 0xa7, 0xac, 0x46, 0x3f, 0x18, 0x39, 0xb7, 0x6d, 0x11, 
0xa4, + 0xa2, 0xf6, 0x0a, 0x3c, 0x4a, 0xf5, 0x52, 0x90, 0x8a, 0x6a, 0x24, 0x30, 0xc7, 0x35, 0x3e, 0x30, + 0x85, 0xc6, 0xf1, 0x0a, 0xb0, 0x73, 0x2b, 0x80, 0x26, 0x2b, 0xc0, 0x87, 0xc5, 0xd8, 0x49, 0x75, + 0xde, 0x5f, 0x82, 0xe7, 0x15, 0x5a, 0x98, 0xed, 0xb9, 0xf1, 0x09, 0x7d, 0x0b, 0x27, 0xf7, 0x13, + 0x04, 0xf5, 0x7b, 0xdc, 0x66, 0x5b, 0xbb, 0x37, 0x82, 0xe5, 0x36, 0xb8, 0x5a, 0xe8, 0x06, 0x77, + 0xb7, 0x58, 0x3f, 0x2f, 0x13, 0x9b, 0x70, 0x74, 0x87, 0x7a, 0x82, 0x71, 0xb7, 0xeb, 0xf3, 0xae, + 0x8a, 0xae, 0xdc, 0xa2, 0xd4, 0x39, 0xac, 0x0d, 0x1b, 0x5c, 0xe5, 0x1b, 0x3e, 0x0d, 0x0b, 0x9b, + 0xcc, 0x25, 0xde, 0x6e, 0xd7, 0x26, 0x3e, 0x91, 0xd9, 0x52, 0xee, 0x80, 0x1a, 0x5a, 0x25, 0x3e, + 0x31, 0x3e, 0x85, 0xd3, 0xfb, 0x67, 0x57, 0x7b, 0x7f, 0xa2, 0xd6, 0x10, 0x79, 0x0c, 0x67, 0xa0, + 0x1c, 0xc4, 0x59, 0x6f, 0x27, 0xf4, 0x3b, 0x67, 0xc1, 0x1d, 0x39, 0xe1, 0x6c, 0x63, 0x00, 0xf5, + 0xec, 0x95, 0xb5, 0xc4, 0xb7, 0xf6, 0xb2, 0xa6, 0x27, 0x1d, 0x42, 0xa5, 0xcf, 0xe4, 0x28, 0xad, + 0x05, 0x3a, 0x64, 0x47, 0x9e, 0x84, 0x71, 0x17, 0x4e, 0xec, 0xef, 0xb6, 0xee, 0x13, 0x9f, 0xe6, + 0xf2, 0x9f, 0x02, 0x08, 0xf8, 0x85, 0x74, 0xd4, 0xf4, 0xf3, 0xee, 0xc8, 0x51, 0x33, 0x0d, 0x02, + 0x95, 0xf1, 0xd5, 0x34, 0xf3, 0x4d, 0xd0, 0x5b, 0x87, 0xb3, 0x15, 0x72, 0x3d, 0x07, 0x59, 0xae, + 0x10, 0x96, 0x82, 0x5a, 0x6e, 0xe5, 0xd9, 0x31, 0x38, 0xa4, 0xac, 0xf7, 0x54, 0xa3, 0x81, 0x7f, + 0x45, 0xb0, 0x94, 0x76, 0x1f, 0xe3, 0x0b, 0xa9, 0x4b, 0xe7, 0x5c, 0xdd, 0xd5, 0x22, 0xef, 0x65, + 0x63, 0xed, 0xab, 0x3f, 0xff, 0xf9, 0x6e, 0xee, 0x3d, 0xc3, 0x0c, 0x1a, 0x8b, 0x2f, 0x55, 0x1d, + 0xbf, 0x3d, 0xf4, 0xf8, 0x03, 0xda, 0xf3, 0x85, 0xd5, 0xb4, 0x06, 0xbc, 0xa7, 0xfa, 0x19, 0xab, + 0xf9, 0xd8, 0xda, 0x7f, 0xf5, 0xb7, 0x92, 0x97, 0x25, 0xfe, 0x01, 0xc1, 0xd1, 0xb1, 0x3b, 0x1c, + 0x9f, 0x4f, 0x65, 0xc8, 0xba, 0xeb, 0x8b, 0x21, 0x5f, 0x96, 0xc8, 0x17, 0xb0, 0x42, 0x0e, 0x22, + 0x99, 0x01, 0x1c, 0xe1, 0xb5, 0x9a, 0x8f, 0xf1, 0x1f, 0x08, 0x96, 0xd2, 0x3a, 0x80, 0x0c, 0x69, + 0x73, 0x9a, 0x85, 0x62, 0x9c, 0xf7, 0x25, 0xe7, 0x47, 0x2b, 0xef, 0x48, 0xce, 0x84, 0x5e, 0x66, + 0x61, 0xee, 0x71, 0xa9, 0x9f, 0x22, 0x58, 0x4a, 0x6b, 0x7f, 0x32, 0x0e, 0x92, 0xd3, 0x29, 0x55, + 0x8f, 0x8f, 0xbd, 0x58, 0x6f, 0x06, 0xed, 0x63, 0xa8, 0x71, 0x73, 0x5a, 0x8d, 0x7f, 0x41, 0xb0, + 0x94, 0xd6, 0x47, 0x64, 0xa0, 0xe5, 0xb4, 0x39, 0xd5, 0xe5, 0x29, 0x66, 0xa8, 0xaa, 0x4c, 0x64, + 0x46, 0xe1, 0x64, 0x0e, 0x72, 0xb7, 0x1c, 0xad, 0x24, 0xdc, 0x28, 0x50, 0x6c, 0x8a, 0x32, 0xef, + 0x3a, 0x30, 0x6e, 0x49, 0x9e, 0xeb, 0xc6, 0x95, 0xc9, 0x3c, 0x71, 0x1d, 0xf5, 0xdf, 0x0f, 0xd1, + 0x0a, 0x2f, 0xfc, 0x6f, 0x10, 0xcc, 0xef, 0x55, 0x0d, 0x7e, 0x75, 0x52, 0x55, 0x15, 0x60, 0x7b, + 0x57, 0xb2, 0xb5, 0xf0, 0x95, 0xa9, 0x22, 0x1c, 0x82, 0x05, 0xb1, 0xfe, 0x09, 0x41, 0x39, 0x5a, + 0x24, 0x19, 0xaa, 0xa5, 0x34, 0x3d, 0xf9, 0x64, 0x1f, 0x4a, 0xb2, 0xdb, 0x2b, 0xd7, 0x22, 0x75, + 0x63, 0xce, 0x00, 0xb8, 0x27, 0xde, 0xb7, 0x08, 0xca, 0xd1, 0x22, 0xc8, 0x00, 0x4d, 0x69, 0x8a, + 0x32, 0xeb, 0x43, 0xab, 0xd7, 0x9c, 0x5d, 0xbd, 0x1f, 0x11, 0x2c, 0x44, 0x1a, 0x0e, 0xfc, 0xfa, + 0x84, 0x74, 0xdf, 0xab, 0x8b, 0xc6, 0x64, 0x47, 0x5d, 0x0e, 0xf1, 0x10, 0xcf, 0x90, 0x7e, 0xf8, + 0x2f, 0x04, 0x2f, 0x65, 0xb6, 0x27, 0xf8, 0x52, 0x2a, 0xc9, 0xa4, 0x76, 0xa6, 0x3a, 0xf9, 0x5e, + 0x37, 0x3e, 0x97, 0xe4, 0xf7, 0x8d, 0xce, 0xac, 0xf2, 0xb6, 0x9c, 0x2c, 0x8a, 0x16, 0x6a, 0xe2, + 0xbf, 0x51, 0xf4, 0x6a, 0x8f, 0xb7, 0x25, 0xf8, 0xcd, 0x09, 0xfa, 0xa6, 0xf6, 0x47, 0xd5, 0x4b, + 0x53, 0xce, 0xd2, 0x21, 0xd2, 0xb9, 0x8e, 0xd7, 0x66, 0x3d, 0xa8, 0xd5, 0x8b, 0xd3, 0xff, 0x86, + 0xe0, 0x48, 0xb2, 
0x6b, 0xc1, 0x6f, 0x4c, 0x80, 0x8b, 0xb5, 0x4a, 0xd5, 0xf3, 0x05, 0xbd, 0xf5, + 0x11, 0x74, 0x07, 0x81, 0xaf, 0xcd, 0x7c, 0x04, 0xd5, 0x42, 0xe1, 0xef, 0x11, 0x94, 0xd7, 0xa9, + 0x7f, 0x9b, 0x38, 0x6d, 0xf9, 0x81, 0x03, 0x1b, 0x21, 0x08, 0x23, 0x4e, 0x80, 0x10, 0x35, 0x86, + 0xb0, 0xc7, 0x12, 0x3e, 0xca, 0x6a, 0xdc, 0x91, 0x50, 0xab, 0x86, 0x82, 0x0a, 0xbf, 0xb7, 0x14, + 0xbb, 0x6f, 0x45, 0x64, 0x9b, 0x20, 0x5b, 0x02, 0xae, 0xb5, 0x3c, 0xae, 0xb5, 0xff, 0x87, 0xab, + 0x9f, 0xe0, 0xfa, 0x1d, 0x01, 0xde, 0xa0, 0x42, 0x0e, 0x52, 0xcf, 0x61, 0x42, 0x65, 0x40, 0x23, + 0xb1, 0xf3, 0xb8, 0x4b, 0xc8, 0x78, 0xae, 0x80, 0xa7, 0x0e, 0x72, 0x5b, 0x72, 0xdf, 0x31, 0x6e, + 0xce, 0xc0, 0xed, 0x8f, 0x2d, 0xdb, 0x42, 0xcd, 0xeb, 0x0f, 0xe0, 0x44, 0x8f, 0x3b, 0x69, 0xa9, + 0x76, 0x1d, 0xc7, 0x5a, 0xe2, 0x76, 0xf0, 0xe6, 0x6d, 0xa3, 0xcf, 0x2e, 0x6b, 0xd7, 0x3e, 0x1f, + 0x10, 0xb7, 0x6f, 0x72, 0xaf, 0x6f, 0xf5, 0xa9, 0x2b, 0xdf, 0xcb, 0x96, 0x32, 0x91, 0x21, 0x13, + 0xb1, 0x0f, 0x6d, 0x57, 0x19, 0xf7, 0x9f, 0x21, 0xb4, 0x79, 0x50, 0x7a, 0x5d, 0xfc, 0x2f, 0x00, + 0x00, 0xff, 0xff, 0xe6, 0xf2, 0xe0, 0x78, 0xea, 0x13, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/cloud/iot/v1/resources.pb.go b/vendor/google.golang.org/genproto/googleapis/cloud/iot/v1/resources.pb.go new file mode 100644 index 00000000..4533c77b --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/cloud/iot/v1/resources.pb.go @@ -0,0 +1,1026 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/cloud/iot/v1/resources.proto + +package iot + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf1 "github.com/golang/protobuf/ptypes/timestamp" +import google_rpc "google.golang.org/genproto/googleapis/rpc/status" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// Indicates whether an MQTT connection is enabled or disabled. See the field +// description for details. +type MqttState int32 + +const ( + // No MQTT state specified. If not specified, MQTT will be enabled by default. + MqttState_MQTT_STATE_UNSPECIFIED MqttState = 0 + // Enables a MQTT connection. + MqttState_MQTT_ENABLED MqttState = 1 + // Disables a MQTT connection. + MqttState_MQTT_DISABLED MqttState = 2 +) + +var MqttState_name = map[int32]string{ + 0: "MQTT_STATE_UNSPECIFIED", + 1: "MQTT_ENABLED", + 2: "MQTT_DISABLED", +} +var MqttState_value = map[string]int32{ + "MQTT_STATE_UNSPECIFIED": 0, + "MQTT_ENABLED": 1, + "MQTT_DISABLED": 2, +} + +func (x MqttState) String() string { + return proto.EnumName(MqttState_name, int32(x)) +} +func (MqttState) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +// Indicates whether DeviceService (HTTP) is enabled or disabled for the +// registry. See the field description for details. +type HttpState int32 + +const ( + // No HTTP state specified. If not specified, DeviceService will be + // enabled by default. + HttpState_HTTP_STATE_UNSPECIFIED HttpState = 0 + // Enables DeviceService (HTTP) service for the registry. + HttpState_HTTP_ENABLED HttpState = 1 + // Disables DeviceService (HTTP) service for the registry. 
+ HttpState_HTTP_DISABLED HttpState = 2 +) + +var HttpState_name = map[int32]string{ + 0: "HTTP_STATE_UNSPECIFIED", + 1: "HTTP_ENABLED", + 2: "HTTP_DISABLED", +} +var HttpState_value = map[string]int32{ + "HTTP_STATE_UNSPECIFIED": 0, + "HTTP_ENABLED": 1, + "HTTP_DISABLED": 2, +} + +func (x HttpState) String() string { + return proto.EnumName(HttpState_name, int32(x)) +} +func (HttpState) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +// The supported formats for the public key. +type PublicKeyCertificateFormat int32 + +const ( + // The format has not been specified. This is an invalid default value and + // must not be used. + PublicKeyCertificateFormat_UNSPECIFIED_PUBLIC_KEY_CERTIFICATE_FORMAT PublicKeyCertificateFormat = 0 + // An X.509v3 certificate ([RFC5280](https://www.ietf.org/rfc/rfc5280.txt)), + // encoded in base64, and wrapped by `-----BEGIN CERTIFICATE-----` and + // `-----END CERTIFICATE-----`. + PublicKeyCertificateFormat_X509_CERTIFICATE_PEM PublicKeyCertificateFormat = 1 +) + +var PublicKeyCertificateFormat_name = map[int32]string{ + 0: "UNSPECIFIED_PUBLIC_KEY_CERTIFICATE_FORMAT", + 1: "X509_CERTIFICATE_PEM", +} +var PublicKeyCertificateFormat_value = map[string]int32{ + "UNSPECIFIED_PUBLIC_KEY_CERTIFICATE_FORMAT": 0, + "X509_CERTIFICATE_PEM": 1, +} + +func (x PublicKeyCertificateFormat) String() string { + return proto.EnumName(PublicKeyCertificateFormat_name, int32(x)) +} +func (PublicKeyCertificateFormat) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +// The supported formats for the public key. +type PublicKeyFormat int32 + +const ( + // The format has not been specified. This is an invalid default value and + // must not be used. + PublicKeyFormat_UNSPECIFIED_PUBLIC_KEY_FORMAT PublicKeyFormat = 0 + // An RSA public key encoded in base64, and wrapped by + // `-----BEGIN PUBLIC KEY-----` and `-----END PUBLIC KEY-----`. This can be + // used to verify `RS256` signatures in JWT tokens ([RFC7518]( + // https://www.ietf.org/rfc/rfc7518.txt)). + PublicKeyFormat_RSA_PEM PublicKeyFormat = 3 + // As RSA_PEM, but wrapped in an X.509v3 certificate ([RFC5280]( + // https://www.ietf.org/rfc/rfc5280.txt)), encoded in base64, and wrapped by + // `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`. + PublicKeyFormat_RSA_X509_PEM PublicKeyFormat = 1 + // Public key for the ECDSA algorithm using P-256 and SHA-256, encoded in + // base64, and wrapped by `-----BEGIN PUBLIC KEY-----` and `-----END + // PUBLIC KEY-----`. This can be used to verify JWT tokens with the `ES256` + // algorithm ([RFC7518](https://www.ietf.org/rfc/rfc7518.txt)). This curve is + // defined in [OpenSSL](https://www.openssl.org/) as the `prime256v1` curve. + PublicKeyFormat_ES256_PEM PublicKeyFormat = 2 + // As ES256_PEM, but wrapped in an X.509v3 certificate ([RFC5280]( + // https://www.ietf.org/rfc/rfc5280.txt)), encoded in base64, and wrapped by + // `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`. 
+ PublicKeyFormat_ES256_X509_PEM PublicKeyFormat = 4 +) + +var PublicKeyFormat_name = map[int32]string{ + 0: "UNSPECIFIED_PUBLIC_KEY_FORMAT", + 3: "RSA_PEM", + 1: "RSA_X509_PEM", + 2: "ES256_PEM", + 4: "ES256_X509_PEM", +} +var PublicKeyFormat_value = map[string]int32{ + "UNSPECIFIED_PUBLIC_KEY_FORMAT": 0, + "RSA_PEM": 3, + "RSA_X509_PEM": 1, + "ES256_PEM": 2, + "ES256_X509_PEM": 4, +} + +func (x PublicKeyFormat) String() string { + return proto.EnumName(PublicKeyFormat_name, int32(x)) +} +func (PublicKeyFormat) EnumDescriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +// The device resource. +type Device struct { + // The user-defined device identifier. The device ID must be unique + // within a device registry. + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // The resource path name. For example, + // `projects/p1/locations/us-central1/registries/registry0/devices/dev0` or + // `projects/p1/locations/us-central1/registries/registry0/devices/{num_id}`. + // When `name` is populated as a response from the service, it always ends + // in the device numeric ID. + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // [Output only] A server-defined unique numeric ID for the device. This is a + // more compact way to identify devices, and it is globally unique. + NumId uint64 `protobuf:"varint,3,opt,name=num_id,json=numId" json:"num_id,omitempty"` + // The credentials used to authenticate this device. To allow credential + // rotation without interruption, multiple device credentials can be bound to + // this device. No more than 3 credentials can be bound to a single device at + // a time. When new credentials are added to a device, they are verified + // against the registry credentials. For details, see the description of the + // `DeviceRegistry.credentials` field. + Credentials []*DeviceCredential `protobuf:"bytes,12,rep,name=credentials" json:"credentials,omitempty"` + // [Output only] The last time a heartbeat was received. Timestamps are + // periodically collected and written to storage; they may be stale by a few + // minutes. This field is only for devices connecting through MQTT. + LastHeartbeatTime *google_protobuf1.Timestamp `protobuf:"bytes,7,opt,name=last_heartbeat_time,json=lastHeartbeatTime" json:"last_heartbeat_time,omitempty"` + // [Output only] The last time a telemetry event was received. Timestamps are + // periodically collected and written to storage; they may be stale by a few + // minutes. + LastEventTime *google_protobuf1.Timestamp `protobuf:"bytes,8,opt,name=last_event_time,json=lastEventTime" json:"last_event_time,omitempty"` + // [Output only] The last time a state event was received. Timestamps are + // periodically collected and written to storage; they may be stale by a few + // minutes. + LastStateTime *google_protobuf1.Timestamp `protobuf:"bytes,20,opt,name=last_state_time,json=lastStateTime" json:"last_state_time,omitempty"` + // [Output only] The last time a cloud-to-device config version acknowledgment + // was received from the device. This field is only for configurations + // sent through MQTT. + LastConfigAckTime *google_protobuf1.Timestamp `protobuf:"bytes,14,opt,name=last_config_ack_time,json=lastConfigAckTime" json:"last_config_ack_time,omitempty"` + // [Output only] The last time a cloud-to-device config version was sent to + // the device. 
+ LastConfigSendTime *google_protobuf1.Timestamp `protobuf:"bytes,18,opt,name=last_config_send_time,json=lastConfigSendTime" json:"last_config_send_time,omitempty"` + // If a device is blocked, connections or requests from this device will fail. + // Can be used to temporarily prevent the device from connecting if, for + // example, the sensor is generating bad data and needs maintenance. + Blocked bool `protobuf:"varint,19,opt,name=blocked" json:"blocked,omitempty"` + // [Output only] The time the most recent error occurred, such as a failure to + // publish to Cloud Pub/Sub. This field is the timestamp of + // 'last_error_status'. + LastErrorTime *google_protobuf1.Timestamp `protobuf:"bytes,10,opt,name=last_error_time,json=lastErrorTime" json:"last_error_time,omitempty"` + // [Output only] The error message of the most recent error, such as a failure + // to publish to Cloud Pub/Sub. 'last_error_time' is the timestamp of this + // field. If no errors have occurred, this field has an empty message + // and the status code 0 == OK. Otherwise, this field is expected to have a + // status code other than OK. + LastErrorStatus *google_rpc.Status `protobuf:"bytes,11,opt,name=last_error_status,json=lastErrorStatus" json:"last_error_status,omitempty"` + // The most recent device configuration, which is eventually sent from + // Cloud IoT Core to the device. If not present on creation, the + // configuration will be initialized with an empty payload and version value + // of `1`. To update this field after creation, use the + // `DeviceManager.ModifyCloudToDeviceConfig` method. + Config *DeviceConfig `protobuf:"bytes,13,opt,name=config" json:"config,omitempty"` + // [Output only] The state most recently received from the device. If no state + // has been reported, this field is not present. + State *DeviceState `protobuf:"bytes,16,opt,name=state" json:"state,omitempty"` + // The metadata key-value pairs assigned to the device. This metadata is not + // interpreted or indexed by Cloud IoT Core. It can be used to add contextual + // information for the device. + // + // Keys must conform to the regular expression [a-zA-Z0-9-_]+ and be less than + // 128 bytes in length. + // + // Values are free-form strings. Each value must be less than or equal to 32 + // KB in size. + // + // The total size of all keys and values must be less than 256 KB, and the + // maximum number of key-value pairs is 500. 
+ Metadata map[string]string `protobuf:"bytes,17,rep,name=metadata" json:"metadata,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *Device) Reset() { *m = Device{} } +func (m *Device) String() string { return proto.CompactTextString(m) } +func (*Device) ProtoMessage() {} +func (*Device) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{0} } + +func (m *Device) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *Device) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *Device) GetNumId() uint64 { + if m != nil { + return m.NumId + } + return 0 +} + +func (m *Device) GetCredentials() []*DeviceCredential { + if m != nil { + return m.Credentials + } + return nil +} + +func (m *Device) GetLastHeartbeatTime() *google_protobuf1.Timestamp { + if m != nil { + return m.LastHeartbeatTime + } + return nil +} + +func (m *Device) GetLastEventTime() *google_protobuf1.Timestamp { + if m != nil { + return m.LastEventTime + } + return nil +} + +func (m *Device) GetLastStateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.LastStateTime + } + return nil +} + +func (m *Device) GetLastConfigAckTime() *google_protobuf1.Timestamp { + if m != nil { + return m.LastConfigAckTime + } + return nil +} + +func (m *Device) GetLastConfigSendTime() *google_protobuf1.Timestamp { + if m != nil { + return m.LastConfigSendTime + } + return nil +} + +func (m *Device) GetBlocked() bool { + if m != nil { + return m.Blocked + } + return false +} + +func (m *Device) GetLastErrorTime() *google_protobuf1.Timestamp { + if m != nil { + return m.LastErrorTime + } + return nil +} + +func (m *Device) GetLastErrorStatus() *google_rpc.Status { + if m != nil { + return m.LastErrorStatus + } + return nil +} + +func (m *Device) GetConfig() *DeviceConfig { + if m != nil { + return m.Config + } + return nil +} + +func (m *Device) GetState() *DeviceState { + if m != nil { + return m.State + } + return nil +} + +func (m *Device) GetMetadata() map[string]string { + if m != nil { + return m.Metadata + } + return nil +} + +// A container for a group of devices. +type DeviceRegistry struct { + // The identifier of this device registry. For example, `myRegistry`. + Id string `protobuf:"bytes,1,opt,name=id" json:"id,omitempty"` + // The resource path name. For example, + // `projects/example-project/locations/us-central1/registries/my-registry`. + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` + // The configuration for notification of telemetry events received from the + // device. All telemetry events that were successfully published by the + // device and acknowledged by Cloud IoT Core are guaranteed to be + // delivered to Cloud Pub/Sub. Only the first configuration is used. If you + // try to publish a device telemetry event using MQTT without specifying a + // Cloud Pub/Sub topic for the device's registry, the connection closes + // automatically. If you try to do so using an HTTP connection, an error + // is returned. + EventNotificationConfigs []*EventNotificationConfig `protobuf:"bytes,10,rep,name=event_notification_configs,json=eventNotificationConfigs" json:"event_notification_configs,omitempty"` + // The configuration for notification of new states received from the device. + // State updates are guaranteed to be stored in the state history, but + // notifications to Cloud Pub/Sub are not guaranteed. 
For example, if + // permissions are misconfigured or the specified topic doesn't exist, no + // notification will be published but the state will still be stored in Cloud + // IoT Core. + StateNotificationConfig *StateNotificationConfig `protobuf:"bytes,7,opt,name=state_notification_config,json=stateNotificationConfig" json:"state_notification_config,omitempty"` + // The MQTT configuration for this device registry. + MqttConfig *MqttConfig `protobuf:"bytes,4,opt,name=mqtt_config,json=mqttConfig" json:"mqtt_config,omitempty"` + // The DeviceService (HTTP) configuration for this device registry. + HttpConfig *HttpConfig `protobuf:"bytes,9,opt,name=http_config,json=httpConfig" json:"http_config,omitempty"` + // The credentials used to verify the device credentials. No more than 10 + // credentials can be bound to a single registry at a time. The verification + // process occurs at the time of device creation or update. If this field is + // empty, no verification is performed. Otherwise, the credentials of a newly + // created device or added credentials of an updated device should be signed + // with one of these registry credentials. + // + // Note, however, that existing devices will never be affected by + // modifications to this list of credentials: after a device has been + // successfully created in a registry, it should be able to connect even if + // its registry credentials are revoked, deleted, or modified. + Credentials []*RegistryCredential `protobuf:"bytes,8,rep,name=credentials" json:"credentials,omitempty"` +} + +func (m *DeviceRegistry) Reset() { *m = DeviceRegistry{} } +func (m *DeviceRegistry) String() string { return proto.CompactTextString(m) } +func (*DeviceRegistry) ProtoMessage() {} +func (*DeviceRegistry) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{1} } + +func (m *DeviceRegistry) GetId() string { + if m != nil { + return m.Id + } + return "" +} + +func (m *DeviceRegistry) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *DeviceRegistry) GetEventNotificationConfigs() []*EventNotificationConfig { + if m != nil { + return m.EventNotificationConfigs + } + return nil +} + +func (m *DeviceRegistry) GetStateNotificationConfig() *StateNotificationConfig { + if m != nil { + return m.StateNotificationConfig + } + return nil +} + +func (m *DeviceRegistry) GetMqttConfig() *MqttConfig { + if m != nil { + return m.MqttConfig + } + return nil +} + +func (m *DeviceRegistry) GetHttpConfig() *HttpConfig { + if m != nil { + return m.HttpConfig + } + return nil +} + +func (m *DeviceRegistry) GetCredentials() []*RegistryCredential { + if m != nil { + return m.Credentials + } + return nil +} + +// The configuration of MQTT for a device registry. +type MqttConfig struct { + // If enabled, allows connections using the MQTT protocol. Otherwise, MQTT + // connections to this registry will fail. + MqttEnabledState MqttState `protobuf:"varint,1,opt,name=mqtt_enabled_state,json=mqttEnabledState,enum=google.cloud.iot.v1.MqttState" json:"mqtt_enabled_state,omitempty"` +} + +func (m *MqttConfig) Reset() { *m = MqttConfig{} } +func (m *MqttConfig) String() string { return proto.CompactTextString(m) } +func (*MqttConfig) ProtoMessage() {} +func (*MqttConfig) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{2} } + +func (m *MqttConfig) GetMqttEnabledState() MqttState { + if m != nil { + return m.MqttEnabledState + } + return MqttState_MQTT_STATE_UNSPECIFIED +} + +// The configuration of the HTTP bridge for a device registry. 
+type HttpConfig struct { + // If enabled, allows devices to use DeviceService via the HTTP protocol. + // Otherwise, any requests to DeviceService will fail for this registry. + HttpEnabledState HttpState `protobuf:"varint,1,opt,name=http_enabled_state,json=httpEnabledState,enum=google.cloud.iot.v1.HttpState" json:"http_enabled_state,omitempty"` +} + +func (m *HttpConfig) Reset() { *m = HttpConfig{} } +func (m *HttpConfig) String() string { return proto.CompactTextString(m) } +func (*HttpConfig) ProtoMessage() {} +func (*HttpConfig) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{3} } + +func (m *HttpConfig) GetHttpEnabledState() HttpState { + if m != nil { + return m.HttpEnabledState + } + return HttpState_HTTP_STATE_UNSPECIFIED +} + +// The configuration to forward telemetry events. +type EventNotificationConfig struct { + // A Cloud Pub/Sub topic name. For example, + // `projects/myProject/topics/deviceEvents`. + PubsubTopicName string `protobuf:"bytes,1,opt,name=pubsub_topic_name,json=pubsubTopicName" json:"pubsub_topic_name,omitempty"` +} + +func (m *EventNotificationConfig) Reset() { *m = EventNotificationConfig{} } +func (m *EventNotificationConfig) String() string { return proto.CompactTextString(m) } +func (*EventNotificationConfig) ProtoMessage() {} +func (*EventNotificationConfig) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{4} } + +func (m *EventNotificationConfig) GetPubsubTopicName() string { + if m != nil { + return m.PubsubTopicName + } + return "" +} + +// The configuration for notification of new states received from the device. +type StateNotificationConfig struct { + // A Cloud Pub/Sub topic name. For example, + // `projects/myProject/topics/deviceEvents`. + PubsubTopicName string `protobuf:"bytes,1,opt,name=pubsub_topic_name,json=pubsubTopicName" json:"pubsub_topic_name,omitempty"` +} + +func (m *StateNotificationConfig) Reset() { *m = StateNotificationConfig{} } +func (m *StateNotificationConfig) String() string { return proto.CompactTextString(m) } +func (*StateNotificationConfig) ProtoMessage() {} +func (*StateNotificationConfig) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{5} } + +func (m *StateNotificationConfig) GetPubsubTopicName() string { + if m != nil { + return m.PubsubTopicName + } + return "" +} + +// A server-stored registry credential used to validate device credentials. +type RegistryCredential struct { + // The credential data. Reserved for expansion in the future. 
+ // + // Types that are valid to be assigned to Credential: + // *RegistryCredential_PublicKeyCertificate + Credential isRegistryCredential_Credential `protobuf_oneof:"credential"` +} + +func (m *RegistryCredential) Reset() { *m = RegistryCredential{} } +func (m *RegistryCredential) String() string { return proto.CompactTextString(m) } +func (*RegistryCredential) ProtoMessage() {} +func (*RegistryCredential) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{6} } + +type isRegistryCredential_Credential interface { + isRegistryCredential_Credential() +} + +type RegistryCredential_PublicKeyCertificate struct { + PublicKeyCertificate *PublicKeyCertificate `protobuf:"bytes,1,opt,name=public_key_certificate,json=publicKeyCertificate,oneof"` +} + +func (*RegistryCredential_PublicKeyCertificate) isRegistryCredential_Credential() {} + +func (m *RegistryCredential) GetCredential() isRegistryCredential_Credential { + if m != nil { + return m.Credential + } + return nil +} + +func (m *RegistryCredential) GetPublicKeyCertificate() *PublicKeyCertificate { + if x, ok := m.GetCredential().(*RegistryCredential_PublicKeyCertificate); ok { + return x.PublicKeyCertificate + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*RegistryCredential) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _RegistryCredential_OneofMarshaler, _RegistryCredential_OneofUnmarshaler, _RegistryCredential_OneofSizer, []interface{}{ + (*RegistryCredential_PublicKeyCertificate)(nil), + } +} + +func _RegistryCredential_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*RegistryCredential) + // credential + switch x := m.Credential.(type) { + case *RegistryCredential_PublicKeyCertificate: + b.EncodeVarint(1<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PublicKeyCertificate); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("RegistryCredential.Credential has unexpected type %T", x) + } + return nil +} + +func _RegistryCredential_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*RegistryCredential) + switch tag { + case 1: // credential.public_key_certificate + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PublicKeyCertificate) + err := b.DecodeMessage(msg) + m.Credential = &RegistryCredential_PublicKeyCertificate{msg} + return true, err + default: + return false, nil + } +} + +func _RegistryCredential_OneofSizer(msg proto.Message) (n int) { + m := msg.(*RegistryCredential) + // credential + switch x := m.Credential.(type) { + case *RegistryCredential_PublicKeyCertificate: + s := proto.Size(x.PublicKeyCertificate) + n += proto.SizeVarint(1<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// Details of an X.509 certificate. For informational purposes only. +type X509CertificateDetails struct { + // The entity that signed the certificate. + Issuer string `protobuf:"bytes,1,opt,name=issuer" json:"issuer,omitempty"` + // The entity the certificate and public key belong to. + Subject string `protobuf:"bytes,2,opt,name=subject" json:"subject,omitempty"` + // The time the certificate becomes valid. 
+ StartTime *google_protobuf1.Timestamp `protobuf:"bytes,3,opt,name=start_time,json=startTime" json:"start_time,omitempty"` + // The time the certificate becomes invalid. + ExpiryTime *google_protobuf1.Timestamp `protobuf:"bytes,4,opt,name=expiry_time,json=expiryTime" json:"expiry_time,omitempty"` + // The algorithm used to sign the certificate. + SignatureAlgorithm string `protobuf:"bytes,5,opt,name=signature_algorithm,json=signatureAlgorithm" json:"signature_algorithm,omitempty"` + // The type of public key in the certificate. + PublicKeyType string `protobuf:"bytes,6,opt,name=public_key_type,json=publicKeyType" json:"public_key_type,omitempty"` +} + +func (m *X509CertificateDetails) Reset() { *m = X509CertificateDetails{} } +func (m *X509CertificateDetails) String() string { return proto.CompactTextString(m) } +func (*X509CertificateDetails) ProtoMessage() {} +func (*X509CertificateDetails) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{7} } + +func (m *X509CertificateDetails) GetIssuer() string { + if m != nil { + return m.Issuer + } + return "" +} + +func (m *X509CertificateDetails) GetSubject() string { + if m != nil { + return m.Subject + } + return "" +} + +func (m *X509CertificateDetails) GetStartTime() *google_protobuf1.Timestamp { + if m != nil { + return m.StartTime + } + return nil +} + +func (m *X509CertificateDetails) GetExpiryTime() *google_protobuf1.Timestamp { + if m != nil { + return m.ExpiryTime + } + return nil +} + +func (m *X509CertificateDetails) GetSignatureAlgorithm() string { + if m != nil { + return m.SignatureAlgorithm + } + return "" +} + +func (m *X509CertificateDetails) GetPublicKeyType() string { + if m != nil { + return m.PublicKeyType + } + return "" +} + +// A public key certificate format and data. +type PublicKeyCertificate struct { + // The certificate format. + Format PublicKeyCertificateFormat `protobuf:"varint,1,opt,name=format,enum=google.cloud.iot.v1.PublicKeyCertificateFormat" json:"format,omitempty"` + // The certificate data. + Certificate string `protobuf:"bytes,2,opt,name=certificate" json:"certificate,omitempty"` + // [Output only] The certificate details. Used only for X.509 certificates. + X509Details *X509CertificateDetails `protobuf:"bytes,3,opt,name=x509_details,json=x509Details" json:"x509_details,omitempty"` +} + +func (m *PublicKeyCertificate) Reset() { *m = PublicKeyCertificate{} } +func (m *PublicKeyCertificate) String() string { return proto.CompactTextString(m) } +func (*PublicKeyCertificate) ProtoMessage() {} +func (*PublicKeyCertificate) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{8} } + +func (m *PublicKeyCertificate) GetFormat() PublicKeyCertificateFormat { + if m != nil { + return m.Format + } + return PublicKeyCertificateFormat_UNSPECIFIED_PUBLIC_KEY_CERTIFICATE_FORMAT +} + +func (m *PublicKeyCertificate) GetCertificate() string { + if m != nil { + return m.Certificate + } + return "" +} + +func (m *PublicKeyCertificate) GetX509Details() *X509CertificateDetails { + if m != nil { + return m.X509Details + } + return nil +} + +// A server-stored device credential used for authentication. +type DeviceCredential struct { + // The credential data. Reserved for expansion in the future. + // + // Types that are valid to be assigned to Credential: + // *DeviceCredential_PublicKey + Credential isDeviceCredential_Credential `protobuf_oneof:"credential"` + // [Optional] The time at which this credential becomes invalid. 
This + // credential will be ignored for new client authentication requests after + // this timestamp; however, it will not be automatically deleted. + ExpirationTime *google_protobuf1.Timestamp `protobuf:"bytes,6,opt,name=expiration_time,json=expirationTime" json:"expiration_time,omitempty"` +} + +func (m *DeviceCredential) Reset() { *m = DeviceCredential{} } +func (m *DeviceCredential) String() string { return proto.CompactTextString(m) } +func (*DeviceCredential) ProtoMessage() {} +func (*DeviceCredential) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{9} } + +type isDeviceCredential_Credential interface { + isDeviceCredential_Credential() +} + +type DeviceCredential_PublicKey struct { + PublicKey *PublicKeyCredential `protobuf:"bytes,2,opt,name=public_key,json=publicKey,oneof"` +} + +func (*DeviceCredential_PublicKey) isDeviceCredential_Credential() {} + +func (m *DeviceCredential) GetCredential() isDeviceCredential_Credential { + if m != nil { + return m.Credential + } + return nil +} + +func (m *DeviceCredential) GetPublicKey() *PublicKeyCredential { + if x, ok := m.GetCredential().(*DeviceCredential_PublicKey); ok { + return x.PublicKey + } + return nil +} + +func (m *DeviceCredential) GetExpirationTime() *google_protobuf1.Timestamp { + if m != nil { + return m.ExpirationTime + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*DeviceCredential) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _DeviceCredential_OneofMarshaler, _DeviceCredential_OneofUnmarshaler, _DeviceCredential_OneofSizer, []interface{}{ + (*DeviceCredential_PublicKey)(nil), + } +} + +func _DeviceCredential_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*DeviceCredential) + // credential + switch x := m.Credential.(type) { + case *DeviceCredential_PublicKey: + b.EncodeVarint(2<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.PublicKey); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("DeviceCredential.Credential has unexpected type %T", x) + } + return nil +} + +func _DeviceCredential_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*DeviceCredential) + switch tag { + case 2: // credential.public_key + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(PublicKeyCredential) + err := b.DecodeMessage(msg) + m.Credential = &DeviceCredential_PublicKey{msg} + return true, err + default: + return false, nil + } +} + +func _DeviceCredential_OneofSizer(msg proto.Message) (n int) { + m := msg.(*DeviceCredential) + // credential + switch x := m.Credential.(type) { + case *DeviceCredential_PublicKey: + s := proto.Size(x.PublicKey) + n += proto.SizeVarint(2<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// A public key format and data. +type PublicKeyCredential struct { + // The format of the key. + Format PublicKeyFormat `protobuf:"varint,1,opt,name=format,enum=google.cloud.iot.v1.PublicKeyFormat" json:"format,omitempty"` + // The key data. 
+ Key string `protobuf:"bytes,2,opt,name=key" json:"key,omitempty"` +} + +func (m *PublicKeyCredential) Reset() { *m = PublicKeyCredential{} } +func (m *PublicKeyCredential) String() string { return proto.CompactTextString(m) } +func (*PublicKeyCredential) ProtoMessage() {} +func (*PublicKeyCredential) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{10} } + +func (m *PublicKeyCredential) GetFormat() PublicKeyFormat { + if m != nil { + return m.Format + } + return PublicKeyFormat_UNSPECIFIED_PUBLIC_KEY_FORMAT +} + +func (m *PublicKeyCredential) GetKey() string { + if m != nil { + return m.Key + } + return "" +} + +// The device configuration. Eventually delivered to devices. +type DeviceConfig struct { + // [Output only] The version of this update. The version number is assigned by + // the server, and is always greater than 0 after device creation. The + // version must be 0 on the `CreateDevice` request if a `config` is + // specified; the response of `CreateDevice` will always have a value of 1. + Version int64 `protobuf:"varint,1,opt,name=version" json:"version,omitempty"` + // [Output only] The time at which this configuration version was updated in + // Cloud IoT Core. This timestamp is set by the server. + CloudUpdateTime *google_protobuf1.Timestamp `protobuf:"bytes,2,opt,name=cloud_update_time,json=cloudUpdateTime" json:"cloud_update_time,omitempty"` + // [Output only] The time at which Cloud IoT Core received the + // acknowledgment from the device, indicating that the device has received + // this configuration version. If this field is not present, the device has + // not yet acknowledged that it received this version. Note that when + // the config was sent to the device, many config versions may have been + // available in Cloud IoT Core while the device was disconnected, and on + // connection, only the latest version is sent to the device. Some + // versions may never be sent to the device, and therefore are never + // acknowledged. This timestamp is set by Cloud IoT Core. + DeviceAckTime *google_protobuf1.Timestamp `protobuf:"bytes,3,opt,name=device_ack_time,json=deviceAckTime" json:"device_ack_time,omitempty"` + // The device configuration data. + BinaryData []byte `protobuf:"bytes,4,opt,name=binary_data,json=binaryData,proto3" json:"binary_data,omitempty"` +} + +func (m *DeviceConfig) Reset() { *m = DeviceConfig{} } +func (m *DeviceConfig) String() string { return proto.CompactTextString(m) } +func (*DeviceConfig) ProtoMessage() {} +func (*DeviceConfig) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{11} } + +func (m *DeviceConfig) GetVersion() int64 { + if m != nil { + return m.Version + } + return 0 +} + +func (m *DeviceConfig) GetCloudUpdateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.CloudUpdateTime + } + return nil +} + +func (m *DeviceConfig) GetDeviceAckTime() *google_protobuf1.Timestamp { + if m != nil { + return m.DeviceAckTime + } + return nil +} + +func (m *DeviceConfig) GetBinaryData() []byte { + if m != nil { + return m.BinaryData + } + return nil +} + +// The device state, as reported by the device. +type DeviceState struct { + // [Output only] The time at which this state version was updated in Cloud + // IoT Core. + UpdateTime *google_protobuf1.Timestamp `protobuf:"bytes,1,opt,name=update_time,json=updateTime" json:"update_time,omitempty"` + // The device state data. 
+ BinaryData []byte `protobuf:"bytes,2,opt,name=binary_data,json=binaryData,proto3" json:"binary_data,omitempty"` +} + +func (m *DeviceState) Reset() { *m = DeviceState{} } +func (m *DeviceState) String() string { return proto.CompactTextString(m) } +func (*DeviceState) ProtoMessage() {} +func (*DeviceState) Descriptor() ([]byte, []int) { return fileDescriptor1, []int{12} } + +func (m *DeviceState) GetUpdateTime() *google_protobuf1.Timestamp { + if m != nil { + return m.UpdateTime + } + return nil +} + +func (m *DeviceState) GetBinaryData() []byte { + if m != nil { + return m.BinaryData + } + return nil +} + +func init() { + proto.RegisterType((*Device)(nil), "google.cloud.iot.v1.Device") + proto.RegisterType((*DeviceRegistry)(nil), "google.cloud.iot.v1.DeviceRegistry") + proto.RegisterType((*MqttConfig)(nil), "google.cloud.iot.v1.MqttConfig") + proto.RegisterType((*HttpConfig)(nil), "google.cloud.iot.v1.HttpConfig") + proto.RegisterType((*EventNotificationConfig)(nil), "google.cloud.iot.v1.EventNotificationConfig") + proto.RegisterType((*StateNotificationConfig)(nil), "google.cloud.iot.v1.StateNotificationConfig") + proto.RegisterType((*RegistryCredential)(nil), "google.cloud.iot.v1.RegistryCredential") + proto.RegisterType((*X509CertificateDetails)(nil), "google.cloud.iot.v1.X509CertificateDetails") + proto.RegisterType((*PublicKeyCertificate)(nil), "google.cloud.iot.v1.PublicKeyCertificate") + proto.RegisterType((*DeviceCredential)(nil), "google.cloud.iot.v1.DeviceCredential") + proto.RegisterType((*PublicKeyCredential)(nil), "google.cloud.iot.v1.PublicKeyCredential") + proto.RegisterType((*DeviceConfig)(nil), "google.cloud.iot.v1.DeviceConfig") + proto.RegisterType((*DeviceState)(nil), "google.cloud.iot.v1.DeviceState") + proto.RegisterEnum("google.cloud.iot.v1.MqttState", MqttState_name, MqttState_value) + proto.RegisterEnum("google.cloud.iot.v1.HttpState", HttpState_name, HttpState_value) + proto.RegisterEnum("google.cloud.iot.v1.PublicKeyCertificateFormat", PublicKeyCertificateFormat_name, PublicKeyCertificateFormat_value) + proto.RegisterEnum("google.cloud.iot.v1.PublicKeyFormat", PublicKeyFormat_name, PublicKeyFormat_value) +} + +func init() { proto.RegisterFile("google/cloud/iot/v1/resources.proto", fileDescriptor1) } + +var fileDescriptor1 = []byte{ + // 1320 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x97, 0xdd, 0x72, 0xdb, 0x44, + 0x14, 0xc7, 0x23, 0x27, 0x71, 0x92, 0xe3, 0xf8, 0x23, 0x9b, 0x34, 0x11, 0x1e, 0xa0, 0xae, 0xf9, + 0x4a, 0x0b, 0xd8, 0x6d, 0x98, 0x76, 0x08, 0x65, 0x18, 0x12, 0x47, 0x69, 0x4c, 0x93, 0x60, 0x64, + 0x77, 0x06, 0x7a, 0xa3, 0x59, 0x4b, 0x1b, 0x47, 0x8d, 0x2d, 0xa9, 0xd2, 0xca, 0x53, 0x3f, 0x00, + 0x0f, 0xc0, 0x35, 0x2f, 0xc1, 0xab, 0x70, 0xc1, 0x05, 0x6f, 0xc2, 0x25, 0xb3, 0x67, 0x25, 0xf9, + 0x03, 0x39, 0x0e, 0x77, 0xde, 0xdd, 0xf3, 0xff, 0x1d, 0x9d, 0xaf, 0x95, 0x0c, 0x1f, 0xf5, 0x5c, + 0xb7, 0xd7, 0x67, 0x75, 0xb3, 0xef, 0x86, 0x56, 0xdd, 0x76, 0x79, 0x7d, 0xf8, 0xa4, 0xee, 0xb3, + 0xc0, 0x0d, 0x7d, 0x93, 0x05, 0x35, 0xcf, 0x77, 0xb9, 0x4b, 0xb6, 0xa5, 0x51, 0x0d, 0x8d, 0x6a, + 0xb6, 0xcb, 0x6b, 0xc3, 0x27, 0xe5, 0xf7, 0x23, 0x25, 0xf5, 0xec, 0x3a, 0x75, 0x1c, 0x97, 0x53, + 0x6e, 0xbb, 0x4e, 0x24, 0x29, 0xdf, 0x8f, 0x4e, 0x71, 0xd5, 0x0d, 0xaf, 0xea, 0xdc, 0x1e, 0xb0, + 0x80, 0xd3, 0x81, 0x17, 0x19, 0xec, 0x45, 0x06, 0xbe, 0x67, 0xd6, 0x03, 0x4e, 0x79, 0x18, 0x29, + 0xab, 0xbf, 0xad, 0x41, 0xf6, 0x84, 0x0d, 0x6d, 0x93, 0x91, 0x02, 0x64, 0x6c, 0x4b, 0x55, 0x2a, + 0xca, 0xfe, 0x86, 0x9e, 
0xb1, 0x2d, 0x42, 0x60, 0xc5, 0xa1, 0x03, 0xa6, 0x66, 0x70, 0x07, 0x7f, + 0x93, 0x7b, 0x90, 0x75, 0xc2, 0x81, 0x61, 0x5b, 0xea, 0x72, 0x45, 0xd9, 0x5f, 0xd1, 0x57, 0x9d, + 0x70, 0xd0, 0xb4, 0xc8, 0x0b, 0xc8, 0x99, 0x3e, 0xb3, 0x98, 0xc3, 0x6d, 0xda, 0x0f, 0xd4, 0xcd, + 0xca, 0xf2, 0x7e, 0xee, 0xe0, 0x93, 0x5a, 0x4a, 0x20, 0x35, 0xe9, 0xac, 0x91, 0x58, 0xeb, 0x93, + 0x4a, 0xf2, 0x03, 0x6c, 0xf7, 0x69, 0xc0, 0x8d, 0x6b, 0x46, 0x7d, 0xde, 0x65, 0x94, 0x1b, 0x22, + 0x12, 0x75, 0xad, 0xa2, 0xec, 0xe7, 0x0e, 0xca, 0x31, 0x30, 0x0e, 0xb3, 0xd6, 0x89, 0xc3, 0xd4, + 0xb7, 0x84, 0xec, 0x2c, 0x56, 0x89, 0x7d, 0x72, 0x0c, 0x45, 0x64, 0xb1, 0x21, 0x73, 0x22, 0xce, + 0xfa, 0x42, 0x4e, 0x5e, 0x48, 0x34, 0xa1, 0x98, 0x62, 0x88, 0x9c, 0x31, 0xc9, 0xd8, 0xb9, 0x1b, + 0xa3, 0x2d, 0x14, 0xc8, 0x78, 0x09, 0x3b, 0xc8, 0x30, 0x5d, 0xe7, 0xca, 0xee, 0x19, 0xd4, 0xbc, + 0x91, 0xa0, 0xc2, 0xdd, 0x82, 0x6a, 0xa0, 0xec, 0xc8, 0xbc, 0x41, 0xd8, 0x05, 0xdc, 0x9b, 0x84, + 0x05, 0xcc, 0xb1, 0x24, 0x8d, 0x2c, 0xa4, 0x91, 0x31, 0xad, 0xcd, 0x1c, 0x0b, 0x71, 0x2a, 0xac, + 0x75, 0xfb, 0xae, 0x79, 0xc3, 0x2c, 0x75, 0xbb, 0xa2, 0xec, 0xaf, 0xeb, 0xf1, 0x72, 0x9c, 0x3d, + 0xdf, 0x77, 0x7d, 0xe9, 0x02, 0xee, 0x98, 0x3d, 0xa1, 0x40, 0xfa, 0x77, 0xb0, 0x35, 0xc1, 0x90, + 0x7d, 0xa7, 0xe6, 0x90, 0x42, 0x62, 0x8a, 0xef, 0x99, 0xb5, 0x36, 0x9e, 0xe8, 0xc5, 0x44, 0x2d, + 0x37, 0xc8, 0x21, 0x64, 0x65, 0x9c, 0x6a, 0x1e, 0x45, 0x0f, 0x6e, 0xeb, 0x28, 0x34, 0xd4, 0x23, + 0x01, 0x79, 0x06, 0xab, 0x58, 0x33, 0xb5, 0x84, 0xca, 0xca, 0x2d, 0x4a, 0xac, 0x94, 0x2e, 0xcd, + 0x89, 0x06, 0xeb, 0x03, 0xc6, 0xa9, 0x45, 0x39, 0x55, 0xb7, 0xb0, 0x8d, 0x1f, 0xde, 0x22, 0xad, + 0x5d, 0x44, 0xb6, 0x9a, 0xc3, 0xfd, 0x91, 0x9e, 0x48, 0xcb, 0xcf, 0x21, 0x3f, 0x75, 0x44, 0x4a, + 0xb0, 0x7c, 0xc3, 0x46, 0xd1, 0x74, 0x89, 0x9f, 0x64, 0x07, 0x56, 0x87, 0xb4, 0x1f, 0xc6, 0xf3, + 0x25, 0x17, 0xdf, 0x64, 0xbe, 0x56, 0xaa, 0x7f, 0x2f, 0x43, 0x41, 0xf2, 0x75, 0xd6, 0xb3, 0x03, + 0x21, 0xbf, 0xcb, 0x6c, 0xbe, 0x81, 0xb2, 0x6c, 0x75, 0xc7, 0xe5, 0xf6, 0x95, 0x6d, 0xe2, 0x0d, + 0x11, 0x35, 0x4a, 0xa0, 0x02, 0x06, 0xf3, 0x45, 0x6a, 0x30, 0xd8, 0xef, 0x97, 0x13, 0xaa, 0x28, + 0x99, 0x2a, 0x4b, 0x3f, 0x08, 0xc8, 0x35, 0xbc, 0x27, 0x47, 0x22, 0xc5, 0x57, 0x34, 0xad, 0xe9, + 0xae, 0x30, 0xd9, 0x29, 0xae, 0xf6, 0x82, 0xf4, 0x03, 0xf2, 0x3d, 0xe4, 0x06, 0x6f, 0x79, 0xdc, + 0xf0, 0xea, 0x0a, 0xb2, 0xef, 0xa7, 0xb2, 0x2f, 0xde, 0xf2, 0xa8, 0xbf, 0x75, 0x18, 0x24, 0xbf, + 0x05, 0xe1, 0x9a, 0x73, 0x2f, 0x26, 0x6c, 0xdc, 0x42, 0x38, 0xe3, 0xdc, 0x8b, 0x09, 0xd7, 0xc9, + 0x6f, 0xd2, 0x9c, 0xbe, 0xde, 0xd6, 0x31, 0x95, 0x9f, 0xa5, 0x12, 0xe2, 0x8a, 0xcd, 0xb9, 0xe0, + 0xaa, 0xaf, 0x01, 0xc6, 0x8f, 0x49, 0xce, 0x81, 0x60, 0x70, 0xcc, 0xa1, 0xdd, 0x3e, 0xb3, 0xe4, + 0x35, 0x83, 0x65, 0x2e, 0x1c, 0x7c, 0x38, 0x37, 0x46, 0xd9, 0xb0, 0x25, 0xa1, 0xd4, 0xa4, 0x10, + 0x77, 0x04, 0x7b, 0x1c, 0x80, 0x60, 0x63, 0xd8, 0x77, 0x67, 0x0b, 0x71, 0xc4, 0x16, 0xca, 0x29, + 0xb6, 0x06, 0x7b, 0x73, 0xba, 0x84, 0x3c, 0x82, 0x2d, 0x2f, 0xec, 0x06, 0x61, 0xd7, 0xe0, 0xae, + 0x67, 0x9b, 0x06, 0x36, 0xa6, 0x6c, 0xd5, 0xa2, 0x3c, 0xe8, 0x88, 0xfd, 0x4b, 0x3a, 0x40, 0xcc, + 0x9c, 0x0e, 0xf8, 0x5f, 0x98, 0x5f, 0x15, 0x20, 0xff, 0xcd, 0x34, 0xa1, 0xb0, 0xeb, 0x85, 0xdd, + 0xbe, 0x6d, 0x1a, 0x37, 0x6c, 0x64, 0x98, 0xcc, 0x8f, 0x9c, 0x48, 0xce, 0xbc, 0x51, 0x6e, 0xa1, + 0xe4, 0x25, 0x1b, 0x35, 0xc6, 0x82, 0xb3, 0x25, 0x7d, 0xc7, 0x4b, 0xd9, 0x3f, 0xde, 0x04, 0x18, + 0x97, 0xb3, 0xfa, 0x7b, 0x06, 0x76, 0x7f, 0x7e, 0xfa, 0xf8, 0x70, 0xc2, 0xe2, 0x84, 0x71, 0x6a, + 0xf7, 0x03, 0xb2, 0x0b, 0x59, 0x3b, 0x08, 0x42, 
0xe6, 0x47, 0x31, 0x44, 0x2b, 0x71, 0xe3, 0x06, + 0x61, 0xf7, 0x0d, 0x33, 0x79, 0x34, 0xbc, 0xf1, 0x92, 0x1c, 0x02, 0x04, 0x9c, 0xfa, 0xd1, 0xab, + 0x6a, 0x79, 0xe1, 0x65, 0xbb, 0x81, 0xd6, 0x78, 0xd1, 0x3e, 0x87, 0x1c, 0x7b, 0xe7, 0xd9, 0xfe, + 0x48, 0x6a, 0x57, 0x16, 0x6a, 0x41, 0x9a, 0xa3, 0xb8, 0x0e, 0xdb, 0x81, 0xdd, 0x73, 0x28, 0x0f, + 0x7d, 0x66, 0xd0, 0x7e, 0xcf, 0xf5, 0x6d, 0x7e, 0x3d, 0x50, 0x57, 0xf1, 0xe9, 0x48, 0x72, 0x74, + 0x14, 0x9f, 0x90, 0x4f, 0xa1, 0x38, 0x91, 0x66, 0x3e, 0xf2, 0x98, 0x9a, 0x45, 0xe3, 0x7c, 0x92, + 0xb2, 0xce, 0xc8, 0x63, 0xd5, 0x3f, 0x15, 0xd8, 0x49, 0x4b, 0x2e, 0x79, 0x01, 0xd9, 0x2b, 0xd7, + 0x1f, 0x50, 0x1e, 0xb5, 0x63, 0xfd, 0xce, 0x75, 0x39, 0x45, 0x99, 0x1e, 0xc9, 0x49, 0x05, 0x72, + 0x93, 0x55, 0x96, 0x09, 0x9d, 0xdc, 0x22, 0x97, 0xb0, 0xf9, 0xee, 0xe9, 0xe3, 0x43, 0xc3, 0x92, + 0x65, 0x89, 0xd2, 0xfa, 0x79, 0xaa, 0xc3, 0xf4, 0x4a, 0xea, 0x39, 0x01, 0x88, 0x16, 0xd5, 0x3f, + 0x14, 0x28, 0xcd, 0x7e, 0xc2, 0x90, 0x26, 0xc0, 0x38, 0x21, 0xf8, 0x14, 0xb9, 0x83, 0xfd, 0x05, + 0x31, 0x25, 0xea, 0xb3, 0x25, 0x7d, 0x23, 0xc9, 0x1b, 0x69, 0x40, 0x11, 0x4b, 0x23, 0x2f, 0x54, + 0xac, 0x66, 0x76, 0x61, 0x35, 0x0b, 0x63, 0x89, 0xd8, 0x9c, 0x69, 0x52, 0x06, 0xdb, 0x29, 0x6e, + 0xc9, 0xb7, 0x33, 0x45, 0xf8, 0xf8, 0xf6, 0x07, 0x9e, 0xc9, 0x7c, 0xf4, 0x3e, 0xcb, 0x24, 0xef, + 0xb3, 0xea, 0x5f, 0x0a, 0x6c, 0x4e, 0xbe, 0x8a, 0x45, 0xa7, 0x0f, 0x99, 0x1f, 0xd8, 0xae, 0x83, + 0x1e, 0x96, 0xf5, 0x78, 0x49, 0x4e, 0x61, 0x0b, 0x9d, 0x18, 0xa1, 0x67, 0x25, 0xdf, 0x55, 0x99, + 0x85, 0x61, 0x16, 0x51, 0xf4, 0x0a, 0x35, 0xf1, 0xd7, 0x99, 0x85, 0x1e, 0xc7, 0x1f, 0x55, 0x8b, + 0xc7, 0x26, 0x2f, 0x25, 0xf1, 0x07, 0xd5, 0x7d, 0xc8, 0x75, 0x6d, 0x87, 0xfa, 0x23, 0x03, 0xdf, + 0xf9, 0x62, 0x74, 0x36, 0x75, 0x90, 0x5b, 0x27, 0x94, 0xd3, 0xea, 0x0d, 0xe4, 0x26, 0xbe, 0x13, + 0xc4, 0xa8, 0x4d, 0x3e, 0xb5, 0xb2, 0x78, 0xd4, 0xc2, 0xf1, 0x03, 0xcf, 0x38, 0xcb, 0xcc, 0x3a, + 0x7b, 0x74, 0x0e, 0x1b, 0xc9, 0x0d, 0x4f, 0xca, 0xb0, 0x7b, 0xf1, 0x53, 0xa7, 0x63, 0xb4, 0x3b, + 0x47, 0x1d, 0xcd, 0x78, 0x75, 0xd9, 0x6e, 0x69, 0x8d, 0xe6, 0x69, 0x53, 0x3b, 0x29, 0x2d, 0x91, + 0x12, 0x6c, 0xe2, 0x99, 0x76, 0x79, 0x74, 0x7c, 0xae, 0x9d, 0x94, 0x14, 0xb2, 0x05, 0x79, 0xdc, + 0x39, 0x69, 0xb6, 0xe5, 0x56, 0x46, 0xd0, 0x92, 0x3b, 0x5d, 0xd0, 0xce, 0x3a, 0x9d, 0xd6, 0x3c, + 0x1a, 0x9e, 0x4d, 0xd1, 0x70, 0x67, 0x82, 0xc6, 0xa0, 0x3c, 0x7f, 0x24, 0xc9, 0x97, 0xf0, 0x70, + 0x82, 0x69, 0xb4, 0x5e, 0x1d, 0x9f, 0x37, 0x1b, 0xc6, 0x4b, 0xed, 0x17, 0xa3, 0xa1, 0xe9, 0x9d, + 0xe6, 0x69, 0xb3, 0x21, 0xdc, 0x9e, 0xfe, 0xa8, 0x5f, 0x1c, 0x75, 0x4a, 0x4b, 0x44, 0x85, 0x1d, + 0x31, 0x6e, 0x53, 0x87, 0x2d, 0xed, 0xa2, 0xa4, 0x3c, 0x1a, 0x42, 0x71, 0xa6, 0xe9, 0xc8, 0x03, + 0xf8, 0x60, 0x0e, 0x3b, 0xe1, 0xe5, 0x60, 0x4d, 0x6f, 0x1f, 0x21, 0x62, 0x59, 0x84, 0x23, 0x16, + 0xe8, 0x00, 0xa1, 0x24, 0x0f, 0x1b, 0x5a, 0xfb, 0xe0, 0xe9, 0x33, 0x5c, 0x66, 0x08, 0x81, 0x82, + 0x5c, 0x26, 0x26, 0x2b, 0xc7, 0x57, 0xb0, 0x67, 0xba, 0x83, 0xb4, 0x21, 0x38, 0x2e, 0xe8, 0xf1, + 0x5f, 0xb4, 0x96, 0xa8, 0x6f, 0x4b, 0x79, 0xfd, 0x2c, 0x32, 0xeb, 0xb9, 0x7d, 0xea, 0xf4, 0x6a, + 0xae, 0xdf, 0xab, 0xf7, 0x98, 0x83, 0xd5, 0xaf, 0xcb, 0x23, 0xea, 0xd9, 0xc1, 0xd4, 0xff, 0xbc, + 0xe7, 0xb6, 0xcb, 0xff, 0x51, 0x94, 0x6e, 0x16, 0xad, 0xbe, 0xfa, 0x37, 0x00, 0x00, 0xff, 0xff, + 0x19, 0x85, 0xa9, 0x71, 0x0c, 0x0e, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go index 92254e55..4d967f52 100644 --- 
a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/common.pb.go @@ -10,6 +10,8 @@ It is generated from these files: google/monitoring/v3/group_service.proto google/monitoring/v3/metric.proto google/monitoring/v3/metric_service.proto + google/monitoring/v3/uptime.proto + google/monitoring/v3/uptime_service.proto It has these top-level messages: TypedValue @@ -38,6 +40,16 @@ It has these top-level messages: ListTimeSeriesResponse CreateTimeSeriesRequest CreateTimeSeriesError + UptimeCheckConfig + UptimeCheckIp + ListUptimeCheckConfigsRequest + ListUptimeCheckConfigsResponse + GetUptimeCheckConfigRequest + CreateUptimeCheckConfigRequest + UpdateUptimeCheckConfigRequest + DeleteUptimeCheckConfigRequest + ListUptimeCheckIpsRequest + ListUptimeCheckIpsResponse */ package monitoring diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go new file mode 100644 index 00000000..042cddd0 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime.pb.go @@ -0,0 +1,748 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/uptime.proto + +package monitoring + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import google_api4 "google.golang.org/genproto/googleapis/api/monitoredres" +import google_protobuf3 "github.com/golang/protobuf/ptypes/duration" + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The regions from which an uptime check can be run. +type UptimeCheckRegion int32 + +const ( + // Default value if no region is specified. Will result in uptime checks + // running from all regions. + UptimeCheckRegion_REGION_UNSPECIFIED UptimeCheckRegion = 0 + // Allows checks to run from locations within the United States of America. + UptimeCheckRegion_USA UptimeCheckRegion = 1 + // Allows checks to run from locations within the continent of Europe. + UptimeCheckRegion_EUROPE UptimeCheckRegion = 2 + // Allows checks to run from locations within the continent of South + // America. + UptimeCheckRegion_SOUTH_AMERICA UptimeCheckRegion = 3 + // Allows checks to run from locations within the Asia Pacific area (ex: + // Singapore). + UptimeCheckRegion_ASIA_PACIFIC UptimeCheckRegion = 4 +) + +var UptimeCheckRegion_name = map[int32]string{ + 0: "REGION_UNSPECIFIED", + 1: "USA", + 2: "EUROPE", + 3: "SOUTH_AMERICA", + 4: "ASIA_PACIFIC", +} +var UptimeCheckRegion_value = map[string]int32{ + "REGION_UNSPECIFIED": 0, + "USA": 1, + "EUROPE": 2, + "SOUTH_AMERICA": 3, + "ASIA_PACIFIC": 4, +} + +func (x UptimeCheckRegion) String() string { + return proto.EnumName(UptimeCheckRegion_name, int32(x)) +} +func (UptimeCheckRegion) EnumDescriptor() ([]byte, []int) { return fileDescriptor5, []int{0} } + +// The supported resource types that can be used as values of +// group_resource.resource_type. gae_app and uptime_url are not allowed +// because group checks on App Engine modules and URLs are not allowed. +type GroupResourceType int32 + +const ( + // Default value (not valid). + GroupResourceType_RESOURCE_TYPE_UNSPECIFIED GroupResourceType = 0 + // A group of instances (could be either GCE or AWS_EC2). + GroupResourceType_INSTANCE GroupResourceType = 1 + // A group of AWS load balancers. 
+ GroupResourceType_AWS_ELB_LOAD_BALANCER GroupResourceType = 2 +) + +var GroupResourceType_name = map[int32]string{ + 0: "RESOURCE_TYPE_UNSPECIFIED", + 1: "INSTANCE", + 2: "AWS_ELB_LOAD_BALANCER", +} +var GroupResourceType_value = map[string]int32{ + "RESOURCE_TYPE_UNSPECIFIED": 0, + "INSTANCE": 1, + "AWS_ELB_LOAD_BALANCER": 2, +} + +func (x GroupResourceType) String() string { + return proto.EnumName(GroupResourceType_name, int32(x)) +} +func (GroupResourceType) EnumDescriptor() ([]byte, []int) { return fileDescriptor5, []int{1} } + +// This message configures which resources and services to monitor for +// availability. +type UptimeCheckConfig struct { + // A unique resource name for this UptimeCheckConfig. The format is: + // + // + // `projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]`. + // + // This field should be omitted when creating the uptime check configuration; + // on create, the resource name is assigned by the server and included in the + // response. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` + // A human-friendly name for the uptime check configuration. The display name + // should be unique within a Stackdriver Account in order to make it easier + // to identify; however, uniqueness is not enforced. Required. + DisplayName string `protobuf:"bytes,2,opt,name=display_name,json=displayName" json:"display_name,omitempty"` + // The resource the check is checking. Required. + // + // Types that are valid to be assigned to Resource: + // *UptimeCheckConfig_MonitoredResource + // *UptimeCheckConfig_ResourceGroup_ + Resource isUptimeCheckConfig_Resource `protobuf_oneof:"resource"` + // The type of uptime check request. + // + // Types that are valid to be assigned to CheckRequestType: + // *UptimeCheckConfig_HttpCheck_ + // *UptimeCheckConfig_TcpCheck_ + CheckRequestType isUptimeCheckConfig_CheckRequestType `protobuf_oneof:"check_request_type"` + // How often the uptime check is performed. + // Currently, only 1, 5, 10, and 15 minutes are supported. Required. + Period *google_protobuf3.Duration `protobuf:"bytes,7,opt,name=period" json:"period,omitempty"` + // The maximum amount of time to wait for the request to complete (must be + // between 1 and 60 seconds). Required. + Timeout *google_protobuf3.Duration `protobuf:"bytes,8,opt,name=timeout" json:"timeout,omitempty"` + // The expected content on the page the check is run against. + // Currently, only the first entry in the list is supported, and other entries + // will be ignored. The server will look for an exact match of the string in + // the page response's content. This field is optional and should only be + // specified if a content match is required. + ContentMatchers []*UptimeCheckConfig_ContentMatcher `protobuf:"bytes,9,rep,name=content_matchers,json=contentMatchers" json:"content_matchers,omitempty"` + // The list of regions from which the check will be run. + // If this field is specified, enough regions to include a minimum of + // 3 locations must be provided, or an error message is returned. + // Not specifying this field will result in uptime checks running from all + // regions. + SelectedRegions []UptimeCheckRegion `protobuf:"varint,10,rep,packed,name=selected_regions,json=selectedRegions,enum=google.monitoring.v3.UptimeCheckRegion" json:"selected_regions,omitempty"` + // The internal checkers that this check will egress from. 
+ InternalCheckers []*UptimeCheckConfig_InternalChecker `protobuf:"bytes,14,rep,name=internal_checkers,json=internalCheckers" json:"internal_checkers,omitempty"` +} + +func (m *UptimeCheckConfig) Reset() { *m = UptimeCheckConfig{} } +func (m *UptimeCheckConfig) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckConfig) ProtoMessage() {} +func (*UptimeCheckConfig) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0} } + +type isUptimeCheckConfig_Resource interface { + isUptimeCheckConfig_Resource() +} +type isUptimeCheckConfig_CheckRequestType interface { + isUptimeCheckConfig_CheckRequestType() +} + +type UptimeCheckConfig_MonitoredResource struct { + MonitoredResource *google_api4.MonitoredResource `protobuf:"bytes,3,opt,name=monitored_resource,json=monitoredResource,oneof"` +} +type UptimeCheckConfig_ResourceGroup_ struct { + ResourceGroup *UptimeCheckConfig_ResourceGroup `protobuf:"bytes,4,opt,name=resource_group,json=resourceGroup,oneof"` +} +type UptimeCheckConfig_HttpCheck_ struct { + HttpCheck *UptimeCheckConfig_HttpCheck `protobuf:"bytes,5,opt,name=http_check,json=httpCheck,oneof"` +} +type UptimeCheckConfig_TcpCheck_ struct { + TcpCheck *UptimeCheckConfig_TcpCheck `protobuf:"bytes,6,opt,name=tcp_check,json=tcpCheck,oneof"` +} + +func (*UptimeCheckConfig_MonitoredResource) isUptimeCheckConfig_Resource() {} +func (*UptimeCheckConfig_ResourceGroup_) isUptimeCheckConfig_Resource() {} +func (*UptimeCheckConfig_HttpCheck_) isUptimeCheckConfig_CheckRequestType() {} +func (*UptimeCheckConfig_TcpCheck_) isUptimeCheckConfig_CheckRequestType() {} + +func (m *UptimeCheckConfig) GetResource() isUptimeCheckConfig_Resource { + if m != nil { + return m.Resource + } + return nil +} +func (m *UptimeCheckConfig) GetCheckRequestType() isUptimeCheckConfig_CheckRequestType { + if m != nil { + return m.CheckRequestType + } + return nil +} + +func (m *UptimeCheckConfig) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +func (m *UptimeCheckConfig) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +func (m *UptimeCheckConfig) GetMonitoredResource() *google_api4.MonitoredResource { + if x, ok := m.GetResource().(*UptimeCheckConfig_MonitoredResource); ok { + return x.MonitoredResource + } + return nil +} + +func (m *UptimeCheckConfig) GetResourceGroup() *UptimeCheckConfig_ResourceGroup { + if x, ok := m.GetResource().(*UptimeCheckConfig_ResourceGroup_); ok { + return x.ResourceGroup + } + return nil +} + +func (m *UptimeCheckConfig) GetHttpCheck() *UptimeCheckConfig_HttpCheck { + if x, ok := m.GetCheckRequestType().(*UptimeCheckConfig_HttpCheck_); ok { + return x.HttpCheck + } + return nil +} + +func (m *UptimeCheckConfig) GetTcpCheck() *UptimeCheckConfig_TcpCheck { + if x, ok := m.GetCheckRequestType().(*UptimeCheckConfig_TcpCheck_); ok { + return x.TcpCheck + } + return nil +} + +func (m *UptimeCheckConfig) GetPeriod() *google_protobuf3.Duration { + if m != nil { + return m.Period + } + return nil +} + +func (m *UptimeCheckConfig) GetTimeout() *google_protobuf3.Duration { + if m != nil { + return m.Timeout + } + return nil +} + +func (m *UptimeCheckConfig) GetContentMatchers() []*UptimeCheckConfig_ContentMatcher { + if m != nil { + return m.ContentMatchers + } + return nil +} + +func (m *UptimeCheckConfig) GetSelectedRegions() []UptimeCheckRegion { + if m != nil { + return m.SelectedRegions + } + return nil +} + +func (m *UptimeCheckConfig) GetInternalCheckers() []*UptimeCheckConfig_InternalChecker { + if m != nil { 
+ return m.InternalCheckers + } + return nil +} + +// XXX_OneofFuncs is for the internal use of the proto package. +func (*UptimeCheckConfig) XXX_OneofFuncs() (func(msg proto.Message, b *proto.Buffer) error, func(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error), func(msg proto.Message) (n int), []interface{}) { + return _UptimeCheckConfig_OneofMarshaler, _UptimeCheckConfig_OneofUnmarshaler, _UptimeCheckConfig_OneofSizer, []interface{}{ + (*UptimeCheckConfig_MonitoredResource)(nil), + (*UptimeCheckConfig_ResourceGroup_)(nil), + (*UptimeCheckConfig_HttpCheck_)(nil), + (*UptimeCheckConfig_TcpCheck_)(nil), + } +} + +func _UptimeCheckConfig_OneofMarshaler(msg proto.Message, b *proto.Buffer) error { + m := msg.(*UptimeCheckConfig) + // resource + switch x := m.Resource.(type) { + case *UptimeCheckConfig_MonitoredResource: + b.EncodeVarint(3<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.MonitoredResource); err != nil { + return err + } + case *UptimeCheckConfig_ResourceGroup_: + b.EncodeVarint(4<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.ResourceGroup); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("UptimeCheckConfig.Resource has unexpected type %T", x) + } + // check_request_type + switch x := m.CheckRequestType.(type) { + case *UptimeCheckConfig_HttpCheck_: + b.EncodeVarint(5<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.HttpCheck); err != nil { + return err + } + case *UptimeCheckConfig_TcpCheck_: + b.EncodeVarint(6<<3 | proto.WireBytes) + if err := b.EncodeMessage(x.TcpCheck); err != nil { + return err + } + case nil: + default: + return fmt.Errorf("UptimeCheckConfig.CheckRequestType has unexpected type %T", x) + } + return nil +} + +func _UptimeCheckConfig_OneofUnmarshaler(msg proto.Message, tag, wire int, b *proto.Buffer) (bool, error) { + m := msg.(*UptimeCheckConfig) + switch tag { + case 3: // resource.monitored_resource + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(google_api4.MonitoredResource) + err := b.DecodeMessage(msg) + m.Resource = &UptimeCheckConfig_MonitoredResource{msg} + return true, err + case 4: // resource.resource_group + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(UptimeCheckConfig_ResourceGroup) + err := b.DecodeMessage(msg) + m.Resource = &UptimeCheckConfig_ResourceGroup_{msg} + return true, err + case 5: // check_request_type.http_check + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(UptimeCheckConfig_HttpCheck) + err := b.DecodeMessage(msg) + m.CheckRequestType = &UptimeCheckConfig_HttpCheck_{msg} + return true, err + case 6: // check_request_type.tcp_check + if wire != proto.WireBytes { + return true, proto.ErrInternalBadWireType + } + msg := new(UptimeCheckConfig_TcpCheck) + err := b.DecodeMessage(msg) + m.CheckRequestType = &UptimeCheckConfig_TcpCheck_{msg} + return true, err + default: + return false, nil + } +} + +func _UptimeCheckConfig_OneofSizer(msg proto.Message) (n int) { + m := msg.(*UptimeCheckConfig) + // resource + switch x := m.Resource.(type) { + case *UptimeCheckConfig_MonitoredResource: + s := proto.Size(x.MonitoredResource) + n += proto.SizeVarint(3<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *UptimeCheckConfig_ResourceGroup_: + s := proto.Size(x.ResourceGroup) + n += proto.SizeVarint(4<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in 
oneof", x)) + } + // check_request_type + switch x := m.CheckRequestType.(type) { + case *UptimeCheckConfig_HttpCheck_: + s := proto.Size(x.HttpCheck) + n += proto.SizeVarint(5<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case *UptimeCheckConfig_TcpCheck_: + s := proto.Size(x.TcpCheck) + n += proto.SizeVarint(6<<3 | proto.WireBytes) + n += proto.SizeVarint(uint64(s)) + n += s + case nil: + default: + panic(fmt.Sprintf("proto: unexpected type %T in oneof", x)) + } + return n +} + +// The resource submessage for group checks. It can be used instead of a +// monitored resource, when multiple resources are being monitored. +type UptimeCheckConfig_ResourceGroup struct { + // The group of resources being monitored. Should be only the + // group_id, not projects//groups/. + GroupId string `protobuf:"bytes,1,opt,name=group_id,json=groupId" json:"group_id,omitempty"` + // The resource type of the group members. + ResourceType GroupResourceType `protobuf:"varint,2,opt,name=resource_type,json=resourceType,enum=google.monitoring.v3.GroupResourceType" json:"resource_type,omitempty"` +} + +func (m *UptimeCheckConfig_ResourceGroup) Reset() { *m = UptimeCheckConfig_ResourceGroup{} } +func (m *UptimeCheckConfig_ResourceGroup) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckConfig_ResourceGroup) ProtoMessage() {} +func (*UptimeCheckConfig_ResourceGroup) Descriptor() ([]byte, []int) { + return fileDescriptor5, []int{0, 0} +} + +func (m *UptimeCheckConfig_ResourceGroup) GetGroupId() string { + if m != nil { + return m.GroupId + } + return "" +} + +func (m *UptimeCheckConfig_ResourceGroup) GetResourceType() GroupResourceType { + if m != nil { + return m.ResourceType + } + return GroupResourceType_RESOURCE_TYPE_UNSPECIFIED +} + +// Information involved in an HTTP/HTTPS uptime check request. +type UptimeCheckConfig_HttpCheck struct { + // If true, use HTTPS instead of HTTP to run the check. + UseSsl bool `protobuf:"varint,1,opt,name=use_ssl,json=useSsl" json:"use_ssl,omitempty"` + // The path to the page to run the check against. Will be combined with the + // host (specified within the MonitoredResource) and port to construct the + // full URL. Optional (defaults to "/"). + Path string `protobuf:"bytes,2,opt,name=path" json:"path,omitempty"` + // The port to the page to run the check against. Will be combined with host + // (specified within the MonitoredResource) and path to construct the full + // URL. Optional (defaults to 80 without SSL, or 443 with SSL). + Port int32 `protobuf:"varint,3,opt,name=port" json:"port,omitempty"` + // The authentication information. Optional when creating an HTTP check; + // defaults to empty. + AuthInfo *UptimeCheckConfig_HttpCheck_BasicAuthentication `protobuf:"bytes,4,opt,name=auth_info,json=authInfo" json:"auth_info,omitempty"` + // Boolean specifiying whether to encrypt the header information. + // Encryption should be specified for any headers related to authentication + // that you do not wish to be seen when retrieving the configuration. The + // server will be responsible for encrypting the headers. + // On Get/List calls, if mask_headers is set to True then the headers + // will be obscured with ******. + MaskHeaders bool `protobuf:"varint,5,opt,name=mask_headers,json=maskHeaders" json:"mask_headers,omitempty"` + // The list of headers to send as part of the uptime check request. 
+ // If two headers have the same key and different values, they should + // be entered as a single header, with the value being a comma-separated + // list of all the desired values as described at + // https://www.w3.org/Protocols/rfc2616/rfc2616.txt (page 31). + // Entering two separate headers with the same key in a Create call will + // cause the first to be overwritten by the second. + Headers map[string]string `protobuf:"bytes,6,rep,name=headers" json:"headers,omitempty" protobuf_key:"bytes,1,opt,name=key" protobuf_val:"bytes,2,opt,name=value"` +} + +func (m *UptimeCheckConfig_HttpCheck) Reset() { *m = UptimeCheckConfig_HttpCheck{} } +func (m *UptimeCheckConfig_HttpCheck) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckConfig_HttpCheck) ProtoMessage() {} +func (*UptimeCheckConfig_HttpCheck) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0, 1} } + +func (m *UptimeCheckConfig_HttpCheck) GetUseSsl() bool { + if m != nil { + return m.UseSsl + } + return false +} + +func (m *UptimeCheckConfig_HttpCheck) GetPath() string { + if m != nil { + return m.Path + } + return "" +} + +func (m *UptimeCheckConfig_HttpCheck) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +func (m *UptimeCheckConfig_HttpCheck) GetAuthInfo() *UptimeCheckConfig_HttpCheck_BasicAuthentication { + if m != nil { + return m.AuthInfo + } + return nil +} + +func (m *UptimeCheckConfig_HttpCheck) GetMaskHeaders() bool { + if m != nil { + return m.MaskHeaders + } + return false +} + +func (m *UptimeCheckConfig_HttpCheck) GetHeaders() map[string]string { + if m != nil { + return m.Headers + } + return nil +} + +// A type of authentication to perform against the specified resource or URL +// that uses username and password. +// Currently, only Basic authentication is supported in Uptime Monitoring. +type UptimeCheckConfig_HttpCheck_BasicAuthentication struct { + // The username to authenticate. + Username string `protobuf:"bytes,1,opt,name=username" json:"username,omitempty"` + // The password to authenticate. + Password string `protobuf:"bytes,2,opt,name=password" json:"password,omitempty"` +} + +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) Reset() { + *m = UptimeCheckConfig_HttpCheck_BasicAuthentication{} +} +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) String() string { + return proto.CompactTextString(m) +} +func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) ProtoMessage() {} +func (*UptimeCheckConfig_HttpCheck_BasicAuthentication) Descriptor() ([]byte, []int) { + return fileDescriptor5, []int{0, 1, 0} +} + +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetUsername() string { + if m != nil { + return m.Username + } + return "" +} + +func (m *UptimeCheckConfig_HttpCheck_BasicAuthentication) GetPassword() string { + if m != nil { + return m.Password + } + return "" +} + +// Information required for a TCP uptime check request. +type UptimeCheckConfig_TcpCheck struct { + // The port to the page to run the check against. Will be combined with host + // (specified within the MonitoredResource) to construct the full URL. + // Required. 
+ Port int32 `protobuf:"varint,1,opt,name=port" json:"port,omitempty"` +} + +func (m *UptimeCheckConfig_TcpCheck) Reset() { *m = UptimeCheckConfig_TcpCheck{} } +func (m *UptimeCheckConfig_TcpCheck) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckConfig_TcpCheck) ProtoMessage() {} +func (*UptimeCheckConfig_TcpCheck) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{0, 2} } + +func (m *UptimeCheckConfig_TcpCheck) GetPort() int32 { + if m != nil { + return m.Port + } + return 0 +} + +// Used to perform string matching. Currently, this matches on the exact +// content. In the future, it can be expanded to allow for regular expressions +// and more complex matching. +type UptimeCheckConfig_ContentMatcher struct { + // String content to match + Content string `protobuf:"bytes,1,opt,name=content" json:"content,omitempty"` +} + +func (m *UptimeCheckConfig_ContentMatcher) Reset() { *m = UptimeCheckConfig_ContentMatcher{} } +func (m *UptimeCheckConfig_ContentMatcher) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckConfig_ContentMatcher) ProtoMessage() {} +func (*UptimeCheckConfig_ContentMatcher) Descriptor() ([]byte, []int) { + return fileDescriptor5, []int{0, 3} +} + +func (m *UptimeCheckConfig_ContentMatcher) GetContent() string { + if m != nil { + return m.Content + } + return "" +} + +// Nimbus InternalCheckers. +type UptimeCheckConfig_InternalChecker struct { + // The GCP project ID. Not necessarily the same as the project_id for the config. + ProjectId string `protobuf:"bytes,1,opt,name=project_id,json=projectId" json:"project_id,omitempty"` + // The internal network to perform this uptime check on. + Network string `protobuf:"bytes,2,opt,name=network" json:"network,omitempty"` + // The GCP zone the uptime check should egress from. Only respected for + // internal uptime checks, where internal_network is specified. + GcpZone string `protobuf:"bytes,3,opt,name=gcp_zone,json=gcpZone" json:"gcp_zone,omitempty"` + // The checker ID. + CheckerId string `protobuf:"bytes,4,opt,name=checker_id,json=checkerId" json:"checker_id,omitempty"` + // The checker's human-readable name. + DisplayName string `protobuf:"bytes,5,opt,name=display_name,json=displayName" json:"display_name,omitempty"` +} + +func (m *UptimeCheckConfig_InternalChecker) Reset() { *m = UptimeCheckConfig_InternalChecker{} } +func (m *UptimeCheckConfig_InternalChecker) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckConfig_InternalChecker) ProtoMessage() {} +func (*UptimeCheckConfig_InternalChecker) Descriptor() ([]byte, []int) { + return fileDescriptor5, []int{0, 4} +} + +func (m *UptimeCheckConfig_InternalChecker) GetProjectId() string { + if m != nil { + return m.ProjectId + } + return "" +} + +func (m *UptimeCheckConfig_InternalChecker) GetNetwork() string { + if m != nil { + return m.Network + } + return "" +} + +func (m *UptimeCheckConfig_InternalChecker) GetGcpZone() string { + if m != nil { + return m.GcpZone + } + return "" +} + +func (m *UptimeCheckConfig_InternalChecker) GetCheckerId() string { + if m != nil { + return m.CheckerId + } + return "" +} + +func (m *UptimeCheckConfig_InternalChecker) GetDisplayName() string { + if m != nil { + return m.DisplayName + } + return "" +} + +// Contains the region, location, and list of IP +// addresses where checkers in the location run from. +type UptimeCheckIp struct { + // A broad region category in which the IP address is located. 
+ Region UptimeCheckRegion `protobuf:"varint,1,opt,name=region,enum=google.monitoring.v3.UptimeCheckRegion" json:"region,omitempty"` + // A more specific location within the region that typically encodes + // a particular city/town/metro (and its containing state/province or country) + // within the broader umbrella region category. + Location string `protobuf:"bytes,2,opt,name=location" json:"location,omitempty"` + // The IP address from which the uptime check originates. This is a full + // IP address (not an IP address range). Most IP addresses, as of this + // publication, are in IPv4 format; however, one should not rely on the + // IP addresses being in IPv4 format indefinitely and should support + // interpreting this field in either IPv4 or IPv6 format. + IpAddress string `protobuf:"bytes,3,opt,name=ip_address,json=ipAddress" json:"ip_address,omitempty"` +} + +func (m *UptimeCheckIp) Reset() { *m = UptimeCheckIp{} } +func (m *UptimeCheckIp) String() string { return proto.CompactTextString(m) } +func (*UptimeCheckIp) ProtoMessage() {} +func (*UptimeCheckIp) Descriptor() ([]byte, []int) { return fileDescriptor5, []int{1} } + +func (m *UptimeCheckIp) GetRegion() UptimeCheckRegion { + if m != nil { + return m.Region + } + return UptimeCheckRegion_REGION_UNSPECIFIED +} + +func (m *UptimeCheckIp) GetLocation() string { + if m != nil { + return m.Location + } + return "" +} + +func (m *UptimeCheckIp) GetIpAddress() string { + if m != nil { + return m.IpAddress + } + return "" +} + +func init() { + proto.RegisterType((*UptimeCheckConfig)(nil), "google.monitoring.v3.UptimeCheckConfig") + proto.RegisterType((*UptimeCheckConfig_ResourceGroup)(nil), "google.monitoring.v3.UptimeCheckConfig.ResourceGroup") + proto.RegisterType((*UptimeCheckConfig_HttpCheck)(nil), "google.monitoring.v3.UptimeCheckConfig.HttpCheck") + proto.RegisterType((*UptimeCheckConfig_HttpCheck_BasicAuthentication)(nil), "google.monitoring.v3.UptimeCheckConfig.HttpCheck.BasicAuthentication") + proto.RegisterType((*UptimeCheckConfig_TcpCheck)(nil), "google.monitoring.v3.UptimeCheckConfig.TcpCheck") + proto.RegisterType((*UptimeCheckConfig_ContentMatcher)(nil), "google.monitoring.v3.UptimeCheckConfig.ContentMatcher") + proto.RegisterType((*UptimeCheckConfig_InternalChecker)(nil), "google.monitoring.v3.UptimeCheckConfig.InternalChecker") + proto.RegisterType((*UptimeCheckIp)(nil), "google.monitoring.v3.UptimeCheckIp") + proto.RegisterEnum("google.monitoring.v3.UptimeCheckRegion", UptimeCheckRegion_name, UptimeCheckRegion_value) + proto.RegisterEnum("google.monitoring.v3.GroupResourceType", GroupResourceType_name, GroupResourceType_value) +} + +func init() { proto.RegisterFile("google/monitoring/v3/uptime.proto", fileDescriptor5) } + +var fileDescriptor5 = []byte{ + // 1021 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x94, 0x56, 0xdd, 0x4e, 0xe3, 0x46, + 0x14, 0x5e, 0x13, 0xc8, 0xcf, 0x21, 0xb0, 0x66, 0x4a, 0xdb, 0x60, 0x89, 0x15, 0xbb, 0xbd, 0x28, + 0xe2, 0xc2, 0xe9, 0x12, 0xf5, 0x47, 0x5b, 0x69, 0x2b, 0x27, 0xb8, 0xc4, 0x12, 0x24, 0xd1, 0x84, + 0x6c, 0xdb, 0x2d, 0xaa, 0x65, 0xec, 0x21, 0x71, 0x49, 0x3c, 0xae, 0x67, 0xcc, 0x96, 0xbe, 0x42, + 0x1f, 0xa3, 0x17, 0x95, 0xfa, 0x04, 0x7d, 0x86, 0xbe, 0x4d, 0xdf, 0xa0, 0x9a, 0xf1, 0x4c, 0x20, + 0x40, 0xb5, 0x70, 0x37, 0xdf, 0xf9, 0xf9, 0xe6, 0x1c, 0x9f, 0x9f, 0x31, 0x3c, 0x1f, 0x53, 0x3a, + 0x9e, 0x92, 0xe6, 0x8c, 0x26, 0x31, 0xa7, 0x59, 0x9c, 0x8c, 0x9b, 0x97, 0xad, 0x66, 0x9e, 0xf2, + 0x78, 0x46, 0xec, 0x34, 0xa3, 0x9c, 0xa2, 
0xcd, 0xc2, 0xc4, 0xbe, 0x36, 0xb1, 0x2f, 0x5b, 0xd6, + 0x27, 0xca, 0x31, 0x48, 0x63, 0xed, 0x4c, 0x22, 0x3f, 0x23, 0x8c, 0xe6, 0x59, 0xa8, 0x5c, 0xad, + 0x67, 0xca, 0x48, 0xa2, 0xb3, 0xfc, 0xbc, 0x19, 0xe5, 0x59, 0xc0, 0x63, 0x9a, 0x14, 0xfa, 0x17, + 0xff, 0xd6, 0x61, 0x63, 0x24, 0xef, 0xea, 0x4c, 0x48, 0x78, 0xd1, 0xa1, 0xc9, 0x79, 0x3c, 0x46, + 0x08, 0x96, 0x93, 0x60, 0x46, 0x1a, 0xc6, 0x8e, 0xb1, 0x5b, 0xc3, 0xf2, 0x8c, 0x9e, 0x43, 0x3d, + 0x8a, 0x59, 0x3a, 0x0d, 0xae, 0x7c, 0xa9, 0x5b, 0x92, 0xba, 0x55, 0x25, 0xeb, 0x09, 0x93, 0x1e, + 0xa0, 0xbb, 0x81, 0x34, 0x4a, 0x3b, 0xc6, 0xee, 0xea, 0xfe, 0xb6, 0xad, 0x92, 0x08, 0xd2, 0xd8, + 0x3e, 0xd6, 0x56, 0x58, 0x19, 0x75, 0x9f, 0xe0, 0x8d, 0xd9, 0x6d, 0x21, 0xfa, 0x09, 0xd6, 0x35, + 0x8b, 0x3f, 0xce, 0x68, 0x9e, 0x36, 0x96, 0x25, 0xd7, 0xe7, 0xf6, 0x7d, 0x1f, 0xc4, 0xbe, 0x93, + 0x87, 0xad, 0x99, 0x0e, 0x85, 0x73, 0xf7, 0x09, 0x5e, 0xcb, 0x6e, 0x0a, 0x10, 0x06, 0x98, 0x70, + 0x9e, 0xfa, 0xa1, 0x70, 0x69, 0xac, 0x48, 0xee, 0x97, 0x0f, 0xe5, 0xee, 0x72, 0x9e, 0x4a, 0xdc, + 0x35, 0x70, 0x6d, 0xa2, 0x01, 0xea, 0x43, 0x8d, 0x87, 0x9a, 0xb2, 0x2c, 0x29, 0x3f, 0x7b, 0x28, + 0xe5, 0x49, 0x38, 0x67, 0xac, 0x72, 0x75, 0x46, 0x2f, 0xa1, 0x9c, 0x92, 0x2c, 0xa6, 0x51, 0xa3, + 0x22, 0xd9, 0xb6, 0x34, 0x9b, 0x2e, 0xa9, 0x7d, 0xa0, 0x4a, 0x8a, 0x95, 0x21, 0x6a, 0x41, 0x45, + 0x50, 0xd3, 0x9c, 0x37, 0xaa, 0xef, 0xf3, 0xd1, 0x96, 0x28, 0x00, 0x33, 0xa4, 0x09, 0x27, 0x09, + 0xf7, 0x67, 0x01, 0x0f, 0x27, 0x24, 0x63, 0x8d, 0xda, 0x4e, 0x69, 0x77, 0x75, 0xff, 0x8b, 0x87, + 0xc6, 0xdf, 0x29, 0xfc, 0x8f, 0x0b, 0x77, 0xfc, 0x34, 0x5c, 0xc0, 0x0c, 0x61, 0x30, 0x19, 0x99, + 0x92, 0x90, 0xcb, 0xf6, 0x18, 0xc7, 0x34, 0x61, 0x0d, 0xd8, 0x29, 0xed, 0xae, 0xef, 0x7f, 0xfa, + 0xde, 0x2b, 0xb0, 0xb4, 0xc7, 0x4f, 0x35, 0x41, 0x81, 0x19, 0x8a, 0x60, 0x23, 0x4e, 0x38, 0xc9, + 0x92, 0x60, 0x5a, 0x7c, 0x74, 0x11, 0xf7, 0xba, 0x8c, 0xfb, 0xcb, 0x87, 0xc6, 0xed, 0x29, 0x82, + 0x4e, 0xe1, 0x8f, 0xcd, 0x78, 0x51, 0xc0, 0xac, 0x5f, 0x61, 0x6d, 0xa1, 0x97, 0xd0, 0x16, 0x54, + 0x65, 0x47, 0xfa, 0x71, 0xa4, 0xa6, 0xa4, 0x22, 0xb1, 0x17, 0xa1, 0x23, 0x98, 0xb7, 0x99, 0xcf, + 0xaf, 0xd2, 0x62, 0x52, 0xfe, 0x37, 0x45, 0x49, 0xa7, 0xb9, 0x4f, 0xae, 0x52, 0x82, 0xeb, 0xd9, + 0x0d, 0x64, 0xfd, 0x5d, 0x82, 0xda, 0xbc, 0xd5, 0xd0, 0xc7, 0x50, 0xc9, 0x19, 0xf1, 0x19, 0x9b, + 0xca, 0x5b, 0xab, 0xb8, 0x9c, 0x33, 0x32, 0x64, 0x53, 0x31, 0xb1, 0x69, 0xc0, 0x27, 0x6a, 0x2a, + 0xe5, 0x59, 0xca, 0x68, 0xc6, 0xe5, 0x00, 0xae, 0x60, 0x79, 0x46, 0x67, 0x50, 0x0b, 0x72, 0x3e, + 0xf1, 0xe3, 0xe4, 0x9c, 0xaa, 0x69, 0x72, 0x1f, 0xdd, 0xf1, 0x76, 0x3b, 0x60, 0x71, 0xe8, 0xe4, + 0x7c, 0x42, 0x12, 0x1e, 0x87, 0x45, 0x23, 0x55, 0x05, 0xaf, 0x97, 0x9c, 0x53, 0xb1, 0x29, 0x66, + 0x01, 0xbb, 0xf0, 0x27, 0x24, 0x88, 0x44, 0x35, 0x56, 0x64, 0xa4, 0xab, 0x42, 0xd6, 0x2d, 0x44, + 0xe8, 0x7b, 0xa8, 0x68, 0x6d, 0x59, 0xd6, 0xea, 0xf5, 0xe3, 0x83, 0x50, 0x5c, 0x6e, 0xc2, 0xb3, + 0x2b, 0xac, 0xe9, 0xac, 0x63, 0xf8, 0xe0, 0x9e, 0xe8, 0x90, 0x05, 0xd5, 0x9c, 0x89, 0x9a, 0xce, + 0xb7, 0xda, 0x1c, 0x0b, 0x5d, 0x1a, 0x30, 0xf6, 0x8e, 0x66, 0x91, 0xfa, 0x7e, 0x73, 0x6c, 0xbd, + 0x82, 0xfa, 0xcd, 0x7b, 0x90, 0x09, 0xa5, 0x0b, 0x72, 0xa5, 0x28, 0xc4, 0x11, 0x6d, 0xc2, 0xca, + 0x65, 0x30, 0xcd, 0xf5, 0x42, 0x2c, 0xc0, 0xab, 0xa5, 0xaf, 0x0c, 0xeb, 0x19, 0x54, 0xf5, 0x44, + 0xcf, 0x6b, 0x61, 0x5c, 0xd7, 0xc2, 0xda, 0x83, 0xf5, 0xc5, 0x89, 0x41, 0x0d, 0xa8, 0xa8, 0x99, + 0xd1, 0x4d, 0xa5, 0xa0, 0xf5, 0xa7, 0x01, 0x4f, 0x6f, 0xb5, 0x29, 0xda, 0x06, 0x48, 0x33, 0xfa, + 0x33, 0x09, 0xf9, 0x75, 0x17, 0xd6, 0x94, 0xc4, 0x8b, 0x04, 0x59, 
0x42, 0xf8, 0x3b, 0x9a, 0x5d, + 0xa8, 0xd0, 0x34, 0x94, 0xcd, 0x1b, 0xa6, 0xfe, 0x6f, 0x34, 0x29, 0xb6, 0xb3, 0x68, 0xde, 0x30, + 0x7d, 0x4b, 0x13, 0x22, 0x38, 0xd5, 0x14, 0x09, 0xce, 0xe5, 0x82, 0x53, 0x49, 0xbc, 0xe8, 0xce, + 0x23, 0xb0, 0x72, 0xe7, 0x11, 0x68, 0x03, 0x54, 0x75, 0x03, 0xb7, 0x37, 0x01, 0x49, 0x5f, 0x3f, + 0x23, 0xbf, 0xe4, 0x84, 0x71, 0x39, 0x0f, 0x2f, 0x7e, 0x37, 0x60, 0xed, 0x46, 0x61, 0xbd, 0x14, + 0x7d, 0x03, 0xe5, 0x62, 0x1f, 0xc8, 0x2c, 0x1e, 0xb1, 0x0e, 0x94, 0x9b, 0x28, 0xe1, 0x94, 0x16, + 0xa5, 0xd6, 0x25, 0xd4, 0x58, 0xa4, 0x14, 0xa7, 0x7e, 0x10, 0x45, 0x19, 0x61, 0x4c, 0xe5, 0x5b, + 0x8b, 0x53, 0xa7, 0x10, 0xec, 0x91, 0x85, 0x07, 0xb0, 0xe0, 0x45, 0x1f, 0x01, 0xc2, 0xee, 0xa1, + 0xd7, 0xef, 0xf9, 0xa3, 0xde, 0x70, 0xe0, 0x76, 0xbc, 0x6f, 0x3d, 0xf7, 0xc0, 0x7c, 0x82, 0x2a, + 0x50, 0x1a, 0x0d, 0x1d, 0xd3, 0x40, 0x00, 0x65, 0x77, 0x84, 0xfb, 0x03, 0xd7, 0x5c, 0x42, 0x1b, + 0xb0, 0x36, 0xec, 0x8f, 0x4e, 0xba, 0xbe, 0x73, 0xec, 0x62, 0xaf, 0xe3, 0x98, 0x25, 0x64, 0x42, + 0xdd, 0x19, 0x7a, 0x8e, 0x3f, 0x70, 0x84, 0x6b, 0xc7, 0x5c, 0xde, 0xfb, 0x11, 0x36, 0xee, 0x8c, + 0x3a, 0xda, 0x86, 0x2d, 0xec, 0x0e, 0xfb, 0x23, 0xdc, 0x71, 0xfd, 0x93, 0x1f, 0x06, 0xee, 0xad, + 0xdb, 0xea, 0x50, 0xf5, 0x7a, 0xc3, 0x13, 0xa7, 0xd7, 0x71, 0x4d, 0x03, 0x6d, 0xc1, 0x87, 0xce, + 0x77, 0x43, 0xdf, 0x3d, 0x6a, 0xfb, 0x47, 0x7d, 0xe7, 0xc0, 0x6f, 0x3b, 0x47, 0x42, 0x83, 0xcd, + 0xa5, 0xf6, 0x1f, 0x06, 0x34, 0x42, 0x3a, 0xbb, 0xf7, 0xab, 0xb5, 0x57, 0x8b, 0xf4, 0x06, 0x62, + 0xf5, 0x0f, 0x8c, 0xb7, 0xaf, 0x95, 0xd1, 0x98, 0x4e, 0x83, 0x64, 0x6c, 0xd3, 0x6c, 0xdc, 0x1c, + 0x93, 0x44, 0x3e, 0x0c, 0xcd, 0x42, 0x15, 0xa4, 0x31, 0x5b, 0xfc, 0x1d, 0xf9, 0xfa, 0x1a, 0xfd, + 0xb5, 0x64, 0x1d, 0x16, 0x04, 0x9d, 0x29, 0xcd, 0x23, 0xfd, 0x94, 0x8b, 0xbb, 0xde, 0xb4, 0xfe, + 0xd1, 0xca, 0x53, 0xa9, 0x3c, 0xbd, 0x56, 0x9e, 0xbe, 0x69, 0x9d, 0x95, 0xe5, 0x25, 0xad, 0xff, + 0x02, 0x00, 0x00, 0xff, 0xff, 0xc9, 0xa5, 0xbc, 0x87, 0xf2, 0x08, 0x00, 0x00, +} diff --git a/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go new file mode 100644 index 00000000..5e2fb2a7 --- /dev/null +++ b/vendor/google.golang.org/genproto/googleapis/monitoring/v3/uptime_service.pb.go @@ -0,0 +1,591 @@ +// Code generated by protoc-gen-go. DO NOT EDIT. +// source: google/monitoring/v3/uptime_service.proto + +package monitoring + +import proto "github.com/golang/protobuf/proto" +import fmt "fmt" +import math "math" +import _ "google.golang.org/genproto/googleapis/api/annotations" +import google_protobuf4 "github.com/golang/protobuf/ptypes/empty" +import google_protobuf5 "google.golang.org/genproto/protobuf/field_mask" + +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + +// Reference imports to suppress errors if they are not otherwise used. +var _ = proto.Marshal +var _ = fmt.Errorf +var _ = math.Inf + +// The protocol for the `ListUptimeCheckConfigs` request. +type ListUptimeCheckConfigsRequest struct { + // The project whose uptime check configurations are listed. The format is + // + // `projects/[PROJECT_ID]`. + Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The maximum number of results to return in a single response. The server + // may further constrain the maximum number of results returned in a single + // page. If the page_size is <=0, the server will decide the number of results + // to be returned. 
+ PageSize int32 `protobuf:"varint,3,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return more results from the previous method call. + PageToken string `protobuf:"bytes,4,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListUptimeCheckConfigsRequest) Reset() { *m = ListUptimeCheckConfigsRequest{} } +func (m *ListUptimeCheckConfigsRequest) String() string { return proto.CompactTextString(m) } +func (*ListUptimeCheckConfigsRequest) ProtoMessage() {} +func (*ListUptimeCheckConfigsRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{0} } + +func (m *ListUptimeCheckConfigsRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *ListUptimeCheckConfigsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListUptimeCheckConfigsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The protocol for the `ListUptimeCheckConfigs` response. +type ListUptimeCheckConfigsResponse struct { + // The returned uptime check configurations. + UptimeCheckConfigs []*UptimeCheckConfig `protobuf:"bytes,1,rep,name=uptime_check_configs,json=uptimeCheckConfigs" json:"uptime_check_configs,omitempty"` + // This field represents the pagination token to retrieve the next page of + // results. If the value is empty, it means no further results for the + // request. To retrieve the next page of results, the value of the + // next_page_token is passed to the subsequent List method call (in the + // request message's page_token field). + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListUptimeCheckConfigsResponse) Reset() { *m = ListUptimeCheckConfigsResponse{} } +func (m *ListUptimeCheckConfigsResponse) String() string { return proto.CompactTextString(m) } +func (*ListUptimeCheckConfigsResponse) ProtoMessage() {} +func (*ListUptimeCheckConfigsResponse) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{1} } + +func (m *ListUptimeCheckConfigsResponse) GetUptimeCheckConfigs() []*UptimeCheckConfig { + if m != nil { + return m.UptimeCheckConfigs + } + return nil +} + +func (m *ListUptimeCheckConfigsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +// The protocol for the `GetUptimeCheckConfig` request. +type GetUptimeCheckConfigRequest struct { + // The uptime check configuration to retrieve. The format is + // + // `projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *GetUptimeCheckConfigRequest) Reset() { *m = GetUptimeCheckConfigRequest{} } +func (m *GetUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) } +func (*GetUptimeCheckConfigRequest) ProtoMessage() {} +func (*GetUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{2} } + +func (m *GetUptimeCheckConfigRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The protocol for the `CreateUptimeCheckConfig` request. +type CreateUptimeCheckConfigRequest struct { + // The project in which to create the uptime check. The format is: + // + // `projects/[PROJECT_ID]`. 
+ Parent string `protobuf:"bytes,1,opt,name=parent" json:"parent,omitempty"` + // The new uptime check configuration. + UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,2,opt,name=uptime_check_config,json=uptimeCheckConfig" json:"uptime_check_config,omitempty"` +} + +func (m *CreateUptimeCheckConfigRequest) Reset() { *m = CreateUptimeCheckConfigRequest{} } +func (m *CreateUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) } +func (*CreateUptimeCheckConfigRequest) ProtoMessage() {} +func (*CreateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{3} } + +func (m *CreateUptimeCheckConfigRequest) GetParent() string { + if m != nil { + return m.Parent + } + return "" +} + +func (m *CreateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig { + if m != nil { + return m.UptimeCheckConfig + } + return nil +} + +// The protocol for the `UpdateUptimeCheckConfig` request. +type UpdateUptimeCheckConfigRequest struct { + // Optional. If present, only the listed fields in the current uptime check + // configuration are updated with values from the new configuration. If this + // field is empty, then the current configuration is completely replaced with + // the new configuration. + UpdateMask *google_protobuf5.FieldMask `protobuf:"bytes,2,opt,name=update_mask,json=updateMask" json:"update_mask,omitempty"` + // Required. If an `"updateMask"` has been specified, this field gives + // the values for the set of fields mentioned in the `"updateMask"`. If an + // `"updateMask"` has not been given, this uptime check configuration replaces + // the current configuration. If a field is mentioned in `"updateMask`" but + // the corresonding field is omitted in this partial uptime check + // configuration, it has the effect of deleting/clearing the field from the + // configuration on the server. + UptimeCheckConfig *UptimeCheckConfig `protobuf:"bytes,3,opt,name=uptime_check_config,json=uptimeCheckConfig" json:"uptime_check_config,omitempty"` +} + +func (m *UpdateUptimeCheckConfigRequest) Reset() { *m = UpdateUptimeCheckConfigRequest{} } +func (m *UpdateUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) } +func (*UpdateUptimeCheckConfigRequest) ProtoMessage() {} +func (*UpdateUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{4} } + +func (m *UpdateUptimeCheckConfigRequest) GetUpdateMask() *google_protobuf5.FieldMask { + if m != nil { + return m.UpdateMask + } + return nil +} + +func (m *UpdateUptimeCheckConfigRequest) GetUptimeCheckConfig() *UptimeCheckConfig { + if m != nil { + return m.UptimeCheckConfig + } + return nil +} + +// The protocol for the `DeleteUptimeCheckConfig` request. +type DeleteUptimeCheckConfigRequest struct { + // The uptime check configuration to delete. The format is + // + // `projects/[PROJECT_ID]/uptimeCheckConfigs/[UPTIME_CHECK_ID]`. + Name string `protobuf:"bytes,1,opt,name=name" json:"name,omitempty"` +} + +func (m *DeleteUptimeCheckConfigRequest) Reset() { *m = DeleteUptimeCheckConfigRequest{} } +func (m *DeleteUptimeCheckConfigRequest) String() string { return proto.CompactTextString(m) } +func (*DeleteUptimeCheckConfigRequest) ProtoMessage() {} +func (*DeleteUptimeCheckConfigRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{5} } + +func (m *DeleteUptimeCheckConfigRequest) GetName() string { + if m != nil { + return m.Name + } + return "" +} + +// The protocol for the `ListUptimeCheckIps` request. 
+type ListUptimeCheckIpsRequest struct { + // The maximum number of results to return in a single response. The server + // may further constrain the maximum number of results returned in a single + // page. If the page_size is <=0, the server will decide the number of results + // to be returned. + // NOTE: this field is not yet implemented + PageSize int32 `protobuf:"varint,2,opt,name=page_size,json=pageSize" json:"page_size,omitempty"` + // If this field is not empty then it must contain the `nextPageToken` value + // returned by a previous call to this method. Using this field causes the + // method to return more results from the previous method call. + // NOTE: this field is not yet implemented + PageToken string `protobuf:"bytes,3,opt,name=page_token,json=pageToken" json:"page_token,omitempty"` +} + +func (m *ListUptimeCheckIpsRequest) Reset() { *m = ListUptimeCheckIpsRequest{} } +func (m *ListUptimeCheckIpsRequest) String() string { return proto.CompactTextString(m) } +func (*ListUptimeCheckIpsRequest) ProtoMessage() {} +func (*ListUptimeCheckIpsRequest) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{6} } + +func (m *ListUptimeCheckIpsRequest) GetPageSize() int32 { + if m != nil { + return m.PageSize + } + return 0 +} + +func (m *ListUptimeCheckIpsRequest) GetPageToken() string { + if m != nil { + return m.PageToken + } + return "" +} + +// The protocol for the `ListUptimeCheckIps` response. +type ListUptimeCheckIpsResponse struct { + // The returned list of IP addresses (including region and location) that the + // checkers run from. + UptimeCheckIps []*UptimeCheckIp `protobuf:"bytes,1,rep,name=uptime_check_ips,json=uptimeCheckIps" json:"uptime_check_ips,omitempty"` + // This field represents the pagination token to retrieve the next page of + // results. If the value is empty, it means no further results for the + // request. To retrieve the next page of results, the value of the + // next_page_token is passed to the subsequent List method call (in the + // request message's page_token field). 
+ // NOTE: this field is not yet implemented + NextPageToken string `protobuf:"bytes,2,opt,name=next_page_token,json=nextPageToken" json:"next_page_token,omitempty"` +} + +func (m *ListUptimeCheckIpsResponse) Reset() { *m = ListUptimeCheckIpsResponse{} } +func (m *ListUptimeCheckIpsResponse) String() string { return proto.CompactTextString(m) } +func (*ListUptimeCheckIpsResponse) ProtoMessage() {} +func (*ListUptimeCheckIpsResponse) Descriptor() ([]byte, []int) { return fileDescriptor6, []int{7} } + +func (m *ListUptimeCheckIpsResponse) GetUptimeCheckIps() []*UptimeCheckIp { + if m != nil { + return m.UptimeCheckIps + } + return nil +} + +func (m *ListUptimeCheckIpsResponse) GetNextPageToken() string { + if m != nil { + return m.NextPageToken + } + return "" +} + +func init() { + proto.RegisterType((*ListUptimeCheckConfigsRequest)(nil), "google.monitoring.v3.ListUptimeCheckConfigsRequest") + proto.RegisterType((*ListUptimeCheckConfigsResponse)(nil), "google.monitoring.v3.ListUptimeCheckConfigsResponse") + proto.RegisterType((*GetUptimeCheckConfigRequest)(nil), "google.monitoring.v3.GetUptimeCheckConfigRequest") + proto.RegisterType((*CreateUptimeCheckConfigRequest)(nil), "google.monitoring.v3.CreateUptimeCheckConfigRequest") + proto.RegisterType((*UpdateUptimeCheckConfigRequest)(nil), "google.monitoring.v3.UpdateUptimeCheckConfigRequest") + proto.RegisterType((*DeleteUptimeCheckConfigRequest)(nil), "google.monitoring.v3.DeleteUptimeCheckConfigRequest") + proto.RegisterType((*ListUptimeCheckIpsRequest)(nil), "google.monitoring.v3.ListUptimeCheckIpsRequest") + proto.RegisterType((*ListUptimeCheckIpsResponse)(nil), "google.monitoring.v3.ListUptimeCheckIpsResponse") +} + +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for UptimeCheckService service + +type UptimeCheckServiceClient interface { + // Lists the existing valid uptime check configurations for the project, + // leaving out any invalid configurations. + ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error) + // Gets a single uptime check configuration. + GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) + // Creates a new uptime check configuration. + CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) + // Updates an uptime check configuration. You can either replace the entire + // configuration with a new one or replace only certain fields in the current + // configuration by specifying the fields to be updated via `"updateMask"`. + // Returns the updated configuration. + UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) + // Deletes an uptime check configuration. Note that this method will fail + // if the uptime check configuration is referenced by an alert policy or + // other dependent configs that would be rendered invalid by the deletion. 
+ DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) + // Returns the list of IPs that checkers run from + ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error) +} + +type uptimeCheckServiceClient struct { + cc *grpc.ClientConn +} + +func NewUptimeCheckServiceClient(cc *grpc.ClientConn) UptimeCheckServiceClient { + return &uptimeCheckServiceClient{cc} +} + +func (c *uptimeCheckServiceClient) ListUptimeCheckConfigs(ctx context.Context, in *ListUptimeCheckConfigsRequest, opts ...grpc.CallOption) (*ListUptimeCheckConfigsResponse, error) { + out := new(ListUptimeCheckConfigsResponse) + err := grpc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) GetUptimeCheckConfig(ctx context.Context, in *GetUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { + out := new(UptimeCheckConfig) + err := grpc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) CreateUptimeCheckConfig(ctx context.Context, in *CreateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { + out := new(UptimeCheckConfig) + err := grpc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) UpdateUptimeCheckConfig(ctx context.Context, in *UpdateUptimeCheckConfigRequest, opts ...grpc.CallOption) (*UptimeCheckConfig, error) { + out := new(UptimeCheckConfig) + err := grpc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) DeleteUptimeCheckConfig(ctx context.Context, in *DeleteUptimeCheckConfigRequest, opts ...grpc.CallOption) (*google_protobuf4.Empty, error) { + out := new(google_protobuf4.Empty) + err := grpc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +func (c *uptimeCheckServiceClient) ListUptimeCheckIps(ctx context.Context, in *ListUptimeCheckIpsRequest, opts ...grpc.CallOption) (*ListUptimeCheckIpsResponse, error) { + out := new(ListUptimeCheckIpsResponse) + err := grpc.Invoke(ctx, "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", in, out, c.cc, opts...) + if err != nil { + return nil, err + } + return out, nil +} + +// Server API for UptimeCheckService service + +type UptimeCheckServiceServer interface { + // Lists the existing valid uptime check configurations for the project, + // leaving out any invalid configurations. + ListUptimeCheckConfigs(context.Context, *ListUptimeCheckConfigsRequest) (*ListUptimeCheckConfigsResponse, error) + // Gets a single uptime check configuration. + GetUptimeCheckConfig(context.Context, *GetUptimeCheckConfigRequest) (*UptimeCheckConfig, error) + // Creates a new uptime check configuration. + CreateUptimeCheckConfig(context.Context, *CreateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) + // Updates an uptime check configuration. 
You can either replace the entire + // configuration with a new one or replace only certain fields in the current + // configuration by specifying the fields to be updated via `"updateMask"`. + // Returns the updated configuration. + UpdateUptimeCheckConfig(context.Context, *UpdateUptimeCheckConfigRequest) (*UptimeCheckConfig, error) + // Deletes an uptime check configuration. Note that this method will fail + // if the uptime check configuration is referenced by an alert policy or + // other dependent configs that would be rendered invalid by the deletion. + DeleteUptimeCheckConfig(context.Context, *DeleteUptimeCheckConfigRequest) (*google_protobuf4.Empty, error) + // Returns the list of IPs that checkers run from + ListUptimeCheckIps(context.Context, *ListUptimeCheckIpsRequest) (*ListUptimeCheckIpsResponse, error) +} + +func RegisterUptimeCheckServiceServer(s *grpc.Server, srv UptimeCheckServiceServer) { + s.RegisterService(&_UptimeCheckService_serviceDesc, srv) +} + +func _UptimeCheckService_ListUptimeCheckConfigs_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListUptimeCheckConfigsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckConfigs", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).ListUptimeCheckConfigs(ctx, req.(*ListUptimeCheckConfigsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_GetUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(GetUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/GetUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).GetUptimeCheckConfig(ctx, req.(*GetUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_CreateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(CreateUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/CreateUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).CreateUptimeCheckConfig(ctx, req.(*CreateUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_UpdateUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(UpdateUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if 
interceptor == nil { + return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/UpdateUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).UpdateUptimeCheckConfig(ctx, req.(*UpdateUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_DeleteUptimeCheckConfig_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(DeleteUptimeCheckConfigRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/DeleteUptimeCheckConfig", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).DeleteUptimeCheckConfig(ctx, req.(*DeleteUptimeCheckConfigRequest)) + } + return interceptor(ctx, in, info, handler) +} + +func _UptimeCheckService_ListUptimeCheckIps_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(ListUptimeCheckIpsRequest) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/google.monitoring.v3.UptimeCheckService/ListUptimeCheckIps", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(UptimeCheckServiceServer).ListUptimeCheckIps(ctx, req.(*ListUptimeCheckIpsRequest)) + } + return interceptor(ctx, in, info, handler) +} + +var _UptimeCheckService_serviceDesc = grpc.ServiceDesc{ + ServiceName: "google.monitoring.v3.UptimeCheckService", + HandlerType: (*UptimeCheckServiceServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "ListUptimeCheckConfigs", + Handler: _UptimeCheckService_ListUptimeCheckConfigs_Handler, + }, + { + MethodName: "GetUptimeCheckConfig", + Handler: _UptimeCheckService_GetUptimeCheckConfig_Handler, + }, + { + MethodName: "CreateUptimeCheckConfig", + Handler: _UptimeCheckService_CreateUptimeCheckConfig_Handler, + }, + { + MethodName: "UpdateUptimeCheckConfig", + Handler: _UptimeCheckService_UpdateUptimeCheckConfig_Handler, + }, + { + MethodName: "DeleteUptimeCheckConfig", + Handler: _UptimeCheckService_DeleteUptimeCheckConfig_Handler, + }, + { + MethodName: "ListUptimeCheckIps", + Handler: _UptimeCheckService_ListUptimeCheckIps_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "google/monitoring/v3/uptime_service.proto", +} + +func init() { proto.RegisterFile("google/monitoring/v3/uptime_service.proto", fileDescriptor6) } + +var fileDescriptor6 = []byte{ + // 735 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xa4, 0x56, 0xdf, 0x4e, 0x13, 0x4f, + 0x14, 0xce, 0xb4, 0xfc, 0x08, 0x1c, 0xf2, 0xf3, 0xcf, 0xd8, 0x40, 0x5d, 0xa4, 0xa9, 0x35, 0x51, + 0x6c, 0xcc, 0xae, 0xb4, 0x5c, 0x49, 0x24, 0x91, 0xaa, 0x84, 0x44, 0x12, 0x52, 0x04, 0xa2, 0x92, + 0x34, 0x4b, 0x19, 0xd6, 0xb5, 0xed, 0xce, 0xd8, 0x99, 0x25, 0x8a, 0xe1, 0xc6, 0x37, 0x30, 0x5c, + 0x7a, 0x69, 0xe2, 0x05, 0x0f, 0xa0, 0xd7, 
0x5e, 0x99, 0x78, 0x6b, 0x7c, 0x03, 0x1f, 0xc4, 0xec,
+	0xec, 0x2c, 0xa5, 0xed, 0xec, 0xba, 0x8d, 0x77, 0xdd, 0x39, 0x67, 0xce, 0xf9, 0xce, 0xb7, 0xdf,
+	0xf9, 0xba, 0x70, 0xdb, 0xa1, 0xd4, 0x69, 0x13, 0xab, 0x43, 0x3d, 0x57, 0xd0, 0xae, 0xeb, 0x39,
+	0xd6, 0x61, 0xd5, 0xf2, 0x99, 0x70, 0x3b, 0xa4, 0xc1, 0x49, 0xf7, 0xd0, 0x6d, 0x12, 0x93, 0x75,
+	0xa9, 0xa0, 0x38, 0x17, 0xa6, 0x9a, 0xbd, 0x54, 0xf3, 0xb0, 0x6a, 0x5c, 0x53, 0x05, 0x6c, 0xe6,
+	0x5a, 0xb6, 0xe7, 0x51, 0x61, 0x0b, 0x97, 0x7a, 0x3c, 0xbc, 0x63, 0x5c, 0x4f, 0x28, 0xaf, 0x52,
+	0x66, 0x55, 0x8a, 0x7c, 0xda, 0xf3, 0x0f, 0x2c, 0xd2, 0x61, 0xe2, 0xad, 0x0a, 0x16, 0x07, 0x83,
+	0x07, 0x2e, 0x69, 0xef, 0x37, 0x3a, 0x36, 0x6f, 0x85, 0x19, 0x25, 0x0e, 0x73, 0x4f, 0x5c, 0x2e,
+	0xb6, 0x64, 0xc9, 0xda, 0x4b, 0xd2, 0x6c, 0xd5, 0xa8, 0x77, 0xe0, 0x3a, 0xbc, 0x4e, 0x5e, 0xfb,
+	0x84, 0x0b, 0x3c, 0x0d, 0xe3, 0xcc, 0xee, 0x12, 0x4f, 0xe4, 0x51, 0x11, 0xcd, 0x4f, 0xd6, 0xd5,
+	0x13, 0x9e, 0x85, 0x49, 0x66, 0x3b, 0xa4, 0xc1, 0xdd, 0x23, 0x92, 0xcf, 0x16, 0xd1, 0xfc, 0x7f,
+	0xf5, 0x89, 0xe0, 0x60, 0xd3, 0x3d, 0x22, 0x78, 0x0e, 0x40, 0x06, 0x05, 0x6d, 0x11, 0x2f, 0x3f,
+	0x26, 0x2f, 0xca, 0xf4, 0xa7, 0xc1, 0x41, 0xe9, 0x13, 0x82, 0x42, 0x5c, 0x57, 0xce, 0xa8, 0xc7,
+	0x09, 0x7e, 0x06, 0x39, 0xc5, 0x62, 0x33, 0x08, 0x37, 0x9a, 0x61, 0x3c, 0x8f, 0x8a, 0xd9, 0xf9,
+	0xa9, 0xca, 0x2d, 0x53, 0x47, 0xa6, 0x39, 0x54, 0xaf, 0x8e, 0xfd, 0xa1, 0x16, 0xf8, 0x26, 0x5c,
+	0xf4, 0xc8, 0x1b, 0xd1, 0x38, 0x87, 0x30, 0x23, 0x11, 0xfe, 0x1f, 0x1c, 0x6f, 0x9c, 0xa1, 0x5c,
+	0x80, 0xd9, 0x55, 0x32, 0x8c, 0x31, 0x22, 0x06, 0xc3, 0x98, 0x67, 0x77, 0x88, 0xa2, 0x45, 0xfe,
+	0x2e, 0x7d, 0x40, 0x50, 0xa8, 0x75, 0x89, 0x2d, 0x48, 0xec, 0xb5, 0x38, 0x3e, 0x77, 0xe0, 0x8a,
+	0x66, 0x60, 0x89, 0x6c, 0x84, 0x79, 0x2f, 0x0f, 0xcd, 0x5b, 0xfa, 0x82, 0xa0, 0xb0, 0xc5, 0xf6,
+	0x93, 0x30, 0x2d, 0xc1, 0x94, 0x2f, 0x33, 0xa4, 0x32, 0x54, 0x4f, 0x23, 0xea, 0x19, 0x89, 0xc7,
+	0x7c, 0x1c, 0x88, 0x67, 0xdd, 0xe6, 0xad, 0x3a, 0x84, 0xe9, 0xc1, 0xef, 0x38, 0xe0, 0xd9, 0x7f,
+	0x06, 0xbe, 0x08, 0x85, 0x87, 0xa4, 0x4d, 0x12, 0x70, 0xeb, 0x5e, 0xc1, 0x0e, 0x5c, 0x1d, 0x90,
+	0xd6, 0x1a, 0x3b, 0x13, 0x73, 0x9f, 0x68, 0x33, 0x89, 0xa2, 0xcd, 0x0e, 0x8a, 0xf6, 0x04, 0x81,
+	0xa1, 0xab, 0xac, 0x04, 0xbb, 0x0e, 0x97, 0xfa, 0x68, 0x70, 0x59, 0x24, 0xd6, 0x1b, 0x7f, 0xe5,
+	0x60, 0x8d, 0xd5, 0x2f, 0xf8, 0x7d, 0x65, 0xd3, 0x8a, 0xb4, 0xf2, 0x7d, 0x02, 0xf0, 0xb9, 0x4a,
+	0x9b, 0xa1, 0xe5, 0xe0, 0xaf, 0x08, 0xa6, 0xf5, 0x1b, 0x86, 0xab, 0x7a, 0x38, 0x89, 0x2e, 0x60,
+	0x2c, 0x8e, 0x76, 0x29, 0xe4, 0xa4, 0x54, 0x79, 0xff, 0xf3, 0xf7, 0x49, 0xe6, 0x0e, 0x2e, 0x07,
+	0xae, 0xf5, 0x2e, 0x14, 0xfa, 0x7d, 0xd6, 0xa5, 0xaf, 0x48, 0x53, 0x70, 0xab, 0x7c, 0x6c, 0x69,
+	0xb6, 0xf3, 0x33, 0x82, 0x9c, 0x6e, 0xed, 0xf0, 0x82, 0x1e, 0x42, 0xc2, 0x8a, 0x1a, 0x69, 0xd5,
+	0x37, 0x00, 0x34, 0xd0, 0xd1, 0x39, 0x98, 0x1a, 0x94, 0x56, 0xf9, 0x18, 0x7f, 0x43, 0x30, 0x13,
+	0xb3, 0xeb, 0x38, 0x86, 0xae, 0x64, 0x6b, 0x48, 0x0f, 0x77, 0x55, 0xc2, 0x7d, 0x50, 0x1a, 0x81,
+	0xd7, 0x7b, 0xba, 0x25, 0xc5, 0xbf, 0x10, 0xcc, 0xc4, 0x78, 0x43, 0xdc, 0x0c, 0xc9, 0x56, 0x92,
+	0x7e, 0x86, 0x17, 0x72, 0x86, 0xad, 0xca, 0xb2, 0x9c, 0x41, 0x03, 0xce, 0x4c, 0xf5, 0x1a, 0xf4,
+	0x73, 0x7d, 0x44, 0x30, 0x13, 0xe3, 0x1d, 0x71, 0x73, 0x25, 0x5b, 0x8d, 0x31, 0x3d, 0xe4, 0x86,
+	0x8f, 0x82, 0xff, 0xd9, 0x48, 0x39, 0xe5, 0x51, 0x94, 0x73, 0x82, 0x00, 0x0f, 0x3b, 0x09, 0xb6,
+	0x52, 0xed, 0x58, 0xcf, 0xcd, 0x8c, 0xbb, 0xe9, 0x2f, 0xa8, 0x85, 0x34, 0x24, 0xda, 0x1c, 0xc6,
+	0xbd, 0xcf, 0x88, 0x28, 0x67, 0xe5, 0x14, 0x41, 0xbe, 0x49, 0x3b, 0xda, 0x9a, 0x2b, 0xca, 0x63,
+	0x94, 0xbd, 0x6c, 0x04, 0x1c, 0x6c, 0xa0, 0xe7, 0xcb, 0x2a, 0xd7, 0xa1, 0x6d, 0xdb, 0x73, 0x4c,
+	0xda, 0x75, 0x2c, 0x87, 0x78, 0x92, 0x21, 0x2b, 0x0c, 0xd9, 0xcc, 0xe5, 0xfd, 0x5f, 0x2f, 0x4b,
+	0xbd, 0xa7, 0xd3, 0x8c, 0xb1, 0x1a, 0x16, 0xa8, 0xb5, 0xa9, 0xbf, 0x6f, 0xae, 0xf7, 0x5a, 0x6e,
+	0x57, 0x7f, 0x44, 0xc1, 0x5d, 0x19, 0xdc, 0xed, 0x05, 0x77, 0xb7, 0xab, 0x7b, 0xe3, 0xb2, 0x49,
+	0xf5, 0x4f, 0x00, 0x00, 0x00, 0xff, 0xff, 0x4a, 0x1d, 0x15, 0x69, 0x80, 0x09, 0x00, 0x00,
+}
diff --git a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go
index 42693d68..485bf006 100644
--- a/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go
+++ b/vendor/google.golang.org/genproto/protobuf/field_mask/field_mask.pb.go
@@ -228,12 +228,6 @@ const _ = proto.ProtoPackageIsVersion2 // please upgrade the proto package
 //
 // Note that oneof type names ("test_oneof" in this case) cannot be used in
 // paths.
-//
-// ## Field Mask Verification
-//
-// The implementation of the all the API methods, which have any FieldMask type
-// field in the request, should verify the included field paths, and return
-// `INVALID_ARGUMENT` error if any path is duplicated or unmappable.
 type FieldMask struct {
 	// The set of field mask paths.
 	Paths []string `protobuf:"bytes,1,rep,name=paths" json:"paths,omitempty"`
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/README.md b/vendor/gopkg.in/alecthomas/kingpin.v2/README.md
index cd4edeb6..498704c8 100644
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/README.md
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/README.md
@@ -222,7 +222,7 @@ Args:
   <ip>        IP address to ping.
   [<count>]   Number of packets to send
 $ ping 1.2.3.4 5
-Would ping: 1.2.3.4 with timeout 5s and count 0
+Would ping: 1.2.3.4 with timeout 5s and count 5
 ```
 
 From the following source:
@@ -461,7 +461,7 @@ Here are some examples of flags with various permutations:
 
     --name=NAME           // Flag(...).String()
    --name="Harry"        // Flag(...).Default("Harry").String()
-    --name=FULL-NAME      // flag(...).PlaceHolder("FULL-NAME").Default("Harry").String()
+    --name=FULL-NAME      // Flag(...).PlaceHolder("FULL-NAME").Default("Harry").String()
 
 ### Consuming all remaining arguments
 
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/_examples/completion/main.go b/vendor/gopkg.in/alecthomas/kingpin.v2/_examples/completion/main.go
index fe17b525..0bbabe3b 100644
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/_examples/completion/main.go
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/_examples/completion/main.go
@@ -4,7 +4,7 @@ import (
 	"fmt"
 	"os"
 
-	"github.com/alecthomas/kingpin"
+	"gopkg.in/alecthomas/kingpin.v2"
 )
 
 func listHosts() []string {
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/app.go b/vendor/gopkg.in/alecthomas/kingpin.v2/app.go
index a5e8b804..1a1a5eff 100644
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/app.go
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/app.go
@@ -402,6 +402,9 @@ func (a *Application) setDefaults(context *ParseContext) error {
 	flagElements := map[string]*ParseElement{}
 	for _, element := range context.Elements {
 		if flag, ok := element.Clause.(*FlagClause); ok {
+			if flag.name == "help" {
+				return nil
+			}
 			flagElements[flag.name] = element
 		}
 	}
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/app_test.go b/vendor/gopkg.in/alecthomas/kingpin.v2/app_test.go
index 993e25c4..b9083a69 100644
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/app_test.go
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/app_test.go
@@ -3,7 +3,7 @@ package kingpin
 import (
 	"io/ioutil"
 
-	"github.com/alecthomas/assert"
+	"github.com/stretchr/testify/assert"
 
 	"sort"
 	"strings"
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/args_test.go b/vendor/gopkg.in/alecthomas/kingpin.v2/args_test.go
index f2837ec6..c16a6304 100644
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/args_test.go
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/args_test.go
@@ -5,7 +5,7 @@ import (
 	"os"
 	"testing"
 
-	"github.com/alecthomas/assert"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestArgRemainder(t *testing.T) {
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/cmd_test.go b/vendor/gopkg.in/alecthomas/kingpin.v2/cmd_test.go
index b1207be6..d531589b 100644
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/cmd_test.go
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/cmd_test.go
@@ -4,7 +4,7 @@ import (
 	"sort"
 	"strings"
 
-	"github.com/alecthomas/assert"
+	"github.com/stretchr/testify/assert"
 
 	"testing"
 )
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/completions_test.go b/vendor/gopkg.in/alecthomas/kingpin.v2/completions_test.go
index 74656ea8..7da9c060 100644
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/completions_test.go
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/completions_test.go
@@ -3,7 +3,7 @@ package kingpin
 import (
 	"testing"
 
-	"github.com/alecthomas/assert"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestResolveWithBuiltin(t *testing.T) {
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/flags_test.go b/vendor/gopkg.in/alecthomas/kingpin.v2/flags_test.go
index 9c83a1cb..29327e6c 100644
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/flags_test.go
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/flags_test.go
@@ -4,7 +4,7 @@ import (
 	"io/ioutil"
 	"os"
 
-	"github.com/alecthomas/assert"
+	"github.com/stretchr/testify/assert"
 
 	"testing"
 )
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/parser.go b/vendor/gopkg.in/alecthomas/kingpin.v2/parser.go
index efa198af..2a183519 100644
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/parser.go
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/parser.go
@@ -153,6 +153,10 @@ func (p *ParseContext) EOL() bool {
 	return p.Peek().Type == TokenEOL
 }
 
+func (p *ParseContext) Error() bool {
+	return p.Peek().Type == TokenError
+}
+
 // Next token in the parse context.
 func (p *ParseContext) Next() *Token {
 	if len(p.peek) > 0 {
@@ -266,9 +270,12 @@ func (p *ParseContext) matchedCmd(cmd *CmdClause) {
 
 // Expand arguments from a file. Lines starting with # will be treated as comments.
 func ExpandArgsFromFile(filename string) (out []string, err error) {
+	if filename == "" {
+		return nil, fmt.Errorf("expected @ file to expand arguments from")
+	}
 	r, err := os.Open(filename)
 	if err != nil {
-		return nil, err
+		return nil, fmt.Errorf("failed to open arguments file %q: %s", filename, err)
 	}
 	defer r.Close()
 	scanner := bufio.NewScanner(r)
@@ -280,6 +287,9 @@ func ExpandArgsFromFile(filename string) (out []string, err error) {
 		out = append(out, line)
 	}
 	err = scanner.Err()
+	if err != nil {
+		return nil, fmt.Errorf("failed to read arguments from %q: %s", filename, err)
+	}
 	return
 }
 
@@ -291,7 +301,7 @@ func parse(context *ParseContext, app *Application) (err error) {
 	ignoreDefault := context.ignoreDefault
 
 loop:
-	for !context.EOL() {
+	for !context.EOL() && !context.Error() {
 		token := context.Peek()
 
 		switch token.Type {
@@ -365,6 +375,10 @@ loop:
 		}
 	}
 
+	if context.Error() {
+		return fmt.Errorf("%s", context.Peek().Value)
+	}
+
 	if !context.EOL() {
 		return fmt.Errorf("unexpected %s", context.Peek())
 	}
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/parser_test.go b/vendor/gopkg.in/alecthomas/kingpin.v2/parser_test.go
index 71bafe58..43dfde98 100644
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/parser_test.go
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/parser_test.go
@@ -5,7 +5,7 @@ import (
 	"os"
 	"testing"
 
-	"github.com/alecthomas/assert"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestParserExpandFromFile(t *testing.T) {
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/parsers_test.go b/vendor/gopkg.in/alecthomas/kingpin.v2/parsers_test.go
index 04e836c9..81708c7c 100644
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/parsers_test.go
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/parsers_test.go
@@ -6,7 +6,7 @@ import (
 	"net/url"
 	"os"
 
-	"github.com/alecthomas/assert"
+	"github.com/stretchr/testify/assert"
 
 	"testing"
 )
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/usage_test.go b/vendor/gopkg.in/alecthomas/kingpin.v2/usage_test.go
index 441b90c2..2b818570 100644
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/usage_test.go
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/usage_test.go
@@ -5,7 +5,7 @@ import (
 	"strings"
 	"testing"
 
-	"github.com/alecthomas/assert"
+	"github.com/stretchr/testify/assert"
 )
 
 func TestFormatTwoColumns(t *testing.T) {
diff --git a/vendor/gopkg.in/alecthomas/kingpin.v2/values_test.go b/vendor/gopkg.in/alecthomas/kingpin.v2/values_test.go
index d88f6678..e16ee2a4 100644
--- a/vendor/gopkg.in/alecthomas/kingpin.v2/values_test.go
+++ b/vendor/gopkg.in/alecthomas/kingpin.v2/values_test.go
@@ -3,7 +3,7 @@ package kingpin
 import (
 	"net"
 
-	"github.com/alecthomas/assert"
+	"github.com/stretchr/testify/assert"
 
 	"testing"
 )