Update k8s.io components to efc5bbc9e.

A manual change was required in kube2sky.go.
Updates were required in appc/spec and a few other libraries.
pull/98/head
dlorenc 2016-05-23 11:27:28 -07:00
parent d4c23777ed
commit 0552f504dd
334 changed files with 29634 additions and 7947 deletions

Godeps/Godeps.json (generated): 1753 lines changed

File diff suppressed because it is too large.


@ -200,7 +200,7 @@ func (ks *kube2sky) generateRecordsForHeadlessService(subdomain string, e *kapi.
return err
}
if hostRecord, exists := podHostnames[string(endpointIP)]; exists {
if validation.IsDNS1123Label(hostRecord.HostName) {
if len(validation.IsDNS1123Label(hostRecord.HostName)) == 0 {
recordLabel = hostRecord.HostName
}
}
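The manual kube2sky.go change tracks an upstream API change: validation.IsDNS1123Label now returns a slice of error messages instead of a bool, so a valid label is signaled by an empty slice. A minimal sketch of the new contract, assuming the k8s.io/kubernetes/pkg/util/validation import path that kube2sky vendors; the label values are made up for illustration:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/validation"
)

func main() {
	for _, label := range []string{"my-pod-0", "Not_A_Label"} {
		// An empty error slice means the value is a valid DNS-1123 label.
		if errs := validation.IsDNS1123Label(label); len(errs) == 0 {
			fmt.Printf("%q is a valid DNS-1123 label\n", label)
		} else {
			fmt.Printf("%q is invalid: %v\n", label, errs)
		}
	}
}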


@ -152,11 +152,12 @@ func (r Mount) assertValid() error {
// RuntimeApp describes an application referenced in a PodManifest
type RuntimeApp struct {
Name types.ACName `json:"name"`
Image RuntimeImage `json:"image"`
App *types.App `json:"app,omitempty"`
Mounts []Mount `json:"mounts,omitempty"`
Annotations types.Annotations `json:"annotations,omitempty"`
Name types.ACName `json:"name"`
Image RuntimeImage `json:"image"`
App *types.App `json:"app,omitempty"`
ReadOnlyRootFS bool `json:"readOnlyRootFS,omitempty"`
Mounts []Mount `json:"mounts,omitempty"`
Annotations types.Annotations `json:"annotations,omitempty"`
}
// RuntimeImage describes an image referenced in a RuntimeApp
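The new ReadOnlyRootFS field rides along in the pod manifest under the readOnlyRootFS key and is omitted when false. A minimal sketch of how the field serializes, assuming the appc/spec schema and types packages shown above:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/appc/spec/schema"
	"github.com/appc/spec/schema/types"
)

func main() {
	app := schema.RuntimeApp{
		Name:           types.ACName("web"),
		ReadOnlyRootFS: true, // ask the runtime to mount the app's rootfs read-only
	}
	b, err := json.Marshal(app)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(b)) // includes "readOnlyRootFS":true
}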


@ -155,8 +155,8 @@ func NewResourceCPUIsolator(request, limit string) (*ResourceCPU, error) {
res := &ResourceCPU{
ResourceBase{
resourceValue{
Request: req,
Limit: lim,
Request: &req,
Limit: &lim,
},
},
}
@ -209,8 +209,8 @@ func NewResourceMemoryIsolator(request, limit string) (*ResourceMemory, error) {
res := &ResourceMemory{
ResourceBase{
resourceValue{
Request: req,
Limit: lim,
Request: &req,
Limit: &lim,
},
},
}
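Request and Limit are now held as pointers to the parsed quantities, which lets an omitted value be distinguished from an explicit zero. The constructors keep the same signatures, so callers are unaffected; a usage sketch under that assumption:

package main

import (
	"fmt"

	"github.com/appc/spec/schema/types"
)

func main() {
	// Quantity strings are parsed and stored behind pointers internally.
	cpu, err := types.NewResourceCPUIsolator("250m", "500m")
	if err != nil {
		panic(err) // invalid quantity strings
	}
	mem, err := types.NewResourceMemoryIsolator("64Mi", "128Mi")
	if err != nil {
		panic(err)
	}
	fmt.Printf("%+v %+v\n", cpu, mem)
}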


@ -22,7 +22,7 @@ const (
// version represents the canonical version of the appc spec and tooling.
// For now, the schema and tooling is coupled with the spec itself, so
// this must be kept in sync with the VERSION file in the root of the repo.
version string = "0.7.4+git"
version string = "0.8.2+git"
)
var (


@ -1,51 +1,7 @@
package http
import (
"io/ioutil"
"net/http"
"net/http/httptest"
)
import "net/http"
type Client interface {
Do(*http.Request) (*http.Response, error)
}
type HandlerClient struct {
Handler http.Handler
}
func (hc *HandlerClient) Do(r *http.Request) (*http.Response, error) {
w := httptest.NewRecorder()
hc.Handler.ServeHTTP(w, r)
resp := http.Response{
StatusCode: w.Code,
Header: w.Header(),
Body: ioutil.NopCloser(w.Body),
}
return &resp, nil
}
type RequestRecorder struct {
Response *http.Response
Error error
Request *http.Request
}
func (rr *RequestRecorder) Do(req *http.Request) (*http.Response, error) {
rr.Request = req
if rr.Response == nil && rr.Error == nil {
panic("RequestRecorder Response and Error cannot both be nil")
} else if rr.Response != nil && rr.Error != nil {
panic("RequestRecorder Response and Error cannot both be non-nil")
}
return rr.Response, rr.Error
}
func (rr *RequestRecorder) RoundTrip(req *http.Request) (*http.Response, error) {
return rr.Do(req)
}
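With the recorder helpers gone, the package keeps only the one-method Client interface. The standard library's *http.Client already satisfies it, and tests can supply any stub with a matching Do method; a sketch:

package main

import (
	"net/http"

	phttp "github.com/coreos/go-oidc/http"
)

func main() {
	// *http.Client provides Do(*http.Request) (*http.Response, error),
	// so it satisfies the phttp.Client interface as-is.
	var c phttp.Client = http.DefaultClient
	_ = c
}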


@ -3,6 +3,7 @@ package jose
import (
"encoding/json"
"fmt"
"math"
"time"
)
@ -70,13 +71,33 @@ func (c Claims) Int64Claim(name string) (int64, bool, error) {
return v, true, nil
}
func (c Claims) Float64Claim(name string) (float64, bool, error) {
cl, ok := c[name]
if !ok {
return 0, false, nil
}
v, ok := cl.(float64)
if !ok {
vi, ok := cl.(int64)
if !ok {
return 0, false, fmt.Errorf("unable to parse claim as float64: %v", name)
}
v = float64(vi)
}
return v, true, nil
}
func (c Claims) TimeClaim(name string) (time.Time, bool, error) {
v, ok, err := c.Int64Claim(name)
v, ok, err := c.Float64Claim(name)
if !ok || err != nil {
return time.Time{}, ok, err
}
return time.Unix(v, 0).UTC(), true, nil
s := math.Trunc(v)
ns := (v - s) * math.Pow(10, 9)
return time.Unix(int64(s), int64(ns)).UTC(), true, nil
}
func decodeClaims(payload []byte) (Claims, error) {
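TimeClaim now goes through Float64Claim, since JSON numbers decode to float64 and timestamps may carry fractional seconds; the value is split into whole seconds and nanoseconds. A worked sketch with a hypothetical exp claim:

package main

import (
	"fmt"

	"github.com/coreos/go-oidc/jose"
)

func main() {
	claims := jose.Claims{"exp": 1464028048.5}
	t, ok, err := claims.TimeClaim("exp")
	if !ok || err != nil {
		panic("missing or malformed exp claim")
	}
	// 1464028048.5 splits into 1464028048s and 500000000ns.
	fmt.Println(t)
}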


@ -13,6 +13,57 @@ const (
HeaderKeyID = "kid"
)
const (
// Signing Algorithm Header Parameter Values for JWS
// See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-6
AlgHS256 = "HS256"
AlgHS384 = "HS384"
AlgHS512 = "HS512"
AlgRS256 = "RS256"
AlgRS384 = "RS384"
AlgRS512 = "RS512"
AlgES256 = "ES256"
AlgES384 = "ES384"
AlgES512 = "ES512"
AlgPS256 = "PS256"
AlgPS384 = "PS384"
AlgPS512 = "PS512"
AlgNone = "none"
)
const (
// Algorithm Header Parameter Values for JWE
// See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#section-4.1
AlgRSA15 = "RSA1_5"
AlgRSAOAEP = "RSA-OAEP"
AlgRSAOAEP256 = "RSA-OAEP-256"
AlgA128KW = "A128KW"
AlgA192KW = "A192KW"
AlgA256KW = "A256KW"
AlgDir = "dir"
AlgECDHES = "ECDH-ES"
AlgECDHESA128KW = "ECDH-ES+A128KW"
AlgECDHESA192KW = "ECDH-ES+A192KW"
AlgECDHESA256KW = "ECDH-ES+A256KW"
AlgA128GCMKW = "A128GCMKW"
AlgA192GCMKW = "A192GCMKW"
AlgA256GCMKW = "A256GCMKW"
AlgPBES2HS256A128KW = "PBES2-HS256+A128KW"
AlgPBES2HS384A192KW = "PBES2-HS384+A192KW"
AlgPBES2HS512A256KW = "PBES2-HS512+A256KW"
)
const (
// Encryption Algorithm Header Parameter Values for JWE
// See: https://tools.ietf.org/html/draft-ietf-jose-json-web-algorithms-40#page-22
EncA128CBCHS256 = "A128CBC-HS256"
EncA192CBCHS384 = "A192CBC-HS384"
EncA256CBCHS512 = "A256CBC-HS512"
EncA128GCM = "A128GCM"
EncA192GCM = "A192GCM"
EncA256GCM = "A256GCM"
)
type JOSEHeader map[string]string
func (j JOSEHeader) Validate() error {


@ -70,6 +70,10 @@ func (j *JWK) UnmarshalJSON(data []byte) error {
return nil
}
type JWKSet struct {
Keys []JWK `json:"keys"`
}
func decodeExponent(e string) (int, error) {
decE, err := decodeBase64URLPaddingOptional(e)
if err != nil {
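The new JWKSet type models the top-level keys array of a JWK Set document, so the body served at a provider's jwks_uri can be decoded directly. A sketch with an abbreviated, hypothetical document:

package main

import (
	"encoding/json"
	"fmt"

	"github.com/coreos/go-oidc/jose"
)

func main() {
	doc := []byte(`{"keys":[{"kid":"key-1","kty":"RSA","alg":"RS256","use":"sig"}]}`)
	var set jose.JWKSet
	if err := json.Unmarshal(doc, &set); err != nil {
		panic(err)
	}
	fmt.Println(len(set.Keys)) // 1
}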


@ -2,7 +2,6 @@ package jose
import (
"fmt"
"strings"
)
type Verifier interface {
@ -17,7 +16,7 @@ type Signer interface {
}
func NewVerifier(jwk JWK) (Verifier, error) {
if strings.ToUpper(jwk.Type) != "RSA" {
if jwk.Type != "RSA" {
return nil, fmt.Errorf("unsupported key type %q", jwk.Type)
}


@ -7,7 +7,6 @@ import (
_ "crypto/sha256"
"errors"
"fmt"
"strings"
)
type VerifierHMAC struct {
@ -21,7 +20,7 @@ type SignerHMAC struct {
}
func NewVerifierHMAC(jwk JWK) (*VerifierHMAC, error) {
if strings.ToUpper(jwk.Alg) != "HS256" {
if jwk.Alg != "" && jwk.Alg != "HS256" {
return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg)
}


@ -5,7 +5,6 @@ import (
"crypto/rand"
"crypto/rsa"
"fmt"
"strings"
)
type VerifierRSA struct {
@ -20,7 +19,7 @@ type SignerRSA struct {
}
func NewVerifierRSA(jwk JWK) (*VerifierRSA, error) {
if strings.ToUpper(jwk.Alg) != "RS256" {
if jwk.Alg != "" && jwk.Alg != "RS256" {
return nil, fmt.Errorf("unsupported key algorithm %q", jwk.Alg)
}


@ -20,7 +20,7 @@ type PublicKey struct {
}
func (k *PublicKey) MarshalJSON() ([]byte, error) {
return json.Marshal(k.jwk)
return json.Marshal(&k.jwk)
}
func (k *PublicKey) UnmarshalJSON(data []byte) error {
@ -135,7 +135,7 @@ func (s *PrivateKeySet) Active() *PrivateKey {
type GeneratePrivateKeyFunc func() (*PrivateKey, error)
func GeneratePrivateKey() (*PrivateKey, error) {
pk, err := rsa.GenerateKey(rand.Reader, 1024)
pk, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return nil, err
}


@ -1,6 +1,9 @@
package key
import "errors"
import (
"errors"
"sync"
)
var ErrorNoKeys = errors.New("no keys found")
@ -22,6 +25,7 @@ func NewPrivateKeySetRepo() PrivateKeySetRepo {
}
type memPrivateKeySetRepo struct {
mu sync.RWMutex
pks PrivateKeySet
}
@ -33,11 +37,17 @@ func (r *memPrivateKeySetRepo) Set(ks KeySet) error {
return errors.New("nil KeySet")
}
r.mu.Lock()
defer r.mu.Unlock()
r.pks = *pks
return nil
}
func (r *memPrivateKeySetRepo) Get() (KeySet, error) {
r.mu.RLock()
defer r.mu.RUnlock()
if r.pks.keys == nil {
return nil, ErrorNoKeys
}


@ -29,7 +29,7 @@ func (s *KeySetSyncer) Run() chan struct{} {
var failing bool
var next time.Duration
for {
exp, err := sync(s.readable, s.writable, s.clock)
exp, err := syncKeySet(s.readable, s.writable, s.clock)
if err != nil || exp == 0 {
if !failing {
failing = true
@ -62,12 +62,12 @@ func (s *KeySetSyncer) Run() chan struct{} {
}
func Sync(r ReadableKeySetRepo, w WritableKeySetRepo) (time.Duration, error) {
return sync(r, w, clockwork.NewRealClock())
return syncKeySet(r, w, clockwork.NewRealClock())
}
// sync copies the keyset from r to the KeySet at w and returns the duration in which the KeySet will expire.
// syncKeySet copies the keyset from r to the KeySet at w and returns the duration in which the KeySet will expire.
// If keyset has already expired, returns a zero duration.
func sync(r ReadableKeySetRepo, w WritableKeySetRepo, clock clockwork.Clock) (exp time.Duration, err error) {
func syncKeySet(r ReadableKeySetRepo, w WritableKeySetRepo, clock clockwork.Clock) (exp time.Duration, err error) {
var ks KeySet
ks, err = r.Get()
if err != nil {


@ -8,19 +8,55 @@ import (
"mime"
"net/http"
"net/url"
"sort"
"strconv"
"strings"
phttp "github.com/coreos/go-oidc/http"
)
// ResponseTypesEqual compares two response_type values. If either
// contains a space, it is treated as an unordered list. For example,
// comparing "code id_token" and "id_token code" would evaluate to true.
func ResponseTypesEqual(r1, r2 string) bool {
if !strings.Contains(r1, " ") || !strings.Contains(r2, " ") {
// fast route, no split needed
return r1 == r2
}
// split, sort, and compare
r1Fields := strings.Fields(r1)
r2Fields := strings.Fields(r2)
if len(r1Fields) != len(r2Fields) {
return false
}
sort.Strings(r1Fields)
sort.Strings(r2Fields)
for i, r1Field := range r1Fields {
if r1Field != r2Fields[i] {
return false
}
}
return true
}
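In practice this makes response_type comparisons order-insensitive, which matters when matching client metadata against provider metadata. For example:

package main

import (
	"fmt"

	"github.com/coreos/go-oidc/oauth2"
)

func main() {
	fmt.Println(oauth2.ResponseTypesEqual("code id_token", "id_token code")) // true
	fmt.Println(oauth2.ResponseTypesEqual("code", "code id_token"))          // false
}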
const (
ResponseTypeCode = "code"
// OAuth2.0 response types registered by OIDC.
//
// See: https://openid.net/specs/oauth-v2-multiple-response-types-1_0.html#RegistryContents
ResponseTypeCode = "code"
ResponseTypeCodeIDToken = "code id_token"
ResponseTypeCodeIDTokenToken = "code id_token token"
ResponseTypeIDToken = "id_token"
ResponseTypeIDTokenToken = "id_token token"
ResponseTypeToken = "token"
ResponseTypeNone = "none"
)
const (
GrantTypeAuthCode = "authorization_code"
GrantTypeClientCreds = "client_credentials"
GrantTypeUserCreds = "password"
GrantTypeImplicit = "implicit"
GrantTypeRefreshToken = "refresh_token"
@ -105,6 +141,11 @@ func NewClient(hc phttp.Client, cfg Config) (c *Client, err error) {
return
}
// Return the embedded HTTP client
func (c *Client) HttpClient() phttp.Client {
return c.hc
}
// Generate the url for initial redirect to oauth provider.
func (c *Client) AuthCodeURL(state, accessType, prompt string) string {
v := c.commonURLValues()
@ -136,22 +177,24 @@ func (c *Client) commonURLValues() url.Values {
}
}
func (c *Client) newAuthenticatedRequest(url string, values url.Values) (*http.Request, error) {
func (c *Client) newAuthenticatedRequest(urlToken string, values url.Values) (*http.Request, error) {
var req *http.Request
var err error
switch c.authMethod {
case AuthMethodClientSecretPost:
values.Set("client_secret", c.creds.Secret)
req, err = http.NewRequest("POST", url, strings.NewReader(values.Encode()))
req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode()))
if err != nil {
return nil, err
}
case AuthMethodClientSecretBasic:
req, err = http.NewRequest("POST", url, strings.NewReader(values.Encode()))
req, err = http.NewRequest("POST", urlToken, strings.NewReader(values.Encode()))
if err != nil {
return nil, err
}
req.SetBasicAuth(c.creds.ID, c.creds.Secret)
encodedID := url.QueryEscape(c.creds.ID)
encodedSecret := url.QueryEscape(c.creds.Secret)
req.SetBasicAuth(encodedID, encodedSecret)
default:
panic("misconfigured client: auth method not supported")
}
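Escaping the credentials first follows RFC 6749 section 2.3.1, which requires the client id and secret to be form-urlencoded before they are placed in the Basic Authorization header. A standalone sketch of just the encoding step:

package main

import (
	"fmt"
	"net/url"
)

func main() {
	id, secret := "client id", "s3cret/+"
	// RFC 6749 2.3.1: form-encode the credentials before HTTP Basic auth.
	fmt.Println(url.QueryEscape(id))     // client+id
	fmt.Println(url.QueryEscape(secret)) // s3cret%2F%2B
}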
@ -183,6 +226,30 @@ func (c *Client) ClientCredsToken(scope []string) (result TokenResponse, err err
return parseTokenResponse(resp)
}
// UserCredsToken posts the username and password to obtain a token scoped to the OAuth2 client via the "password" grant_type
// May not be supported by all OAuth2 servers.
func (c *Client) UserCredsToken(username, password string) (result TokenResponse, err error) {
v := url.Values{
"scope": {strings.Join(c.scope, " ")},
"grant_type": {GrantTypeUserCreds},
"username": {username},
"password": {password},
}
req, err := c.newAuthenticatedRequest(c.tokenURL.String(), v)
if err != nil {
return
}
resp, err := c.hc.Do(req)
if err != nil {
return
}
defer resp.Body.Close()
return parseTokenResponse(resp)
}
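A usage sketch of the new password grant; the client construction follows the Config fields shown earlier in this file, and the endpoint URLs and credentials are placeholders:

package main

import (
	"fmt"
	"net/http"

	"github.com/coreos/go-oidc/oauth2"
)

func main() {
	client, err := oauth2.NewClient(http.DefaultClient, oauth2.Config{
		Credentials: oauth2.ClientCredentials{ID: "client-id", Secret: "client-secret"},
		AuthURL:     "https://provider.example.com/auth",
		TokenURL:    "https://provider.example.com/token",
		Scope:       []string{"openid"},
	})
	if err != nil {
		panic(err)
	}
	// Exchanges the resource owner's username and password for a token;
	// not every OAuth2 server enables the "password" grant.
	tok, err := client.UserCredsToken("jdoe", "secret-password")
	if err != nil {
		panic(err)
	}
	fmt.Println(tok.AccessToken)
}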
// RequestToken requests a token from the Token Endpoint with the specified grantType.
// If 'grantType' == GrantTypeAuthCode, then 'value' should be the authorization code.
// If 'grantType' == GrantTypeRefreshToken, then 'value' should be the refresh token.


@ -1,9 +1,11 @@
package oidc
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"net/mail"
"net/url"
"sync"
"time"
@ -36,23 +38,520 @@ type ClientIdentity struct {
Metadata ClientMetadata
}
type ClientMetadata struct {
RedirectURLs []url.URL
type JWAOptions struct {
// SigningAlg specifies an JWA alg for signing JWTs.
//
// Specifying this field implies different actions depending on the context. It may
// require objects be serialized and signed as a JWT instead of plain JSON, or
// require an existing JWT object use the specified alg.
//
// See: http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata
SigningAlg string
// EncryptionAlg, if provided, specifies that the returned or sent object be stored
// (or nested) within a JWT object and encrypted with the provided JWA alg.
EncryptionAlg string
// EncryptionEnc specifies the JWA enc algorithm to use with EncryptionAlg. If
// EncryptionAlg is provided and EncryptionEnc is omitted, this field defaults
// to A128CBC-HS256.
//
// If EncryptionEnc is provided EncryptionAlg must also be specified.
EncryptionEnc string
}
func (opt JWAOptions) valid() error {
if opt.EncryptionEnc != "" && opt.EncryptionAlg == "" {
return errors.New("encryption encoding provided with no encryption algorithm")
}
return nil
}
func (opt JWAOptions) defaults() JWAOptions {
if opt.EncryptionAlg != "" && opt.EncryptionEnc == "" {
opt.EncryptionEnc = jose.EncA128CBCHS256
}
return opt
}
var (
// Ensure ClientMetadata satisfies these interfaces.
_ json.Marshaler = &ClientMetadata{}
_ json.Unmarshaler = &ClientMetadata{}
)
// ClientMetadata holds metadata that the authorization server associates
// with a client identifier. The fields range from human-facing display
// strings such as client name, to items that impact the security of the
// protocol, such as the list of valid redirect URIs.
//
// See http://openid.net/specs/openid-connect-registration-1_0.html#ClientMetadata
//
// TODO: support language specific claim representations
// http://openid.net/specs/openid-connect-registration-1_0.html#LanguagesAndScripts
type ClientMetadata struct {
RedirectURIs []url.URL // Required
// A list of OAuth 2.0 "response_type" values that the client wishes to restrict
// itself to. Either "code", "token", or another registered extension.
//
// If omitted, only "code" will be used.
ResponseTypes []string
// A list of OAuth 2.0 grant types the client wishes to restrict itself to.
// The grant type values used by OIDC are "authorization_code", "implicit",
// and "refresh_token".
//
// If omitted, only "authorization_code" will be used.
GrantTypes []string
// "native" or "web". If omitted, "web".
ApplicationType string
// List of email addresses.
Contacts []mail.Address
// Name of client to be presented to the end-user.
ClientName string
// URL that references a logo for the Client application.
LogoURI *url.URL
// URL of the home page of the Client.
ClientURI *url.URL
// Profile data policies and terms of use to be provided to the end user.
PolicyURI *url.URL
TermsOfServiceURI *url.URL
// URL to or the value of the client's JSON Web Key Set document.
JWKSURI *url.URL
JWKS *jose.JWKSet
// URL referencing a file with a single JSON array of redirect URIs.
SectorIdentifierURI *url.URL
SubjectType string
// Options to restrict the JWS alg and enc values used for server responses and requests.
IDTokenResponseOptions JWAOptions
UserInfoResponseOptions JWAOptions
RequestObjectOptions JWAOptions
// Client requested authorization method and signing options for the token endpoint.
//
// Defaults to "client_secret_basic"
TokenEndpointAuthMethod string
TokenEndpointAuthSigningAlg string
// DefaultMaxAge specifies the maximum amount of time in seconds before an authorized
// user must reauthorize.
//
// If 0, no limitation is placed on the maximum.
DefaultMaxAge int64
// RequireAuthTime specifies if the auth_time claim in the ID token is required.
RequireAuthTime bool
// Default Authentication Context Class Reference values for authentication requests.
DefaultACRValues []string
// URI that a third party can use to initiate a login by the relying party.
//
// See: http://openid.net/specs/openid-connect-core-1_0.html#ThirdPartyInitiatedLogin
InitiateLoginURI *url.URL
// Pre-registered request_uri values that may be cached by the server.
RequestURIs []url.URL
}
// Defaults returns a shallow copy of ClientMetadata with default
// values replacing omitted fields.
func (m ClientMetadata) Defaults() ClientMetadata {
if len(m.ResponseTypes) == 0 {
m.ResponseTypes = []string{oauth2.ResponseTypeCode}
}
if len(m.GrantTypes) == 0 {
m.GrantTypes = []string{oauth2.GrantTypeAuthCode}
}
if m.ApplicationType == "" {
m.ApplicationType = "web"
}
if m.TokenEndpointAuthMethod == "" {
m.TokenEndpointAuthMethod = oauth2.AuthMethodClientSecretBasic
}
m.IDTokenResponseOptions = m.IDTokenResponseOptions.defaults()
m.UserInfoResponseOptions = m.UserInfoResponseOptions.defaults()
m.RequestObjectOptions = m.RequestObjectOptions.defaults()
return m
}
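A sketch of the defaulting behavior; the redirect URI is a placeholder. Note how the unexported JWAOptions defaults step fills in A128CBC-HS256 when only an encryption algorithm was requested:

package main

import (
	"fmt"
	"net/url"

	"github.com/coreos/go-oidc/jose"
	"github.com/coreos/go-oidc/oidc"
)

func main() {
	u, err := url.Parse("https://app.example.com/callback")
	if err != nil {
		panic(err)
	}
	meta := oidc.ClientMetadata{
		RedirectURIs: []url.URL{*u},
		IDTokenResponseOptions: oidc.JWAOptions{
			EncryptionAlg: jose.AlgRSAOAEP, // EncryptionEnc deliberately omitted
		},
	}
	meta = meta.Defaults()
	fmt.Println(meta.ResponseTypes)                        // [code]
	fmt.Println(meta.GrantTypes)                           // [authorization_code]
	fmt.Println(meta.TokenEndpointAuthMethod)              // client_secret_basic
	fmt.Println(meta.IDTokenResponseOptions.EncryptionEnc) // A128CBC-HS256
}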
func (m *ClientMetadata) MarshalJSON() ([]byte, error) {
e := m.toEncodableStruct()
return json.Marshal(&e)
}
func (m *ClientMetadata) UnmarshalJSON(data []byte) error {
var e encodableClientMetadata
if err := json.Unmarshal(data, &e); err != nil {
return err
}
meta, err := e.toStruct()
if err != nil {
return err
}
if err := meta.Valid(); err != nil {
return err
}
*m = meta
return nil
}
type encodableClientMetadata struct {
RedirectURIs []string `json:"redirect_uris"` // Required
ResponseTypes []string `json:"response_types,omitempty"`
GrantTypes []string `json:"grant_types,omitempty"`
ApplicationType string `json:"application_type,omitempty"`
Contacts []string `json:"contacts,omitempty"`
ClientName string `json:"client_name,omitempty"`
LogoURI string `json:"logo_uri,omitempty"`
ClientURI string `json:"client_uri,omitempty"`
PolicyURI string `json:"policy_uri,omitempty"`
TermsOfServiceURI string `json:"tos_uri,omitempty"`
JWKSURI string `json:"jwks_uri,omitempty"`
JWKS *jose.JWKSet `json:"jwks,omitempty"`
SectorIdentifierURI string `json:"sector_identifier_uri,omitempty"`
SubjectType string `json:"subject_type,omitempty"`
IDTokenSignedResponseAlg string `json:"id_token_signed_response_alg,omitempty"`
IDTokenEncryptedResponseAlg string `json:"id_token_encrypted_response_alg,omitempty"`
IDTokenEncryptedResponseEnc string `json:"id_token_encrypted_response_enc,omitempty"`
UserInfoSignedResponseAlg string `json:"userinfo_signed_response_alg,omitempty"`
UserInfoEncryptedResponseAlg string `json:"userinfo_encrypted_response_alg,omitempty"`
UserInfoEncryptedResponseEnc string `json:"userinfo_encrypted_response_enc,omitempty"`
RequestObjectSigningAlg string `json:"request_object_signing_alg,omitempty"`
RequestObjectEncryptionAlg string `json:"request_object_encryption_alg,omitempty"`
RequestObjectEncryptionEnc string `json:"request_object_encryption_enc,omitempty"`
TokenEndpointAuthMethod string `json:"token_endpoint_auth_method,omitempty"`
TokenEndpointAuthSigningAlg string `json:"token_endpoint_auth_signing_alg,omitempty"`
DefaultMaxAge int64 `json:"default_max_age,omitempty"`
RequireAuthTime bool `json:"require_auth_time,omitempty"`
DefaultACRValues []string `json:"default_acr_values,omitempty"`
InitiateLoginURI string `json:"initiate_login_uri,omitempty"`
RequestURIs []string `json:"request_uris,omitempty"`
}
func (c *encodableClientMetadata) toStruct() (ClientMetadata, error) {
p := stickyErrParser{}
m := ClientMetadata{
RedirectURIs: p.parseURIs(c.RedirectURIs, "redirect_uris"),
ResponseTypes: c.ResponseTypes,
GrantTypes: c.GrantTypes,
ApplicationType: c.ApplicationType,
Contacts: p.parseEmails(c.Contacts, "contacts"),
ClientName: c.ClientName,
LogoURI: p.parseURI(c.LogoURI, "logo_uri"),
ClientURI: p.parseURI(c.ClientURI, "client_uri"),
PolicyURI: p.parseURI(c.PolicyURI, "policy_uri"),
TermsOfServiceURI: p.parseURI(c.TermsOfServiceURI, "tos_uri"),
JWKSURI: p.parseURI(c.JWKSURI, "jwks_uri"),
JWKS: c.JWKS,
SectorIdentifierURI: p.parseURI(c.SectorIdentifierURI, "sector_identifier_uri"),
SubjectType: c.SubjectType,
TokenEndpointAuthMethod: c.TokenEndpointAuthMethod,
TokenEndpointAuthSigningAlg: c.TokenEndpointAuthSigningAlg,
DefaultMaxAge: c.DefaultMaxAge,
RequireAuthTime: c.RequireAuthTime,
DefaultACRValues: c.DefaultACRValues,
InitiateLoginURI: p.parseURI(c.InitiateLoginURI, "initiate_login_uri"),
RequestURIs: p.parseURIs(c.RequestURIs, "request_uris"),
IDTokenResponseOptions: JWAOptions{
c.IDTokenSignedResponseAlg,
c.IDTokenEncryptedResponseAlg,
c.IDTokenEncryptedResponseEnc,
},
UserInfoResponseOptions: JWAOptions{
c.UserInfoSignedResponseAlg,
c.UserInfoEncryptedResponseAlg,
c.UserInfoEncryptedResponseEnc,
},
RequestObjectOptions: JWAOptions{
c.RequestObjectSigningAlg,
c.RequestObjectEncryptionAlg,
c.RequestObjectEncryptionEnc,
},
}
if p.firstErr != nil {
return ClientMetadata{}, p.firstErr
}
return m, nil
}
// stickyErrParser parses URIs and email addresses. Once it encounters
// a parse error, subsequent calls become no-op.
type stickyErrParser struct {
firstErr error
}
func (p *stickyErrParser) parseURI(s, field string) *url.URL {
if p.firstErr != nil || s == "" {
return nil
}
u, err := url.Parse(s)
if err == nil {
if u.Host == "" {
err = errors.New("no host in URI")
} else if u.Scheme != "http" && u.Scheme != "https" {
err = errors.New("invalid URI scheme")
}
}
if err != nil {
p.firstErr = fmt.Errorf("failed to parse %s: %v", field, err)
return nil
}
return u
}
func (p *stickyErrParser) parseURIs(s []string, field string) []url.URL {
if p.firstErr != nil || len(s) == 0 {
return nil
}
uris := make([]url.URL, len(s))
for i, val := range s {
if val == "" {
p.firstErr = fmt.Errorf("invalid URI in field %s", field)
return nil
}
if u := p.parseURI(val, field); u != nil {
uris[i] = *u
}
}
return uris
}
func (p *stickyErrParser) parseEmails(s []string, field string) []mail.Address {
if p.firstErr != nil || len(s) == 0 {
return nil
}
addrs := make([]mail.Address, len(s))
for i, addr := range s {
if addr == "" {
p.firstErr = fmt.Errorf("invalid email in field %s", field)
return nil
}
a, err := mail.ParseAddress(addr)
if err != nil {
p.firstErr = fmt.Errorf("invalid email in field %s: %v", field, err)
return nil
}
addrs[i] = *a
}
return addrs
}
func (m *ClientMetadata) toEncodableStruct() encodableClientMetadata {
return encodableClientMetadata{
RedirectURIs: urisToStrings(m.RedirectURIs),
ResponseTypes: m.ResponseTypes,
GrantTypes: m.GrantTypes,
ApplicationType: m.ApplicationType,
Contacts: emailsToStrings(m.Contacts),
ClientName: m.ClientName,
LogoURI: uriToString(m.LogoURI),
ClientURI: uriToString(m.ClientURI),
PolicyURI: uriToString(m.PolicyURI),
TermsOfServiceURI: uriToString(m.TermsOfServiceURI),
JWKSURI: uriToString(m.JWKSURI),
JWKS: m.JWKS,
SectorIdentifierURI: uriToString(m.SectorIdentifierURI),
SubjectType: m.SubjectType,
IDTokenSignedResponseAlg: m.IDTokenResponseOptions.SigningAlg,
IDTokenEncryptedResponseAlg: m.IDTokenResponseOptions.EncryptionAlg,
IDTokenEncryptedResponseEnc: m.IDTokenResponseOptions.EncryptionEnc,
UserInfoSignedResponseAlg: m.UserInfoResponseOptions.SigningAlg,
UserInfoEncryptedResponseAlg: m.UserInfoResponseOptions.EncryptionAlg,
UserInfoEncryptedResponseEnc: m.UserInfoResponseOptions.EncryptionEnc,
RequestObjectSigningAlg: m.RequestObjectOptions.SigningAlg,
RequestObjectEncryptionAlg: m.RequestObjectOptions.EncryptionAlg,
RequestObjectEncryptionEnc: m.RequestObjectOptions.EncryptionEnc,
TokenEndpointAuthMethod: m.TokenEndpointAuthMethod,
TokenEndpointAuthSigningAlg: m.TokenEndpointAuthSigningAlg,
DefaultMaxAge: m.DefaultMaxAge,
RequireAuthTime: m.RequireAuthTime,
DefaultACRValues: m.DefaultACRValues,
InitiateLoginURI: uriToString(m.InitiateLoginURI),
RequestURIs: urisToStrings(m.RequestURIs),
}
}
func uriToString(u *url.URL) string {
if u == nil {
return ""
}
return u.String()
}
func urisToStrings(urls []url.URL) []string {
if len(urls) == 0 {
return nil
}
sli := make([]string, len(urls))
for i, u := range urls {
sli[i] = u.String()
}
return sli
}
func emailsToStrings(addrs []mail.Address) []string {
if len(addrs) == 0 {
return nil
}
sli := make([]string, len(addrs))
for i, addr := range addrs {
sli[i] = addr.String()
}
return sli
}
// Valid determines if a ClientMetadata conforms with the OIDC specification.
//
// Valid is called by UnmarshalJSON.
//
// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for
// URLs fields where the OIDC spec requires it. This may change in future releases
// of this package. See: https://github.com/coreos/go-oidc/issues/34
func (m *ClientMetadata) Valid() error {
if len(m.RedirectURLs) == 0 {
if len(m.RedirectURIs) == 0 {
return errors.New("zero redirect URLs")
}
for _, u := range m.RedirectURLs {
validURI := func(u *url.URL, fieldName string) error {
if u.Host == "" {
return fmt.Errorf("no host for uri field %s", fieldName)
}
if u.Scheme != "http" && u.Scheme != "https" {
return errors.New("invalid redirect URL: scheme not http/https")
} else if u.Host == "" {
return errors.New("invalid redirect URL: host empty")
return fmt.Errorf("uri field %s scheme is not http or https", fieldName)
}
return nil
}
uris := []struct {
val *url.URL
name string
}{
{m.LogoURI, "logo_uri"},
{m.ClientURI, "client_uri"},
{m.PolicyURI, "policy_uri"},
{m.TermsOfServiceURI, "tos_uri"},
{m.JWKSURI, "jwks_uri"},
{m.SectorIdentifierURI, "sector_identifier_uri"},
{m.InitiateLoginURI, "initiate_login_uri"},
}
for _, uri := range uris {
if uri.val == nil {
continue
}
if err := validURI(uri.val, uri.name); err != nil {
return err
}
}
uriLists := []struct {
vals []url.URL
name string
}{
{m.RedirectURIs, "redirect_uris"},
{m.RequestURIs, "request_uris"},
}
for _, list := range uriLists {
for _, uri := range list.vals {
if err := validURI(&uri, list.name); err != nil {
return err
}
}
}
options := []struct {
option JWAOptions
name string
}{
{m.IDTokenResponseOptions, "id_token response"},
{m.UserInfoResponseOptions, "userinfo response"},
{m.RequestObjectOptions, "request_object"},
}
for _, option := range options {
if err := option.option.valid(); err != nil {
return fmt.Errorf("invalid JWA values for %s: %v", option.name, err)
}
}
return nil
}
type ClientRegistrationResponse struct {
ClientID string // Required
ClientSecret string
RegistrationAccessToken string
RegistrationClientURI string
// If IsZero is true, unspecified.
ClientIDIssuedAt time.Time
// Time at which the client_secret will expire.
// If IsZero is true, it will not expire.
ClientSecretExpiresAt time.Time
ClientMetadata
}
type encodableClientRegistrationResponse struct {
ClientID string `json:"client_id"` // Required
ClientSecret string `json:"client_secret,omitempty"`
RegistrationAccessToken string `json:"registration_access_token,omitempty"`
RegistrationClientURI string `json:"registration_client_uri,omitempty"`
ClientIDIssuedAt int64 `json:"client_id_issued_at,omitempty"`
// Time at which the client_secret will expire, in seconds since the epoch.
// If 0 it will not expire.
ClientSecretExpiresAt int64 `json:"client_secret_expires_at"` // Required
encodableClientMetadata
}
func unixToSec(t time.Time) int64 {
if t.IsZero() {
return 0
}
return t.Unix()
}
func (c *ClientRegistrationResponse) MarshalJSON() ([]byte, error) {
e := encodableClientRegistrationResponse{
ClientID: c.ClientID,
ClientSecret: c.ClientSecret,
RegistrationAccessToken: c.RegistrationAccessToken,
RegistrationClientURI: c.RegistrationClientURI,
ClientIDIssuedAt: unixToSec(c.ClientIDIssuedAt),
ClientSecretExpiresAt: unixToSec(c.ClientSecretExpiresAt),
encodableClientMetadata: c.ClientMetadata.toEncodableStruct(),
}
return json.Marshal(&e)
}
func secToUnix(sec int64) time.Time {
if sec == 0 {
return time.Time{}
}
return time.Unix(sec, 0)
}
func (c *ClientRegistrationResponse) UnmarshalJSON(data []byte) error {
var e encodableClientRegistrationResponse
if err := json.Unmarshal(data, &e); err != nil {
return err
}
if e.ClientID == "" {
return errors.New("no client_id in client registration response")
}
metadata, err := e.encodableClientMetadata.toStruct()
if err != nil {
return err
}
*c = ClientRegistrationResponse{
ClientID: e.ClientID,
ClientSecret: e.ClientSecret,
RegistrationAccessToken: e.RegistrationAccessToken,
RegistrationClientURI: e.RegistrationClientURI,
ClientIDIssuedAt: secToUnix(e.ClientIDIssuedAt),
ClientSecretExpiresAt: secToUnix(e.ClientSecretExpiresAt),
ClientMetadata: metadata,
}
return nil
}
@ -101,34 +600,12 @@ type Client struct {
redirectURL string
scope []string
keySet key.PublicKeySet
providerSyncer *ProviderConfigSyncer
keySetSyncMutex sync.RWMutex
lastKeySetSync time.Time
}
type providerConfigRepo struct {
mu sync.RWMutex
config ProviderConfig // do not access directly, use Get()
}
func newProviderConfigRepo(pc ProviderConfig) *providerConfigRepo {
return &providerConfigRepo{sync.RWMutex{}, pc}
}
// returns an error to implement ProviderConfigSetter
func (r *providerConfigRepo) Set(cfg ProviderConfig) error {
r.mu.Lock()
defer r.mu.Unlock()
r.config = cfg
return nil
}
func (r *providerConfigRepo) Get() ProviderConfig {
r.mu.RLock()
defer r.mu.RUnlock()
return r.config
}
func (c *Client) Healthy() error {
now := time.Now().UTC()
@ -155,8 +632,8 @@ func (c *Client) OAuthClient() (*oauth2.Client, error) {
ocfg := oauth2.Config{
Credentials: oauth2.ClientCredentials(c.credentials),
RedirectURL: c.redirectURL,
AuthURL: cfg.AuthEndpoint,
TokenURL: cfg.TokenEndpoint,
AuthURL: cfg.AuthEndpoint.String(),
TokenURL: cfg.TokenEndpoint.String(),
Scope: c.scope,
AuthMethod: authMethod,
}
@ -178,9 +655,13 @@ func chooseAuthMethod(cfg ProviderConfig) (string, error) {
return "", errors.New("no supported auth methods")
}
// SyncProviderConfig starts the provider config syncer
func (c *Client) SyncProviderConfig(discoveryURL string) chan struct{} {
r := NewHTTPProviderConfigGetter(c.httpClient, discoveryURL)
return NewProviderConfigSyncer(r, c.providerConfig).Run()
s := NewProviderConfigSyncer(r, c.providerConfig)
stop := s.Run()
s.WaitUntilInitialSync()
return stop
}
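SyncProviderConfig now blocks until the first discovery fetch completes before returning the stop channel, so the client's provider config is populated when it returns. A usage sketch; the issuer URL and credentials are placeholders, and the ClientConfig fields are assumptions about this package's NewClient:

package main

import (
	"github.com/coreos/go-oidc/oidc"
)

func main() {
	client, err := oidc.NewClient(oidc.ClientConfig{
		Credentials: oidc.ClientCredentials{ID: "client-id", Secret: "client-secret"},
		RedirectURL: "https://app.example.com/callback",
	})
	if err != nil {
		panic(err)
	}
	// Returns only after the initial provider config sync; closing the
	// channel stops the background syncer.
	stop := client.SyncProviderConfig("https://accounts.example.com")
	defer close(stop)
	// ... use client ...
}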
func (c *Client) maybeSyncKeys() error {
@ -204,7 +685,7 @@ func (c *Client) maybeSyncKeys() error {
}
cfg := c.providerConfig.Get()
r := NewRemotePublicKeyRepo(c.httpClient, cfg.KeysEndpoint)
r := NewRemotePublicKeyRepo(c.httpClient, cfg.KeysEndpoint.String())
w := &clientKeyRepo{client: c}
_, err := key.Sync(r, w)
c.lastKeySetSync = time.Now().UTC()
@ -299,7 +780,7 @@ func (c *Client) VerifyJWT(jwt jose.JWT) error {
}
v := NewJWTVerifier(
c.providerConfig.Get().Issuer,
c.providerConfig.Get().Issuer.String(),
c.credentials.ID,
c.maybeSyncKeys, keysFunc)
@ -340,3 +821,26 @@ func (c *Client) keysFuncAll() func() []key.PublicKey {
return c.keySet.Keys()
}
}
type providerConfigRepo struct {
mu sync.RWMutex
config ProviderConfig // do not access directly, use Get()
}
func newProviderConfigRepo(pc ProviderConfig) *providerConfigRepo {
return &providerConfigRepo{sync.RWMutex{}, pc}
}
// returns an error to implement ProviderConfigSetter
func (r *providerConfigRepo) Set(cfg ProviderConfig) error {
r.mu.Lock()
defer r.mu.Unlock()
r.config = cfg
return nil
}
func (r *providerConfigRepo) Get() ProviderConfig {
r.mu.RLock()
defer r.mu.RUnlock()
return r.config
}


@ -11,6 +11,11 @@ import (
"github.com/coreos/go-oidc/key"
)
// DefaultPublicKeySetTTL is the default TTL set on the PublicKeySet if no
// Cache-Control header is provided by the JWK Set document endpoint.
const DefaultPublicKeySetTTL = 24 * time.Hour
// NewRemotePublicKeyRepo is responsible for fetching the JWK Set document.
func NewRemotePublicKeyRepo(hc phttp.Client, ep string) *remotePublicKeyRepo {
return &remotePublicKeyRepo{hc: hc, ep: ep}
}
@ -20,6 +25,11 @@ type remotePublicKeyRepo struct {
ep string
}
// Get returns a PublicKeySet fetched from the JWK Set document endpoint. A TTL
// is set on the Key Set to avoid it having to be re-retrieved for every
// verification event. This TTL is typically controlled by the endpoint returning
// a Cache-Control header, but defaults to 24 hours if no Cache-Control header
// is found.
func (r *remotePublicKeyRepo) Get() (key.KeySet, error) {
req, err := http.NewRequest("GET", r.ep, nil)
if err != nil {
@ -48,7 +58,7 @@ func (r *remotePublicKeyRepo) Get() (key.KeySet, error) {
return nil, err
}
if !ok {
return nil, errors.New("HTTP cache headers not set")
ttl = DefaultPublicKeySetTTL
}
exp := time.Now().UTC().Add(ttl)


@ -2,8 +2,12 @@ package oidc
import (
"encoding/json"
"errors"
"fmt"
"net/http"
"net/url"
"strings"
"sync"
"time"
"github.com/coreos/pkg/capnslog"
@ -18,6 +22,26 @@ var (
log = capnslog.NewPackageLogger("github.com/coreos/go-oidc", "http")
)
const (
// Subject Identifier types defined by the OIDC spec. Specifies if the provider
// should provide the same sub claim value to all clients (public) or a unique
// value for each client (pairwise).
//
// See: http://openid.net/specs/openid-connect-core-1_0.html#SubjectIDTypes
SubjectTypePublic = "public"
SubjectTypePairwise = "pairwise"
)
var (
// Default values for omitted provider config fields.
//
// Use ProviderConfig's Defaults method to fill a provider config with these values.
DefaultGrantTypesSupported = []string{oauth2.GrantTypeAuthCode, oauth2.GrantTypeImplicit}
DefaultResponseModesSupported = []string{"query", "fragment"}
DefaultTokenEndpointAuthMethodsSupported = []string{oauth2.AuthMethodClientSecretBasic}
DefaultClaimTypesSupported = []string{"normal"}
)
const (
MaximumProviderConfigSyncInterval = 24 * time.Hour
MinimumProviderConfigSyncInterval = time.Minute
@ -28,29 +52,414 @@ const (
// internally configurable for tests
var minimumProviderConfigSyncInterval = MinimumProviderConfigSyncInterval
var (
// Ensure ProviderConfig satisfies these interfaces.
_ json.Marshaler = &ProviderConfig{}
_ json.Unmarshaler = &ProviderConfig{}
)
// ProviderConfig represents the OpenID Provider Metadata specifying what
// configurations a provider supports.
//
// See: http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
type ProviderConfig struct {
Issuer string `json:"issuer"`
AuthEndpoint string `json:"authorization_endpoint"`
TokenEndpoint string `json:"token_endpoint"`
KeysEndpoint string `json:"jwks_uri"`
ResponseTypesSupported []string `json:"response_types_supported"`
GrantTypesSupported []string `json:"grant_types_supported"`
SubjectTypesSupported []string `json:"subject_types_supported"`
IDTokenAlgValuesSupported []string `json:"id_token_alg_values_supported"`
TokenEndpointAuthMethodsSupported []string `json:"token_endpoint_auth_methods_supported"`
ExpiresAt time.Time `json:"-"`
Issuer *url.URL // Required
AuthEndpoint *url.URL // Required
TokenEndpoint *url.URL // Required if grant types other than "implicit" are supported
UserInfoEndpoint *url.URL
KeysEndpoint *url.URL // Required
RegistrationEndpoint *url.URL
// Servers MAY choose not to advertise some supported scope values even when this
// parameter is used, although those defined in OpenID Core SHOULD be listed, if supported.
ScopesSupported []string
// OAuth2.0 response types supported.
ResponseTypesSupported []string // Required
// OAuth2.0 response modes supported.
//
// If omitted, defaults to DefaultResponseModesSupported.
ResponseModesSupported []string
// OAuth2.0 grant types supported.
//
// If omitted, defaults to DefaultGrantTypesSupported.
GrantTypesSupported []string
ACRValuesSupported []string
// SubjectTypesSupported specifies strategies for providing values for the sub claim.
SubjectTypesSupported []string // Required
// JWA signing and encryption algorithm values supported for ID tokens.
IDTokenSigningAlgValues []string // Required
IDTokenEncryptionAlgValues []string
IDTokenEncryptionEncValues []string
// JWA signing and encryption algorithm values supported for user info responses.
UserInfoSigningAlgValues []string
UserInfoEncryptionAlgValues []string
UserInfoEncryptionEncValues []string
// JWA signing and encryption algorithm values supported for request objects.
ReqObjSigningAlgValues []string
ReqObjEncryptionAlgValues []string
ReqObjEncryptionEncValues []string
TokenEndpointAuthMethodsSupported []string
TokenEndpointAuthSigningAlgValuesSupported []string
DisplayValuesSupported []string
ClaimTypesSupported []string
ClaimsSupported []string
ServiceDocs *url.URL
ClaimsLocalsSupported []string
UILocalsSupported []string
ClaimsParameterSupported bool
RequestParameterSupported bool
RequestURIParamaterSupported bool
RequireRequestURIRegistration bool
Policy *url.URL
TermsOfService *url.URL
// Not part of the OpenID Provider Metadata
ExpiresAt time.Time
}
// Defaults returns a shallow copy of ProviderConfig with default
// values replacing omitted fields.
//
// var cfg oidc.ProviderConfig
// // Fill provider config with default values for omitted fields.
// cfg = cfg.Defaults()
//
func (p ProviderConfig) Defaults() ProviderConfig {
setDefault := func(val *[]string, defaultVal []string) {
if len(*val) == 0 {
*val = defaultVal
}
}
setDefault(&p.GrantTypesSupported, DefaultGrantTypesSupported)
setDefault(&p.ResponseModesSupported, DefaultResponseModesSupported)
setDefault(&p.TokenEndpointAuthMethodsSupported, DefaultTokenEndpointAuthMethodsSupported)
setDefault(&p.ClaimTypesSupported, DefaultClaimTypesSupported)
return p
}
func (p *ProviderConfig) MarshalJSON() ([]byte, error) {
e := p.toEncodableStruct()
return json.Marshal(&e)
}
func (p *ProviderConfig) UnmarshalJSON(data []byte) error {
var e encodableProviderConfig
if err := json.Unmarshal(data, &e); err != nil {
return err
}
conf, err := e.toStruct()
if err != nil {
return err
}
if err := conf.Valid(); err != nil {
return err
}
*p = conf
return nil
}
type encodableProviderConfig struct {
Issuer string `json:"issuer"`
AuthEndpoint string `json:"authorization_endpoint"`
TokenEndpoint string `json:"token_endpoint"`
UserInfoEndpoint string `json:"userinfo_endpoint,omitempty"`
KeysEndpoint string `json:"jwks_uri"`
RegistrationEndpoint string `json:"registration_endpoint,omitempty"`
// Use 'omitempty' for all slices as per OIDC spec:
// "Claims that return multiple values are represented as JSON arrays.
// Claims with zero elements MUST be omitted from the response."
// http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationResponse
ScopesSupported []string `json:"scopes_supported,omitempty"`
ResponseTypesSupported []string `json:"response_types_supported,omitempty"`
ResponseModesSupported []string `json:"response_modes_supported,omitempty"`
GrantTypesSupported []string `json:"grant_types_supported,omitempty"`
ACRValuesSupported []string `json:"acr_values_supported,omitempty"`
SubjectTypesSupported []string `json:"subject_types_supported,omitempty"`
IDTokenSigningAlgValues []string `json:"id_token_signing_alg_values_supported,omitempty"`
IDTokenEncryptionAlgValues []string `json:"id_token_encryption_alg_values_supported,omitempty"`
IDTokenEncryptionEncValues []string `json:"id_token_encryption_enc_values_supported,omitempty"`
UserInfoSigningAlgValues []string `json:"userinfo_signing_alg_values_supported,omitempty"`
UserInfoEncryptionAlgValues []string `json:"userinfo_encryption_alg_values_supported,omitempty"`
UserInfoEncryptionEncValues []string `json:"userinfo_encryption_enc_values_supported,omitempty"`
ReqObjSigningAlgValues []string `json:"request_object_signing_alg_values_supported,omitempty"`
ReqObjEncryptionAlgValues []string `json:"request_object_encryption_alg_values_supported,omitempty"`
ReqObjEncryptionEncValues []string `json:"request_object_encryption_enc_values_supported,omitempty"`
TokenEndpointAuthMethodsSupported []string `json:"token_endpoint_auth_methods_supported,omitempty"`
TokenEndpointAuthSigningAlgValuesSupported []string `json:"token_endpoint_auth_signing_alg_values_supported,omitempty"`
DisplayValuesSupported []string `json:"display_values_supported,omitempty"`
ClaimTypesSupported []string `json:"claim_types_supported,omitempty"`
ClaimsSupported []string `json:"claims_supported,omitempty"`
ServiceDocs string `json:"service_documentation,omitempty"`
ClaimsLocalsSupported []string `json:"claims_locales_supported,omitempty"`
UILocalsSupported []string `json:"ui_locales_supported,omitempty"`
ClaimsParameterSupported bool `json:"claims_parameter_supported,omitempty"`
RequestParameterSupported bool `json:"request_parameter_supported,omitempty"`
RequestURIParamaterSupported bool `json:"request_uri_parameter_supported,omitempty"`
RequireRequestURIRegistration bool `json:"require_request_uri_registration,omitempty"`
Policy string `json:"op_policy_uri,omitempty"`
TermsOfService string `json:"op_tos_uri,omitempty"`
}
func (cfg ProviderConfig) toEncodableStruct() encodableProviderConfig {
return encodableProviderConfig{
Issuer: uriToString(cfg.Issuer),
AuthEndpoint: uriToString(cfg.AuthEndpoint),
TokenEndpoint: uriToString(cfg.TokenEndpoint),
UserInfoEndpoint: uriToString(cfg.UserInfoEndpoint),
KeysEndpoint: uriToString(cfg.KeysEndpoint),
RegistrationEndpoint: uriToString(cfg.RegistrationEndpoint),
ScopesSupported: cfg.ScopesSupported,
ResponseTypesSupported: cfg.ResponseTypesSupported,
ResponseModesSupported: cfg.ResponseModesSupported,
GrantTypesSupported: cfg.GrantTypesSupported,
ACRValuesSupported: cfg.ACRValuesSupported,
SubjectTypesSupported: cfg.SubjectTypesSupported,
IDTokenSigningAlgValues: cfg.IDTokenSigningAlgValues,
IDTokenEncryptionAlgValues: cfg.IDTokenEncryptionAlgValues,
IDTokenEncryptionEncValues: cfg.IDTokenEncryptionEncValues,
UserInfoSigningAlgValues: cfg.UserInfoSigningAlgValues,
UserInfoEncryptionAlgValues: cfg.UserInfoEncryptionAlgValues,
UserInfoEncryptionEncValues: cfg.UserInfoEncryptionEncValues,
ReqObjSigningAlgValues: cfg.ReqObjSigningAlgValues,
ReqObjEncryptionAlgValues: cfg.ReqObjEncryptionAlgValues,
ReqObjEncryptionEncValues: cfg.ReqObjEncryptionEncValues,
TokenEndpointAuthMethodsSupported: cfg.TokenEndpointAuthMethodsSupported,
TokenEndpointAuthSigningAlgValuesSupported: cfg.TokenEndpointAuthSigningAlgValuesSupported,
DisplayValuesSupported: cfg.DisplayValuesSupported,
ClaimTypesSupported: cfg.ClaimTypesSupported,
ClaimsSupported: cfg.ClaimsSupported,
ServiceDocs: uriToString(cfg.ServiceDocs),
ClaimsLocalsSupported: cfg.ClaimsLocalsSupported,
UILocalsSupported: cfg.UILocalsSupported,
ClaimsParameterSupported: cfg.ClaimsParameterSupported,
RequestParameterSupported: cfg.RequestParameterSupported,
RequestURIParamaterSupported: cfg.RequestURIParamaterSupported,
RequireRequestURIRegistration: cfg.RequireRequestURIRegistration,
Policy: uriToString(cfg.Policy),
TermsOfService: uriToString(cfg.TermsOfService),
}
}
func (e encodableProviderConfig) toStruct() (ProviderConfig, error) {
p := stickyErrParser{}
conf := ProviderConfig{
Issuer: p.parseURI(e.Issuer, "issuer"),
AuthEndpoint: p.parseURI(e.AuthEndpoint, "authorization_endpoint"),
TokenEndpoint: p.parseURI(e.TokenEndpoint, "token_endpoint"),
UserInfoEndpoint: p.parseURI(e.UserInfoEndpoint, "userinfo_endpoint"),
KeysEndpoint: p.parseURI(e.KeysEndpoint, "jwks_uri"),
RegistrationEndpoint: p.parseURI(e.RegistrationEndpoint, "registration_endpoint"),
ScopesSupported: e.ScopesSupported,
ResponseTypesSupported: e.ResponseTypesSupported,
ResponseModesSupported: e.ResponseModesSupported,
GrantTypesSupported: e.GrantTypesSupported,
ACRValuesSupported: e.ACRValuesSupported,
SubjectTypesSupported: e.SubjectTypesSupported,
IDTokenSigningAlgValues: e.IDTokenSigningAlgValues,
IDTokenEncryptionAlgValues: e.IDTokenEncryptionAlgValues,
IDTokenEncryptionEncValues: e.IDTokenEncryptionEncValues,
UserInfoSigningAlgValues: e.UserInfoSigningAlgValues,
UserInfoEncryptionAlgValues: e.UserInfoEncryptionAlgValues,
UserInfoEncryptionEncValues: e.UserInfoEncryptionEncValues,
ReqObjSigningAlgValues: e.ReqObjSigningAlgValues,
ReqObjEncryptionAlgValues: e.ReqObjEncryptionAlgValues,
ReqObjEncryptionEncValues: e.ReqObjEncryptionEncValues,
TokenEndpointAuthMethodsSupported: e.TokenEndpointAuthMethodsSupported,
TokenEndpointAuthSigningAlgValuesSupported: e.TokenEndpointAuthSigningAlgValuesSupported,
DisplayValuesSupported: e.DisplayValuesSupported,
ClaimTypesSupported: e.ClaimTypesSupported,
ClaimsSupported: e.ClaimsSupported,
ServiceDocs: p.parseURI(e.ServiceDocs, "service_documentation"),
ClaimsLocalsSupported: e.ClaimsLocalsSupported,
UILocalsSupported: e.UILocalsSupported,
ClaimsParameterSupported: e.ClaimsParameterSupported,
RequestParameterSupported: e.RequestParameterSupported,
RequestURIParamaterSupported: e.RequestURIParamaterSupported,
RequireRequestURIRegistration: e.RequireRequestURIRegistration,
Policy: p.parseURI(e.Policy, "op_policy_uri"),
TermsOfService: p.parseURI(e.TermsOfService, "op_tos_uri"),
}
if p.firstErr != nil {
return ProviderConfig{}, p.firstErr
}
return conf, nil
}
// Empty returns if a ProviderConfig holds no information.
//
// This case generally indicates a ProviderConfigGetter has experienced an error
// and has nothing to report.
func (p ProviderConfig) Empty() bool {
return p.Issuer == ""
return p.Issuer == nil
}
func contains(sli []string, ele string) bool {
for _, s := range sli {
if s == ele {
return true
}
}
return false
}
// Valid determines if a ProviderConfig conforms with the OIDC specification.
// If Valid returns successfully it guarantees required field are non-nil and
// URLs are well formed.
//
// Valid is called by UnmarshalJSON.
//
// NOTE(ericchiang): For development purposes Valid does not mandate 'https' for
// URLs fields where the OIDC spec requires it. This may change in future releases
// of this package. See: https://github.com/coreos/go-oidc/issues/34
func (p ProviderConfig) Valid() error {
grantTypes := p.GrantTypesSupported
if len(grantTypes) == 0 {
grantTypes = DefaultGrantTypesSupported
}
implicitOnly := true
for _, grantType := range grantTypes {
if grantType != oauth2.GrantTypeImplicit {
implicitOnly = false
break
}
}
if len(p.SubjectTypesSupported) == 0 {
return errors.New("missing required field subject_types_supported")
}
if len(p.IDTokenSigningAlgValues) == 0 {
return errors.New("missing required field id_token_signing_alg_values_supported")
}
if len(p.ScopesSupported) != 0 && !contains(p.ScopesSupported, "openid") {
return errors.New("scoped_supported must be unspecified or include 'openid'")
}
if !contains(p.IDTokenSigningAlgValues, "RS256") {
return errors.New("id_token_signing_alg_values_supported must include 'RS256'")
}
if contains(p.TokenEndpointAuthSigningAlgValuesSupported, "none") {
return errors.New("token_endpoint_auth_signing_alg_values_supported cannot include 'none'")
}
uris := []struct {
val *url.URL
name string
required bool
}{
{p.Issuer, "issuer", true},
{p.AuthEndpoint, "authorization_endpoint", true},
{p.TokenEndpoint, "token_endpoint", !implicitOnly},
{p.UserInfoEndpoint, "userinfo_endpoint", false},
{p.KeysEndpoint, "jwks_uri", true},
{p.RegistrationEndpoint, "registration_endpoint", false},
{p.ServiceDocs, "service_documentation", false},
{p.Policy, "op_policy_uri", false},
{p.TermsOfService, "op_tos_uri", false},
}
for _, uri := range uris {
if uri.val == nil {
if !uri.required {
continue
}
return fmt.Errorf("empty value for required uri field %s", uri.name)
}
if uri.val.Host == "" {
return fmt.Errorf("no host for uri field %s", uri.name)
}
if uri.val.Scheme != "http" && uri.val.Scheme != "https" {
return fmt.Errorf("uri field %s schemeis not http or https", uri.name)
}
}
return nil
}
// Supports determines if provider supports a client given their respective metadata.
func (p ProviderConfig) Supports(c ClientMetadata) error {
if err := p.Valid(); err != nil {
return fmt.Errorf("invalid provider config: %v", err)
}
if err := c.Valid(); err != nil {
return fmt.Errorf("invalid client config: %v", err)
}
// Fill default values for omitted fields
c = c.Defaults()
p = p.Defaults()
// Do the supported values list the requested one?
supports := []struct {
supported []string
requested string
name string
}{
{p.IDTokenSigningAlgValues, c.IDTokenResponseOptions.SigningAlg, "id_token_signed_response_alg"},
{p.IDTokenEncryptionAlgValues, c.IDTokenResponseOptions.EncryptionAlg, "id_token_encryption_response_alg"},
{p.IDTokenEncryptionEncValues, c.IDTokenResponseOptions.EncryptionEnc, "id_token_encryption_response_enc"},
{p.UserInfoSigningAlgValues, c.UserInfoResponseOptions.SigningAlg, "userinfo_signed_response_alg"},
{p.UserInfoEncryptionAlgValues, c.UserInfoResponseOptions.EncryptionAlg, "userinfo_encryption_response_alg"},
{p.UserInfoEncryptionEncValues, c.UserInfoResponseOptions.EncryptionEnc, "userinfo_encryption_response_enc"},
{p.ReqObjSigningAlgValues, c.RequestObjectOptions.SigningAlg, "request_object_signing_alg"},
{p.ReqObjEncryptionAlgValues, c.RequestObjectOptions.EncryptionAlg, "request_object_encryption_alg"},
{p.ReqObjEncryptionEncValues, c.RequestObjectOptions.EncryptionEnc, "request_object_encryption_enc"},
}
for _, field := range supports {
if field.requested == "" {
continue
}
if !contains(field.supported, field.requested) {
return fmt.Errorf("provider does not support requested value for field %s", field.name)
}
}
stringsEqual := func(s1, s2 string) bool { return s1 == s2 }
// For lists, are the list of requested values a subset of the supported ones?
supportsAll := []struct {
supported []string
requested []string
name string
// OAuth2.0 response_type can be space separated lists where order doesn't matter.
// For example "id_token token" is the same as "token id_token"
// Support a custom compare method.
comp func(s1, s2 string) bool
}{
{p.GrantTypesSupported, c.GrantTypes, "grant_types", stringsEqual},
{p.ResponseTypesSupported, c.ResponseTypes, "response_type", oauth2.ResponseTypesEqual},
}
for _, field := range supportsAll {
requestLoop:
for _, req := range field.requested {
for _, sup := range field.supported {
if field.comp(req, sup) {
continue requestLoop
}
}
return fmt.Errorf("provider does not support requested value for field %s", field.name)
}
}
// TODO(ericchiang): Are there more checks we feel comfortable with being strict about?
return nil
}
func (p ProviderConfig) SupportsGrantType(grantType string) bool {
var supported []string
if len(p.GrantTypesSupported) == 0 {
// If omitted, the default value is ["authorization_code", "implicit"].
// http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderMetadata
supported = []string{oauth2.GrantTypeAuthCode, oauth2.GrantTypeImplicit}
supported = DefaultGrantTypesSupported
} else {
supported = p.GrantTypesSupported
}
@ -75,6 +484,9 @@ type ProviderConfigSyncer struct {
from ProviderConfigGetter
to ProviderConfigSetter
clock clockwork.Clock
initialSyncDone bool
initialSyncWait sync.WaitGroup
}
func NewProviderConfigSyncer(from ProviderConfigGetter, to ProviderConfigSetter) *ProviderConfigSyncer {
@ -91,6 +503,7 @@ func (s *ProviderConfigSyncer) Run() chan struct{} {
var next pcsStepper
next = &pcsStepNext{aft: time.Duration(0)}
s.initialSyncWait.Add(1)
go func() {
for {
select {
@ -105,6 +518,10 @@ func (s *ProviderConfigSyncer) Run() chan struct{} {
return stop
}
func (s *ProviderConfigSyncer) WaitUntilInitialSync() {
s.initialSyncWait.Wait()
}
func (s *ProviderConfigSyncer) sync() (time.Duration, error) {
cfg, err := s.from.Get()
if err != nil {
@ -115,6 +532,11 @@ func (s *ProviderConfigSyncer) sync() (time.Duration, error) {
return 0, fmt.Errorf("error setting provider config: %v", err)
}
if !s.initialSyncDone {
s.initialSyncWait.Done()
s.initialSyncDone = true
}
log.Infof("Updating provider config: config=%#v", cfg)
return nextSyncAfter(cfg.ExpiresAt, s.clock), nil
@ -197,7 +619,11 @@ func NewHTTPProviderConfigGetter(hc phttp.Client, issuerURL string) *httpProvide
}
func (r *httpProviderConfigGetter) Get() (cfg ProviderConfig, err error) {
req, err := http.NewRequest("GET", r.issuerURL+discoveryConfigPath, nil)
// If the Issuer value contains a path component, any terminating / MUST be removed before
// appending /.well-known/openid-configuration.
// https://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationRequest
discoveryURL := strings.TrimSuffix(r.issuerURL, "/") + discoveryConfigPath
req, err := http.NewRequest("GET", discoveryURL, nil)
if err != nil {
return
}
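A worked example of the trimming, using a hypothetical issuer with a path component; the constant's value follows the discovery spec:

package main

import (
	"fmt"
	"strings"
)

func main() {
	const discoveryConfigPath = "/.well-known/openid-configuration"
	issuer := "https://example.com/base/" // terminating slash must be removed first
	fmt.Println(strings.TrimSuffix(issuer, "/") + discoveryConfigPath)
	// Output: https://example.com/base/.well-known/openid-configuration
}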
@ -223,7 +649,7 @@ func (r *httpProviderConfigGetter) Get() (cfg ProviderConfig, err error) {
// The issuer value returned MUST be identical to the Issuer URL that was directly used to retrieve the configuration information.
// http://openid.net/specs/openid-connect-discovery-1_0.html#ProviderConfigurationValidation
if !urlEqual(cfg.Issuer, r.issuerURL) {
if !urlEqual(cfg.Issuer.String(), r.issuerURL) {
err = fmt.Errorf(`"issuer" in config (%v) does not match provided issuer URL (%v)`, cfg.Issuer, r.issuerURL)
return
}


@ -67,6 +67,15 @@ func (t *AuthenticatedTransport) verifiedJWT() (jose.JWT, error) {
return t.jwt, nil
}
// SetJWT sets the JWT held by the Transport.
// This is useful for cases in which you want to set an initial JWT.
func (t *AuthenticatedTransport) SetJWT(jwt jose.JWT) {
t.mu.Lock()
defer t.mu.Unlock()
t.jwt = jwt
}
func (t *AuthenticatedTransport) RoundTrip(r *http.Request) (*http.Response, error) {
jwt, err := t.verifiedJWT()
if err != nil {


@ -59,8 +59,8 @@ func NewClaims(iss, sub string, aud interface{}, iat, exp time.Time) jose.Claims
"iss": iss,
"sub": sub,
"aud": aud,
"iat": float64(iat.Unix()),
"exp": float64(exp.Unix()),
"iat": iat.Unix(),
"exp": exp.Unix(),
}
}

vendor/github.com/robfig/cron/.gitignore (generated, vendored, new file): 22 lines

@ -0,0 +1,22 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so
# Folders
_obj
_test
# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out
*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*
_testmain.go
*.exe

vendor/github.com/robfig/cron/.travis.yml (generated, vendored, new file): 1 line

@ -0,0 +1 @@
language: go

vendor/github.com/robfig/cron/LICENSE (generated, vendored, new file): 21 lines

@ -0,0 +1,21 @@
Copyright (C) 2012 Rob Figueiredo
All Rights Reserved.
MIT LICENSE
Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

vendor/github.com/robfig/cron/README.md (generated, vendored, new file): 2 lines

@ -0,0 +1,2 @@
[![GoDoc](http://godoc.org/github.com/robfig/cron?status.png)](http://godoc.org/github.com/robfig/cron)
[![Build Status](https://travis-ci.org/robfig/cron.svg?branch=master)](https://travis-ci.org/robfig/cron)

vendor/github.com/robfig/cron/constantdelay.go (generated, vendored, new file): 27 lines

@ -0,0 +1,27 @@
package cron
import "time"
// ConstantDelaySchedule represents a simple recurring duty cycle, e.g. "Every 5 minutes".
// It does not support jobs more frequent than once a second.
type ConstantDelaySchedule struct {
Delay time.Duration
}
// Every returns a crontab Schedule that activates once every duration.
// Delays of less than a second are not supported (will round up to 1 second).
// Any fields less than a Second are truncated.
func Every(duration time.Duration) ConstantDelaySchedule {
if duration < time.Second {
duration = time.Second
}
return ConstantDelaySchedule{
Delay: duration - time.Duration(duration.Nanoseconds())%time.Second,
}
}
// Next returns the next time this should be run.
// This rounds so that the next activation time will be on the second.
func (schedule ConstantDelaySchedule) Next(t time.Time) time.Time {
return t.Add(schedule.Delay - time.Duration(t.Nanosecond())*time.Nanosecond)
}
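A quick usage sketch of the constant-delay schedule (the interval and job body are illustrative):

    c := cron.New()
    c.Schedule(cron.Every(5*time.Minute), cron.FuncJob(func() {
        fmt.Println("tick") // fires every 5 minutes, aligned to the second
    }))
    c.Start()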

227
vendor/github.com/robfig/cron/cron.go generated vendored Normal file
View File

@ -0,0 +1,227 @@
// This library implements a cron spec parser and runner. See the README for
// more details.
package cron
import (
"log"
"runtime"
"sort"
"time"
)
// Cron keeps track of any number of entries, invoking the associated func as
// specified by the schedule. It may be started, stopped, and the entries may
// be inspected while running.
type Cron struct {
entries []*Entry
stop chan struct{}
add chan *Entry
snapshot chan []*Entry
running bool
ErrorLog *log.Logger
}
// Job is an interface for submitted cron jobs.
type Job interface {
Run()
}
// The Schedule describes a job's duty cycle.
type Schedule interface {
// Return the next activation time, later than the given time.
// Next is invoked initially, and then each time the job is run.
Next(time.Time) time.Time
}
// Entry consists of a schedule and the func to execute on that schedule.
type Entry struct {
// The schedule on which this job should be run.
Schedule Schedule
// The next time the job will run. This is the zero time if Cron has not been
// started or this entry's schedule is unsatisfiable.
Next time.Time
// The last time this job was run. This is the zero time if the job has never
// been run.
Prev time.Time
// The Job to run.
Job Job
}
// byTime is a wrapper for sorting the entry array by time
// (with zero time at the end).
type byTime []*Entry
func (s byTime) Len() int { return len(s) }
func (s byTime) Swap(i, j int) { s[i], s[j] = s[j], s[i] }
func (s byTime) Less(i, j int) bool {
// Two zero times should return false.
// Otherwise, zero is "greater" than any other time.
// (To sort it at the end of the list.)
if s[i].Next.IsZero() {
return false
}
if s[j].Next.IsZero() {
return true
}
return s[i].Next.Before(s[j].Next)
}
// New returns a new Cron job runner.
func New() *Cron {
return &Cron{
entries: nil,
add: make(chan *Entry),
stop: make(chan struct{}),
snapshot: make(chan []*Entry),
running: false,
ErrorLog: nil,
}
}
// A wrapper that turns a func() into a cron.Job
type FuncJob func()
func (f FuncJob) Run() { f() }
// AddFunc adds a func to the Cron to be run on the given schedule.
func (c *Cron) AddFunc(spec string, cmd func()) error {
return c.AddJob(spec, FuncJob(cmd))
}
// AddJob adds a Job to the Cron to be run on the given schedule.
func (c *Cron) AddJob(spec string, cmd Job) error {
schedule, err := Parse(spec)
if err != nil {
return err
}
c.Schedule(schedule, cmd)
return nil
}
// Schedule adds a Job to the Cron to be run on the given schedule.
func (c *Cron) Schedule(schedule Schedule, cmd Job) {
entry := &Entry{
Schedule: schedule,
Job: cmd,
}
if !c.running {
c.entries = append(c.entries, entry)
return
}
c.add <- entry
}
// Entries returns a snapshot of the cron entries.
func (c *Cron) Entries() []*Entry {
if c.running {
c.snapshot <- nil
x := <-c.snapshot
return x
}
return c.entrySnapshot()
}
// Start the cron scheduler in its own go-routine.
func (c *Cron) Start() {
c.running = true
go c.run()
}
func (c *Cron) runWithRecovery(j Job) {
defer func() {
if r := recover(); r != nil {
const size = 64 << 10
buf := make([]byte, size)
buf = buf[:runtime.Stack(buf, false)]
c.logf("cron: panic running job: %v\n%s", r, buf)
}
}()
j.Run()
}
// Run the scheduler. This is private due to the need to synchronize
// access to the 'running' state variable.
func (c *Cron) run() {
// Figure out the next activation times for each entry.
now := time.Now().Local()
for _, entry := range c.entries {
entry.Next = entry.Schedule.Next(now)
}
for {
// Determine the next entry to run.
sort.Sort(byTime(c.entries))
var effective time.Time
if len(c.entries) == 0 || c.entries[0].Next.IsZero() {
// If there are no entries yet, just sleep - it still handles new entries
// and stop requests.
effective = now.AddDate(10, 0, 0)
} else {
effective = c.entries[0].Next
}
select {
case now = <-time.After(effective.Sub(now)):
// Run every entry whose next time was this effective time.
for _, e := range c.entries {
if e.Next != effective {
break
}
go c.runWithRecovery(e.Job)
e.Prev = e.Next
e.Next = e.Schedule.Next(effective)
}
continue
case newEntry := <-c.add:
c.entries = append(c.entries, newEntry)
newEntry.Next = newEntry.Schedule.Next(time.Now().Local())
case <-c.snapshot:
c.snapshot <- c.entrySnapshot()
case <-c.stop:
return
}
// 'now' should be updated after newEntry and snapshot cases.
now = time.Now().Local()
}
}
// Logs an error to stderr or to the configured error log
func (c *Cron) logf(format string, args ...interface{}) {
if c.ErrorLog != nil {
c.ErrorLog.Printf(format, args...)
} else {
log.Printf(format, args...)
}
}
// Stop stops the cron scheduler if it is running; otherwise it does nothing.
func (c *Cron) Stop() {
if !c.running {
return
}
c.stop <- struct{}{}
c.running = false
}
// entrySnapshot returns a copy of the current cron entry list.
func (c *Cron) entrySnapshot() []*Entry {
entries := []*Entry{}
for _, e := range c.entries {
entries = append(entries, &Entry{
Schedule: e.Schedule,
Next: e.Next,
Prev: e.Prev,
Job: e.Job,
})
}
return entries
}

129
vendor/github.com/robfig/cron/doc.go generated vendored Normal file
View File

@ -0,0 +1,129 @@
/*
Package cron implements a cron spec parser and job runner.
Usage
Callers may register Funcs to be invoked on a given schedule. Cron will run
them in their own goroutines.
c := cron.New()
c.AddFunc("0 30 * * * *", func() { fmt.Println("Every hour on the half hour") })
c.AddFunc("@hourly", func() { fmt.Println("Every hour") })
c.AddFunc("@every 1h30m", func() { fmt.Println("Every hour thirty") })
c.Start()
..
// Funcs are invoked in their own goroutine, asynchronously.
...
// Funcs may also be added to a running Cron
c.AddFunc("@daily", func() { fmt.Println("Every day") })
..
// Inspect the cron job entries' next and previous run times.
inspect(c.Entries())
..
c.Stop() // Stop the scheduler (does not stop any jobs already running).
CRON Expression Format
A cron expression represents a set of times, using 6 space-separated fields.
Field name | Mandatory? | Allowed values | Allowed special characters
---------- | ---------- | -------------- | --------------------------
Seconds | Yes | 0-59 | * / , -
Minutes | Yes | 0-59 | * / , -
Hours | Yes | 0-23 | * / , -
Day of month | Yes | 1-31 | * / , - ?
Month | Yes | 1-12 or JAN-DEC | * / , -
Day of week | Yes | 0-6 or SUN-SAT | * / , - ?
Note: Month and Day-of-week field values are case insensitive. "SUN", "Sun",
and "sun" are equally accepted.
Special Characters
Asterisk ( * )
The asterisk indicates that the cron expression will match for all values of the
field; e.g., using an asterisk in the 5th field (month) would indicate every
month.
Slash ( / )
Slashes are used to describe increments of ranges. For example, 3-59/15 in the
2nd field (minutes) would indicate the 3rd minute of the hour and every 15
minutes thereafter. The form "*\/..." is equivalent to the form "first-last/...",
that is, an increment over the largest possible range of the field. The form
"N/..." is accepted as meaning "N-MAX/...", that is, starting at N, use the
increment until the end of that specific range. It does not wrap around.
Comma ( , )
Commas are used to separate items of a list. For example, using "MON,WED,FRI" in
the 6th field (day of week) would mean Mondays, Wednesdays and Fridays.
Hyphen ( - )
Hyphens are used to define ranges. For example, 9-17 would indicate every
hour between 9am and 5pm inclusive.
Question mark ( ? )
Question mark may be used instead of '*' for leaving either day-of-month or
day-of-week blank.
Predefined schedules
You may use one of several pre-defined schedules in place of a cron expression.
Entry | Description | Equivalent To
----- | ----------- | -------------
@yearly (or @annually) | Run once a year, midnight, Jan. 1st | 0 0 0 1 1 *
@monthly | Run once a month, midnight, first of month | 0 0 0 1 * *
@weekly | Run once a week, midnight on Sunday | 0 0 0 * * 0
@daily (or @midnight) | Run once a day, midnight | 0 0 0 * * *
@hourly | Run once an hour, beginning of hour | 0 0 * * * *
Intervals
You may also schedule a job to execute at fixed intervals. This is supported by
formatting the cron spec like this:
@every <duration>
where "duration" is a string accepted by time.ParseDuration
(http://golang.org/pkg/time/#ParseDuration).
For example, "@every 1h30m10s" would indicate a schedule that activates every
1 hour, 30 minutes, 10 seconds.
Note: The interval does not take the job runtime into account. For example,
if a job takes 3 minutes to run, and it is scheduled to run every 5 minutes,
it will have only 2 minutes of idle time between each run.
Time zones
All interpretation and scheduling are done in the machine's local time zone (as
provided by the Go time package, http://www.golang.org/pkg/time).
Be aware that jobs scheduled during daylight-savings leap-ahead transitions will
not be run!
Thread safety
Since the Cron service runs concurrently with the calling code, some amount of
care must be taken to ensure proper synchronization.
All cron methods are designed to be correctly synchronized as long as the caller
ensures that invocations have a clear happens-before ordering between them.
Implementation
Cron entries are stored in an array, sorted by their next activation time. Cron
sleeps until the next job is due to be run.
Upon waking:
- it runs each entry that is active on that second
- it calculates the next run times for the jobs that were run
- it re-sorts the array of entries by next activation time.
- it goes to sleep until the soonest job.
*/
package cron
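To connect this doc to the parser below, a short sketch exercising a full 6-field spec, a descriptor, and an interval (printed strings are illustrative; AddFunc errors are ignored here, as in the examples above):

    c := cron.New()
    // field order: second minute hour dom month dow
    c.AddFunc("0 3-59/15 * * * *", func() { fmt.Println("minute 3, then every 15 minutes") })
    c.AddFunc("@hourly", func() { fmt.Println("top of each hour") })
    c.AddFunc("@every 1h30m", func() { fmt.Println("every 90 minutes") })
    c.Start()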

231
vendor/github.com/robfig/cron/parser.go generated vendored Normal file
View File

@ -0,0 +1,231 @@
package cron
import (
"fmt"
"log"
"math"
"strconv"
"strings"
"time"
)
// Parse returns a new crontab schedule representing the given spec.
// It returns a descriptive error if the spec is not valid.
//
// It accepts
// - Full crontab specs, e.g. "* * * * * ?"
// - Descriptors, e.g. "@midnight", "@every 1h30m"
func Parse(spec string) (_ Schedule, err error) {
// Convert panics into errors
defer func() {
if recovered := recover(); recovered != nil {
err = fmt.Errorf("%v", recovered)
}
}()
if spec[0] == '@' {
return parseDescriptor(spec), nil
}
// Split on whitespace. We require 5 or 6 fields.
// (second) (minute) (hour) (day of month) (month) (day of week, optional)
fields := strings.Fields(spec)
if len(fields) != 5 && len(fields) != 6 {
log.Panicf("Expected 5 or 6 fields, found %d: %s", len(fields), spec)
}
// If a sixth field is not provided (DayOfWeek), then it is equivalent to star.
if len(fields) == 5 {
fields = append(fields, "*")
}
schedule := &SpecSchedule{
Second: getField(fields[0], seconds),
Minute: getField(fields[1], minutes),
Hour: getField(fields[2], hours),
Dom: getField(fields[3], dom),
Month: getField(fields[4], months),
Dow: getField(fields[5], dow),
}
return schedule, nil
}
// getField returns an Int with the bits set representing all of the times that
// the field represents. A "field" is a comma-separated list of "ranges".
func getField(field string, r bounds) uint64 {
// list = range {"," range}
var bits uint64
ranges := strings.FieldsFunc(field, func(r rune) bool { return r == ',' })
for _, expr := range ranges {
bits |= getRange(expr, r)
}
return bits
}
// getRange returns the bits indicated by the given expression:
// number | number "-" number [ "/" number ]
func getRange(expr string, r bounds) uint64 {
var (
start, end, step uint
rangeAndStep = strings.Split(expr, "/")
lowAndHigh = strings.Split(rangeAndStep[0], "-")
singleDigit = len(lowAndHigh) == 1
)
var extra_star uint64
if lowAndHigh[0] == "*" || lowAndHigh[0] == "?" {
start = r.min
end = r.max
extra_star = starBit
} else {
start = parseIntOrName(lowAndHigh[0], r.names)
switch len(lowAndHigh) {
case 1:
end = start
case 2:
end = parseIntOrName(lowAndHigh[1], r.names)
default:
log.Panicf("Too many hyphens: %s", expr)
}
}
switch len(rangeAndStep) {
case 1:
step = 1
case 2:
step = mustParseInt(rangeAndStep[1])
// Special handling: "N/step" means "N-max/step".
if singleDigit {
end = r.max
}
default:
log.Panicf("Too many slashes: %s", expr)
}
if start < r.min {
log.Panicf("Beginning of range (%d) below minimum (%d): %s", start, r.min, expr)
}
if end > r.max {
log.Panicf("End of range (%d) above maximum (%d): %s", end, r.max, expr)
}
if start > end {
log.Panicf("Beginning of range (%d) beyond end of range (%d): %s", start, end, expr)
}
return getBits(start, end, step) | extra_star
}
// parseIntOrName returns the (possibly-named) integer contained in expr.
func parseIntOrName(expr string, names map[string]uint) uint {
if names != nil {
if namedInt, ok := names[strings.ToLower(expr)]; ok {
return namedInt
}
}
return mustParseInt(expr)
}
// mustParseInt parses the given expression as an int or panics.
func mustParseInt(expr string) uint {
num, err := strconv.Atoi(expr)
if err != nil {
log.Panicf("Failed to parse int from %s: %s", expr, err)
}
if num < 0 {
log.Panicf("Negative number (%d) not allowed: %s", num, expr)
}
return uint(num)
}
// getBits sets all bits in the range [min, max], modulo the given step size.
func getBits(min, max, step uint) uint64 {
var bits uint64
// If step is 1, use shifts.
if step == 1 {
return ^(math.MaxUint64 << (max + 1)) & (math.MaxUint64 << min)
}
// Else, use a simple loop.
for i := min; i <= max; i += step {
bits |= 1 << i
}
return bits
}
// all returns all bits within the given bounds (plus the star bit).
func all(r bounds) uint64 {
return getBits(r.min, r.max, 1) | starBit
}
// parseDescriptor returns a pre-defined schedule for the expression, or panics
// if none matches.
func parseDescriptor(spec string) Schedule {
switch spec {
case "@yearly", "@annually":
return &SpecSchedule{
Second: 1 << seconds.min,
Minute: 1 << minutes.min,
Hour: 1 << hours.min,
Dom: 1 << dom.min,
Month: 1 << months.min,
Dow: all(dow),
}
case "@monthly":
return &SpecSchedule{
Second: 1 << seconds.min,
Minute: 1 << minutes.min,
Hour: 1 << hours.min,
Dom: 1 << dom.min,
Month: all(months),
Dow: all(dow),
}
case "@weekly":
return &SpecSchedule{
Second: 1 << seconds.min,
Minute: 1 << minutes.min,
Hour: 1 << hours.min,
Dom: all(dom),
Month: all(months),
Dow: 1 << dow.min,
}
case "@daily", "@midnight":
return &SpecSchedule{
Second: 1 << seconds.min,
Minute: 1 << minutes.min,
Hour: 1 << hours.min,
Dom: all(dom),
Month: all(months),
Dow: all(dow),
}
case "@hourly":
return &SpecSchedule{
Second: 1 << seconds.min,
Minute: 1 << minutes.min,
Hour: all(hours),
Dom: all(dom),
Month: all(months),
Dow: all(dow),
}
}
const every = "@every "
if strings.HasPrefix(spec, every) {
duration, err := time.ParseDuration(spec[len(every):])
if err != nil {
log.Panicf("Failed to parse duration %s: %s", spec, err)
}
return Every(duration)
}
log.Panicf("Unrecognized descriptor: %s", spec)
return nil
}
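The bitset encoding above is small enough to verify by hand. As a hypothetical walkthrough, written as if inside package cron since getBits and the bounds are unexported: "3-59/15" parses to start=3, end=59, step=15, so exactly bits 3, 18, 33 and 48 are set, and matching in the runner reduces to a single AND:

    bits := getBits(3, 59, 15) // what getRange("3-59/15", minutes) produces
    for m := minutes.min; m <= minutes.max; m++ {
        if bits&(1<<m) != 0 {
            fmt.Println(m) // prints 3, 18, 33, 48
        }
    }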

159
vendor/github.com/robfig/cron/spec.go generated vendored Normal file
View File

@ -0,0 +1,159 @@
package cron
import "time"
// SpecSchedule specifies a duty cycle (to the second granularity), based on a
// traditional crontab specification. It is computed initially and stored as bit sets.
type SpecSchedule struct {
Second, Minute, Hour, Dom, Month, Dow uint64
}
// bounds provides a range of acceptable values (plus a map of name to value).
type bounds struct {
min, max uint
names map[string]uint
}
// The bounds for each field.
var (
seconds = bounds{0, 59, nil}
minutes = bounds{0, 59, nil}
hours = bounds{0, 23, nil}
dom = bounds{1, 31, nil}
months = bounds{1, 12, map[string]uint{
"jan": 1,
"feb": 2,
"mar": 3,
"apr": 4,
"may": 5,
"jun": 6,
"jul": 7,
"aug": 8,
"sep": 9,
"oct": 10,
"nov": 11,
"dec": 12,
}}
dow = bounds{0, 6, map[string]uint{
"sun": 0,
"mon": 1,
"tue": 2,
"wed": 3,
"thu": 4,
"fri": 5,
"sat": 6,
}}
)
const (
// Set the top bit if a star was included in the expression.
starBit = 1 << 63
)
// Next returns the next time this schedule is activated, greater than the given
// time. If no time can be found to satisfy the schedule, return the zero time.
func (s *SpecSchedule) Next(t time.Time) time.Time {
// General approach:
// For Month, Day, Hour, Minute, Second:
// Check if the time value matches. If yes, continue to the next field.
// If the field doesn't match the schedule, then increment the field until it matches.
// While incrementing the field, a wrap-around brings it back to the beginning
// of the field list (since it is necessary to re-verify previous field
// values)
// Start at the earliest possible time (the upcoming second).
t = t.Add(1*time.Second - time.Duration(t.Nanosecond())*time.Nanosecond)
// This flag indicates whether a field has been incremented.
added := false
// If no time is found within five years, return zero.
yearLimit := t.Year() + 5
WRAP:
if t.Year() > yearLimit {
return time.Time{}
}
// Find the first applicable month.
// If it's this month, then do nothing.
for 1<<uint(t.Month())&s.Month == 0 {
// If we have to add a month, reset the other parts to 0.
if !added {
added = true
// Otherwise, set the date at the beginning (since the current time is irrelevant).
t = time.Date(t.Year(), t.Month(), 1, 0, 0, 0, 0, t.Location())
}
t = t.AddDate(0, 1, 0)
// Wrapped around.
if t.Month() == time.January {
goto WRAP
}
}
// Now get a day in that month.
for !dayMatches(s, t) {
if !added {
added = true
t = time.Date(t.Year(), t.Month(), t.Day(), 0, 0, 0, 0, t.Location())
}
t = t.AddDate(0, 0, 1)
if t.Day() == 1 {
goto WRAP
}
}
for 1<<uint(t.Hour())&s.Hour == 0 {
if !added {
added = true
t = t.Truncate(time.Hour)
}
t = t.Add(1 * time.Hour)
if t.Hour() == 0 {
goto WRAP
}
}
for 1<<uint(t.Minute())&s.Minute == 0 {
if !added {
added = true
t = t.Truncate(time.Minute)
}
t = t.Add(1 * time.Minute)
if t.Minute() == 0 {
goto WRAP
}
}
for 1<<uint(t.Second())&s.Second == 0 {
if !added {
added = true
t = t.Truncate(time.Second)
}
t = t.Add(1 * time.Second)
if t.Second() == 0 {
goto WRAP
}
}
return t
}
// dayMatches returns true if the schedule's day-of-week and day-of-month
// restrictions are satisfied by the given time.
func dayMatches(s *SpecSchedule, t time.Time) bool {
var (
domMatch bool = 1<<uint(t.Day())&s.Dom > 0
dowMatch bool = 1<<uint(t.Weekday())&s.Dow > 0
)
if s.Dom&starBit > 0 || s.Dow&starBit > 0 {
return domMatch && dowMatch
}
return domMatch || dowMatch
}
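How the pieces compose: Parse builds the bitsets, and Next walks the fields with the wrap-around search above. A brief sketch (the spec string is illustrative):

    sched, err := cron.Parse("0 30 9 * * MON-FRI") // 09:30:00 on weekdays
    if err != nil {
        return err
    }
    fmt.Println("next activation:", sched.Next(time.Now()))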

View File

@ -1 +1 @@
eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.FP7BTRxaTbVILW2DBSeJu63eb2Ppa3-gxe5Ja6B5y9iijsqJWkg9k_4iGnojcfgIXz0GVL7XPqdGOFrzQAJWYn0US53t3OINyjwm0J4O0UFHoyLXjwCscfQ3MU_Ha-nVpoWoBll8Omyv4tXykWs2_lCt9LT6h8u7DlYaz2cFns6JFcawRPe-cbN_yuDM7P24wSHNwPGk_Ls1MvR4phmrwCsIhV7e82Be8bVRMU4btbxv1JSqUgGr6vur8JKqM4n0_yKOtBPswcH4_jyzmQABjaKAUfpuznOsfO42t9RyeV7L6IZfFIqgpZMh00DOl3a2I_YdDpydZT1laCGd1_Sn8Q.pCgShcjPktT8Bx48.pbMl3UhYFCe9iVvTCLrxsoOFziShsKSAIzMc4ZphUFb91BvUFATSGOnEYcfFRdCl3Dd5sFzvFcbIiPvDn5klhk-HZMMSHxPbPGutBsSFoPb4WwRPSz9FxybQkXB1DLFpgMGAMfGyjnt1k5420KDyuogwTNUvMOg_vxnH_P7b.zGHv8arI8aMdXX6wyo51RQ
eyJhbGciOiJSU0EtT0FFUCIsImVuYyI6IkExMjhHQ00ifQ.kK6pryC8R-O1R0Gj9ydLvQuIZlcYLGze23WdW7xbpiEEKdz6nweJrMm7ysy8lgu1tM47JVo19p2_b26bNKSQshCUOETvd7Hb2UMZOjnyUnqdyAAyoi6UkIquXfUUbHTNS0iMxwSxxW9KMp2GXNq8-o6T8xQZTDirBJFKKd8ZNUasTaoa5j8U9IfdR1aCavTBuOhvk8IVs-jSbY5TVJMJiE0IOPXois7aRJ6uAiANQBk9VKLegEcZD_qAewecXHDsHi-u0jbmg3o3PPaJaK_Qv5dsPlR2M-E2kE3AGUn0-zn5zYRngoAZ8WZr2O4GvLdltJKq9i2z7jOrdOzzRcDRow.96qvwl_E1Hj15u7Q.hWs-jQ8FsqQFD7pE9N-UEP1BWQ9rsJIcCaPvQRIp8Fukm_vvlw9YEaEq0ERLrsUWsJWpd1ca8_h8x7xD6f_d5YppwRqRHIeGIsdBOTMhNs0lG8ikkQXLat-UroCpy8EC17nuUtDE2E2Kdxrk4Cdd6Bk-dKk0Ta4w3Ud0YBKa.P8zrO7xizgv0i98eVWWzEg

View File

@ -9,15 +9,9 @@ build:
- GOVC_INSECURE=1
- VCA=1
commands:
- go get golang.org/x/tools/cmd/vet
- go get golang.org/x/tools/cmd/goimports
- go get github.com/davecgh/go-spew/spew
- go get
- make all
- make install
- make all install
- git clone https://github.com/sstephenson/bats.git /tmp/bats
- /tmp/bats/install.sh /usr/local
- apt-get -qq update && apt-get install -yqq uuid-runtime bsdmainutils jq
- govc/test/images/update.sh
- bats govc/test
- govc/test/clean.sh

1
vendor/github.com/vmware/govmomi/.gitignore generated vendored Normal file
View File

@ -0,0 +1 @@
secrets.yml

View File

@ -1,11 +1,12 @@
sudo: false
language: go
go: 1.4
go:
- 1.6
before_install:
- go get golang.org/x/tools/cmd/vet
- go get golang.org/x/tools/cmd/goimports
- go get github.com/davecgh/go-spew/spew
- make vendor
script:
- make check test

View File

@ -1,5 +1,33 @@
# changelog
### 0.6.2 (2016-05-11)
* Get complete file details in Datastore.Stat
* SOAP decoding fixes
* Add VirtualMachine.RemoveAllSnapshot
### 0.6.1 (2016-04-30)
* Fix mo.Entity interface
### 0.6.0 (2016-04-29)
* Add Common.Rename method
* Add mo.Entity interface
* Add OptionManager
* Add Finder.FolderList method
* Add VirtualMachine.WaitForNetIP method
* Add VirtualMachine.RevertToSnapshot method
* Add Datastore.Download method
### 0.5.0 (2016-03-30)
Generated fields using xsd type 'int' changed to Go type 'int32'

View File

@ -7,11 +7,13 @@ Alvaro Miranda <kikitux@gmail.com>
Amit Bathla <abathla@.vmware.com>
Andrew Chin <andrew@andrewtchin.com>
Arran Walker <arran.walker@zopa.com>
Austin Parker <aparker@apprenda.com>
Bob Killen <killen.bob@gmail.com>
Bruce Downs <bdowns@vmware.com>
Clint Greenwood <cgreenwood@vmware.com> <clint.greenwood@gmail.com>
Cédric Blomart <cblomart@gmail.com>
Danny Lockard <danny.lockard@banno.com>
Dave Tucker <dave@dtucker.co.uk>
Doug MacEachern <dougm@vmware.com>
Eloy Coto <eloy.coto@gmail.com>
Eric Yutao <eric.yutao@gmail.com>
@ -28,6 +30,7 @@ Mevan Samaratunga <mevansam@gmail.com>
Pieter Noordhuis <pnoordhuis@vmware.com> <pcnoordhuis@gmail.com>
runner.mei <runner.mei@gmail.com>
S.Çağlar Onur <conur@vmware.com>
Sergey Ignatov <sergey.ignatov@jetbrains.com>
Takaaki Furukawa <takaaki.frkw@gmail.com> <takaaki.furukawa@mail.rakuten.com>
Steve Purcell <steve@sanityinc.com>
Yang Yang <yangy@vmware.com>

View File

@ -4,7 +4,12 @@ all: check test
check: goimports govet
goimports:
vendor:
go get golang.org/x/tools/cmd/goimports
go get github.com/davecgh/go-spew/spew
go get golang.org/x/net/context
goimports: vendor
@echo checking go imports...
@! goimports -d . 2>&1 | egrep -v '^$$'
@ -12,9 +17,8 @@ govet:
@echo checking go vet...
@go tool vet -structtags=false -methods=false .
test:
go get
test: vendor
go test -v $(TEST_OPTS) ./...
install:
install: vendor
go install github.com/vmware/govmomi/govc

View File

@ -762,28 +762,42 @@ func (f *Finder) VirtualApp(ctx context.Context, path string) (*object.VirtualAp
return apps[0], nil
}
func (f *Finder) Folder(ctx context.Context, path string) (*object.Folder, error) {
mo, err := f.ManagedObjectList(ctx, path)
func (f *Finder) FolderList(ctx context.Context, path string) ([]*object.Folder, error) {
es, err := f.ManagedObjectList(ctx, path)
if err != nil {
return nil, err
}
if len(mo) == 0 {
var folders []*object.Folder
for _, e := range es {
switch o := e.Object.(type) {
case mo.Folder:
folder := object.NewFolder(f.client, o.Reference())
folder.InventoryPath = e.Path
folders = append(folders, folder)
case *object.Folder:
// RootFolder
folders = append(folders, o)
}
}
if len(folders) == 0 {
return nil, &NotFoundError{"folder", path}
}
if len(mo) > 1 {
return folders, nil
}
func (f *Finder) Folder(ctx context.Context, path string) (*object.Folder, error) {
folders, err := f.FolderList(ctx, path)
if err != nil {
return nil, err
}
if len(folders) > 1 {
return nil, &MultipleFoundError{"folder", path}
}
ref := mo[0].Object.Reference()
if ref.Type != "Folder" {
return nil, &NotFoundError{"folder", path}
}
folder := object.NewFolder(f.client, ref)
folder.InventoryPath = mo[0].Path
return folder, nil
return folders[0], nil
}
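A sketch of the new FolderList call, useful when a glob path may legitimately match several folders (the finder setup and path are illustrative):

    func listFolders(ctx context.Context, finder *find.Finder) error {
        folders, err := finder.FolderList(ctx, "*/vm/*") // may match more than one folder
        if err != nil {
            return err
        }
        for _, f := range folders {
            fmt.Println(f.InventoryPath)
        }
        return nil
    }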

View File

@ -69,3 +69,17 @@ func (c Common) Destroy(ctx context.Context) (*Task, error) {
return NewTask(c.c, res.Returnval), nil
}
func (c Common) Rename(ctx context.Context, name string) (*Task, error) {
req := types.Rename_Task{
This: c.Reference(),
NewName: name,
}
res, err := methods.Rename_Task(ctx, c.c, &req)
if err != nil {
return nil, err
}
return NewTask(c.c, res.Returnval), nil
}

View File

@ -220,7 +220,16 @@ func (d Datastore) UploadFile(ctx context.Context, file string, path string, par
return d.Client().UploadFile(file, u, p)
}
// DownloadFile via soap.Upload with an http service ticket
// Download via soap.Download with an http service ticket
func (d Datastore) Download(ctx context.Context, path string, param *soap.Download) (io.ReadCloser, int64, error) {
u, p, err := d.downloadTicket(ctx, path, param)
if err != nil {
return nil, 0, err
}
return d.Client().Download(u, p)
}
// DownloadFile via soap.Download with an http service ticket
func (d Datastore) DownloadFile(ctx context.Context, path string, file string, param *soap.Download) error {
u, p, err := d.downloadTicket(ctx, path, param)
if err != nil {
@ -305,8 +314,10 @@ func (d Datastore) Stat(ctx context.Context, file string) (types.BaseFileInfo, e
spec := types.HostDatastoreBrowserSearchSpec{
Details: &types.FileQueryFlags{
FileType: true,
FileOwner: types.NewBool(true), // TODO: omitempty is generated, but seems to be required
FileType: true,
FileSize: true,
Modification: true,
FileOwner: types.NewBool(true),
},
MatchPattern: []string{path.Base(file)},
}
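A sketch of the new streaming Download, which avoids the local temp file that DownloadFile implies (the datastore path and destination writer are illustrative):

    func fetch(ctx context.Context, ds *object.Datastore, dst io.Writer) error {
        rc, length, err := ds.Download(ctx, "vm1/vm1.vmx", &soap.DefaultDownload)
        if err != nil {
            return err
        }
        defer rc.Close()
        fmt.Printf("streaming %d bytes\n", length)
        _, err = io.Copy(dst, rc)
        return err
    }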

View File

@ -37,7 +37,9 @@ func NewFolder(c *vim25.Client, ref types.ManagedObjectReference) *Folder {
}
func NewRootFolder(c *vim25.Client) *Folder {
return NewFolder(c, c.ServiceContent.RootFolder)
f := NewFolder(c, c.ServiceContent.RootFolder)
f.InventoryPath = "/"
return f
}
func (f Folder) Children(ctx context.Context) ([]Reference, error) {
@ -196,3 +198,17 @@ func (f Folder) CreateDVS(ctx context.Context, spec types.DVSCreateSpec) (*Task,
return NewTask(f.c, res.Returnval), nil
}
func (f Folder) MoveInto(ctx context.Context, list []types.ManagedObjectReference) (*Task, error) {
req := types.MoveIntoFolder_Task{
This: f.Reference(),
List: list,
}
res, err := methods.MoveIntoFolder_Task(ctx, f.c, &req)
if err != nil {
return nil, err
}
return NewTask(f.c, res.Returnval), nil
}

View File

@ -0,0 +1,64 @@
/*
Copyright (c) 2016 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package object
import (
"github.com/vmware/govmomi/vim25"
"github.com/vmware/govmomi/vim25/methods"
"github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
)
type HostAccountManager struct {
Common
}
func NewHostAccountManager(c *vim25.Client, ref types.ManagedObjectReference) *HostAccountManager {
return &HostAccountManager{
Common: NewCommon(c, ref),
}
}
func (m HostAccountManager) Create(ctx context.Context, user *types.HostAccountSpec) error {
req := types.CreateUser{
This: m.Reference(),
User: user,
}
_, err := methods.CreateUser(ctx, m.Client(), &req)
return err
}
func (m HostAccountManager) Update(ctx context.Context, user *types.HostAccountSpec) error {
req := types.UpdateUser{
This: m.Reference(),
User: user,
}
_, err := methods.UpdateUser(ctx, m.Client(), &req)
return err
}
func (m HostAccountManager) Remove(ctx context.Context, userName string) error {
req := types.RemoveUser{
This: m.Reference(),
UserName: userName,
}
_, err := methods.RemoveUser(ctx, m.Client(), &req)
return err
}
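A usage sketch combining this manager with the AccountManager accessor added to HostConfigManager below (the user name and password are illustrative, and the usual HostSystem.ConfigManager accessor is assumed):

    func addUser(ctx context.Context, host *object.HostSystem) error {
        m, err := host.ConfigManager().AccountManager(ctx)
        if err != nil {
            return err
        }
        return m.Create(ctx, &types.HostAccountSpec{
            Id:       "esxuser", // illustrative
            Password: "secret",  // illustrative
        })
    }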

View File

@ -98,3 +98,25 @@ func (m HostConfigManager) VsanSystem(ctx context.Context) (*HostVsanSystem, err
return NewHostVsanSystem(m.c, *h.ConfigManager.VsanSystem), nil
}
func (m HostConfigManager) AccountManager(ctx context.Context) (*HostAccountManager, error) {
var h mo.HostSystem
err := m.Properties(ctx, m.Reference(), []string{"configManager.accountManager"}, &h)
if err != nil {
return nil, err
}
return NewHostAccountManager(m.c, *h.ConfigManager.AccountManager), nil
}
func (m HostConfigManager) OptionManager(ctx context.Context) (*OptionManager, error) {
var h mo.HostSystem
err := m.Properties(ctx, m.Reference(), []string{"configManager.advancedOption"}, &h)
if err != nil {
return nil, err
}
return NewOptionManager(m.c, *h.ConfigManager.AdvancedOption), nil
}

View File

@ -0,0 +1,58 @@
/*
Copyright (c) 2016 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package object
import (
"github.com/vmware/govmomi/vim25"
"github.com/vmware/govmomi/vim25/methods"
"github.com/vmware/govmomi/vim25/types"
"golang.org/x/net/context"
)
type OptionManager struct {
Common
}
func NewOptionManager(c *vim25.Client, ref types.ManagedObjectReference) *OptionManager {
return &OptionManager{
Common: NewCommon(c, ref),
}
}
func (m OptionManager) Query(ctx context.Context, name string) ([]types.BaseOptionValue, error) {
req := types.QueryOptions{
This: m.Reference(),
Name: name,
}
res, err := methods.QueryOptions(ctx, m.Client(), &req)
if err != nil {
return nil, err
}
return res.Returnval, nil
}
func (m OptionManager) Update(ctx context.Context, value []types.BaseOptionValue) error {
req := types.UpdateOptions{
This: m.Reference(),
ChangedValue: value,
}
_, err := methods.UpdateOptions(ctx, m.Client(), &req)
return err
}
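A sketch of tweaking an advanced host setting through the new manager, reached via the OptionManager accessor added above (the option key and value are illustrative):

    func setLogLevel(ctx context.Context, host *object.HostSystem) error {
        om, err := host.ConfigManager().OptionManager(ctx)
        if err != nil {
            return err
        }
        return om.Update(ctx, []types.BaseOptionValue{
            &types.OptionValue{Key: "Config.HostAgent.log.level", Value: "info"},
        })
    }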

View File

@ -63,7 +63,6 @@ func EthernetCardTypes() VirtualDeviceList {
&types.VirtualVmxnet3{},
}).Select(func(device types.BaseVirtualDevice) bool {
c := device.(types.BaseVirtualEthernetCard).GetVirtualEthernetCard()
c.AddressType = string(types.VirtualEthernetCardMacTypeGenerated)
c.GetVirtualDevice().Key = -1
return true
})

View File

@ -19,6 +19,7 @@ package object
import (
"errors"
"fmt"
"net"
"github.com/vmware/govmomi/property"
"github.com/vmware/govmomi/vim25"
@ -245,6 +246,77 @@ func (v VirtualMachine) WaitForIP(ctx context.Context) (string, error) {
return ip, nil
}
// WaitForNetIP waits for the VM guest.net property to report an IP address for all VM NICs.
// Only consider IPv4 addresses if the v4 param is true.
// Returns a map with MAC address as the key and IP address list as the value.
func (v VirtualMachine) WaitForNetIP(ctx context.Context, v4 bool) (map[string][]string, error) {
macs := make(map[string][]string)
p := property.DefaultCollector(v.c)
// Wait for all NICs to have a MacAddress, which may not be generated yet.
err := property.Wait(ctx, p, v.Reference(), []string{"config.hardware.device"}, func(pc []types.PropertyChange) bool {
for _, c := range pc {
if c.Op != types.PropertyChangeOpAssign {
continue
}
devices := c.Val.(types.ArrayOfVirtualDevice).VirtualDevice
for _, device := range devices {
if nic, ok := device.(types.BaseVirtualEthernetCard); ok {
mac := nic.GetVirtualEthernetCard().MacAddress
if mac == "" {
return false
}
macs[mac] = nil
}
}
}
return true
})
if err != nil {
return nil, err
}
err = property.Wait(ctx, p, v.Reference(), []string{"guest.net"}, func(pc []types.PropertyChange) bool {
for _, c := range pc {
if c.Op != types.PropertyChangeOpAssign {
continue
}
nics := c.Val.(types.ArrayOfGuestNicInfo).GuestNicInfo
for _, nic := range nics {
mac := nic.MacAddress
if mac == "" || nic.IpConfig == nil {
continue
}
for _, ip := range nic.IpConfig.IpAddress {
if _, ok := macs[mac]; !ok {
continue // Ignore any that don't correspond to a VM device
}
if v4 && net.ParseIP(ip.IpAddress).To4() == nil {
continue // Ignore non IPv4 address
}
macs[mac] = append(macs[mac], ip.IpAddress)
}
}
}
for _, ips := range macs {
if len(ips) == 0 {
return false
}
}
return true
})
if err != nil {
return nil, err
}
return macs, nil
}
// Device returns the VirtualMachine's config.hardware.device property.
func (v VirtualMachine) Device(ctx context.Context) (VirtualDeviceList, error) {
var o mo.VirtualMachine
@ -336,8 +408,12 @@ func (v VirtualMachine) EditDevice(ctx context.Context, device ...types.BaseVirt
}
// RemoveDevice removes the given devices on the VirtualMachine
func (v VirtualMachine) RemoveDevice(ctx context.Context, device ...types.BaseVirtualDevice) error {
return v.configureDevice(ctx, types.VirtualDeviceConfigSpecOperationRemove, types.VirtualDeviceConfigSpecFileOperationDestroy, device...)
func (v VirtualMachine) RemoveDevice(ctx context.Context, keepFiles bool, device ...types.BaseVirtualDevice) error {
fop := types.VirtualDeviceConfigSpecFileOperationDestroy
if keepFiles {
fop = ""
}
return v.configureDevice(ctx, types.VirtualDeviceConfigSpecOperationRemove, fop, device...)
}
// BootOptions returns the VirtualMachine's config.bootOptions property.
@ -400,6 +476,76 @@ func (v VirtualMachine) CreateSnapshot(ctx context.Context, name string, descrip
return NewTask(v.c, res.Returnval), nil
}
// RemoveAllSnapshot removes all snapshots of a virtual machine
func (v VirtualMachine) RemoveAllSnapshot(ctx context.Context, consolidate *bool) (*Task, error) {
req := types.RemoveAllSnapshots_Task{
This: v.Reference(),
Consolidate: consolidate,
}
res, err := methods.RemoveAllSnapshots_Task(ctx, v.c, &req)
if err != nil {
return nil, err
}
return NewTask(v.c, res.Returnval), nil
}
// RevertToSnapshot reverts to a named snapshot
func (v VirtualMachine) RevertToSnapshot(ctx context.Context, name string, suppressPowerOn bool) (*Task, error) {
var o mo.VirtualMachine
err := v.Properties(ctx, v.Reference(), []string{"snapshot"}, &o)
if err != nil {
return nil, err
}
if o.Snapshot == nil {
return nil, errors.New("No snapshots for this VM")
}
snapshotTree := o.Snapshot.RootSnapshotList
if len(snapshotTree) < 1 {
return nil, errors.New("No snapshots for this VM")
}
snapshot, err := traverseSnapshotInTree(snapshotTree, name)
if err != nil {
return nil, err
}
req := types.RevertToSnapshot_Task{
This: snapshot,
SuppressPowerOn: types.NewBool(suppressPowerOn),
}
res, err := methods.RevertToSnapshot_Task(ctx, v.c, &req)
if err != nil {
return nil, err
}
return NewTask(v.c, res.Returnval), nil
}
// traverseSnapshotInTree is a recursive function that will traverse a snapshot tree to find a given snapshot
func traverseSnapshotInTree(tree []types.VirtualMachineSnapshotTree, name string) (types.ManagedObjectReference, error) {
var o types.ManagedObjectReference
if tree == nil {
return o, errors.New("Snapshot tree is empty")
}
for _, s := range tree {
if s.Name == name {
o = s.Snapshot
break
} else {
childTree := s.ChildSnapshotList
var err error
o, err = traverseSnapshotInTree(childTree, name)
if err != nil {
return o, err
}
}
}
if o.Value == "" {
return o, errors.New("Snapshot not found")
}
return o, nil
}
// IsToolsRunning returns true if VMware Tools is currently running in the guest OS, and false otherwise.
func (v VirtualMachine) IsToolsRunning(ctx context.Context) (bool, error) {
var o mo.VirtualMachine
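Pulling the additions in this hunk together, a sketch that waits for IPv4 addresses on all NICs and then reverts to a named snapshot (the snapshot name and error handling are illustrative; vm is an *object.VirtualMachine):

    func refresh(ctx context.Context, vm *object.VirtualMachine) error {
        ips, err := vm.WaitForNetIP(ctx, true) // IPv4 only
        if err != nil {
            return err
        }
        for mac, addrs := range ips {
            fmt.Println(mac, addrs)
        }
        task, err := vm.RevertToSnapshot(ctx, "pre-upgrade", true)
        if err != nil {
            return err
        }
        return task.Wait(ctx)
    }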

24
vendor/github.com/vmware/govmomi/vim25/mo/entity.go generated vendored Normal file
View File

@ -0,0 +1,24 @@
/*
Copyright (c) 2016 VMware, Inc. All Rights Reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mo
// Entity is the interface that is implemented by all managed objects
// that extend ManagedEntity.
type Entity interface {
Reference
Entity() *ManagedEntity
}
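The interface makes it possible to write helpers that are generic over any managed entity. A brief sketch (the function itself is hypothetical):

    // printEntity works for any of the types given Entity() methods below,
    // e.g. *mo.Datacenter, *mo.Folder, *mo.VirtualMachine.
    func printEntity(e mo.Entity) {
        fmt.Printf("%s (%s)\n", e.Entity().Name, e.Reference().Type)
    }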

View File

@ -130,6 +130,10 @@ type ComputeResource struct {
ConfigurationEx types.BaseComputeResourceConfigInfo `mo:"configurationEx"`
}
func (m *ComputeResource) Entity() *ManagedEntity {
return &m.ManagedEntity
}
func init() {
t["ComputeResource"] = reflect.TypeOf((*ComputeResource)(nil)).Elem()
}
@ -187,6 +191,10 @@ type Datacenter struct {
Configuration types.DatacenterConfigInfo `mo:"configuration"`
}
func (m *Datacenter) Entity() *ManagedEntity {
return &m.ManagedEntity
}
func init() {
t["Datacenter"] = reflect.TypeOf((*Datacenter)(nil)).Elem()
}
@ -203,6 +211,10 @@ type Datastore struct {
IormConfiguration *types.StorageIORMInfo `mo:"iormConfiguration"`
}
func (m *Datastore) Entity() *ManagedEntity {
return &m.ManagedEntity
}
func init() {
t["Datastore"] = reflect.TypeOf((*Datastore)(nil)).Elem()
}
@ -255,6 +267,10 @@ type DistributedVirtualSwitch struct {
Runtime *types.DVSRuntimeInfo `mo:"runtime"`
}
func (m *DistributedVirtualSwitch) Entity() *ManagedEntity {
return &m.ManagedEntity
}
func init() {
t["DistributedVirtualSwitch"] = reflect.TypeOf((*DistributedVirtualSwitch)(nil)).Elem()
}
@ -359,6 +375,10 @@ type Folder struct {
ChildEntity []types.ManagedObjectReference `mo:"childEntity"`
}
func (m *Folder) Entity() *ManagedEntity {
return &m.ManagedEntity
}
func init() {
t["Folder"] = reflect.TypeOf((*Folder)(nil)).Elem()
}
@ -878,6 +898,10 @@ type HostSystem struct {
SystemResources *types.HostSystemResourceInfo `mo:"systemResources"`
}
func (m *HostSystem) Entity() *ManagedEntity {
return &m.ManagedEntity
}
func init() {
t["HostSystem"] = reflect.TypeOf((*HostSystem)(nil)).Elem()
}
@ -1117,6 +1141,10 @@ type Network struct {
Vm []types.ManagedObjectReference `mo:"vm"`
}
func (m *Network) Entity() *ManagedEntity {
return &m.ManagedEntity
}
func init() {
t["Network"] = reflect.TypeOf((*Network)(nil)).Elem()
}
@ -1286,6 +1314,10 @@ type ResourcePool struct {
ChildConfiguration []types.ResourceConfigSpec `mo:"childConfiguration"`
}
func (m *ResourcePool) Entity() *ManagedEntity {
return &m.ManagedEntity
}
func init() {
t["ResourcePool"] = reflect.TypeOf((*ResourcePool)(nil)).Elem()
}
@ -1551,6 +1583,10 @@ type VirtualMachine struct {
GuestHeartbeatStatus types.ManagedEntityStatus `mo:"guestHeartbeatStatus"`
}
func (m *VirtualMachine) Entity() *ManagedEntity {
return &m.ManagedEntity
}
func init() {
t["VirtualMachine"] = reflect.TypeOf((*VirtualMachine)(nil)).Elem()
}

View File

@ -189,12 +189,20 @@ func assignValue(val reflect.Value, fi []int, pv reflect.Value) {
npv := reflect.New(pt)
npv.Elem().Set(pv)
pv = npv
pt = pv.Type()
} else {
panic(fmt.Sprintf("type %s doesn't implement %s", pt.Name(), rt.Name()))
}
}
rv.Set(pv)
if pt.AssignableTo(rt) {
rv.Set(pv)
} else if rt.ConvertibleTo(pt) {
rv.Set(pv.Convert(rt))
} else {
panic(fmt.Sprintf("cannot assign %s (%s) to %s (%s)", rt.Name(), rt.Kind(), pt.Name(), pt.Kind()))
}
return
}
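The assign-or-convert rule can be seen in isolation with plain reflect. A standalone sketch under hypothetical named types (not govmomi code):

    type logLevel string // hypothetical named string type, like a generated enum

    func setValue(dst, src reflect.Value) {
        switch {
        case src.Type().AssignableTo(dst.Type()):
            dst.Set(src)
        case src.Type().ConvertibleTo(dst.Type()):
            dst.Set(src.Convert(dst.Type())) // e.g. string -> logLevel
        default:
            panic(fmt.Sprintf("cannot assign %s to %s", src.Type(), dst.Type()))
        }
    }

For example, setValue(reflect.ValueOf(new(logLevel)).Elem(), reflect.ValueOf("info")) takes the ConvertibleTo branch and converts the plain string into the named type, mirroring the case added here.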

View File

@ -428,23 +428,12 @@ var DefaultDownload = Download{
Method: "GET",
}
// DownloadFile GETs the given URL to a local file
func (c *Client) DownloadFile(file string, u *url.URL, param *Download) error {
var err error
if param == nil {
param = &DefaultDownload
}
fh, err := os.Create(file)
if err != nil {
return err
}
defer fh.Close()
// Download GETs the remote file from the given URL
func (c *Client) Download(u *url.URL, param *Download) (io.ReadCloser, int64, error) {
req, err := http.NewRequest(param.Method, u.String(), nil)
if err != nil {
return err
return nil, 0, err
}
if param.Ticket != nil {
@ -453,11 +442,9 @@ func (c *Client) DownloadFile(file string, u *url.URL, param *Download) error {
res, err := c.Client.Do(req)
if err != nil {
return err
return nil, 0, err
}
defer res.Body.Close()
switch res.StatusCode {
case http.StatusOK:
default:
@ -465,12 +452,37 @@ func (c *Client) DownloadFile(file string, u *url.URL, param *Download) error {
}
if err != nil {
return err
return nil, 0, err
}
var r io.Reader = res.Body
var r io.ReadCloser = res.Body
return r, res.ContentLength, nil
}
// DownloadFile GETs the given URL to a local file
func (c *Client) DownloadFile(file string, u *url.URL, param *Download) error {
var err error
if param == nil {
param = &DefaultDownload
}
rc, contentLength, err := c.Download(u, param)
if err != nil {
return err
}
defer rc.Close()
var r io.Reader = rc
fh, err := os.Create(file)
if err != nil {
return err
}
defer fh.Close()
if param.Progress != nil {
pr := progress.NewReader(param.Progress, res.Body, res.ContentLength)
pr := progress.NewReader(param.Progress, r, contentLength)
r = pr
// Mark progress reader as done when returning from this function.

View File

@ -271,9 +271,19 @@ var (
// Find reflect.Type for an element's type attribute.
func (p *Decoder) typeForElement(val reflect.Value, start *StartElement) reflect.Type {
t := ""
for _, a := range start.Attr {
for i, a := range start.Attr {
if a.Name == xmlSchemaInstance {
t = a.Value
// HACK: ensure xsi:type is last in the list to avoid using that value for
// a "type" attribute, such as ManagedObjectReference.Type for example.
// Note that xsi:type is already the last attribute in VC/ESX responses.
// This is only an issue with govmomi simulator generated responses.
// Proper fix will require finding a few needles in this xml package haystack.
x := len(start.Attr) - 1
if i != x {
start.Attr[i] = start.Attr[x]
start.Attr[x] = a
}
break
}
}

View File

@ -0,0 +1,56 @@
// Copyright 2016 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package v1alpha1
import (
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/api/v1"
)
// resource usage metrics of a node.
type NodeMetrics struct {
unversioned.TypeMeta `json:",inline"`
v1.ObjectMeta `json:"metadata,omitempty"`
// The following fields define the time interval from which metrics were
// collected: [Timestamp-Window, Timestamp].
Timestamp unversioned.Time `json:"timestamp"`
Window unversioned.Duration `json:"window"`
// The memory usage is the memory working set.
Usage v1.ResourceList `json:"usage"`
}
// resource usage metrics of a pod.
type PodMetrics struct {
unversioned.TypeMeta `json:",inline"`
v1.ObjectMeta `json:"metadata,omitempty"`
// The following fields define the time interval from which metrics were
// collected: [Timestamp-Window, Timestamp].
Timestamp unversioned.Time `json:"timestamp"`
Window unversioned.Duration `json:"window"`
// Metrics for all containers are collected within the same time window.
Containers []ContainerMetrics `json:"containers"`
}
// resource usage metrics of a container.
type ContainerMetrics struct {
// Container name corresponding to the one from pod.spec.containers.
Name string `json:"name"`
// The memory usage is the memory working set.
Usage v1.ResourceList `json:"usage"`
}

View File

@ -18,14 +18,10 @@ limitations under the License.
package options
import (
"strings"
"time"
"k8s.io/kubernetes/pkg/admission"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/api/validation"
"k8s.io/kubernetes/pkg/apiserver"
"k8s.io/kubernetes/pkg/genericapiserver"
genericoptions "k8s.io/kubernetes/pkg/genericapiserver/options"
kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
"k8s.io/kubernetes/pkg/master/ports"
@ -34,54 +30,30 @@ import (
// APIServer runs a kubernetes api server.
type APIServer struct {
*genericapiserver.ServerRunOptions
AdmissionControl string
AdmissionControlConfigFile string
*genericoptions.ServerRunOptions
AllowPrivileged bool
AuthorizationMode string
AuthorizationConfig apiserver.AuthorizationConfig
BasicAuthFile string
DefaultStorageMediaType string
DeleteCollectionWorkers int
EtcdServersOverrides []string
EventTTL time.Duration
KeystoneURL string
KubeletConfig kubeletclient.KubeletClientConfig
MasterServiceNamespace string
MaxConnectionBytesPerSec int64
OIDCCAFile string
OIDCClientID string
OIDCIssuerURL string
OIDCUsernameClaim string
OIDCGroupsClaim string
SSHKeyfile string
SSHUser string
ServiceAccountKeyFile string
ServiceAccountLookup bool
WebhookTokenAuthnConfigFile string
// The default values for StorageVersions. StorageVersions overrides
// these; you can change this if you want to change the defaults (e.g.,
// for testing). This is not actually exposed as a flag.
DefaultStorageVersions string
TokenAuthFile string
WatchCacheSizes []string
WebhookTokenAuthnCacheTTL time.Duration
}
// NewAPIServer creates a new APIServer object with default parameters
func NewAPIServer() *APIServer {
s := APIServer{
ServerRunOptions: genericapiserver.NewServerRunOptions(),
AdmissionControl: "AlwaysAdmit",
AuthorizationMode: "AlwaysAllow",
DefaultStorageMediaType: "application/json",
DeleteCollectionWorkers: 1,
EventTTL: 1 * time.Hour,
MasterServiceNamespace: api.NamespaceDefault,
ServerRunOptions: genericoptions.NewServerRunOptions(),
EventTTL: 1 * time.Hour,
KubeletConfig: kubeletclient.KubeletClientConfig{
Port: ports.KubeletPort,
EnableHttps: true,
HTTPTimeout: time.Duration(5) * time.Second,
},
WebhookTokenAuthnCacheTTL: 2 * time.Minute,
}
return &s
}
@ -92,30 +64,12 @@ func (s *APIServer) AddFlags(fs *pflag.FlagSet) {
s.ServerRunOptions.AddFlags(fs)
// Note: the weird ""+ in below lines seems to be the only way to get gofmt to
// arrange these text blocks sensibly. Grrr.
fs.StringVar(&s.DefaultStorageMediaType, "storage-media-type", s.DefaultStorageMediaType, "The media type to use to store objects in storage. Defaults to application/json. Some resources may only support a specific media type and will ignore this setting.")
fs.DurationVar(&s.EventTTL, "event-ttl", s.EventTTL, "Amount of time to retain events. Default 1 hour.")
fs.StringVar(&s.BasicAuthFile, "basic-auth-file", s.BasicAuthFile, "If set, the file that will be used to admit requests to the secure port of the API server via http basic authentication.")
fs.StringVar(&s.TokenAuthFile, "token-auth-file", s.TokenAuthFile, "If set, the file that will be used to secure the secure port of the API server via token authentication.")
fs.StringVar(&s.OIDCIssuerURL, "oidc-issuer-url", s.OIDCIssuerURL, "The URL of the OpenID issuer, only HTTPS scheme will be accepted. If set, it will be used to verify the OIDC JSON Web Token (JWT)")
fs.StringVar(&s.OIDCClientID, "oidc-client-id", s.OIDCClientID, "The client ID for the OpenID Connect client, must be set if oidc-issuer-url is set")
fs.StringVar(&s.OIDCCAFile, "oidc-ca-file", s.OIDCCAFile, "If set, the OpenID server's certificate will be verified by one of the authorities in the oidc-ca-file, otherwise the host's root CA set will be used")
fs.StringVar(&s.OIDCUsernameClaim, "oidc-username-claim", "sub", ""+
"The OpenID claim to use as the user name. Note that claims other than the default ('sub') is not "+
"guaranteed to be unique and immutable. This flag is experimental, please see the authentication documentation for further details.")
fs.StringVar(&s.OIDCGroupsClaim, "oidc-groups-claim", "", "If provided, the name of a custom OpenID Connect claim for specifying user groups. The claim value is expected to be an array of strings. This flag is experimental, please see the authentication documentation for further details.")
fs.StringVar(&s.ServiceAccountKeyFile, "service-account-key-file", s.ServiceAccountKeyFile, "File containing PEM-encoded x509 RSA private or public key, used to verify ServiceAccount tokens. If unspecified, --tls-private-key-file is used.")
fs.BoolVar(&s.ServiceAccountLookup, "service-account-lookup", s.ServiceAccountLookup, "If true, validate ServiceAccount tokens exist in etcd as part of authentication.")
fs.StringVar(&s.KeystoneURL, "experimental-keystone-url", s.KeystoneURL, "If passed, activates the keystone authentication plugin")
fs.StringVar(&s.AuthorizationMode, "authorization-mode", s.AuthorizationMode, "Ordered list of plug-ins to do authorization on secure port. Comma-delimited list of: "+strings.Join(apiserver.AuthorizationModeChoices, ","))
fs.StringVar(&s.AuthorizationConfig.PolicyFile, "authorization-policy-file", s.AuthorizationConfig.PolicyFile, "File with authorization policy in csv format, used with --authorization-mode=ABAC, on the secure port.")
fs.StringVar(&s.AuthorizationConfig.WebhookConfigFile, "authorization-webhook-config-file", s.AuthorizationConfig.WebhookConfigFile, "File with webhook configuration in kubeconfig format, used with --authorization-mode=Webhook. The API server will query the remote service to determine access on the API server's secure port.")
fs.StringVar(&s.WebhookTokenAuthnConfigFile, "authentication-token-webhook-config-file", s.WebhookTokenAuthnConfigFile, "File with webhook configuration for token authentication in kubeconfig format. The API server will query the remote service to determine authentication for bearer tokens.")
fs.StringVar(&s.AdmissionControl, "admission-control", s.AdmissionControl, "Ordered list of plug-ins to do admission control of resources into cluster. Comma-delimited list of: "+strings.Join(admission.GetPlugins(), ", "))
fs.StringVar(&s.AdmissionControlConfigFile, "admission-control-config-file", s.AdmissionControlConfigFile, "File with admission control configuration.")
fs.StringSliceVar(&s.EtcdServersOverrides, "etcd-servers-overrides", s.EtcdServersOverrides, "Per-resource etcd servers overrides, comma separated. The individual override format: group/resource#servers, where servers are http://ip:port, semicolon separated.")
fs.DurationVar(&s.WebhookTokenAuthnCacheTTL, "authentication-token-webhook-cache-ttl", s.WebhookTokenAuthnCacheTTL, "The duration to cache responses from the webhook token authenticator. Default is 2m")
fs.BoolVar(&s.AllowPrivileged, "allow-privileged", s.AllowPrivileged, "If true, allow privileged containers.")
fs.StringVar(&s.MasterServiceNamespace, "master-service-namespace", s.MasterServiceNamespace, "The namespace from which the kubernetes master services should be injected into pods")
fs.IntVar(&s.DeleteCollectionWorkers, "delete-collection-workers", s.DeleteCollectionWorkers, "Number of workers spawned for DeleteCollection call. These are used to speed up namespace cleanup.")
fs.StringVar(&s.SSHUser, "ssh-user", s.SSHUser, "If non-empty, use secure SSH proxy to the nodes, using this user name")
fs.StringVar(&s.SSHKeyfile, "ssh-keyfile", s.SSHKeyfile, "If non-empty, use secure SSH proxy to the nodes, using this user keyfile")
fs.Int64Var(&s.MaxConnectionBytesPerSec, "max-connection-bytes-per-sec", s.MaxConnectionBytesPerSec, "If non-zero, throttle each user connection to this number of bytes/sec. Currently only applies to long-running requests")
@ -129,5 +83,4 @@ func (s *APIServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.KubeletConfig.CAFile, "kubelet-certificate-authority", s.KubeletConfig.CAFile, "Path to a cert. file for the certificate authority.")
// TODO: delete this flag as soon as we identify and fix all clients that send malformed updates, like #14126.
fs.BoolVar(&validation.RepairMalformedUpdates, "repair-malformed-updates", validation.RepairMalformedUpdates, "If true, server will do its best to fix the update request to pass the validation, e.g., setting empty UID in update request to its existing value. This flag can be turned off after we fix all the clients that send malformed updates.")
fs.StringSliceVar(&s.WatchCacheSizes, "watch-cache-sizes", s.WatchCacheSizes, "List of watch cache sizes for every resource (pods, nodes, etc.), comma separated. The individual override format: resource#size, where size is a number. It takes effect when watch-cache is enabled.")
}

View File

@ -190,6 +190,7 @@ func Run(s *options.APIServer) error {
ServiceAccountTokenGetter: serviceAccountGetter,
KeystoneURL: s.KeystoneURL,
WebhookTokenAuthnConfigFile: s.WebhookTokenAuthnConfigFile,
WebhookTokenAuthnCacheTTL: s.WebhookTokenAuthnCacheTTL,
})
if err != nil {

View File

@ -227,10 +227,11 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
// this cidr has been validated already
_, clusterCIDR, _ := net.ParseCIDR(s.ClusterCIDR)
_, serviceCIDR, _ := net.ParseCIDR(s.ServiceCIDR)
nodeController := nodecontroller.NewNodeController(cloud, clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "node-controller")),
s.PodEvictionTimeout.Duration, flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
flowcontrol.NewTokenBucketRateLimiter(s.DeletingPodsQps, int(s.DeletingPodsBurst)),
s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, s.AllocateNodeCIDRs)
s.NodeMonitorGracePeriod.Duration, s.NodeStartupGracePeriod.Duration, s.NodeMonitorPeriod.Duration, clusterCIDR, serviceCIDR, int(s.NodeCIDRMaskSize), s.AllocateNodeCIDRs)
nodeController.Run(s.NodeSyncPeriod.Duration)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
@ -298,7 +299,7 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
// Find the list of namespaced resources via discovery that the namespace controller must manage
namespaceKubeClient := clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "namespace-controller"))
namespaceClientPool := dynamic.NewClientPool(restclient.AddUserAgent(kubeconfig, "namespace-controller"), dynamic.LegacyAPIPathResolverFunc)
groupVersionResources, err := namespacecontroller.ServerPreferredNamespacedGroupVersionResources(namespaceKubeClient.Discovery())
groupVersionResources, err := namespaceKubeClient.Discovery().ServerPreferredNamespacedResources()
if err != nil {
glog.Fatalf("Failed to get supported resources from server: %v", err)
}
@ -373,38 +374,23 @@ func StartControllers(s *options.CMServer, kubeClient *client.Client, kubeconfig
}
}
volumePlugins := ProbeRecyclableVolumePlugins(s.VolumeConfiguration)
provisioner, err := NewVolumeProvisioner(cloud, s.VolumeConfiguration)
if err != nil {
glog.Fatal("A Provisioner could not be created, but one was expected. Provisioning will not work. This functionality is considered an early Alpha version.")
}
pvclaimBinder := persistentvolumecontroller.NewPersistentVolumeClaimBinder(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")), s.PVClaimBinderSyncPeriod.Duration)
pvclaimBinder.Run()
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
pvRecycler, err := persistentvolumecontroller.NewPersistentVolumeRecycler(
clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-recycler")),
volumeController := persistentvolumecontroller.NewPersistentVolumeController(
clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-binder")),
s.PVClaimBinderSyncPeriod.Duration,
int(s.VolumeConfiguration.PersistentVolumeRecyclerConfiguration.MaximumRetry),
provisioner,
ProbeRecyclableVolumePlugins(s.VolumeConfiguration),
cloud,
s.ClusterName,
nil, nil, nil,
)
if err != nil {
glog.Fatalf("Failed to start persistent volume recycler: %+v", err)
}
pvRecycler.Run()
volumeController.Run()
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
if provisioner != nil {
pvController, err := persistentvolumecontroller.NewPersistentVolumeProvisionerController(persistentvolumecontroller.NewControllerClient(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "persistent-volume-provisioner"))), s.PVClaimBinderSyncPeriod.Duration, s.ClusterName, volumePlugins, provisioner, cloud)
if err != nil {
glog.Fatalf("Failed to start persistent volume provisioner controller: %+v", err)
}
pvController.Run()
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))
}
go volume.NewAttachDetachController(clientset.NewForConfigOrDie(restclient.AddUserAgent(kubeconfig, "attachdetach-controller")), podInformer, nodeInformer, ResyncPeriod(s)()).
Run(wait.NeverStop)
time.Sleep(wait.Jitter(s.ControllerStartInterval.Duration, ControllerStartJitter))

View File

@ -70,6 +70,7 @@ func NewCMServer() *CMServer {
NodeStartupGracePeriod: unversioned.Duration{Duration: 60 * time.Second},
NodeMonitorPeriod: unversioned.Duration{Duration: 5 * time.Second},
ClusterName: "kubernetes",
NodeCIDRMaskSize: 24,
TerminatedPodGCThreshold: 12500,
VolumeConfiguration: componentconfig.VolumeConfiguration{
EnableHostPathProvisioning: false,
@ -81,6 +82,7 @@ func NewCMServer() *CMServer {
IncrementTimeoutHostPath: 30,
},
},
ContentType: "application/vnd.kubernetes.protobuf",
KubeAPIQPS: 20.0,
KubeAPIBurst: 30,
LeaderElection: leaderelection.DefaultLeaderElectionConfiguration(),
@ -141,11 +143,13 @@ func (s *CMServer) AddFlags(fs *pflag.FlagSet) {
fs.BoolVar(&s.EnableProfiling, "profiling", true, "Enable profiling via web interface host:port/debug/pprof/")
fs.StringVar(&s.ClusterName, "cluster-name", s.ClusterName, "The instance prefix for the cluster")
fs.StringVar(&s.ClusterCIDR, "cluster-cidr", s.ClusterCIDR, "CIDR Range for Pods in cluster.")
fs.StringVar(&s.ServiceCIDR, "service-cluster-ip-range", s.ServiceCIDR, "CIDR Range for Services in cluster.")
fs.Int32Var(&s.NodeCIDRMaskSize, "node-cidr-mask-size", s.NodeCIDRMaskSize, "Mask size for node cidr in cluster.")
fs.BoolVar(&s.AllocateNodeCIDRs, "allocate-node-cidrs", false, "Should CIDRs for Pods be allocated and set on the cloud provider.")
fs.StringVar(&s.Master, "master", s.Master, "The address of the Kubernetes API server (overrides any value in kubeconfig)")
fs.StringVar(&s.Kubeconfig, "kubeconfig", s.Kubeconfig, "Path to kubeconfig file with authorization and master location information.")
fs.StringVar(&s.RootCAFile, "root-ca-file", s.RootCAFile, "If set, this root certificate authority will be included in service account's token secret. This must be a valid PEM-encoded CA bundle.")
fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "ContentType of requests sent to apiserver. Passing application/vnd.kubernetes.protobuf is an experimental feature now.")
fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "Content type of requests sent to apiserver.")
fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
fs.DurationVar(&s.ControllerStartInterval.Duration, "controller-start-interval", s.ControllerStartInterval.Duration, "Interval between starting controller managers.")
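The new --node-cidr-mask-size flag (default 24) controls how large a pod CIDR each node receives when --allocate-node-cidrs is enabled: a /24 yields 2^(32-24) = 256 addresses per node. A tiny illustrative calculation (the real allocator lives in the node controller; this arithmetic is only for intuition):

package main

import "fmt"

// podAddrsPerNode returns how many IPv4 addresses a node CIDR of the
// given mask size spans; illustrative only.
func podAddrsPerNode(maskSize uint) uint32 {
	return uint32(1) << (32 - maskSize)
}

func main() {
	fmt.Println(podAddrsPerNode(24)) // 256 with the default mask size
}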

View File

@ -32,6 +32,7 @@ import (
"k8s.io/kubernetes/pkg/cloudprovider/providers/aws"
"k8s.io/kubernetes/pkg/cloudprovider/providers/gce"
"k8s.io/kubernetes/pkg/cloudprovider/providers/openstack"
"k8s.io/kubernetes/pkg/cloudprovider/providers/vsphere"
"k8s.io/kubernetes/pkg/util/io"
"k8s.io/kubernetes/pkg/volume"
"k8s.io/kubernetes/pkg/volume/aws_ebs"
@ -39,6 +40,7 @@ import (
"k8s.io/kubernetes/pkg/volume/gce_pd"
"k8s.io/kubernetes/pkg/volume/host_path"
"k8s.io/kubernetes/pkg/volume/nfs"
"k8s.io/kubernetes/pkg/volume/vsphere_volume"
"github.com/golang/glog"
)
@ -79,6 +81,7 @@ func ProbeRecyclableVolumePlugins(config componentconfig.VolumeConfiguration) []
allPlugins = append(allPlugins, aws_ebs.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, gce_pd.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...)
return allPlugins
}
@ -97,6 +100,8 @@ func NewVolumeProvisioner(cloud cloudprovider.Interface, config componentconfig.
return getProvisionablePluginFromVolumePlugins(gce_pd.ProbeVolumePlugins())
case cloud != nil && openstack.ProviderName == cloud.ProviderName():
return getProvisionablePluginFromVolumePlugins(cinder.ProbeVolumePlugins())
case cloud != nil && vsphere.ProviderName == cloud.ProviderName():
return getProvisionablePluginFromVolumePlugins(vsphere_volume.ProbeVolumePlugins())
}
return nil, nil
}
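NewVolumeProvisioner picks a provisionable plugin by switching on cloud.ProviderName(); the vSphere case added here follows the existing GCE/OpenStack pattern, and unknown providers fall through to nil, nil. A hedged sketch of that dispatch shape with stand-in types (the real signatures use cloudprovider.Interface and volume.ProvisionableVolumePlugin):

package main

import "fmt"

// Stand-ins for the real cloudprovider/volume types; names are illustrative.
type cloud interface{ ProviderName() string }
type plugin string

// provisionerFor mirrors the switch in NewVolumeProvisioner: one plugin
// per known provider, and no plugin (with a nil error) otherwise.
func provisionerFor(c cloud) (plugin, error) {
	switch {
	case c == nil:
		return "", nil
	case c.ProviderName() == "gce":
		return "kubernetes.io/gce-pd", nil
	case c.ProviderName() == "vsphere":
		return "kubernetes.io/vsphere-volume", nil
	}
	return "", nil
}

type fakeCloud struct{ name string }

func (f fakeCloud) ProviderName() string { return f.name }

func main() {
	p, _ := provisionerFor(fakeCloud{name: "vsphere"})
	fmt.Println(p) // kubernetes.io/vsphere-volume
}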

View File

@ -53,6 +53,7 @@ func NewProxyConfig() *ProxyServerConfig {
api.Scheme.Convert(&v1alpha1.KubeProxyConfiguration{}, &config)
return &ProxyServerConfig{
KubeProxyConfiguration: config,
ContentType: "application/vnd.kubernetes.protobuf",
KubeAPIQPS: 5.0,
KubeAPIBurst: 10,
ConfigSyncPeriod: 15 * time.Minute,
@ -78,7 +79,7 @@ func (s *ProxyServerConfig) AddFlags(fs *pflag.FlagSet) {
fs.BoolVar(&s.MasqueradeAll, "masquerade-all", s.MasqueradeAll, "If using the pure iptables proxy, SNAT everything")
fs.StringVar(&s.ClusterCIDR, "cluster-cidr", s.ClusterCIDR, "The CIDR range of pods in the cluster. It is used to bridge traffic coming from outside of the cluster. If not provided, no off-cluster bridging will be performed.")
fs.BoolVar(&s.CleanupAndExit, "cleanup-iptables", s.CleanupAndExit, "If true cleanup iptables rules and exit.")
fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "ContentType of requests sent to apiserver. Passing application/vnd.kubernetes.protobuf is an experimental feature now.")
fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "Content type of requests sent to apiserver.")
fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
fs.DurationVar(&s.UDPIdleTimeout.Duration, "udp-timeout", s.UDPIdleTimeout.Duration, "How long an idle UDP connection will be kept open (e.g. '250ms', '2s'). Must be greater than 0. Only applicable for proxy-mode=userspace")
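As in the controller manager, the proxy now defaults ContentType to application/vnd.kubernetes.protobuf, and the flag help no longer calls protobuf experimental. A reduced sketch of the client-config fields these flags feed (field names modeled on restclient.Config in this snapshot; treat them as illustrative):

package main

import "fmt"

// restConfig stands in for the content-negotiation fields of
// restclient.Config that the flags above populate.
type restConfig struct {
	ContentType string
	QPS         float32
	Burst       int
}

func main() {
	cfg := restConfig{
		ContentType: "application/vnd.kubernetes.protobuf", // new default
		QPS:         5.0,
		Burst:       10,
	}
	fmt.Printf("%+v\n", cfg)
}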

View File

@ -116,6 +116,7 @@ func NewKubeletServer() *KubeletServer {
NodeLabels: make(map[string]string),
OOMScoreAdj: int32(qos.KubeletOOMScoreAdj),
LockFilePath: "",
ExitOnLockContention: false,
PodInfraContainerImage: GetDefaultPodInfraContainerImage(),
Port: ports.KubeletPort,
ReadOnlyPort: ports.KubeletReadOnlyPort,
@ -135,6 +136,7 @@ func NewKubeletServer() *KubeletServer {
SyncFrequency: unversioned.Duration{Duration: 1 * time.Minute},
SystemCgroups: "",
ReconcileCIDR: true,
ContentType: "application/vnd.kubernetes.protobuf",
KubeAPIQPS: 5.0,
KubeAPIBurst: 10,
ExperimentalFlannelOverlay: experimentalFlannelOverlay,
@ -220,6 +222,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.CgroupRoot, "cgroup-root", s.CgroupRoot, "Optional root cgroup to use for pods. This is handled by the container runtime on a best effort basis. Default: '', which means use the container runtime default.")
fs.StringVar(&s.ContainerRuntime, "container-runtime", s.ContainerRuntime, "The container runtime to use. Possible values: 'docker', 'rkt'. Default: 'docker'.")
fs.StringVar(&s.LockFilePath, "lock-file", s.LockFilePath, "<Warning: Alpha feature> The path to file for kubelet to use as a lock file.")
fs.BoolVar(&s.ExitOnLockContention, "exit-on-lock-contention", s.ExitOnLockContention, "Whether kubelet should exit upon lock-file contention.")
fs.StringVar(&s.RktPath, "rkt-path", s.RktPath, "Path of rkt binary. Leave empty to use the first rkt in $PATH. Only used if --container-runtime='rkt'.")
fs.StringVar(&s.RktAPIEndpoint, "rkt-api-endpoint", s.RktAPIEndpoint, "The endpoint of the rkt API service to communicate with. Only used if --container-runtime='rkt'.")
fs.StringVar(&s.RktStage1Image, "rkt-stage1-image", s.RktStage1Image, "image to use as stage1. Local paths and http/https URLs are supported. If empty, the 'stage1.aci' in the same directory as '--rkt-path' will be used.")
@ -244,7 +247,7 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
fs.Var(&s.SystemReserved, "system-reserved", "A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs that describe resources reserved for non-kubernetes components. Currently only cpu and memory are supported. See http://releases.k8s.io/HEAD/docs/user-guide/compute-resources.md for more detail. [default=none]")
fs.Var(&s.KubeReserved, "kube-reserved", "A set of ResourceName=ResourceQuantity (e.g. cpu=200m,memory=150G) pairs that describe resources reserved for kubernetes system components. Currently only cpu and memory are supported. See http://releases.k8s.io/HEAD/docs/user-guide/compute-resources.md for more detail. [default=none]")
fs.BoolVar(&s.RegisterSchedulable, "register-schedulable", s.RegisterSchedulable, "Register the node as schedulable. No-op if register-node is false. [default=true]")
fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "ContentType of requests sent to apiserver. Passing application/vnd.kubernetes.protobuf is an experimental feature now.")
fs.StringVar(&s.ContentType, "kube-api-content-type", s.ContentType, "Content type of requests sent to apiserver.")
fs.Float32Var(&s.KubeAPIQPS, "kube-api-qps", s.KubeAPIQPS, "QPS to use while talking with kubernetes apiserver")
fs.Int32Var(&s.KubeAPIBurst, "kube-api-burst", s.KubeAPIBurst, "Burst to use while talking with kubernetes apiserver")
fs.BoolVar(&s.SerializeImagePulls, "serialize-image-pulls", s.SerializeImagePulls, "Pull images one at a time. We recommend *not* changing the default value on nodes that run docker daemon with version < 1.9 or an Aufs storage backend. Issue #10959 has more details. [default=true]")
@ -257,4 +260,5 @@ func (s *KubeletServer) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.EvictionSoft, "eviction-soft", s.EvictionSoft, "A set of eviction thresholds (e.g. memory.available<1.5Gi) that if met over a corresponding grace period would trigger a pod eviction.")
fs.StringVar(&s.EvictionSoftGracePeriod, "eviction-soft-grace-period", s.EvictionSoftGracePeriod, "A set of eviction grace periods (e.g. memory.available=1m30s) that correspond to how long a soft eviction threshold must hold before triggering a pod eviction.")
fs.DurationVar(&s.EvictionPressureTransitionPeriod.Duration, "eviction-pressure-transition-period", s.EvictionPressureTransitionPeriod.Duration, "Duration for which the kubelet has to wait before transitioning out of an eviction pressure condition.")
fs.Int32Var(&s.EvictionMaxPodGracePeriod, "eviction-max-pod-grace-period", s.EvictionMaxPodGracePeriod, "Maximum allowed grace period (in seconds) to use when terminating pods in response to a soft eviction threshold being met. If negative, defer to pod specified value.")
}
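The soft-eviction flags work in pairs: a threshold such as memory.available<1.5Gi only triggers eviction once it has held for its matching grace period (memory.available=1m30s), and the new --eviction-max-pod-grace-period caps how long evicted pods get to terminate. An illustrative model of that pairing (the kubelet's real types live in pkg/kubelet/eviction; names here are hypothetical):

package main

import (
	"fmt"
	"time"
)

// softThreshold is a toy model of a soft eviction threshold.
type softThreshold struct {
	signal      string        // e.g. "memory.available"
	gracePeriod time.Duration // e.g. 1m30s
	heldSince   time.Time     // when the signal first crossed the threshold
}

// shouldEvict reports whether the signal has stayed under pressure for
// at least its grace period.
func shouldEvict(t softThreshold, now time.Time) bool {
	return now.Sub(t.heldSince) >= t.gracePeriod
}

func main() {
	t := softThreshold{
		signal:      "memory.available",
		gracePeriod: 90 * time.Second,
		heldSince:   time.Now().Add(-2 * time.Minute),
	}
	fmt.Println(shouldEvict(t, time.Now())) // true: held longer than 1m30s
}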

View File

@ -44,9 +44,9 @@ import (
"k8s.io/kubernetes/pkg/volume/host_path"
"k8s.io/kubernetes/pkg/volume/iscsi"
"k8s.io/kubernetes/pkg/volume/nfs"
"k8s.io/kubernetes/pkg/volume/persistent_claim"
"k8s.io/kubernetes/pkg/volume/rbd"
"k8s.io/kubernetes/pkg/volume/secret"
"k8s.io/kubernetes/pkg/volume/vsphere_volume"
// Cloud providers
_ "k8s.io/kubernetes/pkg/cloudprovider/providers"
)
@ -72,7 +72,6 @@ func ProbeVolumePlugins(pluginDir string) []volume.VolumePlugin {
allPlugins = append(allPlugins, secret.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, iscsi.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, glusterfs.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, persistent_claim.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, rbd.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, cinder.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, cephfs.ProbeVolumePlugins()...)
@ -82,6 +81,7 @@ func ProbeVolumePlugins(pluginDir string) []volume.VolumePlugin {
allPlugins = append(allPlugins, flexvolume.ProbeVolumePlugins(pluginDir)...)
allPlugins = append(allPlugins, azure_file.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, configmap.ProbeVolumePlugins()...)
allPlugins = append(allPlugins, vsphere_volume.ProbeVolumePlugins()...)
return allPlugins
}

View File

@ -19,6 +19,7 @@ package app
import (
"crypto/tls"
"errors"
"fmt"
"math/rand"
"net"
@ -190,6 +191,7 @@ func UnsecuredKubeletConfig(s *options.KubeletServer) (*KubeletConfig, error) {
}
evictionConfig := eviction.Config{
PressureTransitionPeriod: s.EvictionPressureTransitionPeriod.Duration,
MaxPodGracePeriodSeconds: int64(s.EvictionMaxPodGracePeriod),
Thresholds: thresholds,
}
@ -289,11 +291,22 @@ func Run(s *options.KubeletServer, kcfg *KubeletConfig) error {
}
func run(s *options.KubeletServer, kcfg *KubeletConfig) (err error) {
if s.ExitOnLockContention && s.LockFilePath == "" {
return errors.New("cannot exit on lock file contention: no lock file specified")
}
done := make(chan struct{})
if s.LockFilePath != "" {
glog.Infof("acquiring lock on %q", s.LockFilePath)
if err := flock.Acquire(s.LockFilePath); err != nil {
return fmt.Errorf("unable to acquire file lock on %q: %v", s.LockFilePath, err)
}
if s.ExitOnLockContention {
glog.Infof("watching for inotify events for: %v", s.LockFilePath)
if err := watchForLockfileContention(s.LockFilePath, done); err != nil {
return err
}
}
}
if c, err := configz.New("componentconfig"); err == nil {
c.Set(s.KubeletConfiguration)
@ -330,7 +343,7 @@ func run(s *options.KubeletServer, kcfg *KubeletConfig) (err error) {
}
if kcfg.CAdvisorInterface == nil {
kcfg.CAdvisorInterface, err = cadvisor.New(s.CAdvisorPort)
kcfg.CAdvisorInterface, err = cadvisor.New(s.CAdvisorPort, kcfg.ContainerRuntime)
if err != nil {
return err
}
@ -383,8 +396,8 @@ func run(s *options.KubeletServer, kcfg *KubeletConfig) (err error) {
return nil
}
// run forever
select {}
<-done
return nil
}
// InitializeTLS checks for a configured TLSCertFile and TLSPrivateKeyFile: if unspecified a new self-signed
@ -927,10 +940,10 @@ func parseResourceList(m utilconfig.ConfigurationMap) (api.ResourceList, error)
if err != nil {
return nil, err
}
if q.Amount.Sign() == -1 {
if q.Sign() == -1 {
return nil, fmt.Errorf("resource quantity for %q cannot be negative: %v", k, v)
}
rl[api.ResourceName(k)] = *q
rl[api.ResourceName(k)] = q
default:
return nil, fmt.Errorf("cannot reserve %q resource", k)
}

View File

@ -0,0 +1,44 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"github.com/golang/glog"
"golang.org/x/exp/inotify"
)
func watchForLockfileContention(path string, done chan struct{}) error {
watcher, err := inotify.NewWatcher()
if err != nil {
glog.Errorf("unable to create watcher for lockfile: %v", err)
return err
}
if err = watcher.AddWatch(path, inotify.IN_OPEN|inotify.IN_DELETE_SELF); err != nil {
glog.Errorf("unable to watch lockfile: %v", err)
return err
}
go func() {
select {
case ev := <-watcher.Event:
glog.Infof("inotify event: %v", ev)
case err = <-watcher.Error:
glog.Errorf("inotify watcher error: %v", err)
}
close(done)
}()
return nil
}
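With this watcher in place, run() above can block on <-done instead of select{}: closing done when the lock file sees IN_OPEN or IN_DELETE_SELF unwinds the kubelet cleanly. A self-contained sketch of the same shutdown shape, with the flock/inotify parts stubbed out:

package main

import (
	"fmt"
	"time"
)

// watchStub simulates watchForLockfileContention: it closes done when a
// contention "event" arrives on events.
func watchStub(events <-chan struct{}, done chan struct{}) {
	go func() {
		<-events
		close(done)
	}()
}

func main() {
	done := make(chan struct{})
	events := make(chan struct{})
	watchStub(events, done)

	go func() { // pretend another process opens the lock file
		time.Sleep(50 * time.Millisecond)
		events <- struct{}{}
	}()

	<-done // where run() used to sit in select{}
	fmt.Println("lock contention observed; exiting")
}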

View File

@ -0,0 +1,25 @@
// +build !linux
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import "errors"
func watchForLockfileContention(path string, done chan struct{}) error {
return errors.New("kubelet unsupported in this build")
}

View File

@ -175,9 +175,12 @@ func init() {
DeepCopy_api_ServiceSpec,
DeepCopy_api_ServiceStatus,
DeepCopy_api_TCPSocketAction,
DeepCopy_api_Taint,
DeepCopy_api_Toleration,
DeepCopy_api_Volume,
DeepCopy_api_VolumeMount,
DeepCopy_api_VolumeSource,
DeepCopy_api_VsphereVirtualDiskVolumeSource,
DeepCopy_api_WeightedPodAffinityTerm,
); err != nil {
// if one of the deep copy functions is malformed, detect it immediately.
@ -1585,6 +1588,8 @@ func DeepCopy_api_NodeSystemInfo(in NodeSystemInfo, out *NodeSystemInfo, c *conv
out.ContainerRuntimeVersion = in.ContainerRuntimeVersion
out.KubeletVersion = in.KubeletVersion
out.KubeProxyVersion = in.KubeProxyVersion
out.OperatingSystem = in.OperatingSystem
out.Architecture = in.Architecture
return nil
}
@ -1921,6 +1926,15 @@ func DeepCopy_api_PersistentVolumeSource(in PersistentVolumeSource, out *Persist
} else {
out.AzureFile = nil
}
if in.VsphereVolume != nil {
in, out := in.VsphereVolume, &out.VsphereVolume
*out = new(VsphereVirtualDiskVolumeSource)
if err := DeepCopy_api_VsphereVirtualDiskVolumeSource(*in, *out, c); err != nil {
return err
}
} else {
out.VsphereVolume = nil
}
return nil
}
@ -2230,6 +2244,17 @@ func DeepCopy_api_PodSpec(in PodSpec, out *PodSpec, c *conversion.Cloner) error
} else {
out.Volumes = nil
}
if in.InitContainers != nil {
in, out := in.InitContainers, &out.InitContainers
*out = make([]Container, len(in))
for i := range in {
if err := DeepCopy_api_Container(in[i], &(*out)[i], c); err != nil {
return err
}
}
} else {
out.InitContainers = nil
}
if in.Containers != nil {
in, out := in.Containers, &out.Containers
*out = make([]Container, len(in))
@ -2319,6 +2344,17 @@ func DeepCopy_api_PodStatus(in PodStatus, out *PodStatus, c *conversion.Cloner)
} else {
out.StartTime = nil
}
if in.InitContainerStatuses != nil {
in, out := in.InitContainerStatuses, &out.InitContainerStatuses
*out = make([]ContainerStatus, len(in))
for i := range in {
if err := DeepCopy_api_ContainerStatus(in[i], &(*out)[i], c); err != nil {
return err
}
}
} else {
out.InitContainerStatuses = nil
}
if in.ContainerStatuses != nil {
in, out := in.ContainerStatuses, &out.ContainerStatuses
*out = make([]ContainerStatus, len(in))
@ -2722,6 +2758,17 @@ func DeepCopy_api_SecretList(in SecretList, out *SecretList, c *conversion.Clone
func DeepCopy_api_SecretVolumeSource(in SecretVolumeSource, out *SecretVolumeSource, c *conversion.Cloner) error {
out.SecretName = in.SecretName
if in.Items != nil {
in, out := in.Items, &out.Items
*out = make([]KeyToPath, len(in))
for i := range in {
if err := DeepCopy_api_KeyToPath(in[i], &(*out)[i], c); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
@ -2943,6 +2990,21 @@ func DeepCopy_api_TCPSocketAction(in TCPSocketAction, out *TCPSocketAction, c *c
return nil
}
func DeepCopy_api_Taint(in Taint, out *Taint, c *conversion.Cloner) error {
out.Key = in.Key
out.Value = in.Value
out.Effect = in.Effect
return nil
}
func DeepCopy_api_Toleration(in Toleration, out *Toleration, c *conversion.Cloner) error {
out.Key = in.Key
out.Operator = in.Operator
out.Value = in.Value
out.Effect = in.Effect
return nil
}
func DeepCopy_api_Volume(in Volume, out *Volume, c *conversion.Cloner) error {
out.Name = in.Name
if err := DeepCopy_api_VolumeSource(in.VolumeSource, &out.VolumeSource, c); err != nil {
@ -3131,6 +3193,21 @@ func DeepCopy_api_VolumeSource(in VolumeSource, out *VolumeSource, c *conversion
} else {
out.ConfigMap = nil
}
if in.VsphereVolume != nil {
in, out := in.VsphereVolume, &out.VsphereVolume
*out = new(VsphereVirtualDiskVolumeSource)
if err := DeepCopy_api_VsphereVirtualDiskVolumeSource(*in, *out, c); err != nil {
return err
}
} else {
out.VsphereVolume = nil
}
return nil
}
func DeepCopy_api_VsphereVirtualDiskVolumeSource(in VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, c *conversion.Cloner) error {
out.VolumePath = in.VolumePath
out.FSType = in.FSType
return nil
}
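All of these generated deep copies follow one idiom: nil in, nil out; otherwise allocate a fresh destination and copy (or recurse into) each element so the copy shares no memory with the source. The slice case, distilled into a stand-alone sketch:

package main

import "fmt"

type container struct{ Name string }

// deepCopyContainers mirrors the generated pattern used above for
// InitContainers/ContainerStatuses: preserve nil, otherwise allocate
// and copy element-wise.
func deepCopyContainers(in []container) []container {
	if in == nil {
		return nil
	}
	out := make([]container, len(in))
	copy(out, in) // the element type holds no pointers, so copy suffices
	return out
}

func main() {
	src := []container{{Name: "init"}}
	dst := deepCopyContainers(src)
	dst[0].Name = "changed"
	fmt.Println(src[0].Name, dst[0].Name) // init changed
}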

View File

@ -93,7 +93,7 @@ func FromObject(obj runtime.Object) error {
}
// NewNotFound returns a new error which indicates that the resource of the kind and the name was not found.
func NewNotFound(qualifiedResource unversioned.GroupResource, name string) error {
func NewNotFound(qualifiedResource unversioned.GroupResource, name string) *StatusError {
return &StatusError{unversioned.Status{
Status: unversioned.StatusFailure,
Code: http.StatusNotFound,
@ -108,7 +108,7 @@ func NewNotFound(qualifiedResource unversioned.GroupResource, name string) error
}
// NewAlreadyExists returns an error indicating the item requested exists by that identifier.
func NewAlreadyExists(qualifiedResource unversioned.GroupResource, name string) error {
func NewAlreadyExists(qualifiedResource unversioned.GroupResource, name string) *StatusError {
return &StatusError{unversioned.Status{
Status: unversioned.StatusFailure,
Code: http.StatusConflict,
@ -124,7 +124,7 @@ func NewAlreadyExists(qualifiedResource unversioned.GroupResource, name string)
// NewUnauthorized returns an error indicating the client is not authorized to perform the requested
// action.
func NewUnauthorized(reason string) error {
func NewUnauthorized(reason string) *StatusError {
message := reason
if len(message) == 0 {
message = "not authorized"
@ -138,7 +138,7 @@ func NewUnauthorized(reason string) error {
}
// NewForbidden returns an error indicating the requested action was forbidden
func NewForbidden(qualifiedResource unversioned.GroupResource, name string, err error) error {
func NewForbidden(qualifiedResource unversioned.GroupResource, name string, err error) *StatusError {
return &StatusError{unversioned.Status{
Status: unversioned.StatusFailure,
Code: http.StatusForbidden,
@ -153,7 +153,7 @@ func NewForbidden(qualifiedResource unversioned.GroupResource, name string, err
}
// NewConflict returns an error indicating the item can't be updated as provided.
func NewConflict(qualifiedResource unversioned.GroupResource, name string, err error) error {
func NewConflict(qualifiedResource unversioned.GroupResource, name string, err error) *StatusError {
return &StatusError{unversioned.Status{
Status: unversioned.StatusFailure,
Code: http.StatusConflict,
@ -168,7 +168,7 @@ func NewConflict(qualifiedResource unversioned.GroupResource, name string, err e
}
// NewGone returns an error indicating the item no longer available at the server and no forwarding address is known.
func NewGone(message string) error {
func NewGone(message string) *StatusError {
return &StatusError{unversioned.Status{
Status: unversioned.StatusFailure,
Code: http.StatusGone,
@ -178,7 +178,7 @@ func NewGone(message string) error {
}
// NewInvalid returns an error indicating the item is invalid and cannot be processed.
func NewInvalid(qualifiedKind unversioned.GroupKind, name string, errs field.ErrorList) error {
func NewInvalid(qualifiedKind unversioned.GroupKind, name string, errs field.ErrorList) *StatusError {
causes := make([]unversioned.StatusCause, 0, len(errs))
for i := range errs {
err := errs[i]
@ -203,7 +203,7 @@ func NewInvalid(qualifiedKind unversioned.GroupKind, name string, errs field.Err
}
// NewBadRequest creates an error that indicates that the request is invalid and can not be processed.
func NewBadRequest(reason string) error {
func NewBadRequest(reason string) *StatusError {
return &StatusError{unversioned.Status{
Status: unversioned.StatusFailure,
Code: http.StatusBadRequest,
@ -213,7 +213,7 @@ func NewBadRequest(reason string) error {
}
// NewServiceUnavailable creates an error that indicates that the requested service is unavailable.
func NewServiceUnavailable(reason string) error {
func NewServiceUnavailable(reason string) *StatusError {
return &StatusError{unversioned.Status{
Status: unversioned.StatusFailure,
Code: http.StatusServiceUnavailable,
@ -223,7 +223,7 @@ func NewServiceUnavailable(reason string) error {
}
// NewMethodNotSupported returns an error indicating the requested action is not supported on this kind.
func NewMethodNotSupported(qualifiedResource unversioned.GroupResource, action string) error {
func NewMethodNotSupported(qualifiedResource unversioned.GroupResource, action string) *StatusError {
return &StatusError{unversioned.Status{
Status: unversioned.StatusFailure,
Code: http.StatusMethodNotAllowed,
@ -238,7 +238,7 @@ func NewMethodNotSupported(qualifiedResource unversioned.GroupResource, action s
// NewServerTimeout returns an error indicating the requested action could not be completed due to a
// transient error, and the client should try again.
func NewServerTimeout(qualifiedResource unversioned.GroupResource, operation string, retryAfterSeconds int) error {
func NewServerTimeout(qualifiedResource unversioned.GroupResource, operation string, retryAfterSeconds int) *StatusError {
return &StatusError{unversioned.Status{
Status: unversioned.StatusFailure,
Code: http.StatusInternalServerError,
@ -255,12 +255,12 @@ func NewServerTimeout(qualifiedResource unversioned.GroupResource, operation str
// NewServerTimeoutForKind should not exist. Server timeouts happen when accessing resources, the Kind is just what we
// happened to be looking at when the request failed. This delegates to keep code sane, but we should work towards removing this.
func NewServerTimeoutForKind(qualifiedKind unversioned.GroupKind, operation string, retryAfterSeconds int) error {
func NewServerTimeoutForKind(qualifiedKind unversioned.GroupKind, operation string, retryAfterSeconds int) *StatusError {
return NewServerTimeout(unversioned.GroupResource{Group: qualifiedKind.Group, Resource: qualifiedKind.Kind}, operation, retryAfterSeconds)
}
// NewInternalError returns an error indicating the item is invalid and cannot be processed.
func NewInternalError(err error) error {
func NewInternalError(err error) *StatusError {
return &StatusError{unversioned.Status{
Status: unversioned.StatusFailure,
Code: http.StatusInternalServerError,
@ -274,7 +274,7 @@ func NewInternalError(err error) error {
// NewTimeoutError returns an error indicating that a timeout occurred before the request
// could be completed. Clients may retry, but the operation may still complete.
func NewTimeoutError(message string, retryAfterSeconds int) error {
func NewTimeoutError(message string, retryAfterSeconds int) *StatusError {
return &StatusError{unversioned.Status{
Status: unversioned.StatusFailure,
Code: StatusServerTimeout,
@ -287,7 +287,7 @@ func NewTimeoutError(message string, retryAfterSeconds int) error {
}
// NewGenericServerResponse returns a new error for server responses that are not in a recognizable form.
func NewGenericServerResponse(code int, verb string, qualifiedResource unversioned.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) error {
func NewGenericServerResponse(code int, verb string, qualifiedResource unversioned.GroupResource, name, serverMessage string, retryAfterSeconds int, isUnexpectedResponse bool) *StatusError {
reason := unversioned.StatusReasonUnknown
message := fmt.Sprintf("the server responded with the status code %d but did not return more information", code)
switch code {
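Every constructor in this file already allocates a *StatusError, so returning the concrete type instead of error costs nothing and lets callers inspect or adjust the Status before handing it on. A reduced sketch of the benefit (types cut down from unversioned.Status and errors.StatusError):

package main

import "fmt"

// Reduced stand-ins for unversioned.Status and errors.StatusError.
type status struct {
	Code    int
	Message string
}

type statusError struct{ ErrStatus status }

func (e *statusError) Error() string { return e.ErrStatus.Message }

// newNotFound returns the concrete type, as the changed signatures do.
func newNotFound(name string) *statusError {
	return &statusError{ErrStatus: status{Code: 404, Message: name + " not found"}}
}

func main() {
	err := newNotFound("pods/foo")
	err.ErrStatus.Message += " (check the namespace)" // mutable before returning
	var e error = err                                 // still satisfies error
	fmt.Println(e)
}

One subtlety of concrete error returns: a nil *statusError stored in an error variable compares non-nil, so callers that forward these values into an error should take care.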

View File

@ -58,16 +58,7 @@ var Semantic = conversion.EqualitiesOrDie(
// TODO: if we decide it's important, it should be safe to start comparing the format.
//
// Uninitialized quantities are equivalent to 0 quantities.
if a.Amount == nil && b.MilliValue() == 0 {
return true
}
if b.Amount == nil && a.MilliValue() == 0 {
return true
}
if a.Amount == nil || b.Amount == nil {
return false
}
return a.Amount.Cmp(b.Amount) == 0
return a.Cmp(b) == 0
},
func(a, b unversioned.Time) bool {
return a.UTC() == b.UTC()
@ -411,9 +402,19 @@ func NodeSelectorRequirementsAsSelector(nsm []NodeSelectorRequirement) (labels.S
return selector, nil
}
// AffinityAnnotationKey represents the key of affinity data (json serialized)
// in the Annotations of a Pod.
const AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity"
const (
// AffinityAnnotationKey represents the key of affinity data (json serialized)
// in the Annotations of a Pod.
AffinityAnnotationKey string = "scheduler.alpha.kubernetes.io/affinity"
// TolerationsAnnotationKey represents the key of tolerations data (json serialized)
// in the Annotations of a Pod.
TolerationsAnnotationKey string = "scheduler.alpha.kubernetes.io/tolerations"
// TaintsAnnotationKey represents the key of taints data (json serialized)
// in the Annotations of a Node.
TaintsAnnotationKey string = "scheduler.alpha.kubernetes.io/taints"
)
// GetAffinityFromPodAnnotations gets the json serialized affinity data from Pod.Annotations
// and converts it to the Affinity type in api.
@ -427,3 +428,61 @@ func GetAffinityFromPodAnnotations(annotations map[string]string) (Affinity, err
}
return affinity, nil
}
// GetTolerationsFromPodAnnotations gets the json serialized tolerations data from Pod.Annotations
// and converts it to the []Toleration type in api.
func GetTolerationsFromPodAnnotations(annotations map[string]string) ([]Toleration, error) {
var tolerations []Toleration
if len(annotations) > 0 && annotations[TolerationsAnnotationKey] != "" {
err := json.Unmarshal([]byte(annotations[TolerationsAnnotationKey]), &tolerations)
if err != nil {
return tolerations, err
}
}
return tolerations, nil
}
// GetTaintsFromNodeAnnotations gets the json serialized taints data from Node.Annotations
// and converts it to the []Taint type in api.
func GetTaintsFromNodeAnnotations(annotations map[string]string) ([]Taint, error) {
var taints []Taint
if len(annotations) > 0 && annotations[TaintsAnnotationKey] != "" {
err := json.Unmarshal([]byte(annotations[TaintsAnnotationKey]), &taints)
if err != nil {
return []Taint{}, err
}
}
return taints, nil
}
// TolerationToleratesTaint checks if the toleration tolerates the taint.
func TolerationToleratesTaint(toleration Toleration, taint Taint) bool {
if len(toleration.Effect) != 0 && toleration.Effect != taint.Effect {
return false
}
if toleration.Key != taint.Key {
return false
}
// TODO: Use proper defaulting when Toleration becomes a field of PodSpec
if (len(toleration.Operator) == 0 || toleration.Operator == TolerationOpEqual) && toleration.Value == taint.Value {
return true
}
if toleration.Operator == TolerationOpExists {
return true
}
return false
}
// TaintToleratedByTolerations checks if taint is tolerated by any of the tolerations.
func TaintToleratedByTolerations(taint Taint, tolerations []Toleration) bool {
tolerated := false
for _, toleration := range tolerations {
if TolerationToleratesTaint(toleration, taint) {
tolerated = true
break
}
}
return tolerated
}
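Reading TolerationToleratesTaint: the keys must match, an empty toleration effect matches any taint effect, and an empty operator defaults to Equal. A worked example with cut-down stand-ins for api.Taint and api.Toleration:

package main

import "fmt"

type taint struct{ Key, Value, Effect string }
type toleration struct{ Key, Operator, Value, Effect string }

// tolerates mirrors TolerationToleratesTaint above: an empty effect is
// a wildcard and an empty operator behaves like Equal.
func tolerates(tol toleration, t taint) bool {
	if tol.Effect != "" && tol.Effect != t.Effect {
		return false
	}
	if tol.Key != t.Key {
		return false
	}
	if (tol.Operator == "" || tol.Operator == "Equal") && tol.Value == t.Value {
		return true
	}
	return tol.Operator == "Exists"
}

func main() {
	t := taint{Key: "dedicated", Value: "gpu", Effect: "NoSchedule"}
	fmt.Println(tolerates(toleration{Key: "dedicated", Value: "gpu"}, t))       // true (Equal default)
	fmt.Println(tolerates(toleration{Key: "dedicated", Operator: "Exists"}, t)) // true (value ignored)
	fmt.Println(tolerates(toleration{Key: "other", Operator: "Exists"}, t))     // false (key mismatch)
}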

View File

@ -17,6 +17,7 @@ limitations under the License.
package api
import (
"k8s.io/kubernetes/pkg/api/meta/metatypes"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/runtime"
@ -89,3 +90,25 @@ func (meta *ObjectMeta) GetLabels() map[string]string { return m
func (meta *ObjectMeta) SetLabels(labels map[string]string) { meta.Labels = labels }
func (meta *ObjectMeta) GetAnnotations() map[string]string { return meta.Annotations }
func (meta *ObjectMeta) SetAnnotations(annotations map[string]string) { meta.Annotations = annotations }
func (meta *ObjectMeta) GetOwnerReferences() []metatypes.OwnerReference {
ret := make([]metatypes.OwnerReference, len(meta.OwnerReferences))
for i := 0; i < len(meta.OwnerReferences); i++ {
ret[i].Kind = meta.OwnerReferences[i].Kind
ret[i].Name = meta.OwnerReferences[i].Name
ret[i].UID = meta.OwnerReferences[i].UID
ret[i].APIVersion = meta.OwnerReferences[i].APIVersion
}
return ret
}
func (meta *ObjectMeta) SetOwnerReferences(references []metatypes.OwnerReference) {
newReferences := make([]OwnerReference, len(references))
for i := 0; i < len(references); i++ {
newReferences[i].Kind = references[i].Kind
newReferences[i].Name = references[i].Name
newReferences[i].UID = references[i].UID
newReferences[i].APIVersion = references[i].APIVersion
}
meta.OwnerReferences = newReferences
}

View File

@ -17,6 +17,7 @@ limitations under the License.
package meta
import (
"k8s.io/kubernetes/pkg/api/meta/metatypes"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/runtime"
"k8s.io/kubernetes/pkg/types"
@ -57,6 +58,14 @@ type Object interface {
SetLabels(labels map[string]string)
GetAnnotations() map[string]string
SetAnnotations(annotations map[string]string)
GetOwnerReferences() []metatypes.OwnerReference
SetOwnerReferences([]metatypes.OwnerReference)
}
var _ Object = &runtime.Unstructured{}
type ListMetaAccessor interface {
GetListMeta() List
}
// List lets you work with list metadata from any of the versioned or
@ -177,5 +186,3 @@ type RESTMapper interface {
AliasesForResource(resource string) ([]string, bool)
ResourceSingularizer(resource string) (singular string, err error)
}
var _ Object = &runtime.Unstructured{}

View File

@ -20,6 +20,7 @@ import (
"fmt"
"reflect"
"k8s.io/kubernetes/pkg/api/meta/metatypes"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/runtime"
@ -28,19 +29,53 @@ import (
"github.com/golang/glog"
)
func ListAccessor(obj interface{}) (List, error) {
if listMetaAccessor, ok := obj.(ListMetaAccessor); ok {
if om := listMetaAccessor.GetListMeta(); om != nil {
return om, nil
}
}
// we may get passed an object that is directly portable to List
if list, ok := obj.(List); ok {
return list, nil
}
glog.V(4).Infof("Calling ListAccessor on non-internal object: %v", reflect.TypeOf(obj))
// legacy path for objects that do not implement List and ListMetaAccessor via
// reflection - very slow code path.
v, err := conversion.EnforcePtr(obj)
if err != nil {
return nil, err
}
t := v.Type()
if v.Kind() != reflect.Struct {
return nil, fmt.Errorf("expected struct, but got %v: %v (%#v)", v.Kind(), t, v.Interface())
}
a := &genericAccessor{}
listMeta := v.FieldByName("ListMeta")
if listMeta.IsValid() {
// look for the ListMeta fields
if err := extractFromListMeta(listMeta, a); err != nil {
return nil, fmt.Errorf("unable to find list fields on %#v: %v", listMeta, err)
}
} else {
return nil, fmt.Errorf("unable to find listMeta on %#v", v)
}
return a, nil
}
// Accessor takes an arbitrary object pointer and returns meta.Interface.
// obj must be a pointer to an API type. An error is returned if the minimum
// required fields are missing. Fields that are not required return the default
// value and are a no-op if set.
func Accessor(obj interface{}) (Object, error) {
if oi, ok := obj.(ObjectMetaAccessor); ok {
if om := oi.GetObjectMeta(); om != nil {
if objectMetaAccessor, ok := obj.(ObjectMetaAccessor); ok {
if om := objectMetaAccessor.GetObjectMeta(); om != nil {
return om, nil
}
}
// we may get passed an object that is directly portable to Object
if oi, ok := obj.(Object); ok {
return oi, nil
if object, ok := obj.(Object); ok {
return object, nil
}
glog.V(4).Infof("Calling Accessor on non-internal object: %v", reflect.TypeOf(obj))
@ -310,6 +345,40 @@ func (resourceAccessor) SetResourceVersion(obj runtime.Object, version string) e
return nil
}
// extractFromOwnerReference extracts v to o. v is the OwnerReferences field of an object.
func extractFromOwnerReference(v reflect.Value, o *metatypes.OwnerReference) error {
if err := runtime.Field(v, "APIVersion", &o.APIVersion); err != nil {
return err
}
if err := runtime.Field(v, "Kind", &o.Kind); err != nil {
return err
}
if err := runtime.Field(v, "Name", &o.Name); err != nil {
return err
}
if err := runtime.Field(v, "UID", &o.UID); err != nil {
return err
}
return nil
}
// setOwnerReference sets v to o. v is the OwnerReferences field of an object.
func setOwnerReference(v reflect.Value, o *metatypes.OwnerReference) error {
if err := runtime.SetField(o.APIVersion, v, "APIVersion"); err != nil {
return err
}
if err := runtime.SetField(o.Kind, v, "Kind"); err != nil {
return err
}
if err := runtime.SetField(o.Name, v, "Name"); err != nil {
return err
}
if err := runtime.SetField(o.UID, v, "UID"); err != nil {
return err
}
return nil
}
// genericAccessor contains pointers to strings that can modify an arbitrary
// struct and implements the Accessor interface.
type genericAccessor struct {
@ -325,6 +394,7 @@ type genericAccessor struct {
deletionTimestamp **unversioned.Time
labels *map[string]string
annotations *map[string]string
ownerReferences reflect.Value
}
func (a genericAccessor) GetNamespace() string {
@ -457,6 +527,41 @@ func (a genericAccessor) SetAnnotations(annotations map[string]string) {
*a.annotations = annotations
}
func (a genericAccessor) GetOwnerReferences() []metatypes.OwnerReference {
var ret []metatypes.OwnerReference
s := a.ownerReferences
if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice {
glog.Errorf("expect %v to be a pointer to slice", s)
return ret
}
s = s.Elem()
// Set the capacity to one element greater to avoid a copy if the caller later appends an element.
ret = make([]metatypes.OwnerReference, s.Len(), s.Len()+1)
for i := 0; i < s.Len(); i++ {
if err := extractFromOwnerReference(s.Index(i), &ret[i]); err != nil {
glog.Errorf("extractFromOwnerReference failed: %v", err)
return ret
}
}
return ret
}
func (a genericAccessor) SetOwnerReferences(references []metatypes.OwnerReference) {
s := a.ownerReferences
if s.Kind() != reflect.Ptr || s.Elem().Kind() != reflect.Slice {
glog.Errorf("expect %v to be a pointer to slice", s)
}
s = s.Elem()
newReferences := reflect.MakeSlice(s.Type(), len(references), len(references))
for i := 0; i < len(references); i++ {
if err := setOwnerReference(newReferences.Index(i), &references[i]); err != nil {
glog.Errorf("setOwnerReference failed: %v", err)
return
}
}
s.Set(newReferences)
}
// extractFromTypeMeta extracts pointers to version and kind fields from an object
func extractFromTypeMeta(v reflect.Value, a *genericAccessor) error {
if err := runtime.FieldPtr(v, "APIVersion", &a.apiVersion); err != nil {
@ -494,6 +599,14 @@ func extractFromObjectMeta(v reflect.Value, a *genericAccessor) error {
if err := runtime.FieldPtr(v, "Annotations", &a.annotations); err != nil {
return err
}
ownerReferences := v.FieldByName("OwnerReferences")
if !ownerReferences.IsValid() {
return fmt.Errorf("struct %#v lacks OwnerReferences type", v)
}
if ownerReferences.Kind() != reflect.Slice {
return fmt.Errorf("expect %v to be a slice", ownerReferences.Kind())
}
a.ownerReferences = ownerReferences.Addr()
return nil
}
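With owner references now part of the meta.Object interface (and of the reflection-based genericAccessor), generic code can read or set them without knowing the concrete type. A hedged usage sketch against the vendored import paths in this snapshot:

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	"k8s.io/kubernetes/pkg/api/meta"
)

func main() {
	pod := &api.Pod{}
	// Accessor takes the fast ObjectMetaAccessor path when available and
	// otherwise falls back to the reflection path shown above.
	accessor, err := meta.Accessor(pod)
	if err != nil {
		panic(err)
	}
	for _, ref := range accessor.GetOwnerReferences() {
		fmt.Println(ref.Kind, ref.Name)
	}
}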

View File

@ -0,0 +1,33 @@
// +build !ignore_autogenerated
/*
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// This file was autogenerated by deepcopy-gen. Do not edit it manually!
package metatypes
import (
conversion "k8s.io/kubernetes/pkg/conversion"
)
func DeepCopy_metatypes_OwnerReference(in OwnerReference, out *OwnerReference, c *conversion.Cloner) error {
out.APIVersion = in.APIVersion
out.Kind = in.Kind
out.UID = in.UID
out.Name = in.Name
return nil
}

View File

@ -0,0 +1,29 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// The types defined in this package are used by the meta package to represent
// the in-memory version of objects. We cannot reuse the __internal version of
// API objects because that would cause an import cycle.
package metatypes
import "k8s.io/kubernetes/pkg/types"
type OwnerReference struct {
APIVersion string
Kind string
UID types.UID
Name string
}

vendor/k8s.io/kubernetes/pkg/api/resource/amount.go (new file, 298 lines, generated, vendored)
View File

@ -0,0 +1,298 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
"math/big"
"strconv"
inf "gopkg.in/inf.v0"
)
// Scale is used for getting and setting the base-10 scaled value.
// Base-2 scales are omitted for mathematical simplicity.
// See Quantity.ScaledValue for more details.
type Scale int32
// infScale adapts a Scale value to an inf.Scale value.
func (s Scale) infScale() inf.Scale {
return inf.Scale(-s) // inf.Scale is upside-down
}
const (
Nano Scale = -9
Micro Scale = -6
Milli Scale = -3
Kilo Scale = 3
Mega Scale = 6
Giga Scale = 9
Tera Scale = 12
Peta Scale = 15
Exa Scale = 18
)
var (
Zero = int64Amount{}
// Used by quantity strings - treat as read only
zeroBytes = []byte("0")
)
// int64Amount represents a fixed precision numerator and arbitrary scale exponent. It is faster
// than operations on inf.Dec for values that can be represented as int64.
type int64Amount struct {
value int64
scale Scale
}
// Sign returns 0 if the value is zero, -1 if it is less than 0, or 1 if it is greater than 0.
func (a int64Amount) Sign() int {
switch {
case a.value == 0:
return 0
case a.value > 0:
return 1
default:
return -1
}
}
// AsInt64 returns the current amount as an int64 at scale 0, or false if the value cannot be
// represented in an int64 OR would result in a loss of precision. This method is intended as
// an optimization to avoid calling AsDec.
func (a int64Amount) AsInt64() (int64, bool) {
if a.scale == 0 {
return a.value, true
}
if a.scale < 0 {
// TODO: attempt to reduce factors, although it is assumed that factors are reduced prior
// to the int64Amount being created.
return 0, false
}
return positiveScaleInt64(a.value, a.scale)
}
// AsScaledInt64 returns an int64 representing the value of this amount at the specified scale,
// rounding up, or false if that would result in overflow. (1e20).AsScaledInt64(1) would result
// in overflow because 1e19 is not representable as an int64. Note that setting a scale larger
// than the current value may result in loss of precision - i.e. (1e-6).AsScaledInt64(0) would
// return 1, because 0.000001 is rounded up to 1.
func (a int64Amount) AsScaledInt64(scale Scale) (result int64, ok bool) {
if a.scale < scale {
result, _ = negativeScaleInt64(a.value, scale-a.scale)
return result, true
}
return positiveScaleInt64(a.value, a.scale-scale)
}
// AsDec returns an inf.Dec representation of this value.
func (a int64Amount) AsDec() *inf.Dec {
var base inf.Dec
base.SetUnscaled(a.value)
base.SetScale(inf.Scale(-a.scale))
return &base
}
// Cmp returns 0 if a and b are equal, 1 if a is greater than b, or -1 if a is less than b.
func (a int64Amount) Cmp(b int64Amount) int {
switch {
case a.scale == b.scale:
// compare only the unscaled portion
case a.scale > b.scale:
result, remainder, exact := divideByScaleInt64(b.value, a.scale-b.scale)
if !exact {
return a.AsDec().Cmp(b.AsDec())
}
if result == a.value {
switch {
case remainder == 0:
return 0
case remainder > 0:
return -1
default:
return 1
}
}
b.value = result
default:
result, remainder, exact := divideByScaleInt64(a.value, b.scale-a.scale)
if !exact {
return a.AsDec().Cmp(b.AsDec())
}
if result == b.value {
switch {
case remainder == 0:
return 0
case remainder > 0:
return 1
default:
return -1
}
}
a.value = result
}
switch {
case a.value == b.value:
return 0
case a.value < b.value:
return -1
default:
return 1
}
}
// Add adds two int64Amounts together, matching scales. It will return false and not mutate
// a if overflow or underflow would result.
func (a *int64Amount) Add(b int64Amount) bool {
switch {
case b.value == 0:
return true
case a.value == 0:
a.value = b.value
a.scale = b.scale
return true
case a.scale == b.scale:
c, ok := int64Add(a.value, b.value)
if !ok {
return false
}
a.value = c
case a.scale > b.scale:
c, ok := positiveScaleInt64(a.value, a.scale-b.scale)
if !ok {
return false
}
c, ok = int64Add(c, b.value)
if !ok {
return false
}
a.scale = b.scale
a.value = c
default:
c, ok := positiveScaleInt64(b.value, b.scale-a.scale)
if !ok {
return false
}
c, ok = int64Add(a.value, c)
if !ok {
return false
}
a.value = c
}
return true
}
// Sub removes the value of b from the current amount, or returns false if underflow would result.
func (a *int64Amount) Sub(b int64Amount) bool {
return a.Add(int64Amount{value: -b.value, scale: b.scale})
}
// AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision
// was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6.
func (a int64Amount) AsScale(scale Scale) (int64Amount, bool) {
if a.scale >= scale {
return a, true
}
result, exact := negativeScaleInt64(a.value, scale-a.scale)
return int64Amount{value: result, scale: scale}, exact
}
// AsCanonicalBytes accepts a buffer to write the base-10 string value of this field to, and returns
// either that buffer or a larger buffer and the current exponent of the value. The value is adjusted
// until the exponent is a multiple of 3 - i.e. 1.1e5 would return "110", 3.
func (a int64Amount) AsCanonicalBytes(out []byte) (result []byte, exponent int32) {
mantissa := a.value
exponent = int32(a.scale)
amount, times := removeInt64Factors(mantissa, 10)
exponent += int32(times)
// make sure exponent is a multiple of 3
var ok bool
switch exponent % 3 {
case 1, -2:
amount, ok = int64MultiplyScale10(amount)
if !ok {
return infDecAmount{a.AsDec()}.AsCanonicalBytes(out)
}
exponent = exponent - 1
case 2, -1:
amount, ok = int64MultiplyScale100(amount)
if !ok {
return infDecAmount{a.AsDec()}.AsCanonicalBytes(out)
}
exponent = exponent - 2
}
return strconv.AppendInt(out, amount, 10), exponent
}
// AsCanonicalBase1024Bytes accepts a buffer to write the base-1024 string value of this field to, and returns
// either that buffer or a larger buffer and the current exponent of the value. 2048 is 2 * 1024 ^ 1 and would
// return []byte("2048"), 1.
func (a int64Amount) AsCanonicalBase1024Bytes(out []byte) (result []byte, exponent int32) {
value, ok := a.AsScaledInt64(0)
if !ok {
return infDecAmount{a.AsDec()}.AsCanonicalBase1024Bytes(out)
}
amount, exponent := removeInt64Factors(value, 1024)
return strconv.AppendInt(out, amount, 10), exponent
}
// infDecAmount implements common operations over an inf.Dec that are specific to the quantity
// representation.
type infDecAmount struct {
*inf.Dec
}
// AsScale adjusts this amount to set a minimum scale, rounding up, and returns true iff no precision
// was lost. (1.1e5).AsScale(5) would return 1.1e5, but (1.1e5).AsScale(6) would return 1e6.
func (a infDecAmount) AsScale(scale Scale) (infDecAmount, bool) {
tmp := &inf.Dec{}
tmp.Round(a.Dec, scale.infScale(), inf.RoundUp)
return infDecAmount{tmp}, tmp.Cmp(a.Dec) == 0
}
// AsCanonicalBytes accepts a buffer to write the base-10 string value of this field to, and returns
// either that buffer or a larger buffer and the current exponent of the value. The value is adjusted
// until the exponent is a multiple of 3 - i.e. 1.1e5 would return "110", 3.
func (a infDecAmount) AsCanonicalBytes(out []byte) (result []byte, exponent int32) {
mantissa := a.Dec.UnscaledBig()
exponent = int32(-a.Dec.Scale())
amount := big.NewInt(0).Set(mantissa)
// move all factors of 10 into the exponent for easy reasoning
amount, times := removeBigIntFactors(amount, bigTen)
exponent += times
// make sure exponent is a multiple of 3
for exponent%3 != 0 {
amount.Mul(amount, bigTen)
exponent--
}
return append(out, amount.String()...), exponent
}
// AsCanonicalBase1024Bytes accepts a buffer to write the base-1024 string value of this field to, and returns
// either that buffer or a larger buffer and the current exponent of the value. 2048 is 2 * 1024 ^ 1 and would
// return []byte("2048"), 1.
func (a infDecAmount) AsCanonicalBase1024Bytes(out []byte) (result []byte, exponent int32) {
tmp := &inf.Dec{}
tmp.Round(a.Dec, 0, inf.RoundUp)
amount, exponent := removeBigIntFactors(tmp.UnscaledBig(), big1024)
return append(out, amount.String()...), exponent
}
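Cmp above brings the two amounts onto a common scale before comparing raw int64 values, falling back to inf.Dec arithmetic only when the scale division is inexact; AsCanonicalBytes similarly shifts the exponent to a multiple of 3 (so 1.1e5 renders as "110", 3). A naive stand-alone illustration of the scale-matching idea (the real code additionally guards against overflow):

package main

import "fmt"

// cmpScaled compares a*10^as with b*10^bs by rescaling both to the
// smaller exponent. Unlike int64Amount.Cmp it ignores overflow, so it
// is only a teaching aid.
func cmpScaled(a int64, as int32, b int64, bs int32) int {
	for as > bs {
		a *= 10
		as--
	}
	for bs > as {
		b *= 10
		bs--
	}
	switch {
	case a == b:
		return 0
	case a < b:
		return -1
	default:
		return 1
	}
}

func main() {
	fmt.Println(cmpScaled(11, 4, 110, 3)) // 0: 1.1e5 == 110e3
	fmt.Println(cmpScaled(1, 0, 999, -3)) // 1: 1 > 0.999
}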

View File

@ -21,35 +21,27 @@ limitations under the License.
package resource
import (
inf_v0 "gopkg.in/inf.v0"
conversion "k8s.io/kubernetes/pkg/conversion"
)
func DeepCopy_resource_Quantity(in Quantity, out *Quantity, c *conversion.Cloner) error {
if in.Amount != nil {
in, out := in.Amount, &out.Amount
*out = new(inf_v0.Dec)
if newVal, err := c.DeepCopy(*in); err != nil {
return err
} else {
**out = newVal.(inf_v0.Dec)
}
if newVal, err := c.DeepCopy(in.i); err != nil {
return err
} else {
out.Amount = nil
out.i = newVal.(int64Amount)
}
out.Format = in.Format
return nil
}
func DeepCopy_resource_QuantityProto(in QuantityProto, out *QuantityProto, c *conversion.Cloner) error {
out.Format = in.Format
out.Scale = in.Scale
if in.Bigint != nil {
in, out := in.Bigint, &out.Bigint
if newVal, err := c.DeepCopy(in.d); err != nil {
return err
} else {
out.d = newVal.(infDecAmount)
}
if in.s != nil {
in, out := in.s, &out.s
*out = make([]byte, len(in))
copy(*out, in)
} else {
out.Bigint = nil
out.s = nil
}
out.Format = in.Format
return nil
}

View File

@ -26,7 +26,6 @@ limitations under the License.
It has these top-level messages:
Quantity
QuantityProto
*/
package resource
@ -34,8 +33,6 @@ import proto "github.com/gogo/protobuf/proto"
import fmt "fmt"
import math "math"
import io "io"
// Reference imports to suppress errors if they are not otherwise used.
var _ = proto.Marshal
var _ = fmt.Errorf
@ -44,328 +41,6 @@ var _ = math.Inf
func (m *Quantity) Reset() { *m = Quantity{} }
func (*Quantity) ProtoMessage() {}
func (m *QuantityProto) Reset() { *m = QuantityProto{} }
func (m *QuantityProto) String() string { return proto.CompactTextString(m) }
func (*QuantityProto) ProtoMessage() {}
func init() {
proto.RegisterType((*Quantity)(nil), "k8s.io.kubernetes.pkg.api.resource.Quantity")
proto.RegisterType((*QuantityProto)(nil), "k8s.io.kubernetes.pkg.api.resource.QuantityProto")
}
func (m *QuantityProto) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
func (m *QuantityProto) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
data[i] = 0xa
i++
i = encodeVarintGenerated(data, i, uint64(len(m.Format)))
i += copy(data[i:], m.Format)
data[i] = 0x10
i++
i = encodeVarintGenerated(data, i, uint64(m.Scale))
if m.Bigint != nil {
data[i] = 0x1a
i++
i = encodeVarintGenerated(data, i, uint64(len(m.Bigint)))
i += copy(data[i:], m.Bigint)
}
return i, nil
}
func encodeFixed64Generated(data []byte, offset int, v uint64) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
data[offset+4] = uint8(v >> 32)
data[offset+5] = uint8(v >> 40)
data[offset+6] = uint8(v >> 48)
data[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Generated(data []byte, offset int, v uint32) int {
data[offset] = uint8(v)
data[offset+1] = uint8(v >> 8)
data[offset+2] = uint8(v >> 16)
data[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintGenerated(data []byte, offset int, v uint64) int {
for v >= 1<<7 {
data[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
data[offset] = uint8(v)
return offset + 1
}
func (m *QuantityProto) Size() (n int) {
var l int
_ = l
l = len(m.Format)
n += 1 + l + sovGenerated(uint64(l))
n += 1 + sovGenerated(uint64(m.Scale))
if m.Bigint != nil {
l = len(m.Bigint)
n += 1 + l + sovGenerated(uint64(l))
}
return n
}
func sovGenerated(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
func sozGenerated(x uint64) (n int) {
return sovGenerated(uint64((x << 1) ^ uint64((int64(x) >> 63))))
}
func (m *QuantityProto) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: QuantityProto: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: QuantityProto: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Format", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Format = Format(data[iNdEx:postIndex])
iNdEx = postIndex
case 2:
if wireType != 0 {
return fmt.Errorf("proto: wrong wireType = %d for field Scale", wireType)
}
m.Scale = 0
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
m.Scale |= (int32(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
case 3:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field Bigint", wireType)
}
var byteLen int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
byteLen |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if byteLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + byteLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
m.Bigint = append(m.Bigint[:0], data[iNdEx:postIndex]...)
if m.Bigint == nil {
m.Bigint = []byte{}
}
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
return nil
}
func skipGenerated(data []byte) (n int, err error) {
l := len(data)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenerated
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenerated
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if data[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenerated
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthGenerated
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenerated
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipGenerated(data[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
var (
ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
)

View File

@ -84,26 +84,10 @@ option go_package = "resource";
// cause implementors to also use a fixed point implementation.
//
// +protobuf=true
// +protobuf.embed=QuantityProto
// +protobuf.embed=string
// +protobuf.options.marshal=false
// +protobuf.options.(gogoproto.goproto_stringer)=false
message Quantity {
optional QuantityProto QuantityProto = 1;
}
// QuantityProto is a struct that is equivalent to Quantity, but intended for
// protobuf marshalling/unmarshalling. It is generated into a serialization
// that matches Quantity. Do not use in Go structs.
//
// +protobuf=true
message QuantityProto {
// The format of the quantity
optional string format = 1;
// The scale dimension of the value
optional int32 scale = 2;
// Bigint is serialized as a raw bytes array
optional bytes bigint = 3;
optional string string = 1;
}

327
vendor/k8s.io/kubernetes/pkg/api/resource/math.go generated vendored Normal file
View File

@ -0,0 +1,327 @@
/*
Copyright 2014 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package resource
import (
"math/big"
inf "gopkg.in/inf.v0"
)
const (
// maxInt64Factors is the highest value that will be checked when removing factors of 10 from an int64.
// It is also the maximum decimal digits that can be represented with an int64.
maxInt64Factors = 18
)
var (
// Commonly needed big.Int values-- treat as read only!
bigTen = big.NewInt(10)
bigZero = big.NewInt(0)
bigOne = big.NewInt(1)
bigThousand = big.NewInt(1000)
big1024 = big.NewInt(1024)
// Commonly needed inf.Dec values-- treat as read only!
decZero = inf.NewDec(0, 0)
decOne = inf.NewDec(1, 0)
decMinusOne = inf.NewDec(-1, 0)
decThousand = inf.NewDec(1000, 0)
dec1024 = inf.NewDec(1024, 0)
decMinus1024 = inf.NewDec(-1024, 0)
// Largest (in magnitude) number allowed.
maxAllowed = infDecAmount{inf.NewDec((1<<63)-1, 0)} // == max int64
// The maximum value we can represent milli-units for.
// Compare with the return value of Quantity.Value() to
// see if it's safe to use Quantity.MilliValue().
MaxMilliValue = int64(((1 << 63) - 1) / 1000)
)
const mostNegative = -(mostPositive + 1)
const mostPositive = 1<<63 - 1
// int64Add returns a+b, or false if that would overflow int64.
func int64Add(a, b int64) (int64, bool) {
c := a + b
switch {
case a > 0 && b > 0:
if c < 0 {
return 0, false
}
case a < 0 && b < 0:
if c > 0 {
return 0, false
}
if a == mostNegative && b == mostNegative {
return 0, false
}
}
return c, true
}
// int64Multiply returns a*b, or false if that would overflow or underflow int64.
func int64Multiply(a, b int64) (int64, bool) {
if a == 0 || b == 0 || a == 1 || b == 1 {
return a * b, true
}
if a == mostNegative || b == mostNegative {
return 0, false
}
c := a * b
return c, c/b == a
}
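// exampleInt64Multiply is an illustrative sketch (not used by the package) of the
// overflow check above: dividing the product back by one factor must recover the
// other factor, which fails when the multiplication wrapped around.
func exampleInt64Multiply() {
	_, ok := int64Multiply(3037000500, 3037000500) // just above sqrt(max int64)
	_ = ok                                         // false: the square wraps around
	v, ok := int64Multiply(1<<30, 4)
	_ = v == 1<<32 && ok // true: 2^32 fits comfortably in an int64
}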
// int64MultiplyScale returns a*b, assuming b is greater than one, or false if that would overflow or underflow int64.
// Use when b is known to be greater than one.
func int64MultiplyScale(a int64, b int64) (int64, bool) {
if a == 0 || a == 1 {
return a * b, true
}
if a == mostNegative && b != 1 {
return 0, false
}
c := a * b
return c, c/b == a
}
// int64MultiplyScale10 multiplies a by 10, or returns false if that would overflow. This method is faster than
// int64Multiply(a, 10) because the compiler can optimize constant factor multiplication.
func int64MultiplyScale10(a int64) (int64, bool) {
if a == 0 || a == 1 {
return a * 10, true
}
if a == mostNegative {
return 0, false
}
c := a * 10
return c, c/10 == a
}
// int64MultiplyScale100 multiplies a by 100, or returns false if that would overflow. This method is faster than
// int64Multiply(a, 100) because the compiler can optimize constant factor multiplication.
func int64MultiplyScale100(a int64) (int64, bool) {
if a == 0 || a == 1 {
return a * 100, true
}
if a == mostNegative {
return 0, false
}
c := a * 100
return c, c/100 == a
}
// int64MultiplyScale1000 multiplies a by 1000, or returns false if that would overflow. This method is faster than
// int64Multiply(a, 1000) because the compiler can optimize constant factor multiplication.
func int64MultiplyScale1000(a int64) (int64, bool) {
if a == 0 || a == 1 {
return a * 1000, true
}
if a == mostNegative {
return 0, false
}
c := a * 1000
return c, c/1000 == a
}
// positiveScaleInt64 multiplies base by 10^scale, returning false if the
// value overflows. Passing a negative scale is undefined.
func positiveScaleInt64(base int64, scale Scale) (int64, bool) {
switch scale {
case 0:
return base, true
case 1:
return int64MultiplyScale10(base)
case 2:
return int64MultiplyScale100(base)
case 3:
return int64MultiplyScale1000(base)
case 6:
return int64MultiplyScale(base, 1000000)
case 9:
return int64MultiplyScale(base, 1000000000)
default:
value := base
var ok bool
for i := Scale(0); i < scale; i++ {
if value, ok = int64MultiplyScale(value, 10); !ok {
return 0, false
}
}
return value, true
}
}
// negativeScaleInt64 reduces base by the provided scale, rounding up, until the
// value is zero or the scale is reached. Passing a negative scale is undefined.
// The value returned, if not exact, is rounded away from zero.
func negativeScaleInt64(base int64, scale Scale) (result int64, exact bool) {
if scale == 0 {
return base, true
}
value := base
var fraction bool
for i := Scale(0); i < scale; i++ {
if !fraction && value%10 != 0 {
fraction = true
}
value = value / 10
if value == 0 {
if fraction {
if base > 0 {
return 1, false
}
return -1, false
}
return 0, true
}
}
if fraction {
if base > 0 {
value += 1
} else {
value += -1
}
}
return value, !fraction
}
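// exampleNegativeScale is an illustrative sketch (not used by the package) of the
// rounding contract above: inexact results are rounded away from zero.
func exampleNegativeScale() {
	v, exact := negativeScaleInt64(1234, 2) // 12.34 after dropping two digits
	_ = v == 13 && !exact                   // rounded up, inexact
	v, exact = negativeScaleInt64(-1234, 2)
	_ = v == -13 && !exact // negative values round away from zero too
	v, exact = negativeScaleInt64(1200, 2)
	_ = v == 12 && exact // no fraction was dropped, so the result is exact
}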
func pow10Int64(b int64) int64 {
switch b {
case 0:
return 1
case 1:
return 10
case 2:
return 100
case 3:
return 1000
case 4:
return 10000
case 5:
return 100000
case 6:
return 1000000
case 7:
return 10000000
case 8:
return 100000000
case 9:
return 1000000000
case 10:
return 10000000000
case 11:
return 100000000000
case 12:
return 1000000000000
case 13:
return 10000000000000
case 14:
return 100000000000000
case 15:
return 1000000000000000
case 16:
return 10000000000000000
case 17:
return 100000000000000000
case 18:
return 1000000000000000000
default:
return 0
}
}
// powInt64 raises a to the bth power. Is not overflow aware.
func powInt64(a, b int64) int64 {
p := int64(1)
for b > 0 {
if b&1 != 0 {
p *= a
}
b >>= 1
a *= a
}
return p
}
// divideByScaleInt64 returns the result of dividing base by 10^scale and the remainder, or
// false if no such division is possible. Dividing by negative scales is undefined.
func divideByScaleInt64(base int64, scale Scale) (result, remainder int64, exact bool) {
if scale == 0 {
return base, 0, true
}
// the max scale representable in base 10 in an int64 is 18 decimal places
if scale >= 18 {
return 0, base, false
}
divisor := pow10Int64(int64(scale))
return base / divisor, base % divisor, true
}
// removeInt64Factors divides in a loop; the return values have the property that
// value == result * base ^ times
func removeInt64Factors(value int64, base int64) (result int64, times int32) {
times = 0
result = value
negative := result < 0
if negative {
result = -result
}
switch base {
// allow the compiler to optimize the common cases
case 10:
for result >= 10 && result%10 == 0 {
times++
result = result / 10
}
// allow the compiler to optimize the common cases
case 1024:
for result >= 1024 && result%1024 == 0 {
times++
result = result / 1024
}
default:
for result >= base && result%base == 0 {
times++
result = result / base
}
}
if negative {
result = -result
}
return result, times
}
// removeBigIntFactors divides in a loop; the return values have the property that
// d == result * factor ^ times
// d may be modified in place.
// If d == 0, then the return values will be (0, 0)
func removeBigIntFactors(d, factor *big.Int) (result *big.Int, times int32) {
q := big.NewInt(0)
m := big.NewInt(0)
for d.Cmp(bigZero) != 0 {
q.DivMod(d, factor, m)
if m.Cmp(bigZero) != 0 {
break
}
times++
d, q = q, d
}
return d, times
}
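// exampleRemoveFactors is an illustrative sketch (not used by the package) of the
// invariant documented above: value == result * base^times.
func exampleRemoveFactors() {
	result, times := removeInt64Factors(1500, 10)
	_ = result == 15 && times == 2 // 1500 == 15 * 10^2
	result, times = removeInt64Factors(2048, 1024)
	_ = result == 2 && times == 1 // 2048 == 2 * 1024^1
}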

View File

@ -17,10 +17,12 @@ limitations under the License.
package resource
import (
"bytes"
"errors"
"fmt"
"math/big"
"regexp"
"strconv"
"strings"
flag "github.com/spf13/pflag"
@ -86,19 +88,34 @@ import (
// cause implementors to also use a fixed point implementation.
//
// +protobuf=true
// +protobuf.embed=QuantityProto
// +protobuf.embed=string
// +protobuf.options.marshal=false
// +protobuf.options.(gogoproto.goproto_stringer)=false
type Quantity struct {
// Amount is public, so you can manipulate it if the accessor
// functions are not sufficient.
Amount *inf.Dec
// i is the quantity in int64 scaled form, if d.Dec == nil
i int64Amount
// d is the quantity in inf.Dec form if d.Dec != nil
d infDecAmount
// s is the generated value of this quantity to avoid recalculation
s []byte
// Change Format at will. See the comment for Canonicalize for
// more details.
Format
}
// CanonicalValue allows a quantity amount to be converted to a string.
type CanonicalValue interface {
// AsCanonicalBytes returns a byte array representing the string representation
// of the value mantissa and an int32 representing its exponent in base-10. Callers may
// pass a byte slice to the method to avoid allocations.
AsCanonicalBytes(out []byte) ([]byte, int32)
// AsCanonicalBase1024Bytes returns a byte array representing the string representation
// of the value mantissa and an int32 representing its exponent in base-1024. Callers
// may pass a byte slice to the method to avoid allocations.
AsCanonicalBase1024Bytes(out []byte) ([]byte, int32)
}
// Format lists the three possible formattings of a quantity.
type Format string
@ -115,26 +132,9 @@ func MustParse(str string) Quantity {
if err != nil {
panic(fmt.Errorf("cannot parse '%v': %v", str, err))
}
return *q
return q
}
// Scale is used for getting and setting the base-10 scaled value.
// Base-2 scales are omitted for mathematical simplicity.
// See Quantity.ScaledValue for more details.
type Scale int
const (
Nano Scale = -9
Micro Scale = -6
Milli Scale = -3
Kilo Scale = 3
Mega Scale = 6
Giga Scale = 9
Tera Scale = 12
Peta Scale = 15
Exa Scale = 18
)
const (
// splitREString is used to separate a number from its suffix; as such,
// this is overly permissive, but that's OK-- it will be checked later.
@ -149,47 +149,189 @@ var (
ErrFormatWrong = errors.New("quantities must match the regular expression '" + splitREString + "'")
ErrNumeric = errors.New("unable to parse numeric part of quantity")
ErrSuffix = errors.New("unable to parse quantity's suffix")
// Commonly needed big.Int values-- treat as read only!
bigTen = big.NewInt(10)
bigZero = big.NewInt(0)
bigOne = big.NewInt(1)
bigThousand = big.NewInt(1000)
big1024 = big.NewInt(1024)
// Commonly needed inf.Dec values-- treat as read only!
decZero = inf.NewDec(0, 0)
decOne = inf.NewDec(1, 0)
decMinusOne = inf.NewDec(-1, 0)
decThousand = inf.NewDec(1000, 0)
dec1024 = inf.NewDec(1024, 0)
decMinus1024 = inf.NewDec(-1024, 0)
// Largest (in magnitude) number allowed.
maxAllowed = inf.NewDec((1<<63)-1, 0) // == max int64
// The maximum value we can represent milli-units for.
// Compare with the return value of Quantity.Value() to
// see if it's safe to use Quantity.MilliValue().
MaxMilliValue = int64(((1 << 63) - 1) / 1000)
)
// parseQuantityString is a fast scanner for quantity values.
func parseQuantityString(str string) (positive bool, value, num, denom, suffix string, err error) {
positive = true
pos := 0
end := len(str)
// handle leading sign
if pos < end {
switch str[0] {
case '-':
positive = false
pos++
case '+':
pos++
}
}
// strip leading zeros
Zeroes:
for i := pos; ; i++ {
if i >= end {
num = "0"
value = num
return
}
switch str[i] {
case '0':
pos++
default:
break Zeroes
}
}
// extract the numerator
Num:
for i := pos; ; i++ {
if i >= end {
num = str[pos:end]
value = str[0:end]
return
}
switch str[i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
num = str[pos:i]
pos = i
break Num
}
}
// if we stripped all numerator positions, always return 0
if len(num) == 0 {
num = "0"
}
// handle a denominator
if pos < end && str[pos] == '.' {
pos++
Denom:
for i := pos; ; i++ {
if i >= end {
denom = str[pos:end]
value = str[0:end]
return
}
switch str[i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
denom = str[pos:i]
pos = i
break Denom
}
}
// TODO: we currently allow 1.G, but we may not want to in the future.
// if len(denom) == 0 {
// err = ErrFormatWrong
// return
// }
}
value = str[0:pos]
// grab the elements of the suffix
suffixStart := pos
for i := pos; ; i++ {
if i >= end {
suffix = str[suffixStart:end]
return
}
if !strings.ContainsAny(str[i:i+1], "eEinumkKMGTP") {
pos = i
break
}
}
if pos < end {
switch str[pos] {
case '-', '+':
pos++
}
}
Suffix:
for i := pos; ; i++ {
if i >= end {
suffix = str[suffixStart:end]
return
}
switch str[i] {
case '0', '1', '2', '3', '4', '5', '6', '7', '8', '9':
default:
pos = i
break Suffix
}
}
// we encountered a non-decimal character in the Suffix loop, but the last
// character was not a valid exponent
err = ErrFormatWrong
return
}
// ParseQuantity turns str into a Quantity, or returns an error.
func ParseQuantity(str string) (*Quantity, error) {
parts := splitRE.FindStringSubmatch(strings.TrimSpace(str))
// the regexp returns the entire match, followed by an entry for each () section.
if len(parts) != 3 {
return nil, ErrFormatWrong
func ParseQuantity(str string) (Quantity, error) {
if len(str) == 0 {
return Quantity{}, ErrFormatWrong
}
if str == "0" {
return Quantity{Format: DecimalSI}, nil
}
positive, value, num, denom, suf, err := parseQuantityString(str)
if err != nil {
return Quantity{}, err
}
base, exponent, format, ok := quantitySuffixer.interpret(suffix(suf))
if !ok {
return Quantity{}, ErrSuffix
}
precision := int32(0)
scale := int32(0)
mantissa := int64(1)
switch format {
case DecimalExponent, DecimalSI:
scale = exponent
precision = maxInt64Factors - int32(len(num)+len(denom))
case BinarySI:
scale = 0
switch {
case exponent >= 0 && len(denom) == 0:
// only handle positive binary numbers with the fast path
mantissa = int64(int64(mantissa) << uint64(exponent))
// 1Mi (2^20) has ~6 digits of decimal precision, so exponent*3/10 -1 is roughly the precision
precision = 15 - int32(len(num)) - int32(float32(exponent)*3/10) - 1
default:
precision = -1
}
}
if precision >= 0 {
// if we have a denominator, shift the entire value to the left by the number of places in the
// denominator
scale -= int32(len(denom))
if scale >= int32(Nano) {
shifted := num + denom
var value int64
value, err := strconv.ParseInt(shifted, 10, 64)
if err != nil {
return Quantity{}, ErrNumeric
}
if result, ok := int64Multiply(value, int64(mantissa)); ok {
if !positive {
result = -result
}
return Quantity{i: int64Amount{value: result, scale: Scale(scale)}, Format: format}, nil
}
}
}
amount := new(inf.Dec)
if _, ok := amount.SetString(parts[1]); !ok {
return nil, ErrNumeric
}
base, exponent, format, ok := quantitySuffixer.interpret(suffix(parts[2]))
if !ok {
return nil, ErrSuffix
if _, ok := amount.SetString(value); !ok {
return Quantity{}, ErrNumeric
}
// So that no one but us has to think about suffixes, remove it.
@ -217,9 +359,11 @@ func ParseQuantity(str string) (*Quantity, error) {
}
// The max is just a simple cap.
if amount.Cmp(maxAllowed) > 0 {
amount.Set(maxAllowed)
// TODO: this prevents accumulating quantities greater than int64, for instance quota across a cluster
if format == BinarySI && amount.Cmp(maxAllowed.Dec) > 0 {
amount.Set(maxAllowed.Dec)
}
if format == BinarySI && amount.Cmp(decOne) < 0 && amount.Cmp(decZero) > 0 {
// This avoids rounding and hopefully confusion, too.
format = DecimalSI
@ -228,25 +372,7 @@ func ParseQuantity(str string) (*Quantity, error) {
amount.Neg(amount)
}
return &Quantity{amount, format}, nil
}
// removeFactors divides in a loop; the return values have the property that
// d == result * factor ^ times
// d may be modified in place.
// If d == 0, then the return values will be (0, 0)
func removeFactors(d, factor *big.Int) (result *big.Int, times int) {
q := big.NewInt(0)
m := big.NewInt(0)
for d.Cmp(bigZero) != 0 {
q.DivMod(d, factor, m)
if m.Cmp(bigZero) != 0 {
break
}
times++
d, q = q, d
}
return d, times
return Quantity{d: infDecAmount{amount}, Format: format}, nil
}
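// exampleParseQuantity is an illustrative sketch of the two paths above: values
// that fit in 18 decimal digits stay in int64 scaled form, wider values fall
// back to inf.Dec.
func exampleParseQuantity() {
	q, err := ParseQuantity("250m")
	if err == nil {
		_ = q.MilliValue() // 250, served entirely from the int64 fast path
	}
	q, err = ParseQuantity("12345678901234567890") // 20 digits: precision < 0
	if err == nil {
		_ = q.AsDec() // the slow path backs the value with an inf.Dec
	}
}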
// Canonicalize returns the canonical form of q and its suffix (see comment on Quantity).
@ -256,27 +382,22 @@ func removeFactors(d, factor *big.Int) (result *big.Int, times int) {
// -1 and +1, it will be emitted as if q.Format were DecimalSI.
// * Otherwise, if q.Format is set to BinarySI, fractional parts of q.Amount will be
// rounded up. (1.1i becomes 2i.)
func (q *Quantity) Canonicalize() (string, suffix) {
if q.Amount == nil {
return "0", ""
}
// zero is zero always
if q.Amount.Cmp(&inf.Dec{}) == 0 {
return "0", ""
func (q *Quantity) CanonicalizeBytes(out []byte) (result, suffix []byte) {
if q.IsZero() {
return zeroBytes, nil
}
var rounded CanonicalValue
format := q.Format
switch format {
case DecimalExponent, DecimalSI:
case BinarySI:
if q.Amount.Cmp(decMinus1024) > 0 && q.Amount.Cmp(dec1024) < 0 {
if q.CmpInt64(-1024) > 0 && q.CmpInt64(1024) < 0 {
// This avoids rounding and hopefully confusion, too.
format = DecimalSI
} else {
tmp := &inf.Dec{}
tmp.Round(q.Amount, 0, inf.RoundUp)
if tmp.Cmp(q.Amount) != 0 {
var exact bool
if rounded, exact = q.AsScale(0); !exact {
// Don't lose precision-- show as DecimalSI
format = DecimalSI
}
@ -289,125 +410,223 @@ func (q *Quantity) Canonicalize() (string, suffix) {
// one of the other formats.
switch format {
case DecimalExponent, DecimalSI:
mantissa := q.Amount.UnscaledBig()
exponent := int(-q.Amount.Scale())
amount := big.NewInt(0).Set(mantissa)
// move all factors of 10 into the exponent for easy reasoning
amount, times := removeFactors(amount, bigTen)
exponent += times
// make sure exponent is a multiple of 3
for exponent%3 != 0 {
amount.Mul(amount, bigTen)
exponent--
}
suffix, _ := quantitySuffixer.construct(10, exponent, format)
number := amount.String()
number, exponent := q.AsCanonicalBytes(out)
suffix, _ := quantitySuffixer.constructBytes(10, exponent, format)
return number, suffix
case BinarySI:
tmp := &inf.Dec{}
tmp.Round(q.Amount, 0, inf.RoundUp)
amount, exponent := removeFactors(tmp.UnscaledBig(), big1024)
suffix, _ := quantitySuffixer.construct(2, exponent*10, format)
number := amount.String()
default:
// format must be BinarySI
number, exponent := rounded.AsCanonicalBase1024Bytes(out)
suffix, _ := quantitySuffixer.constructBytes(2, exponent*10, format)
return number, suffix
}
return "0", ""
}
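// exampleCanonicalForms is an illustrative sketch of the canonicalization rules
// above, as observed through String (defined below).
func exampleCanonicalForms() {
	q := MustParse("1000m")
	_ = q.String() // "1": factors of ten move into the exponent
	q = MustParse("2048Ki")
	_ = q.String() // "2Mi": factors of 1024 move into the suffix
	q = MustParse("0.5Gi")
	_ = q.String() // "512Mi": fractional binary values are kept exact
}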
// AsInt64 returns a representation of the current value as an int64 if a fast conversion
// is possible. If false is returned, callers must use the inf.Dec form of this quantity.
func (q *Quantity) AsInt64() (int64, bool) {
if q.d.Dec != nil {
return 0, false
}
return q.i.AsInt64()
}
// ToDec promotes the quantity in place to use an inf.Dec representation and returns itself.
func (q *Quantity) ToDec() *Quantity {
if q.d.Dec == nil {
q.d.Dec = q.i.AsDec()
q.i = int64Amount{}
}
return q
}
// AsDec returns the quantity as represented by a scaled inf.Dec.
func (q *Quantity) AsDec() *inf.Dec {
if q.d.Dec != nil {
return q.d.Dec
}
q.d.Dec = q.i.AsDec()
q.i = int64Amount{}
return q.d.Dec
}
// AsCanonicalBytes returns the canonical byte representation of this quantity as a mantissa
// and base 10 exponent. The out byte slice may be passed to the method to avoid an extra
// allocation.
func (q *Quantity) AsCanonicalBytes(out []byte) (result []byte, exponent int32) {
if q.d.Dec != nil {
return q.d.AsCanonicalBytes(out)
}
return q.i.AsCanonicalBytes(out)
}
// IsZero returns true if the quantity is equal to zero.
func (q *Quantity) IsZero() bool {
if q.d.Dec != nil {
return q.d.Dec.Sign() == 0
}
return q.i.value == 0
}
// Sign returns 0 if the quantity is zero, -1 if the quantity is less than zero, or 1 if the
// quantity is greater than zero.
func (q *Quantity) Sign() int {
if q.d.Dec != nil {
return q.d.Dec.Sign()
}
return q.i.Sign()
}
// AsScale returns the current value, rounded up to the provided scale, and returns
// false if the scale resulted in a loss of precision.
func (q *Quantity) AsScale(scale Scale) (CanonicalValue, bool) {
if q.d.Dec != nil {
return q.d.AsScale(scale)
}
return q.i.AsScale(scale)
}
// RoundUp updates the quantity to the provided scale, ensuring that the value is at
// least 1. False is returned if the rounding operation resulted in a loss of precision.
// Negative numbers are rounded away from zero (-9 scale 1 rounds to -10).
func (q *Quantity) RoundUp(scale Scale) bool {
if q.d.Dec != nil {
d, exact := q.d.AsScale(scale)
q.d = d
return exact
}
i, exact := q.i.AsScale(scale)
q.i = i
return exact
}
// Add adds the provided y quantity to the current value. If the current value is zero,
// the format of the quantity will be updated to the format of y.
func (q *Quantity) Add(y Quantity) {
q.s = nil
if q.d.Dec == nil && y.d.Dec == nil {
if q.i.value == 0 {
q.Format = y.Format
}
if q.i.Add(y.i) {
return
}
} else if q.IsZero() {
q.Format = y.Format
}
q.ToDec().d.Dec.Add(q.d.Dec, y.AsDec())
}
// Sub subtracts the provided quantity from the current value in place. If the current
// value is zero, the format of the quantity will be updated to the format of y.
func (q *Quantity) Sub(y Quantity) {
q.s = nil
if q.IsZero() {
q.Format = y.Format
}
if q.d.Dec == nil && y.d.Dec == nil && q.i.Sub(y.i) {
return
}
q.ToDec().d.Dec.Sub(q.d.Dec, y.AsDec())
}
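// exampleAddSub is an illustrative sketch of the in-place arithmetic above;
// unlike the previous API, Add and Sub no longer return an error.
func exampleAddSub() {
	q := MustParse("1")
	q.Add(MustParse("500m"))
	_ = q.String() // "1500m"
	q.Sub(MustParse("1500m"))
	_ = q.IsZero() // true
}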
// Cmp returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the
// quantity is greater than y.
func (q *Quantity) Cmp(y Quantity) int {
if q.d.Dec == nil && y.d.Dec == nil {
return q.i.Cmp(y.i)
}
return q.AsDec().Cmp(y.AsDec())
}
// CmpInt64 returns 0 if the quantity is equal to y, -1 if the quantity is less than y, or 1 if the
// quantity is greater than y.
func (q *Quantity) CmpInt64(y int64) int {
if q.d.Dec != nil {
return q.d.Dec.Cmp(inf.NewDec(y, inf.Scale(0)))
}
return q.i.Cmp(int64Amount{value: y})
}
// Neg sets quantity to be the negative value of itself.
func (q *Quantity) Neg() {
q.s = nil
if q.d.Dec == nil {
q.i.value = -q.i.value
return
}
q.d.Dec.Neg(q.d.Dec)
}
// toBytes ensures q.s is set to a byte slice representing the canonical string form of this
// quantity and then returns the value. CanonicalizeBytes is an expensive operation, and caching
// this result significantly reduces the cost of normal parse / marshal operations on Quantity.
func (q *Quantity) toBytes() []byte {
if q.s == nil {
result := make([]byte, 0, int64QuantityExpectedBytes)
number, suffix := q.CanonicalizeBytes(result)
number = append(number, suffix...)
q.s = number
}
return q.s
}
// int64QuantityExpectedBytes is the expected width in bytes of the canonical string representation
// of most Quantity values.
const int64QuantityExpectedBytes = 18
// String formats the Quantity as a string.
func (q *Quantity) String() string {
number, suffix := q.Canonicalize()
return number + string(suffix)
}
// Cmp compares q and y and returns:
//
// -1 if q < y
// 0 if q == y
// +1 if q > y
//
func (q *Quantity) Cmp(y Quantity) int {
if q.Amount == nil {
if y.Amount == nil {
return 0
}
return -y.Amount.Sign()
}
if y.Amount == nil {
return q.Amount.Sign()
}
return q.Amount.Cmp(y.Amount)
}
func (q *Quantity) Add(y Quantity) error {
switch {
case y.Amount == nil:
// Adding 0: do nothing.
case q.Amount == nil:
q.Amount = &inf.Dec{}
return q.Add(y)
default:
// we want to preserve the format of the non-zero value
zero := &inf.Dec{}
if q.Amount.Cmp(zero) == 0 && y.Amount.Cmp(zero) != 0 {
q.Format = y.Format
}
q.Amount.Add(q.Amount, y.Amount)
}
return nil
}
func (q *Quantity) Sub(y Quantity) error {
switch {
case y.Amount == nil:
// Subtracting 0: do nothing.
case q.Amount == nil:
q.Amount = &inf.Dec{}
return q.Sub(y)
default:
// we want to preserve the format of the non-zero value
zero := &inf.Dec{}
if q.Amount.Cmp(zero) == 0 && y.Amount.Cmp(zero) != 0 {
q.Format = y.Format
}
q.Amount.Sub(q.Amount, y.Amount)
}
return nil
}
// Neg sets q to the negative value of y.
// It updates the format of q to match y.
func (q *Quantity) Neg(y Quantity) error {
switch {
case y.Amount == nil:
*q = y
case q.Amount == nil:
q.Amount = &inf.Dec{}
fallthrough
default:
q.Amount.Neg(y.Amount)
q.Format = y.Format
}
return nil
return string(q.toBytes())
}
// MarshalJSON implements the json.Marshaller interface.
func (q Quantity) MarshalJSON() ([]byte, error) {
return []byte(`"` + q.String() + `"`), nil
if q.s != nil {
out := make([]byte, len(q.s)+2)
out[0], out[len(out)-1] = '"', '"'
copy(out[1:], q.s)
return out, nil
}
result := make([]byte, int64QuantityExpectedBytes, int64QuantityExpectedBytes)
result[0] = '"'
number, suffix := q.CanonicalizeBytes(result[1:1])
// if the same slice was returned to us that we passed in, avoid another allocation by copying number into
// the source slice and returning that
if len(number) > 0 && &number[0] == &result[1] && (len(number)+len(suffix)+2) <= int64QuantityExpectedBytes {
number = append(number, suffix...)
number = append(number, '"')
return result[:1+len(number)], nil
}
// if CanonicalizeBytes needed more space than our slice provided, we may need to allocate again so use
// append
result = result[:1]
result = append(result, number...)
result = append(result, suffix...)
result = append(result, '"')
return result, nil
}
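// exampleAppendReuse is an illustrative sketch (not used by the package) of the
// allocation trick above: append into spare capacity behind a prefix, then use
// element identity to detect whether the destination array was reused.
func exampleAppendReuse() {
	buf := make([]byte, 32)
	buf[0] = '"'
	out := append(buf[1:1], "500m"...) // writes into buf's backing array
	if len(out) > 0 && &out[0] == &buf[1] {
		_ = buf[:1+len(out)] // no reallocation; the quoted form is contiguous
	}
}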
// UnmarshalJSON implements the json.Unmarshaller interface.
func (q *Quantity) UnmarshalJSON(value []byte) error {
str := string(value)
parsed, err := ParseQuantity(strings.Trim(str, `"`))
l := len(value)
if l == 4 && bytes.Equal(value, []byte("null")) {
q.d.Dec = nil
q.i = int64Amount{}
return nil
}
if l < 2 {
return ErrFormatWrong
}
if value[0] == '"' && value[l-1] == '"' {
value = value[1 : l-1]
}
parsed, err := ParseQuantity(string(value))
if err != nil {
return err
}
parsed.s = value
// This copy is safe because parsed will not be referred to again.
*q = *parsed
*q = parsed
return nil
}
@ -415,7 +634,7 @@ func (q *Quantity) UnmarshalJSON(value []byte) error {
// value in the given format.
func NewQuantity(value int64, format Format) *Quantity {
return &Quantity{
Amount: inf.NewDec(value, 0),
i: int64Amount{value: value},
Format: format,
}
}
@ -426,7 +645,7 @@ func NewQuantity(value int64, format Format) *Quantity {
// values x where (-1 < x < 1) && (x != 0).
func NewMilliQuantity(value int64, format Format) *Quantity {
return &Quantity{
Amount: inf.NewDec(value, 3),
i: int64Amount{value: value, scale: -3},
Format: format,
}
}
@ -435,7 +654,7 @@ func NewMilliQuantity(value int64, format Format) *Quantity {
// value * 10^scale in DecimalSI format.
func NewScaledQuantity(value int64, scale Scale) *Quantity {
return &Quantity{
Amount: inf.NewDec(value, scale.infScale()),
i: int64Amount{value: value, scale: scale},
Format: DecimalSI,
}
}
@ -454,10 +673,12 @@ func (q *Quantity) MilliValue() int64 {
// ScaledValue returns the value of ceil(q * 10^scale); this could overflow an int64.
// To detect overflow, call Value() first and verify the expected magnitude.
func (q *Quantity) ScaledValue(scale Scale) int64 {
if q.Amount == nil {
return 0
if q.d.Dec == nil {
i, _ := q.i.AsScaledInt64(scale)
return i
}
return scaledValue(q.Amount.UnscaledBig(), int(q.Amount.Scale()), int(scale.infScale()))
dec := q.d.Dec
return scaledValue(dec.UnscaledBig(), int(dec.Scale()), int(scale.infScale()))
}
// Set sets q's value to be value.
@ -472,22 +693,25 @@ func (q *Quantity) SetMilli(value int64) {
// SetScaled sets q's value to be value * 10^scale
func (q *Quantity) SetScaled(value int64, scale Scale) {
if q.Amount == nil {
q.Amount = &inf.Dec{}
}
q.Amount.SetUnscaled(value)
q.Amount.SetScale(scale.infScale())
q.s = nil
q.d.Dec = nil
q.i = int64Amount{value: value, scale: scale}
}
// Copy is a convenience function that makes a deep copy for you. Non-deep
// copies of quantities share pointers and you will regret that.
func (q *Quantity) Copy() *Quantity {
if q.Amount == nil {
return NewQuantity(0, q.Format)
if q.d.Dec == nil {
return &Quantity{
s: q.s,
i: q.i,
Format: q.Format,
}
}
tmp := &inf.Dec{}
return &Quantity{
Amount: tmp.Set(q.Amount),
s: q.s,
d: infDecAmount{tmp.Set(q.d.Dec)},
Format: q.Format,
}
}
@ -504,7 +728,7 @@ func (qf qFlag) Set(val string) error {
return err
}
// This copy is OK because q will not be referenced again.
*qf.dest = *q
*qf.dest = q
return nil
}
@ -531,8 +755,3 @@ func QuantityFlag(flagName, defaultValue, description string) *Quantity {
func NewQuantityFlagValue(q *Quantity) flag.Value {
return qFlag{q}
}
// infScale adapts a Scale value to an inf.Scale value.
func (s Scale) infScale() inf.Scale {
return inf.Scale(-s) // inf.Scale is upside-down
}

View File

@ -17,62 +17,268 @@ limitations under the License.
package resource
import (
"math/big"
"fmt"
"io"
inf "gopkg.in/inf.v0"
"github.com/gogo/protobuf/proto"
)
// QuantityProto is a struct that is equivalent to Quantity, but intended for
// protobuf marshalling/unmarshalling. It is generated into a serialization
// that matches Quantity. Do not use in Go structs.
//
// +protobuf=true
type QuantityProto struct {
// The format of the quantity
Format Format `protobuf:"bytes,1,opt,name=format,casttype=Format"`
// The scale dimension of the value
Scale int32 `protobuf:"varint,2,opt,name=scale"`
// Bigint is serialized as a raw bytes array
Bigint []byte `protobuf:"bytes,3,opt,name=bigint"`
var _ proto.Sizer = &Quantity{}
func (m *Quantity) Marshal() (data []byte, err error) {
size := m.Size()
data = make([]byte, size)
n, err := m.MarshalTo(data)
if err != nil {
return nil, err
}
return data[:n], nil
}
// QuantityProto returns the Quantity as a new QuantityProto value.
func (q *Quantity) QuantityProto() *QuantityProto {
if q == nil {
return &QuantityProto{}
}
p := &QuantityProto{
Format: q.Format,
}
if q.Amount != nil {
p.Scale = int32(q.Amount.Scale())
p.Bigint = q.Amount.UnscaledBig().Bytes()
}
return p
// MarshalTo is a customized version of the generated Protobuf marshaler for a struct
// with a single string field.
func (m *Quantity) MarshalTo(data []byte) (int, error) {
var i int
_ = i
var l int
_ = l
data[i] = 0xa
i++
// BEGIN CUSTOM MARSHAL
out := m.toBytes()
i = encodeVarintGenerated(data, i, uint64(len(out)))
i += copy(data[i:], out)
// END CUSTOM MARSHAL
return i, nil
}
// Size implements the protobuf marshalling interface.
func (q *Quantity) Size() (n int) { return q.QuantityProto().Size() }
// Unmarshal implements the protobuf marshalling interface.
func (q *Quantity) Unmarshal(data []byte) error {
p := QuantityProto{}
if err := p.Unmarshal(data); err != nil {
return err
func encodeVarintGenerated(data []byte, offset int, v uint64) int {
for v >= 1<<7 {
data[offset] = uint8(v&0x7f | 0x80)
v >>= 7
offset++
}
data[offset] = uint8(v)
return offset + 1
}
func (m *Quantity) Size() (n int) {
var l int
_ = l
// BEGIN CUSTOM SIZE
l = len(m.toBytes())
// END CUSTOM SIZE
n += 1 + l + sovGenerated(uint64(l))
return n
}
func sovGenerated(x uint64) (n int) {
for {
n++
x >>= 7
if x == 0 {
break
}
}
return n
}
// Unmarshal is a customized version of the generated Protobuf unmarshaler for a struct
// with a single string field.
func (m *Quantity) Unmarshal(data []byte) error {
l := len(data)
iNdEx := 0
for iNdEx < l {
preIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
fieldNum := int32(wire >> 3)
wireType := int(wire & 0x7)
if wireType == 4 {
return fmt.Errorf("proto: Quantity: wiretype end group for non-group")
}
if fieldNum <= 0 {
return fmt.Errorf("proto: Quantity: illegal tag %d (wire type %d)", fieldNum, wire)
}
switch fieldNum {
case 1:
if wireType != 2 {
return fmt.Errorf("proto: wrong wireType = %d for field String_", wireType)
}
var stringLen uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
stringLen |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLen := int(stringLen)
if intStringLen < 0 {
return ErrInvalidLengthGenerated
}
postIndex := iNdEx + intStringLen
if postIndex > l {
return io.ErrUnexpectedEOF
}
s := string(data[iNdEx:postIndex])
// BEGIN CUSTOM DECODE
p, err := ParseQuantity(s)
if err != nil {
return err
}
*m = p
// END CUSTOM DECODE
iNdEx = postIndex
default:
iNdEx = preIndex
skippy, err := skipGenerated(data[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > l {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
}
if iNdEx > l {
return io.ErrUnexpectedEOF
}
q.Format = p.Format
b := big.NewInt(0)
b.SetBytes(p.Bigint)
q.Amount = inf.NewDecBig(b, inf.Scale(p.Scale))
return nil
}
// Marshal implements the protobuf marshalling interface.
func (q *Quantity) Marshal() (data []byte, err error) {
return q.QuantityProto().Marshal()
func skipGenerated(data []byte) (n int, err error) {
l := len(data)
iNdEx := 0
for iNdEx < l {
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenerated
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
wireType := int(wire & 0x7)
switch wireType {
case 0:
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenerated
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
iNdEx++
if data[iNdEx-1] < 0x80 {
break
}
}
return iNdEx, nil
case 1:
iNdEx += 8
return iNdEx, nil
case 2:
var length int
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenerated
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
length |= (int(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
iNdEx += length
if length < 0 {
return 0, ErrInvalidLengthGenerated
}
return iNdEx, nil
case 3:
for {
var innerWire uint64
var start int = iNdEx
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return 0, ErrIntOverflowGenerated
}
if iNdEx >= l {
return 0, io.ErrUnexpectedEOF
}
b := data[iNdEx]
iNdEx++
innerWire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
innerWireType := int(innerWire & 0x7)
if innerWireType == 4 {
break
}
next, err := skipGenerated(data[start:])
if err != nil {
return 0, err
}
iNdEx = start + next
}
return iNdEx, nil
case 4:
return iNdEx, nil
case 5:
iNdEx += 4
return iNdEx, nil
default:
return 0, fmt.Errorf("proto: illegal wireType %d", wireType)
}
}
panic("unreachable")
}
// MarshalTo implements the protobuf marshalling interface.
func (q *Quantity) MarshalTo(data []byte) (int, error) {
return q.QuantityProto().MarshalTo(data)
}
var (
ErrInvalidLengthGenerated = fmt.Errorf("proto: negative length found during unmarshaling")
ErrIntOverflowGenerated = fmt.Errorf("proto: integer overflow")
)
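// exampleProtoRoundTrip is an illustrative sketch of the custom codec above: a
// Quantity travels as a single length-delimited string field holding its
// canonical form.
func exampleProtoRoundTrip() {
	q := MustParse("500m")
	data, err := q.Marshal() // 0x0a, 0x04, then the bytes "500m"
	if err != nil {
		return
	}
	var out Quantity
	if err := out.Unmarshal(data); err == nil {
		_ = out.Cmp(q) // 0: the canonical string survives the round trip
	}
}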

View File

@ -24,8 +24,9 @@ type suffix string
// suffixer can interpret and construct suffixes.
type suffixer interface {
interpret(suffix) (base, exponent int, fmt Format, ok bool)
construct(base, exponent int, fmt Format) (s suffix, ok bool)
interpret(suffix) (base, exponent int32, fmt Format, ok bool)
construct(base, exponent int32, fmt Format) (s suffix, ok bool)
constructBytes(base, exponent int32, fmt Format) (s []byte, ok bool)
}
// quantitySuffixer handles suffixes for all three formats that quantity
@ -33,12 +34,13 @@ type suffixer interface {
var quantitySuffixer = newSuffixer()
type bePair struct {
base, exponent int
base, exponent int32
}
type listSuffixer struct {
suffixToBE map[suffix]bePair
beToSuffix map[bePair]suffix
suffixToBE map[suffix]bePair
beToSuffix map[bePair]suffix
beToSuffixBytes map[bePair][]byte
}
func (ls *listSuffixer) addSuffix(s suffix, pair bePair) {
@ -48,11 +50,15 @@ func (ls *listSuffixer) addSuffix(s suffix, pair bePair) {
if ls.beToSuffix == nil {
ls.beToSuffix = map[bePair]suffix{}
}
if ls.beToSuffixBytes == nil {
ls.beToSuffixBytes = map[bePair][]byte{}
}
ls.suffixToBE[s] = pair
ls.beToSuffix[pair] = s
ls.beToSuffixBytes[pair] = []byte(s)
}
func (ls *listSuffixer) lookup(s suffix) (base, exponent int, ok bool) {
func (ls *listSuffixer) lookup(s suffix) (base, exponent int32, ok bool) {
pair, ok := ls.suffixToBE[s]
if !ok {
return 0, 0, false
@ -60,19 +66,50 @@ func (ls *listSuffixer) lookup(s suffix) (base, exponent int, ok bool) {
return pair.base, pair.exponent, true
}
func (ls *listSuffixer) construct(base, exponent int) (s suffix, ok bool) {
func (ls *listSuffixer) construct(base, exponent int32) (s suffix, ok bool) {
s, ok = ls.beToSuffix[bePair{base, exponent}]
return
}
func (ls *listSuffixer) constructBytes(base, exponent int32) (s []byte, ok bool) {
s, ok = ls.beToSuffixBytes[bePair{base, exponent}]
return
}
type suffixHandler struct {
decSuffixes listSuffixer
binSuffixes listSuffixer
}
type fastLookup struct {
*suffixHandler
}
func (l fastLookup) interpret(s suffix) (base, exponent int32, format Format, ok bool) {
switch s {
case "":
return 10, 0, DecimalSI, true
case "n":
return 10, -9, DecimalSI, true
case "u":
return 10, -6, DecimalSI, true
case "m":
return 10, -3, DecimalSI, true
case "k":
return 10, 3, DecimalSI, true
case "M":
return 10, 6, DecimalSI, true
case "G":
return 10, 9, DecimalSI, true
}
return l.suffixHandler.interpret(s)
}
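// exampleSuffixLookup is an illustrative sketch of the fast path above: common
// decimal SI suffixes are answered by the switch without touching the maps.
func exampleSuffixLookup() {
	base, exp, format, ok := quantitySuffixer.interpret("m") // switch fast path
	_ = ok && base == 10 && exp == -3 && format == DecimalSI
	base, exp, format, ok = quantitySuffixer.interpret("Ki") // falls through to the maps
	_ = ok && base == 2 && exp == 10 && format == BinarySI
}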
func newSuffixer() suffixer {
sh := &suffixHandler{}
// IMPORTANT: if you change this section you must change fastLookup
sh.binSuffixes.addSuffix("Ki", bePair{2, 10})
sh.binSuffixes.addSuffix("Mi", bePair{2, 20})
sh.binSuffixes.addSuffix("Gi", bePair{2, 30})
@ -94,10 +131,10 @@ func newSuffixer() suffixer {
sh.decSuffixes.addSuffix("P", bePair{10, 15})
sh.decSuffixes.addSuffix("E", bePair{10, 18})
return sh
return fastLookup{sh}
}
func (sh *suffixHandler) construct(base, exponent int, fmt Format) (s suffix, ok bool) {
func (sh *suffixHandler) construct(base, exponent int32, fmt Format) (s suffix, ok bool) {
switch fmt {
case DecimalSI:
return sh.decSuffixes.construct(base, exponent)
@ -115,7 +152,32 @@ func (sh *suffixHandler) construct(base, exponent int, fmt Format) (s suffix, ok
return "", false
}
func (sh *suffixHandler) interpret(suffix suffix) (base, exponent int, fmt Format, ok bool) {
func (sh *suffixHandler) constructBytes(base, exponent int32, format Format) (s []byte, ok bool) {
switch format {
case DecimalSI:
return sh.decSuffixes.constructBytes(base, exponent)
case BinarySI:
return sh.binSuffixes.constructBytes(base, exponent)
case DecimalExponent:
if base != 10 {
return nil, false
}
if exponent == 0 {
return nil, true
}
result := make([]byte, 8, 8)
result[0] = 'e'
number := strconv.AppendInt(result[1:1], int64(exponent), 10)
if &result[1] == &number[0] {
return result[:1+len(number)], true
}
result = append(result[:1], number...)
return result, true
}
return nil, false
}
func (sh *suffixHandler) interpret(suffix suffix) (base, exponent int32, fmt Format, ok bool) {
// Try lookup tables first
if b, e, ok := sh.decSuffixes.lookup(suffix); ok {
return b, e, DecimalSI, true
@ -129,7 +191,7 @@ func (sh *suffixHandler) interpret(suffix suffix) (base, exponent int, fmt Forma
if err != nil {
return 0, 0, DecimalExponent, false
}
return 10, int(parsed), DecimalExponent, true
return 10, int32(parsed), DecimalExponent, true
}
return 0, 0, DecimalExponent, false

View File

@ -92,6 +92,8 @@ func GetPodReadyCondition(status PodStatus) *PodCondition {
return condition
}
// GetPodCondition extracts the provided condition from the given status and returns that.
// Returns -1 and nil if the condition is not present, otherwise the index and the located condition.
func GetPodCondition(status *PodStatus, conditionType PodConditionType) (int, *PodCondition) {
for i, c := range status.Conditions {
if c.Type == conditionType {
@ -118,13 +120,16 @@ func UpdatePodCondition(status *PodStatus, condition *PodCondition) bool {
if condition.Status == oldCondition.Status {
condition.LastTransitionTime = oldCondition.LastTransitionTime
}
isEqual := condition.Status == oldCondition.Status &&
condition.Reason == oldCondition.Reason &&
condition.Message == oldCondition.Message &&
condition.LastProbeTime.Equal(oldCondition.LastProbeTime) &&
condition.LastTransitionTime.Equal(oldCondition.LastTransitionTime)
status.Conditions[conditionIndex] = *condition
// Return true if one of the fields has changed.
return condition.Status != oldCondition.Status ||
condition.Reason != oldCondition.Reason ||
condition.Message != oldCondition.Message ||
!condition.LastProbeTime.Equal(oldCondition.LastProbeTime) ||
!condition.LastTransitionTime.Equal(oldCondition.LastTransitionTime)
return !isEqual
}
}
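// exampleUpdateCondition is an illustrative sketch of the contract above: the
// return value reports whether anything changed, and LastTransitionTime is
// carried over when the status itself did not change. The reason string is a
// hypothetical value.
func exampleUpdateCondition(status *PodStatus) bool {
	return UpdatePodCondition(status, &PodCondition{
		Type:   PodReady,
		Status: ConditionTrue,
		Reason: "AllContainersReady", // hypothetical
	})
}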
@ -146,15 +151,40 @@ func PodRequestsAndLimits(pod *Pod) (reqs map[ResourceName]resource.Quantity, li
for name, quantity := range container.Resources.Requests {
if value, ok := reqs[name]; !ok {
reqs[name] = *quantity.Copy()
} else if err = value.Add(quantity); err != nil {
return nil, nil, err
} else {
value.Add(quantity)
reqs[name] = value
}
}
for name, quantity := range container.Resources.Limits {
if value, ok := limits[name]; !ok {
limits[name] = *quantity.Copy()
} else if err = value.Add(quantity); err != nil {
return nil, nil, err
} else {
value.Add(quantity)
limits[name] = value
}
}
}
// init containers run sequentially, so the effective request/limit for each
// resource must be at least the largest value used by any single init container
for _, container := range pod.Spec.InitContainers {
for name, quantity := range container.Resources.Requests {
value, ok := reqs[name]
if !ok {
reqs[name] = *quantity.Copy()
continue
}
if quantity.Cmp(value) > 0 {
reqs[name] = *quantity.Copy()
}
}
for name, quantity := range container.Resources.Limits {
value, ok := limits[name]
if !ok {
limits[name] = *quantity.Copy()
continue
}
if quantity.Cmp(value) > 0 {
limits[name] = *quantity.Copy()
}
}
}
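// exampleEffectiveRequests is an illustrative sketch (not used by the package)
// of the rule above: init containers run one at a time, so the effective pod
// request is the maximum of the app containers' sum and the largest single
// init container.
func exampleEffectiveRequests() {
	appSumMilli := int64(200 + 300) // two app containers: 200m + 300m
	initMaxMilli := int64(800)      // one init container: 800m
	effective := appSumMilli
	if initMaxMilli > effective {
		effective = initMaxMilli
	}
	_ = effective // 800m: the init container dominates this pod's request
}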

File diff suppressed because it is too large Load Diff

View File

@ -229,6 +229,8 @@ type VolumeSource struct {
AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty"`
// ConfigMap represents a configMap that should populate this volume
ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty"`
// VsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine
VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty"`
}
// Similar to VolumeSource but meant for the administrator who creates PVs.
@ -267,6 +269,8 @@ type PersistentVolumeSource struct {
Flocker *FlockerVolumeSource `json:"flocker,omitempty"`
// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty"`
// VsphereVolume represents a vSphere volume attached and mounted on the kubelet's host machine
VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty"`
}
type PersistentVolumeClaimVolumeSource struct {
@ -411,6 +415,10 @@ const (
ClaimPending PersistentVolumeClaimPhase = "Pending"
// used for PersistentVolumeClaims that are bound
ClaimBound PersistentVolumeClaimPhase = "Bound"
// used for PersistentVolumeClaims that lost their underlying
// PersistentVolume. The claim was bound to a PersistentVolume that no
// longer exists, and all data on it was lost.
ClaimLost PersistentVolumeClaimPhase = "Lost"
)
// Represents a host path mapped into a pod.
@ -582,6 +590,14 @@ type GitRepoVolumeSource struct {
type SecretVolumeSource struct {
// Name of the secret in the pod's namespace to use.
SecretName string `json:"secretName,omitempty"`
// If unspecified, each key-value pair in the Data field of the referenced
// Secret will be projected into the volume as a file whose name is the
// key and content is the value. If specified, the listed keys will be
// projected into the specified paths, and unlisted keys will not be
// present. If a key is specified which is not present in the Secret,
// the volume setup will error. Paths must be relative and may not contain
// the '..' path or start with '..'.
Items []KeyToPath `json:"items,omitempty"`
}
// Represents an NFS mount that lasts the lifetime of a pod.
@ -704,6 +720,16 @@ type AzureFileVolumeSource struct {
ReadOnly bool `json:"readOnly,omitempty"`
}
// Represents a vSphere volume resource.
type VsphereVirtualDiskVolumeSource struct {
// Path that identifies vSphere volume vmdk
VolumePath string `json:"volumePath"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
FSType string `json:"fsType,omitempty"`
}
// Adapts a ConfigMap into a volume.
//
// The contents of the target ConfigMap's Data field will be presented in a
@ -1076,6 +1102,8 @@ const (
// PodReady means the pod is able to service requests and should be added to the
// load balancing pools of all matching services.
PodReady PodConditionType = "Ready"
// PodInitialized means that all init containers in the pod have started successfully.
PodInitialized PodConditionType = "Initialized"
)
type PodCondition struct {
@ -1306,10 +1334,79 @@ type PreferredSchedulingTerm struct {
Preference NodeSelectorTerm `json:"preference"`
}
// The node this Taint is attached to has the effect "effect" on
// any pod that does not tolerate the Taint.
type Taint struct {
// Required. The taint key to be applied to a node.
Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key"`
// Required. The taint value corresponding to the taint key.
Value string `json:"value,omitempty"`
// Required. The effect of the taint on pods
// that do not tolerate the taint.
// Valid effects are NoSchedule and PreferNoSchedule.
Effect TaintEffect `json:"effect"`
}
type TaintEffect string
const (
// Do not allow new pods to schedule onto the node unless they tolerate the taint,
// but allow all pods submitted to Kubelet without going through the scheduler
// to start, and allow all already-running pods to continue running.
// Enforced by the scheduler.
TaintEffectNoSchedule TaintEffect = "NoSchedule"
// Like TaintEffectNoSchedule, but the scheduler tries not to schedule
// new pods onto the node, rather than prohibiting new pods from scheduling
// onto the node entirely. Enforced by the scheduler.
TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
// Do not allow new pods to schedule onto the node unless they tolerate the taint,
// do not allow pods to start on Kubelet unless they tolerate the taint,
// but allow all already-running pods to continue running.
// Enforced by the scheduler and Kubelet.
// TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"
// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
// Do not allow new pods to schedule onto the node unless they tolerate the taint,
// do not allow pods to start on Kubelet unless they tolerate the taint,
// and evict any already-running pods that do not tolerate the taint.
// Enforced by the scheduler and Kubelet.
// TaintEffectNoScheduleNoAdmitNoExecute = "NoScheduleNoAdmitNoExecute"
)
// The pod this Toleration is attached to tolerates any taint that matches
// the triple <key,value,effect> using the matching operator <operator>.
type Toleration struct {
// Required. Key is the taint key that the toleration applies to.
Key string `json:"key,omitempty" patchStrategy:"merge" patchMergeKey:"key"`
// operator represents a key's relationship to the value.
// Valid operators are Exists and Equal. Defaults to Equal.
// Exists is equivalent to wildcard for value, so that a pod can
// tolerate all taints of a particular category.
Operator TolerationOperator `json:"operator,omitempty"`
// Value is the taint value the toleration matches to.
// If the operator is Exists, the value should be empty, otherwise just a regular string.
Value string `json:"value,omitempty"`
// Effect indicates the taint effect to match. Empty means match all taint effects.
// When specified, allowed values are NoSchedule and PreferNoSchedule.
Effect TaintEffect `json:"effect,omitempty"`
// TODO: For forgiveness (#1574), we'd eventually add at least a grace period
// here, and possibly an occurrence threshold and period.
}
// A toleration operator is the set of operators that can be used in a toleration.
type TolerationOperator string
const (
TolerationOpExists TolerationOperator = "Exists"
TolerationOpEqual TolerationOperator = "Equal"
)
// PodSpec is a description of a pod
type PodSpec struct {
Volumes []Volume `json:"volumes"`
// Required: there must be at least one container in a pod.
// List of initialization containers belonging to the pod.
InitContainers []Container `json:"-"`
// List of containers belonging to the pod.
Containers []Container `json:"containers"`
RestartPolicy RestartPolicy `json:"restartPolicy,omitempty"`
// Optional duration in seconds the pod needs to terminate gracefully. May be decreased in delete request.
@ -1416,6 +1513,11 @@ type PodStatus struct {
// This is before the Kubelet pulled the container image(s) for the pod.
StartTime *unversioned.Time `json:"startTime,omitempty"`
// The list has one entry per init container in the manifest. The most recent successful
// init container will have ready = true, the most recently started container will have
// startTime set.
// More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses
InitContainerStatuses []ContainerStatus `json:"-"`
// The list has one entry per container in the manifest. Each entry is
// currently the output of `docker inspect`. This output format is *not*
// final and should not be relied upon.
@ -1832,6 +1934,10 @@ type NodeSystemInfo struct {
KubeletVersion string `json:"kubeletVersion"`
// KubeProxy Version reported by the node.
KubeProxyVersion string `json:"kubeProxyVersion"`
// The Operating System reported by the node
OperatingSystem string `json:"operatingSystem"`
// The Architecture reported by the node
Architecture string `json:"architecture"`
}
// NodeStatus is information about the current status of a node.
@ -1885,6 +1991,8 @@ const (
// NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk
// space on the node.
NodeOutOfDisk NodeConditionType = "OutOfDisk"
// NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory.
NodeMemoryPressure NodeConditionType = "MemoryPressure"
)
type NodeCondition struct {

View File

@ -21,15 +21,127 @@ limitations under the License.
package unversioned
import (
"time"
conversion "k8s.io/kubernetes/pkg/conversion"
time "time"
)
func DeepCopy_unversioned_APIGroup(in APIGroup, out *APIGroup, c *conversion.Cloner) error {
if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
out.Name = in.Name
if in.Versions != nil {
in, out := in.Versions, &out.Versions
*out = make([]GroupVersionForDiscovery, len(in))
for i := range in {
if err := DeepCopy_unversioned_GroupVersionForDiscovery(in[i], &(*out)[i], c); err != nil {
return err
}
}
} else {
out.Versions = nil
}
if err := DeepCopy_unversioned_GroupVersionForDiscovery(in.PreferredVersion, &out.PreferredVersion, c); err != nil {
return err
}
if in.ServerAddressByClientCIDRs != nil {
in, out := in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs
*out = make([]ServerAddressByClientCIDR, len(in))
for i := range in {
if err := DeepCopy_unversioned_ServerAddressByClientCIDR(in[i], &(*out)[i], c); err != nil {
return err
}
}
} else {
out.ServerAddressByClientCIDRs = nil
}
return nil
}
func DeepCopy_unversioned_APIGroupList(in APIGroupList, out *APIGroupList, c *conversion.Cloner) error {
if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
if in.Groups != nil {
in, out := in.Groups, &out.Groups
*out = make([]APIGroup, len(in))
for i := range in {
if err := DeepCopy_unversioned_APIGroup(in[i], &(*out)[i], c); err != nil {
return err
}
}
} else {
out.Groups = nil
}
return nil
}
func DeepCopy_unversioned_APIResource(in APIResource, out *APIResource, c *conversion.Cloner) error {
out.Name = in.Name
out.Namespaced = in.Namespaced
out.Kind = in.Kind
return nil
}
func DeepCopy_unversioned_APIResourceList(in APIResourceList, out *APIResourceList, c *conversion.Cloner) error {
if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
out.GroupVersion = in.GroupVersion
if in.APIResources != nil {
in, out := in.APIResources, &out.APIResources
*out = make([]APIResource, len(in))
for i := range in {
if err := DeepCopy_unversioned_APIResource(in[i], &(*out)[i], c); err != nil {
return err
}
}
} else {
out.APIResources = nil
}
return nil
}
func DeepCopy_unversioned_APIVersions(in APIVersions, out *APIVersions, c *conversion.Cloner) error {
if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
if in.Versions != nil {
in, out := in.Versions, &out.Versions
*out = make([]string, len(in))
copy(*out, in)
} else {
out.Versions = nil
}
if in.ServerAddressByClientCIDRs != nil {
in, out := in.ServerAddressByClientCIDRs, &out.ServerAddressByClientCIDRs
*out = make([]ServerAddressByClientCIDR, len(in))
for i := range in {
if err := DeepCopy_unversioned_ServerAddressByClientCIDR(in[i], &(*out)[i], c); err != nil {
return err
}
}
} else {
out.ServerAddressByClientCIDRs = nil
}
return nil
}
func DeepCopy_unversioned_Duration(in Duration, out *Duration, c *conversion.Cloner) error {
out.Duration = in.Duration
return nil
}
func DeepCopy_unversioned_ExportOptions(in ExportOptions, out *ExportOptions, c *conversion.Cloner) error {
if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
out.Export = in.Export
out.Exact = in.Exact
return nil
}
func DeepCopy_unversioned_GroupKind(in GroupKind, out *GroupKind, c *conversion.Cloner) error {
out.Group = in.Group
out.Kind = in.Kind
@ -48,6 +160,12 @@ func DeepCopy_unversioned_GroupVersion(in GroupVersion, out *GroupVersion, c *co
return nil
}
func DeepCopy_unversioned_GroupVersionForDiscovery(in GroupVersionForDiscovery, out *GroupVersionForDiscovery, c *conversion.Cloner) error {
out.GroupVersion = in.GroupVersion
out.Version = in.Version
return nil
}
func DeepCopy_unversioned_GroupVersionKind(in GroupVersionKind, out *GroupVersionKind, c *conversion.Cloner) error {
out.Group = in.Group
out.Version = in.Version
@ -105,6 +223,76 @@ func DeepCopy_unversioned_ListMeta(in ListMeta, out *ListMeta, c *conversion.Clo
return nil
}
func DeepCopy_unversioned_Patch(in Patch, out *Patch, c *conversion.Cloner) error {
return nil
}
func DeepCopy_unversioned_RootPaths(in RootPaths, out *RootPaths, c *conversion.Cloner) error {
if in.Paths != nil {
in, out := in.Paths, &out.Paths
*out = make([]string, len(in))
copy(*out, in)
} else {
out.Paths = nil
}
return nil
}
func DeepCopy_unversioned_ServerAddressByClientCIDR(in ServerAddressByClientCIDR, out *ServerAddressByClientCIDR, c *conversion.Cloner) error {
out.ClientCIDR = in.ClientCIDR
out.ServerAddress = in.ServerAddress
return nil
}
func DeepCopy_unversioned_Status(in Status, out *Status, c *conversion.Cloner) error {
if err := DeepCopy_unversioned_TypeMeta(in.TypeMeta, &out.TypeMeta, c); err != nil {
return err
}
if err := DeepCopy_unversioned_ListMeta(in.ListMeta, &out.ListMeta, c); err != nil {
return err
}
out.Status = in.Status
out.Message = in.Message
out.Reason = in.Reason
if in.Details != nil {
in, out := in.Details, &out.Details
*out = new(StatusDetails)
if err := DeepCopy_unversioned_StatusDetails(*in, *out, c); err != nil {
return err
}
} else {
out.Details = nil
}
out.Code = in.Code
return nil
}
func DeepCopy_unversioned_StatusCause(in StatusCause, out *StatusCause, c *conversion.Cloner) error {
out.Type = in.Type
out.Message = in.Message
out.Field = in.Field
return nil
}
func DeepCopy_unversioned_StatusDetails(in StatusDetails, out *StatusDetails, c *conversion.Cloner) error {
out.Name = in.Name
out.Group = in.Group
out.Kind = in.Kind
if in.Causes != nil {
in, out := in.Causes, &out.Causes
*out = make([]StatusCause, len(in))
for i := range in {
if err := DeepCopy_unversioned_StatusCause(in[i], &(*out)[i], c); err != nil {
return err
}
}
} else {
out.Causes = nil
}
out.RetryAfterSeconds = in.RetryAfterSeconds
return nil
}
func DeepCopy_unversioned_Time(in Time, out *Time, c *conversion.Cloner) error {
if newVal, err := c.DeepCopy(in.Time); err != nil {
return err

View File

@ -29,8 +29,8 @@ import (
// `resource.group.com` -> `group=com, version=group, resource=resource` and `group=group.com, resource=resource`
func ParseResourceArg(arg string) (*GroupVersionResource, GroupResource) {
var gvr *GroupVersionResource
s := strings.SplitN(arg, ".", 3)
if len(s) == 3 {
if strings.Count(arg, ".") >= 2 {
s := strings.SplitN(arg, ".", 3)
gvr = &GroupVersionResource{Group: s[2], Version: s[1], Resource: s[0]}
}
@ -64,12 +64,11 @@ func (gr *GroupResource) String() string {
// ParseGroupResource turns a "resource.group" string into a GroupResource struct. Empty strings are allowed
// for each field.
func ParseGroupResource(gr string) GroupResource {
s := strings.SplitN(gr, ".", 2)
if len(s) == 1 {
return GroupResource{Resource: s[0]}
if i := strings.Index(gr, "."); i == -1 {
return GroupResource{Resource: gr}
} else {
return GroupResource{Group: gr[i+1:], Resource: gr[:i]}
}
return GroupResource{Group: s[1], Resource: s[0]}
}
// GroupVersionResource unambiguously identifies a resource. It doesn't anonymously include GroupVersion
@ -189,18 +188,14 @@ func ParseGroupVersion(gv string) (GroupVersion, error) {
return GroupVersion{}, nil
}
s := strings.Split(gv, "/")
// "v1" is the only special case. Otherwise GroupVersion is expected to contain
// one "/" dividing the string into two parts.
switch {
case len(s) == 1 && gv == "v1":
return GroupVersion{"", "v1"}, nil
case len(s) == 1:
return GroupVersion{"", s[0]}, nil
case len(s) == 2:
return GroupVersion{s[0], s[1]}, nil
switch strings.Count(gv, "/") {
case 0:
return GroupVersion{"", gv}, nil
case 1:
i := strings.Index(gv, "/")
return GroupVersion{gv[:i], gv[i+1:]}, nil
default:
return GroupVersion{}, fmt.Errorf("Unexpected GroupVersion string: %v", gv)
return GroupVersion{}, fmt.Errorf("unexpected GroupVersion string: %v", gv)
}
}
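
The rewrites above replace slice-splitting with strings.Count and strings.Index so the error cases fall out of a single switch. A self-contained sketch of the new ParseGroupVersion behavior (the type is trimmed to the two fields used here, and the empty/"/" shortcut is reproduced from the surrounding function):

package main

import (
	"fmt"
	"strings"
)

type GroupVersion struct {
	Group, Version string
}

// parseGroupVersion mirrors the logic above: zero slashes means a bare
// version, one slash splits group from version, and more is an error.
func parseGroupVersion(gv string) (GroupVersion, error) {
	if gv == "" || gv == "/" {
		return GroupVersion{}, nil
	}
	switch strings.Count(gv, "/") {
	case 0:
		return GroupVersion{"", gv}, nil
	case 1:
		i := strings.Index(gv, "/")
		return GroupVersion{gv[:i], gv[i+1:]}, nil
	default:
		return GroupVersion{}, fmt.Errorf("unexpected GroupVersion string: %v", gv)
	}
}

func main() {
	for _, s := range []string{"v1", "extensions/v1beta1", "a/b/c"} {
		fmt.Println(parseGroupVersion(s))
	}
}
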

@ -1,5 +1,5 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.

@ -17,18 +17,11 @@ limitations under the License.
package validation
import (
"fmt"
"k8s.io/kubernetes/pkg/api/unversioned"
"k8s.io/kubernetes/pkg/util/validation"
"k8s.io/kubernetes/pkg/util/validation/field"
)
var (
labelValueErrorMsg string = fmt.Sprintf(`must have at most %d characters, matching regex %s: e.g. "MyValue" or ""`, validation.LabelValueMaxLength, validation.LabelValueFmt)
qualifiedNameErrorMsg string = fmt.Sprintf(`must be a qualified name (at most %d characters, matching regex %s), with an optional DNS subdomain prefix (at most %d characters, matching regex %s) and slash (/): e.g. "MyName" or "example.com/MyName"`, validation.QualifiedNameMaxLength, validation.QualifiedNameFmt, validation.DNS1123SubdomainMaxLength, validation.DNS1123SubdomainFmt)
)
func ValidateLabelSelector(ps *unversioned.LabelSelector, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if ps == nil {
@ -62,8 +55,8 @@ func ValidateLabelSelectorRequirement(sr unversioned.LabelSelectorRequirement, f
// ValidateLabelName validates that the label name is correctly defined.
func ValidateLabelName(labelName string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if !validation.IsQualifiedName(labelName) {
allErrs = append(allErrs, field.Invalid(fldPath, labelName, qualifiedNameErrorMsg))
for _, msg := range validation.IsQualifiedName(labelName) {
allErrs = append(allErrs, field.Invalid(fldPath, labelName, msg))
}
return allErrs
}
@ -73,8 +66,8 @@ func ValidateLabels(labels map[string]string, fldPath *field.Path) field.ErrorLi
allErrs := field.ErrorList{}
for k, v := range labels {
allErrs = append(allErrs, ValidateLabelName(k, fldPath)...)
if !validation.IsValidLabelValue(v) {
allErrs = append(allErrs, field.Invalid(fldPath, v, labelValueErrorMsg))
for _, msg := range validation.IsValidLabelValue(v) {
allErrs = append(allErrs, field.Invalid(fldPath, v, msg))
}
}
return allErrs
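
This pattern recurs throughout the update: validators now return a []string of messages instead of a single bool, and callers append one field.Invalid per message. A minimal sketch of both sides, with a stand-in validator since the real validation package isn't reproduced here:

package main

import (
	"fmt"
	"regexp"
)

var dns1123 = regexp.MustCompile(`^[a-z0-9]([-a-z0-9]*[a-z0-9])?$`)

// isDNS1123Label is a stand-in for validation.IsDNS1123Label: it returns
// one message per violated rule, or nil when the value is valid.
func isDNS1123Label(v string) []string {
	var msgs []string
	if len(v) > 63 {
		msgs = append(msgs, "must be no more than 63 characters")
	}
	if !dns1123.MatchString(v) {
		msgs = append(msgs, "must match the regex "+dns1123.String())
	}
	return msgs
}

func main() {
	// Callers aggregate every message instead of stopping at the first.
	for _, msg := range isDNS1123Label("Not_Valid!") {
		fmt.Println("invalid value:", msg)
	}
}
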

@ -24,4 +24,7 @@ const (
LabelZoneRegion = "failure-domain.beta.kubernetes.io/region"
LabelInstanceType = "beta.kubernetes.io/instance-type"
LabelOS = "beta.kubernetes.io/os"
LabelArch = "beta.kubernetes.io/arch"
)

@ -17,10 +17,9 @@ limitations under the License.
package v1
import (
"encoding/json"
"fmt"
inf "gopkg.in/inf.v0"
"k8s.io/kubernetes/pkg/api"
"k8s.io/kubernetes/pkg/conversion"
"k8s.io/kubernetes/pkg/runtime"
@ -258,6 +257,81 @@ func Convert_v1_ReplicationControllerSpec_To_api_ReplicationControllerSpec(in *R
return nil
}
func Convert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error {
if err := autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in, out, s); err != nil {
return err
}
if len(out.Status.InitContainerStatuses) > 0 {
if out.Annotations == nil {
out.Annotations = make(map[string]string)
}
value, err := json.Marshal(out.Status.InitContainerStatuses)
if err != nil {
return err
}
out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value)
} else {
delete(out.Annotations, PodInitContainerStatusesAnnotationKey)
}
return nil
}
func Convert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error {
// TODO: when we move init container to beta, remove these conversions
if value, ok := in.Annotations[PodInitContainerStatusesAnnotationKey]; ok {
var values []ContainerStatus
if err := json.Unmarshal([]byte(value), &values); err != nil {
return err
}
in.Status.InitContainerStatuses = values
}
if err := autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in, out, s); err != nil {
return err
}
delete(out.Annotations, PodInitContainerStatusesAnnotationKey)
return nil
}
func Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error {
if err := autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s); err != nil {
return err
}
// TODO: when we move init container to beta, remove these conversions
if len(out.Spec.InitContainers) > 0 {
if out.Annotations == nil {
out.Annotations = make(map[string]string)
}
value, err := json.Marshal(out.Spec.InitContainers)
if err != nil {
return err
}
out.Annotations[PodInitContainersAnnotationKey] = string(value)
} else {
delete(out.Annotations, PodInitContainersAnnotationKey)
}
return nil
}
func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error {
// TODO: when we move init container to beta, remove these conversions
if value, ok := in.Annotations[PodInitContainersAnnotationKey]; ok {
var values []Container
if err := json.Unmarshal([]byte(value), &values); err != nil {
return err
}
in.Spec.InitContainers = values
}
if err := autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in, out, s); err != nil {
return err
}
delete(out.Annotations, PodInitContainersAnnotationKey)
return nil
}
// The following two PodSpec conversions are done here to support ServiceAccount
// as an alias for ServiceAccountName.
func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversion.Scope) error {
@ -271,6 +345,16 @@ func Convert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conversi
} else {
out.Volumes = nil
}
if in.InitContainers != nil {
out.InitContainers = make([]Container, len(in.InitContainers))
for i := range in.InitContainers {
if err := Convert_api_Container_To_v1_Container(&in.InitContainers[i], &out.InitContainers[i], s); err != nil {
return err
}
}
} else {
out.InitContainers = nil
}
if in.Containers != nil {
out.Containers = make([]Container, len(in.Containers))
for i := range in.Containers {
@ -346,6 +430,16 @@ func Convert_v1_PodSpec_To_api_PodSpec(in *PodSpec, out *api.PodSpec, s conversi
} else {
out.Volumes = nil
}
if in.InitContainers != nil {
out.InitContainers = make([]api.Container, len(in.InitContainers))
for i := range in.InitContainers {
if err := Convert_v1_Container_To_api_Container(&in.InitContainers[i], &out.InitContainers[i], s); err != nil {
return err
}
}
} else {
out.InitContainers = nil
}
if in.Containers != nil {
out.Containers = make([]api.Container, len(in.Containers))
for i := range in.Containers {
@ -419,6 +513,33 @@ func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error
if err := autoConvert_api_Pod_To_v1_Pod(in, out, s); err != nil {
return err
}
// TODO: when we move init container to beta, remove these conversions
if len(out.Spec.InitContainers) > 0 {
if out.Annotations == nil {
out.Annotations = make(map[string]string)
}
value, err := json.Marshal(out.Spec.InitContainers)
if err != nil {
return err
}
out.Annotations[PodInitContainersAnnotationKey] = string(value)
} else {
delete(out.Annotations, PodInitContainersAnnotationKey)
}
if len(out.Status.InitContainerStatuses) > 0 {
if out.Annotations == nil {
out.Annotations = make(map[string]string)
}
value, err := json.Marshal(out.Status.InitContainerStatuses)
if err != nil {
return err
}
out.Annotations[PodInitContainerStatusesAnnotationKey] = string(value)
} else {
delete(out.Annotations, PodInitContainerStatusesAnnotationKey)
}
// We need to reset certain fields for mirror pods from pre-v1.1 kubelet
// (#15960).
// TODO: Remove this code after we drop support for v1.0 kubelets.
@ -434,7 +555,28 @@ func Convert_api_Pod_To_v1_Pod(in *api.Pod, out *Pod, s conversion.Scope) error
}
func Convert_v1_Pod_To_api_Pod(in *Pod, out *api.Pod, s conversion.Scope) error {
return autoConvert_v1_Pod_To_api_Pod(in, out, s)
// TODO: when we move init container to beta, remove these conversions
if value, ok := in.Annotations[PodInitContainersAnnotationKey]; ok {
var values []Container
if err := json.Unmarshal([]byte(value), &values); err != nil {
return err
}
in.Spec.InitContainers = values
}
if value, ok := in.Annotations[PodInitContainerStatusesAnnotationKey]; ok {
var values []ContainerStatus
if err := json.Unmarshal([]byte(value), &values); err != nil {
return err
}
in.Status.InitContainerStatuses = values
}
if err := autoConvert_v1_Pod_To_api_Pod(in, out, s); err != nil {
return err
}
delete(out.Annotations, PodInitContainersAnnotationKey)
delete(out.Annotations, PodInitContainerStatusesAnnotationKey)
return nil
}
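
The conversions above persist the alpha init-container fields by marshaling them into a pod annotation on the way out and decoding them on the way back in. A standalone sketch of that round-trip, using a trimmed Container type and the same annotation key:

package main

import (
	"encoding/json"
	"fmt"
)

const podInitContainersKey = "pod.alpha.kubernetes.io/init-containers"

// Container is trimmed to one field for illustration.
type Container struct {
	Name string `json:"name"`
}

// encodeInitContainers mirrors the v1 conversion: serialize the slice into
// the annotation, or delete the key when the slice is empty.
func encodeInitContainers(annotations map[string]string, init []Container) (map[string]string, error) {
	if len(init) == 0 {
		delete(annotations, podInitContainersKey) // no-op on a nil map
		return annotations, nil
	}
	if annotations == nil {
		annotations = map[string]string{}
	}
	value, err := json.Marshal(init)
	if err != nil {
		return nil, err
	}
	annotations[podInitContainersKey] = string(value)
	return annotations, nil
}

func main() {
	ann, _ := encodeInitContainers(nil, []Container{{Name: "setup"}})
	fmt.Println(ann[podInitContainersKey]) // [{"name":"setup"}]

	var decoded []Container
	_ = json.Unmarshal([]byte(ann[podInitContainersKey]), &decoded)
	fmt.Println(decoded[0].Name) // setup
}
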
func Convert_api_ServiceSpec_To_v1_ServiceSpec(in *api.ServiceSpec, out *ServiceSpec, s conversion.Scope) error {
@ -535,8 +677,8 @@ func Convert_v1_ResourceList_To_api_ResourceList(in *ResourceList, out *api.Reso
// TODO(#18538): We round up resource values to milli scale to maintain API compatibility.
// In the future, we should instead reject values that need rounding.
const milliScale = 3
value.Amount.Round(value.Amount, milliScale, inf.RoundUp)
const milliScale = -3
value.RoundUp(milliScale)
converted[api.ResourceName(key)] = *value
}
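
Note the sign flip above: inf.Dec's Round takes the number of decimal places (3 for milli precision), while the new Quantity.RoundUp takes a power-of-ten exponent (-3 for milli), assuming the usual gopkg.in/inf.v0 semantics. A dependency-free sketch of rounding a value up to milli scale:

package main

import (
	"fmt"
	"math"
)

// roundUpToScale rounds v up to the given power-of-ten scale, so
// roundUpToScale(x, -3) rounds up to the nearest thousandth (milli).
func roundUpToScale(v float64, scale int) float64 {
	step := math.Pow(10, float64(scale))
	return math.Ceil(v/step) * step
}

func main() {
	fmt.Println(roundUpToScale(0.1234567, -3)) // 0.124
}
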

@ -311,12 +311,18 @@ func init() {
Convert_api_ServiceStatus_To_v1_ServiceStatus,
Convert_v1_TCPSocketAction_To_api_TCPSocketAction,
Convert_api_TCPSocketAction_To_v1_TCPSocketAction,
Convert_v1_Taint_To_api_Taint,
Convert_api_Taint_To_v1_Taint,
Convert_v1_Toleration_To_api_Toleration,
Convert_api_Toleration_To_v1_Toleration,
Convert_v1_Volume_To_api_Volume,
Convert_api_Volume_To_v1_Volume,
Convert_v1_VolumeMount_To_api_VolumeMount,
Convert_api_VolumeMount_To_v1_VolumeMount,
Convert_v1_VolumeSource_To_api_VolumeSource,
Convert_api_VolumeSource_To_v1_VolumeSource,
Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource,
Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource,
Convert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm,
Convert_api_WeightedPodAffinityTerm_To_v1_WeightedPodAffinityTerm,
); err != nil {
@ -3588,6 +3594,8 @@ func autoConvert_v1_NodeSystemInfo_To_api_NodeSystemInfo(in *NodeSystemInfo, out
out.ContainerRuntimeVersion = in.ContainerRuntimeVersion
out.KubeletVersion = in.KubeletVersion
out.KubeProxyVersion = in.KubeProxyVersion
out.OperatingSystem = in.OperatingSystem
out.Architecture = in.Architecture
return nil
}
@ -3604,6 +3612,8 @@ func autoConvert_api_NodeSystemInfo_To_v1_NodeSystemInfo(in *api.NodeSystemInfo,
out.ContainerRuntimeVersion = in.ContainerRuntimeVersion
out.KubeletVersion = in.KubeletVersion
out.KubeProxyVersion = in.KubeProxyVersion
out.OperatingSystem = in.OperatingSystem
out.Architecture = in.Architecture
return nil
}
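
The new OperatingSystem and Architecture fields are plain strings; on the reporting side they can be filled in directly from Go's runtime constants. A minimal sketch (the NodeSystemInfo literal here is abbreviated to the two new fields):

package main

import (
	"fmt"
	"runtime"
)

// NodeSystemInfo is trimmed to the two fields added in this update.
type NodeSystemInfo struct {
	OperatingSystem string
	Architecture    string
}

func main() {
	// runtime.GOOS/GOARCH report the platform the binary was built for,
	// e.g. "linux"/"amd64", which is what these fields surface.
	info := NodeSystemInfo{
		OperatingSystem: runtime.GOOS,
		Architecture:    runtime.GOARCH,
	}
	fmt.Printf("%+v\n", info)
}
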
@ -4240,6 +4250,15 @@ func autoConvert_v1_PersistentVolumeSource_To_api_PersistentVolumeSource(in *Per
} else {
out.AzureFile = nil
}
if in.VsphereVolume != nil {
in, out := &in.VsphereVolume, &out.VsphereVolume
*out = new(api.VsphereVirtualDiskVolumeSource)
if err := Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(*in, *out, s); err != nil {
return err
}
} else {
out.VsphereVolume = nil
}
return nil
}
@ -4365,6 +4384,15 @@ func autoConvert_api_PersistentVolumeSource_To_v1_PersistentVolumeSource(in *api
} else {
out.AzureFile = nil
}
if in.VsphereVolume != nil {
in, out := &in.VsphereVolume, &out.VsphereVolume
*out = new(VsphereVirtualDiskVolumeSource)
if err := Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(*in, *out, s); err != nil {
return err
}
} else {
out.VsphereVolume = nil
}
return nil
}
@ -5008,6 +5036,17 @@ func autoConvert_api_PodSpec_To_v1_PodSpec(in *api.PodSpec, out *PodSpec, s conv
} else {
out.Volumes = nil
}
if in.InitContainers != nil {
in, out := &in.InitContainers, &out.InitContainers
*out = make([]Container, len(*in))
for i := range *in {
if err := Convert_api_Container_To_v1_Container(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.InitContainers = nil
}
if in.Containers != nil {
in, out := &in.Containers, &out.Containers
*out = make([]Container, len(*in))
@ -5097,6 +5136,17 @@ func autoConvert_v1_PodStatus_To_api_PodStatus(in *PodStatus, out *api.PodStatus
} else {
out.StartTime = nil
}
if in.InitContainerStatuses != nil {
in, out := &in.InitContainerStatuses, &out.InitContainerStatuses
*out = make([]api.ContainerStatus, len(*in))
for i := range *in {
if err := Convert_v1_ContainerStatus_To_api_ContainerStatus(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.InitContainerStatuses = nil
}
if in.ContainerStatuses != nil {
in, out := &in.ContainerStatuses, &out.ContainerStatuses
*out = make([]api.ContainerStatus, len(*in))
@ -5141,6 +5191,17 @@ func autoConvert_api_PodStatus_To_v1_PodStatus(in *api.PodStatus, out *PodStatus
} else {
out.StartTime = nil
}
if in.InitContainerStatuses != nil {
in, out := &in.InitContainerStatuses, &out.InitContainerStatuses
*out = make([]ContainerStatus, len(*in))
for i := range *in {
if err := Convert_api_ContainerStatus_To_v1_ContainerStatus(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.InitContainerStatuses = nil
}
if in.ContainerStatuses != nil {
in, out := &in.ContainerStatuses, &out.ContainerStatuses
*out = make([]ContainerStatus, len(*in))
@ -5172,10 +5233,6 @@ func autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult,
return nil
}
func Convert_v1_PodStatusResult_To_api_PodStatusResult(in *PodStatusResult, out *api.PodStatusResult, s conversion.Scope) error {
return autoConvert_v1_PodStatusResult_To_api_PodStatusResult(in, out, s)
}
func autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error {
if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
return err
@ -5189,10 +5246,6 @@ func autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResu
return nil
}
func Convert_api_PodStatusResult_To_v1_PodStatusResult(in *api.PodStatusResult, out *PodStatusResult, s conversion.Scope) error {
return autoConvert_api_PodStatusResult_To_v1_PodStatusResult(in, out, s)
}
func autoConvert_v1_PodTemplate_To_api_PodTemplate(in *PodTemplate, out *api.PodTemplate, s conversion.Scope) error {
if err := api.Convert_unversioned_TypeMeta_To_unversioned_TypeMeta(&in.TypeMeta, &out.TypeMeta, s); err != nil {
return err
@ -5287,10 +5340,6 @@ func autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec,
return nil
}
func Convert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in *PodTemplateSpec, out *api.PodTemplateSpec, s conversion.Scope) error {
return autoConvert_v1_PodTemplateSpec_To_api_PodTemplateSpec(in, out, s)
}
func autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error {
if err := Convert_api_ObjectMeta_To_v1_ObjectMeta(&in.ObjectMeta, &out.ObjectMeta, s); err != nil {
return err
@ -5301,10 +5350,6 @@ func autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSp
return nil
}
func Convert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in *api.PodTemplateSpec, out *PodTemplateSpec, s conversion.Scope) error {
return autoConvert_api_PodTemplateSpec_To_v1_PodTemplateSpec(in, out, s)
}
func autoConvert_v1_Preconditions_To_api_Preconditions(in *Preconditions, out *api.Preconditions, s conversion.Scope) error {
if in.UID != nil {
in, out := &in.UID, &out.UID
@ -5994,6 +6039,17 @@ func Convert_api_SecretList_To_v1_SecretList(in *api.SecretList, out *SecretList
func autoConvert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *SecretVolumeSource, out *api.SecretVolumeSource, s conversion.Scope) error {
out.SecretName = in.SecretName
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]api.KeyToPath, len(*in))
for i := range *in {
if err := Convert_v1_KeyToPath_To_api_KeyToPath(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
@ -6003,6 +6059,17 @@ func Convert_v1_SecretVolumeSource_To_api_SecretVolumeSource(in *SecretVolumeSou
func autoConvert_api_SecretVolumeSource_To_v1_SecretVolumeSource(in *api.SecretVolumeSource, out *SecretVolumeSource, s conversion.Scope) error {
out.SecretName = in.SecretName
if in.Items != nil {
in, out := &in.Items, &out.Items
*out = make([]KeyToPath, len(*in))
for i := range *in {
if err := Convert_api_KeyToPath_To_v1_KeyToPath(&(*in)[i], &(*out)[i], s); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
@ -6527,6 +6594,52 @@ func Convert_api_TCPSocketAction_To_v1_TCPSocketAction(in *api.TCPSocketAction,
return autoConvert_api_TCPSocketAction_To_v1_TCPSocketAction(in, out, s)
}
func autoConvert_v1_Taint_To_api_Taint(in *Taint, out *api.Taint, s conversion.Scope) error {
out.Key = in.Key
out.Value = in.Value
out.Effect = api.TaintEffect(in.Effect)
return nil
}
func Convert_v1_Taint_To_api_Taint(in *Taint, out *api.Taint, s conversion.Scope) error {
return autoConvert_v1_Taint_To_api_Taint(in, out, s)
}
func autoConvert_api_Taint_To_v1_Taint(in *api.Taint, out *Taint, s conversion.Scope) error {
out.Key = in.Key
out.Value = in.Value
out.Effect = TaintEffect(in.Effect)
return nil
}
func Convert_api_Taint_To_v1_Taint(in *api.Taint, out *Taint, s conversion.Scope) error {
return autoConvert_api_Taint_To_v1_Taint(in, out, s)
}
func autoConvert_v1_Toleration_To_api_Toleration(in *Toleration, out *api.Toleration, s conversion.Scope) error {
out.Key = in.Key
out.Operator = api.TolerationOperator(in.Operator)
out.Value = in.Value
out.Effect = api.TaintEffect(in.Effect)
return nil
}
func Convert_v1_Toleration_To_api_Toleration(in *Toleration, out *api.Toleration, s conversion.Scope) error {
return autoConvert_v1_Toleration_To_api_Toleration(in, out, s)
}
func autoConvert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *Toleration, s conversion.Scope) error {
out.Key = in.Key
out.Operator = TolerationOperator(in.Operator)
out.Value = in.Value
out.Effect = TaintEffect(in.Effect)
return nil
}
func Convert_api_Toleration_To_v1_Toleration(in *api.Toleration, out *Toleration, s conversion.Scope) error {
return autoConvert_api_Toleration_To_v1_Toleration(in, out, s)
}
func autoConvert_v1_Volume_To_api_Volume(in *Volume, out *api.Volume, s conversion.Scope) error {
SetDefaults_Volume(in)
out.Name = in.Name
@ -6748,6 +6861,15 @@ func autoConvert_v1_VolumeSource_To_api_VolumeSource(in *VolumeSource, out *api.
} else {
out.ConfigMap = nil
}
if in.VsphereVolume != nil {
in, out := &in.VsphereVolume, &out.VsphereVolume
*out = new(api.VsphereVirtualDiskVolumeSource)
if err := Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(*in, *out, s); err != nil {
return err
}
} else {
out.VsphereVolume = nil
}
return nil
}
@ -6927,6 +7049,15 @@ func autoConvert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *
} else {
out.ConfigMap = nil
}
if in.VsphereVolume != nil {
in, out := &in.VsphereVolume, &out.VsphereVolume
*out = new(VsphereVirtualDiskVolumeSource)
if err := Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(*in, *out, s); err != nil {
return err
}
} else {
out.VsphereVolume = nil
}
return nil
}
@ -6934,6 +7065,26 @@ func Convert_api_VolumeSource_To_v1_VolumeSource(in *api.VolumeSource, out *Volu
return autoConvert_api_VolumeSource_To_v1_VolumeSource(in, out, s)
}
func autoConvert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in *VsphereVirtualDiskVolumeSource, out *api.VsphereVirtualDiskVolumeSource, s conversion.Scope) error {
out.VolumePath = in.VolumePath
out.FSType = in.FSType
return nil
}
func Convert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in *VsphereVirtualDiskVolumeSource, out *api.VsphereVirtualDiskVolumeSource, s conversion.Scope) error {
return autoConvert_v1_VsphereVirtualDiskVolumeSource_To_api_VsphereVirtualDiskVolumeSource(in, out, s)
}
func autoConvert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *api.VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, s conversion.Scope) error {
out.VolumePath = in.VolumePath
out.FSType = in.FSType
return nil
}
func Convert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in *api.VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, s conversion.Scope) error {
return autoConvert_api_VsphereVirtualDiskVolumeSource_To_v1_VsphereVirtualDiskVolumeSource(in, out, s)
}
func autoConvert_v1_WeightedPodAffinityTerm_To_api_WeightedPodAffinityTerm(in *WeightedPodAffinityTerm, out *api.WeightedPodAffinityTerm, s conversion.Scope) error {
out.Weight = int(in.Weight)
if err := Convert_v1_PodAffinityTerm_To_api_PodAffinityTerm(&in.PodAffinityTerm, &out.PodAffinityTerm, s); err != nil {

@ -172,9 +172,12 @@ func init() {
DeepCopy_v1_ServiceSpec,
DeepCopy_v1_ServiceStatus,
DeepCopy_v1_TCPSocketAction,
DeepCopy_v1_Taint,
DeepCopy_v1_Toleration,
DeepCopy_v1_Volume,
DeepCopy_v1_VolumeMount,
DeepCopy_v1_VolumeSource,
DeepCopy_v1_VsphereVirtualDiskVolumeSource,
DeepCopy_v1_WeightedPodAffinityTerm,
); err != nil {
// if one of the deep copy functions is malformed, detect it immediately.
@ -1532,6 +1535,8 @@ func DeepCopy_v1_NodeSystemInfo(in NodeSystemInfo, out *NodeSystemInfo, c *conve
out.ContainerRuntimeVersion = in.ContainerRuntimeVersion
out.KubeletVersion = in.KubeletVersion
out.KubeProxyVersion = in.KubeProxyVersion
out.OperatingSystem = in.OperatingSystem
out.Architecture = in.Architecture
return nil
}
@ -1868,6 +1873,15 @@ func DeepCopy_v1_PersistentVolumeSource(in PersistentVolumeSource, out *Persiste
} else {
out.AzureFile = nil
}
if in.VsphereVolume != nil {
in, out := in.VsphereVolume, &out.VsphereVolume
*out = new(VsphereVirtualDiskVolumeSource)
if err := DeepCopy_v1_VsphereVirtualDiskVolumeSource(*in, *out, c); err != nil {
return err
}
} else {
out.VsphereVolume = nil
}
return nil
}
@ -2174,6 +2188,17 @@ func DeepCopy_v1_PodSpec(in PodSpec, out *PodSpec, c *conversion.Cloner) error {
} else {
out.Volumes = nil
}
if in.InitContainers != nil {
in, out := in.InitContainers, &out.InitContainers
*out = make([]Container, len(in))
for i := range in {
if err := DeepCopy_v1_Container(in[i], &(*out)[i], c); err != nil {
return err
}
}
} else {
out.InitContainers = nil
}
if in.Containers != nil {
in, out := in.Containers, &out.Containers
*out = make([]Container, len(in))
@ -2267,6 +2292,17 @@ func DeepCopy_v1_PodStatus(in PodStatus, out *PodStatus, c *conversion.Cloner) e
} else {
out.StartTime = nil
}
if in.InitContainerStatuses != nil {
in, out := in.InitContainerStatuses, &out.InitContainerStatuses
*out = make([]ContainerStatus, len(in))
for i := range in {
if err := DeepCopy_v1_ContainerStatus(in[i], &(*out)[i], c); err != nil {
return err
}
}
} else {
out.InitContainerStatuses = nil
}
if in.ContainerStatuses != nil {
in, out := in.ContainerStatuses, &out.ContainerStatuses
*out = make([]ContainerStatus, len(in))
@ -2676,6 +2712,17 @@ func DeepCopy_v1_SecretList(in SecretList, out *SecretList, c *conversion.Cloner
func DeepCopy_v1_SecretVolumeSource(in SecretVolumeSource, out *SecretVolumeSource, c *conversion.Cloner) error {
out.SecretName = in.SecretName
if in.Items != nil {
in, out := in.Items, &out.Items
*out = make([]KeyToPath, len(in))
for i := range in {
if err := DeepCopy_v1_KeyToPath(in[i], &(*out)[i], c); err != nil {
return err
}
}
} else {
out.Items = nil
}
return nil
}
@ -2904,6 +2951,21 @@ func DeepCopy_v1_TCPSocketAction(in TCPSocketAction, out *TCPSocketAction, c *co
return nil
}
func DeepCopy_v1_Taint(in Taint, out *Taint, c *conversion.Cloner) error {
out.Key = in.Key
out.Value = in.Value
out.Effect = in.Effect
return nil
}
func DeepCopy_v1_Toleration(in Toleration, out *Toleration, c *conversion.Cloner) error {
out.Key = in.Key
out.Operator = in.Operator
out.Value = in.Value
out.Effect = in.Effect
return nil
}
func DeepCopy_v1_Volume(in Volume, out *Volume, c *conversion.Cloner) error {
out.Name = in.Name
if err := DeepCopy_v1_VolumeSource(in.VolumeSource, &out.VolumeSource, c); err != nil {
@ -3092,6 +3154,21 @@ func DeepCopy_v1_VolumeSource(in VolumeSource, out *VolumeSource, c *conversion.
} else {
out.ConfigMap = nil
}
if in.VsphereVolume != nil {
in, out := in.VsphereVolume, &out.VsphereVolume
*out = new(VsphereVirtualDiskVolumeSource)
if err := DeepCopy_v1_VsphereVirtualDiskVolumeSource(*in, *out, c); err != nil {
return err
}
} else {
out.VsphereVolume = nil
}
return nil
}
func DeepCopy_v1_VsphereVirtualDiskVolumeSource(in VsphereVirtualDiskVolumeSource, out *VsphereVirtualDiskVolumeSource, c *conversion.Cloner) error {
out.VolumePath = in.VolumePath
out.FSType = in.FSType
return nil
}

File diff suppressed because it is too large

@ -295,7 +295,7 @@ message Container {
// More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources
optional ResourceRequirements resources = 8;
// Pod volumes to mount into the container's filesyste.
// Pod volumes to mount into the container's filesystem.
// Cannot be updated.
repeated VolumeMount volumeMounts = 9;
@ -1322,6 +1322,12 @@ message NodeSystemInfo {
// KubeProxy Version reported by the node.
optional string kubeProxyVersion = 8;
// The Operating System reported by the node
optional string operatingSystem = 9;
// The Architecture reported by the node
optional string architecture = 10;
}
// ObjectFieldSelector selects an APIVersioned field of an object.
@ -1670,6 +1676,9 @@ message PersistentVolumeSource {
// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
optional AzureFileVolumeSource azureFile = 13;
// VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
optional VsphereVirtualDiskVolumeSource vsphereVolume = 14;
}
// PersistentVolumeSpec is the specification of a persistent volume.
@ -2458,6 +2467,15 @@ message SecretVolumeSource {
// Name of the secret in the pod's namespace to use.
// More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#secrets
optional string secretName = 1;
// If unspecified, each key-value pair in the Data field of the referenced
// Secret will be projected into the volume as a file whose name is the
// key and content is the value. If specified, the listed keys will be
// projected into the specified paths, and unlisted keys will not be
// present. If a key is specified which is not present in the Secret,
// the volume setup will error. Paths must be relative and may not contain
// the '..' path or start with '..'.
repeated KeyToPath items = 2;
}
// SecurityContext holds security configuration that will be applied to a container.
@ -2680,6 +2698,42 @@ message TCPSocketAction {
optional k8s.io.kubernetes.pkg.util.intstr.IntOrString port = 1;
}
// The node this Taint is attached to has the effect "effect" on
// any pod that does not tolerate the Taint.
message Taint {
// Required. The taint key to be applied to a node.
optional string key = 1;
// Required. The taint value corresponding to the taint key.
optional string value = 2;
// Required. The effect of the taint on pods
// that do not tolerate the taint.
// Valid effects are NoSchedule and PreferNoSchedule.
optional string effect = 3;
}
// The pod this Toleration is attached to tolerates any taint that matches
// the triple <key,value,effect> using the matching operator <operator>.
message Toleration {
// Required. Key is the taint key that the toleration applies to.
optional string key = 1;
// operator represents a key's relationship to the value.
// Valid operators are Exists and Equal. Defaults to Equal.
// Exists is equivalent to wildcard for value, so that a pod can
// tolerate all taints of a particular category.
optional string operator = 2;
// Value is the taint value the toleration matches to.
// If the operator is Exists, the value should be empty, otherwise just a regular string.
optional string value = 3;
// Effect indicates the taint effect to match. Empty means match all taint effects.
// When specified, allowed values are NoSchedule and PreferNoSchedule.
optional string effect = 4;
}
// Volume represents a named volume in a pod that may be accessed by any container in the pod.
message Volume {
// Volume's name.
@ -2793,6 +2847,20 @@ message VolumeSource {
// ConfigMap represents a configMap that should populate this volume
optional ConfigMapVolumeSource configMap = 19;
// VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
optional VsphereVirtualDiskVolumeSource vsphereVolume = 20;
}
// Represents a vSphere volume resource.
message VsphereVirtualDiskVolumeSource {
// Path that identifies vSphere volume vmdk
optional string volumePath = 1;
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
optional string fsType = 2;
}
// The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)

File diff suppressed because it is too large

@ -184,7 +184,7 @@ type ObjectMeta struct {
// is an identifier for the responsible component that will remove the entry
// from the list. If the deletionTimestamp of the object is non-nil, entries
// in this list can only be removed.
Finalizers []string `json:"finalizers,omitempty" protobuf:"bytes,14,rep,name=finalizers"`
Finalizers []string `json:"finalizers,omitempty" patchStrategy:"merge" protobuf:"bytes,14,rep,name=finalizers"`
}
const (
@ -273,6 +273,8 @@ type VolumeSource struct {
AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,18,opt,name=azureFile"`
// ConfigMap represents a configMap that should populate this volume
ConfigMap *ConfigMapVolumeSource `json:"configMap,omitempty" protobuf:"bytes,19,opt,name=configMap"`
// VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,20,opt,name=vsphereVolume"`
}
// PersistentVolumeClaimVolumeSource references the user's PVC in the same namespace.
@ -333,6 +335,8 @@ type PersistentVolumeSource struct {
FlexVolume *FlexVolumeSource `json:"flexVolume,omitempty" protobuf:"bytes,12,opt,name=flexVolume"`
// AzureFile represents an Azure File Service mount on the host and bind mount to the pod.
AzureFile *AzureFileVolumeSource `json:"azureFile,omitempty" protobuf:"bytes,13,opt,name=azureFile"`
// VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine
VsphereVolume *VsphereVirtualDiskVolumeSource `json:"vsphereVolume,omitempty" protobuf:"bytes,14,opt,name=vsphereVolume"`
}
// +genclient=true,nonNamespaced=true
@ -506,6 +510,10 @@ const (
ClaimPending PersistentVolumeClaimPhase = "Pending"
// used for PersistentVolumeClaims that are bound
ClaimBound PersistentVolumeClaimPhase = "Bound"
// used for PersistentVolumeClaims that lost their underlying
// PersistentVolume. The claim was bound to a PersistentVolume and this
// volume does not exist any longer and all data on it was lost.
ClaimLost PersistentVolumeClaimPhase = "Lost"
)
// Represents a host path mapped into a pod.
@ -749,6 +757,14 @@ type SecretVolumeSource struct {
// Name of the secret in the pod's namespace to use.
// More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#secrets
SecretName string `json:"secretName,omitempty" protobuf:"bytes,1,opt,name=secretName"`
// If unspecified, each key-value pair in the Data field of the referenced
// Secret will be projected into the volume as a file whose name is the
// key and content is the value. If specified, the listed keys will be
// projected into the specified paths, and unlisted keys will not be
// present. If a key is specified which is not present in the Secret,
// the volume setup will error. Paths must be relative and may not contain
// the '..' path or start with '..'.
Items []KeyToPath `json:"items,omitempty" protobuf:"bytes,2,rep,name=items"`
}
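
With the new Items field, a secret volume can project selected keys to chosen paths instead of materializing every key in the secret. A sketch of constructing such a source, with trimmed copies of the types above (KeyToPath is assumed to carry just a key and a path, as in the upstream type):

package main

import "fmt"

// Trimmed copies of the types above, for illustration only.
type KeyToPath struct {
	Key  string `json:"key"`
	Path string `json:"path"`
}

type SecretVolumeSource struct {
	SecretName string      `json:"secretName,omitempty"`
	Items      []KeyToPath `json:"items,omitempty"`
}

func main() {
	// Only the "tls.crt" key is projected, and it lands at certs/server.crt
	// inside the volume; unlisted keys are omitted entirely.
	src := SecretVolumeSource{
		SecretName: "web-tls",
		Items: []KeyToPath{
			{Key: "tls.crt", Path: "certs/server.crt"},
		},
	}
	fmt.Printf("%+v\n", src)
}
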
// Represents an NFS mount that lasts the lifetime of a pod.
@ -822,6 +838,16 @@ type AzureFileVolumeSource struct {
ReadOnly bool `json:"readOnly,omitempty" protobuf:"varint,3,opt,name=readOnly"`
}
// Represents a vSphere volume resource.
type VsphereVirtualDiskVolumeSource struct {
// Path that identifies vSphere volume vmdk
VolumePath string `json:"volumePath" protobuf:"bytes,1,opt,name=volumePath"`
// Filesystem type to mount.
// Must be a filesystem type supported by the host operating system.
// Ex. "ext4", "xfs", "ntfs". Implicitly inferred to be "ext4" if unspecified.
FSType string `json:"fsType,omitempty" protobuf:"bytes,2,opt,name=fsType"`
}
// Adapts a ConfigMap into a volume.
//
// The contents of the target ConfigMap's Data field will be presented in a
@ -1106,7 +1132,7 @@ type Container struct {
// Cannot be updated.
// More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources
Resources ResourceRequirements `json:"resources,omitempty" protobuf:"bytes,8,opt,name=resources"`
// Pod volumes to mount into the container's filesyste.
// Pod volumes to mount into the container's filesystem.
// Cannot be updated.
VolumeMounts []VolumeMount `json:"volumeMounts,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,9,rep,name=volumeMounts"`
// Periodic probe of container liveness.
@ -1541,11 +1567,103 @@ type PreferredSchedulingTerm struct {
Preference NodeSelectorTerm `json:"preference" protobuf:"bytes,2,opt,name=preference"`
}
// The node this Taint is attached to has the effect "effect" on
// any pod that does not tolerate the Taint.
type Taint struct {
// Required. The taint key to be applied to a node.
Key string `json:"key" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"`
// Required. The taint value corresponding to the taint key.
Value string `json:"value,omitempty" protobuf:"bytes,2,opt,name=value"`
// Required. The effect of the taint on pods
// that do not tolerate the taint.
// Valid effects are NoSchedule and PreferNoSchedule.
Effect TaintEffect `json:"effect" protobuf:"bytes,3,opt,name=effect,casttype=TaintEffect"`
}
type TaintEffect string
const (
// Do not allow new pods to schedule onto the node unless they tolerate the taint,
// but allow all pods submitted to Kubelet without going through the scheduler
// to start, and allow all already-running pods to continue running.
// Enforced by the scheduler.
TaintEffectNoSchedule TaintEffect = "NoSchedule"
// Like TaintEffectNoSchedule, but the scheduler tries not to schedule
// new pods onto the node, rather than prohibiting new pods from scheduling
// onto the node entirely. Enforced by the scheduler.
TaintEffectPreferNoSchedule TaintEffect = "PreferNoSchedule"
// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
// Do not allow new pods to schedule onto the node unless they tolerate the taint,
// do not allow pods to start on Kubelet unless they tolerate the taint,
// but allow all already-running pods to continue running.
// Enforced by the scheduler and Kubelet.
// TaintEffectNoScheduleNoAdmit TaintEffect = "NoScheduleNoAdmit"
// NOT YET IMPLEMENTED. TODO: Uncomment field once it is implemented.
// Do not allow new pods to schedule onto the node unless they tolerate the taint,
// do not allow pods to start on Kubelet unless they tolerate the taint,
// and evict any already-running pods that do not tolerate the taint.
// Enforced by the scheduler and Kubelet.
// TaintEffectNoScheduleNoAdmitNoExecute = "NoScheduleNoAdmitNoExecute"
)
// The pod this Toleration is attached to tolerates any taint that matches
// the triple <key,value,effect> using the matching operator <operator>.
type Toleration struct {
// Required. Key is the taint key that the toleration applies to.
Key string `json:"key,omitempty" patchStrategy:"merge" patchMergeKey:"key" protobuf:"bytes,1,opt,name=key"`
// operator represents a key's relationship to the value.
// Valid operators are Exists and Equal. Defaults to Equal.
// Exists is equivalent to wildcard for value, so that a pod can
// tolerate all taints of a particular category.
Operator TolerationOperator `json:"operator,omitempty" protobuf:"bytes,2,opt,name=operator,casttype=TolerationOperator"`
// Value is the taint value the toleration matches to.
// If the operator is Exists, the value should be empty, otherwise just a regular string.
Value string `json:"value,omitempty" protobuf:"bytes,3,opt,name=value"`
// Effect indicates the taint effect to match. Empty means match all taint effects.
// When specified, allowed values are NoSchedule and PreferNoSchedule.
Effect TaintEffect `json:"effect,omitempty" protobuf:"bytes,4,opt,name=effect,casttype=TaintEffect"`
// TODO: For forgiveness (#1574), we'd eventually add at least a grace period
// here, and possibly an occurrence threshold and period.
}
// A toleration operator is the set of operators that can be used in a toleration.
type TolerationOperator string
const (
TolerationOpExists TolerationOperator = "Exists"
TolerationOpEqual TolerationOperator = "Equal"
)
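
Taken together, the Taint and Toleration types above imply a simple matching rule: a toleration with operator Exists matches any value for its key, Equal requires the values to agree, and an empty effect matches every taint effect. A sketch of that predicate with trimmed types; this is an illustration of the documented semantics, not the scheduler's actual helper:

package main

import "fmt"

type TaintEffect string
type TolerationOperator string

const (
	TolerationOpExists TolerationOperator = "Exists"
	TolerationOpEqual  TolerationOperator = "Equal"
)

type Taint struct {
	Key    string
	Value  string
	Effect TaintEffect
}

type Toleration struct {
	Key      string
	Operator TolerationOperator
	Value    string
	Effect   TaintEffect
}

// tolerates implements the <key,value,effect> matching described above.
func tolerates(tol Toleration, taint Taint) bool {
	if tol.Key != taint.Key {
		return false
	}
	// An empty effect on the toleration matches all taint effects.
	if tol.Effect != "" && tol.Effect != taint.Effect {
		return false
	}
	switch tol.Operator {
	case TolerationOpExists:
		return true
	default: // Equal is the documented default when operator is unset
		return tol.Value == taint.Value
	}
}

func main() {
	taint := Taint{Key: "dedicated", Value: "gpu", Effect: "NoSchedule"}
	tol := Toleration{Key: "dedicated", Operator: TolerationOpExists}
	fmt.Println(tolerates(tol, taint)) // true
}
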
const (
// This annotation key will be used to contain an array of v1 JSON encoded Containers
// for init containers. The annotation will be placed into the internal type and cleared.
PodInitContainersAnnotationKey = "pod.alpha.kubernetes.io/init-containers"
// This annotation key will be used to contain an array of v1 JSON encoded
// ContainerStatuses for init containers. The annotation will be placed into the internal
// type and cleared.
PodInitContainerStatusesAnnotationKey = "pod.alpha.kubernetes.io/init-container-statuses"
)
// PodSpec is a description of a pod.
type PodSpec struct {
// List of volumes that can be mounted by containers belonging to the pod.
// More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md
Volumes []Volume `json:"volumes,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,1,rep,name=volumes"`
// List of initialization containers belonging to the pod.
// Init containers are executed in order prior to containers being started. If any
// init container fails, the pod is considered to have failed and is handled according
// to its restartPolicy. The name for an init container or normal container must be
// unique among all containers.
// Init containers may not have Lifecycle actions, Readiness probes, or Liveness probes.
// The resourceRequirements of an init container are taken into account during scheduling
// by finding the highest request/limit for each resource type, and then using the max of
// that value or the sum of the normal containers. Limits are applied to init containers
// in a similar fashion.
// Init containers cannot currently be added or removed.
// Init containers are in alpha state and may change without notice.
// Cannot be updated.
// More info: http://releases.k8s.io/HEAD/docs/user-guide/containers.md
InitContainers []Container `json:"-" patchStrategy:"merge" patchMergeKey:"name"`
// List of containers belonging to the pod.
// Containers cannot currently be added or removed.
// There must be at least one container in a Pod.
@ -1679,6 +1797,12 @@ type PodStatus struct {
// This is before the Kubelet pulled the container image(s) for the pod.
StartTime *unversioned.Time `json:"startTime,omitempty" protobuf:"bytes,7,opt,name=startTime"`
// The list has one entry per init container in the manifest. The most recent successful
// init container will have ready = true, the most recently started container will have
// startTime set.
// Init containers are in alpha state and may change without notice.
// More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses
InitContainerStatuses []ContainerStatus `json:"-"`
// The list has one entry per container in the manifest. Each entry is currently the output
// of `docker inspect`.
// More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-statuses
@ -2208,6 +2332,10 @@ type NodeSystemInfo struct {
KubeletVersion string `json:"kubeletVersion" protobuf:"bytes,7,opt,name=kubeletVersion"`
// KubeProxy Version reported by the node.
KubeProxyVersion string `json:"kubeProxyVersion" protobuf:"bytes,8,opt,name=kubeProxyVersion"`
// The Operating System reported by the node
OperatingSystem string `json:"operatingSystem" protobuf:"bytes,9,opt,name=operatingSystem"`
// The Architecture reported by the node
Architecture string `json:"architecture" protobuf:"bytes,10,opt,name=architecture"`
}
// NodeStatus is information about the current status of a node.
@ -2269,6 +2397,8 @@ const (
// NodeOutOfDisk means the kubelet will not accept new pods due to insufficient free disk
// space on the node.
NodeOutOfDisk NodeConditionType = "OutOfDisk"
// NodeMemoryPressure means the kubelet is under pressure due to insufficient available memory.
NodeMemoryPressure NodeConditionType = "MemoryPressure"
)
// NodeCondition contains condition information for a node.

@ -1,5 +1,5 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -186,7 +186,7 @@ var map_Container = map[string]string{
"ports": "List of ports to expose from the container. Exposing a port here gives the system additional information about the network connections a container uses, but is primarily informational. Not specifying a port here DOES NOT prevent that port from being exposed. Any port which is listening on the default \"0.0.0.0\" address inside a container will be accessible from the network. Cannot be updated.",
"env": "List of environment variables to set in the container. Cannot be updated.",
"resources": "Compute Resources required by this container. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#resources",
"volumeMounts": "Pod volumes to mount into the container's filesyste. Cannot be updated.",
"volumeMounts": "Pod volumes to mount into the container's filesystem. Cannot be updated.",
"livenessProbe": "Periodic probe of container liveness. Container will be restarted if the probe fails. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes",
"readinessProbe": "Periodic probe of container service readiness. Container will be removed from service endpoints if the probe fails. Cannot be updated. More info: http://releases.k8s.io/HEAD/docs/user-guide/pod-states.md#container-probes",
"lifecycle": "Actions that the management system should take in response to container lifecycle events. Cannot be updated.",
@ -894,6 +894,8 @@ var map_NodeSystemInfo = map[string]string{
"containerRuntimeVersion": "ContainerRuntime Version reported by the node through runtime remote API (e.g. docker://1.5.0).",
"kubeletVersion": "Kubelet Version reported by the node.",
"kubeProxyVersion": "KubeProxy Version reported by the node.",
"operatingSystem": "The Operating System reported by the node",
"architecture": "The Architecture reported by the node",
}
func (NodeSystemInfo) SwaggerDoc() map[string]string {
@ -1048,6 +1050,7 @@ var map_PersistentVolumeSource = map[string]string{
"flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine and exposed to the pod for its usage. This depends on the Flocker control service being running",
"flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using a exec based plugin. This is an alpha feature and may change in future.",
"azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
"vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
}
func (PersistentVolumeSource) SwaggerDoc() map[string]string {
@ -1487,6 +1490,7 @@ func (SecretList) SwaggerDoc() map[string]string {
var map_SecretVolumeSource = map[string]string{
"": "Adapts a Secret into a volume.\n\nThe contents of the target Secret's Data field will be presented in a volume as files using the keys in the Data field as the file names. Secret volumes support ownership management and SELinux relabeling.",
"secretName": "Name of the secret in the pod's namespace to use. More info: http://releases.k8s.io/HEAD/docs/user-guide/volumes.md#secrets",
"items": "If unspecified, each key-value pair in the Data field of the referenced Secret will be projected into the volume as a file whose name is the key and content is the value. If specified, the listed keys will be projected into the specified paths, and unlisted keys will not be present. If a key is specified which is not present in the Secret, the volume setup will error. Paths must be relative and may not contain the '..' path or start with '..'.",
}
func (SecretVolumeSource) SwaggerDoc() map[string]string {
@ -1614,6 +1618,29 @@ func (TCPSocketAction) SwaggerDoc() map[string]string {
return map_TCPSocketAction
}
var map_Taint = map[string]string{
"": "The node this Taint is attached to has the effect \"effect\" on any pod that that does not tolerate the Taint.",
"key": "Required. The taint key to be applied to a node.",
"value": "Required. The taint value corresponding to the taint key.",
"effect": "Required. The effect of the taint on pods that do not tolerate the taint. Valid effects are NoSchedule and PreferNoSchedule.",
}
func (Taint) SwaggerDoc() map[string]string {
return map_Taint
}
var map_Toleration = map[string]string{
"": "The pod this Toleration is attached to tolerates any taint that matches the triple <key,value,effect> using the matching operator <operator>.",
"key": "Required. Key is the taint key that the toleration applies to.",
"operator": "operator represents a key's relationship to the value. Valid operators are Exists and Equal. Defaults to Equal. Exists is equivalent to wildcard for value, so that a pod can tolerate all taints of a particular category.",
"value": "Value is the taint value the toleration matches to. If the operator is Exists, the value should be empty, otherwise just a regular string.",
"effect": "Effect indicates the taint effect to match. Empty means match all taint effects. When specified, allowed values are NoSchedule and PreferNoSchedule.",
}
func (Toleration) SwaggerDoc() map[string]string {
return map_Toleration
}
var map_Volume = map[string]string{
"": "Volume represents a named volume in a pod that may be accessed by any container in the pod.",
"name": "Volume's name. Must be a DNS_LABEL and unique within the pod. More info: http://releases.k8s.io/HEAD/docs/user-guide/identifiers.md#names",
@ -1647,21 +1674,32 @@ var map_VolumeSource = map[string]string{
"iscsi": "ISCSI represents an ISCSI Disk resource that is attached to a kubelet's host machine and then exposed to the pod. More info: http://releases.k8s.io/HEAD/examples/iscsi/README.md",
"glusterfs": "Glusterfs represents a Glusterfs mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/glusterfs/README.md",
"persistentVolumeClaim": "PersistentVolumeClaimVolumeSource represents a reference to a PersistentVolumeClaim in the same namespace. More info: http://releases.k8s.io/HEAD/docs/user-guide/persistent-volumes.md#persistentvolumeclaims",
"rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md",
"flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using a exec based plugin. This is an alpha feature and may change in future.",
"cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
"cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
"flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running",
"downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume",
"fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
"azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
"configMap": "ConfigMap represents a configMap that should populate this volume",
"rbd": "RBD represents a Rados Block Device mount on the host that shares a pod's lifetime. More info: http://releases.k8s.io/HEAD/examples/rbd/README.md",
"flexVolume": "FlexVolume represents a generic volume resource that is provisioned/attached using a exec based plugin. This is an alpha feature and may change in future.",
"cinder": "Cinder represents a cinder volume attached and mounted on kubelets host machine More info: http://releases.k8s.io/HEAD/examples/mysql-cinder-pd/README.md",
"cephfs": "CephFS represents a Ceph FS mount on the host that shares a pod's lifetime",
"flocker": "Flocker represents a Flocker volume attached to a kubelet's host machine. This depends on the Flocker control service being running",
"downwardAPI": "DownwardAPI represents downward API about the pod that should populate this volume",
"fc": "FC represents a Fibre Channel resource that is attached to a kubelet's host machine and then exposed to the pod.",
"azureFile": "AzureFile represents an Azure File Service mount on the host and bind mount to the pod.",
"configMap": "ConfigMap represents a configMap that should populate this volume",
"vsphereVolume": "VsphereVolume represents a vSphere volume attached and mounted on kubelets host machine",
}
func (VolumeSource) SwaggerDoc() map[string]string {
return map_VolumeSource
}
var map_VsphereVirtualDiskVolumeSource = map[string]string{
"": "Represents a vSphere volume resource.",
"volumePath": "Path that identifies vSphere volume vmdk",
"fsType": "Filesystem type to mount. Must be a filesystem type supported by the host operating system. Ex. \"ext4\", \"xfs\", \"ntfs\". Implicitly inferred to be \"ext4\" if unspecified.",
}
func (VsphereVirtualDiskVolumeSource) SwaggerDoc() map[string]string {
return map_VsphereVirtualDiskVolumeSource
}
var map_WeightedPodAffinityTerm = map[string]string{
"": "The weights of all of the matched WeightedPodAffinityTerm fields are added per-node to find the most preferred node(s)",
"weight": "weight associated with matching the corresponding podAffinityTerm, in the range 1-100.",

@ -37,8 +37,8 @@ func ValidateEvent(event *api.Event) field.ErrorList {
event.Namespace != event.InvolvedObject.Namespace {
allErrs = append(allErrs, field.Invalid(field.NewPath("involvedObject", "namespace"), event.InvolvedObject.Namespace, "does not match involvedObject"))
}
if !validation.IsDNS1123Subdomain(event.Namespace) {
allErrs = append(allErrs, field.Invalid(field.NewPath("namespace"), event.Namespace, ""))
for _, msg := range validation.IsDNS1123Subdomain(event.Namespace) {
allErrs = append(allErrs, field.Invalid(field.NewPath("namespace"), event.Namespace, msg))
}
return allErrs
}

@ -28,36 +28,36 @@ var NameMayNotBe = []string{".", ".."}
var NameMayNotContain = []string{"/", "%"}
// IsValidPathSegmentName validates the name can be safely encoded as a path segment
func IsValidPathSegmentName(name string) (bool, string) {
func IsValidPathSegmentName(name string) []string {
for _, illegalName := range NameMayNotBe {
if name == illegalName {
return false, fmt.Sprintf(`name may not be %q`, illegalName)
return []string{fmt.Sprintf(`may not be '%s'`, illegalName)}
}
}
for _, illegalContent := range NameMayNotContain {
if strings.Contains(name, illegalContent) {
return false, fmt.Sprintf(`name may not contain %q`, illegalContent)
return []string{fmt.Sprintf(`may not contain '%s'`, illegalContent)}
}
}
return true, ""
return nil
}
// IsValidPathSegmentPrefix validates the name can be used as a prefix for a name which will be encoded as a path segment
// It does not check for exact matches with disallowed names, since an arbitrary suffix might make the name valid
func IsValidPathSegmentPrefix(name string) (bool, string) {
func IsValidPathSegmentPrefix(name string) []string {
for _, illegalContent := range NameMayNotContain {
if strings.Contains(name, illegalContent) {
return false, fmt.Sprintf(`name may not contain %q`, illegalContent)
return []string{fmt.Sprintf(`may not contain '%s'`, illegalContent)}
}
}
return true, ""
return nil
}
// ValidatePathSegmentName validates the name can be safely encoded as a path segment
func ValidatePathSegmentName(name string, prefix bool) (bool, string) {
func ValidatePathSegmentName(name string, prefix bool) []string {
if prefix {
return IsValidPathSegmentPrefix(name)
} else {
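
These validators compose with the multi-message pattern shown earlier: an empty or nil slice means the name is valid. A small usage sketch that reproduces the two rules above locally:

package main

import (
	"fmt"
	"strings"
)

// isValidPathSegmentName reproduces the rules above: the name may not be
// "." or "..", and may not contain "/" or "%".
func isValidPathSegmentName(name string) []string {
	for _, illegal := range []string{".", ".."} {
		if name == illegal {
			return []string{fmt.Sprintf("may not be '%s'", illegal)}
		}
	}
	for _, illegal := range []string{"/", "%"} {
		if strings.Contains(name, illegal) {
			return []string{fmt.Sprintf("may not contain '%s'", illegal)}
		}
	}
	return nil
}

func main() {
	for _, name := range []string{"valid-name", "..", "a/b"} {
		if msgs := isValidPathSegmentName(name); len(msgs) > 0 {
			fmt.Printf("%q rejected: %v\n", name, msgs)
		} else {
			fmt.Printf("%q ok\n", name)
		}
	}
}
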

@ -58,11 +58,6 @@ func InclusiveRangeErrorMsg(lo, hi int) string {
return fmt.Sprintf(`must be between %d and %d, inclusive`, lo, hi)
}
var labelValueErrorMsg string = fmt.Sprintf(`must have at most %d characters, matching regex %s: e.g. "MyValue" or ""`, validation.LabelValueMaxLength, validation.LabelValueFmt)
var qualifiedNameErrorMsg string = fmt.Sprintf(`must be a qualified name (at most %d characters, matching regex %s), with an optional DNS subdomain prefix (at most %d characters, matching regex %s) and slash (/): e.g. "MyName" or "example.com/MyName"`, validation.QualifiedNameMaxLength, validation.QualifiedNameFmt, validation.DNS1123SubdomainMaxLength, validation.DNS1123SubdomainFmt)
var DNSSubdomainErrorMsg string = fmt.Sprintf(`must be a DNS subdomain (at most %d characters, matching regex %s): e.g. "example.com"`, validation.DNS1123SubdomainMaxLength, validation.DNS1123SubdomainFmt)
var DNS1123LabelErrorMsg string = fmt.Sprintf(`must be a DNS label (at most %d characters, matching regex %s): e.g. "my-name"`, validation.DNS1123LabelMaxLength, validation.DNS1123LabelFmt)
var DNS952LabelErrorMsg string = fmt.Sprintf(`must be a DNS 952 label (at most %d characters, matching regex %s): e.g. "my-name"`, validation.DNS952LabelMaxLength, validation.DNS952LabelFmt)
var pdPartitionErrorMsg string = InclusiveRangeErrorMsg(1, 255)
var PortRangeErrorMsg string = InclusiveRangeErrorMsg(1, 65535)
var IdRangeErrorMsg string = InclusiveRangeErrorMsg(0, math.MaxInt32)
@ -94,8 +89,8 @@ func ValidateAnnotations(annotations map[string]string, fldPath *field.Path) fie
allErrs := field.ErrorList{}
var totalSize int64
for k, v := range annotations {
if !validation.IsQualifiedName(strings.ToLower(k)) {
allErrs = append(allErrs, field.Invalid(fldPath, k, qualifiedNameErrorMsg))
for _, msg := range validation.IsQualifiedName(strings.ToLower(k)) {
allErrs = append(allErrs, field.Invalid(fldPath, k, msg))
}
totalSize += (int64)(len(k)) + (int64)(len(v))
}
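
The size accumulation above feeds an overall cap on the annotation payload; the sketch below assumes the upstream limit of 256 kB, which is not shown in this hunk:

package main

import "fmt"

// totalAnnotationSizeLimitB mirrors the upstream cap (assumed 256 kB here).
const totalAnnotationSizeLimitB = 256 * (1 << 10)

func validateAnnotationSize(annotations map[string]string) error {
	var totalSize int64
	for k, v := range annotations {
		totalSize += int64(len(k)) + int64(len(v))
	}
	if totalSize > totalAnnotationSizeLimitB {
		return fmt.Errorf("annotations size %d exceeds limit %d", totalSize, totalAnnotationSizeLimitB)
	}
	return nil
}

func main() {
	fmt.Println(validateAnnotationSize(map[string]string{"team": "infra"})) // <nil>
}
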
@ -111,12 +106,20 @@ func ValidatePodSpecificAnnotations(annotations map[string]string, fldPath *fiel
allErrs = append(allErrs, ValidateAffinityInPodAnnotations(annotations, fldPath)...)
}
if hostname, exists := annotations[utilpod.PodHostnameAnnotation]; exists && !validation.IsDNS1123Label(hostname) {
allErrs = append(allErrs, field.Invalid(fldPath, utilpod.PodHostnameAnnotation, DNS1123LabelErrorMsg))
if annotations[api.TolerationsAnnotationKey] != "" {
allErrs = append(allErrs, ValidateTolerationsInPodAnnotations(annotations, fldPath)...)
}
if subdomain, exists := annotations[utilpod.PodSubdomainAnnotation]; exists && !validation.IsDNS1123Label(subdomain) {
allErrs = append(allErrs, field.Invalid(fldPath, utilpod.PodSubdomainAnnotation, DNS1123LabelErrorMsg))
if hostname, exists := annotations[utilpod.PodHostnameAnnotation]; exists {
for _, msg := range validation.IsDNS1123Label(hostname) {
allErrs = append(allErrs, field.Invalid(fldPath, utilpod.PodHostnameAnnotation, msg))
}
}
if subdomain, exists := annotations[utilpod.PodSubdomainAnnotation]; exists {
for _, msg := range validation.IsDNS1123Label(subdomain) {
allErrs = append(allErrs, field.Invalid(fldPath, utilpod.PodSubdomainAnnotation, msg))
}
}
return allErrs
@ -164,9 +167,11 @@ func ValidateOwnerReferences(ownerReferences []api.OwnerReference, fldPath *fiel
}
// ValidateNameFunc validates that the provided name is valid for a given resource type.
// Not all resources have the same validation rules for names. Prefix is true if the
// name will have a value appended to it.
type ValidateNameFunc func(name string, prefix bool) (bool, string)
// Not all resources have the same validation rules for names. Prefix is true
// if the name will have a value appended to it. If the name is not valid,
// this returns a list of descriptions of individual characteristics of the
// value that were not valid. Otherwise this returns an empty list or nil.
type ValidateNameFunc func(name string, prefix bool) []string
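Editor's aside: a hypothetical ValidateNameFunc written against the new contract, stdlib only; the resource type, length limit, and messages are invented for illustration.

package main

import (
	"fmt"
	"strings"
)

// validateWidgetName follows the new ValidateNameFunc contract: return nil
// (or an empty slice) for a valid name, otherwise one message per violation.
func validateWidgetName(name string, prefix bool) []string {
	if prefix {
		// A generated suffix will complete the name, so ignore a trailing dash.
		name = strings.TrimSuffix(name, "-")
	}
	var msgs []string
	if len(name) > 63 {
		msgs = append(msgs, "must be no more than 63 characters")
	}
	if strings.Contains(name, " ") {
		msgs = append(msgs, "may not contain spaces")
	}
	return msgs
}

func main() {
	fmt.Println(validateWidgetName("bad name", false)) // [may not contain spaces]
}

This signature change is also what lets the wrapper functions below collapse into plain variable aliases such as var ValidatePodName = NameIsDNSSubdomain: the Name* helpers already satisfy ValidateNameFunc.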
// maskTrailingDash replaces the final character of a string with a subdomain safe
// value if it is a dash.
@ -180,106 +185,77 @@ func maskTrailingDash(name string) string {
// ValidatePodName can be used to check whether the given pod name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
func ValidatePodName(name string, prefix bool) (bool, string) {
return NameIsDNSSubdomain(name, prefix)
}
var ValidatePodName = NameIsDNSSubdomain
// ValidateReplicationControllerName can be used to check whether the given replication
// controller name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
func ValidateReplicationControllerName(name string, prefix bool) (bool, string) {
return NameIsDNSSubdomain(name, prefix)
}
var ValidateReplicationControllerName = NameIsDNSSubdomain
// ValidateServiceName can be used to check whether the given service name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
func ValidateServiceName(name string, prefix bool) (bool, string) {
return NameIsDNS952Label(name, prefix)
}
var ValidateServiceName = NameIsDNS952Label
// ValidateNodeName can be used to check whether the given node name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
func ValidateNodeName(name string, prefix bool) (bool, string) {
return NameIsDNSSubdomain(name, prefix)
}
var ValidateNodeName = NameIsDNSSubdomain
// ValidateNamespaceName can be used to check whether the given namespace name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
func ValidateNamespaceName(name string, prefix bool) (bool, string) {
return NameIsDNSLabel(name, prefix)
}
var ValidateNamespaceName = NameIsDNSLabel
// ValidateLimitRangeName can be used to check whether the given limit range name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
func ValidateLimitRangeName(name string, prefix bool) (bool, string) {
return NameIsDNSSubdomain(name, prefix)
}
var ValidateLimitRangeName = NameIsDNSSubdomain
// ValidateResourceQuotaName can be used to check whether the given
// resource quota name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
func ValidateResourceQuotaName(name string, prefix bool) (bool, string) {
return NameIsDNSSubdomain(name, prefix)
}
var ValidateResourceQuotaName = NameIsDNSSubdomain
// ValidateSecretName can be used to check whether the given secret name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
func ValidateSecretName(name string, prefix bool) (bool, string) {
return NameIsDNSSubdomain(name, prefix)
}
var ValidateSecretName = NameIsDNSSubdomain
// ValidateServiceAccountName can be used to check whether the given service account name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
func ValidateServiceAccountName(name string, prefix bool) (bool, string) {
return NameIsDNSSubdomain(name, prefix)
}
var ValidateServiceAccountName = NameIsDNSSubdomain
// ValidateEndpointsName can be used to check whether the given endpoints name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
func ValidateEndpointsName(name string, prefix bool) (bool, string) {
return NameIsDNSSubdomain(name, prefix)
}
var ValidateEndpointsName = NameIsDNSSubdomain
// NameIsDNSSubdomain is a ValidateNameFunc for names that must be a DNS subdomain.
func NameIsDNSSubdomain(name string, prefix bool) (bool, string) {
func NameIsDNSSubdomain(name string, prefix bool) []string {
if prefix {
name = maskTrailingDash(name)
}
if validation.IsDNS1123Subdomain(name) {
return true, ""
}
return false, DNSSubdomainErrorMsg
return validation.IsDNS1123Subdomain(name)
}
// NameIsDNSLabel is a ValidateNameFunc for names that must be a DNS 1123 label.
func NameIsDNSLabel(name string, prefix bool) (bool, string) {
func NameIsDNSLabel(name string, prefix bool) []string {
if prefix {
name = maskTrailingDash(name)
}
if validation.IsDNS1123Label(name) {
return true, ""
}
return false, DNS1123LabelErrorMsg
return validation.IsDNS1123Label(name)
}
// NameIsDNS952Label is a ValidateNameFunc for names that must be a DNS 952 label.
func NameIsDNS952Label(name string, prefix bool) (bool, string) {
func NameIsDNS952Label(name string, prefix bool) []string {
if prefix {
name = maskTrailingDash(name)
}
if validation.IsDNS952Label(name) {
return true, ""
}
return false, DNS952LabelErrorMsg
return validation.IsDNS952Label(name)
}
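Editor's aside: a sketch of the prefix semantics (import path assumed from the vendored tree). With prefix=true the trailing dash is masked before checking, since generation will append a suffix; with prefix=false the same name fails.

package main

import (
	"fmt"

	apivalidation "k8s.io/kubernetes/pkg/api/validation"
)

func main() {
	fmt.Println(len(apivalidation.NameIsDNSSubdomain("my-name-", true)))  // 0: dash masked
	fmt.Println(len(apivalidation.NameIsDNSSubdomain("my-name-", false))) // > 0: trailing dash invalid
}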
// Validates that the given value is not negative.
@ -316,8 +292,8 @@ func ValidateObjectMeta(meta *api.ObjectMeta, requiresNamespace bool, nameFn Val
allErrs := field.ErrorList{}
if len(meta.GenerateName) != 0 {
if ok, qualifier := nameFn(meta.GenerateName, true); !ok {
allErrs = append(allErrs, field.Invalid(fldPath.Child("generateName"), meta.GenerateName, qualifier))
for _, msg := range nameFn(meta.GenerateName, true) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("generateName"), meta.GenerateName, msg))
}
}
// If the generated name validates, but the calculated value does not, it's a problem with generation, and we
@ -326,15 +302,17 @@ func ValidateObjectMeta(meta *api.ObjectMeta, requiresNamespace bool, nameFn Val
if len(meta.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), "name or generateName is required"))
} else {
if ok, qualifier := nameFn(meta.Name, false); !ok {
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), meta.Name, qualifier))
for _, msg := range nameFn(meta.Name, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), meta.Name, msg))
}
}
if requiresNamespace {
if len(meta.Namespace) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("namespace"), ""))
} else if ok, _ := ValidateNamespaceName(meta.Namespace, false); !ok {
allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), meta.Namespace, DNS1123LabelErrorMsg))
} else {
for _, msg := range ValidateNamespaceName(meta.Namespace, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), meta.Namespace, msg))
}
}
} else {
if len(meta.Namespace) != 0 {
@ -413,8 +391,10 @@ func validateVolumes(volumes []api.Volume, fldPath *field.Path) (sets.String, fi
el := validateVolumeSource(&vol.VolumeSource, idxPath)
if len(vol.Name) == 0 {
el = append(el, field.Required(idxPath.Child("name"), ""))
} else if !validation.IsDNS1123Label(vol.Name) {
el = append(el, field.Invalid(idxPath.Child("name"), vol.Name, DNS1123LabelErrorMsg))
} else if msgs := validation.IsDNS1123Label(vol.Name); len(msgs) != 0 {
for i := range msgs {
el = append(el, field.Invalid(idxPath.Child("name"), vol.Name, msgs[i]))
}
} else if allNames.Has(vol.Name) {
el = append(el, field.Duplicate(idxPath.Child("name"), vol.Name))
}
@ -818,9 +798,9 @@ func validateAzureFile(azure *api.AzureFileVolumeSource, fldPath *field.Path) fi
return allErrs
}
func ValidatePersistentVolumeName(name string, prefix bool) (bool, string) {
return NameIsDNSSubdomain(name, prefix)
}
// ValidatePersistentVolumeName checks that a name is appropriate for a
// PersistentVolume object.
var ValidatePersistentVolumeName = NameIsDNSSubdomain
var supportedAccessModes = sets.NewString(string(api.ReadWriteOnce), string(api.ReadOnlyMany), string(api.ReadWriteMany))
@ -1327,6 +1307,37 @@ func validatePullPolicy(policy api.PullPolicy, fldPath *field.Path) field.ErrorL
return allErrors
}
func validateInitContainers(containers, otherContainers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList {
var allErrs field.ErrorList
if len(containers) > 0 {
allErrs = append(allErrs, validateContainers(containers, volumes, fldPath)...)
}
allNames := sets.String{}
for _, ctr := range otherContainers {
allNames.Insert(ctr.Name)
}
for i, ctr := range containers {
idxPath := fldPath.Index(i)
if allNames.Has(ctr.Name) {
allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), ctr.Name))
}
if len(ctr.Name) > 0 {
allNames.Insert(ctr.Name)
}
if ctr.Lifecycle != nil {
allErrs = append(allErrs, field.Invalid(idxPath.Child("lifecycle"), ctr.Lifecycle, "must not be set for init containers"))
}
if ctr.LivenessProbe != nil {
allErrs = append(allErrs, field.Invalid(idxPath.Child("livenessProbe"), ctr.LivenessProbe, "must not be set for init containers"))
}
if ctr.ReadinessProbe != nil {
allErrs = append(allErrs, field.Invalid(idxPath.Child("readinessProbe"), ctr.ReadinessProbe, "must not be set for init containers"))
}
}
return allErrs
}
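Editor's aside: the name-uniqueness rule above spans init containers and regular containers together. A standalone sketch of just that check, with made-up container names and the sets import path assumed from the vendored tree.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/util/sets"
)

func main() {
	// Names already claimed by the regular containers.
	allNames := sets.NewString("app-1", "app-2")
	for _, name := range []string{"init-db", "app-1"} {
		if allNames.Has(name) {
			fmt.Printf("duplicate container name %q\n", name) // fires for "app-1"
		}
		allNames.Insert(name)
	}
}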
func validateContainers(containers []api.Container, volumes sets.String, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
@ -1339,8 +1350,10 @@ func validateContainers(containers []api.Container, volumes sets.String, fldPath
idxPath := fldPath.Index(i)
if len(ctr.Name) == 0 {
allErrs = append(allErrs, field.Required(idxPath.Child("name"), ""))
} else if !validation.IsDNS1123Label(ctr.Name) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), ctr.Name, DNS1123LabelErrorMsg))
} else if msgs := validation.IsDNS1123Label(ctr.Name); len(msgs) != 0 {
for i := range msgs {
allErrs = append(allErrs, field.Invalid(idxPath.Child("name"), ctr.Name, msgs[i]))
}
} else if allNames.Has(ctr.Name) {
allErrs = append(allErrs, field.Duplicate(idxPath.Child("name"), ctr.Name))
} else {
@ -1433,6 +1446,60 @@ func validateImagePullSecrets(imagePullSecrets []api.LocalObjectReference, fldPa
return allErrors
}
func validateTaintEffect(effect *api.TaintEffect, allowEmpty bool, fldPath *field.Path) field.ErrorList {
if !allowEmpty && len(*effect) == 0 {
return field.ErrorList{field.Required(fldPath, "")}
}
allErrors := field.ErrorList{}
switch *effect {
// TODO: Replace the next line with the subsequent commented-out line once TaintEffectNoScheduleNoAdmit and TaintEffectNoScheduleNoAdmitNoExecute are implemented.
case api.TaintEffectNoSchedule, api.TaintEffectPreferNoSchedule:
// case api.TaintEffectNoSchedule, api.TaintEffectPreferNoSchedule, api.TaintEffectNoScheduleNoAdmit, api.TaintEffectNoScheduleNoAdmitNoExecute:
default:
validValues := []string{
string(api.TaintEffectNoSchedule),
string(api.TaintEffectPreferNoSchedule),
// TODO: Uncomment this block once TaintEffectNoScheduleNoAdmit and TaintEffectNoScheduleNoAdmitNoExecute are implemented.
// string(api.TaintEffectNoScheduleNoAdmit),
// string(api.TaintEffectNoScheduleNoAdmitNoExecute),
}
allErrors = append(allErrors, field.NotSupported(fldPath, *effect, validValues))
}
return allErrors
}
// validateTolerations tests whether the given tolerations have valid data.
func validateTolerations(tolerations []api.Toleration, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
for i, toleration := range tolerations {
idxPath := fldPath.Index(i)
// validate the toleration key
allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(toleration.Key, idxPath.Child("key"))...)
// validate toleration operator and value
switch toleration.Operator {
case api.TolerationOpEqual, "":
if errs := validation.IsValidLabelValue(toleration.Value); len(errs) != 0 {
allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration.Value, strings.Join(errs, ";")))
}
case api.TolerationOpExists:
if len(toleration.Value) > 0 {
allErrors = append(allErrors, field.Invalid(idxPath.Child("operator"), toleration, "value must be empty when `operator` is 'Exists'"))
}
default:
validValues := []string{string(api.TolerationOpEqual), string(api.TolerationOpExists)}
allErrors = append(allErrors, field.NotSupported(idxPath.Child("operator"), toleration.Operator, validValues))
}
// validate toleration effect
if len(toleration.Effect) > 0 {
allErrors = append(allErrors, validateTaintEffect(&toleration.Effect, true, idxPath.Child("effect"))...)
}
}
return allErrors
}
// ValidatePod tests if required fields in the pod are set.
func ValidatePod(pod *api.Pod) field.ErrorList {
fldPath := field.NewPath("metadata")
@ -1452,19 +1519,20 @@ func ValidatePodSpec(spec *api.PodSpec, fldPath *field.Path) field.ErrorList {
allVolumes, vErrs := validateVolumes(spec.Volumes, fldPath.Child("volumes"))
allErrs = append(allErrs, vErrs...)
allErrs = append(allErrs, validateContainers(spec.Containers, allVolumes, fldPath.Child("containers"))...)
allErrs = append(allErrs, validateInitContainers(spec.InitContainers, spec.Containers, allVolumes, fldPath.Child("initContainers"))...)
allErrs = append(allErrs, validateRestartPolicy(&spec.RestartPolicy, fldPath.Child("restartPolicy"))...)
allErrs = append(allErrs, validateDNSPolicy(&spec.DNSPolicy, fldPath.Child("dnsPolicy"))...)
allErrs = append(allErrs, unversionedvalidation.ValidateLabels(spec.NodeSelector, fldPath.Child("nodeSelector"))...)
allErrs = append(allErrs, ValidatePodSecurityContext(spec.SecurityContext, spec, fldPath, fldPath.Child("securityContext"))...)
allErrs = append(allErrs, validateImagePullSecrets(spec.ImagePullSecrets, fldPath.Child("imagePullSecrets"))...)
if len(spec.ServiceAccountName) > 0 {
if ok, msg := ValidateServiceAccountName(spec.ServiceAccountName, false); !ok {
for _, msg := range ValidateServiceAccountName(spec.ServiceAccountName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("serviceAccountName"), spec.ServiceAccountName, msg))
}
}
if len(spec.NodeName) > 0 {
if ok, msg := ValidateNodeName(spec.NodeName, false); !ok {
for _, msg := range ValidateNodeName(spec.NodeName, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("nodeName"), spec.NodeName, msg))
}
}
@ -1475,12 +1543,16 @@ func ValidatePodSpec(spec *api.PodSpec, fldPath *field.Path) field.ErrorList {
}
}
if len(spec.Hostname) > 0 && !validation.IsDNS1123Label(spec.Hostname) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("hostname"), spec.Hostname, DNS1123LabelErrorMsg))
if len(spec.Hostname) > 0 {
for _, msg := range validation.IsDNS1123Label(spec.Hostname) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("hostname"), spec.Hostname, msg))
}
}
if len(spec.Subdomain) > 0 && !validation.IsDNS1123Label(spec.Subdomain) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("subdomain"), spec.Subdomain, DNS1123LabelErrorMsg))
if len(spec.Subdomain) > 0 {
for _, msg := range validation.IsDNS1123Label(spec.Subdomain) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("subdomain"), spec.Subdomain, msg))
}
}
return allErrs
@ -1558,8 +1630,8 @@ func validatePodAffinityTerm(podAffinityTerm api.PodAffinityTerm, allowEmptyTopo
allErrs := field.ErrorList{}
allErrs = append(allErrs, unversionedvalidation.ValidateLabelSelector(podAffinityTerm.LabelSelector, fldPath.Child("matchExpressions"))...)
for _, name := range podAffinityTerm.Namespaces {
if ok, _ := ValidateNamespaceName(name, false); !ok {
allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), name, DNS1123LabelErrorMsg))
for _, msg := range ValidateNamespaceName(name, false) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("namespace"), name, msg))
}
}
if !allowEmptyTopologyKey && len(podAffinityTerm.TopologyKey) == 0 {
@ -1671,6 +1743,22 @@ func ValidateAffinityInPodAnnotations(annotations map[string]string, fldPath *fi
return allErrs
}
// ValidateTolerationsInPodAnnotations tests that the serialized tolerations in Pod.Annotations have valid data
func ValidateTolerationsInPodAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
tolerations, err := api.GetTolerationsFromPodAnnotations(annotations)
if err != nil {
allErrs = append(allErrs, field.Invalid(fldPath, api.TolerationsAnnotationKey, err.Error()))
return allErrs
}
if len(tolerations) > 0 {
allErrs = append(allErrs, validateTolerations(tolerations, fldPath.Child(api.TolerationsAnnotationKey))...)
}
return allErrs
}
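Editor's aside: a sketch that drives validateTolerations through this exported helper; the annotation payload is a made-up example and the import paths are assumed from the vendored tree. An Exists toleration that also carries a value should come back as an Invalid error.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	apivalidation "k8s.io/kubernetes/pkg/api/validation"
	"k8s.io/kubernetes/pkg/util/validation/field"
)

func main() {
	annotations := map[string]string{
		api.TolerationsAnnotationKey: `[{"key":"dedicated","operator":"Exists","value":"oops"}]`,
	}
	for _, e := range apivalidation.ValidateTolerationsInPodAnnotations(annotations, field.NewPath("annotations")) {
		fmt.Println(e) // value must be empty when `operator` is 'Exists'
	}
}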
// ValidatePodSecurityContext test that the specified PodSecurityContext has valid data.
func ValidatePodSecurityContext(securityContext *api.PodSecurityContext, spec *api.PodSpec, specPath, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
@ -1932,8 +2020,10 @@ func validateServicePort(sp *api.ServicePort, requireName, isHeadlessService boo
if requireName && len(sp.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
} else if len(sp.Name) != 0 {
if !validation.IsDNS1123Label(sp.Name) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), sp.Name, DNS1123LabelErrorMsg))
if msgs := validation.IsDNS1123Label(sp.Name); len(msgs) != 0 {
for i := range msgs {
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), sp.Name, msgs[i]))
}
} else if allNames.Has(sp.Name) {
allErrs = append(allErrs, field.Duplicate(fldPath.Child("name"), sp.Name))
} else {
@ -2083,9 +2173,51 @@ func ValidateReadOnlyPersistentDisks(volumes []api.Volume, fldPath *field.Path)
return allErrs
}
// validateTaints tests whether the given taints have valid data.
func validateTaints(taints []api.Taint, fldPath *field.Path) field.ErrorList {
allErrors := field.ErrorList{}
for i, currTaint := range taints {
idxPath := fldPath.Index(i)
// validate the taint key
allErrors = append(allErrors, unversionedvalidation.ValidateLabelName(currTaint.Key, idxPath.Child("key"))...)
// validate the taint value
if errs := validation.IsValidLabelValue(currTaint.Value); len(errs) != 0 {
allErrors = append(allErrors, field.Invalid(idxPath.Child("value"), currTaint.Value, strings.Join(errs, ";")))
}
// validate the taint effect
allErrors = append(allErrors, validateTaintEffect(&currTaint.Effect, false, idxPath.Child("effect"))...)
}
return allErrors
}
// ValidateTaintsInNodeAnnotations tests that the serialized taints in Node.Annotations have valid data
func ValidateTaintsInNodeAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
taints, err := api.GetTaintsFromNodeAnnotations(annotations)
if err != nil {
allErrs = append(allErrs, field.Invalid(fldPath, api.TaintsAnnotationKey, err.Error()))
return allErrs
}
if len(taints) > 0 {
allErrs = append(allErrs, validateTaints(taints, fldPath.Child(api.TaintsAnnotationKey))...)
}
return allErrs
}
func ValidateNodeSpecificAnnotations(annotations map[string]string, fldPath *field.Path) field.ErrorList {
if annotations[api.TaintsAnnotationKey] != "" {
return ValidateTaintsInNodeAnnotations(annotations, fldPath)
}
return field.ErrorList{}
}
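Editor's aside: the matching sketch for node taints, again with a made-up payload and assumed import paths. The effect "NoExecute" is not among the effects supported at this point, so validateTaintEffect surfaces a NotSupported error listing NoSchedule and PreferNoSchedule.

package main

import (
	"fmt"

	"k8s.io/kubernetes/pkg/api"
	apivalidation "k8s.io/kubernetes/pkg/api/validation"
	"k8s.io/kubernetes/pkg/util/validation/field"
)

func main() {
	annotations := map[string]string{
		api.TaintsAnnotationKey: `[{"key":"dedicated","value":"search","effect":"NoExecute"}]`,
	}
	for _, e := range apivalidation.ValidateNodeSpecificAnnotations(annotations, field.NewPath("metadata", "annotations")) {
		fmt.Println(e)
	}
}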
// ValidateNode tests if required fields in the node are set.
func ValidateNode(node *api.Node) field.ErrorList {
allErrs := ValidateObjectMeta(&node.ObjectMeta, false, ValidateNodeName, field.NewPath("metadata"))
fldPath := field.NewPath("metadata")
allErrs := ValidateObjectMeta(&node.ObjectMeta, false, ValidateNodeName, fldPath)
allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...)
// Only validate spec. All status fields are optional and can be updated later.
@ -2100,7 +2232,9 @@ func ValidateNode(node *api.Node) field.ErrorList {
// ValidateNodeUpdate tests to make sure a node update can be applied. Modifies oldNode.
func ValidateNodeUpdate(node, oldNode *api.Node) field.ErrorList {
allErrs := ValidateObjectMetaUpdate(&node.ObjectMeta, &oldNode.ObjectMeta, field.NewPath("metadata"))
fldPath := field.NewPath("metadata")
allErrs := ValidateObjectMetaUpdate(&node.ObjectMeta, &oldNode.ObjectMeta, fldPath)
allErrs = append(allErrs, ValidateNodeSpecificAnnotations(node.ObjectMeta.Annotations, fldPath.Child("annotations"))...)
// TODO: Enable the code once we have better api object.status update model. Currently,
// anyone can update node status.
@ -2148,8 +2282,11 @@ func ValidateNodeUpdate(node, oldNode *api.Node) field.ErrorList {
// Refer to docs/design/resources.md for more details.
func validateResourceName(value string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if !validation.IsQualifiedName(value) {
return append(allErrs, field.Invalid(fldPath, value, qualifiedNameErrorMsg))
for _, msg := range validation.IsQualifiedName(value) {
allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
}
if len(allErrs) != 0 {
return allErrs
}
if len(strings.Split(value, "/")) == 1 {
@ -2188,8 +2325,11 @@ func validateResourceQuotaResourceName(value string, fldPath *field.Path) field.
// Validate limit range types
func validateLimitRangeTypeName(value string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if !validation.IsQualifiedName(value) {
return append(allErrs, field.Invalid(fldPath, value, qualifiedNameErrorMsg))
for _, msg := range validation.IsQualifiedName(value) {
allErrs = append(allErrs, field.Invalid(fldPath, value, msg))
}
if len(allErrs) != 0 {
return allErrs
}
if len(strings.Split(value, "/")) == 1 {
@ -2445,9 +2585,7 @@ func ValidateSecretUpdate(newSecret, oldSecret *api.Secret) field.ErrorList {
// ValidateConfigMapName can be used to check whether the given ConfigMap name is valid.
// Prefix indicates this name will be used as part of generation, in which case
// trailing dashes are allowed.
func ValidateConfigMapName(name string, prefix bool) (bool, string) {
return NameIsDNSSubdomain(name, prefix)
}
var ValidateConfigMapName = NameIsDNSSubdomain
// ValidateConfigMap tests whether required fields in the ConfigMap are set.
func ValidateConfigMap(cfg *api.ConfigMap) field.ErrorList {
@ -2659,8 +2797,11 @@ func ValidateNamespace(namespace *api.Namespace) field.ErrorList {
// Validate finalizer names
func validateFinalizerName(stringValue string, fldPath *field.Path) field.ErrorList {
allErrs := field.ErrorList{}
if !validation.IsQualifiedName(stringValue) {
return append(allErrs, field.Invalid(fldPath, stringValue, qualifiedNameErrorMsg))
for _, msg := range validation.IsQualifiedName(stringValue) {
allErrs = append(allErrs, field.Invalid(fldPath, stringValue, msg))
}
if len(allErrs) != 0 {
return allErrs
}
if len(strings.Split(stringValue, "/")) == 1 {
@ -2753,8 +2894,10 @@ func validateEndpointAddress(address *api.EndpointAddress, fldPath *field.Path)
if !validation.IsValidIP(address.IP) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("ip"), address.IP, "must be a valid IP address"))
}
if len(address.Hostname) > 0 && !validation.IsDNS1123Label(address.Hostname) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("hostname"), address.Hostname, DNS1123LabelErrorMsg))
if len(address.Hostname) > 0 {
for _, msg := range validation.IsDNS1123Label(address.Hostname) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("hostname"), address.Hostname, msg))
}
}
if len(allErrs) > 0 {
return allErrs
@ -2788,8 +2931,8 @@ func validateEndpointPort(port *api.EndpointPort, requireName bool, fldPath *fie
if requireName && len(port.Name) == 0 {
allErrs = append(allErrs, field.Required(fldPath.Child("name"), ""))
} else if len(port.Name) != 0 {
if !validation.IsDNS1123Label(port.Name) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), port.Name, DNS1123LabelErrorMsg))
for _, msg := range validation.IsDNS1123Label(port.Name) {
allErrs = append(allErrs, field.Invalid(fldPath.Child("name"), port.Name, msg))
}
}
if !validation.IsValidPortNum(int(port.Port)) {
@ -2863,8 +3006,8 @@ func ValidateLoadBalancerStatus(status *api.LoadBalancerStatus, fldPath *field.P
}
}
if len(ingress.Hostname) > 0 {
if valid, errMsg := NameIsDNSSubdomain(ingress.Hostname, false); !valid {
allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, errMsg))
for _, msg := range validation.IsDNS1123Subdomain(ingress.Hostname) {
allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, msg))
}
if isIP := (net.ParseIP(ingress.Hostname) != nil); isIP {
allErrs = append(allErrs, field.Invalid(idxPath.Child("hostname"), ingress.Hostname, "must be a DNS name, not an IP address"))
@ -2885,7 +3028,7 @@ func isValidHostnamesMap(serializedPodHostNames string) bool {
}
for ip, hostRecord := range podHostNames {
if !validation.IsDNS1123Label(hostRecord.HostName) {
if len(validation.IsDNS1123Label(hostRecord.HostName)) != 0 {
return false
}
if net.ParseIP(ip) == nil {

View File

@ -1,5 +1,5 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -25,12 +25,11 @@ import (
"errors"
"fmt"
codec1978 "github.com/ugorji/go/codec"
pkg5_inf_v0 "gopkg.in/inf.v0"
pkg2_api "k8s.io/kubernetes/pkg/api"
pkg4_resource "k8s.io/kubernetes/pkg/api/resource"
pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
pkg3_types "k8s.io/kubernetes/pkg/types"
pkg6_intstr "k8s.io/kubernetes/pkg/util/intstr"
pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr"
"reflect"
"runtime"
time "time"
@ -66,14 +65,13 @@ func init() {
panic(err)
}
if false { // reference the types, but skip this branch at build/run time
var v0 pkg5_inf_v0.Dec
var v1 pkg2_api.ObjectMeta
var v2 pkg4_resource.Quantity
var v3 pkg1_unversioned.TypeMeta
var v4 pkg3_types.UID
var v5 pkg6_intstr.IntOrString
var v6 time.Time
_, _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5, v6
var v0 pkg2_api.ObjectMeta
var v1 pkg4_resource.Quantity
var v2 pkg1_unversioned.TypeMeta
var v3 pkg3_types.UID
var v4 pkg5_intstr.IntOrString
var v5 time.Time
_, _, _, _, _, _ = v0, v1, v2, v3, v4, v5
}
}
@ -1555,7 +1553,7 @@ func (x codecSelfer1234) decSlicePetSet(v *[]PetSet, d *codec1978.Decoder) {
yyrg1 := len(yyv1) > 0
yyv21 := yyv1
yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 720)
yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 744)
if yyrt1 {
if yyrl1 <= cap(yyv1) {
yyv1 = yyv1[:yyrl1]

View File

@ -1,5 +1,5 @@
/*
Copyright 2015 The Kubernetes Authors All rights reserved.
Copyright 2016 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
@ -25,12 +25,11 @@ import (
"errors"
"fmt"
codec1978 "github.com/ugorji/go/codec"
pkg5_inf_v0 "gopkg.in/inf.v0"
pkg4_resource "k8s.io/kubernetes/pkg/api/resource"
pkg1_unversioned "k8s.io/kubernetes/pkg/api/unversioned"
pkg2_v1 "k8s.io/kubernetes/pkg/api/v1"
pkg3_types "k8s.io/kubernetes/pkg/types"
pkg6_intstr "k8s.io/kubernetes/pkg/util/intstr"
pkg5_intstr "k8s.io/kubernetes/pkg/util/intstr"
"reflect"
"runtime"
time "time"
@ -66,14 +65,13 @@ func init() {
panic(err)
}
if false { // reference the types, but skip this branch at build/run time
var v0 pkg5_inf_v0.Dec
var v1 pkg4_resource.Quantity
var v2 pkg1_unversioned.TypeMeta
var v3 pkg2_v1.ObjectMeta
var v4 pkg3_types.UID
var v5 pkg6_intstr.IntOrString
var v6 time.Time
_, _, _, _, _, _, _ = v0, v1, v2, v3, v4, v5, v6
var v0 pkg4_resource.Quantity
var v1 pkg1_unversioned.TypeMeta
var v2 pkg2_v1.ObjectMeta
var v3 pkg3_types.UID
var v4 pkg5_intstr.IntOrString
var v5 time.Time
_, _, _, _, _, _ = v0, v1, v2, v3, v4, v5
}
}
@ -1585,7 +1583,7 @@ func (x codecSelfer1234) decSlicePetSet(v *[]PetSet, d *codec1978.Decoder) {
yyrg1 := len(yyv1) > 0
yyv21 := yyv1
yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 744)
yyrl1, yyrt1 = z.DecInferLen(yyl1, z.DecBasicHandle().MaxInitLen, 768)
if yyrt1 {
if yyrl1 <= cap(yyv1) {
yyv1 = yyv1[:yyrl1]

Some files were not shown because too many files have changed in this diff