use json merge patches

Signed-off-by: Steve Kriss <steve@heptio.com>
pull/490/head
Steve Kriss 2018-05-14 14:34:24 -07:00
parent 014c0e2c4c
commit 6d6f734bc9
18 changed files with 19 additions and 4112 deletions
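
The substance of the change: everywhere Ark previously computed a strategic merge patch with strategicpatch.CreateTwoWayMergePatch and submitted it as types.StrategicMergePatchType, it now computes an RFC 7386 JSON merge patch with jsonpatch.CreateMergePatch (github.com/evanphx/json-patch) and submits it as types.MergePatchType. That removes the need for the vendored strategicpatch, mergepatch, forked-json, and kube-openapi packages deleted below. A minimal, self-contained sketch of the new pattern follows; the spec struct and image values are illustrative only, not taken from this commit.

package main

import (
	"encoding/json"
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

// spec is a stand-in for whatever API object is being patched
// (Deployment, Backup, Restore, Schedule, DownloadRequest, ...).
type spec struct {
	Image string   `json:"image"`
	Args  []string `json:"args,omitempty"`
}

func main() {
	original, _ := json.Marshal(spec{Image: "gcr.io/heptio-images/ark:v0.8.0"})
	updated, _ := json.Marshal(spec{Image: "gcr.io/heptio-images/ark:v0.9.0", Args: []string{"server"}})

	// CreateMergePatch diffs the two JSON documents and emits only the
	// fields that changed, e.g. {"args":["server"],"image":"gcr.io/..."}.
	// No struct or schema metadata is needed, unlike CreateTwoWayMergePatch.
	patchBytes, err := jsonpatch.CreateMergePatch(original, updated)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patchBytes))

	// The commands/controllers in this commit then send patchBytes with
	// types.MergePatchType, e.g.:
	//   client.Backups(ns).Patch(name, types.MergePatchType, patchBytes)
}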

Gopkg.lock generated

@ -526,19 +526,16 @@
"pkg/util/httpstream/spdy",
"pkg/util/intstr",
"pkg/util/json",
"pkg/util/mergepatch",
"pkg/util/net",
"pkg/util/remotecommand",
"pkg/util/runtime",
"pkg/util/sets",
"pkg/util/strategicpatch",
"pkg/util/validation",
"pkg/util/validation/field",
"pkg/util/wait",
"pkg/util/yaml",
"pkg/version",
"pkg/watch",
"third_party/forked/golang/json",
"third_party/forked/golang/netutil",
"third_party/forked/golang/reflect"
]
@ -617,12 +614,6 @@
revision = "23781f4d6632d88e869066eaebb743857aa1ef9b"
version = "v7.0.0"
[[projects]]
branch = "master"
name = "k8s.io/kube-openapi"
packages = ["pkg/util/proto"]
revision = "61b46af70dfed79c6d24530cd23b41440a7f22a5"
[[projects]]
name = "k8s.io/kubernetes"
packages = ["pkg/printers"]
@ -632,6 +623,6 @@
[solve-meta]
analyzer-name = "dep"
analyzer-version = 1
inputs-digest = "ae537b14a6c7d8b1d160c764469d02c4fe830fcb3426395c739047a72bcc81a4"
inputs-digest = "2f16c6e2f4e0289f8daa9b9f5b22949703e6a3c71ff06fc0aee4312c270f26a6"
solver-name = "gps-cdcl"
solver-version = 1

@ -21,14 +21,13 @@ import (
"fmt"
"strings"
jsonpatch "github.com/evanphx/json-patch"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"k8s.io/api/apps/v1beta1"
"k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
@ -124,10 +123,10 @@ func NewAddCommand(f client.Factory) *cobra.Command {
updated, err := json.Marshal(arkDeploy)
cmd.CheckError(err)
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(original, updated, v1beta1.Deployment{})
patchBytes, err := jsonpatch.CreateMergePatch(original, updated)
cmd.CheckError(err)
_, err = kubeClient.AppsV1beta1().Deployments(arkDeploy.Namespace).Patch(arkDeploy.Name, types.StrategicMergePatchType, patchBytes)
_, err = kubeClient.AppsV1beta1().Deployments(arkDeploy.Namespace).Patch(arkDeploy.Name, types.MergePatchType, patchBytes)
cmd.CheckError(err)
},
}

@ -19,13 +19,12 @@ package plugin
import (
"encoding/json"
jsonpatch "github.com/evanphx/json-patch"
"github.com/pkg/errors"
"github.com/spf13/cobra"
"k8s.io/api/apps/v1beta1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"github.com/heptio/ark/pkg/client"
"github.com/heptio/ark/pkg/cmd"
@ -71,10 +70,10 @@ func NewRemoveCommand(f client.Factory) *cobra.Command {
updated, err := json.Marshal(arkDeploy)
cmd.CheckError(err)
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(original, updated, v1beta1.Deployment{})
patchBytes, err := jsonpatch.CreateMergePatch(original, updated)
cmd.CheckError(err)
_, err = kubeClient.AppsV1beta1().Deployments(arkDeploy.Namespace).Patch(arkDeploy.Name, types.StrategicMergePatchType, patchBytes)
_, err = kubeClient.AppsV1beta1().Deployments(arkDeploy.Namespace).Patch(arkDeploy.Name, types.MergePatchType, patchBytes)
cmd.CheckError(err)
},
}

@ -27,6 +27,7 @@ import (
"sync"
"time"
jsonpatch "github.com/evanphx/json-patch"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@ -34,7 +35,6 @@ import (
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
kerrors "k8s.io/apimachinery/pkg/util/errors"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
@ -293,9 +293,9 @@ func patchBackup(original, updated *api.Backup, client arkv1client.BackupsGetter
return nil, errors.Wrap(err, "error marshalling updated backup")
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(origBytes, updatedBytes, api.Backup{})
patchBytes, err := jsonpatch.CreateMergePatch(origBytes, updatedBytes)
if err != nil {
return nil, errors.Wrap(err, "error creating two-way merge patch for backup")
return nil, errors.Wrap(err, "error creating json merge patch for backup")
}
res, err := client.Backups(original.Namespace).Patch(original.Name, types.MergePatchType, patchBytes)

@ -22,6 +22,7 @@ import (
"sync"
"time"
jsonpatch "github.com/evanphx/json-patch"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
@ -30,7 +31,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
@ -292,9 +292,9 @@ func patchDownloadRequest(original, updated *v1.DownloadRequest, client arkv1cli
return nil, errors.Wrap(err, "error marshalling updated download request")
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(origBytes, updatedBytes, v1.DownloadRequest{})
patchBytes, err := jsonpatch.CreateMergePatch(origBytes, updatedBytes)
if err != nil {
return nil, errors.Wrap(err, "error creating two-way merge patch for download request")
return nil, errors.Wrap(err, "error creating json merge patch for download request")
}
res, err := client.DownloadRequests(original.Namespace).Patch(original.Name, types.MergePatchType, patchBytes)

@ -27,13 +27,13 @@ import (
"sync"
"time"
jsonpatch "github.com/evanphx/json-patch"
"github.com/pkg/errors"
"github.com/sirupsen/logrus"
apierrors "k8s.io/apimachinery/pkg/api/errors"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
@ -497,9 +497,9 @@ func patchRestore(original, updated *api.Restore, client arkv1client.RestoresGet
return nil, errors.Wrap(err, "error marshalling updated restore")
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(origBytes, updatedBytes, api.Restore{})
patchBytes, err := jsonpatch.CreateMergePatch(origBytes, updatedBytes)
if err != nil {
return nil, errors.Wrap(err, "error creating two-way merge patch for restore")
return nil, errors.Wrap(err, "error creating json merge patch for restore")
}
res, err := client.Restores(original.Namespace).Patch(original.Name, types.MergePatchType, patchBytes)

@ -23,6 +23,7 @@ import (
"sync"
"time"
jsonpatch "github.com/evanphx/json-patch"
"github.com/pkg/errors"
"github.com/robfig/cron"
"github.com/sirupsen/logrus"
@ -32,7 +33,6 @@ import (
"k8s.io/apimachinery/pkg/labels"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/clock"
"k8s.io/apimachinery/pkg/util/strategicpatch"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/tools/cache"
"k8s.io/client-go/util/workqueue"
@ -386,9 +386,9 @@ func patchSchedule(original, updated *api.Schedule, client arkv1client.Schedules
return nil, errors.Wrap(err, "error marshalling updated schedule")
}
patchBytes, err := strategicpatch.CreateTwoWayMergePatch(origBytes, updatedBytes, api.Schedule{})
patchBytes, err := jsonpatch.CreateMergePatch(origBytes, updatedBytes)
if err != nil {
return nil, errors.Wrap(err, "error creating two-way merge patch for schedule")
return nil, errors.Wrap(err, "error creating json merge patch for schedule")
}
res, err := client.Schedules(original.Namespace).Patch(original.Name, types.MergePatchType, patchBytes)
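
A semantic note that the diff itself does not state but that follows from the two patch formats: a JSON merge patch replaces arrays wholesale rather than merging elements by merge key the way a strategic merge patch can, and keys absent from the patch are left untouched (explicit nulls delete). A small sketch with hand-written JSON, using the same github.com/evanphx/json-patch package; the field names are illustrative.

package main

import (
	"fmt"

	jsonpatch "github.com/evanphx/json-patch"
)

func main() {
	original := []byte(`{"spec":{"includedNamespaces":["a","b"]},"status":{"phase":"New"}}`)
	patch := []byte(`{"status":{"phase":"InProgress"}}`)

	// MergePatch applies the merge patch: the untouched "spec" key is
	// preserved, while any array that did appear in the patch would have
	// replaced the original array entirely.
	result, err := jsonpatch.MergePatch(original, patch)
	if err != nil {
		panic(err)
	}
	fmt.Println(string(result))
	// {"spec":{"includedNamespaces":["a","b"]},"status":{"phase":"InProgress"}}
}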

@ -1,102 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mergepatch
import (
"errors"
"fmt"
"reflect"
)
var (
ErrBadJSONDoc = errors.New("invalid JSON document")
ErrNoListOfLists = errors.New("lists of lists are not supported")
ErrBadPatchFormatForPrimitiveList = errors.New("invalid patch format of primitive list")
ErrBadPatchFormatForRetainKeys = errors.New("invalid patch format of retainKeys")
ErrBadPatchFormatForSetElementOrderList = errors.New("invalid patch format of setElementOrder list")
ErrPatchContentNotMatchRetainKeys = errors.New("patch content doesn't match retainKeys list")
ErrUnsupportedStrategicMergePatchFormat = errors.New("strategic merge patch format is not supported")
)
func ErrNoMergeKey(m map[string]interface{}, k string) error {
return fmt.Errorf("map: %v does not contain declared merge key: %s", m, k)
}
func ErrBadArgType(expected, actual interface{}) error {
return fmt.Errorf("expected a %s, but received a %s",
reflect.TypeOf(expected),
reflect.TypeOf(actual))
}
func ErrBadArgKind(expected, actual interface{}) error {
var expectedKindString, actualKindString string
if expected == nil {
expectedKindString = "nil"
} else {
expectedKindString = reflect.TypeOf(expected).Kind().String()
}
if actual == nil {
actualKindString = "nil"
} else {
actualKindString = reflect.TypeOf(actual).Kind().String()
}
return fmt.Errorf("expected a %s, but received a %s", expectedKindString, actualKindString)
}
func ErrBadPatchType(t interface{}, m map[string]interface{}) error {
return fmt.Errorf("unknown patch type: %s in map: %v", t, m)
}
// IsPreconditionFailed returns true if the provided error indicates
// a precondition failed.
func IsPreconditionFailed(err error) bool {
_, ok := err.(ErrPreconditionFailed)
return ok
}
type ErrPreconditionFailed struct {
message string
}
func NewErrPreconditionFailed(target map[string]interface{}) ErrPreconditionFailed {
s := fmt.Sprintf("precondition failed for: %v", target)
return ErrPreconditionFailed{s}
}
func (err ErrPreconditionFailed) Error() string {
return err.message
}
type ErrConflict struct {
message string
}
func NewErrConflict(patch, current string) ErrConflict {
s := fmt.Sprintf("patch:\n%s\nconflicts with changes made from original to current:\n%s\n", patch, current)
return ErrConflict{s}
}
func (err ErrConflict) Error() string {
return err.message
}
// IsConflict returns true if the provided error indicates
// a conflict between the patch and the current configuration.
func IsConflict(err error) bool {
_, ok := err.(ErrConflict)
return ok
}

@ -1,133 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package mergepatch
import (
"fmt"
"reflect"
"github.com/davecgh/go-spew/spew"
"github.com/ghodss/yaml"
)
// PreconditionFunc asserts that an incompatible change is not present within a patch.
type PreconditionFunc func(interface{}) bool
// RequireKeyUnchanged returns a precondition function that fails if the provided key
// is present in the patch (indicating that its value has changed).
func RequireKeyUnchanged(key string) PreconditionFunc {
return func(patch interface{}) bool {
patchMap, ok := patch.(map[string]interface{})
if !ok {
return true
}
// The presence of key means that its value has been changed, so the test fails.
_, ok = patchMap[key]
return !ok
}
}
// RequireMetadataKeyUnchanged creates a precondition function that fails
// if the metadata.key is present in the patch (indicating its value
// has changed).
func RequireMetadataKeyUnchanged(key string) PreconditionFunc {
return func(patch interface{}) bool {
patchMap, ok := patch.(map[string]interface{})
if !ok {
return true
}
patchMap1, ok := patchMap["metadata"]
if !ok {
return true
}
patchMap2, ok := patchMap1.(map[string]interface{})
if !ok {
return true
}
_, ok = patchMap2[key]
return !ok
}
}
func ToYAMLOrError(v interface{}) string {
y, err := toYAML(v)
if err != nil {
return err.Error()
}
return y
}
func toYAML(v interface{}) (string, error) {
y, err := yaml.Marshal(v)
if err != nil {
return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, spew.Sdump(v))
}
return string(y), nil
}
// HasConflicts returns true if the left and right JSON interface objects overlap with
// different values in any key. All keys are required to be strings. Since patches of the
// same Type have congruent keys, this is valid for multiple patch types. This method
// supports JSON merge patch semantics.
//
// NOTE: Numbers with different types (e.g. int(0) vs int64(0)) will be detected as conflicts.
// Make sure the unmarshaling of left and right are consistent (e.g. use the same library).
func HasConflicts(left, right interface{}) (bool, error) {
switch typedLeft := left.(type) {
case map[string]interface{}:
switch typedRight := right.(type) {
case map[string]interface{}:
for key, leftValue := range typedLeft {
rightValue, ok := typedRight[key]
if !ok {
continue
}
if conflict, err := HasConflicts(leftValue, rightValue); err != nil || conflict {
return conflict, err
}
}
return false, nil
default:
return true, nil
}
case []interface{}:
switch typedRight := right.(type) {
case []interface{}:
if len(typedLeft) != len(typedRight) {
return true, nil
}
for i := range typedLeft {
if conflict, err := HasConflicts(typedLeft[i], typedRight[i]); err != nil || conflict {
return conflict, err
}
}
return false, nil
default:
return true, nil
}
case string, float64, bool, int, int64, nil:
return !reflect.DeepEqual(left, right), nil
default:
return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left))
}
}

@ -1,49 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategicpatch
import (
"fmt"
)
type LookupPatchMetaError struct {
Path string
Err error
}
func (e LookupPatchMetaError) Error() string {
return fmt.Sprintf("LookupPatchMetaError(%s): %v", e.Path, e.Err)
}
type FieldNotFoundError struct {
Path string
Field string
}
func (e FieldNotFoundError) Error() string {
return fmt.Sprintf("unable to find api field %q in %s", e.Field, e.Path)
}
type InvalidTypeError struct {
Path string
Expected string
Actual string
}
func (e InvalidTypeError) Error() string {
return fmt.Sprintf("invalid type for %s: got %q, expected %q", e.Path, e.Actual, e.Expected)
}

@ -1,194 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategicpatch
import (
"errors"
"fmt"
"reflect"
"k8s.io/apimachinery/pkg/util/mergepatch"
forkedjson "k8s.io/apimachinery/third_party/forked/golang/json"
openapi "k8s.io/kube-openapi/pkg/util/proto"
)
type PatchMeta struct {
patchStrategies []string
patchMergeKey string
}
func (pm PatchMeta) GetPatchStrategies() []string {
if pm.patchStrategies == nil {
return []string{}
}
return pm.patchStrategies
}
func (pm PatchMeta) SetPatchStrategies(ps []string) {
pm.patchStrategies = ps
}
func (pm PatchMeta) GetPatchMergeKey() string {
return pm.patchMergeKey
}
func (pm PatchMeta) SetPatchMergeKey(pmk string) {
pm.patchMergeKey = pmk
}
type LookupPatchMeta interface {
// LookupPatchMetadataForStruct gets subschema and the patch metadata (e.g. patch strategy and merge key) for map.
LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error)
// LookupPatchMetadataForSlice get subschema and the patch metadata for slice.
LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error)
// Get the type name of the field
Name() string
}
type PatchMetaFromStruct struct {
T reflect.Type
}
func NewPatchMetaFromStruct(dataStruct interface{}) (PatchMetaFromStruct, error) {
t, err := getTagStructType(dataStruct)
return PatchMetaFromStruct{T: t}, err
}
var _ LookupPatchMeta = PatchMetaFromStruct{}
func (s PatchMetaFromStruct) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) {
fieldType, fieldPatchStrategies, fieldPatchMergeKey, err := forkedjson.LookupPatchMetadataForStruct(s.T, key)
if err != nil {
return nil, PatchMeta{}, err
}
return PatchMetaFromStruct{T: fieldType},
PatchMeta{
patchStrategies: fieldPatchStrategies,
patchMergeKey: fieldPatchMergeKey,
}, nil
}
func (s PatchMetaFromStruct) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) {
subschema, patchMeta, err := s.LookupPatchMetadataForStruct(key)
if err != nil {
return nil, PatchMeta{}, err
}
elemPatchMetaFromStruct := subschema.(PatchMetaFromStruct)
t := elemPatchMetaFromStruct.T
var elemType reflect.Type
switch t.Kind() {
// If t is an array or a slice, get the element type.
// If element is still an array or a slice, return an error.
// Otherwise, return element type.
case reflect.Array, reflect.Slice:
elemType = t.Elem()
if elemType.Kind() == reflect.Array || elemType.Kind() == reflect.Slice {
return nil, PatchMeta{}, errors.New("unexpected slice of slice")
}
// If t is an pointer, get the underlying element.
// If the underlying element is neither an array nor a slice, the pointer is pointing to a slice,
// e.g. https://github.com/kubernetes/kubernetes/blob/bc22e206c79282487ea0bf5696d5ccec7e839a76/staging/src/k8s.io/apimachinery/pkg/util/strategicpatch/patch_test.go#L2782-L2822
// If the underlying element is either an array or a slice, return its element type.
case reflect.Ptr:
t = t.Elem()
if t.Kind() == reflect.Array || t.Kind() == reflect.Slice {
t = t.Elem()
}
elemType = t
default:
return nil, PatchMeta{}, fmt.Errorf("expected slice or array type, but got: %s", s.T.Kind().String())
}
return PatchMetaFromStruct{T: elemType}, patchMeta, nil
}
func (s PatchMetaFromStruct) Name() string {
return s.T.Kind().String()
}
func getTagStructType(dataStruct interface{}) (reflect.Type, error) {
if dataStruct == nil {
return nil, mergepatch.ErrBadArgKind(struct{}{}, nil)
}
t := reflect.TypeOf(dataStruct)
// Get the underlying type for pointers
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
if t.Kind() != reflect.Struct {
return nil, mergepatch.ErrBadArgKind(struct{}{}, dataStruct)
}
return t, nil
}
func GetTagStructTypeOrDie(dataStruct interface{}) reflect.Type {
t, err := getTagStructType(dataStruct)
if err != nil {
panic(err)
}
return t
}
type PatchMetaFromOpenAPI struct {
Schema openapi.Schema
}
func NewPatchMetaFromOpenAPI(s openapi.Schema) PatchMetaFromOpenAPI {
return PatchMetaFromOpenAPI{Schema: s}
}
var _ LookupPatchMeta = PatchMetaFromOpenAPI{}
func (s PatchMetaFromOpenAPI) LookupPatchMetadataForStruct(key string) (LookupPatchMeta, PatchMeta, error) {
if s.Schema == nil {
return nil, PatchMeta{}, nil
}
kindItem := NewKindItem(key, s.Schema.GetPath())
s.Schema.Accept(kindItem)
err := kindItem.Error()
if err != nil {
return nil, PatchMeta{}, err
}
return PatchMetaFromOpenAPI{Schema: kindItem.subschema},
kindItem.patchmeta, nil
}
func (s PatchMetaFromOpenAPI) LookupPatchMetadataForSlice(key string) (LookupPatchMeta, PatchMeta, error) {
if s.Schema == nil {
return nil, PatchMeta{}, nil
}
sliceItem := NewSliceItem(key, s.Schema.GetPath())
s.Schema.Accept(sliceItem)
err := sliceItem.Error()
if err != nil {
return nil, PatchMeta{}, err
}
return PatchMetaFromOpenAPI{Schema: sliceItem.subschema},
sliceItem.patchmeta, nil
}
func (s PatchMetaFromOpenAPI) Name() string {
schema := s.Schema
return schema.GetName()
}

File diff suppressed because it is too large

@ -1,193 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package strategicpatch
import (
"errors"
"strings"
"k8s.io/apimachinery/pkg/util/mergepatch"
openapi "k8s.io/kube-openapi/pkg/util/proto"
)
const (
patchStrategyOpenapiextensionKey = "x-kubernetes-patch-strategy"
patchMergeKeyOpenapiextensionKey = "x-kubernetes-patch-merge-key"
)
type LookupPatchItem interface {
openapi.SchemaVisitor
Error() error
Path() *openapi.Path
}
type kindItem struct {
key string
path *openapi.Path
err error
patchmeta PatchMeta
subschema openapi.Schema
hasVisitKind bool
}
func NewKindItem(key string, path *openapi.Path) *kindItem {
return &kindItem{
key: key,
path: path,
}
}
var _ LookupPatchItem = &kindItem{}
func (item *kindItem) Error() error {
return item.err
}
func (item *kindItem) Path() *openapi.Path {
return item.path
}
func (item *kindItem) VisitPrimitive(schema *openapi.Primitive) {
item.err = errors.New("expected kind, but got primitive")
}
func (item *kindItem) VisitArray(schema *openapi.Array) {
item.err = errors.New("expected kind, but got slice")
}
func (item *kindItem) VisitMap(schema *openapi.Map) {
item.err = errors.New("expected kind, but got map")
}
func (item *kindItem) VisitReference(schema openapi.Reference) {
if !item.hasVisitKind {
schema.SubSchema().Accept(item)
}
}
func (item *kindItem) VisitKind(schema *openapi.Kind) {
subschema, ok := schema.Fields[item.key]
if !ok {
item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key}
return
}
mergeKey, patchStrategies, err := parsePatchMetadata(subschema.GetExtensions())
if err != nil {
item.err = err
return
}
item.patchmeta = PatchMeta{
patchStrategies: patchStrategies,
patchMergeKey: mergeKey,
}
item.subschema = subschema
}
type sliceItem struct {
key string
path *openapi.Path
err error
patchmeta PatchMeta
subschema openapi.Schema
hasVisitKind bool
}
func NewSliceItem(key string, path *openapi.Path) *sliceItem {
return &sliceItem{
key: key,
path: path,
}
}
var _ LookupPatchItem = &sliceItem{}
func (item *sliceItem) Error() error {
return item.err
}
func (item *sliceItem) Path() *openapi.Path {
return item.path
}
func (item *sliceItem) VisitPrimitive(schema *openapi.Primitive) {
item.err = errors.New("expected slice, but got primitive")
}
func (item *sliceItem) VisitArray(schema *openapi.Array) {
if !item.hasVisitKind {
item.err = errors.New("expected visit kind first, then visit array")
}
subschema := schema.SubType
item.subschema = subschema
}
func (item *sliceItem) VisitMap(schema *openapi.Map) {
item.err = errors.New("expected slice, but got map")
}
func (item *sliceItem) VisitReference(schema openapi.Reference) {
if !item.hasVisitKind {
schema.SubSchema().Accept(item)
} else {
item.subschema = schema.SubSchema()
}
}
func (item *sliceItem) VisitKind(schema *openapi.Kind) {
subschema, ok := schema.Fields[item.key]
if !ok {
item.err = FieldNotFoundError{Path: schema.GetPath().String(), Field: item.key}
return
}
mergeKey, patchStrategies, err := parsePatchMetadata(subschema.GetExtensions())
if err != nil {
item.err = err
return
}
item.patchmeta = PatchMeta{
patchStrategies: patchStrategies,
patchMergeKey: mergeKey,
}
item.hasVisitKind = true
subschema.Accept(item)
}
func parsePatchMetadata(extensions map[string]interface{}) (string, []string, error) {
ps, foundPS := extensions[patchStrategyOpenapiextensionKey]
var patchStrategies []string
var mergeKey, patchStrategy string
var ok bool
if foundPS {
patchStrategy, ok = ps.(string)
if ok {
patchStrategies = strings.Split(patchStrategy, ",")
} else {
return "", nil, mergepatch.ErrBadArgType(patchStrategy, ps)
}
}
mk, foundMK := extensions[patchMergeKeyOpenapiextensionKey]
if foundMK {
mergeKey, ok = mk.(string)
if !ok {
return "", nil, mergepatch.ErrBadArgType(mergeKey, mk)
}
}
return mergeKey, patchStrategies, nil
}

@ -1,513 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package json is forked from the Go standard library to enable us to find the
// field of a struct that a given JSON key maps to.
package json
import (
"bytes"
"fmt"
"reflect"
"sort"
"strings"
"sync"
"unicode"
"unicode/utf8"
)
const (
patchStrategyTagKey = "patchStrategy"
patchMergeKeyTagKey = "patchMergeKey"
)
// Finds the patchStrategy and patchMergeKey struct tag fields on a given
// struct field given the struct type and the JSON name of the field.
// It returns field type, a slice of patch strategies, merge key and error.
// TODO: fix the returned errors to be introspectable.
func LookupPatchMetadataForStruct(t reflect.Type, jsonField string) (
elemType reflect.Type, patchStrategies []string, patchMergeKey string, e error) {
if t.Kind() == reflect.Ptr {
t = t.Elem()
}
if t.Kind() != reflect.Struct {
e = fmt.Errorf("merging an object in json but data type is not struct, instead is: %s",
t.Kind().String())
return
}
jf := []byte(jsonField)
// Find the field that the JSON library would use.
var f *field
fields := cachedTypeFields(t)
for i := range fields {
ff := &fields[i]
if bytes.Equal(ff.nameBytes, jf) {
f = ff
break
}
// Do case-insensitive comparison.
if f == nil && ff.equalFold(ff.nameBytes, jf) {
f = ff
}
}
if f != nil {
// Find the reflect.Value of the most preferential struct field.
tjf := t.Field(f.index[0])
// we must navigate down all the anonymously included structs in the chain
for i := 1; i < len(f.index); i++ {
tjf = tjf.Type.Field(f.index[i])
}
patchStrategy := tjf.Tag.Get(patchStrategyTagKey)
patchMergeKey = tjf.Tag.Get(patchMergeKeyTagKey)
patchStrategies = strings.Split(patchStrategy, ",")
elemType = tjf.Type
return
}
e = fmt.Errorf("unable to find api field in struct %s for the json field %q", t.Name(), jsonField)
return
}
// A field represents a single field found in a struct.
type field struct {
name string
nameBytes []byte // []byte(name)
equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
tag bool
// index is the sequence of indexes from the containing type fields to this field.
// it is a slice because anonymous structs will need multiple navigation steps to correctly
// resolve the proper fields
index []int
typ reflect.Type
omitEmpty bool
quoted bool
}
func (f field) String() string {
return fmt.Sprintf("{name: %s, type: %v, tag: %v, index: %v, omitEmpty: %v, quoted: %v}", f.name, f.typ, f.tag, f.index, f.omitEmpty, f.quoted)
}
func fillField(f field) field {
f.nameBytes = []byte(f.name)
f.equalFold = foldFunc(f.nameBytes)
return f
}
// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from json tag", then
// breaking ties with index sequence.
type byName []field
func (x byName) Len() int { return len(x) }
func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byName) Less(i, j int) bool {
if x[i].name != x[j].name {
return x[i].name < x[j].name
}
if len(x[i].index) != len(x[j].index) {
return len(x[i].index) < len(x[j].index)
}
if x[i].tag != x[j].tag {
return x[i].tag
}
return byIndex(x).Less(i, j)
}
// byIndex sorts field by index sequence.
type byIndex []field
func (x byIndex) Len() int { return len(x) }
func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
func (x byIndex) Less(i, j int) bool {
for k, xik := range x[i].index {
if k >= len(x[j].index) {
return false
}
if xik != x[j].index[k] {
return xik < x[j].index[k]
}
}
return len(x[i].index) < len(x[j].index)
}
// typeFields returns a list of fields that JSON should recognize for the given type.
// The algorithm is breadth-first search over the set of structs to include - the top struct
// and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
// Anonymous fields to explore at the current level and the next.
current := []field{}
next := []field{{typ: t}}
// Count of queued names for current level and the next.
count := map[reflect.Type]int{}
nextCount := map[reflect.Type]int{}
// Types already visited at an earlier level.
visited := map[reflect.Type]bool{}
// Fields found.
var fields []field
for len(next) > 0 {
current, next = next, current[:0]
count, nextCount = nextCount, map[reflect.Type]int{}
for _, f := range current {
if visited[f.typ] {
continue
}
visited[f.typ] = true
// Scan f.typ for fields to include.
for i := 0; i < f.typ.NumField(); i++ {
sf := f.typ.Field(i)
if sf.PkgPath != "" { // unexported
continue
}
tag := sf.Tag.Get("json")
if tag == "-" {
continue
}
name, opts := parseTag(tag)
if !isValidTag(name) {
name = ""
}
index := make([]int, len(f.index)+1)
copy(index, f.index)
index[len(f.index)] = i
ft := sf.Type
if ft.Name() == "" && ft.Kind() == reflect.Ptr {
// Follow pointer.
ft = ft.Elem()
}
// Record found field and index sequence.
if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
tagged := name != ""
if name == "" {
name = sf.Name
}
fields = append(fields, fillField(field{
name: name,
tag: tagged,
index: index,
typ: ft,
omitEmpty: opts.Contains("omitempty"),
quoted: opts.Contains("string"),
}))
if count[f.typ] > 1 {
// If there were multiple instances, add a second,
// so that the annihilation code will see a duplicate.
// It only cares about the distinction between 1 or 2,
// so don't bother generating any more copies.
fields = append(fields, fields[len(fields)-1])
}
continue
}
// Record new anonymous struct to explore in next round.
nextCount[ft]++
if nextCount[ft] == 1 {
next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
}
}
}
}
sort.Sort(byName(fields))
// Delete all fields that are hidden by the Go rules for embedded fields,
// except that fields with JSON tags are promoted.
// The fields are sorted in primary order of name, secondary order
// of field index length. Loop over names; for each name, delete
// hidden fields by choosing the one dominant field that survives.
out := fields[:0]
for advance, i := 0, 0; i < len(fields); i += advance {
// One iteration per name.
// Find the sequence of fields with the name of this first field.
fi := fields[i]
name := fi.name
for advance = 1; i+advance < len(fields); advance++ {
fj := fields[i+advance]
if fj.name != name {
break
}
}
if advance == 1 { // Only one field with this name
out = append(out, fi)
continue
}
dominant, ok := dominantField(fields[i : i+advance])
if ok {
out = append(out, dominant)
}
}
fields = out
sort.Sort(byIndex(fields))
return fields
}
// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// JSON tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
// The fields are sorted in increasing index-length order. The winner
// must therefore be one with the shortest index length. Drop all
// longer entries, which is easy: just truncate the slice.
length := len(fields[0].index)
tagged := -1 // Index of first tagged field.
for i, f := range fields {
if len(f.index) > length {
fields = fields[:i]
break
}
if f.tag {
if tagged >= 0 {
// Multiple tagged fields at the same level: conflict.
// Return no field.
return field{}, false
}
tagged = i
}
}
if tagged >= 0 {
return fields[tagged], true
}
// All remaining fields have the same length. If there's more than one,
// we have a conflict (two fields named "X" at the same level) and we
// return no field.
if len(fields) > 1 {
return field{}, false
}
return fields[0], true
}
var fieldCache struct {
sync.RWMutex
m map[reflect.Type][]field
}
// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
fieldCache.RLock()
f := fieldCache.m[t]
fieldCache.RUnlock()
if f != nil {
return f
}
// Compute fields without lock.
// Might duplicate effort but won't hold other computations back.
f = typeFields(t)
if f == nil {
f = []field{}
}
fieldCache.Lock()
if fieldCache.m == nil {
fieldCache.m = map[reflect.Type][]field{}
}
fieldCache.m[t] = f
fieldCache.Unlock()
return f
}
func isValidTag(s string) bool {
if s == "" {
return false
}
for _, c := range s {
switch {
case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
// Backslash and quote chars are reserved, but
// otherwise any punctuation chars are allowed
// in a tag name.
default:
if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
return false
}
}
}
return true
}
const (
caseMask = ^byte(0x20) // Mask to ignore case in ASCII.
kelvin = '\u212a'
smallLongEss = '\u017f'
)
// foldFunc returns one of four different case folding equivalence
// functions, from most general (and slow) to fastest:
//
// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
// 3) asciiEqualFold, no special, but includes non-letters (including _)
// 4) simpleLetterEqualFold, no specials, no non-letters.
//
// The letters S and K are special because they map to 3 runes, not just 2:
// * S maps to s and to U+017F 'ſ' Latin small letter long s
// * k maps to K and to U+212A 'K' Kelvin sign
// See http://play.golang.org/p/tTxjOc0OGo
//
// The returned function is specialized for matching against s and
// should only be given s. It's not curried for performance reasons.
func foldFunc(s []byte) func(s, t []byte) bool {
nonLetter := false
special := false // special letter
for _, b := range s {
if b >= utf8.RuneSelf {
return bytes.EqualFold
}
upper := b & caseMask
if upper < 'A' || upper > 'Z' {
nonLetter = true
} else if upper == 'K' || upper == 'S' {
// See above for why these letters are special.
special = true
}
}
if special {
return equalFoldRight
}
if nonLetter {
return asciiEqualFold
}
return simpleLetterEqualFold
}
// equalFoldRight is a specialization of bytes.EqualFold when s is
// known to be all ASCII (including punctuation), but contains an 's',
// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
// See comments on foldFunc.
func equalFoldRight(s, t []byte) bool {
for _, sb := range s {
if len(t) == 0 {
return false
}
tb := t[0]
if tb < utf8.RuneSelf {
if sb != tb {
sbUpper := sb & caseMask
if 'A' <= sbUpper && sbUpper <= 'Z' {
if sbUpper != tb&caseMask {
return false
}
} else {
return false
}
}
t = t[1:]
continue
}
// sb is ASCII and t is not. t must be either kelvin
// sign or long s; sb must be s, S, k, or K.
tr, size := utf8.DecodeRune(t)
switch sb {
case 's', 'S':
if tr != smallLongEss {
return false
}
case 'k', 'K':
if tr != kelvin {
return false
}
default:
return false
}
t = t[size:]
}
if len(t) > 0 {
return false
}
return true
}
// asciiEqualFold is a specialization of bytes.EqualFold for use when
// s is all ASCII (but may contain non-letters) and contains no
// special-folding letters.
// See comments on foldFunc.
func asciiEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, sb := range s {
tb := t[i]
if sb == tb {
continue
}
if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
if sb&caseMask != tb&caseMask {
return false
}
} else {
return false
}
}
return true
}
// simpleLetterEqualFold is a specialization of bytes.EqualFold for
// use when s is all ASCII letters (no underscores, etc) and also
// doesn't contain 'k', 'K', 's', or 'S'.
// See comments on foldFunc.
func simpleLetterEqualFold(s, t []byte) bool {
if len(s) != len(t) {
return false
}
for i, b := range s {
if b&caseMask != t[i]&caseMask {
return false
}
}
return true
}
// tagOptions is the string following a comma in a struct field's "json"
// tag, or the empty string. It does not include the leading comma.
type tagOptions string
// parseTag splits a struct field's json tag into its name and
// comma-separated options.
func parseTag(tag string) (string, tagOptions) {
if idx := strings.Index(tag, ","); idx != -1 {
return tag[:idx], tagOptions(tag[idx+1:])
}
return tag, tagOptions("")
}
// Contains reports whether a comma-separated list of options
// contains a particular substr flag. substr must be surrounded by a
// string boundary or commas.
func (o tagOptions) Contains(optionName string) bool {
if len(o) == 0 {
return false
}
s := string(o)
for s != "" {
var next string
i := strings.Index(s, ",")
if i >= 0 {
s, next = s[:i], s[i+1:]
}
if s == optionName {
return true
}
s = next
}
return false
}

vendor/k8s.io/kube-openapi/LICENSE generated vendored
@ -1,202 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

@ -1,19 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package proto is a collection of libraries for parsing and indexing the type definitions.
// The openapi spec contains the object model definitions and extensions metadata.
package proto

@ -1,275 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proto
import (
"fmt"
"sort"
"strings"
openapi_v2 "github.com/googleapis/gnostic/OpenAPIv2"
yaml "gopkg.in/yaml.v2"
)
func newSchemaError(path *Path, format string, a ...interface{}) error {
err := fmt.Sprintf(format, a...)
if path.Len() == 0 {
return fmt.Errorf("SchemaError: %v", err)
}
return fmt.Errorf("SchemaError(%v): %v", path, err)
}
// VendorExtensionToMap converts openapi VendorExtension to a map.
func VendorExtensionToMap(e []*openapi_v2.NamedAny) map[string]interface{} {
values := map[string]interface{}{}
for _, na := range e {
if na.GetName() == "" || na.GetValue() == nil {
continue
}
if na.GetValue().GetYaml() == "" {
continue
}
var value interface{}
err := yaml.Unmarshal([]byte(na.GetValue().GetYaml()), &value)
if err != nil {
continue
}
values[na.GetName()] = value
}
return values
}
// Definitions is an implementation of `Models`. It looks for
// models in an openapi Schema.
type Definitions struct {
models map[string]Schema
}
var _ Models = &Definitions{}
// NewOpenAPIData creates a new `Models` out of the openapi document.
func NewOpenAPIData(doc *openapi_v2.Document) (Models, error) {
definitions := Definitions{
models: map[string]Schema{},
}
// Save the list of all models first. This will allow us to
// validate that we don't have any dangling reference.
for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() {
definitions.models[namedSchema.GetName()] = nil
}
// Now, parse each model. We can validate that references exists.
for _, namedSchema := range doc.GetDefinitions().GetAdditionalProperties() {
path := NewPath(namedSchema.GetName())
schema, err := definitions.ParseSchema(namedSchema.GetValue(), &path)
if err != nil {
return nil, err
}
definitions.models[namedSchema.GetName()] = schema
}
return &definitions, nil
}
// We believe the schema is a reference, verify that and return a new
// Schema
func (d *Definitions) parseReference(s *openapi_v2.Schema, path *Path) (Schema, error) {
if len(s.GetProperties().GetAdditionalProperties()) > 0 {
return nil, newSchemaError(path, "unallowed embedded type definition")
}
if len(s.GetType().GetValue()) > 0 {
return nil, newSchemaError(path, "definition reference can't have a type")
}
if !strings.HasPrefix(s.GetXRef(), "#/definitions/") {
return nil, newSchemaError(path, "unallowed reference to non-definition %q", s.GetXRef())
}
reference := strings.TrimPrefix(s.GetXRef(), "#/definitions/")
if _, ok := d.models[reference]; !ok {
return nil, newSchemaError(path, "unknown model in reference: %q", reference)
}
return &Ref{
BaseSchema: d.parseBaseSchema(s, path),
reference: reference,
definitions: d,
}, nil
}
func (d *Definitions) parseBaseSchema(s *openapi_v2.Schema, path *Path) BaseSchema {
return BaseSchema{
Description: s.GetDescription(),
Extensions: VendorExtensionToMap(s.GetVendorExtension()),
Path: *path,
}
}
// We believe the schema is a map, verify and return a new schema
func (d *Definitions) parseMap(s *openapi_v2.Schema, path *Path) (Schema, error) {
if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object {
return nil, newSchemaError(path, "invalid object type")
}
if s.GetAdditionalProperties().GetSchema() == nil {
return nil, newSchemaError(path, "invalid object doesn't have additional properties")
}
sub, err := d.ParseSchema(s.GetAdditionalProperties().GetSchema(), path)
if err != nil {
return nil, err
}
return &Map{
BaseSchema: d.parseBaseSchema(s, path),
SubType: sub,
}, nil
}
func (d *Definitions) parsePrimitive(s *openapi_v2.Schema, path *Path) (Schema, error) {
var t string
if len(s.GetType().GetValue()) > 1 {
return nil, newSchemaError(path, "primitive can't have more than 1 type")
}
if len(s.GetType().GetValue()) == 1 {
t = s.GetType().GetValue()[0]
}
switch t {
case String:
case Number:
case Integer:
case Boolean:
case "": // Some models are completely empty, and can be safely ignored.
// Do nothing
default:
return nil, newSchemaError(path, "Unknown primitive type: %q", t)
}
return &Primitive{
BaseSchema: d.parseBaseSchema(s, path),
Type: t,
Format: s.GetFormat(),
}, nil
}
func (d *Definitions) parseArray(s *openapi_v2.Schema, path *Path) (Schema, error) {
if len(s.GetType().GetValue()) != 1 {
return nil, newSchemaError(path, "array should have exactly one type")
}
if s.GetType().GetValue()[0] != array {
return nil, newSchemaError(path, `array should have type "array"`)
}
if len(s.GetItems().GetSchema()) != 1 {
return nil, newSchemaError(path, "array should have exactly one sub-item")
}
sub, err := d.ParseSchema(s.GetItems().GetSchema()[0], path)
if err != nil {
return nil, err
}
return &Array{
BaseSchema: d.parseBaseSchema(s, path),
SubType: sub,
}, nil
}
func (d *Definitions) parseKind(s *openapi_v2.Schema, path *Path) (Schema, error) {
if len(s.GetType().GetValue()) != 0 && s.GetType().GetValue()[0] != object {
return nil, newSchemaError(path, "invalid object type")
}
if s.GetProperties() == nil {
return nil, newSchemaError(path, "object doesn't have properties")
}
fields := map[string]Schema{}
for _, namedSchema := range s.GetProperties().GetAdditionalProperties() {
var err error
path := path.FieldPath(namedSchema.GetName())
fields[namedSchema.GetName()], err = d.ParseSchema(namedSchema.GetValue(), &path)
if err != nil {
return nil, err
}
}
return &Kind{
BaseSchema: d.parseBaseSchema(s, path),
RequiredFields: s.GetRequired(),
Fields: fields,
}, nil
}
// ParseSchema creates a walkable Schema from an openapi schema. While
// this function is public, it doesn't leak through the interface.
func (d *Definitions) ParseSchema(s *openapi_v2.Schema, path *Path) (Schema, error) {
if len(s.GetType().GetValue()) == 1 {
t := s.GetType().GetValue()[0]
switch t {
case object:
return d.parseMap(s, path)
case array:
return d.parseArray(s, path)
}
}
if s.GetXRef() != "" {
return d.parseReference(s, path)
}
if s.GetProperties() != nil {
return d.parseKind(s, path)
}
return d.parsePrimitive(s, path)
}
// LookupModel is public through the interface of Models. It
// returns a visitable schema from the given model name.
func (d *Definitions) LookupModel(model string) Schema {
return d.models[model]
}
func (d *Definitions) ListModels() []string {
models := []string{}
for model := range d.models {
models = append(models, model)
}
sort.Strings(models)
return models
}
type Ref struct {
BaseSchema
reference string
definitions *Definitions
}
var _ Reference = &Ref{}
func (r *Ref) Reference() string {
return r.reference
}
func (r *Ref) SubSchema() Schema {
return r.definitions.models[r.reference]
}
func (r *Ref) Accept(v SchemaVisitor) {
v.VisitReference(r)
}
func (r *Ref) GetName() string {
return fmt.Sprintf("Reference to %q", r.reference)
}

@ -1,251 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package proto
import (
"fmt"
"sort"
"strings"
)
// Defines openapi types.
const (
Integer = "integer"
Number = "number"
String = "string"
Boolean = "boolean"
// These types are private as they should never leak, and are
// represented by actual structs.
array = "array"
object = "object"
)
// Models interface describe a model provider. They can give you the
// schema for a specific model.
type Models interface {
LookupModel(string) Schema
ListModels() []string
}
// SchemaVisitor is an interface that you need to implement if you want
// to "visit" an openapi schema. A dispatch on the Schema type will call
// the appropriate function based on its actual type:
// - Array is a list of one and only one given subtype
// - Map is a map of string to one and only one given subtype
// - Primitive can be string, integer, number and boolean.
// - Kind is an object with specific fields mapping to specific types.
// - Reference is a link to another definition.
type SchemaVisitor interface {
VisitArray(*Array)
VisitMap(*Map)
VisitPrimitive(*Primitive)
VisitKind(*Kind)
VisitReference(Reference)
}
// Schema is the base definition of an openapi type.
type Schema interface {
// Giving a visitor here will let you visit the actual type.
Accept(SchemaVisitor)
// Pretty print the name of the type.
GetName() string
// Describes how to access this field.
GetPath() *Path
// Describes the field.
GetDescription() string
// Returns type extensions.
GetExtensions() map[string]interface{}
}
// Path helps us keep track of type paths
type Path struct {
parent *Path
key string
}
func NewPath(key string) Path {
return Path{key: key}
}
func (p *Path) Get() []string {
if p == nil {
return []string{}
}
if p.key == "" {
return p.parent.Get()
}
return append(p.parent.Get(), p.key)
}
func (p *Path) Len() int {
return len(p.Get())
}
func (p *Path) String() string {
return strings.Join(p.Get(), "")
}
// ArrayPath appends an array index and creates a new path
func (p *Path) ArrayPath(i int) Path {
return Path{
parent: p,
key: fmt.Sprintf("[%d]", i),
}
}
// FieldPath appends a field name and creates a new path
func (p *Path) FieldPath(field string) Path {
return Path{
parent: p,
key: fmt.Sprintf(".%s", field),
}
}
// BaseSchema holds data used by each types of schema.
type BaseSchema struct {
Description string
Extensions map[string]interface{}
Path Path
}
func (b *BaseSchema) GetDescription() string {
return b.Description
}
func (b *BaseSchema) GetExtensions() map[string]interface{} {
return b.Extensions
}
func (b *BaseSchema) GetPath() *Path {
return &b.Path
}
// Array must have all its element of the same `SubType`.
type Array struct {
BaseSchema
SubType Schema
}
var _ Schema = &Array{}
func (a *Array) Accept(v SchemaVisitor) {
v.VisitArray(a)
}
func (a *Array) GetName() string {
return fmt.Sprintf("Array of %s", a.SubType.GetName())
}
// Kind is a complex object. It can have multiple different
// subtypes for each field, as defined in the `Fields` field. Mandatory
// fields are listed in `RequiredFields`. The key of the object is
// always of type `string`.
type Kind struct {
BaseSchema
// Lists names of required fields.
RequiredFields []string
// Maps field names to types.
Fields map[string]Schema
}
var _ Schema = &Kind{}
func (k *Kind) Accept(v SchemaVisitor) {
v.VisitKind(k)
}
func (k *Kind) GetName() string {
properties := []string{}
for key := range k.Fields {
properties = append(properties, key)
}
return fmt.Sprintf("Kind(%v)", properties)
}
// IsRequired returns true if `field` is a required field for this type.
func (k *Kind) IsRequired(field string) bool {
for _, f := range k.RequiredFields {
if f == field {
return true
}
}
return false
}
// Keys returns an alphabetically sorted list of keys.
func (k *Kind) Keys() []string {
keys := make([]string, 0)
for key := range k.Fields {
keys = append(keys, key)
}
sort.Strings(keys)
return keys
}
// Map is an object whose values must all be of the same `SubType`.
// The key of the object is always of type `string`.
type Map struct {
BaseSchema
SubType Schema
}
var _ Schema = &Map{}
func (m *Map) Accept(v SchemaVisitor) {
v.VisitMap(m)
}
func (m *Map) GetName() string {
return fmt.Sprintf("Map of %s", m.SubType.GetName())
}
// Primitive is a literal. There can be multiple types of primitives,
// and this subtype can be visited through the `subType` field.
type Primitive struct {
BaseSchema
// Type of a primitive must be one of: integer, number, string, boolean.
Type string
Format string
}
var _ Schema = &Primitive{}
func (p *Primitive) Accept(v SchemaVisitor) {
v.VisitPrimitive(p)
}
func (p *Primitive) GetName() string {
if p.Format == "" {
return p.Type
}
return fmt.Sprintf("%s (%s)", p.Type, p.Format)
}
// Reference implementation depends on the type of document.
type Reference interface {
Schema
Reference() string
SubSchema() Schema
}