commit 7e3fc0884e

@@ -391,7 +391,7 @@
 [[projects]]
   name = "k8s.io/apimachinery"
-  packages = ["pkg/api/equality","pkg/api/errors","pkg/api/meta","pkg/api/resource","pkg/apimachinery","pkg/apimachinery/registered","pkg/apis/meta/internalversion","pkg/apis/meta/v1","pkg/apis/meta/v1/unstructured","pkg/apis/meta/v1alpha1","pkg/conversion","pkg/conversion/queryparams","pkg/conversion/unstructured","pkg/fields","pkg/labels","pkg/runtime","pkg/runtime/schema","pkg/runtime/serializer","pkg/runtime/serializer/json","pkg/runtime/serializer/protobuf","pkg/runtime/serializer/recognizer","pkg/runtime/serializer/streaming","pkg/runtime/serializer/versioning","pkg/selection","pkg/types","pkg/util/cache","pkg/util/clock","pkg/util/diff","pkg/util/errors","pkg/util/framer","pkg/util/httpstream","pkg/util/httpstream/spdy","pkg/util/intstr","pkg/util/json","pkg/util/net","pkg/util/remotecommand","pkg/util/runtime","pkg/util/sets","pkg/util/validation","pkg/util/validation/field","pkg/util/wait","pkg/util/yaml","pkg/version","pkg/watch","third_party/forked/golang/netutil","third_party/forked/golang/reflect"]
+  packages = ["pkg/api/equality","pkg/api/errors","pkg/api/meta","pkg/api/resource","pkg/apimachinery","pkg/apimachinery/registered","pkg/apis/meta/internalversion","pkg/apis/meta/v1","pkg/apis/meta/v1/unstructured","pkg/apis/meta/v1alpha1","pkg/conversion","pkg/conversion/queryparams","pkg/conversion/unstructured","pkg/fields","pkg/labels","pkg/runtime","pkg/runtime/schema","pkg/runtime/serializer","pkg/runtime/serializer/json","pkg/runtime/serializer/protobuf","pkg/runtime/serializer/recognizer","pkg/runtime/serializer/streaming","pkg/runtime/serializer/versioning","pkg/selection","pkg/types","pkg/util/cache","pkg/util/clock","pkg/util/diff","pkg/util/errors","pkg/util/framer","pkg/util/httpstream","pkg/util/httpstream/spdy","pkg/util/intstr","pkg/util/json","pkg/util/mergepatch","pkg/util/net","pkg/util/remotecommand","pkg/util/runtime","pkg/util/sets","pkg/util/strategicpatch","pkg/util/validation","pkg/util/validation/field","pkg/util/wait","pkg/util/yaml","pkg/version","pkg/watch","third_party/forked/golang/json","third_party/forked/golang/netutil","third_party/forked/golang/reflect"]
   revision = "019ae5ada31de202164b118aee88ee2d14075c31"

 [[projects]]
@@ -426,6 +426,6 @@
 [solve-meta]
   analyzer-name = "dep"
   analyzer-version = 1
-  inputs-digest = "9d8af931a171a23e1a04151e37c15ebadf7457512a6b149da20e228b6469314d"
+  inputs-digest = "6287197115277ba882d5bb5dc20d74a8cb8e13d90c4e783c518a4e4aed55245f"
   solver-name = "gps-cdcl"
   solver-version = 1
@@ -32,6 +32,7 @@ operations can also be performed as 'ark backup get' and 'ark schedule create'.
 * [ark create](ark_create.md) - Create ark resources
 * [ark describe](ark_describe.md) - Describe ark resources
 * [ark get](ark_get.md) - Get ark resources
+* [ark plugin](ark_plugin.md) - Work with plugins
 * [ark restore](ark_restore.md) - Work with restores
 * [ark schedule](ark_schedule.md) - Work with schedules
 * [ark server](ark_server.md) - Run the ark server
@@ -0,0 +1,33 @@
+## ark plugin
+
+Work with plugins
+
+### Synopsis
+
+
+Work with plugins
+
+### Options
+
+```
+  -h, --help   help for plugin
+```
+
+### Options inherited from parent commands
+
+```
+      --alsologtostderr                  log to standard error as well as files
+      --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
+      --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
+      --log_dir string                   If non-empty, write log files in this directory
+      --logtostderr                      log to standard error instead of files
+      --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
+  -v, --v Level                          log level for V logs
+      --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
+```
+
+### SEE ALSO
+* [ark](ark.md) - Back up and restore Kubernetes cluster resources.
+* [ark plugin add](ark_plugin_add.md) - Add a plugin
+* [ark plugin remove](ark_plugin_remove.md) - Remove a plugin
+
@@ -0,0 +1,36 @@
+## ark plugin add
+
+Add a plugin
+
+### Synopsis
+
+
+Add a plugin
+
+```
+ark plugin add IMAGE [flags]
+```
+
+### Options
+
+```
+  -h, --help                help for add
+      --image-pull-policy   the imagePullPolicy for the plugin container. Valid values are Always, IfNotPresent, Never. (default IfNotPresent)
+```
+
+### Options inherited from parent commands
+
+```
+      --alsologtostderr                  log to standard error as well as files
+      --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
+      --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
+      --log_dir string                   If non-empty, write log files in this directory
+      --logtostderr                      log to standard error instead of files
+      --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
+  -v, --v Level                          log level for V logs
+      --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
+```
+
+### SEE ALSO
+* [ark plugin](ark_plugin.md) - Work with plugins
+
@@ -0,0 +1,35 @@
+## ark plugin remove
+
+Remove a plugin
+
+### Synopsis
+
+
+Remove a plugin
+
+```
+ark plugin remove [NAME | IMAGE] [flags]
+```
+
+### Options
+
+```
+  -h, --help   help for remove
+```
+
+### Options inherited from parent commands
+
+```
+      --alsologtostderr                  log to standard error as well as files
+      --kubeconfig string                Path to the kubeconfig file to use to talk to the Kubernetes apiserver. If unset, try the environment variable KUBECONFIG, as well as in-cluster configuration
+      --log_backtrace_at traceLocation   when logging hits line file:N, emit a stack trace (default :0)
+      --log_dir string                   If non-empty, write log files in this directory
+      --logtostderr                      log to standard error instead of files
+      --stderrthreshold severity         logs at or above this threshold go to stderr (default 2)
+  -v, --v Level                          log level for V logs
+      --vmodule moduleSpec               comma-separated list of pattern=N settings for file-filtered logging
+```
+
+### SEE ALSO
+* [ark plugin](ark_plugin.md) - Work with plugins
+
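For context, a hypothetical end-to-end use of the two commands documented above (the image and plugin names are illustrative, not from this commit): `ark plugin add gcr.io/example/ark-plugin:latest --image-pull-policy IfNotPresent`, later undone with `ark plugin remove ark-plugin` (by name) or `ark plugin remove gcr.io/example/ark-plugin:latest` (by image).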
@@ -37,3 +37,9 @@ spec:
         envFrom:
         - secretRef:
             name: cloud-credentials
+        volumeMounts:
+        - name: plugins
+          mountPath: /plugins
+      volumes:
+      - name: plugins
+        emptyDir: {}
@@ -37,6 +37,8 @@ spec:
         volumeMounts:
         - name: cloud-credentials
          mountPath: /credentials
+        - name: plugins
+          mountPath: /plugins
         env:
         - name: AWS_SHARED_CREDENTIALS_FILE
           value: /credentials/cloud

@@ -44,3 +46,5 @@ spec:
       - name: cloud-credentials
         secret:
           secretName: cloud-credentials
+      - name: plugins
+        emptyDir: {}
@@ -20,16 +20,21 @@ import (
 	"github.com/pkg/errors"
 	"github.com/spf13/pflag"

+	"k8s.io/client-go/kubernetes"
+
 	clientset "github.com/heptio/ark/pkg/generated/clientset/versioned"
 )

-// Factory knows how to create an ArkClient.
+// Factory knows how to create an ArkClient and Kubernetes client.
 type Factory interface {
 	// BindFlags binds common flags such as --kubeconfig to the passed-in FlagSet.
 	BindFlags(flags *pflag.FlagSet)
 	// Client returns an ArkClient. It uses the following priority to specify the cluster
 	// configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration.
 	Client() (clientset.Interface, error)
+	// KubeClient returns a Kubernetes client. It uses the following priority to specify the cluster
+	// configuration: --kubeconfig flag, KUBECONFIG environment variable, in-cluster configuration.
+	KubeClient() (kubernetes.Interface, error)
 }

 type factory struct {

@@ -65,3 +70,16 @@ func (f *factory) Client() (clientset.Interface, error) {
 	}
 	return arkClient, nil
 }
+
+func (f *factory) KubeClient() (kubernetes.Interface, error) {
+	clientConfig, err := Config(f.kubeconfig, f.baseName)
+	if err != nil {
+		return nil, err
+	}
+
+	kubeClient, err := kubernetes.NewForConfig(clientConfig)
+	if err != nil {
+		return nil, errors.WithStack(err)
+	}
+	return kubeClient, nil
+}
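For orientation, a minimal sketch (a hypothetical command, not part of this commit) of how a CLI command consumes the new `KubeClient` factory method; the command name, namespace literal, and output are illustrative:

```go
package example

import (
	"fmt"

	"github.com/spf13/cobra"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

	"github.com/heptio/ark/pkg/client"
	"github.com/heptio/ark/pkg/cmd"
)

// NewStatusCommand is a hypothetical command showing the Factory in use:
// KubeClient resolves the cluster config with the same priority rules
// (--kubeconfig flag, KUBECONFIG env var, in-cluster) as Client.
func NewStatusCommand(f client.Factory) *cobra.Command {
	return &cobra.Command{
		Use: "status",
		Run: func(c *cobra.Command, args []string) {
			kubeClient, err := f.KubeClient()
			cmd.CheckError(err)

			// List deployments in the server's namespace (name illustrative).
			deploys, err := kubeClient.AppsV1beta1().Deployments("heptio-ark").List(metav1.ListOptions{})
			cmd.CheckError(err)
			fmt.Printf("found %d deployments\n", len(deploys.Items))
		},
	}
}
```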
@@ -26,10 +26,11 @@ import (
 	"github.com/heptio/ark/pkg/cmd/cli/create"
 	"github.com/heptio/ark/pkg/cmd/cli/describe"
 	"github.com/heptio/ark/pkg/cmd/cli/get"
+	"github.com/heptio/ark/pkg/cmd/cli/plugin"
 	"github.com/heptio/ark/pkg/cmd/cli/restore"
 	"github.com/heptio/ark/pkg/cmd/cli/schedule"
 	"github.com/heptio/ark/pkg/cmd/server"
-	"github.com/heptio/ark/pkg/cmd/server/plugin"
+	runplugin "github.com/heptio/ark/pkg/cmd/server/plugin"
 	"github.com/heptio/ark/pkg/cmd/version"
 )

@@ -58,7 +59,8 @@ operations can also be performed as 'ark backup get' and 'ark schedule create'.`
 		get.NewCommand(f),
 		describe.NewCommand(f),
 		create.NewCommand(f),
-		plugin.NewCommand(),
+		runplugin.NewCommand(),
+		plugin.NewCommand(f),
 	)

 	// add the glog flags
@@ -0,0 +1,162 @@
+/*
+Copyright 2017 the Heptio Ark contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+	"encoding/json"
+	"fmt"
+	"strings"
+
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+
+	"k8s.io/api/apps/v1beta1"
+	"k8s.io/api/core/v1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
+
+	ark "github.com/heptio/ark/pkg/apis/ark/v1"
+	"github.com/heptio/ark/pkg/client"
+	"github.com/heptio/ark/pkg/cmd"
+	"github.com/heptio/ark/pkg/cmd/util/flag"
+)
+
+const (
+	pluginsVolumeName = "plugins"
+	arkDeployment     = "ark"
+	arkContainer      = "ark"
+)
+
+func NewAddCommand(f client.Factory) *cobra.Command {
+	var (
+		imagePullPolicies   = []string{string(v1.PullAlways), string(v1.PullIfNotPresent), string(v1.PullNever)}
+		imagePullPolicyFlag = flag.NewEnum(string(v1.PullIfNotPresent), imagePullPolicies...)
+	)
+
+	c := &cobra.Command{
+		Use:   "add IMAGE",
+		Short: "Add a plugin",
+		Run: func(c *cobra.Command, args []string) {
+			if len(args) != 1 {
+				cmd.CheckError(errors.New("you must specify only one argument, the plugin container image"))
+			}
+
+			kubeClient, err := f.KubeClient()
+			if err != nil {
+				cmd.CheckError(err)
+			}
+
+			arkDeploy, err := kubeClient.AppsV1beta1().Deployments(ark.DefaultNamespace).Get(arkDeployment, metav1.GetOptions{})
+			if err != nil {
+				cmd.CheckError(err)
+			}
+
+			original, err := json.Marshal(arkDeploy)
+			cmd.CheckError(err)
+
+			// ensure the plugins volume & mount exist
+			volumeExists := false
+			for _, volume := range arkDeploy.Spec.Template.Spec.Volumes {
+				if volume.Name == pluginsVolumeName {
+					volumeExists = true
+					break
+				}
+			}
+
+			if !volumeExists {
+				volume := v1.Volume{
+					Name: pluginsVolumeName,
+					VolumeSource: v1.VolumeSource{
+						EmptyDir: &v1.EmptyDirVolumeSource{},
+					},
+				}
+
+				volumeMount := v1.VolumeMount{
+					Name:      pluginsVolumeName,
+					MountPath: "/plugins",
+				}
+
+				arkDeploy.Spec.Template.Spec.Volumes = append(arkDeploy.Spec.Template.Spec.Volumes, volume)
+
+				containers := arkDeploy.Spec.Template.Spec.Containers
+				containerIndex := -1
+				for x, container := range containers {
+					if container.Name == arkContainer {
+						containerIndex = x
+						break
+					}
+				}
+
+				if containerIndex < 0 {
+					cmd.CheckError(errors.New("ark container not found in ark deployment"))
+				}
+
+				containers[containerIndex].VolumeMounts = append(containers[containerIndex].VolumeMounts, volumeMount)
+			}
+
+			// add the plugin as an init container
+			plugin := v1.Container{
+				Name:            getName(args[0]),
+				Image:           args[0],
+				ImagePullPolicy: v1.PullPolicy(imagePullPolicyFlag.String()),
+				VolumeMounts: []v1.VolumeMount{
+					{
+						Name:      pluginsVolumeName,
+						MountPath: "/target",
+					},
+				},
+			}
+
+			arkDeploy.Spec.Template.Spec.InitContainers = append(arkDeploy.Spec.Template.Spec.InitContainers, plugin)
+
+			// create & apply the patch
+			updated, err := json.Marshal(arkDeploy)
+			cmd.CheckError(err)
+
+			patchBytes, err := strategicpatch.CreateTwoWayMergePatch(original, updated, v1beta1.Deployment{})
+			cmd.CheckError(err)
+
+			_, err = kubeClient.AppsV1beta1().Deployments(ark.DefaultNamespace).Patch(arkDeploy.Name, types.StrategicMergePatchType, patchBytes)
+			cmd.CheckError(err)
+		},
+	}
+
+	c.Flags().Var(imagePullPolicyFlag, "image-pull-policy", fmt.Sprintf("the imagePullPolicy for the plugin container. Valid values are %s.", strings.Join(imagePullPolicies, ", ")))
+
+	return c
+}
+
+// getName returns the 'name' component of a docker
+// image (i.e. everything after the last '/' and before
+// any subsequent ':')
+func getName(image string) string {
+	slashIndex := strings.LastIndex(image, "/")
+	colonIndex := strings.LastIndex(image, ":")
+
+	start := 0
+	if slashIndex > 0 {
+		start = slashIndex + 1
+	}
+
+	end := len(image)
+	if colonIndex > slashIndex {
+		end = colonIndex
+	}
+
+	return image[start:end]
+}
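A minimal standalone sketch (not from this commit) of the patch round trip the add/remove commands rely on: marshal the object, mutate it, marshal again, and diff the two JSON documents into a strategic merge patch. The plugin name and image are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/api/apps/v1beta1"
	"k8s.io/api/core/v1"
	"k8s.io/apimachinery/pkg/util/strategicpatch"
)

func main() {
	deploy := v1beta1.Deployment{}
	original, err := json.Marshal(deploy)
	if err != nil {
		panic(err)
	}

	// Mutate: add an init container, as `ark plugin add` does.
	deploy.Spec.Template.Spec.InitContainers = append(
		deploy.Spec.Template.Spec.InitContainers,
		v1.Container{Name: "my-plugin", Image: "example/my-plugin:latest"},
	)

	updated, err := json.Marshal(deploy)
	if err != nil {
		panic(err)
	}

	// The third argument supplies the struct tags (patchStrategy/patchMergeKey)
	// that drive list-merge semantics for the generated patch.
	patch, err := strategicpatch.CreateTwoWayMergePatch(original, updated, v1beta1.Deployment{})
	if err != nil {
		panic(err)
	}
	fmt.Println(string(patch))
}
```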
@@ -0,0 +1,47 @@
+package plugin
+
+import (
+	"testing"
+
+	"github.com/stretchr/testify/assert"
+)
+
+func TestGetName(t *testing.T) {
+	tests := []struct {
+		name     string
+		image    string
+		expected string
+	}{
+		{
+			name:     "image name with registry hostname and tag",
+			image:    "gcr.io/my-repo/my-image:latest",
+			expected: "my-image",
+		},
+		{
+			name:     "image name with registry hostname, without tag",
+			image:    "gcr.io/my-repo/my-image",
+			expected: "my-image",
+		},
+		{
+			name:     "image name without registry hostname, with tag",
+			image:    "my-repo/my-image:latest",
+			expected: "my-image",
+		},
+		{
+			name:     "image name without registry hostname, without tag",
+			image:    "my-repo/my-image",
+			expected: "my-image",
+		},
+		{
+			name:     "image name with registry hostname and port, and tag",
+			image:    "mycustomregistry.io:8080/my-repo/my-image:latest",
+			expected: "my-image",
+		},
+	}
+
+	for _, test := range tests {
+		t.Run(test.name, func(t *testing.T) {
+			assert.Equal(t, test.expected, getName(test.image))
+		})
+	}
+}
@@ -0,0 +1,38 @@
+/*
+Copyright 2017 the Heptio Ark contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+	"github.com/spf13/cobra"
+
+	"github.com/heptio/ark/pkg/client"
+)
+
+func NewCommand(f client.Factory) *cobra.Command {
+	c := &cobra.Command{
+		Use:   "plugin",
+		Short: "Work with plugins",
+		Long:  "Work with plugins",
+	}
+
+	c.AddCommand(
+		NewAddCommand(f),
+		NewRemoveCommand(f),
+	)
+
+	return c
+}
@@ -0,0 +1,87 @@
+/*
+Copyright 2017 the Heptio Ark contributors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package plugin
+
+import (
+	"encoding/json"
+
+	"github.com/pkg/errors"
+	"github.com/spf13/cobra"
+
+	"k8s.io/api/apps/v1beta1"
+	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+	"k8s.io/apimachinery/pkg/types"
+	"k8s.io/apimachinery/pkg/util/strategicpatch"
+
+	ark "github.com/heptio/ark/pkg/apis/ark/v1"
+	"github.com/heptio/ark/pkg/client"
+	"github.com/heptio/ark/pkg/cmd"
+)
+
+func NewRemoveCommand(f client.Factory) *cobra.Command {
+	c := &cobra.Command{
+		Use:   "remove [NAME | IMAGE]",
+		Short: "Remove a plugin",
+		Run: func(c *cobra.Command, args []string) {
+			if len(args) != 1 {
+				cmd.CheckError(errors.New("you must specify only one argument, the plugin container's name or image"))
+			}
+
+			kubeClient, err := f.KubeClient()
+			if err != nil {
+				cmd.CheckError(err)
+			}
+
+			arkDeploy, err := kubeClient.AppsV1beta1().Deployments(ark.DefaultNamespace).Get(arkDeployment, metav1.GetOptions{})
+			if err != nil {
+				cmd.CheckError(err)
+			}
+
+			original, err := json.Marshal(arkDeploy)
+			cmd.CheckError(err)
+
+			var (
+				initContainers = arkDeploy.Spec.Template.Spec.InitContainers
+				index          = -1
+			)
+
+			for x, container := range initContainers {
+				if container.Name == args[0] || container.Image == args[0] {
+					index = x
+					break
+				}
+			}
+
+			if index == -1 {
+				cmd.CheckError(errors.Errorf("init container %s not found in Ark server deployment", args[0]))
+			}
+
+			arkDeploy.Spec.Template.Spec.InitContainers = append(initContainers[0:index], initContainers[index+1:]...)
+
+			updated, err := json.Marshal(arkDeploy)
+			cmd.CheckError(err)
+
+			patchBytes, err := strategicpatch.CreateTwoWayMergePatch(original, updated, v1beta1.Deployment{})
+			cmd.CheckError(err)
+
+			_, err = kubeClient.AppsV1beta1().Deployments(ark.DefaultNamespace).Patch(arkDeploy.Name, types.StrategicMergePatchType, patchBytes)
+			cmd.CheckError(err)
+		},
+	}
+
+	return c
+}
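A side note on the removal line above: `append(initContainers[0:index], initContainers[index+1:]...)` is the standard Go idiom for deleting element `index` from a slice; it shifts the tail left in place over the same backing array. A standalone illustration (names illustrative):

```go
package main

import "fmt"

func main() {
	s := []string{"a", "b", "c", "d"}
	i := 1 // remove "b"
	s = append(s[:i], s[i+1:]...)
	fmt.Println(s) // [a c d]
}
```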
@@ -56,7 +56,7 @@ func NewCommand() *cobra.Command {
 	}

 	c := &cobra.Command{
-		Use:    "plugin [KIND] [NAME]",
+		Use:    "run-plugin [KIND] [NAME]",
 		Hidden: true,
 		Short:  "INTERNAL COMMAND ONLY - not intended to be run directly by users",
 		Run: func(c *cobra.Command, args []string) {

@@ -74,7 +74,7 @@ func NewCommand() *cobra.Command {
 		GRPCServer: plugin.DefaultGRPCServer,
 	}

-	logger.Debugf("Running plugin command")
+	logger.Debugf("Executing run-plugin command")

 	switch kind {
 	case "cloudprovider":
@@ -176,13 +176,13 @@ func getPluginInstance(client *plugin.Client, kind PluginKind) (interface{}, err
 func (m *manager) registerPlugins() error {
 	// first, register internal plugins
 	for _, provider := range []string{"aws", "gcp", "azure"} {
-		m.pluginRegistry.register(provider, "/ark", []string{"plugin", "cloudprovider", provider}, PluginKindObjectStore, PluginKindBlockStore)
+		m.pluginRegistry.register(provider, "/ark", []string{"run-plugin", "cloudprovider", provider}, PluginKindObjectStore, PluginKindBlockStore)
 	}
-	m.pluginRegistry.register("pv", "/ark", []string{"plugin", string(PluginKindBackupItemAction), "pv"}, PluginKindBackupItemAction)
+	m.pluginRegistry.register("pv", "/ark", []string{"run-plugin", string(PluginKindBackupItemAction), "pv"}, PluginKindBackupItemAction)

-	m.pluginRegistry.register("job", "/ark", []string{"plugin", string(PluginKindRestoreItemAction), "job"}, PluginKindRestoreItemAction)
-	m.pluginRegistry.register("pod", "/ark", []string{"plugin", string(PluginKindRestoreItemAction), "pod"}, PluginKindRestoreItemAction)
-	m.pluginRegistry.register("svc", "/ark", []string{"plugin", string(PluginKindRestoreItemAction), "svc"}, PluginKindRestoreItemAction)
+	m.pluginRegistry.register("job", "/ark", []string{"run-plugin", string(PluginKindRestoreItemAction), "job"}, PluginKindRestoreItemAction)
+	m.pluginRegistry.register("pod", "/ark", []string{"run-plugin", string(PluginKindRestoreItemAction), "pod"}, PluginKindRestoreItemAction)
+	m.pluginRegistry.register("svc", "/ark", []string{"run-plugin", string(PluginKindRestoreItemAction), "svc"}, PluginKindRestoreItemAction)

 	// second, register external plugins (these will override internal plugins, if applicable)
 	if _, err := os.Stat(pluginDir); err != nil {
@@ -0,0 +1,5 @@
+approvers:
+- pwittrock
+reviewers:
+- mengqiy
+- apelisse
@@ -0,0 +1,101 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mergepatch
+
+import (
+	"errors"
+	"fmt"
+	"reflect"
+)
+
+var (
+	ErrBadJSONDoc                           = errors.New("invalid JSON document")
+	ErrNoListOfLists                        = errors.New("lists of lists are not supported")
+	ErrBadPatchFormatForPrimitiveList       = errors.New("invalid patch format of primitive list")
+	ErrBadPatchFormatForRetainKeys          = errors.New("invalid patch format of retainKeys")
+	ErrBadPatchFormatForSetElementOrderList = errors.New("invalid patch format of setElementOrder list")
+	ErrPatchContentNotMatchRetainKeys       = errors.New("patch content doesn't match retainKeys list")
+)
+
+func ErrNoMergeKey(m map[string]interface{}, k string) error {
+	return fmt.Errorf("map: %v does not contain declared merge key: %s", m, k)
+}
+
+func ErrBadArgType(expected, actual interface{}) error {
+	return fmt.Errorf("expected a %s, but received a %s",
+		reflect.TypeOf(expected),
+		reflect.TypeOf(actual))
+}
+
+func ErrBadArgKind(expected, actual interface{}) error {
+	var expectedKindString, actualKindString string
+	if expected == nil {
+		expectedKindString = "nil"
+	} else {
+		expectedKindString = reflect.TypeOf(expected).Kind().String()
+	}
+	if actual == nil {
+		actualKindString = "nil"
+	} else {
+		actualKindString = reflect.TypeOf(actual).Kind().String()
+	}
+	return fmt.Errorf("expected a %s, but received a %s", expectedKindString, actualKindString)
+}
+
+func ErrBadPatchType(t interface{}, m map[string]interface{}) error {
+	return fmt.Errorf("unknown patch type: %s in map: %v", t, m)
+}
+
+// IsPreconditionFailed returns true if the provided error indicates
+// a precondition failed.
+func IsPreconditionFailed(err error) bool {
+	_, ok := err.(ErrPreconditionFailed)
+	return ok
+}
+
+type ErrPreconditionFailed struct {
+	message string
+}
+
+func NewErrPreconditionFailed(target map[string]interface{}) ErrPreconditionFailed {
+	s := fmt.Sprintf("precondition failed for: %v", target)
+	return ErrPreconditionFailed{s}
+}
+
+func (err ErrPreconditionFailed) Error() string {
+	return err.message
+}
+
+type ErrConflict struct {
+	message string
+}
+
+func NewErrConflict(patch, current string) ErrConflict {
+	s := fmt.Sprintf("patch:\n%s\nconflicts with changes made from original to current:\n%s\n", patch, current)
+	return ErrConflict{s}
+}
+
+func (err ErrConflict) Error() string {
+	return err.message
+}
+
+// IsConflict returns true if the provided error indicates
+// a conflict between the patch and the current configuration.
+func IsConflict(err error) bool {
+	_, ok := err.(ErrConflict)
+	return ok
+}
@@ -0,0 +1,133 @@
+/*
+Copyright 2017 The Kubernetes Authors.
+
+Licensed under the Apache License, Version 2.0 (the "License");
+you may not use this file except in compliance with the License.
+You may obtain a copy of the License at
+
+    http://www.apache.org/licenses/LICENSE-2.0
+
+Unless required by applicable law or agreed to in writing, software
+distributed under the License is distributed on an "AS IS" BASIS,
+WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+See the License for the specific language governing permissions and
+limitations under the License.
+*/
+
+package mergepatch
+
+import (
+	"fmt"
+	"reflect"
+
+	"github.com/davecgh/go-spew/spew"
+	"github.com/ghodss/yaml"
+)
+
+// PreconditionFunc asserts that an incompatible change is not present within a patch.
+type PreconditionFunc func(interface{}) bool
+
+// RequireKeyUnchanged returns a precondition function that fails if the provided key
+// is present in the patch (indicating that its value has changed).
+func RequireKeyUnchanged(key string) PreconditionFunc {
+	return func(patch interface{}) bool {
+		patchMap, ok := patch.(map[string]interface{})
+		if !ok {
+			return true
+		}
+
+		// The presence of key means that its value has been changed, so the test fails.
+		_, ok = patchMap[key]
+		return !ok
+	}
+}
+
+// RequireMetadataKeyUnchanged creates a precondition function that fails
+// if the metadata.key is present in the patch (indicating its value
+// has changed).
+func RequireMetadataKeyUnchanged(key string) PreconditionFunc {
+	return func(patch interface{}) bool {
+		patchMap, ok := patch.(map[string]interface{})
+		if !ok {
+			return true
+		}
+		patchMap1, ok := patchMap["metadata"]
+		if !ok {
+			return true
+		}
+		patchMap2, ok := patchMap1.(map[string]interface{})
+		if !ok {
+			return true
+		}
+		_, ok = patchMap2[key]
+		return !ok
+	}
+}
+
+func ToYAMLOrError(v interface{}) string {
+	y, err := toYAML(v)
+	if err != nil {
+		return err.Error()
+	}
+
+	return y
+}
+
+func toYAML(v interface{}) (string, error) {
+	y, err := yaml.Marshal(v)
+	if err != nil {
+		return "", fmt.Errorf("yaml marshal failed:%v\n%v\n", err, spew.Sdump(v))
+	}
+
+	return string(y), nil
+}
+
+// HasConflicts returns true if the left and right JSON interface objects overlap with
+// different values in any key. All keys are required to be strings. Since patches of the
+// same Type have congruent keys, this is valid for multiple patch types. This method
+// supports JSON merge patch semantics.
+//
+// NOTE: Numbers with different types (e.g. int(0) vs int64(0)) will be detected as conflicts.
+// Make sure the unmarshaling of left and right are consistent (e.g. use the same library).
+func HasConflicts(left, right interface{}) (bool, error) {
+	switch typedLeft := left.(type) {
+	case map[string]interface{}:
+		switch typedRight := right.(type) {
+		case map[string]interface{}:
+			for key, leftValue := range typedLeft {
+				rightValue, ok := typedRight[key]
+				if !ok {
+					continue
+				}
+				if conflict, err := HasConflicts(leftValue, rightValue); err != nil || conflict {
+					return conflict, err
+				}
+			}
+
+			return false, nil
+		default:
+			return true, nil
+		}
+	case []interface{}:
+		switch typedRight := right.(type) {
+		case []interface{}:
+			if len(typedLeft) != len(typedRight) {
+				return true, nil
+			}
+
+			for i := range typedLeft {
+				if conflict, err := HasConflicts(typedLeft[i], typedRight[i]); err != nil || conflict {
+					return conflict, err
+				}
+			}
+
+			return false, nil
+		default:
+			return true, nil
+		}
+	case string, float64, bool, int, int64, nil:
+		return !reflect.DeepEqual(left, right), nil
+	default:
+		return true, fmt.Errorf("unknown type: %v", reflect.TypeOf(left))
+	}
+}
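A minimal sketch (not from this commit) exercising the vendored mergepatch helpers above; the JSON documents are illustrative:

```go
package main

import (
	"encoding/json"
	"fmt"

	"k8s.io/apimachinery/pkg/util/mergepatch"
)

func main() {
	var left, right map[string]interface{}
	if err := json.Unmarshal([]byte(`{"spec":{"replicas":1}}`), &left); err != nil {
		panic(err)
	}
	if err := json.Unmarshal([]byte(`{"spec":{"replicas":2}}`), &right); err != nil {
		panic(err)
	}

	// Same key, different values: a conflict.
	conflict, err := mergepatch.HasConflicts(left, right)
	fmt.Println(conflict, err) // true <nil>

	// Preconditions veto patches that touch protected keys.
	pre := mergepatch.RequireMetadataKeyUnchanged("name")
	patch := map[string]interface{}{"metadata": map[string]interface{}{"name": "new-name"}}
	fmt.Println(pre(patch)) // false: the patch changes metadata.name
}
```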
@@ -0,0 +1,5 @@
+approvers:
+- pwittrock
+reviewers:
+- mengqiy
+- apelisse
(File diff suppressed because it is too large)
@@ -0,0 +1,5 @@
+approvers:
+- pwittrock
+reviewers:
+- mengqiy
+- apelisse
vendor/k8s.io/apimachinery/third_party/forked/golang/json/fields.go (516 lines, generated, vendored, new file)
@@ -0,0 +1,516 @@
+// Copyright 2013 The Go Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style
+// license that can be found in the LICENSE file.
+
+// Package json is forked from the Go standard library to enable us to find the
+// field of a struct that a given JSON key maps to.
+package json
+
+import (
+	"bytes"
+	"fmt"
+	"reflect"
+	"sort"
+	"strings"
+	"sync"
+	"unicode"
+	"unicode/utf8"
+)
+
+const (
+	patchStrategyTagKey = "patchStrategy"
+	patchMergeKeyTagKey = "patchMergeKey"
+)
+
+// Finds the patchStrategy and patchMergeKey struct tag fields on a given
+// struct field given the struct type and the JSON name of the field.
+// It returns field type, a slice of patch strategies, merge key and error.
+// TODO: fix the returned errors to be introspectable.
+func LookupPatchMetadata(t reflect.Type, jsonField string) (
+	elemType reflect.Type, patchStrategies []string, patchMergeKey string, e error) {
+	if t.Kind() == reflect.Ptr {
+		t = t.Elem()
+	}
+	if t.Kind() == reflect.Map {
+		elemType = t.Elem()
+		return
+	}
+	if t.Kind() != reflect.Struct {
+		e = fmt.Errorf("merging an object in json but data type is not map or struct, instead is: %s",
+			t.Kind().String())
+		return
+	}
+	jf := []byte(jsonField)
+	// Find the field that the JSON library would use.
+	var f *field
+	fields := cachedTypeFields(t)
+	for i := range fields {
+		ff := &fields[i]
+		if bytes.Equal(ff.nameBytes, jf) {
+			f = ff
+			break
+		}
+		// Do case-insensitive comparison.
+		if f == nil && ff.equalFold(ff.nameBytes, jf) {
+			f = ff
+		}
+	}
+	if f != nil {
+		// Find the reflect.Value of the most preferential struct field.
+		tjf := t.Field(f.index[0])
+		// we must navigate down all the anonymously included structs in the chain
+		for i := 1; i < len(f.index); i++ {
+			tjf = tjf.Type.Field(f.index[i])
+		}
+		patchStrategy := tjf.Tag.Get(patchStrategyTagKey)
+		patchMergeKey = tjf.Tag.Get(patchMergeKeyTagKey)
+		patchStrategies = strings.Split(patchStrategy, ",")
+		elemType = tjf.Type
+		return
+	}
+	e = fmt.Errorf("unable to find api field in struct %s for the json field %q", t.Name(), jsonField)
+	return
+}
+
+// A field represents a single field found in a struct.
+type field struct {
+	name      string
+	nameBytes []byte                 // []byte(name)
+	equalFold func(s, t []byte) bool // bytes.EqualFold or equivalent
+
+	tag bool
+	// index is the sequence of indexes from the containing type fields to this field.
+	// it is a slice because anonymous structs will need multiple navigation steps to correctly
+	// resolve the proper fields
+	index     []int
+	typ       reflect.Type
+	omitEmpty bool
+	quoted    bool
+}
+
+func (f field) String() string {
+	return fmt.Sprintf("{name: %s, type: %v, tag: %v, index: %v, omitEmpty: %v, quoted: %v}", f.name, f.typ, f.tag, f.index, f.omitEmpty, f.quoted)
+}
+
+func fillField(f field) field {
+	f.nameBytes = []byte(f.name)
+	f.equalFold = foldFunc(f.nameBytes)
+	return f
+}
+
+// byName sorts field by name, breaking ties with depth,
+// then breaking ties with "name came from json tag", then
+// breaking ties with index sequence.
+type byName []field
+
+func (x byName) Len() int { return len(x) }
+
+func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byName) Less(i, j int) bool {
+	if x[i].name != x[j].name {
+		return x[i].name < x[j].name
+	}
+	if len(x[i].index) != len(x[j].index) {
+		return len(x[i].index) < len(x[j].index)
+	}
+	if x[i].tag != x[j].tag {
+		return x[i].tag
+	}
+	return byIndex(x).Less(i, j)
+}
+
+// byIndex sorts field by index sequence.
+type byIndex []field
+
+func (x byIndex) Len() int { return len(x) }
+
+func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }
+
+func (x byIndex) Less(i, j int) bool {
+	for k, xik := range x[i].index {
+		if k >= len(x[j].index) {
+			return false
+		}
+		if xik != x[j].index[k] {
+			return xik < x[j].index[k]
+		}
+	}
+	return len(x[i].index) < len(x[j].index)
+}
+
+// typeFields returns a list of fields that JSON should recognize for the given type.
+// The algorithm is breadth-first search over the set of structs to include - the top struct
+// and then any reachable anonymous structs.
+func typeFields(t reflect.Type) []field {
+	// Anonymous fields to explore at the current level and the next.
+	current := []field{}
+	next := []field{{typ: t}}
+
+	// Count of queued names for current level and the next.
+	count := map[reflect.Type]int{}
+	nextCount := map[reflect.Type]int{}
+
+	// Types already visited at an earlier level.
+	visited := map[reflect.Type]bool{}
+
+	// Fields found.
+	var fields []field
+
+	for len(next) > 0 {
+		current, next = next, current[:0]
+		count, nextCount = nextCount, map[reflect.Type]int{}
+
+		for _, f := range current {
+			if visited[f.typ] {
+				continue
+			}
+			visited[f.typ] = true
+
+			// Scan f.typ for fields to include.
+			for i := 0; i < f.typ.NumField(); i++ {
+				sf := f.typ.Field(i)
+				if sf.PkgPath != "" { // unexported
+					continue
+				}
+				tag := sf.Tag.Get("json")
+				if tag == "-" {
+					continue
+				}
+				name, opts := parseTag(tag)
+				if !isValidTag(name) {
+					name = ""
+				}
+				index := make([]int, len(f.index)+1)
+				copy(index, f.index)
+				index[len(f.index)] = i
+
+				ft := sf.Type
+				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
+					// Follow pointer.
+					ft = ft.Elem()
+				}
+
+				// Record found field and index sequence.
+				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
+					tagged := name != ""
+					if name == "" {
+						name = sf.Name
+					}
+					fields = append(fields, fillField(field{
+						name:      name,
+						tag:       tagged,
+						index:     index,
+						typ:       ft,
+						omitEmpty: opts.Contains("omitempty"),
+						quoted:    opts.Contains("string"),
+					}))
+					if count[f.typ] > 1 {
+						// If there were multiple instances, add a second,
+						// so that the annihilation code will see a duplicate.
+						// It only cares about the distinction between 1 or 2,
+						// so don't bother generating any more copies.
+						fields = append(fields, fields[len(fields)-1])
+					}
+					continue
+				}
+
+				// Record new anonymous struct to explore in next round.
+				nextCount[ft]++
+				if nextCount[ft] == 1 {
+					next = append(next, fillField(field{name: ft.Name(), index: index, typ: ft}))
+				}
+			}
+		}
+	}
+
+	sort.Sort(byName(fields))
+
+	// Delete all fields that are hidden by the Go rules for embedded fields,
+	// except that fields with JSON tags are promoted.
+
+	// The fields are sorted in primary order of name, secondary order
+	// of field index length. Loop over names; for each name, delete
+	// hidden fields by choosing the one dominant field that survives.
+	out := fields[:0]
+	for advance, i := 0, 0; i < len(fields); i += advance {
+		// One iteration per name.
+		// Find the sequence of fields with the name of this first field.
+		fi := fields[i]
+		name := fi.name
+		for advance = 1; i+advance < len(fields); advance++ {
+			fj := fields[i+advance]
+			if fj.name != name {
+				break
+			}
+		}
+		if advance == 1 { // Only one field with this name
+			out = append(out, fi)
+			continue
+		}
+		dominant, ok := dominantField(fields[i : i+advance])
+		if ok {
+			out = append(out, dominant)
+		}
+	}
+
+	fields = out
+	sort.Sort(byIndex(fields))
+
+	return fields
+}
+
+// dominantField looks through the fields, all of which are known to
+// have the same name, to find the single field that dominates the
+// others using Go's embedding rules, modified by the presence of
+// JSON tags. If there are multiple top-level fields, the boolean
+// will be false: This condition is an error in Go and we skip all
+// the fields.
+func dominantField(fields []field) (field, bool) {
+	// The fields are sorted in increasing index-length order. The winner
+	// must therefore be one with the shortest index length. Drop all
+	// longer entries, which is easy: just truncate the slice.
+	length := len(fields[0].index)
+	tagged := -1 // Index of first tagged field.
+	for i, f := range fields {
+		if len(f.index) > length {
+			fields = fields[:i]
+			break
+		}
+		if f.tag {
+			if tagged >= 0 {
+				// Multiple tagged fields at the same level: conflict.
+				// Return no field.
+				return field{}, false
+			}
+			tagged = i
+		}
+	}
+	if tagged >= 0 {
+		return fields[tagged], true
+	}
+	// All remaining fields have the same length. If there's more than one,
+	// we have a conflict (two fields named "X" at the same level) and we
+	// return no field.
+	if len(fields) > 1 {
+		return field{}, false
+	}
+	return fields[0], true
+}
+
+var fieldCache struct {
+	sync.RWMutex
+	m map[reflect.Type][]field
+}
+
+// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
+func cachedTypeFields(t reflect.Type) []field {
+	fieldCache.RLock()
+	f := fieldCache.m[t]
+	fieldCache.RUnlock()
+	if f != nil {
+		return f
+	}
+
+	// Compute fields without lock.
+	// Might duplicate effort but won't hold other computations back.
+	f = typeFields(t)
+	if f == nil {
+		f = []field{}
+	}
+
+	fieldCache.Lock()
+	if fieldCache.m == nil {
+		fieldCache.m = map[reflect.Type][]field{}
+	}
+	fieldCache.m[t] = f
+	fieldCache.Unlock()
+	return f
+}
+
+func isValidTag(s string) bool {
+	if s == "" {
+		return false
+	}
+	for _, c := range s {
+		switch {
+		case strings.ContainsRune("!#$%&()*+-./:<=>?@[]^_{|}~ ", c):
+			// Backslash and quote chars are reserved, but
+			// otherwise any punctuation chars are allowed
+			// in a tag name.
+		default:
+			if !unicode.IsLetter(c) && !unicode.IsDigit(c) {
+				return false
+			}
+		}
+	}
+	return true
+}
+
+const (
+	caseMask     = ^byte(0x20) // Mask to ignore case in ASCII.
+	kelvin       = '\u212a'
+	smallLongEss = '\u017f'
+)
+
+// foldFunc returns one of four different case folding equivalence
+// functions, from most general (and slow) to fastest:
+//
+// 1) bytes.EqualFold, if the key s contains any non-ASCII UTF-8
+// 2) equalFoldRight, if s contains special folding ASCII ('k', 'K', 's', 'S')
+// 3) asciiEqualFold, no special, but includes non-letters (including _)
+// 4) simpleLetterEqualFold, no specials, no non-letters.
+//
+// The letters S and K are special because they map to 3 runes, not just 2:
+//  * S maps to s and to U+017F 'ſ' Latin small letter long s
+//  * k maps to K and to U+212A 'K' Kelvin sign
+// See http://play.golang.org/p/tTxjOc0OGo
+//
+// The returned function is specialized for matching against s and
+// should only be given s. It's not curried for performance reasons.
+func foldFunc(s []byte) func(s, t []byte) bool {
+	nonLetter := false
+	special := false // special letter
+	for _, b := range s {
+		if b >= utf8.RuneSelf {
+			return bytes.EqualFold
+		}
+		upper := b & caseMask
+		if upper < 'A' || upper > 'Z' {
+			nonLetter = true
+		} else if upper == 'K' || upper == 'S' {
+			// See above for why these letters are special.
+			special = true
+		}
+	}
+	if special {
+		return equalFoldRight
+	}
+	if nonLetter {
+		return asciiEqualFold
+	}
+	return simpleLetterEqualFold
+}
+
+// equalFoldRight is a specialization of bytes.EqualFold when s is
+// known to be all ASCII (including punctuation), but contains an 's',
+// 'S', 'k', or 'K', requiring a Unicode fold on the bytes in t.
+// See comments on foldFunc.
+func equalFoldRight(s, t []byte) bool {
+	for _, sb := range s {
+		if len(t) == 0 {
+			return false
+		}
+		tb := t[0]
+		if tb < utf8.RuneSelf {
+			if sb != tb {
+				sbUpper := sb & caseMask
+				if 'A' <= sbUpper && sbUpper <= 'Z' {
+					if sbUpper != tb&caseMask {
+						return false
+					}
+				} else {
+					return false
+				}
+			}
+			t = t[1:]
+			continue
+		}
+		// sb is ASCII and t is not. t must be either kelvin
+		// sign or long s; sb must be s, S, k, or K.
+		tr, size := utf8.DecodeRune(t)
+		switch sb {
+		case 's', 'S':
+			if tr != smallLongEss {
+				return false
+			}
+		case 'k', 'K':
+			if tr != kelvin {
+				return false
+			}
+		default:
+			return false
+		}
+		t = t[size:]
+
+	}
+	if len(t) > 0 {
+		return false
+	}
+	return true
+}
+
+// asciiEqualFold is a specialization of bytes.EqualFold for use when
+// s is all ASCII (but may contain non-letters) and contains no
+// special-folding letters.
+// See comments on foldFunc.
+func asciiEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, sb := range s {
+		tb := t[i]
+		if sb == tb {
+			continue
+		}
+		if ('a' <= sb && sb <= 'z') || ('A' <= sb && sb <= 'Z') {
+			if sb&caseMask != tb&caseMask {
+				return false
+			}
+		} else {
+			return false
+		}
+	}
+	return true
+}
+
+// simpleLetterEqualFold is a specialization of bytes.EqualFold for
+// use when s is all ASCII letters (no underscores, etc) and also
+// doesn't contain 'k', 'K', 's', or 'S'.
+// See comments on foldFunc.
+func simpleLetterEqualFold(s, t []byte) bool {
+	if len(s) != len(t) {
+		return false
+	}
+	for i, b := range s {
+		if b&caseMask != t[i]&caseMask {
+			return false
+		}
+	}
+	return true
+}
+
+// tagOptions is the string following a comma in a struct field's "json"
+// tag, or the empty string. It does not include the leading comma.
+type tagOptions string
+
+// parseTag splits a struct field's json tag into its name and
+// comma-separated options.
+func parseTag(tag string) (string, tagOptions) {
+	if idx := strings.Index(tag, ","); idx != -1 {
+		return tag[:idx], tagOptions(tag[idx+1:])
+	}
+	return tag, tagOptions("")
+}
+
+// Contains reports whether a comma-separated list of options
+// contains a particular substr flag. substr must be surrounded by a
+// string boundary or commas.
+func (o tagOptions) Contains(optionName string) bool {
+	if len(o) == 0 {
+		return false
+	}
+	s := string(o)
+	for s != "" {
+		var next string
+		i := strings.Index(s, ",")
+		if i >= 0 {
+			s, next = s[:i], s[i+1:]
+		}
+		if s == optionName {
+			return true
+		}
+		s = next
+	}
+	return false
+}
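A minimal sketch (not from this commit) of what LookupPatchMetadata recovers from struct tags; the PodSpec and Container types below are illustrative stand-ins, not the real Kubernetes types:

```go
package main

import (
	"fmt"
	"reflect"

	forkedjson "k8s.io/apimachinery/third_party/forked/golang/json"
)

// Hypothetical types carrying the patchStrategy/patchMergeKey tags that
// strategic merge patch reads via this forked json package.
type PodSpec struct {
	Containers []Container `json:"containers" patchStrategy:"merge" patchMergeKey:"name"`
}

type Container struct {
	Name string `json:"name"`
}

func main() {
	elemType, strategies, mergeKey, err := forkedjson.LookupPatchMetadata(reflect.TypeOf(PodSpec{}), "containers")
	if err != nil {
		panic(err)
	}
	fmt.Println(elemType, strategies, mergeKey) // []main.Container [merge] name
}
```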