Merge pull request #1510 from skriss/1.14-deps

upgrade to kubernetes 1.14 dependencies
commit ddb335475b
Gopkg.lock

@@ -17,7 +17,7 @@
   version = "v0.11.0"

 [[projects]]
-  digest = "1:5b71d15be52cbb93f5115f51ace93798204f6b4a3df0992d0b6da8644f505984"
+  digest = "1:623dad7b6ddc6b93f983e9852a0785ed606f804d3541fa4b6178d7055b361306"
   name = "github.com/Azure/azure-sdk-for-go"
   packages = [
     "services/compute/mgmt/2018-04-01/compute",

@@ -26,11 +26,11 @@
     "version",
   ]
   pruneopts = "NUT"
-  revision = "520918e6c8e8e1064154f51d13e02fad92b287b8"
-  version = "v19.0.0"
+  revision = "32916f57ad7b421f5fdaab86b73a795632fff117"
+  version = "v21.4.0"

 [[projects]]
-  digest = "1:b825d8578481c8877ff3b9a3654d77a48577cc33e65f33c3678d7e3f134bf73d"
+  digest = "1:90df11ad9349a69d46e08211d47eb8db80311bf985a447dd88cb30d5b5f54add"
   name = "github.com/Azure/go-autorest"
   packages = [
     "autorest",

@@ -39,11 +39,12 @@
     "autorest/date",
     "autorest/to",
     "autorest/validation",
+    "logger",
     "version",
   ]
   pruneopts = "NUT"
-  revision = "bca49d5b51a50dc5bb17bbf6204c711c6dbded06"
-  version = "v10.14.0"
+  revision = "1ffcc8896ef6dfe022d90a4317d866f925cf0f9e"
+  version = "v11.1.2"

 [[projects]]
   digest = "1:f41188abdb95b92995643a927f5bdd208389822a8e1aba00d85633ae51b85c85"

@@ -128,14 +129,6 @@
   pruneopts = "NUT"
   revision = "944e07253867aacae43c04b2e6a239005443f33a"

-[[projects]]
-  digest = "1:81466b4218bf6adddac2572a30ac733a9255919bc2f470b4827a317bd4ee1756"
-  name = "github.com/ghodss/yaml"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "0ca9ea5df5451ffdf184b4428c902747c2c11cd7"
-  version = "v1.0.0"
-
 [[projects]]
   digest = "1:021d6ee454d87208dd1cd731cd702d3521aa8a51ad2072fa7beffbb3d677d8bb"
   name = "github.com/go-ini/ini"

@@ -178,14 +171,6 @@
   revision = "b5d812f8a3706043e23a9cd5babf2e5423744d30"
   version = "v1.3.1"

-[[projects]]
-  branch = "master"
-  digest = "1:245bd4eb633039cd66106a5d340ae826d87f4e36a8602fcc940e14176fd26ea7"
-  name = "github.com/google/btree"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "e89373fe6b4a7413d7acd6da1725b83ef713e6e4"
-
 [[projects]]
   branch = "master"
   digest = "1:52c5834e2bebac9030c97cc0798ac11c3aa8a39f098aeb419f142533da6cd3cc"

@@ -214,17 +199,6 @@
   revision = "ee43cbb60db7bd22502942cccbc39059117352ab"
   version = "v0.1.0"

-[[projects]]
-  branch = "master"
-  digest = "1:7fdf3223c7372d1ced0b98bf53457c5e89d89aecbad9a77ba9fcc6e01f9e5621"
-  name = "github.com/gregjones/httpcache"
-  packages = [
-    ".",
-    "diskcache",
-  ]
-  pruneopts = "NUT"
-  revision = "9cad4c3443a7200dd6400aef47183728de563a38"
-
 [[projects]]
   branch = "master"
   digest = "1:32e5a56c443b5581e4bf6e74cdc78b5826d7e4c5df43883e2dc31e4d7f4ae98a"

@@ -294,12 +268,20 @@
   version = "v1.3.0"

 [[projects]]
-  digest = "1:8e36686e8b139f8fe240c1d5cf3a145bc675c22ff8e707857cdd3ae17b00d728"
+  digest = "1:0243cffa4a3410f161ee613dfdd903a636d07e838a42d341da95d81f42cd1d41"
   name = "github.com/json-iterator/go"
   packages = ["."]
   pruneopts = "NUT"
-  revision = "1624edc4454b8682399def8740d46db5e4362ba4"
-  version = "v1.1.5"
+  revision = "ab8a2e0c74be9d3be70b3184d9acc634935ded82"
+  version = "1.1.4"
+
+[[projects]]
+  branch = "master"
+  digest = "1:e94e69261097d7067fa28052bcf209be4c47b12c665b7e88116c96f905a77364"
+  name = "github.com/liggitt/tabwriter"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "89fcab3d43de07060e4fd4c1547430ed57e87f24"

 [[projects]]
   digest = "1:13ada91f079028d1b4ca88e10a16439dcfa6541d26ed2e61e770f56d06301933"

@@ -349,22 +331,6 @@
   revision = "4dadeb3030eda0273a12382bb2348ffc7c9d1a39"
   version = "v1.0.0"

-[[projects]]
-  branch = "master"
-  digest = "1:3bf17a6e6eaa6ad24152148a631d18662f7212e21637c2699bff3369b7f00fa2"
-  name = "github.com/petar/GoLLRB"
-  packages = ["llrb"]
-  pruneopts = "NUT"
-  revision = "53be0d36a84c2a886ca057d34b6aa4468df9ccb4"
-
-[[projects]]
-  digest = "1:6c6d91dc326ed6778783cff869c49fb2f61303cdd2ebbcf90abe53505793f3b6"
-  name = "github.com/peterbourgon/diskv"
-  packages = ["."]
-  pruneopts = "NUT"
-  revision = "5f041e8faa004a95c88a202771f4cc3e991971e6"
-  version = "v2.0.1"
-
 [[projects]]
   digest = "1:5cf3f025cbee5951a4ee961de067c8a89fc95a5adabead774f82822efabab121"
   name = "github.com/pkg/errors"

@@ -569,18 +535,12 @@

 [[projects]]
   branch = "master"
-  digest = "1:0f6792185947c44cd78bc6a2f4399c44c7e85d406b3229a27d41f6cd0a8e982b"
+  digest = "1:97337ef8cb438f9e3a99ea91a300e916ed9a96fbf3ad50f9a020d30ea9f8692f"
   name = "golang.org/x/text"
   packages = [
     "encoding",
     "encoding/internal",
     "encoding/internal/identifier",
     "encoding/unicode",
     "internal/gen",
     "internal/triegen",
     "internal/ucd",
     "internal/utf8internal",
     "runes",
     "secure/bidirule",
     "transform",
     "unicode/bidi",

@@ -709,15 +669,14 @@
   revision = "eb3733d160e74a9c7e442f435eb3bea458e1d19f"

 [[projects]]
-  digest = "1:93e9a6515f47aaaf7f1c84617fc8c82db9216f7290c4d4149afeaf6936d9aa5e"
+  digest = "1:a937ed4322409fa22924f02124fd0727c19662f73cf15406646d19bdce972df2"
   name = "k8s.io/api"
   packages = [
     "admission/v1beta1",
-    "admissionregistration/v1alpha1",
     "admissionregistration/v1beta1",
     "apps/v1",
     "apps/v1beta1",
     "apps/v1beta2",
+    "auditregistration/v1alpha1",
     "authentication/v1",
     "authentication/v1beta1",
     "authorization/v1",

@@ -729,16 +688,20 @@
     "batch/v1beta1",
     "batch/v2alpha1",
     "certificates/v1beta1",
+    "coordination/v1",
     "coordination/v1beta1",
     "core/v1",
     "events/v1beta1",
     "extensions/v1beta1",
     "imagepolicy/v1alpha1",
     "networking/v1",
+    "networking/v1beta1",
+    "node/v1alpha1",
+    "node/v1beta1",
     "policy/v1beta1",
     "rbac/v1",
     "rbac/v1alpha1",
     "rbac/v1beta1",
+    "scheduling/v1",
     "scheduling/v1alpha1",
     "scheduling/v1beta1",
     "settings/v1alpha1",

@@ -747,22 +710,22 @@
     "storage/v1beta1",
   ]
   pruneopts = "NUT"
-  revision = "fd83cbc87e7632ccd8bbab63d2b673d4e0c631cc"
-  version = "kubernetes-1.12.0"
+  revision = "40a48860b5abbba9aa891b02b32da429b08d96a0"
+  version = "kubernetes-1.14.0"

 [[projects]]
-  digest = "1:b8a1dcc5f4e559b7af185ba12dd341cb8c175ea3d36227a02699b251ae5fde05"
+  digest = "1:1d6160800196e00fc394f13ca8c1c0cdc360a170c1b6a9db0f0a1f9f1c4e9342"
   name = "k8s.io/apiextensions-apiserver"
   packages = [
     "pkg/apis/apiextensions",
     "pkg/apis/apiextensions/v1beta1",
   ]
   pruneopts = "NUT"
-  revision = "1748dfb29e8a4432b78514bc88a1b07937a9805a"
-  version = "kubernetes-1.12.0"
+  revision = "53c4693659ed354d76121458fb819202dd1635fa"
+  version = "kubernetes-1.14.0"

 [[projects]]
-  digest = "1:ca279c0bb7a72618aff5b77440d5a5e2f92857fdb7e0e4c7a1a77a7895929c49"
+  digest = "1:f249ae79e492647bb0640d656ccf70fd272359a75a35ac5b9748bd19ac42c1f0"
   name = "k8s.io/apimachinery"
   packages = [
     "pkg/api/equality",

@@ -772,7 +735,6 @@
     "pkg/apis/meta/internalversion",
     "pkg/apis/meta/v1",
     "pkg/apis/meta/v1/unstructured",
-    "pkg/apis/meta/v1/unstructured/unstructuredscheme",
     "pkg/apis/meta/v1beta1",
     "pkg/conversion",
     "pkg/conversion/queryparams",

@@ -816,23 +778,11 @@
     "third_party/forked/golang/reflect",
   ]
   pruneopts = "NUT"
-  revision = "6dd46049f39503a1fc8d65de4bd566829e95faff"
-  version = "kubernetes-1.12.0"
+  revision = "d7deff9243b165ee192f5551710ea4285dcfd615"
+  version = "kubernetes-1.14.0"

-[[projects]]
-  branch = "release-1.12"
-  digest = "1:7991e5074de01462e0cf6ef77060895b50e9026d16152a6e925cb99b67a1f8ae"
-  name = "k8s.io/cli-runtime"
-  packages = [
-    "pkg/genericclioptions",
-    "pkg/genericclioptions/printers",
-    "pkg/genericclioptions/resource",
-  ]
-  pruneopts = "NUT"
-  revision = "11047e25a94a7eaa541b92a8bbfd3e1243607219"
-
 [[projects]]
-  digest = "1:5d9f76731330e62bede1e4eb9d519b282a26621a5368e5db1a18a8eb1ccda1ff"
+  digest = "1:99bbba82ebf3c42acee133a855cb77431980d2eb0c7ef786661fee60df047e69"
   name = "k8s.io/client-go"
   packages = [
     "discovery",

@@ -840,12 +790,13 @@
     "dynamic",
     "informers",
     "informers/admissionregistration",
-    "informers/admissionregistration/v1alpha1",
     "informers/admissionregistration/v1beta1",
     "informers/apps",
     "informers/apps/v1",
     "informers/apps/v1beta1",
     "informers/apps/v1beta2",
+    "informers/auditregistration",
+    "informers/auditregistration/v1alpha1",
     "informers/autoscaling",
     "informers/autoscaling/v1",
     "informers/autoscaling/v2beta1",

@@ -857,6 +808,7 @@
     "informers/certificates",
     "informers/certificates/v1beta1",
     "informers/coordination",
+    "informers/coordination/v1",
     "informers/coordination/v1beta1",
     "informers/core",
     "informers/core/v1",

@@ -867,6 +819,10 @@
     "informers/internalinterfaces",
     "informers/networking",
     "informers/networking/v1",
+    "informers/networking/v1beta1",
+    "informers/node",
+    "informers/node/v1alpha1",
+    "informers/node/v1beta1",
     "informers/policy",
     "informers/policy/v1beta1",
     "informers/rbac",

@@ -874,6 +830,7 @@
     "informers/rbac/v1alpha1",
     "informers/rbac/v1beta1",
     "informers/scheduling",
+    "informers/scheduling/v1",
     "informers/scheduling/v1alpha1",
     "informers/scheduling/v1beta1",
     "informers/settings",

@@ -884,11 +841,11 @@
     "informers/storage/v1beta1",
     "kubernetes",
     "kubernetes/scheme",
-    "kubernetes/typed/admissionregistration/v1alpha1",
     "kubernetes/typed/admissionregistration/v1beta1",
     "kubernetes/typed/apps/v1",
     "kubernetes/typed/apps/v1beta1",
     "kubernetes/typed/apps/v1beta2",
+    "kubernetes/typed/auditregistration/v1alpha1",
     "kubernetes/typed/authentication/v1",
     "kubernetes/typed/authentication/v1beta1",
     "kubernetes/typed/authorization/v1",

@@ -900,26 +857,31 @@
     "kubernetes/typed/batch/v1beta1",
     "kubernetes/typed/batch/v2alpha1",
     "kubernetes/typed/certificates/v1beta1",
+    "kubernetes/typed/coordination/v1",
     "kubernetes/typed/coordination/v1beta1",
     "kubernetes/typed/core/v1",
     "kubernetes/typed/events/v1beta1",
     "kubernetes/typed/extensions/v1beta1",
     "kubernetes/typed/networking/v1",
+    "kubernetes/typed/networking/v1beta1",
+    "kubernetes/typed/node/v1alpha1",
+    "kubernetes/typed/node/v1beta1",
     "kubernetes/typed/policy/v1beta1",
     "kubernetes/typed/rbac/v1",
     "kubernetes/typed/rbac/v1alpha1",
     "kubernetes/typed/rbac/v1beta1",
+    "kubernetes/typed/scheduling/v1",
     "kubernetes/typed/scheduling/v1alpha1",
     "kubernetes/typed/scheduling/v1beta1",
     "kubernetes/typed/settings/v1alpha1",
     "kubernetes/typed/storage/v1",
     "kubernetes/typed/storage/v1alpha1",
     "kubernetes/typed/storage/v1beta1",
-    "listers/admissionregistration/v1alpha1",
     "listers/admissionregistration/v1beta1",
     "listers/apps/v1",
     "listers/apps/v1beta1",
     "listers/apps/v1beta2",
+    "listers/auditregistration/v1alpha1",
     "listers/autoscaling/v1",
     "listers/autoscaling/v2beta1",
     "listers/autoscaling/v2beta2",

@@ -927,15 +889,20 @@
     "listers/batch/v1beta1",
     "listers/batch/v2alpha1",
     "listers/certificates/v1beta1",
+    "listers/coordination/v1",
     "listers/coordination/v1beta1",
     "listers/core/v1",
     "listers/events/v1beta1",
     "listers/extensions/v1beta1",
     "listers/networking/v1",
+    "listers/networking/v1beta1",
+    "listers/node/v1alpha1",
+    "listers/node/v1beta1",
     "listers/policy/v1beta1",
     "listers/rbac/v1",
     "listers/rbac/v1alpha1",
     "listers/rbac/v1beta1",
+    "listers/scheduling/v1",
     "listers/scheduling/v1alpha1",
     "listers/scheduling/v1beta1",
     "listers/settings/v1alpha1",

@@ -967,20 +934,27 @@
     "tools/remotecommand",
     "transport",
     "transport/spdy",
-    "util/buffer",
     "util/cert",
     "util/connrotation",
     "util/exec",
     "util/flowcontrol",
     "util/homedir",
-    "util/integer",
     "util/jsonpath",
+    "util/keyutil",
     "util/retry",
     "util/workqueue",
   ]
   pruneopts = "NUT"
-  revision = "1638f8970cefaa404ff3a62950f88b08292b2696"
-  version = "v9.0.0"
+  revision = "6ee68ca5fd8355d024d02f9db0b3b667e8357a0f"
+  version = "kubernetes-1.14.0"
+
+[[projects]]
+  digest = "1:2c16dda1c44c2564a7818fbacb701323c16d77c21b969987c1bec08d3ee0b050"
+  name = "k8s.io/klog"
+  packages = ["."]
+  pruneopts = "NUT"
+  revision = "e531227889390a39d9533dde61f590fe9f4b0035"
+  version = "v0.3.0"

 [[projects]]
   branch = "master"

@@ -991,15 +965,32 @@
   revision = "d83b052f768a50a309c692a9c271da3f3276ff88"

 [[projects]]
-  digest = "1:8a9b1e755afd7ea778cd451a955977eb3fe0abcc4e32079644b6b7afc42d7ff8"
+  digest = "1:c2ad4e18f35cf651af430e4115e9d26bdd266e61b4076cb76d23a15078c5d58e"
   name = "k8s.io/kubernetes"
-  packages = [
-    "pkg/kubectl/scheme",
-    "pkg/printers",
-  ]
+  packages = ["pkg/printers"]
   pruneopts = "NUT"
-  revision = "51dd616cdd25d6ee22c83a858773b607328a18ec"
-  version = "v1.12.5"
+  revision = "b7394102d6ef778017f2ca4046abbaa23b88c290"
+  version = "v1.14.1"
+
+[[projects]]
+  branch = "master"
+  digest = "1:14e8a3b53e6d8cb5f44783056b71bb2ca1ac7e333939cc97f3e50b579c920845"
+  name = "k8s.io/utils"
+  packages = [
+    "buffer",
+    "integer",
+    "trace",
+  ]
+  pruneopts = "NUT"
+  revision = "21c4ce38f2a793ec01e925ddc31216500183b773"

 [[projects]]
   digest = "1:8730e0150dfb2b7e173890c8b9868e7a273082ef8e39f4940e3506a481cf895c"
   name = "sigs.k8s.io/yaml"
   packages = ["."]
   pruneopts = "NUT"
   revision = "fd68e9863619f6ec2fdd8625fe1f02e7c877e480"
   version = "v1.1.0"

 [solve-meta]
   analyzer-name = "dep"

@@ -1029,6 +1020,7 @@
     "github.com/hashicorp/go-hclog",
     "github.com/hashicorp/go-plugin",
     "github.com/joho/godotenv",
+    "github.com/json-iterator/go",
     "github.com/pkg/errors",
     "github.com/prometheus/client_golang/prometheus",
     "github.com/prometheus/client_golang/prometheus/promhttp",

@@ -1069,6 +1061,7 @@
     "k8s.io/apimachinery/pkg/runtime/serializer",
     "k8s.io/apimachinery/pkg/types",
     "k8s.io/apimachinery/pkg/util/clock",
     "k8s.io/apimachinery/pkg/util/diff",
+    "k8s.io/apimachinery/pkg/util/duration",
     "k8s.io/apimachinery/pkg/util/errors",
     "k8s.io/apimachinery/pkg/util/runtime",
Gopkg.toml

@@ -31,28 +31,28 @@

 [[constraint]]
   name = "k8s.io/kubernetes"
-  version = "~1.12"
+  version = "~1.14"

 [[constraint]]
   name = "k8s.io/client-go"
-  version = "~9.0"
+  version = "kubernetes-1.14.0"

 [[constraint]]
   name = "k8s.io/apimachinery"
-  version = "kubernetes-1.12.0"
+  version = "kubernetes-1.14.0"

 [[constraint]]
   name = "k8s.io/api"
-  version = "kubernetes-1.12.0"
+  version = "kubernetes-1.14.0"

 [[constraint]]
   name = "k8s.io/apiextensions-apiserver"
-  version = "kubernetes-1.12.0"
+  version = "kubernetes-1.14.0"

-# k8s.io/client-go v9.0 uses f2b4162afba35581b6d4a50d3b8f34e33c144682 (released in v1.1.4)
+# k8s.io/client-go kubernetes-1.14.0 uses v1.1.4
 [[override]]
   name = "github.com/json-iterator/go"
-  version = "~1.1.4"
+  version = "=1.1.4"

 #
 # Cloud provider packages

@@ -63,12 +63,12 @@

 [[constraint]]
   name = "github.com/Azure/azure-sdk-for-go"
-  version = "~19.0.0"
+  version = "~21.4.0"

-# k8s.io/client-go v9.0 uses bca49d5b51a50dc5bb17bbf6204c711c6dbded06 (v10.14.0)
+# k8s.io/client-go kubernetes-1.14.0 uses v11.1.2
 [[constraint]]
   name = "github.com/Azure/go-autorest"
-  version = "~10.14.0"
+  version = "11.1.2"

 [[constraint]]
   name = "cloud.google.com/go"
@@ -17,8 +17,8 @@ FROM golang:1.12
 RUN mkdir -p /go/src/k8s.io && \
     cd /go/src/k8s.io && \
     git config --global advice.detachedHead false && \
-    git clone -b kubernetes-1.12.0 https://github.com/kubernetes/code-generator && \
-    git clone -b kubernetes-1.12.0 https://github.com/kubernetes/apimachinery && \
+    git clone -b kubernetes-1.14.0 https://github.com/kubernetes/code-generator && \
+    git clone -b kubernetes-1.14.0 https://github.com/kubernetes/apimachinery && \
    go get golang.org/x/tools/cmd/goimports && \
    cd /go/src/golang.org/x/tools && \
    git checkout 40a48ad93fbe707101afb2099b738471f70594ec && \
@@ -114,5 +114,5 @@ func (d *dynamicResourceClient) Get(name string, opts metav1.GetOptions) (*unstr
 }

 func (d *dynamicResourceClient) Patch(name string, data []byte) (*unstructured.Unstructured, error) {
-    return d.resourceClient.Patch(name, types.MergePatchType, data, metav1.UpdateOptions{})
+    return d.resourceClient.Patch(name, types.MergePatchType, data, metav1.PatchOptions{})
 }
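For context, a hedged caller-side sketch of the signature this adapts to: in client-go kubernetes-1.14.0 the dynamic client's Patch takes metav1.PatchOptions where it previously took metav1.UpdateOptions. The GVR, namespace, and object names below are illustrative.

package example

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/dynamic"
    "k8s.io/client-go/rest"
)

// patchBackupLabels issues a merge patch through the 1.14 dynamic client.
func patchBackupLabels(cfg *rest.Config) error {
    client, err := dynamic.NewForConfig(cfg)
    if err != nil {
        return err
    }
    gvr := schema.GroupVersionResource{Group: "velero.io", Version: "v1", Resource: "backups"}
    patch := []byte(`{"metadata":{"labels":{"example":"label"}}}`)
    // metav1.PatchOptions is the new final argument in kubernetes-1.14.0.
    _, err = client.Resource(gvr).Namespace("velero").Patch(
        "some-backup", types.MergePatchType, patch, metav1.PatchOptions{})
    return err
}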
@@ -28,6 +28,7 @@ import (
     apierrors "k8s.io/apimachinery/pkg/api/errors"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
+    "k8s.io/apimachinery/pkg/runtime"
     "k8s.io/apimachinery/pkg/types"
     "k8s.io/apimachinery/pkg/util/clock"
     "k8s.io/apimachinery/pkg/util/sets"
     core "k8s.io/client-go/testing"

@@ -122,13 +123,16 @@ type backupDeletionControllerTestData struct {
 }

 func setupBackupDeletionControllerTest(objects ...runtime.Object) *backupDeletionControllerTestData {
+    req := pkgbackup.NewDeleteBackupRequest("foo", "uid")
+    req.Namespace = "velero"
+    req.Name = "foo-abcde"
+
     var (
-        client            = fake.NewSimpleClientset(objects...)
+        client            = fake.NewSimpleClientset(append(objects, req)...)
         sharedInformers   = informers.NewSharedInformerFactory(client, 0)
         volumeSnapshotter = &velerotest.FakeVolumeSnapshotter{SnapshotsTaken: sets.NewString()}
         pluginManager     = &pluginmocks.Manager{}
         backupStore       = &persistencemocks.BackupStore{}
-        req               = pkgbackup.NewDeleteBackupRequest("foo", "uid")
     )

     data := &backupDeletionControllerTestData{

@@ -161,9 +165,6 @@ func setupBackupDeletionControllerTest(objects ...runtime.Object) *backupDeletio
     pluginManager.On("CleanupClients").Return(nil)

-    req.Namespace = "velero"
-    req.Name = "foo-abcde"
-
     return data
 }

@@ -181,6 +182,7 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) {
             v1.SchemeGroupVersion.WithResource("deletebackuprequests"),
             td.req.Namespace,
             td.req.Name,
+            types.MergePatchType,
             []byte(`{"status":{"errors":["spec.backupName is required"],"phase":"Processed"}}`),
         ),
     }

@@ -256,6 +258,7 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) {
             v1.SchemeGroupVersion.WithResource("deletebackuprequests"),
             td.req.Namespace,
             td.req.Name,
+            types.MergePatchType,
             []byte(`{"status":{"errors":["backup is still in progress"],"phase":"Processed"}}`),
         ),
     }

@@ -308,6 +311,7 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) {
             v1.SchemeGroupVersion.WithResource("deletebackuprequests"),
             td.req.Namespace,
             td.req.Name,
+            types.MergePatchType,
             []byte(`{"status":{"phase":"InProgress"}}`),
         ),
         core.NewGetAction(

@@ -319,6 +323,7 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) {
             v1.SchemeGroupVersion.WithResource("deletebackuprequests"),
             td.req.Namespace,
             td.req.Name,
+            types.MergePatchType,
             []byte(`{"status":{"errors":["backup not found"],"phase":"Processed"}}`),
         ),
     }

@@ -413,6 +418,7 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) {
             v1.SchemeGroupVersion.WithResource("deletebackuprequests"),
             td.req.Namespace,
             td.req.Name,
+            types.MergePatchType,
             []byte(`{"metadata":{"labels":{"velero.io/backup-name":"foo"}},"status":{"phase":"InProgress"}}`),
         ),
         core.NewGetAction(

@@ -424,12 +430,14 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) {
             v1.SchemeGroupVersion.WithResource("deletebackuprequests"),
             td.req.Namespace,
             td.req.Name,
+            types.MergePatchType,
             []byte(`{"metadata":{"labels":{"velero.io/backup-uid":"uid"}}}`),
         ),
         core.NewPatchAction(
             v1.SchemeGroupVersion.WithResource("backups"),
             td.req.Namespace,
             td.req.Spec.BackupName,
+            types.MergePatchType,
             []byte(`{"status":{"phase":"Deleting"}}`),
         ),
         core.NewDeleteAction(

@@ -451,6 +459,7 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) {
             v1.SchemeGroupVersion.WithResource("deletebackuprequests"),
             td.req.Namespace,
             td.req.Name,
+            types.MergePatchType,
             []byte(`{"status":{"phase":"Processed"}}`),
         ),
         core.NewDeleteCollectionAction(

@@ -558,6 +567,7 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) {
             v1.SchemeGroupVersion.WithResource("deletebackuprequests"),
             td.req.Namespace,
             td.req.Name,
+            types.MergePatchType,
             []byte(`{"metadata":{"labels":{"velero.io/backup-name":"the-really-long-backup-name-that-is-much-more-than-63-cha6ca4bc"}},"status":{"phase":"InProgress"}}`),
         ),
         core.NewGetAction(

@@ -569,12 +579,14 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) {
             v1.SchemeGroupVersion.WithResource("deletebackuprequests"),
             td.req.Namespace,
             td.req.Name,
+            types.MergePatchType,
             []byte(`{"metadata":{"labels":{"velero.io/backup-uid":"uid"}}}`),
         ),
         core.NewPatchAction(
             v1.SchemeGroupVersion.WithResource("backups"),
             td.req.Namespace,
             td.req.Spec.BackupName,
+            types.MergePatchType,
             []byte(`{"status":{"phase":"Deleting"}}`),
         ),
         core.NewDeleteAction(

@@ -596,6 +608,7 @@ func TestBackupDeletionControllerProcessRequest(t *testing.T) {
             v1.SchemeGroupVersion.WithResource("deletebackuprequests"),
             td.req.Namespace,
             td.req.Name,
+            types.MergePatchType,
             []byte(`{"status":{"phase":"Processed"}}`),
         ),
         core.NewDeleteCollectionAction(
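A note on the reshuffled setup above: the DeleteBackupRequest is now constructed first and seeded into the fake clientset. A minimal sketch of the resulting wiring, assuming the test file's import aliases (pkgbackup, fake) and that the 1.14 fake clientset resolves the asserted Get/Patch actions against its object tracker:

package controller

import (
    "k8s.io/apimachinery/pkg/runtime"

    pkgbackup "github.com/heptio/velero/pkg/backup"
    "github.com/heptio/velero/pkg/generated/clientset/versioned/fake"
)

// newSeededClientset mirrors the hunk above: the request is built up front
// and passed to fake.NewSimpleClientset so later Get/Patch calls can find it.
func newSeededClientset(objects ...runtime.Object) *fake.Clientset {
    req := pkgbackup.NewDeleteBackupRequest("foo", "uid") // values from the test
    req.Namespace = "velero"
    req.Name = "foo-abcde"
    return fake.NewSimpleClientset(append(objects, req)...)
}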
@@ -28,8 +28,6 @@ import (
 type Interface interface {
     Discovery() discovery.DiscoveryInterface
     VeleroV1() velerov1.VeleroV1Interface
-    // Deprecated: please explicitly pick a version if possible.
-    Velero() velerov1.VeleroV1Interface
 }

 // Clientset contains the clients for groups. Each group has exactly one

@@ -44,12 +42,6 @@ func (c *Clientset) VeleroV1() velerov1.VeleroV1Interface {
     return c.veleroV1
 }

-// Deprecated: Velero retrieves the default version of VeleroClient.
-// Please explicitly pick a version.
-func (c *Clientset) Velero() velerov1.VeleroV1Interface {
-    return c.veleroV1
-}
-
 // Discovery retrieves the DiscoveryClient
 func (c *Clientset) Discovery() discovery.DiscoveryInterface {
     if c == nil {

@@ -75,8 +75,3 @@ var _ clientset.Interface = &Clientset{}
 func (c *Clientset) VeleroV1() velerov1.VeleroV1Interface {
     return &fakevelerov1.FakeVeleroV1{Fake: &c.Fake}
 }
-
-// Velero retrieves the VeleroV1Client
-func (c *Clientset) Velero() velerov1.VeleroV1Interface {
-    return &fakevelerov1.FakeVeleroV1{Fake: &c.Fake}
-}
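With the deprecated unversioned accessor removed, callers name the API version explicitly. A minimal migration sketch, assuming the generated clientset package path (object names are illustrative):

package example

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    clientset "github.com/heptio/velero/pkg/generated/clientset/versioned"
)

// getBackup uses VeleroV1() directly; code that previously called the
// deprecated Velero() accessor changes only that one call site.
func getBackup(c clientset.Interface) error {
    _, err := c.VeleroV1().Backups("velero").Get("some-backup", metav1.GetOptions{})
    return err
}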
@@ -19,6 +19,8 @@ limitations under the License.
 package v1

 import (
+    "time"
+
     v1 "github.com/heptio/velero/pkg/apis/velero/v1"
     scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -76,11 +78,16 @@ func (c *backups) Get(name string, options metav1.GetOptions) (result *v1.Backup

 // List takes label and field selectors, and returns the list of Backups that match those selectors.
 func (c *backups) List(opts metav1.ListOptions) (result *v1.BackupList, err error) {
+    var timeout time.Duration
+    if opts.TimeoutSeconds != nil {
+        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+    }
     result = &v1.BackupList{}
     err = c.client.Get().
         Namespace(c.ns).
         Resource("backups").
         VersionedParams(&opts, scheme.ParameterCodec).
+        Timeout(timeout).
         Do().
         Into(result)
     return

@@ -88,11 +95,16 @@ func (c *backups) List(opts metav1.ListOptions) (result *v1.BackupList, err erro

 // Watch returns a watch.Interface that watches the requested backups.
 func (c *backups) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+    var timeout time.Duration
+    if opts.TimeoutSeconds != nil {
+        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+    }
     opts.Watch = true
     return c.client.Get().
         Namespace(c.ns).
         Resource("backups").
         VersionedParams(&opts, scheme.ParameterCodec).
+        Timeout(timeout).
         Watch()
 }

@@ -150,10 +162,15 @@ func (c *backups) Delete(name string, options *metav1.DeleteOptions) error {

 // DeleteCollection deletes a collection of objects.
 func (c *backups) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+    var timeout time.Duration
+    if listOptions.TimeoutSeconds != nil {
+        timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+    }
     return c.client.Delete().
         Namespace(c.ns).
         Resource("backups").
         VersionedParams(&listOptions, scheme.ParameterCodec).
+        Timeout(timeout).
         Body(options).
         Do().
         Error()
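The same List/Watch/DeleteCollection timeout plumbing recurs in every regenerated typed client below; backups is shown in full above and the rest follow the identical pattern. As a caller-side sketch of what it enables (the generated typed-client package path is assumed), a ListOptions.TimeoutSeconds value now becomes a timeout on the underlying REST request:

package example

import (
    metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

    velerov1 "github.com/heptio/velero/pkg/generated/clientset/versioned/typed/velero/v1"
)

// listWithTimeout bounds the List call via TimeoutSeconds, which the
// regenerated client now translates into Timeout() on the REST request.
func listWithTimeout(c velerov1.VeleroV1Interface) error {
    ttl := int64(30)
    _, err := c.Backups("velero").List(metav1.ListOptions{TimeoutSeconds: &ttl})
    return err
}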
@@ -19,6 +19,8 @@ limitations under the License.
 package v1

 import (
+    "time"
+
     v1 "github.com/heptio/velero/pkg/apis/velero/v1"
     scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -76,11 +78,16 @@ func (c *backupStorageLocations) Get(name string, options metav1.GetOptions) (re

 // List takes label and field selectors, and returns the list of BackupStorageLocations that match those selectors.
 func (c *backupStorageLocations) List(opts metav1.ListOptions) (result *v1.BackupStorageLocationList, err error) {
+    var timeout time.Duration
+    if opts.TimeoutSeconds != nil {
+        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+    }
     result = &v1.BackupStorageLocationList{}
     err = c.client.Get().
         Namespace(c.ns).
         Resource("backupstoragelocations").
         VersionedParams(&opts, scheme.ParameterCodec).
+        Timeout(timeout).
         Do().
         Into(result)
     return

@@ -88,11 +95,16 @@ func (c *backupStorageLocations) List(opts metav1.ListOptions) (result *v1.Backu

 // Watch returns a watch.Interface that watches the requested backupStorageLocations.
 func (c *backupStorageLocations) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+    var timeout time.Duration
+    if opts.TimeoutSeconds != nil {
+        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+    }
     opts.Watch = true
     return c.client.Get().
         Namespace(c.ns).
         Resource("backupstoragelocations").
         VersionedParams(&opts, scheme.ParameterCodec).
+        Timeout(timeout).
         Watch()
 }

@@ -150,10 +162,15 @@ func (c *backupStorageLocations) Delete(name string, options *metav1.DeleteOptio

 // DeleteCollection deletes a collection of objects.
 func (c *backupStorageLocations) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+    var timeout time.Duration
+    if listOptions.TimeoutSeconds != nil {
+        timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+    }
     return c.client.Delete().
         Namespace(c.ns).
         Resource("backupstoragelocations").
         VersionedParams(&listOptions, scheme.ParameterCodec).
+        Timeout(timeout).
         Body(options).
         Do().
         Error()
@@ -19,6 +19,8 @@ limitations under the License.
 package v1

 import (
+    "time"
+
     v1 "github.com/heptio/velero/pkg/apis/velero/v1"
     scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -76,11 +78,16 @@ func (c *deleteBackupRequests) Get(name string, options metav1.GetOptions) (resu

 // List takes label and field selectors, and returns the list of DeleteBackupRequests that match those selectors.
 func (c *deleteBackupRequests) List(opts metav1.ListOptions) (result *v1.DeleteBackupRequestList, err error) {
+    var timeout time.Duration
+    if opts.TimeoutSeconds != nil {
+        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+    }
     result = &v1.DeleteBackupRequestList{}
     err = c.client.Get().
         Namespace(c.ns).
         Resource("deletebackuprequests").
         VersionedParams(&opts, scheme.ParameterCodec).
+        Timeout(timeout).
         Do().
         Into(result)
     return

@@ -88,11 +95,16 @@ func (c *deleteBackupRequests) List(opts metav1.ListOptions) (result *v1.DeleteB

 // Watch returns a watch.Interface that watches the requested deleteBackupRequests.
 func (c *deleteBackupRequests) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+    var timeout time.Duration
+    if opts.TimeoutSeconds != nil {
+        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+    }
     opts.Watch = true
     return c.client.Get().
         Namespace(c.ns).
         Resource("deletebackuprequests").
         VersionedParams(&opts, scheme.ParameterCodec).
+        Timeout(timeout).
         Watch()
 }

@@ -150,10 +162,15 @@ func (c *deleteBackupRequests) Delete(name string, options *metav1.DeleteOptions

 // DeleteCollection deletes a collection of objects.
 func (c *deleteBackupRequests) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+    var timeout time.Duration
+    if listOptions.TimeoutSeconds != nil {
+        timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+    }
     return c.client.Delete().
         Namespace(c.ns).
         Resource("deletebackuprequests").
         VersionedParams(&listOptions, scheme.ParameterCodec).
+        Timeout(timeout).
         Body(options).
         Do().
         Error()
@@ -19,6 +19,8 @@ limitations under the License.
 package v1

 import (
+    "time"
+
     v1 "github.com/heptio/velero/pkg/apis/velero/v1"
     scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -76,11 +78,16 @@ func (c *downloadRequests) Get(name string, options metav1.GetOptions) (result *

 // List takes label and field selectors, and returns the list of DownloadRequests that match those selectors.
 func (c *downloadRequests) List(opts metav1.ListOptions) (result *v1.DownloadRequestList, err error) {
+    var timeout time.Duration
+    if opts.TimeoutSeconds != nil {
+        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+    }
     result = &v1.DownloadRequestList{}
     err = c.client.Get().
         Namespace(c.ns).
         Resource("downloadrequests").
         VersionedParams(&opts, scheme.ParameterCodec).
+        Timeout(timeout).
         Do().
         Into(result)
     return

@@ -88,11 +95,16 @@ func (c *downloadRequests) List(opts metav1.ListOptions) (result *v1.DownloadReq

 // Watch returns a watch.Interface that watches the requested downloadRequests.
 func (c *downloadRequests) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+    var timeout time.Duration
+    if opts.TimeoutSeconds != nil {
+        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+    }
     opts.Watch = true
     return c.client.Get().
         Namespace(c.ns).
         Resource("downloadrequests").
         VersionedParams(&opts, scheme.ParameterCodec).
+        Timeout(timeout).
         Watch()
 }

@@ -150,10 +162,15 @@ func (c *downloadRequests) Delete(name string, options *metav1.DeleteOptions) er

 // DeleteCollection deletes a collection of objects.
 func (c *downloadRequests) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+    var timeout time.Duration
+    if listOptions.TimeoutSeconds != nil {
+        timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+    }
     return c.client.Delete().
         Namespace(c.ns).
         Resource("downloadrequests").
         VersionedParams(&listOptions, scheme.ParameterCodec).
+        Timeout(timeout).
         Body(options).
         Do().
         Error()
@@ -131,7 +131,7 @@ func (c *FakeBackups) DeleteCollection(options *v1.DeleteOptions, listOptions v1
 // Patch applies the patch and returns the patched backup.
 func (c *FakeBackups) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *velerov1.Backup, err error) {
     obj, err := c.Fake.
-        Invokes(testing.NewPatchSubresourceAction(backupsResource, c.ns, name, data, subresources...), &velerov1.Backup{})
+        Invokes(testing.NewPatchSubresourceAction(backupsResource, c.ns, name, pt, data, subresources...), &velerov1.Backup{})

     if obj == nil {
         return nil, err
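Every generated fake below changes the same way: the kubernetes-1.14.0 testing package added the patch type to NewPatchSubresourceAction, so the fakes now thread the caller's pt through. A sketch of building the matching expectation (resource and payload are illustrative):

package example

import (
    "k8s.io/apimachinery/pkg/runtime/schema"
    "k8s.io/apimachinery/pkg/types"
    "k8s.io/client-go/testing"
)

// expectedPatchAction constructs the action a test would assert against;
// note the explicit types.MergePatchType argument required since 1.14.
func expectedPatchAction() testing.PatchActionImpl {
    backupsResource := schema.GroupVersionResource{Group: "velero.io", Version: "v1", Resource: "backups"}
    return testing.NewPatchSubresourceAction(
        backupsResource, "velero", "some-backup",
        types.MergePatchType, []byte(`{"status":{"phase":"Processed"}}`))
}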
@@ -131,7 +131,7 @@ func (c *FakeBackupStorageLocations) DeleteCollection(options *v1.DeleteOptions,
 // Patch applies the patch and returns the patched backupStorageLocation.
 func (c *FakeBackupStorageLocations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *velerov1.BackupStorageLocation, err error) {
     obj, err := c.Fake.
-        Invokes(testing.NewPatchSubresourceAction(backupstoragelocationsResource, c.ns, name, data, subresources...), &velerov1.BackupStorageLocation{})
+        Invokes(testing.NewPatchSubresourceAction(backupstoragelocationsResource, c.ns, name, pt, data, subresources...), &velerov1.BackupStorageLocation{})

     if obj == nil {
         return nil, err

@@ -131,7 +131,7 @@ func (c *FakeDeleteBackupRequests) DeleteCollection(options *v1.DeleteOptions, l
 // Patch applies the patch and returns the patched deleteBackupRequest.
 func (c *FakeDeleteBackupRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *velerov1.DeleteBackupRequest, err error) {
     obj, err := c.Fake.
-        Invokes(testing.NewPatchSubresourceAction(deletebackuprequestsResource, c.ns, name, data, subresources...), &velerov1.DeleteBackupRequest{})
+        Invokes(testing.NewPatchSubresourceAction(deletebackuprequestsResource, c.ns, name, pt, data, subresources...), &velerov1.DeleteBackupRequest{})

     if obj == nil {
         return nil, err

@@ -131,7 +131,7 @@ func (c *FakeDownloadRequests) DeleteCollection(options *v1.DeleteOptions, listO
 // Patch applies the patch and returns the patched downloadRequest.
 func (c *FakeDownloadRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *velerov1.DownloadRequest, err error) {
     obj, err := c.Fake.
-        Invokes(testing.NewPatchSubresourceAction(downloadrequestsResource, c.ns, name, data, subresources...), &velerov1.DownloadRequest{})
+        Invokes(testing.NewPatchSubresourceAction(downloadrequestsResource, c.ns, name, pt, data, subresources...), &velerov1.DownloadRequest{})

     if obj == nil {
         return nil, err

@@ -131,7 +131,7 @@ func (c *FakePodVolumeBackups) DeleteCollection(options *v1.DeleteOptions, listO
 // Patch applies the patch and returns the patched podVolumeBackup.
 func (c *FakePodVolumeBackups) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *velerov1.PodVolumeBackup, err error) {
     obj, err := c.Fake.
-        Invokes(testing.NewPatchSubresourceAction(podvolumebackupsResource, c.ns, name, data, subresources...), &velerov1.PodVolumeBackup{})
+        Invokes(testing.NewPatchSubresourceAction(podvolumebackupsResource, c.ns, name, pt, data, subresources...), &velerov1.PodVolumeBackup{})

     if obj == nil {
         return nil, err

@@ -131,7 +131,7 @@ func (c *FakePodVolumeRestores) DeleteCollection(options *v1.DeleteOptions, list
 // Patch applies the patch and returns the patched podVolumeRestore.
 func (c *FakePodVolumeRestores) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *velerov1.PodVolumeRestore, err error) {
     obj, err := c.Fake.
-        Invokes(testing.NewPatchSubresourceAction(podvolumerestoresResource, c.ns, name, data, subresources...), &velerov1.PodVolumeRestore{})
+        Invokes(testing.NewPatchSubresourceAction(podvolumerestoresResource, c.ns, name, pt, data, subresources...), &velerov1.PodVolumeRestore{})

     if obj == nil {
         return nil, err

@@ -131,7 +131,7 @@ func (c *FakeResticRepositories) DeleteCollection(options *v1.DeleteOptions, lis
 // Patch applies the patch and returns the patched resticRepository.
 func (c *FakeResticRepositories) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *velerov1.ResticRepository, err error) {
     obj, err := c.Fake.
-        Invokes(testing.NewPatchSubresourceAction(resticrepositoriesResource, c.ns, name, data, subresources...), &velerov1.ResticRepository{})
+        Invokes(testing.NewPatchSubresourceAction(resticrepositoriesResource, c.ns, name, pt, data, subresources...), &velerov1.ResticRepository{})

     if obj == nil {
         return nil, err

@@ -131,7 +131,7 @@ func (c *FakeRestores) DeleteCollection(options *v1.DeleteOptions, listOptions v
 // Patch applies the patch and returns the patched restore.
 func (c *FakeRestores) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *velerov1.Restore, err error) {
     obj, err := c.Fake.
-        Invokes(testing.NewPatchSubresourceAction(restoresResource, c.ns, name, data, subresources...), &velerov1.Restore{})
+        Invokes(testing.NewPatchSubresourceAction(restoresResource, c.ns, name, pt, data, subresources...), &velerov1.Restore{})

     if obj == nil {
         return nil, err

@@ -131,7 +131,7 @@ func (c *FakeSchedules) DeleteCollection(options *v1.DeleteOptions, listOptions
 // Patch applies the patch and returns the patched schedule.
 func (c *FakeSchedules) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *velerov1.Schedule, err error) {
     obj, err := c.Fake.
-        Invokes(testing.NewPatchSubresourceAction(schedulesResource, c.ns, name, data, subresources...), &velerov1.Schedule{})
+        Invokes(testing.NewPatchSubresourceAction(schedulesResource, c.ns, name, pt, data, subresources...), &velerov1.Schedule{})

     if obj == nil {
         return nil, err

@@ -131,7 +131,7 @@ func (c *FakeServerStatusRequests) DeleteCollection(options *v1.DeleteOptions, l
 // Patch applies the patch and returns the patched serverStatusRequest.
 func (c *FakeServerStatusRequests) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *velerov1.ServerStatusRequest, err error) {
     obj, err := c.Fake.
-        Invokes(testing.NewPatchSubresourceAction(serverstatusrequestsResource, c.ns, name, data, subresources...), &velerov1.ServerStatusRequest{})
+        Invokes(testing.NewPatchSubresourceAction(serverstatusrequestsResource, c.ns, name, pt, data, subresources...), &velerov1.ServerStatusRequest{})

     if obj == nil {
         return nil, err

@@ -131,7 +131,7 @@ func (c *FakeVolumeSnapshotLocations) DeleteCollection(options *v1.DeleteOptions
 // Patch applies the patch and returns the patched volumeSnapshotLocation.
 func (c *FakeVolumeSnapshotLocations) Patch(name string, pt types.PatchType, data []byte, subresources ...string) (result *velerov1.VolumeSnapshotLocation, err error) {
     obj, err := c.Fake.
-        Invokes(testing.NewPatchSubresourceAction(volumesnapshotlocationsResource, c.ns, name, data, subresources...), &velerov1.VolumeSnapshotLocation{})
+        Invokes(testing.NewPatchSubresourceAction(volumesnapshotlocationsResource, c.ns, name, pt, data, subresources...), &velerov1.VolumeSnapshotLocation{})

     if obj == nil {
         return nil, err
@@ -19,6 +19,8 @@ limitations under the License.
 package v1

 import (
+    "time"
+
     v1 "github.com/heptio/velero/pkg/apis/velero/v1"
     scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -76,11 +78,16 @@ func (c *podVolumeBackups) Get(name string, options metav1.GetOptions) (result *

 // List takes label and field selectors, and returns the list of PodVolumeBackups that match those selectors.
 func (c *podVolumeBackups) List(opts metav1.ListOptions) (result *v1.PodVolumeBackupList, err error) {
+    var timeout time.Duration
+    if opts.TimeoutSeconds != nil {
+        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+    }
     result = &v1.PodVolumeBackupList{}
     err = c.client.Get().
         Namespace(c.ns).
         Resource("podvolumebackups").
         VersionedParams(&opts, scheme.ParameterCodec).
+        Timeout(timeout).
         Do().
         Into(result)
     return

@@ -88,11 +95,16 @@ func (c *podVolumeBackups) List(opts metav1.ListOptions) (result *v1.PodVolumeBa

 // Watch returns a watch.Interface that watches the requested podVolumeBackups.
 func (c *podVolumeBackups) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+    var timeout time.Duration
+    if opts.TimeoutSeconds != nil {
+        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+    }
     opts.Watch = true
     return c.client.Get().
         Namespace(c.ns).
         Resource("podvolumebackups").
         VersionedParams(&opts, scheme.ParameterCodec).
+        Timeout(timeout).
         Watch()
 }

@@ -150,10 +162,15 @@ func (c *podVolumeBackups) Delete(name string, options *metav1.DeleteOptions) er

 // DeleteCollection deletes a collection of objects.
 func (c *podVolumeBackups) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+    var timeout time.Duration
+    if listOptions.TimeoutSeconds != nil {
+        timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+    }
     return c.client.Delete().
         Namespace(c.ns).
         Resource("podvolumebackups").
         VersionedParams(&listOptions, scheme.ParameterCodec).
+        Timeout(timeout).
         Body(options).
         Do().
         Error()
@@ -19,6 +19,8 @@ limitations under the License.
 package v1

 import (
+    "time"
+
     v1 "github.com/heptio/velero/pkg/apis/velero/v1"
     scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -76,11 +78,16 @@ func (c *podVolumeRestores) Get(name string, options metav1.GetOptions) (result

 // List takes label and field selectors, and returns the list of PodVolumeRestores that match those selectors.
 func (c *podVolumeRestores) List(opts metav1.ListOptions) (result *v1.PodVolumeRestoreList, err error) {
+    var timeout time.Duration
+    if opts.TimeoutSeconds != nil {
+        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+    }
     result = &v1.PodVolumeRestoreList{}
     err = c.client.Get().
         Namespace(c.ns).
         Resource("podvolumerestores").
         VersionedParams(&opts, scheme.ParameterCodec).
+        Timeout(timeout).
         Do().
         Into(result)
     return

@@ -88,11 +95,16 @@ func (c *podVolumeRestores) List(opts metav1.ListOptions) (result *v1.PodVolumeR

 // Watch returns a watch.Interface that watches the requested podVolumeRestores.
 func (c *podVolumeRestores) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+    var timeout time.Duration
+    if opts.TimeoutSeconds != nil {
+        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+    }
     opts.Watch = true
     return c.client.Get().
         Namespace(c.ns).
         Resource("podvolumerestores").
         VersionedParams(&opts, scheme.ParameterCodec).
+        Timeout(timeout).
         Watch()
 }

@@ -150,10 +162,15 @@ func (c *podVolumeRestores) Delete(name string, options *metav1.DeleteOptions) e

 // DeleteCollection deletes a collection of objects.
 func (c *podVolumeRestores) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+    var timeout time.Duration
+    if listOptions.TimeoutSeconds != nil {
+        timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+    }
     return c.client.Delete().
         Namespace(c.ns).
         Resource("podvolumerestores").
         VersionedParams(&listOptions, scheme.ParameterCodec).
+        Timeout(timeout).
         Body(options).
         Do().
         Error()
@@ -19,6 +19,8 @@ limitations under the License.
 package v1

 import (
+    "time"
+
     v1 "github.com/heptio/velero/pkg/apis/velero/v1"
     scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -76,11 +78,16 @@ func (c *resticRepositories) Get(name string, options metav1.GetOptions) (result

 // List takes label and field selectors, and returns the list of ResticRepositories that match those selectors.
 func (c *resticRepositories) List(opts metav1.ListOptions) (result *v1.ResticRepositoryList, err error) {
+    var timeout time.Duration
+    if opts.TimeoutSeconds != nil {
+        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+    }
     result = &v1.ResticRepositoryList{}
     err = c.client.Get().
         Namespace(c.ns).
         Resource("resticrepositories").
         VersionedParams(&opts, scheme.ParameterCodec).
+        Timeout(timeout).
         Do().
         Into(result)
     return

@@ -88,11 +95,16 @@ func (c *resticRepositories) List(opts metav1.ListOptions) (result *v1.ResticRep

 // Watch returns a watch.Interface that watches the requested resticRepositories.
 func (c *resticRepositories) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+    var timeout time.Duration
+    if opts.TimeoutSeconds != nil {
+        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+    }
     opts.Watch = true
     return c.client.Get().
         Namespace(c.ns).
         Resource("resticrepositories").
         VersionedParams(&opts, scheme.ParameterCodec).
+        Timeout(timeout).
         Watch()
 }

@@ -150,10 +162,15 @@ func (c *resticRepositories) Delete(name string, options *metav1.DeleteOptions)

 // DeleteCollection deletes a collection of objects.
 func (c *resticRepositories) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+    var timeout time.Duration
+    if listOptions.TimeoutSeconds != nil {
+        timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+    }
     return c.client.Delete().
         Namespace(c.ns).
         Resource("resticrepositories").
         VersionedParams(&listOptions, scheme.ParameterCodec).
+        Timeout(timeout).
         Body(options).
         Do().
         Error()
@@ -19,6 +19,8 @@ limitations under the License.
 package v1

 import (
+    "time"
+
     v1 "github.com/heptio/velero/pkg/apis/velero/v1"
     scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -76,11 +78,16 @@ func (c *restores) Get(name string, options metav1.GetOptions) (result *v1.Resto

 // List takes label and field selectors, and returns the list of Restores that match those selectors.
 func (c *restores) List(opts metav1.ListOptions) (result *v1.RestoreList, err error) {
+    var timeout time.Duration
+    if opts.TimeoutSeconds != nil {
+        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+    }
     result = &v1.RestoreList{}
     err = c.client.Get().
         Namespace(c.ns).
         Resource("restores").
         VersionedParams(&opts, scheme.ParameterCodec).
+        Timeout(timeout).
         Do().
         Into(result)
     return

@@ -88,11 +95,16 @@ func (c *restores) List(opts metav1.ListOptions) (result *v1.RestoreList, err er

 // Watch returns a watch.Interface that watches the requested restores.
 func (c *restores) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+    var timeout time.Duration
+    if opts.TimeoutSeconds != nil {
+        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+    }
     opts.Watch = true
     return c.client.Get().
         Namespace(c.ns).
         Resource("restores").
         VersionedParams(&opts, scheme.ParameterCodec).
+        Timeout(timeout).
         Watch()
 }

@@ -150,10 +162,15 @@ func (c *restores) Delete(name string, options *metav1.DeleteOptions) error {

 // DeleteCollection deletes a collection of objects.
 func (c *restores) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+    var timeout time.Duration
+    if listOptions.TimeoutSeconds != nil {
+        timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+    }
     return c.client.Delete().
         Namespace(c.ns).
         Resource("restores").
         VersionedParams(&listOptions, scheme.ParameterCodec).
+        Timeout(timeout).
         Body(options).
         Do().
         Error()
@@ -19,6 +19,8 @@ limitations under the License.
 package v1

 import (
+    "time"
+
     v1 "github.com/heptio/velero/pkg/apis/velero/v1"
     scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -76,11 +78,16 @@ func (c *schedules) Get(name string, options metav1.GetOptions) (result *v1.Sche

 // List takes label and field selectors, and returns the list of Schedules that match those selectors.
 func (c *schedules) List(opts metav1.ListOptions) (result *v1.ScheduleList, err error) {
+    var timeout time.Duration
+    if opts.TimeoutSeconds != nil {
+        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+    }
     result = &v1.ScheduleList{}
     err = c.client.Get().
         Namespace(c.ns).
         Resource("schedules").
         VersionedParams(&opts, scheme.ParameterCodec).
+        Timeout(timeout).
         Do().
         Into(result)
     return

@@ -88,11 +95,16 @@ func (c *schedules) List(opts metav1.ListOptions) (result *v1.ScheduleList, err

 // Watch returns a watch.Interface that watches the requested schedules.
 func (c *schedules) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+    var timeout time.Duration
+    if opts.TimeoutSeconds != nil {
+        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+    }
     opts.Watch = true
     return c.client.Get().
         Namespace(c.ns).
         Resource("schedules").
         VersionedParams(&opts, scheme.ParameterCodec).
+        Timeout(timeout).
         Watch()
 }

@@ -150,10 +162,15 @@ func (c *schedules) Delete(name string, options *metav1.DeleteOptions) error {

 // DeleteCollection deletes a collection of objects.
 func (c *schedules) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+    var timeout time.Duration
+    if listOptions.TimeoutSeconds != nil {
+        timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+    }
     return c.client.Delete().
         Namespace(c.ns).
         Resource("schedules").
         VersionedParams(&listOptions, scheme.ParameterCodec).
+        Timeout(timeout).
         Body(options).
         Do().
         Error()
@@ -19,6 +19,8 @@ limitations under the License.
 package v1

 import (
+    "time"
+
     v1 "github.com/heptio/velero/pkg/apis/velero/v1"
     scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme"
     metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"

@@ -76,11 +78,16 @@ func (c *serverStatusRequests) Get(name string, options metav1.GetOptions) (resu

 // List takes label and field selectors, and returns the list of ServerStatusRequests that match those selectors.
 func (c *serverStatusRequests) List(opts metav1.ListOptions) (result *v1.ServerStatusRequestList, err error) {
+    var timeout time.Duration
+    if opts.TimeoutSeconds != nil {
+        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+    }
     result = &v1.ServerStatusRequestList{}
     err = c.client.Get().
         Namespace(c.ns).
         Resource("serverstatusrequests").
         VersionedParams(&opts, scheme.ParameterCodec).
+        Timeout(timeout).
         Do().
         Into(result)
     return

@@ -88,11 +95,16 @@ func (c *serverStatusRequests) List(opts metav1.ListOptions) (result *v1.ServerS

 // Watch returns a watch.Interface that watches the requested serverStatusRequests.
 func (c *serverStatusRequests) Watch(opts metav1.ListOptions) (watch.Interface, error) {
+    var timeout time.Duration
+    if opts.TimeoutSeconds != nil {
+        timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
+    }
     opts.Watch = true
     return c.client.Get().
         Namespace(c.ns).
         Resource("serverstatusrequests").
         VersionedParams(&opts, scheme.ParameterCodec).
+        Timeout(timeout).
         Watch()
 }

@@ -150,10 +162,15 @@ func (c *serverStatusRequests) Delete(name string, options *metav1.DeleteOptions

 // DeleteCollection deletes a collection of objects.
 func (c *serverStatusRequests) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
+    var timeout time.Duration
+    if listOptions.TimeoutSeconds != nil {
+        timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
+    }
     return c.client.Delete().
         Namespace(c.ns).
         Resource("serverstatusrequests").
         VersionedParams(&listOptions, scheme.ParameterCodec).
+        Timeout(timeout).
         Body(options).
         Do().
         Error()
@ -19,6 +19,8 @@ limitations under the License.
|
|||
package v1
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
v1 "github.com/heptio/velero/pkg/apis/velero/v1"
|
||||
scheme "github.com/heptio/velero/pkg/generated/clientset/versioned/scheme"
|
||||
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
|
||||
|
@ -76,11 +78,16 @@ func (c *volumeSnapshotLocations) Get(name string, options metav1.GetOptions) (r
|
|||
|
||||
// List takes label and field selectors, and returns the list of VolumeSnapshotLocations that match those selectors.
|
||||
func (c *volumeSnapshotLocations) List(opts metav1.ListOptions) (result *v1.VolumeSnapshotLocationList, err error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
result = &v1.VolumeSnapshotLocationList{}
|
||||
err = c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("volumesnapshotlocations").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Do().
|
||||
Into(result)
|
||||
return
|
||||
|
@ -88,11 +95,16 @@ func (c *volumeSnapshotLocations) List(opts metav1.ListOptions) (result *v1.Volu
|
|||
|
||||
// Watch returns a watch.Interface that watches the requested volumeSnapshotLocations.
|
||||
func (c *volumeSnapshotLocations) Watch(opts metav1.ListOptions) (watch.Interface, error) {
|
||||
var timeout time.Duration
|
||||
if opts.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*opts.TimeoutSeconds) * time.Second
|
||||
}
|
||||
opts.Watch = true
|
||||
return c.client.Get().
|
||||
Namespace(c.ns).
|
||||
Resource("volumesnapshotlocations").
|
||||
VersionedParams(&opts, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Watch()
|
||||
}
|
||||
|
||||
|
@ -150,10 +162,15 @@ func (c *volumeSnapshotLocations) Delete(name string, options *metav1.DeleteOpti
|
|||
|
||||
// DeleteCollection deletes a collection of objects.
|
||||
func (c *volumeSnapshotLocations) DeleteCollection(options *metav1.DeleteOptions, listOptions metav1.ListOptions) error {
|
||||
var timeout time.Duration
|
||||
if listOptions.TimeoutSeconds != nil {
|
||||
timeout = time.Duration(*listOptions.TimeoutSeconds) * time.Second
|
||||
}
|
||||
return c.client.Delete().
|
||||
Namespace(c.ns).
|
||||
Resource("volumesnapshotlocations").
|
||||
VersionedParams(&listOptions, scheme.ParameterCodec).
|
||||
Timeout(timeout).
|
||||
Body(options).
|
||||
Do().
|
||||
Error()
|
||||
|
|
|
@ -27,6 +27,7 @@ import (
	cache "k8s.io/client-go/tools/cache"
)

// NewInformerFunc takes versioned.Interface and time.Duration to return a SharedIndexInformer.
type NewInformerFunc func(versioned.Interface, time.Duration) cache.SharedIndexInformer

// SharedInformerFactory a small interface to allow for adding an informer without an import cycle

@ -35,4 +36,5 @@ type SharedInformerFactory interface {
	InformerFor(obj runtime.Object, newFunc NewInformerFunc) cache.SharedIndexInformer
}

// TweakListOptionsFunc is a function that transforms a v1.ListOptions.
type TweakListOptionsFunc func(*v1.ListOptions)
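TweakListOptionsFunc lets informer constructors mutate the ListOptions used for every List and Watch the informer issues. A hedged standalone sketch, not wired into a factory; the label selector value is hypothetical:

package main

import (
	"fmt"

	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// TweakListOptionsFunc mirrors the signature in the diff above.
type TweakListOptionsFunc func(*metav1.ListOptions)

func main() {
	// Narrow every List/Watch issued through this tweak to a label subset.
	var tweak TweakListOptionsFunc = func(opts *metav1.ListOptions) {
		opts.LabelSelector = "velero.io/schedule-name=daily" // hypothetical label
	}
	opts := metav1.ListOptions{}
	tweak(&opts)
	fmt.Println(opts.LabelSelector)
}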
@ -798,84 +798,6 @@ func PossibleVirtualMachineSizeTypesValues() []VirtualMachineSizeTypes {

// AccessURI a disk access SAS uri.
type AccessURI struct {
	autorest.Response `json:"-"`
	// AccessURIOutput - Operation output data (raw JSON)
	*AccessURIOutput `json:"properties,omitempty"`
}

// MarshalJSON is the custom marshaler for AccessURI.
func (au AccessURI) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if au.AccessURIOutput != nil {
		objectMap["properties"] = au.AccessURIOutput
	}
	return json.Marshal(objectMap)
}

// UnmarshalJSON is the custom unmarshaler for AccessURI struct.
func (au *AccessURI) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "properties":
			if v != nil {
				var accessURIOutput AccessURIOutput
				err = json.Unmarshal(*v, &accessURIOutput)
				if err != nil {
					return err
				}
				au.AccessURIOutput = &accessURIOutput
			}
		}
	}

	return nil
}

// AccessURIOutput azure properties, including output.
type AccessURIOutput struct {
	// AccessURIRaw - Operation output data (raw JSON)
	*AccessURIRaw `json:"output,omitempty"`
}

// MarshalJSON is the custom marshaler for AccessURIOutput.
func (auo AccessURIOutput) MarshalJSON() ([]byte, error) {
	objectMap := make(map[string]interface{})
	if auo.AccessURIRaw != nil {
		objectMap["output"] = auo.AccessURIRaw
	}
	return json.Marshal(objectMap)
}

// UnmarshalJSON is the custom unmarshaler for AccessURIOutput struct.
func (auo *AccessURIOutput) UnmarshalJSON(body []byte) error {
	var m map[string]*json.RawMessage
	err := json.Unmarshal(body, &m)
	if err != nil {
		return err
	}
	for k, v := range m {
		switch k {
		case "output":
			if v != nil {
				var accessURIRaw AccessURIRaw
				err = json.Unmarshal(*v, &accessURIRaw)
				if err != nil {
					return err
				}
				auo.AccessURIRaw = &accessURIRaw
			}
		}
	}

	return nil
}

// AccessURIRaw a disk access SAS uri.
type AccessURIRaw struct {
	// AccessSAS - A SAS uri for accessing a disk.
	AccessSAS *string `json:"accessSAS,omitempty"`
}
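The removed AccessURI types follow the embed-and-flatten pattern used throughout the generated Azure models: an embedded pointer struct is (un)marshaled under a named JSON key. A hedged sketch of that pattern reduced to a standalone example, with hypothetical type names:

package main

import (
	"encoding/json"
	"fmt"
)

type Inner struct {
	Value *string `json:"value,omitempty"`
}

type Outer struct {
	*Inner `json:"properties,omitempty"`
}

// MarshalJSON nests the embedded struct under "properties", mirroring the
// custom marshalers in the removed code above.
func (o Outer) MarshalJSON() ([]byte, error) {
	m := map[string]interface{}{}
	if o.Inner != nil {
		m["properties"] = o.Inner
	}
	return json.Marshal(m)
}

func main() {
	v := "sas-uri"
	b, _ := json.Marshal(Outer{Inner: &Inner{Value: &v}})
	fmt.Println(string(b)) // {"properties":{"value":"sas-uri"}}
}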
@ -1290,7 +1212,7 @@ type DataDisk struct {
	WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"`
	// CreateOption - Specifies how the virtual machine should be created.<br><br> Possible values are:<br><br> **Attach** – This value is used when you are using a specialized disk to create the virtual machine.<br><br> **FromImage** – This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach'
	CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"`
	// DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image. <br><br> This value cannot be larger than 1023 GB
	// DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image. <br><br> This value cannot be larger than 1023 GB
	DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
	// ManagedDisk - The managed disk parameters.
	ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"`

@ -2718,7 +2640,7 @@ type OSDisk struct {
	WriteAcceleratorEnabled *bool `json:"writeAcceleratorEnabled,omitempty"`
	// CreateOption - Specifies how the virtual machine should be created.<br><br> Possible values are:<br><br> **Attach** – This value is used when you are using a specialized disk to create the virtual machine.<br><br> **FromImage** – This value is used when you are using an image to create the virtual machine. If you are using a platform image, you also use the imageReference element described above. If you are using a marketplace image, you also use the plan element previously described. Possible values include: 'DiskCreateOptionTypesFromImage', 'DiskCreateOptionTypesEmpty', 'DiskCreateOptionTypesAttach'
	CreateOption DiskCreateOptionTypes `json:"createOption,omitempty"`
	// DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the name of the disk in a virtual machine image. <br><br> This value cannot be larger than 1023 GB
	// DiskSizeGB - Specifies the size of an empty data disk in gigabytes. This element can be used to overwrite the size of the disk in a virtual machine image. <br><br> This value cannot be larger than 1023 GB
	DiskSizeGB *int32 `json:"diskSizeGB,omitempty"`
	// ManagedDisk - The managed disk parameters.
	ManagedDisk *ManagedDiskParameters `json:"managedDisk,omitempty"`
@ -1676,6 +1676,8 @@ type OperationDisplay struct {
	Resource *string `json:"resource,omitempty"`
	// Operation - Type of operation: get, read, delete, etc.
	Operation *string `json:"operation,omitempty"`
	// Description - Description of the operation.
	Description *string `json:"description,omitempty"`
}

// OperationListResult result of the request to list Storage operations. It contains a list of operations and a URL

@ -1719,7 +1721,7 @@ type Restriction struct {
	Type *string `json:"type,omitempty"`
	// Values - The value of restrictions. If the restriction type is set to location. This would be different locations where the SKU is restricted.
	Values *[]string `json:"values,omitempty"`
	// ReasonCode - The reason for the restriction. As of now this can be “QuotaId” or “NotAvailableForSubscription”. Quota Id is set when the SKU has requiredQuotas parameter as the subscription does not belong to that quota. The “NotAvailableForSubscription” is related to capacity at DC. Possible values include: 'QuotaID', 'NotAvailableForSubscription'
	// ReasonCode - The reason for the restriction. As of now this can be "QuotaId" or "NotAvailableForSubscription". Quota Id is set when the SKU has requiredQuotas parameter as the subscription does not belong to that quota. The "NotAvailableForSubscription" is related to capacity at DC. Possible values include: 'QuotaID', 'NotAvailableForSubscription'
	ReasonCode ReasonCode `json:"reasonCode,omitempty"`
}
@ -169,7 +169,7 @@ func (q *Queue) GetMetadata(options *QueueServiceOptions) error {
		params = addTimeout(params, options.Timeout)
		headers = mergeHeaders(headers, headersFromStruct(*options))
	}
	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), url.Values{"comp": {"metadata"}})
	uri := q.qsc.client.getEndpoint(queueServiceName, q.buildPath(), params)

	resp, err := q.qsc.client.exec(http.MethodGet, uri, headers, nil, q.qsc.auth)
	if err != nil {
@ -18,4 +18,4 @@ package version
// Changes may cause incorrect behavior and will be lost if the code is regenerated.

// Number contains the semantic version of this SDK.
const Number = "v19.0.0"
const Number = "v21.4.0"
@ -19,10 +19,6 @@ import (
	"net/url"
)

const (
	activeDirectoryAPIVersion = "1.0"
)

// OAuthConfig represents the endpoints needed
// in OAuth operations
type OAuthConfig struct {

@ -46,11 +42,25 @@ func validateStringParam(param, name string) error {

// NewOAuthConfig returns an OAuthConfig with tenant specific urls
func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, error) {
	apiVer := "1.0"
	return NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID, &apiVer)
}

// NewOAuthConfigWithAPIVersion returns an OAuthConfig with tenant specific urls.
// If apiVersion is not nil the "api-version" query parameter will be appended to the endpoint URLs with the specified value.
func NewOAuthConfigWithAPIVersion(activeDirectoryEndpoint, tenantID string, apiVersion *string) (*OAuthConfig, error) {
	if err := validateStringParam(activeDirectoryEndpoint, "activeDirectoryEndpoint"); err != nil {
		return nil, err
	}
	api := ""
	// it's legal for tenantID to be empty so don't validate it
	const activeDirectoryEndpointTemplate = "%s/oauth2/%s?api-version=%s"
	if apiVersion != nil {
		if err := validateStringParam(*apiVersion, "apiVersion"); err != nil {
			return nil, err
		}
		api = fmt.Sprintf("?api-version=%s", *apiVersion)
	}
	const activeDirectoryEndpointTemplate = "%s/oauth2/%s%s"
	u, err := url.Parse(activeDirectoryEndpoint)
	if err != nil {
		return nil, err

@ -59,15 +69,15 @@ func NewOAuthConfig(activeDirectoryEndpoint, tenantID string) (*OAuthConfig, err
	if err != nil {
		return nil, err
	}
	authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", activeDirectoryAPIVersion))
	authorizeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "authorize", api))
	if err != nil {
		return nil, err
	}
	tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", activeDirectoryAPIVersion))
	tokenURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "token", api))
	if err != nil {
		return nil, err
	}
	deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", activeDirectoryAPIVersion))
	deviceCodeURL, err := u.Parse(fmt.Sprintf(activeDirectoryEndpointTemplate, tenantID, "devicecode", api))
	if err != nil {
		return nil, err
	}
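The new constructor makes the api-version query parameter optional. A hedged usage sketch, not part of this diff; the "common" tenant is only an example value:

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/autorest/adal"
)

func main() {
	// Legacy behavior: api-version=1.0 is appended to each endpoint.
	cfg, err := adal.NewOAuthConfig("https://login.microsoftonline.com", "common")
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg.TokenEndpoint.String())

	// New behavior: passing nil omits the api-version parameter entirely.
	cfg2, err := adal.NewOAuthConfigWithAPIVersion("https://login.microsoftonline.com", "common", nil)
	if err != nil {
		panic(err)
	}
	fmt.Println(cfg2.TokenEndpoint.String())
}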
@ -38,7 +38,7 @@ func (sf SenderFunc) Do(r *http.Request) (*http.Response, error) {
	return sf(r)
}

// SendDecorator takes and possibily decorates, by wrapping, a Sender. Decorators may affect the
// SendDecorator takes and possibly decorates, by wrapping, a Sender. Decorators may affect the
// http.Request and pass it along or, first, pass the http.Request along then react to the
// http.Response result.
type SendDecorator func(Sender) Sender
@ -29,7 +29,6 @@ import (
	"net"
	"net/http"
	"net/url"
	"strconv"
	"strings"
	"sync"
	"time"

@ -97,18 +96,27 @@ type RefresherWithContext interface {
type TokenRefreshCallback func(Token) error

// Token encapsulates the access token used to authorize Azure requests.
// https://docs.microsoft.com/en-us/azure/active-directory/develop/v1-oauth2-client-creds-grant-flow#service-to-service-access-token-response
type Token struct {
	AccessToken  string `json:"access_token"`
	RefreshToken string `json:"refresh_token"`

	ExpiresIn string `json:"expires_in"`
	ExpiresOn string `json:"expires_on"`
	NotBefore string `json:"not_before"`
	ExpiresIn json.Number `json:"expires_in"`
	ExpiresOn json.Number `json:"expires_on"`
	NotBefore json.Number `json:"not_before"`

	Resource string `json:"resource"`
	Type     string `json:"token_type"`
}

func newToken() Token {
	return Token{
		ExpiresIn: "0",
		ExpiresOn: "0",
		NotBefore: "0",
	}
}

// IsZero returns true if the token object is zero-initialized.
func (t Token) IsZero() bool {
	return t == Token{}
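The switch from string to json.Number matters because encoding/json decodes both a JSON number (3600) and a quoted numeric string ("3600") into json.Number, while the old string-typed fields handled only the quoted form. A minimal demonstration, not part of this diff:

package main

import (
	"encoding/json"
	"fmt"
)

type token struct {
	ExpiresIn json.Number `json:"expires_in"`
}

func main() {
	// Different token endpoints emit the value in different JSON shapes.
	for _, body := range []string{`{"expires_in":"3600"}`, `{"expires_in":3600}`} {
		var t token
		if err := json.Unmarshal([]byte(body), &t); err != nil {
			panic(err)
		}
		secs, _ := t.ExpiresIn.Int64()
		fmt.Println(secs) // 3600 in both cases
	}
}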
@ -116,12 +124,12 @@ func (t Token) IsZero() bool {

// Expires returns the time.Time when the Token expires.
func (t Token) Expires() time.Time {
	s, err := strconv.Atoi(t.ExpiresOn)
	s, err := t.ExpiresOn.Float64()
	if err != nil {
		s = -3600
	}

	expiration := date.NewUnixTimeFromSeconds(float64(s))
	expiration := date.NewUnixTimeFromSeconds(s)

	return time.Time(expiration).UTC()
}
@ -218,6 +226,8 @@ func (secret *ServicePrincipalCertificateSecret) SignJwt(spt *ServicePrincipalTo

	token := jwt.New(jwt.SigningMethodRS256)
	token.Header["x5t"] = thumbprint
	x5c := []string{base64.StdEncoding.EncodeToString(secret.Certificate.Raw)}
	token.Header["x5c"] = x5c
	token.Claims = jwt.MapClaims{
		"aud": spt.inner.OauthConfig.TokenEndpoint.String(),
		"iss": spt.inner.ClientID,
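The added x5c header carries the base64-encoded DER certificate so AAD can validate certificate credentials. A hedged sketch of the same header layout in isolation, using github.com/dgrijalva/jwt-go (the library the vendored code imports); buildClientAssertion is a hypothetical helper name:

package main

import (
	"crypto/x509"
	"encoding/base64"
	"fmt"

	jwt "github.com/dgrijalva/jwt-go"
)

// buildClientAssertion assembles the JWT headers shown in the diff above.
func buildClientAssertion(cert *x509.Certificate, thumbprint string) *jwt.Token {
	token := jwt.New(jwt.SigningMethodRS256)
	token.Header["x5t"] = thumbprint
	token.Header["x5c"] = []string{base64.StdEncoding.EncodeToString(cert.Raw)}
	return token
}

func main() {
	fmt.Println("see buildClientAssertion for the header layout")
}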
@ -414,6 +424,7 @@ func NewServicePrincipalTokenWithSecret(oauthConfig OAuthConfig, id string, reso
	}
	spt := &ServicePrincipalToken{
		inner: servicePrincipalToken{
			Token:       newToken(),
			OauthConfig: oauthConfig,
			Secret:      secret,
			ClientID:    id,

@ -653,6 +664,7 @@ func newServicePrincipalTokenFromMSI(msiEndpoint, resource string, userAssignedI

	spt := &ServicePrincipalToken{
		inner: servicePrincipalToken{
			Token: newToken(),
			OauthConfig: OAuthConfig{
				TokenEndpoint: *msiEndpointURL,
			},
@ -58,10 +58,7 @@ func NewFuture(req *http.Request) Future {
// with the initial response from an asynchronous operation.
func NewFutureFromResponse(resp *http.Response) (Future, error) {
	pt, err := createPollingTracker(resp)
	if err != nil {
		return Future{}, err
	}
	return Future{pt: pt}, nil
	return Future{pt: pt}, err
}

// Response returns the last HTTP response.

@ -89,7 +86,13 @@ func (f Future) PollingMethod() PollingMethodType {
}

// Done queries the service to see if the operation has completed.
// Deprecated: Use DoneWithContext()
func (f *Future) Done(sender autorest.Sender) (bool, error) {
	return f.DoneWithContext(context.Background(), sender)
}

// DoneWithContext queries the service to see if the operation has completed.
func (f *Future) DoneWithContext(ctx context.Context, sender autorest.Sender) (done bool, err error) {
	// support for legacy Future implementation
	if f.req != nil {
		resp, err := sender.Do(f.req)
@ -110,7 +113,7 @@ func (f *Future) Done(sender autorest.Sender) (bool, error) {
	if f.pt.hasTerminated() {
		return true, f.pt.pollingError()
	}
	if err := f.pt.pollForStatus(sender); err != nil {
	if err := f.pt.pollForStatus(ctx, sender); err != nil {
		return false, err
	}
	if err := f.pt.checkForErrors(); err != nil {

@ -119,7 +122,10 @@ func (f *Future) Done(sender autorest.Sender) (bool, error) {
	if err := f.pt.updatePollingState(f.pt.provisioningStateApplicable()); err != nil {
		return false, err
	}
	if err := f.pt.updateHeaders(); err != nil {
	if err := f.pt.initPollingMethod(); err != nil {
		return false, err
	}
	if err := f.pt.updatePollingMethod(); err != nil {
		return false, err
	}
	return f.pt.hasTerminated(), f.pt.pollingError()
@ -164,11 +170,22 @@ func (f Future) WaitForCompletion(ctx context.Context, client autorest.Client) e
// running operation has completed, the provided context is cancelled, or the client's
// polling duration has been exceeded. It will retry failed polling attempts based on
// the retry value defined in the client up to the maximum retry attempts.
func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) error {
	ctx, cancel := context.WithTimeout(ctx, client.PollingDuration)
	defer cancel()
	done, err := f.Done(client)
	for attempts := 0; !done; done, err = f.Done(client) {
// If no deadline is specified in the context then the client.PollingDuration will be
// used to determine if a default deadline should be used.
// If PollingDuration is greater than zero the value will be used as the context's timeout.
// If PollingDuration is zero then no default deadline will be used.
func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Client) (err error) {
	cancelCtx := ctx
	// if the provided context already has a deadline don't override it
	_, hasDeadline := ctx.Deadline()
	if d := client.PollingDuration; !hasDeadline && d != 0 {
		var cancel context.CancelFunc
		cancelCtx, cancel = context.WithTimeout(ctx, d)
		defer cancel()
	}

	done, err := f.DoneWithContext(ctx, client)
	for attempts := 0; !done; done, err = f.DoneWithContext(ctx, client) {
		if attempts >= client.RetryAttempts {
			return autorest.NewErrorWithError(err, "Future", "WaitForCompletion", f.pt.latestResponse(), "the number of retries has been exceeded")
		}
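With this change a caller can opt out of the default polling deadline and bound the wait with its own context. A hedged sketch, assuming the caller already holds an azure.Future from an SDK "begin" call:

package main

import (
	"context"
	"log"
	"time"

	"github.com/Azure/go-autorest/autorest"
	"github.com/Azure/go-autorest/autorest/azure"
)

// waitWithCallerDeadline is a sketch, not an end-to-end runnable sample.
func waitWithCallerDeadline(future azure.Future) error {
	// The context deadline, not client.PollingDuration, bounds the wait.
	ctx, cancel := context.WithTimeout(context.Background(), 10*time.Minute)
	defer cancel()

	client := autorest.NewClientWithUserAgent("velero-example")
	client.PollingDuration = 0 // zero: no default deadline is layered on top

	return future.WaitForCompletionRef(ctx, client)
}

func main() {
	log.Println("see waitWithCallerDeadline for the polling sketch")
}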
@ -192,12 +209,12 @@ func (f *Future) WaitForCompletionRef(ctx context.Context, client autorest.Clien
			attempts++
		}
		// wait until the delay elapses or the context is cancelled
		delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, ctx.Done())
		delayElapsed := autorest.DelayForBackoff(delay, delayAttempt, cancelCtx.Done())
		if !delayElapsed {
			return autorest.NewErrorWithError(ctx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled")
			return autorest.NewErrorWithError(cancelCtx.Err(), "Future", "WaitForCompletion", f.pt.latestResponse(), "context has been cancelled")
		}
	}
	return err
	return
}

// MarshalJSON implements the json.Marshaler interface.
@ -264,7 +281,7 @@ type pollingTracker interface {
	// these methods can differ per tracker

	// checks the response headers and status code to determine the polling mechanism
	updateHeaders() error
	updatePollingMethod() error

	// checks the response for tracker-specific error conditions
	checkForErrors() error

@ -274,11 +291,15 @@ type pollingTracker interface {

	// methods common to all trackers

	// initializes a tracker's polling URL and method, called for each iteration.
	// these values can be overridden by each polling tracker as required.
	initPollingMethod() error

	// initializes the tracker's internal state, call this when the tracker is created
	initializeState() error

	// makes an HTTP request to check the status of the LRO
	pollForStatus(sender autorest.Sender) error
	pollForStatus(ctx context.Context, sender autorest.Sender) error

	// updates internal tracker state, call this after each call to pollForStatus
	updatePollingState(provStateApl bool) error
@ -348,6 +369,10 @@ func (pt *pollingTrackerBase) initializeState() error {
	case http.StatusOK:
		if ps := pt.getProvisioningState(); ps != nil {
			pt.State = *ps
			if pt.hasFailed() {
				pt.updateErrorFromResponse()
				return pt.pollingError()
			}
		} else {
			pt.State = operationSucceeded
		}

@ -364,8 +389,9 @@ func (pt *pollingTrackerBase) initializeState() error {
	default:
		pt.State = operationFailed
		pt.updateErrorFromResponse()
		return pt.pollingError()
	}
	return nil
	return pt.initPollingMethod()
}

func (pt pollingTrackerBase) getProvisioningState() *string {
@ -387,6 +413,10 @@ func (pt *pollingTrackerBase) updateRawBody() error {
		if err != nil {
			return autorest.NewErrorWithError(err, "pollingTrackerBase", "updateRawBody", nil, "failed to read response body")
		}
		// observed in 204 responses over HTTP/2.0; the content length is -1 but body is empty
		if len(b) == 0 {
			return nil
		}
		// put the body back so it's available to other callers
		pt.resp.Body = ioutil.NopCloser(bytes.NewReader(b))
		if err = json.Unmarshal(b, &pt.rawBody); err != nil {

@ -396,15 +426,13 @@ func (pt *pollingTrackerBase) updateRawBody() error {
	return nil
}

func (pt *pollingTrackerBase) pollForStatus(sender autorest.Sender) error {
func (pt *pollingTrackerBase) pollForStatus(ctx context.Context, sender autorest.Sender) error {
	req, err := http.NewRequest(http.MethodGet, pt.URI, nil)
	if err != nil {
		return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to create HTTP request")
	}
	// attach the context from the original request if available (it will be absent for deserialized futures)
	if pt.resp != nil {
		req = req.WithContext(pt.resp.Request.Context())
	}

	req = req.WithContext(ctx)
	pt.resp, err = sender.Do(req)
	if err != nil {
		return autorest.NewErrorWithError(err, "pollingTrackerBase", "pollForStatus", nil, "failed to send HTTP request")
@ -416,12 +444,14 @@ func (pt *pollingTrackerBase) pollForStatus(sender autorest.Sender) error {
	} else {
		// check response body for error content
		pt.updateErrorFromResponse()
		err = pt.pollingError()
	}
	return err
}

// attempts to unmarshal a ServiceError type from the response body.
// if that fails then make a best attempt at creating something meaningful.
// NOTE: this assumes that the async operation has failed.
func (pt *pollingTrackerBase) updateErrorFromResponse() {
	var err error
	if pt.resp.ContentLength != 0 {

@ -431,8 +461,7 @@ func (pt *pollingTrackerBase) updateErrorFromResponse() {
		re := respErr{}
		defer pt.resp.Body.Close()
		var b []byte
		b, err = ioutil.ReadAll(pt.resp.Body)
		if err != nil {
		if b, err = ioutil.ReadAll(pt.resp.Body); err != nil || len(b) == 0 {
			goto Default
		}
		if err = json.Unmarshal(b, &re); err != nil {
@ -445,20 +474,29 @@ func (pt *pollingTrackerBase) updateErrorFromResponse() {
			goto Default
		}
	}
	if re.ServiceError != nil {
	// the unmarshaller will ensure re.ServiceError is non-nil
	// even if there was no content unmarshalled so check the code.
	if re.ServiceError.Code != "" {
		pt.Err = re.ServiceError
		return
	}
Default:
	se := &ServiceError{
		Code:    fmt.Sprintf("HTTP status code %v", pt.resp.StatusCode),
		Message: pt.resp.Status,
		Code:    pt.pollingStatus(),
		Message: "The async operation failed.",
	}
	if err != nil {
		se.InnerError = make(map[string]interface{})
		se.InnerError["unmarshalError"] = err.Error()
	}
	// stick the response body into the error object in hopes
	// it contains something useful to help diagnose the failure.
	if len(pt.rawBody) > 0 {
		se.AdditionalInfo = []map[string]interface{}{
			pt.rawBody,
		}
	}
	pt.Err = se
}
@ -538,13 +576,33 @@ func (pt pollingTrackerBase) baseCheckForErrors() error {
	return nil
}

// default initialization of polling URL/method. each verb tracker will update this as required.
func (pt *pollingTrackerBase) initPollingMethod() error {
	if ao, err := getURLFromAsyncOpHeader(pt.resp); err != nil {
		return err
	} else if ao != "" {
		pt.URI = ao
		pt.Pm = PollingAsyncOperation
		return nil
	}
	if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
		return err
	} else if lh != "" {
		pt.URI = lh
		pt.Pm = PollingLocation
		return nil
	}
	// it's ok if we didn't find a polling header, this will be handled elsewhere
	return nil
}

// DELETE

type pollingTrackerDelete struct {
	pollingTrackerBase
}

func (pt *pollingTrackerDelete) updateHeaders() error {
func (pt *pollingTrackerDelete) updatePollingMethod() error {
	// for 201 the Location header is required
	if pt.resp.StatusCode == http.StatusCreated {
		if lh, err := getURLFromLocationHeader(pt.resp); err != nil {
@ -600,7 +658,7 @@ type pollingTrackerPatch struct {
	pollingTrackerBase
}

func (pt *pollingTrackerPatch) updateHeaders() error {
func (pt *pollingTrackerPatch) updatePollingMethod() error {
	// by default we can use the original URL for polling and final GET
	if pt.URI == "" {
		pt.URI = pt.resp.Request.URL.String()

@ -621,7 +679,7 @@ func (pt *pollingTrackerPatch) updateHeaders() error {
		}
	}
	// for 202 prefer the Azure-AsyncOperation header but fall back to Location if necessary
	// note the absense of the "final GET" mechanism for PATCH
	// note the absence of the "final GET" mechanism for PATCH
	if pt.resp.StatusCode == http.StatusAccepted {
		ao, err := getURLFromAsyncOpHeader(pt.resp)
		if err != nil {

@ -658,7 +716,7 @@ type pollingTrackerPost struct {
	pollingTrackerBase
}

func (pt *pollingTrackerPost) updateHeaders() error {
func (pt *pollingTrackerPost) updatePollingMethod() error {
	// 201 requires Location header
	if pt.resp.StatusCode == http.StatusCreated {
		if lh, err := getURLFromLocationHeader(pt.resp); err != nil {

@ -714,7 +772,7 @@ type pollingTrackerPut struct {
	pollingTrackerBase
}

func (pt *pollingTrackerPut) updateHeaders() error {
func (pt *pollingTrackerPut) updatePollingMethod() error {
	// by default we can use the original URL for polling and final GET
	if pt.URI == "" {
		pt.URI = pt.resp.Request.URL.String()

@ -752,8 +810,6 @@ func (pt *pollingTrackerPut) updateHeaders() error {
			pt.URI = lh
			pt.Pm = PollingLocation
		}
		// when both headers are returned we use the value in the Location header for the final GET
		pt.FinalGetURI = lh
	}
	// make sure a polling URL was found
	if pt.URI == "" {

@ -808,7 +864,7 @@ func createPollingTracker(resp *http.Response) (pollingTracker, error) {
	// this initializes the polling header values, we do this during creation in case the
	// initial response send us invalid values; this way the API call will return a non-nil
	// error (not doing this means the error shows up in Future.Done)
	return pt, pt.updateHeaders()
	return pt, pt.updatePollingMethod()
}

// gets the polling URL from the Azure-AsyncOperation header.
@ -140,8 +140,8 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError
	}

	// poll for registered provisioning state
	now := time.Now()
	for err == nil && time.Since(now) < client.PollingDuration {
	registrationStartTime := time.Now()
	for err == nil && (client.PollingDuration == 0 || (client.PollingDuration != 0 && time.Since(registrationStartTime) < client.PollingDuration)) {
		// taken from the resources SDK
		// https://github.com/Azure/azure-sdk-for-go/blob/9f366792afa3e0ddaecdc860e793ba9d75e76c27/arm/resources/resources/providers.go#L45
		preparer := autorest.CreatePreparer(

@ -183,7 +183,7 @@ func register(client autorest.Client, originalReq *http.Request, re RequestError
			return originalReq.Context().Err()
		}
	}
	if !(time.Since(now) < client.PollingDuration) {
	if client.PollingDuration != 0 && !(time.Since(registrationStartTime) < client.PollingDuration) {
		return errors.New("polling for resource provider registration has exceeded the polling duration")
	}
	return err
@ -22,8 +22,10 @@ import (
	"log"
	"net/http"
	"net/http/cookiejar"
	"strings"
	"time"

	"github.com/Azure/go-autorest/logger"
	"github.com/Azure/go-autorest/version"
)

@ -145,6 +147,7 @@ type Client struct {
	PollingDelay time.Duration

	// PollingDuration sets the maximum polling time after which an error is returned.
	// Setting this to zero will use the provided context to control the duration.
	PollingDuration time.Duration

	// RetryAttempts sets the default number of retry attempts for client.

@ -208,8 +211,17 @@ func (c Client) Do(r *http.Request) (*http.Response, error) {
		}
		return resp, NewErrorWithError(err, "autorest/Client", "Do", nil, "Preparing request failed")
	}

	logger.Instance.WriteRequest(r, logger.Filter{
		Header: func(k string, v []string) (bool, []string) {
			// remove the auth token from the log
			if strings.EqualFold(k, "Authorization") || strings.EqualFold(k, "Ocp-Apim-Subscription-Key") {
				v = []string{"**REDACTED**"}
			}
			return true, v
		},
	})
	resp, err := SendWithSender(c.sender(), r)
	logger.Instance.WriteResponse(resp, logger.Filter{})
	Respond(resp, c.ByInspecting())
	return resp, err
}
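The redaction hook above is an ordinary logger.Filter, so callers can apply the same mechanism to their own headers. A hedged sketch, not part of this diff; the X-My-Secret header name is hypothetical:

package main

import (
	"net/http"
	"strings"

	"github.com/Azure/go-autorest/logger"
)

func main() {
	req, _ := http.NewRequest(http.MethodGet, "https://example.invalid", nil)
	req.Header.Set("X-My-Secret", "hunter2") // hypothetical sensitive header

	filter := logger.Filter{
		Header: func(k string, v []string) (bool, []string) {
			if strings.EqualFold(k, "X-My-Secret") {
				return true, []string{"**REDACTED**"}
			}
			return true, v
		},
	}
	// With the default nilLogger this is a no-op; with a configured fileLogger
	// the header value is written as **REDACTED**.
	logger.Instance.WriteRequest(req, filter)
}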
@ -234,7 +234,7 @@ func DoRetryForStatusCodes(attempts int, backoff time.Duration, codes ...int) Se
			}
			delayed := DelayWithRetryAfter(resp, r.Context().Done())
			if !delayed && !DelayForBackoff(backoff, attempt, r.Context().Done()) {
				return nil, r.Context().Err()
				return resp, r.Context().Err()
			}
			// don't count a 429 against the number of attempts
			// so that we continue to retry until it succeeds
@ -0,0 +1,328 @@
package logger

// Copyright 2017 Microsoft Corporation
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//   http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

import (
	"bytes"
	"fmt"
	"io"
	"io/ioutil"
	"net/http"
	"net/url"
	"os"
	"strings"
	"sync"
	"time"
)

// LevelType tells a logger the minimum level to log. When code reports a log entry,
// the LogLevel indicates the level of the log entry. The logger only records entries
// whose level is at least the level it was told to log. See the Log* constants.
// For example, if a logger is configured with LogError, then LogError, LogPanic,
// and LogFatal entries will be logged; lower level entries are ignored.
type LevelType uint32

const (
	// LogNone tells a logger not to log any entries passed to it.
	LogNone LevelType = iota

	// LogFatal tells a logger to log all LogFatal entries passed to it.
	LogFatal

	// LogPanic tells a logger to log all LogPanic and LogFatal entries passed to it.
	LogPanic

	// LogError tells a logger to log all LogError, LogPanic and LogFatal entries passed to it.
	LogError

	// LogWarning tells a logger to log all LogWarning, LogError, LogPanic and LogFatal entries passed to it.
	LogWarning

	// LogInfo tells a logger to log all LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
	LogInfo

	// LogDebug tells a logger to log all LogDebug, LogInfo, LogWarning, LogError, LogPanic and LogFatal entries passed to it.
	LogDebug
)

const (
	logNone    = "NONE"
	logFatal   = "FATAL"
	logPanic   = "PANIC"
	logError   = "ERROR"
	logWarning = "WARNING"
	logInfo    = "INFO"
	logDebug   = "DEBUG"
	logUnknown = "UNKNOWN"
)

// ParseLevel converts the specified string into the corresponding LevelType.
func ParseLevel(s string) (lt LevelType, err error) {
	switch strings.ToUpper(s) {
	case logFatal:
		lt = LogFatal
	case logPanic:
		lt = LogPanic
	case logError:
		lt = LogError
	case logWarning:
		lt = LogWarning
	case logInfo:
		lt = LogInfo
	case logDebug:
		lt = LogDebug
	default:
		err = fmt.Errorf("bad log level '%s'", s)
	}
	return
}

// String implements the stringer interface for LevelType.
func (lt LevelType) String() string {
	switch lt {
	case LogNone:
		return logNone
	case LogFatal:
		return logFatal
	case LogPanic:
		return logPanic
	case LogError:
		return logError
	case LogWarning:
		return logWarning
	case LogInfo:
		return logInfo
	case LogDebug:
		return logDebug
	default:
		return logUnknown
	}
}

// Filter defines functions for filtering HTTP request/response content.
type Filter struct {
	// URL returns a potentially modified string representation of a request URL.
	URL func(u *url.URL) string

	// Header returns a potentially modified set of values for the specified key.
	// To completely exclude the header key/values return false.
	Header func(key string, val []string) (bool, []string)

	// Body returns a potentially modified request/response body.
	Body func(b []byte) []byte
}

func (f Filter) processURL(u *url.URL) string {
	if f.URL == nil {
		return u.String()
	}
	return f.URL(u)
}

func (f Filter) processHeader(k string, val []string) (bool, []string) {
	if f.Header == nil {
		return true, val
	}
	return f.Header(k, val)
}

func (f Filter) processBody(b []byte) []byte {
	if f.Body == nil {
		return b
	}
	return f.Body(b)
}

// Writer defines methods for writing to a logging facility.
type Writer interface {
	// Writeln writes the specified message with the standard log entry header and new-line character.
	Writeln(level LevelType, message string)

	// Writef writes the specified format specifier with the standard log entry header and no new-line character.
	Writef(level LevelType, format string, a ...interface{})

	// WriteRequest writes the specified HTTP request to the logger if the log level is greater than
	// or equal to LogInfo. The request body, if set, is logged at level LogDebug or higher.
	// Custom filters can be specified to exclude URL, header, and/or body content from the log.
	// By default no request content is excluded.
	WriteRequest(req *http.Request, filter Filter)

	// WriteResponse writes the specified HTTP response to the logger if the log level is greater than
	// or equal to LogInfo. The response body, if set, is logged at level LogDebug or higher.
	// Custom filters can be specified to exclude URL, header, and/or body content from the log.
	// By default no response content is excluded.
	WriteResponse(resp *http.Response, filter Filter)
}

// Instance is the default log writer initialized during package init.
// This can be replaced with a custom implementation as required.
var Instance Writer

// default log level
var logLevel = LogNone

// Level returns the value specified in AZURE_GO_AUTOREST_LOG_LEVEL.
// If no value was specified the default value is LogNone.
// Custom loggers can call this to retrieve the configured log level.
func Level() LevelType {
	return logLevel
}

func init() {
	// separated for testing purposes
	initDefaultLogger()
}

func initDefaultLogger() {
	// init with nilLogger so callers don't have to do a nil check on Default
	Instance = nilLogger{}
	llStr := strings.ToLower(os.Getenv("AZURE_GO_SDK_LOG_LEVEL"))
	if llStr == "" {
		return
	}
	var err error
	logLevel, err = ParseLevel(llStr)
	if err != nil {
		fmt.Fprintf(os.Stderr, "go-autorest: failed to parse log level: %s\n", err.Error())
		return
	}
	if logLevel == LogNone {
		return
	}
	// default to stderr
	dest := os.Stderr
	lfStr := os.Getenv("AZURE_GO_SDK_LOG_FILE")
	if strings.EqualFold(lfStr, "stdout") {
		dest = os.Stdout
	} else if lfStr != "" {
		lf, err := os.Create(lfStr)
		if err == nil {
			dest = lf
		} else {
			fmt.Fprintf(os.Stderr, "go-autorest: failed to create log file, using stderr: %s\n", err.Error())
		}
	}
	Instance = fileLogger{
		logLevel: logLevel,
		mu:       &sync.Mutex{},
		logFile:  dest,
	}
}
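initDefaultLogger is driven entirely by environment variables (the code reads AZURE_GO_SDK_LOG_LEVEL and AZURE_GO_SDK_LOG_FILE at package init). A hedged sketch of the exported level helpers, not part of this diff:

package main

import (
	"fmt"

	"github.com/Azure/go-autorest/logger"
)

func main() {
	// ParseLevel is case-insensitive; String() returns the canonical name.
	lt, err := logger.ParseLevel("debug")
	if err != nil {
		panic(err)
	}
	fmt.Println(lt.String())          // DEBUG
	fmt.Println(lt >= logger.LogInfo) // true: DEBUG also records INFO entries
}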
// the nil logger does nothing
type nilLogger struct{}

func (nilLogger) Writeln(LevelType, string) {}

func (nilLogger) Writef(LevelType, string, ...interface{}) {}

func (nilLogger) WriteRequest(*http.Request, Filter) {}

func (nilLogger) WriteResponse(*http.Response, Filter) {}

// A File is used instead of a Logger so the stream can be flushed after every write.
type fileLogger struct {
	logLevel LevelType
	mu       *sync.Mutex // for synchronizing writes to logFile
	logFile  *os.File
}

func (fl fileLogger) Writeln(level LevelType, message string) {
	fl.Writef(level, "%s\n", message)
}

func (fl fileLogger) Writef(level LevelType, format string, a ...interface{}) {
	if fl.logLevel >= level {
		fl.mu.Lock()
		defer fl.mu.Unlock()
		fmt.Fprintf(fl.logFile, "%s %s", entryHeader(level), fmt.Sprintf(format, a...))
		fl.logFile.Sync()
	}
}

func (fl fileLogger) WriteRequest(req *http.Request, filter Filter) {
	if req == nil || fl.logLevel < LogInfo {
		return
	}
	b := &bytes.Buffer{}
	fmt.Fprintf(b, "%s REQUEST: %s %s\n", entryHeader(LogInfo), req.Method, filter.processURL(req.URL))
	// dump headers
	for k, v := range req.Header {
		if ok, mv := filter.processHeader(k, v); ok {
			fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ","))
		}
	}
	if fl.shouldLogBody(req.Header, req.Body) {
		// dump body
		body, err := ioutil.ReadAll(req.Body)
		if err == nil {
			fmt.Fprintln(b, string(filter.processBody(body)))
			if nc, ok := req.Body.(io.Seeker); ok {
				// rewind to the beginning
				nc.Seek(0, io.SeekStart)
			} else {
				// recreate the body
				req.Body = ioutil.NopCloser(bytes.NewReader(body))
			}
		} else {
			fmt.Fprintf(b, "failed to read body: %v\n", err)
		}
	}
	fl.mu.Lock()
	defer fl.mu.Unlock()
	fmt.Fprint(fl.logFile, b.String())
	fl.logFile.Sync()
}

func (fl fileLogger) WriteResponse(resp *http.Response, filter Filter) {
	if resp == nil || fl.logLevel < LogInfo {
		return
	}
	b := &bytes.Buffer{}
	fmt.Fprintf(b, "%s RESPONSE: %d %s\n", entryHeader(LogInfo), resp.StatusCode, filter.processURL(resp.Request.URL))
	// dump headers
	for k, v := range resp.Header {
		if ok, mv := filter.processHeader(k, v); ok {
			fmt.Fprintf(b, "%s: %s\n", k, strings.Join(mv, ","))
		}
	}
	if fl.shouldLogBody(resp.Header, resp.Body) {
		// dump body
		defer resp.Body.Close()
		body, err := ioutil.ReadAll(resp.Body)
		if err == nil {
			fmt.Fprintln(b, string(filter.processBody(body)))
			resp.Body = ioutil.NopCloser(bytes.NewReader(body))
		} else {
			fmt.Fprintf(b, "failed to read body: %v\n", err)
		}
	}
	fl.mu.Lock()
	defer fl.mu.Unlock()
	fmt.Fprint(fl.logFile, b.String())
	fl.logFile.Sync()
}

// returns true if the provided body should be included in the log
func (fl fileLogger) shouldLogBody(header http.Header, body io.ReadCloser) bool {
	ct := header.Get("Content-Type")
	return fl.logLevel >= LogDebug && body != nil && strings.Index(ct, "application/octet-stream") == -1
}

// creates standard header for log entries, it contains a timestamp and the log level
func entryHeader(level LevelType) string {
	// this format provides a fixed number of digits so the size of the timestamp is constant
	return fmt.Sprintf("(%s) %s:", time.Now().Format("2006-01-02T15:04:05.0000000Z07:00"), level.String())
}
@ -20,7 +20,7 @@ import (
)

// Number contains the semantic version of this SDK.
const Number = "v10.14.0"
const Number = "v11.1.2"

var (
	userAgent = fmt.Sprintf("Go/%s (%s-%s) go-autorest/%s",
@ -1,881 +0,0 @@
// Copyright 2014 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Package btree implements in-memory B-Trees of arbitrary degree.
//
// btree implements an in-memory B-Tree for use as an ordered data structure.
// It is not meant for persistent storage solutions.
//
// It has a flatter structure than an equivalent red-black or other binary tree,
// which in some cases yields better memory usage and/or performance.
// See some discussion on the matter here:
//   http://google-opensource.blogspot.com/2013/01/c-containers-that-save-memory-and-time.html
// Note, though, that this project is in no way related to the C++ B-Tree
// implementation written about there.
//
// Within this tree, each node contains a slice of items and a (possibly nil)
// slice of children. For basic numeric values or raw structs, this can cause
// efficiency differences when compared to equivalent C++ template code that
// stores values in arrays within the node:
//   * Due to the overhead of storing values as interfaces (each
//     value needs to be stored as the value itself, then 2 words for the
//     interface pointing to that value and its type), resulting in higher
//     memory use.
//   * Since interfaces can point to values anywhere in memory, values are
//     most likely not stored in contiguous blocks, resulting in a higher
//     number of cache misses.
// These issues don't tend to matter, though, when working with strings or other
// heap-allocated structures, since C++-equivalent structures also must store
// pointers and also distribute their values across the heap.
//
// This implementation is designed to be a drop-in replacement to gollrb.LLRB
// trees, (http://github.com/petar/gollrb), an excellent and probably the most
// widely used ordered tree implementation in the Go ecosystem currently.
// Its functions, therefore, exactly mirror those of
// llrb.LLRB where possible. Unlike gollrb, though, we currently don't
// support storing multiple equivalent values.
package btree

import (
	"fmt"
	"io"
	"sort"
	"strings"
	"sync"
)

// Item represents a single object in the tree.
type Item interface {
	// Less tests whether the current item is less than the given argument.
	//
	// This must provide a strict weak ordering.
	// If !a.Less(b) && !b.Less(a), we treat this to mean a == b (i.e. we can only
	// hold one of either a or b in the tree).
	Less(than Item) bool
}

const (
	DefaultFreeListSize = 32
)

var (
	nilItems    = make(items, 16)
	nilChildren = make(children, 16)
)

// FreeList represents a free list of btree nodes. By default each
// BTree has its own FreeList, but multiple BTrees can share the same
// FreeList.
// Two Btrees using the same freelist are safe for concurrent write access.
type FreeList struct {
	mu       sync.Mutex
	freelist []*node
}

// NewFreeList creates a new free list.
// size is the maximum size of the returned free list.
func NewFreeList(size int) *FreeList {
	return &FreeList{freelist: make([]*node, 0, size)}
}

func (f *FreeList) newNode() (n *node) {
	f.mu.Lock()
	index := len(f.freelist) - 1
	if index < 0 {
		f.mu.Unlock()
		return new(node)
	}
	n = f.freelist[index]
	f.freelist[index] = nil
	f.freelist = f.freelist[:index]
	f.mu.Unlock()
	return
}

// freeNode adds the given node to the list, returning true if it was added
// and false if it was discarded.
func (f *FreeList) freeNode(n *node) (out bool) {
	f.mu.Lock()
	if len(f.freelist) < cap(f.freelist) {
		f.freelist = append(f.freelist, n)
		out = true
	}
	f.mu.Unlock()
	return
}

// ItemIterator allows callers of Ascend* to iterate in-order over portions of
// the tree. When this function returns false, iteration will stop and the
// associated Ascend* function will immediately return.
type ItemIterator func(i Item) bool

// New creates a new B-Tree with the given degree.
//
// New(2), for example, will create a 2-3-4 tree (each node contains 1-3 items
// and 2-4 children).
func New(degree int) *BTree {
	return NewWithFreeList(degree, NewFreeList(DefaultFreeListSize))
}

// NewWithFreeList creates a new B-Tree that uses the given node free list.
func NewWithFreeList(degree int, f *FreeList) *BTree {
	if degree <= 1 {
		panic("bad degree")
	}
	return &BTree{
		degree: degree,
		cow:    &copyOnWriteContext{freelist: f},
	}
}
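Although this PR removes the vendored btree package, a short hedged sketch of its ordered-insert behavior, using the package's built-in Int item type, may help explain what the dependency provided:

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	tr := btree.New(2) // a 2-3-4 tree, per the package docs above
	for _, v := range []int{5, 1, 3, 2, 4} {
		tr.ReplaceOrInsert(btree.Int(v))
	}
	tr.Ascend(func(i btree.Item) bool {
		fmt.Print(int(i.(btree.Int)), " ") // prints 1 2 3 4 5 in order
		return true
	})
	fmt.Println()
}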
// items stores items in a node.
type items []Item

// insertAt inserts a value into the given index, pushing all subsequent values
// forward.
func (s *items) insertAt(index int, item Item) {
	*s = append(*s, nil)
	if index < len(*s) {
		copy((*s)[index+1:], (*s)[index:])
	}
	(*s)[index] = item
}

// removeAt removes a value at a given index, pulling all subsequent values
// back.
func (s *items) removeAt(index int) Item {
	item := (*s)[index]
	copy((*s)[index:], (*s)[index+1:])
	(*s)[len(*s)-1] = nil
	*s = (*s)[:len(*s)-1]
	return item
}

// pop removes and returns the last element in the list.
func (s *items) pop() (out Item) {
	index := len(*s) - 1
	out = (*s)[index]
	(*s)[index] = nil
	*s = (*s)[:index]
	return
}

// truncate truncates this instance at index so that it contains only the
// first index items. index must be less than or equal to length.
func (s *items) truncate(index int) {
	var toClear items
	*s, toClear = (*s)[:index], (*s)[index:]
	for len(toClear) > 0 {
		toClear = toClear[copy(toClear, nilItems):]
	}
}

// find returns the index where the given item should be inserted into this
// list. 'found' is true if the item already exists in the list at the given
// index.
func (s items) find(item Item) (index int, found bool) {
	i := sort.Search(len(s), func(i int) bool {
		return item.Less(s[i])
	})
	if i > 0 && !s[i-1].Less(item) {
		return i - 1, true
	}
	return i, false
}
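find's contract — the returned index is the insertion point, and found reports an exact match under the Less ordering — is easy to see with plain ints. A hedged standalone sketch, not part of the removed file:

package main

import (
	"fmt"
	"sort"
)

// find mirrors items.find above, specialized to ints.
func find(s []int, x int) (int, bool) {
	i := sort.Search(len(s), func(i int) bool { return x < s[i] })
	if i > 0 && !(s[i-1] < x) { // neither less than the other: treat as equal
		return i - 1, true
	}
	return i, false
}

func main() {
	s := []int{1, 3, 5}
	fmt.Println(find(s, 3)) // 1 true: item already exists at index 1
	fmt.Println(find(s, 4)) // 2 false: 4 would be inserted at index 2
}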
// children stores child nodes in a node.
|
||||
type children []*node
|
||||
|
||||
// insertAt inserts a value into the given index, pushing all subsequent values
|
||||
// forward.
|
||||
func (s *children) insertAt(index int, n *node) {
|
||||
*s = append(*s, nil)
|
||||
if index < len(*s) {
|
||||
copy((*s)[index+1:], (*s)[index:])
|
||||
}
|
||||
(*s)[index] = n
|
||||
}
|
||||
|
||||
// removeAt removes a value at a given index, pulling all subsequent values
|
||||
// back.
|
||||
func (s *children) removeAt(index int) *node {
|
||||
n := (*s)[index]
|
||||
copy((*s)[index:], (*s)[index+1:])
|
||||
(*s)[len(*s)-1] = nil
|
||||
*s = (*s)[:len(*s)-1]
|
||||
return n
|
||||
}
|
||||
|
||||
// pop removes and returns the last element in the list.
|
||||
func (s *children) pop() (out *node) {
|
||||
index := len(*s) - 1
|
||||
out = (*s)[index]
|
||||
(*s)[index] = nil
|
||||
*s = (*s)[:index]
|
||||
return
|
||||
}
|
||||
|
||||
// truncate truncates this instance at index so that it contains only the
|
||||
// first index children. index must be less than or equal to length.
|
||||
func (s *children) truncate(index int) {
|
||||
var toClear children
|
||||
*s, toClear = (*s)[:index], (*s)[index:]
|
||||
for len(toClear) > 0 {
|
||||
toClear = toClear[copy(toClear, nilChildren):]
|
||||
}
|
||||
}
|
||||
|
||||
// node is an internal node in a tree.
|
||||
//
|
||||
// It must at all times maintain the invariant that either
|
||||
// * len(children) == 0, len(items) unconstrained
|
||||
// * len(children) == len(items) + 1
|
||||
type node struct {
|
||||
items items
|
||||
children children
|
||||
cow *copyOnWriteContext
|
||||
}
|
||||
|
||||
func (n *node) mutableFor(cow *copyOnWriteContext) *node {
|
||||
if n.cow == cow {
|
||||
return n
|
||||
}
|
||||
out := cow.newNode()
|
||||
if cap(out.items) >= len(n.items) {
|
||||
out.items = out.items[:len(n.items)]
|
||||
} else {
|
||||
out.items = make(items, len(n.items), cap(n.items))
|
||||
}
|
||||
copy(out.items, n.items)
|
||||
// Copy children
|
||||
if cap(out.children) >= len(n.children) {
|
||||
out.children = out.children[:len(n.children)]
|
||||
} else {
|
||||
out.children = make(children, len(n.children), cap(n.children))
|
||||
}
|
||||
copy(out.children, n.children)
|
||||
return out
|
||||
}
|
||||
|
||||
func (n *node) mutableChild(i int) *node {
|
||||
c := n.children[i].mutableFor(n.cow)
|
||||
n.children[i] = c
|
||||
return c
|
||||
}
|
||||
|
||||
// split splits the given node at the given index. The current node shrinks,
|
||||
// and this function returns the item that existed at that index and a new node
|
||||
// containing all items/children after it.
|
||||
func (n *node) split(i int) (Item, *node) {
|
||||
item := n.items[i]
|
||||
next := n.cow.newNode()
|
||||
next.items = append(next.items, n.items[i+1:]...)
|
||||
n.items.truncate(i)
|
||||
if len(n.children) > 0 {
|
||||
next.children = append(next.children, n.children[i+1:]...)
|
||||
n.children.truncate(i + 1)
|
||||
}
|
||||
return item, next
|
||||
}
|
||||
|
||||
// maybeSplitChild checks if a child should be split, and if so splits it.
|
||||
// Returns whether or not a split occurred.
|
||||
func (n *node) maybeSplitChild(i, maxItems int) bool {
|
||||
if len(n.children[i].items) < maxItems {
|
||||
return false
|
||||
}
|
||||
first := n.mutableChild(i)
|
||||
item, second := first.split(maxItems / 2)
|
||||
n.items.insertAt(i, item)
|
||||
n.children.insertAt(i+1, second)
|
||||
return true
|
||||
}
|
||||
|
||||
// insert inserts an item into the subtree rooted at this node, making sure
// no nodes in the subtree exceed maxItems items. Should an equivalent item
// be found/replaced by insert, it will be returned.
func (n *node) insert(item Item, maxItems int) Item {
	i, found := n.items.find(item)
	if found {
		out := n.items[i]
		n.items[i] = item
		return out
	}
	if len(n.children) == 0 {
		n.items.insertAt(i, item)
		return nil
	}
	if n.maybeSplitChild(i, maxItems) {
		inTree := n.items[i]
		switch {
		case item.Less(inTree):
			// no change, we want first split node
		case inTree.Less(item):
			i++ // we want second split node
		default:
			out := n.items[i]
			n.items[i] = item
			return out
		}
	}
	return n.mutableChild(i).insert(item, maxItems)
}

// get finds the given key in the subtree and returns it.
func (n *node) get(key Item) Item {
	i, found := n.items.find(key)
	if found {
		return n.items[i]
	} else if len(n.children) > 0 {
		return n.children[i].get(key)
	}
	return nil
}

// min returns the first item in the subtree.
func min(n *node) Item {
	if n == nil {
		return nil
	}
	for len(n.children) > 0 {
		n = n.children[0]
	}
	if len(n.items) == 0 {
		return nil
	}
	return n.items[0]
}

// max returns the last item in the subtree.
func max(n *node) Item {
	if n == nil {
		return nil
	}
	for len(n.children) > 0 {
		n = n.children[len(n.children)-1]
	}
	if len(n.items) == 0 {
		return nil
	}
	return n.items[len(n.items)-1]
}

// toRemove details what item to remove in a node.remove call.
type toRemove int

const (
	removeItem toRemove = iota // removes the given item
	removeMin                  // removes smallest item in the subtree
	removeMax                  // removes largest item in the subtree
)

// remove removes an item from the subtree rooted at this node.
func (n *node) remove(item Item, minItems int, typ toRemove) Item {
	var i int
	var found bool
	switch typ {
	case removeMax:
		if len(n.children) == 0 {
			return n.items.pop()
		}
		i = len(n.items)
	case removeMin:
		if len(n.children) == 0 {
			return n.items.removeAt(0)
		}
		i = 0
	case removeItem:
		i, found = n.items.find(item)
		if len(n.children) == 0 {
			if found {
				return n.items.removeAt(i)
			}
			return nil
		}
	default:
		panic("invalid type")
	}
	// If we get to here, we have children.
	if len(n.children[i].items) <= minItems {
		return n.growChildAndRemove(i, item, minItems, typ)
	}
	child := n.mutableChild(i)
	// Either we had enough items to begin with, or we've done some
	// merging/stealing, because we've got enough now and we're ready to return
	// stuff.
	if found {
		// The item exists at index 'i', and the child we've selected can give us a
		// predecessor, since if we've gotten here it's got > minItems items in it.
		out := n.items[i]
		// We use our special-case 'remove' call with typ=maxItem to pull the
		// predecessor of item i (the rightmost leaf of our immediate left child)
		// and set it into where we pulled the item from.
		n.items[i] = child.remove(nil, minItems, removeMax)
		return out
	}
	// Final recursive call. Once we're here, we know that the item isn't in this
	// node and that the child is big enough to remove from.
	return child.remove(item, minItems, typ)
}

// growChildAndRemove grows child 'i' to make sure it's possible to remove an
// item from it while keeping it at minItems, then calls remove to actually
// remove it.
//
// Most documentation says we have to do two sets of special casing:
//   1) item is in this node
//   2) item is in child
// In both cases, we need to handle the two subcases:
//   A) node has enough values that it can spare one
//   B) node doesn't have enough values
// For the latter, we have to check:
//   a) left sibling has node to spare
//   b) right sibling has node to spare
//   c) we must merge
// To simplify our code here, we handle cases #1 and #2 the same:
// If a node doesn't have enough items, we make sure it does (using a,b,c).
// We then simply redo our remove call, and the second time (regardless of
// whether we're in case 1 or 2), we'll have enough items and can guarantee
// that we hit case A.
func (n *node) growChildAndRemove(i int, item Item, minItems int, typ toRemove) Item {
	if i > 0 && len(n.children[i-1].items) > minItems {
		// Steal from left child
		child := n.mutableChild(i)
		stealFrom := n.mutableChild(i - 1)
		stolenItem := stealFrom.items.pop()
		child.items.insertAt(0, n.items[i-1])
		n.items[i-1] = stolenItem
		if len(stealFrom.children) > 0 {
			child.children.insertAt(0, stealFrom.children.pop())
		}
	} else if i < len(n.items) && len(n.children[i+1].items) > minItems {
		// steal from right child
		child := n.mutableChild(i)
		stealFrom := n.mutableChild(i + 1)
		stolenItem := stealFrom.items.removeAt(0)
		child.items = append(child.items, n.items[i])
		n.items[i] = stolenItem
		if len(stealFrom.children) > 0 {
			child.children = append(child.children, stealFrom.children.removeAt(0))
		}
	} else {
		if i >= len(n.items) {
			i--
		}
		child := n.mutableChild(i)
		// merge with right child
		mergeItem := n.items.removeAt(i)
		mergeChild := n.children.removeAt(i + 1)
		child.items = append(child.items, mergeItem)
		child.items = append(child.items, mergeChild.items...)
		child.children = append(child.children, mergeChild.children...)
		n.cow.freeNode(mergeChild)
	}
	return n.remove(item, minItems, typ)
}

type direction int

const (
	descend = direction(-1)
	ascend  = direction(+1)
)

// iterate provides a simple method for iterating over elements in the tree.
//
// When ascending, the 'start' should be less than 'stop' and when descending,
// the 'start' should be greater than 'stop'. Setting 'includeStart' to true
// will force the iterator to include the first item when it equals 'start',
// thus creating "greaterOrEqual" or "lessThanEqual" rather than just
// "greaterThan" or "lessThan" queries.
func (n *node) iterate(dir direction, start, stop Item, includeStart bool, hit bool, iter ItemIterator) (bool, bool) {
	var ok bool
	switch dir {
	case ascend:
		for i := 0; i < len(n.items); i++ {
			if start != nil && n.items[i].Less(start) {
				continue
			}
			if len(n.children) > 0 {
				if hit, ok = n.children[i].iterate(dir, start, stop, includeStart, hit, iter); !ok {
					return hit, false
				}
			}
			if !includeStart && !hit && start != nil && !start.Less(n.items[i]) {
				hit = true
				continue
			}
			hit = true
			if stop != nil && !n.items[i].Less(stop) {
				return hit, false
			}
			if !iter(n.items[i]) {
				return hit, false
			}
		}
		if len(n.children) > 0 {
			if hit, ok = n.children[len(n.children)-1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
				return hit, false
			}
		}
	case descend:
		for i := len(n.items) - 1; i >= 0; i-- {
			if start != nil && !n.items[i].Less(start) {
				if !includeStart || hit || start.Less(n.items[i]) {
					continue
				}
			}
			if len(n.children) > 0 {
				if hit, ok = n.children[i+1].iterate(dir, start, stop, includeStart, hit, iter); !ok {
					return hit, false
				}
			}
			if stop != nil && !stop.Less(n.items[i]) {
				return hit, false // continue
			}
			hit = true
			if !iter(n.items[i]) {
				return hit, false
			}
		}
		if len(n.children) > 0 {
			if hit, ok = n.children[0].iterate(dir, start, stop, includeStart, hit, iter); !ok {
				return hit, false
			}
		}
	}
	return hit, true
}

// Used for testing/debugging purposes.
func (n *node) print(w io.Writer, level int) {
	fmt.Fprintf(w, "%sNODE:%v\n", strings.Repeat(" ", level), n.items)
	for _, c := range n.children {
		c.print(w, level+1)
	}
}

// BTree is an implementation of a B-Tree.
//
// BTree stores Item instances in an ordered structure, allowing easy insertion,
// removal, and iteration.
//
// Write operations are not safe for concurrent mutation by multiple
// goroutines, but Read operations are.
type BTree struct {
	degree int
	length int
	root   *node
	cow    *copyOnWriteContext
}

// copyOnWriteContext pointers determine node ownership... a tree with a write
// context equivalent to a node's write context is allowed to modify that node.
// A tree whose write context does not match a node's is not allowed to modify
// it, and must create a new, writable copy (i.e. it's a Clone).
//
// When doing any write operation, we maintain the invariant that the current
// node's context is equal to the context of the tree that requested the write.
// We do this by, before we descend into any node, creating a copy with the
// correct context if the contexts don't match.
//
// Since the node we're currently visiting on any write has the requesting
// tree's context, that node is modifiable in place. Children of that node may
// not share context, but before we descend into them, we'll make a mutable
// copy.
type copyOnWriteContext struct {
	freelist *FreeList
}

// Clone clones the btree, lazily. Clone should not be called concurrently,
// but the original tree (t) and the new tree (t2) can be used concurrently
// once the Clone call completes.
//
// The internal tree structure of t is marked read-only and shared between t and
// t2. Writes to both t and t2 use copy-on-write logic, creating new nodes
// whenever one of t's original nodes would have been modified. Read operations
// should have no performance degradation. Write operations for both t and t2
// will initially experience minor slow-downs caused by additional allocs and
// copies due to the aforementioned copy-on-write logic, but should converge to
// the original performance characteristics of the original tree.
func (t *BTree) Clone() (t2 *BTree) {
	// Create two entirely new copy-on-write contexts.
	// This operation effectively creates three trees:
	//   the original, shared nodes (old t.cow)
	//   the new t.cow nodes
	//   the new out.cow nodes
	cow1, cow2 := *t.cow, *t.cow
	out := *t
	t.cow = &cow1
	out.cow = &cow2
	return &out
}

// maxItems returns the max number of items to allow per node.
func (t *BTree) maxItems() int {
	return t.degree*2 - 1
}

// minItems returns the min number of items to allow per node (ignored for the
// root node).
func (t *BTree) minItems() int {
	return t.degree - 1
}

func (c *copyOnWriteContext) newNode() (n *node) {
	n = c.freelist.newNode()
	n.cow = c
	return
}

type freeType int

const (
	ftFreelistFull freeType = iota // node was freed (available for GC, not stored in freelist)
	ftStored                       // node was stored in the freelist for later use
	ftNotOwned                     // node was ignored by COW, since it's owned by another one
)

// freeNode frees a node within a given COW context, if it's owned by that
// context. It returns what happened to the node (see freeType const
// documentation).
func (c *copyOnWriteContext) freeNode(n *node) freeType {
	if n.cow == c {
		// clear to allow GC
		n.items.truncate(0)
		n.children.truncate(0)
		n.cow = nil
		if c.freelist.freeNode(n) {
			return ftStored
		} else {
			return ftFreelistFull
		}
	} else {
		return ftNotOwned
	}
}

// ReplaceOrInsert adds the given item to the tree. If an item in the tree
// already equals the given one, it is removed from the tree and returned.
// Otherwise, nil is returned.
//
// nil cannot be added to the tree (will panic).
func (t *BTree) ReplaceOrInsert(item Item) Item {
	if item == nil {
		panic("nil item being added to BTree")
	}
	if t.root == nil {
		t.root = t.cow.newNode()
		t.root.items = append(t.root.items, item)
		t.length++
		return nil
	} else {
		t.root = t.root.mutableFor(t.cow)
		if len(t.root.items) >= t.maxItems() {
			item2, second := t.root.split(t.maxItems() / 2)
			oldroot := t.root
			t.root = t.cow.newNode()
			t.root.items = append(t.root.items, item2)
			t.root.children = append(t.root.children, oldroot, second)
		}
	}
	out := t.root.insert(item, t.maxItems())
	if out == nil {
		t.length++
	}
	return out
}

// Delete removes an item equal to the passed in item from the tree, returning
// it. If no such item exists, returns nil.
func (t *BTree) Delete(item Item) Item {
	return t.deleteItem(item, removeItem)
}

// DeleteMin removes the smallest item in the tree and returns it.
// If no such item exists, returns nil.
func (t *BTree) DeleteMin() Item {
	return t.deleteItem(nil, removeMin)
}

// DeleteMax removes the largest item in the tree and returns it.
// If no such item exists, returns nil.
func (t *BTree) DeleteMax() Item {
	return t.deleteItem(nil, removeMax)
}

func (t *BTree) deleteItem(item Item, typ toRemove) Item {
	if t.root == nil || len(t.root.items) == 0 {
		return nil
	}
	t.root = t.root.mutableFor(t.cow)
	out := t.root.remove(item, t.minItems(), typ)
	if len(t.root.items) == 0 && len(t.root.children) > 0 {
		oldroot := t.root
		t.root = t.root.children[0]
		t.cow.freeNode(oldroot)
	}
	if out != nil {
		t.length--
	}
	return out
}

// AscendRange calls the iterator for every value in the tree within the range
// [greaterOrEqual, lessThan), until iterator returns false.
func (t *BTree) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
	if t.root == nil {
		return
	}
	t.root.iterate(ascend, greaterOrEqual, lessThan, true, false, iterator)
}

// AscendLessThan calls the iterator for every value in the tree within the range
// [first, pivot), until iterator returns false.
func (t *BTree) AscendLessThan(pivot Item, iterator ItemIterator) {
	if t.root == nil {
		return
	}
	t.root.iterate(ascend, nil, pivot, false, false, iterator)
}

// AscendGreaterOrEqual calls the iterator for every value in the tree within
// the range [pivot, last], until iterator returns false.
func (t *BTree) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
	if t.root == nil {
		return
	}
	t.root.iterate(ascend, pivot, nil, true, false, iterator)
}

// Ascend calls the iterator for every value in the tree within the range
// [first, last], until iterator returns false.
func (t *BTree) Ascend(iterator ItemIterator) {
	if t.root == nil {
		return
	}
	t.root.iterate(ascend, nil, nil, false, false, iterator)
}

// DescendRange calls the iterator for every value in the tree within the range
// [lessOrEqual, greaterThan), until iterator returns false.
func (t *BTree) DescendRange(lessOrEqual, greaterThan Item, iterator ItemIterator) {
	if t.root == nil {
		return
	}
	t.root.iterate(descend, lessOrEqual, greaterThan, true, false, iterator)
}

// DescendLessOrEqual calls the iterator for every value in the tree within the range
// [pivot, first], until iterator returns false.
func (t *BTree) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
	if t.root == nil {
		return
	}
	t.root.iterate(descend, pivot, nil, true, false, iterator)
}

// DescendGreaterThan calls the iterator for every value in the tree within
// the range (pivot, last], until iterator returns false.
func (t *BTree) DescendGreaterThan(pivot Item, iterator ItemIterator) {
	if t.root == nil {
		return
	}
	t.root.iterate(descend, nil, pivot, false, false, iterator)
}

// Descend calls the iterator for every value in the tree within the range
// [last, first], until iterator returns false.
func (t *BTree) Descend(iterator ItemIterator) {
	if t.root == nil {
		return
	}
	t.root.iterate(descend, nil, nil, false, false, iterator)
}

// Get looks for the key item in the tree, returning it. It returns nil if
// unable to find that item.
func (t *BTree) Get(key Item) Item {
	if t.root == nil {
		return nil
	}
	return t.root.get(key)
}

// Min returns the smallest item in the tree, or nil if the tree is empty.
func (t *BTree) Min() Item {
	return min(t.root)
}

// Max returns the largest item in the tree, or nil if the tree is empty.
func (t *BTree) Max() Item {
	return max(t.root)
}

// Has returns true if the given key is in the tree.
func (t *BTree) Has(key Item) bool {
	return t.Get(key) != nil
}

// Len returns the number of items currently in the tree.
func (t *BTree) Len() int {
	return t.length
}

// Clear removes all items from the btree. If addNodesToFreelist is true,
// t's nodes are added to its freelist as part of this call, until the freelist
// is full. Otherwise, the root node is simply dereferenced and the subtree
// left to Go's normal GC processes.
//
// This can be much faster
// than calling Delete on all elements, because that requires finding/removing
// each element in the tree and updating the tree accordingly. It also is
// somewhat faster than creating a new tree to replace the old one, because
// nodes from the old tree are reclaimed into the freelist for use by the new
// one, instead of being lost to the garbage collector.
//
// This call takes:
//   O(1): when addNodesToFreelist is false, this is a single operation.
//   O(1): when the freelist is already full, it breaks out immediately
//   O(freelist size): when the freelist is empty and the nodes are all owned
//       by this tree, nodes are added to the freelist until full.
//   O(tree size): when all nodes are owned by another tree, all nodes are
//       iterated over looking for nodes to add to the freelist, and due to
//       ownership, none are.
func (t *BTree) Clear(addNodesToFreelist bool) {
	if t.root != nil && addNodesToFreelist {
		t.root.reset(t.cow)
	}
	t.root, t.length = nil, 0
}

// reset returns a subtree to the freelist. It breaks out immediately if the
// freelist is full, since the only benefit of iterating is to fill that
// freelist up. Returns true if parent reset call should continue.
func (n *node) reset(c *copyOnWriteContext) bool {
	for _, child := range n.children {
		if !child.reset(c) {
			return false
		}
	}
	return c.freeNode(n) != ftFreelistFull
}

// Int implements the Item interface for integers.
type Int int

// Less returns true if int(a) < int(b).
func (a Int) Less(b Item) bool {
	return a < b.(Int)
}
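
For reviewers skimming this removal, a minimal usage sketch of the btree API deleted above. It is a hedged illustration, not part of the vendored source; it uses only identifiers visible in this diff (New appears in the benchmark file that follows, and Int, ReplaceOrInsert, Get, Delete, Len, and Ascend appear above).

package main

import (
	"fmt"

	"github.com/google/btree"
)

func main() {
	// Degree 2 means at most degree*2-1 = 3 items per node (see maxItems above).
	tr := btree.New(2)
	for i := 0; i < 10; i++ {
		// btree.Int satisfies the Item interface via its Less method.
		tr.ReplaceOrInsert(btree.Int(i))
	}
	fmt.Println(tr.Len())                // 10
	fmt.Println(tr.Get(btree.Int(3)))    // 3
	fmt.Println(tr.Delete(btree.Int(3))) // 3 (removed and returned)
	// Ascend walks items in sorted order until the iterator returns false.
	tr.Ascend(func(item btree.Item) bool {
		fmt.Print(item, " ")
		return true
	})
}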
@ -1,76 +0,0 @@
// Copyright 2014 Google Inc.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//     http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// +build ignore

// This binary compares memory usage between btree and gollrb.
package main

import (
	"flag"
	"fmt"
	"math/rand"
	"runtime"
	"time"

	"github.com/google/btree"
	"github.com/petar/GoLLRB/llrb"
)

var (
	size   = flag.Int("size", 1000000, "size of the tree to build")
	degree = flag.Int("degree", 8, "degree of btree")
	gollrb = flag.Bool("llrb", false, "use llrb instead of btree")
)

func main() {
	flag.Parse()
	vals := rand.Perm(*size)
	var t, v interface{}
	v = vals
	var stats runtime.MemStats
	for i := 0; i < 10; i++ {
		runtime.GC()
	}
	fmt.Println("-------- BEFORE ----------")
	runtime.ReadMemStats(&stats)
	fmt.Printf("%+v\n", stats)
	start := time.Now()
	if *gollrb {
		tr := llrb.New()
		for _, v := range vals {
			tr.ReplaceOrInsert(llrb.Int(v))
		}
		t = tr // keep it around
	} else {
		tr := btree.New(*degree)
		for _, v := range vals {
			tr.ReplaceOrInsert(btree.Int(v))
		}
		t = tr // keep it around
	}
	fmt.Printf("%v inserts in %v\n", *size, time.Since(start))
	fmt.Println("-------- AFTER ----------")
	runtime.ReadMemStats(&stats)
	fmt.Printf("%+v\n", stats)
	for i := 0; i < 10; i++ {
		runtime.GC()
	}
	fmt.Println("-------- AFTER GC ----------")
	runtime.ReadMemStats(&stats)
	fmt.Printf("%+v\n", stats)
	if t == v {
		fmt.Println("to make sure vals and tree aren't GC'd")
	}
}

@ -1,7 +0,0 @@
Copyright © 2012 Greg Jones (greg.jones@gmail.com)

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the “Software”), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED “AS IS”, WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

@ -1,61 +0,0 @@
// Package diskcache provides an implementation of httpcache.Cache that uses the diskv package
// to supplement an in-memory map with persistent storage
//
package diskcache

import (
	"bytes"
	"crypto/md5"
	"encoding/hex"
	"io"

	"github.com/peterbourgon/diskv"
)

// Cache is an implementation of httpcache.Cache that supplements the in-memory map with persistent storage
type Cache struct {
	d *diskv.Diskv
}

// Get returns the response corresponding to key if present
func (c *Cache) Get(key string) (resp []byte, ok bool) {
	key = keyToFilename(key)
	resp, err := c.d.Read(key)
	if err != nil {
		return []byte{}, false
	}
	return resp, true
}

// Set saves a response to the cache as key
func (c *Cache) Set(key string, resp []byte) {
	key = keyToFilename(key)
	c.d.WriteStream(key, bytes.NewReader(resp), true)
}

// Delete removes the response with key from the cache
func (c *Cache) Delete(key string) {
	key = keyToFilename(key)
	c.d.Erase(key)
}

func keyToFilename(key string) string {
	h := md5.New()
	io.WriteString(h, key)
	return hex.EncodeToString(h.Sum(nil))
}

// New returns a new Cache that will store files in basePath
func New(basePath string) *Cache {
	return &Cache{
		d: diskv.New(diskv.Options{
			BasePath:     basePath,
			CacheSizeMax: 100 * 1024 * 1024, // 100MB
		}),
	}
}

// NewWithDiskv returns a new Cache using the provided Diskv as underlying
// storage.
func NewWithDiskv(d *diskv.Diskv) *Cache {
	return &Cache{d}
}
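
A brief, hedged sketch of how this diskcache package was typically combined with the httpcache package deleted below (both import paths match the Gopkg.lock entries removed in this PR); the directory path and URL are placeholders, not from the source.

package main

import (
	"fmt"
	"io"
	"io/ioutil"

	"github.com/gregjones/httpcache"
	"github.com/gregjones/httpcache/diskcache"
)

func main() {
	// Keys are md5-hashed into filenames under the base path (see keyToFilename above).
	cache := diskcache.New("/tmp/httpcache-demo") // placeholder directory
	client := httpcache.NewTransport(cache).Client()

	resp, err := client.Get("https://example.com/") // placeholder URL
	if err != nil {
		panic(err)
	}
	// A GET response is only written to the cache once its body has been read
	// to EOF (see cachingReadCloser below), so drain it before closing.
	io.Copy(ioutil.Discard, resp.Body)
	resp.Body.Close()
	fmt.Println(resp.Header.Get(httpcache.XFromCache))
}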

@ -1,551 +0,0 @@
// Package httpcache provides a http.RoundTripper implementation that works as a
// mostly RFC-compliant cache for http responses.
//
// It is only suitable for use as a 'private' cache (i.e. for a web-browser or an API-client
// and not for a shared proxy).
//
package httpcache

import (
	"bufio"
	"bytes"
	"errors"
	"io"
	"io/ioutil"
	"net/http"
	"net/http/httputil"
	"strings"
	"sync"
	"time"
)

const (
	stale = iota
	fresh
	transparent
	// XFromCache is the header added to responses that are returned from the cache
	XFromCache = "X-From-Cache"
)

// A Cache interface is used by the Transport to store and retrieve responses.
type Cache interface {
	// Get returns the []byte representation of a cached response and a bool
	// set to true if the value isn't empty
	Get(key string) (responseBytes []byte, ok bool)
	// Set stores the []byte representation of a response against a key
	Set(key string, responseBytes []byte)
	// Delete removes the value associated with the key
	Delete(key string)
}

// cacheKey returns the cache key for req.
func cacheKey(req *http.Request) string {
	if req.Method == http.MethodGet {
		return req.URL.String()
	} else {
		return req.Method + " " + req.URL.String()
	}
}

// CachedResponse returns the cached http.Response for req if present, and nil
// otherwise.
func CachedResponse(c Cache, req *http.Request) (resp *http.Response, err error) {
	cachedVal, ok := c.Get(cacheKey(req))
	if !ok {
		return
	}

	b := bytes.NewBuffer(cachedVal)
	return http.ReadResponse(bufio.NewReader(b), req)
}

// MemoryCache is an implementation of Cache that stores responses in an in-memory map.
type MemoryCache struct {
	mu    sync.RWMutex
	items map[string][]byte
}

// Get returns the []byte representation of the response and true if present, false if not
func (c *MemoryCache) Get(key string) (resp []byte, ok bool) {
	c.mu.RLock()
	resp, ok = c.items[key]
	c.mu.RUnlock()
	return resp, ok
}

// Set saves response resp to the cache with key
func (c *MemoryCache) Set(key string, resp []byte) {
	c.mu.Lock()
	c.items[key] = resp
	c.mu.Unlock()
}

// Delete removes key from the cache
func (c *MemoryCache) Delete(key string) {
	c.mu.Lock()
	delete(c.items, key)
	c.mu.Unlock()
}

// NewMemoryCache returns a new Cache that will store items in an in-memory map
func NewMemoryCache() *MemoryCache {
	c := &MemoryCache{items: map[string][]byte{}}
	return c
}

// Transport is an implementation of http.RoundTripper that will return values from a cache
// where possible (avoiding a network request) and will additionally add validators (etag/if-modified-since)
// to repeated requests allowing servers to return 304 / Not Modified
type Transport struct {
	// The RoundTripper interface actually used to make requests
	// If nil, http.DefaultTransport is used
	Transport http.RoundTripper
	Cache     Cache
	// If true, responses returned from the cache will be given an extra header, X-From-Cache
	MarkCachedResponses bool
}

// NewTransport returns a new Transport with the
// provided Cache implementation and MarkCachedResponses set to true
func NewTransport(c Cache) *Transport {
	return &Transport{Cache: c, MarkCachedResponses: true}
}

// Client returns an *http.Client that caches responses.
func (t *Transport) Client() *http.Client {
	return &http.Client{Transport: t}
}

// varyMatches will return false unless all of the cached values for the headers listed in Vary
// match the new request
func varyMatches(cachedResp *http.Response, req *http.Request) bool {
	for _, header := range headerAllCommaSepValues(cachedResp.Header, "vary") {
		header = http.CanonicalHeaderKey(header)
		if header != "" && req.Header.Get(header) != cachedResp.Header.Get("X-Varied-"+header) {
			return false
		}
	}
	return true
}

// RoundTrip takes a Request and returns a Response
//
// If there is a fresh Response already in cache, then it will be returned without connecting to
// the server.
//
// If there is a stale Response, then any validators it contains will be set on the new request
// to give the server a chance to respond with NotModified. If this happens, then the cached Response
// will be returned.
func (t *Transport) RoundTrip(req *http.Request) (resp *http.Response, err error) {
	cacheKey := cacheKey(req)
	cacheable := (req.Method == "GET" || req.Method == "HEAD") && req.Header.Get("range") == ""
	var cachedResp *http.Response
	if cacheable {
		cachedResp, err = CachedResponse(t.Cache, req)
	} else {
		// Need to invalidate an existing value
		t.Cache.Delete(cacheKey)
	}

	transport := t.Transport
	if transport == nil {
		transport = http.DefaultTransport
	}

	if cacheable && cachedResp != nil && err == nil {
		if t.MarkCachedResponses {
			cachedResp.Header.Set(XFromCache, "1")
		}

		if varyMatches(cachedResp, req) {
			// Can only use cached value if the new request doesn't Vary significantly
			freshness := getFreshness(cachedResp.Header, req.Header)
			if freshness == fresh {
				return cachedResp, nil
			}

			if freshness == stale {
				var req2 *http.Request
				// Add validators if caller hasn't already done so
				etag := cachedResp.Header.Get("etag")
				if etag != "" && req.Header.Get("etag") == "" {
					req2 = cloneRequest(req)
					req2.Header.Set("if-none-match", etag)
				}
				lastModified := cachedResp.Header.Get("last-modified")
				if lastModified != "" && req.Header.Get("last-modified") == "" {
					if req2 == nil {
						req2 = cloneRequest(req)
					}
					req2.Header.Set("if-modified-since", lastModified)
				}
				if req2 != nil {
					req = req2
				}
			}
		}

		resp, err = transport.RoundTrip(req)
		if err == nil && req.Method == "GET" && resp.StatusCode == http.StatusNotModified {
			// Replace the 304 response with the one from cache, but update with some new headers
			endToEndHeaders := getEndToEndHeaders(resp.Header)
			for _, header := range endToEndHeaders {
				cachedResp.Header[header] = resp.Header[header]
			}
			resp = cachedResp
		} else if (err != nil || (cachedResp != nil && resp.StatusCode >= 500)) &&
			req.Method == "GET" && canStaleOnError(cachedResp.Header, req.Header) {
			// In case of transport failure and stale-if-error activated, returns cached content
			// when available
			return cachedResp, nil
		} else {
			if err != nil || resp.StatusCode != http.StatusOK {
				t.Cache.Delete(cacheKey)
			}
			if err != nil {
				return nil, err
			}
		}
	} else {
		reqCacheControl := parseCacheControl(req.Header)
		if _, ok := reqCacheControl["only-if-cached"]; ok {
			resp = newGatewayTimeoutResponse(req)
		} else {
			resp, err = transport.RoundTrip(req)
			if err != nil {
				return nil, err
			}
		}
	}

	if cacheable && canStore(parseCacheControl(req.Header), parseCacheControl(resp.Header)) {
		for _, varyKey := range headerAllCommaSepValues(resp.Header, "vary") {
			varyKey = http.CanonicalHeaderKey(varyKey)
			fakeHeader := "X-Varied-" + varyKey
			reqValue := req.Header.Get(varyKey)
			if reqValue != "" {
				resp.Header.Set(fakeHeader, reqValue)
			}
		}
		switch req.Method {
		case "GET":
			// Delay caching until EOF is reached.
			resp.Body = &cachingReadCloser{
				R: resp.Body,
				OnEOF: func(r io.Reader) {
					resp := *resp
					resp.Body = ioutil.NopCloser(r)
					respBytes, err := httputil.DumpResponse(&resp, true)
					if err == nil {
						t.Cache.Set(cacheKey, respBytes)
					}
				},
			}
		default:
			respBytes, err := httputil.DumpResponse(resp, true)
			if err == nil {
				t.Cache.Set(cacheKey, respBytes)
			}
		}
	} else {
		t.Cache.Delete(cacheKey)
	}
	return resp, nil
}

// ErrNoDateHeader indicates that the HTTP headers contained no Date header.
var ErrNoDateHeader = errors.New("no Date header")

// Date parses and returns the value of the Date header.
func Date(respHeaders http.Header) (date time.Time, err error) {
	dateHeader := respHeaders.Get("date")
	if dateHeader == "" {
		err = ErrNoDateHeader
		return
	}

	return time.Parse(time.RFC1123, dateHeader)
}

type realClock struct{}

func (c *realClock) since(d time.Time) time.Duration {
	return time.Since(d)
}

type timer interface {
	since(d time.Time) time.Duration
}

var clock timer = &realClock{}

// getFreshness will return one of fresh/stale/transparent based on the cache-control
// values of the request and the response
//
// fresh indicates the response can be returned
// stale indicates that the response needs validating before it is returned
// transparent indicates the response should not be used to fulfil the request
//
// Because this is only a private cache, 'public' and 'private' in cache-control aren't
// significant. Similarly, s-maxage isn't used.
func getFreshness(respHeaders, reqHeaders http.Header) (freshness int) {
	respCacheControl := parseCacheControl(respHeaders)
	reqCacheControl := parseCacheControl(reqHeaders)
	if _, ok := reqCacheControl["no-cache"]; ok {
		return transparent
	}
	if _, ok := respCacheControl["no-cache"]; ok {
		return stale
	}
	if _, ok := reqCacheControl["only-if-cached"]; ok {
		return fresh
	}

	date, err := Date(respHeaders)
	if err != nil {
		return stale
	}
	currentAge := clock.since(date)

	var lifetime time.Duration
	var zeroDuration time.Duration

	// If a response includes both an Expires header and a max-age directive,
	// the max-age directive overrides the Expires header, even if the Expires header is more restrictive.
	if maxAge, ok := respCacheControl["max-age"]; ok {
		lifetime, err = time.ParseDuration(maxAge + "s")
		if err != nil {
			lifetime = zeroDuration
		}
	} else {
		expiresHeader := respHeaders.Get("Expires")
		if expiresHeader != "" {
			expires, err := time.Parse(time.RFC1123, expiresHeader)
			if err != nil {
				lifetime = zeroDuration
			} else {
				lifetime = expires.Sub(date)
			}
		}
	}

	if maxAge, ok := reqCacheControl["max-age"]; ok {
		// the client is willing to accept a response whose age is no greater than the specified time in seconds
		lifetime, err = time.ParseDuration(maxAge + "s")
		if err != nil {
			lifetime = zeroDuration
		}
	}
	if minfresh, ok := reqCacheControl["min-fresh"]; ok {
		// the client wants a response that will still be fresh for at least the specified number of seconds.
		minfreshDuration, err := time.ParseDuration(minfresh + "s")
		if err == nil {
			currentAge = time.Duration(currentAge + minfreshDuration)
		}
	}

	if maxstale, ok := reqCacheControl["max-stale"]; ok {
		// Indicates that the client is willing to accept a response that has exceeded its expiration time.
		// If max-stale is assigned a value, then the client is willing to accept a response that has exceeded
		// its expiration time by no more than the specified number of seconds.
		// If no value is assigned to max-stale, then the client is willing to accept a stale response of any age.
		//
		// Responses served only because of a max-stale value are supposed to have a Warning header added to them,
		// but that seems like a hassle, and is it actually useful? If so, then there needs to be a different
		// return-value available here.
		if maxstale == "" {
			return fresh
		}
		maxstaleDuration, err := time.ParseDuration(maxstale + "s")
		if err == nil {
			currentAge = time.Duration(currentAge - maxstaleDuration)
		}
	}

	if lifetime > currentAge {
		return fresh
	}

	return stale
}

// Returns true if either the request or the response includes the stale-if-error
// cache control extension: https://tools.ietf.org/html/rfc5861
func canStaleOnError(respHeaders, reqHeaders http.Header) bool {
	respCacheControl := parseCacheControl(respHeaders)
	reqCacheControl := parseCacheControl(reqHeaders)

	var err error
	lifetime := time.Duration(-1)

	if staleMaxAge, ok := respCacheControl["stale-if-error"]; ok {
		if staleMaxAge != "" {
			lifetime, err = time.ParseDuration(staleMaxAge + "s")
			if err != nil {
				return false
			}
		} else {
			return true
		}
	}
	if staleMaxAge, ok := reqCacheControl["stale-if-error"]; ok {
		if staleMaxAge != "" {
			lifetime, err = time.ParseDuration(staleMaxAge + "s")
			if err != nil {
				return false
			}
		} else {
			return true
		}
	}

	if lifetime >= 0 {
		date, err := Date(respHeaders)
		if err != nil {
			return false
		}
		currentAge := clock.since(date)
		if lifetime > currentAge {
			return true
		}
	}

	return false
}

func getEndToEndHeaders(respHeaders http.Header) []string {
	// These headers are always hop-by-hop
	hopByHopHeaders := map[string]struct{}{
		"Connection":          struct{}{},
		"Keep-Alive":          struct{}{},
		"Proxy-Authenticate":  struct{}{},
		"Proxy-Authorization": struct{}{},
		"Te":                  struct{}{},
		"Trailers":            struct{}{},
		"Transfer-Encoding":   struct{}{},
		"Upgrade":             struct{}{},
	}

	for _, extra := range strings.Split(respHeaders.Get("connection"), ",") {
		// any header listed in connection, if present, is also considered hop-by-hop
		if strings.Trim(extra, " ") != "" {
			hopByHopHeaders[http.CanonicalHeaderKey(extra)] = struct{}{}
		}
	}
	endToEndHeaders := []string{}
	for respHeader := range respHeaders {
		if _, ok := hopByHopHeaders[respHeader]; !ok {
			endToEndHeaders = append(endToEndHeaders, respHeader)
		}
	}
	return endToEndHeaders
}

func canStore(reqCacheControl, respCacheControl cacheControl) (canStore bool) {
	if _, ok := respCacheControl["no-store"]; ok {
		return false
	}
	if _, ok := reqCacheControl["no-store"]; ok {
		return false
	}
	return true
}

func newGatewayTimeoutResponse(req *http.Request) *http.Response {
	var braw bytes.Buffer
	braw.WriteString("HTTP/1.1 504 Gateway Timeout\r\n\r\n")
	resp, err := http.ReadResponse(bufio.NewReader(&braw), req)
	if err != nil {
		panic(err)
	}
	return resp
}

// cloneRequest returns a clone of the provided *http.Request.
// The clone is a shallow copy of the struct and its Header map.
// (This function copyright goauth2 authors: https://code.google.com/p/goauth2)
func cloneRequest(r *http.Request) *http.Request {
	// shallow copy of the struct
	r2 := new(http.Request)
	*r2 = *r
	// deep copy of the Header
	r2.Header = make(http.Header)
	for k, s := range r.Header {
		r2.Header[k] = s
	}
	return r2
}

type cacheControl map[string]string

func parseCacheControl(headers http.Header) cacheControl {
	cc := cacheControl{}
	ccHeader := headers.Get("Cache-Control")
	for _, part := range strings.Split(ccHeader, ",") {
		part = strings.Trim(part, " ")
		if part == "" {
			continue
		}
		if strings.ContainsRune(part, '=') {
			keyval := strings.Split(part, "=")
			cc[strings.Trim(keyval[0], " ")] = strings.Trim(keyval[1], ",")
		} else {
			cc[part] = ""
		}
	}
	return cc
}

// headerAllCommaSepValues returns all comma-separated values (each
// with whitespace trimmed) for header name in headers. According to
// Section 4.2 of the HTTP/1.1 spec
// (http://www.w3.org/Protocols/rfc2616/rfc2616-sec4.html#sec4.2),
// values from multiple occurrences of a header should be concatenated, if
// the header's value is a comma-separated list.
func headerAllCommaSepValues(headers http.Header, name string) []string {
	var vals []string
	for _, val := range headers[http.CanonicalHeaderKey(name)] {
		fields := strings.Split(val, ",")
		for i, f := range fields {
			fields[i] = strings.TrimSpace(f)
		}
		vals = append(vals, fields...)
	}
	return vals
}

// cachingReadCloser is a wrapper around ReadCloser R that calls OnEOF
// handler with a full copy of the content read from R when EOF is
// reached.
type cachingReadCloser struct {
	// Underlying ReadCloser.
	R io.ReadCloser
	// OnEOF is called with a copy of the content of R when EOF is reached.
	OnEOF func(io.Reader)

	buf bytes.Buffer // buf stores a copy of the content of R.
}

// Read reads the next len(p) bytes from R or until R is drained. The
// return value n is the number of bytes read. If R has no data to
// return, err is io.EOF and OnEOF is called with a full copy of what
// has been read so far.
func (r *cachingReadCloser) Read(p []byte) (n int, err error) {
	n, err = r.R.Read(p)
	r.buf.Write(p[:n])
	if err == io.EOF {
		r.OnEOF(bytes.NewReader(r.buf.Bytes()))
	}
	return n, err
}

func (r *cachingReadCloser) Close() error {
	return r.R.Close()
}

// NewMemoryCacheTransport returns a new Transport using the in-memory cache implementation
func NewMemoryCacheTransport() *Transport {
	c := NewMemoryCache()
	t := NewTransport(c)
	return t
}
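
To round out the picture of the API being dropped, a small hypothetical example of the in-memory variant. Assuming the response is cacheable and still fresh, the second request is served from the cache and marked with the X-From-Cache header; the URL is a placeholder.

package main

import (
	"fmt"
	"io"
	"io/ioutil"
	"net/http"

	"github.com/gregjones/httpcache"
)

func main() {
	t := httpcache.NewMemoryCacheTransport() // MarkCachedResponses defaults to true here
	client := &http.Client{Transport: t}
	for i := 0; i < 2; i++ {
		resp, err := client.Get("https://example.com/") // placeholder URL
		if err != nil {
			panic(err)
		}
		io.Copy(ioutil.Discard, resp.Body) // read to EOF so the response gets cached
		resp.Body.Close()
		// Empty on the first pass; "1" if the second response came from the cache.
		fmt.Println(resp.Header.Get(httpcache.XFromCache))
	}
}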

@ -81,12 +81,10 @@ func (adapter *Decoder) More() bool {
	if iter.Error != nil {
		return false
	}
	c := iter.nextToken()
	if c == 0 {
		return false
	if iter.head != iter.tail {
		return true
	}
	iter.unreadByte()
	return c != ']' && c != '}'
	return iter.loadMore()
}

// Buffered remaining buffer

@ -100,7 +98,7 @@ func (adapter *Decoder) Buffered() io.Reader {
func (adapter *Decoder) UseNumber() {
	cfg := adapter.iter.cfg.configBeforeFrozen
	cfg.UseNumber = true
	adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
	adapter.iter.cfg = cfg.frozeWithCacheReuse()
}

// DisallowUnknownFields causes the Decoder to return an error when the destination

@ -109,7 +107,7 @@ func (adapter *Decoder) UseNumber() {
func (adapter *Decoder) DisallowUnknownFields() {
	cfg := adapter.iter.cfg.configBeforeFrozen
	cfg.DisallowUnknownFields = true
	adapter.iter.cfg = cfg.frozeWithCacheReuse(adapter.iter.cfg.extraExtensions)
	adapter.iter.cfg = cfg.frozeWithCacheReuse()
}

// NewEncoder same as json.NewEncoder

@ -134,14 +132,14 @@ func (adapter *Encoder) Encode(val interface{}) error {
func (adapter *Encoder) SetIndent(prefix, indent string) {
	config := adapter.stream.cfg.configBeforeFrozen
	config.IndentionStep = len(indent)
	adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
	adapter.stream.cfg = config.frozeWithCacheReuse()
}

// SetEscapeHTML escape html by default, set to false to disable
func (adapter *Encoder) SetEscapeHTML(escapeHTML bool) {
	config := adapter.stream.cfg.configBeforeFrozen
	config.EscapeHTML = escapeHTML
	adapter.stream.cfg = config.frozeWithCacheReuse(adapter.stream.cfg.extraExtensions)
	adapter.stream.cfg = config.frozeWithCacheReuse()
}

// Valid reports whether data is a valid JSON encoding.

@ -74,9 +74,7 @@ type frozenConfig struct {
	disallowUnknownFields bool
	decoderCache          *concurrent.Map
	encoderCache          *concurrent.Map
	encoderExtension      Extension
	decoderExtension      Extension
	extraExtensions       []Extension
	extensions            []Extension
	streamPool            *sync.Pool
	iteratorPool          *sync.Pool
	caseSensitive         bool

@ -160,21 +158,22 @@ func (cfg Config) Froze() API {
	if cfg.ValidateJsonRawMessage {
		api.validateJsonRawMessage(encoderExtension)
	}
	api.encoderExtension = encoderExtension
	api.decoderExtension = decoderExtension
	if len(encoderExtension) > 0 {
		api.extensions = append(api.extensions, encoderExtension)
	}
	if len(decoderExtension) > 0 {
		api.extensions = append(api.extensions, decoderExtension)
	}
	api.configBeforeFrozen = cfg
	return api
}

func (cfg Config) frozeWithCacheReuse(extraExtensions []Extension) *frozenConfig {
func (cfg Config) frozeWithCacheReuse() *frozenConfig {
	api := getFrozenConfigFromCache(cfg)
	if api != nil {
		return api
	}
	api = cfg.Froze().(*frozenConfig)
	for _, extension := range extraExtensions {
		api.RegisterExtension(extension)
	}
	addFrozenConfigToCache(cfg, api)
	return api
}

@ -191,7 +190,7 @@ func (cfg *frozenConfig) validateJsonRawMessage(extension EncoderExtension) {
		stream.WriteRaw(string(rawMessage))
	}
}, func(ptr unsafe.Pointer) bool {
	return len(*((*json.RawMessage)(ptr))) == 0
	return false
}}
extension[reflect2.TypeOfPtr((*json.RawMessage)(nil)).Elem()] = encoder
extension[reflect2.TypeOfPtr((*RawMessage)(nil)).Elem()] = encoder

@ -220,9 +219,7 @@ func (cfg *frozenConfig) getTagKey() string {
}

func (cfg *frozenConfig) RegisterExtension(extension Extension) {
	cfg.extraExtensions = append(cfg.extraExtensions, extension)
	copied := cfg.configBeforeFrozen
	cfg.configBeforeFrozen = copied
	cfg.extensions = append(cfg.extensions, extension)
}

type lossyFloat32Encoder struct {

@ -317,7 +314,7 @@ func (cfg *frozenConfig) MarshalIndent(v interface{}, prefix, indent string) ([]
	}
	newCfg := cfg.configBeforeFrozen
	newCfg.IndentionStep = len(indent)
	return newCfg.frozeWithCacheReuse(cfg.extraExtensions).Marshal(v)
	return newCfg.frozeWithCacheReuse().Marshal(v)
}

func (cfg *frozenConfig) UnmarshalFromString(str string, v interface{}) error {

@ -2,7 +2,7 @@ package jsoniter

import (
	"fmt"
	"strings"
	"unicode"
)

// ReadObject read one field from object.

@ -96,12 +96,13 @@ func (iter *Iterator) readFieldHash() int64 {
}

func calcHash(str string, caseSensitive bool) int64 {
	if !caseSensitive {
		str = strings.ToLower(str)
	}
	hash := int64(0x811c9dc5)
	for _, b := range []byte(str) {
		hash ^= int64(b)
	for _, b := range str {
		if caseSensitive {
			hash ^= int64(b)
		} else {
			hash ^= int64(unicode.ToLower(b))
		}
		hash *= 0x1000193
	}
	return int64(hash)

@ -120,8 +120,7 @@ func decoderOfType(ctx *ctx, typ reflect2.Type) ValDecoder {
	for _, extension := range extensions {
		decoder = extension.DecorateDecoder(typ, decoder)
	}
	decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
	for _, extension := range ctx.extraExtensions {
	for _, extension := range ctx.extensions {
		decoder = extension.DecorateDecoder(typ, decoder)
	}
	return decoder

@ -223,8 +222,7 @@ func encoderOfType(ctx *ctx, typ reflect2.Type) ValEncoder {
	for _, extension := range extensions {
		encoder = extension.DecorateEncoder(typ, encoder)
	}
	encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
	for _, extension := range ctx.extraExtensions {
	for _, extension := range ctx.extensions {
		encoder = extension.DecorateEncoder(typ, encoder)
	}
	return encoder

@ -246,8 +246,7 @@ func getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
	for _, extension := range extensions {
		decoder = extension.DecorateDecoder(typ, decoder)
	}
	decoder = ctx.decoderExtension.DecorateDecoder(typ, decoder)
	for _, extension := range ctx.extraExtensions {
	for _, extension := range ctx.extensions {
		decoder = extension.DecorateDecoder(typ, decoder)
	}
}

@ -260,18 +259,14 @@ func _getTypeDecoderFromExtension(ctx *ctx, typ reflect2.Type) ValDecoder {
			return decoder
		}
	}
	decoder := ctx.decoderExtension.CreateDecoder(typ)
	if decoder != nil {
		return decoder
	}
	for _, extension := range ctx.extraExtensions {
	for _, extension := range ctx.extensions {
		decoder := extension.CreateDecoder(typ)
		if decoder != nil {
			return decoder
		}
	}
	typeName := typ.String()
	decoder = typeDecoders[typeName]
	decoder := typeDecoders[typeName]
	if decoder != nil {
		return decoder
	}

@ -291,8 +286,7 @@ func getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
	for _, extension := range extensions {
		encoder = extension.DecorateEncoder(typ, encoder)
	}
	encoder = ctx.encoderExtension.DecorateEncoder(typ, encoder)
	for _, extension := range ctx.extraExtensions {
	for _, extension := range ctx.extensions {
		encoder = extension.DecorateEncoder(typ, encoder)
	}
}

@ -306,18 +300,14 @@ func _getTypeEncoderFromExtension(ctx *ctx, typ reflect2.Type) ValEncoder {
			return encoder
		}
	}
	encoder := ctx.encoderExtension.CreateEncoder(typ)
	if encoder != nil {
		return encoder
	}
	for _, extension := range ctx.extraExtensions {
	for _, extension := range ctx.extensions {
		encoder := extension.CreateEncoder(typ)
		if encoder != nil {
			return encoder
		}
	}
	typeName := typ.String()
	encoder = typeEncoders[typeName]
	encoder := typeEncoders[typeName]
	if encoder != nil {
		return encoder
	}

@ -403,9 +393,7 @@ func createStructDescriptor(ctx *ctx, typ reflect2.Type, bindings []*Binding, em
	for _, extension := range extensions {
		extension.UpdateStructDescriptor(structDescriptor)
	}
	ctx.encoderExtension.UpdateStructDescriptor(structDescriptor)
	ctx.decoderExtension.UpdateStructDescriptor(structDescriptor)
	for _, extension := range ctx.extraExtensions {
	for _, extension := range ctx.extensions {
		extension.UpdateStructDescriptor(structDescriptor)
	}
	processTags(structDescriptor, ctx.frozenConfig)

@ -39,11 +39,7 @@ func encoderOfMap(ctx *ctx, typ reflect2.Type) ValEncoder {
}

func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
	decoder := ctx.decoderExtension.CreateMapKeyDecoder(typ)
	if decoder != nil {
		return decoder
	}
	for _, extension := range ctx.extraExtensions {
	for _, extension := range ctx.extensions {
		decoder := extension.CreateMapKeyDecoder(typ)
		if decoder != nil {
			return decoder

@ -81,11 +77,7 @@ func decoderOfMapKey(ctx *ctx, typ reflect2.Type) ValDecoder {
}

func encoderOfMapKey(ctx *ctx, typ reflect2.Type) ValEncoder {
	encoder := ctx.encoderExtension.CreateMapKeyEncoder(typ)
	if encoder != nil {
		return encoder
	}
	for _, extension := range ctx.extraExtensions {
	for _, extension := range ctx.extensions {
		encoder := extension.CreateMapKeyEncoder(typ)
		if encoder != nil {
			return encoder

@ -0,0 +1,27 @@
Copyright (c) 2009 The Go Authors. All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

   * Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
   * Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following disclaimer
in the documentation and/or other materials provided with the
distribution.
   * Neither the name of Google Inc. nor the names of its
contributors may be used to endorse or promote products derived from
this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@ -0,0 +1,637 @@
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package tabwriter implements a write filter (tabwriter.Writer) that
// translates tabbed columns in input into properly aligned text.
//
// It is a drop-in replacement for the golang text/tabwriter package (https://golang.org/pkg/text/tabwriter),
// based on that package at https://github.com/golang/go/tree/cf2c2ea89d09d486bb018b1817c5874388038c3a
// with support for additional features.
//
// The package is using the Elastic Tabstops algorithm described at
// http://nickgravgaard.com/elastictabstops/index.html.
package tabwriter

import (
	"io"
	"unicode/utf8"
)

// ----------------------------------------------------------------------------
// Filter implementation

// A cell represents a segment of text terminated by tabs or line breaks.
// The text itself is stored in a separate buffer; cell only describes the
// segment's size in bytes, its width in runes, and whether it's an htab
// ('\t') terminated cell.
//
type cell struct {
	size  int  // cell size in bytes
	width int  // cell width in runes
	htab  bool // true if the cell is terminated by an htab ('\t')
}

// A Writer is a filter that inserts padding around tab-delimited
// columns in its input to align them in the output.
//
// The Writer treats incoming bytes as UTF-8-encoded text consisting
// of cells terminated by horizontal ('\t') or vertical ('\v') tabs,
// and newline ('\n') or formfeed ('\f') characters; both newline and
// formfeed act as line breaks.
//
// Tab-terminated cells in contiguous lines constitute a column. The
// Writer inserts padding as needed to make all cells in a column have
// the same width, effectively aligning the columns. It assumes that
// all characters have the same width, except for tabs for which a
// tabwidth must be specified. Column cells must be tab-terminated, not
// tab-separated: non-tab terminated trailing text at the end of a line
// forms a cell but that cell is not part of an aligned column.
// For instance, in this example (where | stands for a horizontal tab):
//
//	aaaa|bbb|d
//	aa  |b  |dd
//	a   |
//	aa  |cccc|eee
//
// the b and c are in distinct columns (the b column is not contiguous
// all the way). The d and e are not in a column at all (there's no
// terminating tab, nor would the column be contiguous).
//
// The Writer assumes that all Unicode code points have the same width;
// this may not be true in some fonts or if the string contains combining
// characters.
//
// If DiscardEmptyColumns is set, empty columns that are terminated
// entirely by vertical (or "soft") tabs are discarded. Columns
// terminated by horizontal (or "hard") tabs are not affected by
// this flag.
//
// If a Writer is configured to filter HTML, HTML tags and entities
// are passed through. The widths of tags and entities are
// assumed to be zero (tags) and one (entities) for formatting purposes.
//
// A segment of text may be escaped by bracketing it with Escape
// characters. The tabwriter passes escaped text segments through
// unchanged. In particular, it does not interpret any tabs or line
// breaks within the segment. If the StripEscape flag is set, the
// Escape characters are stripped from the output; otherwise they
// are passed through as well. For the purpose of formatting, the
// width of the escaped text is always computed excluding the Escape
// characters.
//
// The formfeed character acts like a newline but it also terminates
// all columns in the current line (effectively calling Flush). Tab-
// terminated cells in the next line start new columns. Unless found
// inside an HTML tag or inside an escaped text segment, formfeed
// characters appear as newlines in the output.
//
// The Writer must buffer input internally, because proper spacing
// of one line may depend on the cells in future lines. Clients must
// call Flush when done calling Write.
//
type Writer struct {
	// configuration
	output   io.Writer
	minwidth int
	tabwidth int
	padding  int
	padbytes [8]byte
	flags    uint

	// current state
	buf     []byte   // collected text excluding tabs or line breaks
	pos     int      // buffer position up to which cell.width of incomplete cell has been computed
	cell    cell     // current incomplete cell; cell.width is up to buf[pos] excluding ignored sections
	endChar byte     // terminating char of escaped sequence (Escape for escapes, '>', ';' for HTML tags/entities, or 0)
	lines   [][]cell // list of lines; each line is a list of cells
	widths  []int    // list of column widths in runes - re-used during formatting

	maxwidths []int // list of max column widths in runes
}

// addLine adds a new line.
// flushed is a hint indicating whether the underlying writer was just flushed.
// If so, the previous line is not likely to be a good indicator of the new line's cells.
func (b *Writer) addLine(flushed bool) {
	// Grow slice instead of appending,
	// as that gives us an opportunity
	// to re-use an existing []cell.
	if n := len(b.lines) + 1; n <= cap(b.lines) {
		b.lines = b.lines[:n]
		b.lines[n-1] = b.lines[n-1][:0]
	} else {
		b.lines = append(b.lines, nil)
	}

	if !flushed {
		// The previous line is probably a good indicator
		// of how many cells the current line will have.
		// If the current line's capacity is smaller than that,
		// abandon it and make a new one.
		if n := len(b.lines); n >= 2 {
			if prev := len(b.lines[n-2]); prev > cap(b.lines[n-1]) {
				b.lines[n-1] = make([]cell, 0, prev)
			}
		}
	}
}

// Reset the current state.
func (b *Writer) reset() {
	b.buf = b.buf[:0]
	b.pos = 0
	b.cell = cell{}
	b.endChar = 0
	b.lines = b.lines[0:0]
	b.widths = b.widths[0:0]
	b.addLine(true)
}

// Internal representation (current state):
//
// - all text written is appended to buf; tabs and line breaks are stripped away
// - at any given time there is a (possibly empty) incomplete cell at the end
//   (the cell starts after a tab or line break)
// - cell.size is the number of bytes belonging to the cell so far
// - cell.width is text width in runes of that cell from the start of the cell to
//   position pos; html tags and entities are excluded from this width if html
//   filtering is enabled
// - the sizes and widths of processed text are kept in the lines list
//   which contains a list of cells for each line
// - the widths list is a temporary list with current widths used during
//   formatting; it is kept in Writer because it's re-used
//
//	|<---------- size ---------->|
//	|                            |
//	|<- width ->|<- ignored ->|  |
//	|           |             |  |
//	[---processed---tab------------<tag>...</tag>...]
//	^                         ^                       ^
//	|                         |                       |
//	buf              start of incomplete cell        pos

// Formatting can be controlled with these flags.
const (
	// Ignore html tags and treat entities (starting with '&'
	// and ending in ';') as single characters (width = 1).
	FilterHTML uint = 1 << iota

	// Strip Escape characters bracketing escaped text segments
	// instead of passing them through unchanged with the text.
	StripEscape

	// Force right-alignment of cell content.
	// Default is left-alignment.
	AlignRight

	// Handle empty columns as if they were not present in
	// the input in the first place.
	DiscardEmptyColumns

	// Always use tabs for indentation columns (i.e., padding of
	// leading empty cells on the left) independent of padchar.
	TabIndent

	// Print a vertical bar ('|') between columns (after formatting).
	// Discarded columns appear as zero-width columns ("||").
	Debug

	// Remember maximum widths seen per column even after Flush() is called.
	RememberWidths
)

// A Writer must be initialized with a call to Init. The first parameter (output)
// specifies the filter output. The remaining parameters control the formatting:
//
//	minwidth	minimal cell width including any padding
//	tabwidth	width of tab characters (equivalent number of spaces)
//	padding		padding added to a cell before computing its width
//	padchar		ASCII char used for padding
//			if padchar == '\t', the Writer will assume that the
//			width of a '\t' in the formatted output is tabwidth,
//			and cells are left-aligned independent of align_left
//			(for correct-looking results, tabwidth must correspond
//			to the tab width in the viewer displaying the result)
//	flags		formatting control
//
func (b *Writer) Init(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
	if minwidth < 0 || tabwidth < 0 || padding < 0 {
		panic("negative minwidth, tabwidth, or padding")
	}
	b.output = output
	b.minwidth = minwidth
	b.tabwidth = tabwidth
	b.padding = padding
	for i := range b.padbytes {
		b.padbytes[i] = padchar
	}
	if padchar == '\t' {
		// tab padding enforces left-alignment
		flags &^= AlignRight
	}
	b.flags = flags

	b.reset()

	return b
}

// debugging support (keep code around)
func (b *Writer) dump() {
	pos := 0
	for i, line := range b.lines {
		print("(", i, ") ")
		for _, c := range line {
			print("[", string(b.buf[pos:pos+c.size]), "]")
			pos += c.size
		}
		print("\n")
	}
	print("\n")
}

// local error wrapper so we can distinguish errors we want to return
// as errors from genuine panics (which we don't want to return as errors)
type osError struct {
	err error
}

func (b *Writer) write0(buf []byte) {
	n, err := b.output.Write(buf)
	if n != len(buf) && err == nil {
		err = io.ErrShortWrite
	}
	if err != nil {
		panic(osError{err})
	}
}

func (b *Writer) writeN(src []byte, n int) {
	for n > len(src) {
		b.write0(src)
		n -= len(src)
	}
	b.write0(src[0:n])
}

var (
	newline = []byte{'\n'}
	tabs    = []byte("\t\t\t\t\t\t\t\t")
)

func (b *Writer) writePadding(textw, cellw int, useTabs bool) {
	if b.padbytes[0] == '\t' || useTabs {
		// padding is done with tabs
		if b.tabwidth == 0 {
			return // tabs have no width - can't do any padding
		}
		// make cellw the smallest multiple of b.tabwidth
		cellw = (cellw + b.tabwidth - 1) / b.tabwidth * b.tabwidth
		n := cellw - textw // amount of padding
		if n < 0 {
			panic("internal error")
		}
		b.writeN(tabs, (n+b.tabwidth-1)/b.tabwidth)
		return
	}

	// padding is done with non-tab characters
	b.writeN(b.padbytes[0:], cellw-textw)
}

var vbar = []byte{'|'}

func (b *Writer) writeLines(pos0 int, line0, line1 int) (pos int) {
	pos = pos0
	for i := line0; i < line1; i++ {
		line := b.lines[i]

		// if TabIndent is set, use tabs to pad leading empty cells
		useTabs := b.flags&TabIndent != 0

		for j, c := range line {
			if j > 0 && b.flags&Debug != 0 {
				// indicate column break
				b.write0(vbar)
			}

			if c.size == 0 {
				// empty cell
				if j < len(b.widths) {
					b.writePadding(c.width, b.widths[j], useTabs)
				}
			} else {
				// non-empty cell
				useTabs = false
				if b.flags&AlignRight == 0 { // align left
					b.write0(b.buf[pos : pos+c.size])
					pos += c.size
					if j < len(b.widths) {
						b.writePadding(c.width, b.widths[j], false)
					}
				} else { // align right
					if j < len(b.widths) {
						b.writePadding(c.width, b.widths[j], false)
					}
					b.write0(b.buf[pos : pos+c.size])
					pos += c.size
				}
			}
		}

		if i+1 == len(b.lines) {
			// last buffered line - we don't have a newline, so just write
			// any outstanding buffered data
			b.write0(b.buf[pos : pos+b.cell.size])
			pos += b.cell.size
		} else {
			// not the last line - write newline
			b.write0(newline)
		}
	}
	return
}

// Format the text between line0 and line1 (excluding line1); pos
// is the buffer position corresponding to the beginning of line0.
// Returns the buffer position corresponding to the beginning of
// line1.
//
func (b *Writer) format(pos0 int, line0, line1 int) (pos int) {
	pos = pos0
	column := len(b.widths)
	for this := line0; this < line1; this++ {
		line := b.lines[this]

		if column >= len(line)-1 {
			continue
		}
		// cell exists in this column => this line
		// has more cells than the previous line
		// (the last cell per line is ignored because cells are
		// tab-terminated; the last cell per line describes the
		// text before the newline/formfeed and does not belong
		// to a column)

		// print unprinted lines until beginning of block
		pos = b.writeLines(pos, line0, this)
		line0 = this

		// column block begin
		width := b.minwidth // minimal column width
		discardable := true // true if all cells in this column are empty and "soft"
		for ; this < line1; this++ {
			line = b.lines[this]
			if column >= len(line)-1 {
				break
			}
			// cell exists in this column
			c := line[column]
			// update width
			if w := c.width + b.padding; w > width {
				width = w
			}
			// update discardable
			if c.width > 0 || c.htab {
				discardable = false
			}
		}
		// column block end

		// discard empty columns if necessary
		if discardable && b.flags&DiscardEmptyColumns != 0 {
			width = 0
		}

		if b.flags&RememberWidths != 0 {
			if len(b.maxwidths) < len(b.widths) {
				b.maxwidths = append(b.maxwidths, b.widths[len(b.maxwidths):]...)
			}

			switch {
			case len(b.maxwidths) == len(b.widths):
				b.maxwidths = append(b.maxwidths, width)
			case b.maxwidths[len(b.widths)] > width:
				width = b.maxwidths[len(b.widths)]
			case b.maxwidths[len(b.widths)] < width:
				b.maxwidths[len(b.widths)] = width
			}
		}

		// format and print all columns to the right of this column
		// (we know the widths of this column and all columns to the left)
		b.widths = append(b.widths, width) // push width
		pos = b.format(pos, line0, this)
		b.widths = b.widths[0 : len(b.widths)-1] // pop width
		line0 = this
	}

	// print unprinted lines until end
	return b.writeLines(pos, line0, line1)
}

// Append text to current cell.
func (b *Writer) append(text []byte) {
	b.buf = append(b.buf, text...)
	b.cell.size += len(text)
}

// Update the cell width.
func (b *Writer) updateWidth() {
	b.cell.width += utf8.RuneCount(b.buf[b.pos:])
	b.pos = len(b.buf)
}

// To escape a text segment, bracket it with Escape characters.
// For instance, the tab in this string "Ignore this tab: \xff\t\xff"
// does not terminate a cell and constitutes a single character of
// width one for formatting purposes.
//
// The value 0xff was chosen because it cannot appear in a valid UTF-8 sequence.
//
const Escape = '\xff'

// Start escaped mode.
func (b *Writer) startEscape(ch byte) {
	switch ch {
	case Escape:
		b.endChar = Escape
	case '<':
		b.endChar = '>'
	case '&':
		b.endChar = ';'
	}
}

// Terminate escaped mode. If the escaped text was an HTML tag, its width
// is assumed to be zero for formatting purposes; if it was an HTML entity,
// its width is assumed to be one. In all other cases, the width is the
// unicode width of the text.
//
func (b *Writer) endEscape() {
	switch b.endChar {
	case Escape:
		b.updateWidth()
		if b.flags&StripEscape == 0 {
			b.cell.width -= 2 // don't count the Escape chars
		}
	case '>': // tag of zero width
	case ';':
		b.cell.width++ // entity, count as one rune
	}
	b.pos = len(b.buf)
	b.endChar = 0
}

// Terminate the current cell by adding it to the list of cells of the
// current line. Returns the number of cells in that line.
//
func (b *Writer) terminateCell(htab bool) int {
	b.cell.htab = htab
	line := &b.lines[len(b.lines)-1]
	*line = append(*line, b.cell)
	b.cell = cell{}
	return len(*line)
}

func handlePanic(err *error, op string) {
	if e := recover(); e != nil {
		if nerr, ok := e.(osError); ok {
			*err = nerr.err
			return
		}
		panic("tabwriter: panic during " + op)
	}
}

// RememberedWidths returns a copy of the remembered per-column maximum widths.
// Requires use of the RememberWidths flag, and is not threadsafe.
func (b *Writer) RememberedWidths() []int {
	retval := make([]int, len(b.maxwidths))
	copy(retval, b.maxwidths)
	return retval
}

// SetRememberedWidths sets the remembered per-column maximum widths.
// Requires use of the RememberWidths flag, and is not threadsafe.
func (b *Writer) SetRememberedWidths(widths []int) *Writer {
	b.maxwidths = make([]int, len(widths))
	copy(b.maxwidths, widths)
	return b
}

// Flush should be called after the last call to Write to ensure
// that any data buffered in the Writer is written to output. Any
// incomplete escape sequence at the end is considered
// complete for formatting purposes.
func (b *Writer) Flush() error {
	return b.flush()
}

func (b *Writer) flush() (err error) {
	defer b.reset() // even in the presence of errors
	defer handlePanic(&err, "Flush")

	// add current cell if not empty
	if b.cell.size > 0 {
		if b.endChar != 0 {
			// inside escape - terminate it even if incomplete
			b.endEscape()
		}
		b.terminateCell(false)
	}

	// format contents of buffer
	b.format(0, 0, len(b.lines))
	return nil
}

var hbar = []byte("---\n")

// Write writes buf to the writer b.
// The only errors returned are ones encountered
// while writing to the underlying output stream.
//
func (b *Writer) Write(buf []byte) (n int, err error) {
	defer handlePanic(&err, "Write")

	// split text into cells
	n = 0
	for i, ch := range buf {
		if b.endChar == 0 {
			// outside escape
			switch ch {
			case '\t', '\v', '\n', '\f':
				// end of cell
				b.append(buf[n:i])
				b.updateWidth()
				n = i + 1 // ch consumed
				ncells := b.terminateCell(ch == '\t')
				if ch == '\n' || ch == '\f' {
					// terminate line
					b.addLine(ch == '\f')
					if ch == '\f' || ncells == 1 {
						// A '\f' always forces a flush. Otherwise, the previous
						// line has only one cell, which does not have an impact on
						// the formatting of the following lines (the last cell per
						// line is ignored by format()), so we can flush the
						// Writer contents.
						if err = b.Flush(); err != nil {
							return
						}
						if ch == '\f' && b.flags&Debug != 0 {
							// indicate section break
							b.write0(hbar)
						}
					}
				}

			case Escape:
				// start of escaped sequence
				b.append(buf[n:i])
				b.updateWidth()
				n = i
				if b.flags&StripEscape != 0 {
					n++ // strip Escape
				}
				b.startEscape(Escape)

			case '<', '&':
				// possibly an html tag/entity
				if b.flags&FilterHTML != 0 {
					// begin of tag/entity
					b.append(buf[n:i])
					b.updateWidth()
					n = i
					b.startEscape(ch)
				}
			}

		} else {
			// inside escape
			if ch == b.endChar {
				// end of tag/entity
				j := i + 1
				if ch == Escape && b.flags&StripEscape != 0 {
					j = i // strip Escape
				}
				b.append(buf[n:j])
				n = i + 1 // ch consumed
				b.endEscape()
			}
		}
	}

	// append leftover text
	b.append(buf[n:])
	n = len(buf)
	return
}

// NewWriter allocates and initializes a new tabwriter.Writer.
// The parameters are the same as for the Init function.
//
func NewWriter(output io.Writer, minwidth, tabwidth, padding int, padchar byte, flags uint) *Writer {
	return new(Writer).Init(output, minwidth, tabwidth, padding, padchar, flags)
}

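Since the package documents itself as a drop-in replacement for text/tabwriter, standard tabwriter usage applies; the only new knob is the RememberWidths flag, read back through RememberedWidths/SetRememberedWidths. A minimal sketch (the import path is assumed to be the vendored github.com/liggitt/tabwriter, and the table contents are invented):

package main

import (
	"fmt"
	"os"

	"github.com/liggitt/tabwriter"
)

func main() {
	// minwidth=0, tabwidth=8, padding=2, padchar=' '
	w := tabwriter.NewWriter(os.Stdout, 0, 8, 2, ' ', tabwriter.RememberWidths)
	fmt.Fprintln(w, "NAME\tSTATUS\tAGE")
	fmt.Fprintln(w, "backup-1\tCompleted\t12m")
	w.Flush() // columns are only aligned once Flush is called

	// Because RememberWidths is set, rows flushed later through the same
	// Writer keep the column widths computed above instead of collapsing.
	fmt.Fprintln(w, "backup-2\tInProgress\t3s")
	w.Flush()
}
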
@ -1,4 +0,0 @@
Petar Maymounkov <petar@5ttt.org>
Vadim Vygonets <vadik@vygo.net>
Ian Smith <iansmith@acm.org>
Martin Bruse

@ -1,27 +0,0 @@
Copyright (c) 2010, Petar Maymounkov
All rights reserved.

Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:

(*) Redistributions of source code must retain the above copyright notice, this list
of conditions and the following disclaimer.

(*) Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or
other materials provided with the distribution.

(*) Neither the name of Petar Maymounkov nor the names of its contributors may be
used to endorse or promote products derived from this software without specific
prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
(INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

@ -1,39 +0,0 @@
// Copyright 2010 Petar Maymounkov. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package llrb

import "math"

// avgVar maintains the average and variance of a stream of numbers
// in a space-efficient manner.
type avgVar struct {
	count      int64
	sum, sumsq float64
}

func (av *avgVar) Init() {
	av.count = 0
	av.sum = 0.0
	av.sumsq = 0.0
}

func (av *avgVar) Add(sample float64) {
	av.count++
	av.sum += sample
	av.sumsq += sample * sample
}

func (av *avgVar) GetCount() int64 { return av.count }

func (av *avgVar) GetAvg() float64 { return av.sum / float64(av.count) }

func (av *avgVar) GetTotal() float64 { return av.sum }

func (av *avgVar) GetVar() float64 {
	a := av.GetAvg()
	return av.sumsq/float64(av.count) - a*a
}

func (av *avgVar) GetStdDev() float64 { return math.Sqrt(av.GetVar()) }

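avgVar gets constant-space updates out of the identity Var(X) = E[X squared] - (E[X]) squared, which is exactly what GetVar computes from sum and sumsq. A standalone check of the same arithmetic (the sample values are invented):

package main

import "fmt"

func main() {
	samples := []float64{2, 4, 4, 4, 5, 5, 7, 9}
	var sum, sumsq float64
	for _, x := range samples {
		sum += x
		sumsq += x * x
	}
	n := float64(len(samples))
	avg := sum / n
	variance := sumsq/n - avg*avg
	fmt.Println(avg, variance) // 5 4, i.e. a standard deviation of 2
}
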
@ -1,93 +0,0 @@
package llrb

type ItemIterator func(i Item) bool

//func (t *Tree) Ascend(iterator ItemIterator) {
//	t.AscendGreaterOrEqual(Inf(-1), iterator)
//}

func (t *LLRB) AscendRange(greaterOrEqual, lessThan Item, iterator ItemIterator) {
	t.ascendRange(t.root, greaterOrEqual, lessThan, iterator)
}

func (t *LLRB) ascendRange(h *Node, inf, sup Item, iterator ItemIterator) bool {
	if h == nil {
		return true
	}
	if !less(h.Item, sup) {
		return t.ascendRange(h.Left, inf, sup, iterator)
	}
	if less(h.Item, inf) {
		return t.ascendRange(h.Right, inf, sup, iterator)
	}

	if !t.ascendRange(h.Left, inf, sup, iterator) {
		return false
	}
	if !iterator(h.Item) {
		return false
	}
	return t.ascendRange(h.Right, inf, sup, iterator)
}

// AscendGreaterOrEqual will call iterator once for each element greater or equal to
// pivot in ascending order. It will stop whenever the iterator returns false.
func (t *LLRB) AscendGreaterOrEqual(pivot Item, iterator ItemIterator) {
	t.ascendGreaterOrEqual(t.root, pivot, iterator)
}

func (t *LLRB) ascendGreaterOrEqual(h *Node, pivot Item, iterator ItemIterator) bool {
	if h == nil {
		return true
	}
	if !less(h.Item, pivot) {
		if !t.ascendGreaterOrEqual(h.Left, pivot, iterator) {
			return false
		}
		if !iterator(h.Item) {
			return false
		}
	}
	return t.ascendGreaterOrEqual(h.Right, pivot, iterator)
}

func (t *LLRB) AscendLessThan(pivot Item, iterator ItemIterator) {
	t.ascendLessThan(t.root, pivot, iterator)
}

func (t *LLRB) ascendLessThan(h *Node, pivot Item, iterator ItemIterator) bool {
	if h == nil {
		return true
	}
	if !t.ascendLessThan(h.Left, pivot, iterator) {
		return false
	}
	if !iterator(h.Item) {
		return false
	}
	if less(h.Item, pivot) {
		return t.ascendLessThan(h.Left, pivot, iterator)
	}
	return true
}

// DescendLessOrEqual will call iterator once for each element less than the
// pivot in descending order. It will stop whenever the iterator returns false.
func (t *LLRB) DescendLessOrEqual(pivot Item, iterator ItemIterator) {
	t.descendLessOrEqual(t.root, pivot, iterator)
}

func (t *LLRB) descendLessOrEqual(h *Node, pivot Item, iterator ItemIterator) bool {
	if h == nil {
		return true
	}
	if less(h.Item, pivot) || !less(pivot, h.Item) {
		if !t.descendLessOrEqual(h.Right, pivot, iterator) {
			return false
		}
		if !iterator(h.Item) {
			return false
		}
	}
	return t.descendLessOrEqual(h.Left, pivot, iterator)
}

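For reference, the range iterators above are half-open: AscendRange visits items in [greaterOrEqual, lessThan) and stops as soon as the callback returns false. A minimal sketch against the package's own Int helper (the import path is assumed to be github.com/petar/GoLLRB/llrb, matching the author named in the removed files):

package main

import (
	"fmt"

	"github.com/petar/GoLLRB/llrb"
)

func main() {
	t := llrb.New()
	for _, v := range []int{5, 1, 9, 3, 7} {
		t.InsertNoReplace(llrb.Int(v))
	}
	// Visit items in [3, 9); returning false would stop the walk early.
	t.AscendRange(llrb.Int(3), llrb.Int(9), func(i llrb.Item) bool {
		fmt.Println(int(i.(llrb.Int))) // 3, 5, 7
		return true
	})
}
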
@ -1,46 +0,0 @@
// Copyright 2010 Petar Maymounkov. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package llrb

// GetHeight() returns an item in the tree with key @key, and its height in the tree
func (t *LLRB) GetHeight(key Item) (result Item, depth int) {
	return t.getHeight(t.root, key)
}

func (t *LLRB) getHeight(h *Node, item Item) (Item, int) {
	if h == nil {
		return nil, 0
	}
	if less(item, h.Item) {
		result, depth := t.getHeight(h.Left, item)
		return result, depth + 1
	}
	if less(h.Item, item) {
		result, depth := t.getHeight(h.Right, item)
		return result, depth + 1
	}
	return h.Item, 0
}

// HeightStats() returns the average and standard deviation of the height
// of elements in the tree
func (t *LLRB) HeightStats() (avg, stddev float64) {
	av := &avgVar{}
	heightStats(t.root, 0, av)
	return av.GetAvg(), av.GetStdDev()
}

func heightStats(h *Node, d int, av *avgVar) {
	if h == nil {
		return
	}
	av.Add(float64(d))
	if h.Left != nil {
		heightStats(h.Left, d+1, av)
	}
	if h.Right != nil {
		heightStats(h.Right, d+1, av)
	}
}

@ -1,456 +0,0 @@
// Copyright 2010 Petar Maymounkov. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// A Left-Leaning Red-Black (LLRB) implementation of 2-3 balanced binary search trees,
// based on the following work:
//
//   http://www.cs.princeton.edu/~rs/talks/LLRB/08Penn.pdf
//   http://www.cs.princeton.edu/~rs/talks/LLRB/LLRB.pdf
//   http://www.cs.princeton.edu/~rs/talks/LLRB/Java/RedBlackBST.java
//
// 2-3 trees (and the run-time equivalent 2-3-4 trees) are the de facto standard BST
// algorithms found in implementations of Python, Java, and other libraries. The LLRB
// implementation of 2-3 trees is a recent improvement on the traditional implementation,
// observed and documented by Robert Sedgewick.
//
package llrb

// Tree is a Left-Leaning Red-Black (LLRB) implementation of 2-3 trees
type LLRB struct {
	count int
	root  *Node
}

type Node struct {
	Item
	Left, Right *Node // Pointers to left and right child nodes
	Black       bool  // If set, the color of the link (incoming from the parent) is black
	// In the LLRB, new nodes are always red, hence the zero-value for node
}

type Item interface {
	Less(than Item) bool
}

//
func less(x, y Item) bool {
	if x == pinf {
		return false
	}
	if x == ninf {
		return true
	}
	return x.Less(y)
}

// Inf returns an Item that is "bigger than" any other item, if sign is positive.
// Otherwise it returns an Item that is "smaller than" any other item.
func Inf(sign int) Item {
	if sign == 0 {
		panic("sign")
	}
	if sign > 0 {
		return pinf
	}
	return ninf
}

var (
	ninf = nInf{}
	pinf = pInf{}
)

type nInf struct{}

func (nInf) Less(Item) bool {
	return true
}

type pInf struct{}

func (pInf) Less(Item) bool {
	return false
}

// New() allocates a new tree
func New() *LLRB {
	return &LLRB{}
}

// SetRoot sets the root node of the tree.
// It is intended to be used by functions that deserialize the tree.
func (t *LLRB) SetRoot(r *Node) {
	t.root = r
}

// Root returns the root node of the tree.
// It is intended to be used by functions that serialize the tree.
func (t *LLRB) Root() *Node {
	return t.root
}

// Len returns the number of nodes in the tree.
func (t *LLRB) Len() int { return t.count }

// Has returns true if the tree contains an element whose order is the same as that of key.
func (t *LLRB) Has(key Item) bool {
	return t.Get(key) != nil
}

// Get retrieves an element from the tree whose order is the same as that of key.
func (t *LLRB) Get(key Item) Item {
	h := t.root
	for h != nil {
		switch {
		case less(key, h.Item):
			h = h.Left
		case less(h.Item, key):
			h = h.Right
		default:
			return h.Item
		}
	}
	return nil
}

// Min returns the minimum element in the tree.
func (t *LLRB) Min() Item {
	h := t.root
	if h == nil {
		return nil
	}
	for h.Left != nil {
		h = h.Left
	}
	return h.Item
}

// Max returns the maximum element in the tree.
func (t *LLRB) Max() Item {
	h := t.root
	if h == nil {
		return nil
	}
	for h.Right != nil {
		h = h.Right
	}
	return h.Item
}

func (t *LLRB) ReplaceOrInsertBulk(items ...Item) {
	for _, i := range items {
		t.ReplaceOrInsert(i)
	}
}

func (t *LLRB) InsertNoReplaceBulk(items ...Item) {
	for _, i := range items {
		t.InsertNoReplace(i)
	}
}

// ReplaceOrInsert inserts item into the tree. If an existing
// element has the same order, it is removed from the tree and returned.
func (t *LLRB) ReplaceOrInsert(item Item) Item {
	if item == nil {
		panic("inserting nil item")
	}
	var replaced Item
	t.root, replaced = t.replaceOrInsert(t.root, item)
	t.root.Black = true
	if replaced == nil {
		t.count++
	}
	return replaced
}

func (t *LLRB) replaceOrInsert(h *Node, item Item) (*Node, Item) {
	if h == nil {
		return newNode(item), nil
	}

	h = walkDownRot23(h)

	var replaced Item
	if less(item, h.Item) { // BUG
		h.Left, replaced = t.replaceOrInsert(h.Left, item)
	} else if less(h.Item, item) {
		h.Right, replaced = t.replaceOrInsert(h.Right, item)
	} else {
		replaced, h.Item = h.Item, item
	}

	h = walkUpRot23(h)

	return h, replaced
}

// InsertNoReplace inserts item into the tree. If an existing
// element has the same order, both elements remain in the tree.
func (t *LLRB) InsertNoReplace(item Item) {
	if item == nil {
		panic("inserting nil item")
	}
	t.root = t.insertNoReplace(t.root, item)
	t.root.Black = true
	t.count++
}

func (t *LLRB) insertNoReplace(h *Node, item Item) *Node {
	if h == nil {
		return newNode(item)
	}

	h = walkDownRot23(h)

	if less(item, h.Item) {
		h.Left = t.insertNoReplace(h.Left, item)
	} else {
		h.Right = t.insertNoReplace(h.Right, item)
	}

	return walkUpRot23(h)
}

// Rotation driver routines for 2-3 algorithm

func walkDownRot23(h *Node) *Node { return h }

func walkUpRot23(h *Node) *Node {
	if isRed(h.Right) && !isRed(h.Left) {
		h = rotateLeft(h)
	}

	if isRed(h.Left) && isRed(h.Left.Left) {
		h = rotateRight(h)
	}

	if isRed(h.Left) && isRed(h.Right) {
		flip(h)
	}

	return h
}

// Rotation driver routines for 2-3-4 algorithm

func walkDownRot234(h *Node) *Node {
	if isRed(h.Left) && isRed(h.Right) {
		flip(h)
	}

	return h
}

func walkUpRot234(h *Node) *Node {
	if isRed(h.Right) && !isRed(h.Left) {
		h = rotateLeft(h)
	}

	if isRed(h.Left) && isRed(h.Left.Left) {
		h = rotateRight(h)
	}

	return h
}

// DeleteMin deletes the minimum element in the tree and returns the
// deleted item or nil otherwise.
func (t *LLRB) DeleteMin() Item {
	var deleted Item
	t.root, deleted = deleteMin(t.root)
	if t.root != nil {
		t.root.Black = true
	}
	if deleted != nil {
		t.count--
	}
	return deleted
}

// deleteMin code for LLRB 2-3 trees
func deleteMin(h *Node) (*Node, Item) {
	if h == nil {
		return nil, nil
	}
	if h.Left == nil {
		return nil, h.Item
	}

	if !isRed(h.Left) && !isRed(h.Left.Left) {
		h = moveRedLeft(h)
	}

	var deleted Item
	h.Left, deleted = deleteMin(h.Left)

	return fixUp(h), deleted
}

// DeleteMax deletes the maximum element in the tree and returns
// the deleted item or nil otherwise
func (t *LLRB) DeleteMax() Item {
	var deleted Item
	t.root, deleted = deleteMax(t.root)
	if t.root != nil {
		t.root.Black = true
	}
	if deleted != nil {
		t.count--
	}
	return deleted
}

func deleteMax(h *Node) (*Node, Item) {
	if h == nil {
		return nil, nil
	}
	if isRed(h.Left) {
		h = rotateRight(h)
	}
	if h.Right == nil {
		return nil, h.Item
	}
	if !isRed(h.Right) && !isRed(h.Right.Left) {
		h = moveRedRight(h)
	}
	var deleted Item
	h.Right, deleted = deleteMax(h.Right)

	return fixUp(h), deleted
}

// Delete deletes an item from the tree whose key equals key.
// The deleted item is returned, otherwise nil is returned.
func (t *LLRB) Delete(key Item) Item {
	var deleted Item
	t.root, deleted = t.delete(t.root, key)
	if t.root != nil {
		t.root.Black = true
	}
	if deleted != nil {
		t.count--
	}
	return deleted
}

func (t *LLRB) delete(h *Node, item Item) (*Node, Item) {
	var deleted Item
	if h == nil {
		return nil, nil
	}
	if less(item, h.Item) {
		if h.Left == nil { // item not present. Nothing to delete
			return h, nil
		}
		if !isRed(h.Left) && !isRed(h.Left.Left) {
			h = moveRedLeft(h)
		}
		h.Left, deleted = t.delete(h.Left, item)
	} else {
		if isRed(h.Left) {
			h = rotateRight(h)
		}
		// If @item equals @h.Item and no right children at @h
		if !less(h.Item, item) && h.Right == nil {
			return nil, h.Item
		}
		// PETAR: Added 'h.Right != nil' below
		if h.Right != nil && !isRed(h.Right) && !isRed(h.Right.Left) {
			h = moveRedRight(h)
		}
		// If @item equals @h.Item, and (from above) 'h.Right != nil'
		if !less(h.Item, item) {
			var subDeleted Item
			h.Right, subDeleted = deleteMin(h.Right)
			if subDeleted == nil {
				panic("logic")
			}
			deleted, h.Item = h.Item, subDeleted
		} else { // Else, @item is bigger than @h.Item
			h.Right, deleted = t.delete(h.Right, item)
		}
	}

	return fixUp(h), deleted
}

// Internal node manipulation routines

func newNode(item Item) *Node { return &Node{Item: item} }

func isRed(h *Node) bool {
	if h == nil {
		return false
	}
	return !h.Black
}

func rotateLeft(h *Node) *Node {
	x := h.Right
	if x.Black {
		panic("rotating a black link")
	}
	h.Right = x.Left
	x.Left = h
	x.Black = h.Black
	h.Black = false
	return x
}

func rotateRight(h *Node) *Node {
	x := h.Left
	if x.Black {
		panic("rotating a black link")
	}
	h.Left = x.Right
	x.Right = h
	x.Black = h.Black
	h.Black = false
	return x
}

// REQUIRE: Left and Right children must be present
func flip(h *Node) {
	h.Black = !h.Black
	h.Left.Black = !h.Left.Black
	h.Right.Black = !h.Right.Black
}

// REQUIRE: Left and Right children must be present
func moveRedLeft(h *Node) *Node {
	flip(h)
	if isRed(h.Right.Left) {
		h.Right = rotateRight(h.Right)
		h = rotateLeft(h)
		flip(h)
	}
	return h
}

// REQUIRE: Left and Right children must be present
func moveRedRight(h *Node) *Node {
	flip(h)
	if isRed(h.Left.Left) {
		h = rotateRight(h)
		flip(h)
	}
	return h
}

func fixUp(h *Node) *Node {
	if isRed(h.Right) {
		h = rotateLeft(h)
	}

	if isRed(h.Left) && isRed(h.Left.Left) {
		h = rotateRight(h)
	}

	if isRed(h.Left) && isRed(h.Right) {
		flip(h)
	}

	return h
}

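The difference between the two insertion methods above is worth spelling out: ReplaceOrInsert evicts and returns an element with equal order, while InsertNoReplace keeps duplicates, which is why only the former can leave Len unchanged. A small sketch (same assumed import path as above):

package main

import (
	"fmt"

	"github.com/petar/GoLLRB/llrb"
)

func main() {
	t := llrb.New()
	t.ReplaceOrInsert(llrb.String("a"))
	prev := t.ReplaceOrInsert(llrb.String("a")) // displaced element comes back
	fmt.Println(prev, t.Len())                  // a 1

	t.InsertNoReplace(llrb.String("a")) // equal orders are both kept
	fmt.Println(t.Len())                // 2
	fmt.Println(t.Delete(llrb.String("a")), t.Len()) // a 1
}
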
@ -1,17 +0,0 @@
// Copyright 2010 Petar Maymounkov. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package llrb

type Int int

func (x Int) Less(than Item) bool {
	return x < than.(Int)
}

type String string

func (x String) Less(than Item) bool {
	return x < than.(String)
}

@ -1,64 +0,0 @@
package diskv

import (
	"compress/flate"
	"compress/gzip"
	"compress/zlib"
	"io"
)

// Compression is an interface that Diskv uses to implement compression of
// data. Writer takes a destination io.Writer and returns a WriteCloser that
// compresses all data written through it. Reader takes a source io.Reader and
// returns a ReadCloser that decompresses all data read through it. You may
// define these methods on your own type, or use one of the NewCompression
// helpers.
type Compression interface {
	Writer(dst io.Writer) (io.WriteCloser, error)
	Reader(src io.Reader) (io.ReadCloser, error)
}

// NewGzipCompression returns a Gzip-based Compression.
func NewGzipCompression() Compression {
	return NewGzipCompressionLevel(flate.DefaultCompression)
}

// NewGzipCompressionLevel returns a Gzip-based Compression with the given level.
func NewGzipCompressionLevel(level int) Compression {
	return &genericCompression{
		wf: func(w io.Writer) (io.WriteCloser, error) { return gzip.NewWriterLevel(w, level) },
		rf: func(r io.Reader) (io.ReadCloser, error) { return gzip.NewReader(r) },
	}
}

// NewZlibCompression returns a Zlib-based Compression.
func NewZlibCompression() Compression {
	return NewZlibCompressionLevel(flate.DefaultCompression)
}

// NewZlibCompressionLevel returns a Zlib-based Compression with the given level.
func NewZlibCompressionLevel(level int) Compression {
	return NewZlibCompressionLevelDict(level, nil)
}

// NewZlibCompressionLevelDict returns a Zlib-based Compression with the given
// level, based on the given dictionary.
func NewZlibCompressionLevelDict(level int, dict []byte) Compression {
	return &genericCompression{
		func(w io.Writer) (io.WriteCloser, error) { return zlib.NewWriterLevelDict(w, level, dict) },
		func(r io.Reader) (io.ReadCloser, error) { return zlib.NewReaderDict(r, dict) },
	}
}

type genericCompression struct {
	wf func(w io.Writer) (io.WriteCloser, error)
	rf func(r io.Reader) (io.ReadCloser, error)
}

func (g *genericCompression) Writer(dst io.Writer) (io.WriteCloser, error) {
	return g.wf(dst)
}

func (g *genericCompression) Reader(src io.Reader) (io.ReadCloser, error) {
	return g.rf(src)
}

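Compression plugs into diskv through the Options struct of the store shown below, alongside the TransformFunction that shards keys into subdirectories. A minimal sketch of wiring both together (the import path is assumed to be github.com/peterbourgon/diskv, matching the package name; paths and keys are invented):

package main

import (
	"fmt"

	"github.com/peterbourgon/diskv"
)

func main() {
	// Shard keys by their first two characters, so "alpha" lands at
	// diskv-data/al/alpha on disk.
	transform := func(key string) []string {
		if len(key) < 2 {
			return []string{}
		}
		return []string{key[:2]}
	}

	d := diskv.New(diskv.Options{
		BasePath:     "diskv-data",
		Transform:    transform,
		CacheSizeMax: 1024 * 1024, // 1 MB read cache
		Compression:  diskv.NewGzipCompression(),
	})

	if err := d.Write("alpha", []byte("hello")); err != nil {
		panic(err)
	}
	val, err := d.Read("alpha")
	if err != nil {
		panic(err)
	}
	fmt.Println(string(val)) // hello
}
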
@ -1,624 +0,0 @@
|
|||
// Diskv (disk-vee) is a simple, persistent, key-value store.
|
||||
// It stores all data flatly on the filesystem.
|
||||
|
||||
package diskv
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"sync"
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const (
|
||||
defaultBasePath = "diskv"
|
||||
defaultFilePerm os.FileMode = 0666
|
||||
defaultPathPerm os.FileMode = 0777
|
||||
)
|
||||
|
||||
var (
|
||||
defaultTransform = func(s string) []string { return []string{} }
|
||||
errCanceled = errors.New("canceled")
|
||||
errEmptyKey = errors.New("empty key")
|
||||
errBadKey = errors.New("bad key")
|
||||
errImportDirectory = errors.New("can't import a directory")
|
||||
)
|
||||
|
||||
// TransformFunction transforms a key into a slice of strings, with each
|
||||
// element in the slice representing a directory in the file path where the
|
||||
// key's entry will eventually be stored.
|
||||
//
|
||||
// For example, if TransformFunc transforms "abcdef" to ["ab", "cde", "f"],
|
||||
// the final location of the data file will be <basedir>/ab/cde/f/abcdef
|
||||
type TransformFunction func(s string) []string
|
||||
|
||||
// Options define a set of properties that dictate Diskv behavior.
|
||||
// All values are optional.
|
||||
type Options struct {
|
||||
BasePath string
|
||||
Transform TransformFunction
|
||||
CacheSizeMax uint64 // bytes
|
||||
PathPerm os.FileMode
|
||||
FilePerm os.FileMode
|
||||
// If TempDir is set, it will enable filesystem atomic writes by
|
||||
// writing temporary files to that location before being moved
|
||||
// to BasePath.
|
||||
// Note that TempDir MUST be on the same device/partition as
|
||||
// BasePath.
|
||||
TempDir string
|
||||
|
||||
Index Index
|
||||
IndexLess LessFunction
|
||||
|
||||
Compression Compression
|
||||
}
|
||||
|
||||
// Diskv implements the Diskv interface. You shouldn't construct Diskv
|
||||
// structures directly; instead, use the New constructor.
|
||||
type Diskv struct {
|
||||
Options
|
||||
mu sync.RWMutex
|
||||
cache map[string][]byte
|
||||
cacheSize uint64
|
||||
}
|
||||
|
||||
// New returns an initialized Diskv structure, ready to use.
|
||||
// If the path identified by baseDir already contains data,
|
||||
// it will be accessible, but not yet cached.
|
||||
func New(o Options) *Diskv {
|
||||
if o.BasePath == "" {
|
||||
o.BasePath = defaultBasePath
|
||||
}
|
||||
if o.Transform == nil {
|
||||
o.Transform = defaultTransform
|
||||
}
|
||||
if o.PathPerm == 0 {
|
||||
o.PathPerm = defaultPathPerm
|
||||
}
|
||||
if o.FilePerm == 0 {
|
||||
o.FilePerm = defaultFilePerm
|
||||
}
|
||||
|
||||
d := &Diskv{
|
||||
Options: o,
|
||||
cache: map[string][]byte{},
|
||||
cacheSize: 0,
|
||||
}
|
||||
|
||||
if d.Index != nil && d.IndexLess != nil {
|
||||
d.Index.Initialize(d.IndexLess, d.Keys(nil))
|
||||
}
|
||||
|
||||
return d
|
||||
}
|
||||
|
||||
// Write synchronously writes the key-value pair to disk, making it immediately
|
||||
// available for reads. Write relies on the filesystem to perform an eventual
|
||||
// sync to physical media. If you need stronger guarantees, see WriteStream.
|
||||
func (d *Diskv) Write(key string, val []byte) error {
|
||||
return d.WriteStream(key, bytes.NewBuffer(val), false)
|
||||
}
|
||||
|
||||
// WriteStream writes the data represented by the io.Reader to the disk, under
|
||||
// the provided key. If sync is true, WriteStream performs an explicit sync on
|
||||
// the file as soon as it's written.
|
||||
//
|
||||
// bytes.Buffer provides io.Reader semantics for basic data types.
|
||||
func (d *Diskv) WriteStream(key string, r io.Reader, sync bool) error {
|
||||
if len(key) <= 0 {
|
||||
return errEmptyKey
|
||||
}
|
||||
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
return d.writeStreamWithLock(key, r, sync)
|
||||
}
|
||||
|
||||
// createKeyFileWithLock either creates the key file directly, or
|
||||
// creates a temporary file in TempDir if it is set.
|
||||
func (d *Diskv) createKeyFileWithLock(key string) (*os.File, error) {
|
||||
if d.TempDir != "" {
|
||||
if err := os.MkdirAll(d.TempDir, d.PathPerm); err != nil {
|
||||
return nil, fmt.Errorf("temp mkdir: %s", err)
|
||||
}
|
||||
f, err := ioutil.TempFile(d.TempDir, "")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("temp file: %s", err)
|
||||
}
|
||||
|
||||
if err := f.Chmod(d.FilePerm); err != nil {
|
||||
f.Close() // error deliberately ignored
|
||||
os.Remove(f.Name()) // error deliberately ignored
|
||||
return nil, fmt.Errorf("chmod: %s", err)
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
mode := os.O_WRONLY | os.O_CREATE | os.O_TRUNC // overwrite if exists
|
||||
f, err := os.OpenFile(d.completeFilename(key), mode, d.FilePerm)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("open file: %s", err)
|
||||
}
|
||||
return f, nil
|
||||
}
|
||||
|
||||
// writeStream does no input validation checking.
|
||||
func (d *Diskv) writeStreamWithLock(key string, r io.Reader, sync bool) error {
|
||||
if err := d.ensurePathWithLock(key); err != nil {
|
||||
return fmt.Errorf("ensure path: %s", err)
|
||||
}
|
||||
|
||||
f, err := d.createKeyFileWithLock(key)
|
||||
if err != nil {
|
||||
return fmt.Errorf("create key file: %s", err)
|
||||
}
|
||||
|
||||
wc := io.WriteCloser(&nopWriteCloser{f})
|
||||
if d.Compression != nil {
|
||||
wc, err = d.Compression.Writer(f)
|
||||
if err != nil {
|
||||
f.Close() // error deliberately ignored
|
||||
os.Remove(f.Name()) // error deliberately ignored
|
||||
return fmt.Errorf("compression writer: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
if _, err := io.Copy(wc, r); err != nil {
|
||||
f.Close() // error deliberately ignored
|
||||
os.Remove(f.Name()) // error deliberately ignored
|
||||
return fmt.Errorf("i/o copy: %s", err)
|
||||
}
|
||||
|
||||
if err := wc.Close(); err != nil {
|
||||
f.Close() // error deliberately ignored
|
||||
os.Remove(f.Name()) // error deliberately ignored
|
||||
return fmt.Errorf("compression close: %s", err)
|
||||
}
|
||||
|
||||
if sync {
|
||||
if err := f.Sync(); err != nil {
|
||||
f.Close() // error deliberately ignored
|
||||
os.Remove(f.Name()) // error deliberately ignored
|
||||
return fmt.Errorf("file sync: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
if err := f.Close(); err != nil {
|
||||
return fmt.Errorf("file close: %s", err)
|
||||
}
|
||||
|
||||
if f.Name() != d.completeFilename(key) {
|
||||
if err := os.Rename(f.Name(), d.completeFilename(key)); err != nil {
|
||||
os.Remove(f.Name()) // error deliberately ignored
|
||||
return fmt.Errorf("rename: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
if d.Index != nil {
|
||||
d.Index.Insert(key)
|
||||
}
|
||||
|
||||
d.bustCacheWithLock(key) // cache only on read
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Import imports the source file into diskv under the destination key. If the
|
||||
// destination key already exists, it's overwritten. If move is true, the
|
||||
// source file is removed after a successful import.
|
||||
func (d *Diskv) Import(srcFilename, dstKey string, move bool) (err error) {
|
||||
if dstKey == "" {
|
||||
return errEmptyKey
|
||||
}
|
||||
|
||||
if fi, err := os.Stat(srcFilename); err != nil {
|
||||
return err
|
||||
} else if fi.IsDir() {
|
||||
return errImportDirectory
|
||||
}
|
||||
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
|
||||
if err := d.ensurePathWithLock(dstKey); err != nil {
|
||||
return fmt.Errorf("ensure path: %s", err)
|
||||
}
|
||||
|
||||
if move {
|
||||
if err := syscall.Rename(srcFilename, d.completeFilename(dstKey)); err == nil {
|
||||
d.bustCacheWithLock(dstKey)
|
||||
return nil
|
||||
} else if err != syscall.EXDEV {
|
||||
// If it failed due to being on a different device, fall back to copying
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
f, err := os.Open(srcFilename)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
defer f.Close()
|
||||
err = d.writeStreamWithLock(dstKey, f, false)
|
||||
if err == nil && move {
|
||||
err = os.Remove(srcFilename)
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
// Read reads the key and returns the value.
|
||||
// If the key is available in the cache, Read won't touch the disk.
|
||||
// If the key is not in the cache, Read will have the side-effect of
|
||||
// lazily caching the value.
|
||||
func (d *Diskv) Read(key string) ([]byte, error) {
|
||||
rc, err := d.ReadStream(key, false)
|
||||
if err != nil {
|
||||
return []byte{}, err
|
||||
}
|
||||
defer rc.Close()
|
||||
return ioutil.ReadAll(rc)
|
||||
}
|
||||
|
||||
// ReadStream reads the key and returns the value (data) as an io.ReadCloser.
|
||||
// If the value is cached from a previous read, and direct is false,
|
||||
// ReadStream will use the cached value. Otherwise, it will return a handle to
|
||||
// the file on disk, and cache the data on read.
|
||||
//
|
||||
// If direct is true, ReadStream will lazily delete any cached value for the
|
||||
// key, and return a direct handle to the file on disk.
|
||||
//
|
||||
// If compression is enabled, ReadStream taps into the io.Reader stream prior
|
||||
// to decompression, and caches the compressed data.
|
||||
func (d *Diskv) ReadStream(key string, direct bool) (io.ReadCloser, error) {
|
||||
d.mu.RLock()
|
||||
defer d.mu.RUnlock()
|
||||
|
||||
if val, ok := d.cache[key]; ok {
|
||||
if !direct {
|
||||
buf := bytes.NewBuffer(val)
|
||||
if d.Compression != nil {
|
||||
return d.Compression.Reader(buf)
|
||||
}
|
||||
return ioutil.NopCloser(buf), nil
|
||||
}
|
||||
|
||||
go func() {
|
||||
d.mu.Lock()
|
||||
defer d.mu.Unlock()
|
||||
d.uncacheWithLock(key, uint64(len(val)))
|
||||
}()
|
||||
}
|
||||
|
||||
return d.readWithRLock(key)
|
||||
}
// read ignores the cache, and returns an io.ReadCloser representing the
// decompressed data for the given key, streamed from the disk. Clients should
// acquire a read lock on the Diskv and check the cache themselves before
// calling read.
func (d *Diskv) readWithRLock(key string) (io.ReadCloser, error) {
	filename := d.completeFilename(key)

	fi, err := os.Stat(filename)
	if err != nil {
		return nil, err
	}
	if fi.IsDir() {
		return nil, os.ErrNotExist
	}

	f, err := os.Open(filename)
	if err != nil {
		return nil, err
	}

	var r io.Reader
	if d.CacheSizeMax > 0 {
		r = newSiphon(f, d, key)
	} else {
		r = &closingReader{f}
	}

	var rc = io.ReadCloser(ioutil.NopCloser(r))
	if d.Compression != nil {
		rc, err = d.Compression.Reader(r)
		if err != nil {
			return nil, err
		}
	}

	return rc, nil
}

// closingReader provides a Reader that automatically closes the
// embedded ReadCloser when it reaches EOF.
type closingReader struct {
	rc io.ReadCloser
}

func (cr closingReader) Read(p []byte) (int, error) {
	n, err := cr.rc.Read(p)
	if err == io.EOF {
		if closeErr := cr.rc.Close(); closeErr != nil {
			return n, closeErr // close must succeed for Read to succeed
		}
	}
	return n, err
}

// siphon is like a TeeReader: it copies all data read through it to an
// internal buffer, and moves that buffer to the cache at EOF.
type siphon struct {
	f   *os.File
	d   *Diskv
	key string
	buf *bytes.Buffer
}

// newSiphon constructs a siphoning reader that represents the passed file.
// When a successful series of reads ends in an EOF, the siphon will write
// the buffered data to Diskv's cache under the given key.
func newSiphon(f *os.File, d *Diskv, key string) io.Reader {
	return &siphon{
		f:   f,
		d:   d,
		key: key,
		buf: &bytes.Buffer{},
	}
}

// Read implements the io.Reader interface for siphon.
func (s *siphon) Read(p []byte) (int, error) {
	n, err := s.f.Read(p)

	if err == nil {
		return s.buf.Write(p[0:n]) // Write must succeed for Read to succeed
	}

	if err == io.EOF {
		s.d.cacheWithoutLock(s.key, s.buf.Bytes()) // cache may fail
		if closeErr := s.f.Close(); closeErr != nil {
			return n, closeErr // close must succeed for Read to succeed
		}
		return n, err
	}

	return n, err
}

// Erase synchronously erases the given key from the disk and the cache.
func (d *Diskv) Erase(key string) error {
	d.mu.Lock()
	defer d.mu.Unlock()

	d.bustCacheWithLock(key)

	// erase from index
	if d.Index != nil {
		d.Index.Delete(key)
	}

	// erase from disk
	filename := d.completeFilename(key)
	if s, err := os.Stat(filename); err == nil {
		if s.IsDir() {
			return errBadKey
		}
		if err = os.Remove(filename); err != nil {
			return err
		}
	} else {
		// Return err as-is so caller can do os.IsNotExist(err).
		return err
	}

	// clean up and return
	d.pruneDirsWithLock(key)
	return nil
}

// EraseAll will delete all of the data from the store, both in the cache and on
// the disk. Note that EraseAll doesn't distinguish diskv-related data from non-
// diskv-related data. Care should be taken to always specify a diskv base
// directory that is exclusively for diskv data.
func (d *Diskv) EraseAll() error {
	d.mu.Lock()
	defer d.mu.Unlock()
	d.cache = make(map[string][]byte)
	d.cacheSize = 0
	if d.TempDir != "" {
		os.RemoveAll(d.TempDir) // errors ignored
	}
	return os.RemoveAll(d.BasePath)
}

// Has returns true if the given key exists.
func (d *Diskv) Has(key string) bool {
	d.mu.Lock()
	defer d.mu.Unlock()

	if _, ok := d.cache[key]; ok {
		return true
	}

	filename := d.completeFilename(key)
	s, err := os.Stat(filename)
	if err != nil {
		return false
	}
	if s.IsDir() {
		return false
	}

	return true
}

// Keys returns a channel that will yield every key accessible by the store,
// in undefined order. If a cancel channel is provided, closing it will
// terminate and close the keys channel.
func (d *Diskv) Keys(cancel <-chan struct{}) <-chan string {
	return d.KeysPrefix("", cancel)
}

// KeysPrefix returns a channel that will yield every key accessible by the
// store with the given prefix, in undefined order. If a cancel channel is
// provided, closing it will terminate and close the keys channel. If the
// provided prefix is the empty string, all keys will be yielded.
func (d *Diskv) KeysPrefix(prefix string, cancel <-chan struct{}) <-chan string {
	var prepath string
	if prefix == "" {
		prepath = d.BasePath
	} else {
		prepath = d.pathFor(prefix)
	}
	c := make(chan string)
	go func() {
		filepath.Walk(prepath, walker(c, prefix, cancel))
		close(c)
	}()
	return c
}
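A short sketch of how the cancel channel is meant to be used with Keys/KeysPrefix (the prefix and stop condition are made up for illustration). Closing the channel aborts the underlying filepath.Walk, and the goroutine above then closes the keys channel, ending the range loop:

cancel := make(chan struct{})
for key := range d.KeysPrefix("user-", cancel) {
	fmt.Println(key)
	if key == "user-9999" { // some caller-specific stop condition
		close(cancel) // terminates the walk early
		break
	}
}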
// walker returns a function which satisfies the filepath.WalkFunc interface.
// It sends every non-directory file entry down the channel c.
func walker(c chan<- string, prefix string, cancel <-chan struct{}) filepath.WalkFunc {
	return func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}

		if info.IsDir() || !strings.HasPrefix(info.Name(), prefix) {
			return nil // "pass"
		}

		select {
		case c <- info.Name():
		case <-cancel:
			return errCanceled
		}

		return nil
	}
}

// pathFor returns the absolute path for the location on the filesystem where
// the data for the given key will be stored.
func (d *Diskv) pathFor(key string) string {
	return filepath.Join(d.BasePath, filepath.Join(d.Transform(key)...))
}
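Since pathFor composes BasePath with whatever Transform returns, a common pattern (shown in diskv's own documentation) is a block transform that shards keys into nested directories. A sketch, assuming keys are at least four characters long:

blockTransform := func(s string) []string {
	return []string{s[0:2], s[2:4]} // "abcdef" -> <BasePath>/ab/cd/abcdef
}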
// ensurePathWithLock is a helper function that generates all necessary
// directories on the filesystem for the given key.
func (d *Diskv) ensurePathWithLock(key string) error {
	return os.MkdirAll(d.pathFor(key), d.PathPerm)
}

// completeFilename returns the absolute path to the file for the given key.
func (d *Diskv) completeFilename(key string) string {
	return filepath.Join(d.pathFor(key), key)
}

// cacheWithLock attempts to cache the given key-value pair in the store's
// cache. It can fail if the value is larger than the cache's maximum size.
func (d *Diskv) cacheWithLock(key string, val []byte) error {
	valueSize := uint64(len(val))
	if err := d.ensureCacheSpaceWithLock(valueSize); err != nil {
		return fmt.Errorf("%s; not caching", err)
	}

	// be very strict about memory guarantees
	if (d.cacheSize + valueSize) > d.CacheSizeMax {
		panic(fmt.Sprintf("failed to make room for value (%d/%d)", valueSize, d.CacheSizeMax))
	}

	d.cache[key] = val
	d.cacheSize += valueSize
	return nil
}

// cacheWithoutLock acquires the store's (write) mutex and calls cacheWithLock.
func (d *Diskv) cacheWithoutLock(key string, val []byte) error {
	d.mu.Lock()
	defer d.mu.Unlock()
	return d.cacheWithLock(key, val)
}

func (d *Diskv) bustCacheWithLock(key string) {
	if val, ok := d.cache[key]; ok {
		d.uncacheWithLock(key, uint64(len(val)))
	}
}

func (d *Diskv) uncacheWithLock(key string, sz uint64) {
	d.cacheSize -= sz
	delete(d.cache, key)
}

// pruneDirsWithLock deletes empty directories in the path walk leading to the
// key k. Typically this function is called after an Erase is made.
func (d *Diskv) pruneDirsWithLock(key string) error {
	pathlist := d.Transform(key)
	for i := range pathlist {
		dir := filepath.Join(d.BasePath, filepath.Join(pathlist[:len(pathlist)-i]...))

		// thanks to Steven Blenkinsop for this snippet
		switch fi, err := os.Stat(dir); true {
		case err != nil:
			return err
		case !fi.IsDir():
			panic(fmt.Sprintf("corrupt dirstate at %s", dir))
		}

		nlinks, err := filepath.Glob(filepath.Join(dir, "*"))
		if err != nil {
			return err
		} else if len(nlinks) > 0 {
			return nil // has subdirs -- do not prune
		}
		if err = os.Remove(dir); err != nil {
			return err
		}
	}

	return nil
}

// ensureCacheSpaceWithLock deletes entries from the cache in arbitrary order
// until the cache has at least valueSize bytes available.
func (d *Diskv) ensureCacheSpaceWithLock(valueSize uint64) error {
	if valueSize > d.CacheSizeMax {
		return fmt.Errorf("value size (%d bytes) too large for cache (%d bytes)", valueSize, d.CacheSizeMax)
	}

	safe := func() bool { return (d.cacheSize + valueSize) <= d.CacheSizeMax }

	for key, val := range d.cache {
		if safe() {
			break
		}

		d.uncacheWithLock(key, uint64(len(val)))
	}

	if !safe() {
		panic(fmt.Sprintf("%d bytes still won't fit in the cache! (max %d bytes)", valueSize, d.CacheSizeMax))
	}

	return nil
}

// nopWriteCloser wraps an io.Writer and provides a no-op Close method to
// satisfy the io.WriteCloser interface.
type nopWriteCloser struct {
	io.Writer
}

func (wc *nopWriteCloser) Write(p []byte) (int, error) { return wc.Writer.Write(p) }
func (wc *nopWriteCloser) Close() error                { return nil }

@ -1,115 +0,0 @@
package diskv

import (
	"sync"

	"github.com/google/btree"
)

// Index is a generic interface for things that can
// provide an ordered list of keys.
type Index interface {
	Initialize(less LessFunction, keys <-chan string)
	Insert(key string)
	Delete(key string)
	Keys(from string, n int) []string
}

// LessFunction is used to initialize an Index of keys in a specific order.
type LessFunction func(string, string) bool

// btreeString is a custom data type that satisfies the BTree Less interface,
// making the strings it wraps sortable by the BTree package.
type btreeString struct {
	s string
	l LessFunction
}

// Less satisfies the BTree.Less interface using the btreeString's LessFunction.
func (s btreeString) Less(i btree.Item) bool {
	return s.l(s.s, i.(btreeString).s)
}

// BTreeIndex is an implementation of the Index interface using google/btree.
type BTreeIndex struct {
	sync.RWMutex
	LessFunction
	*btree.BTree
}

// Initialize populates the BTree tree with data from the keys channel,
// according to the passed less function. It's destructive to the BTreeIndex.
func (i *BTreeIndex) Initialize(less LessFunction, keys <-chan string) {
	i.Lock()
	defer i.Unlock()
	i.LessFunction = less
	i.BTree = rebuild(less, keys)
}

// Insert inserts the given key (only) into the BTree tree.
func (i *BTreeIndex) Insert(key string) {
	i.Lock()
	defer i.Unlock()
	if i.BTree == nil || i.LessFunction == nil {
		panic("uninitialized index")
	}
	i.BTree.ReplaceOrInsert(btreeString{s: key, l: i.LessFunction})
}

// Delete removes the given key (only) from the BTree tree.
func (i *BTreeIndex) Delete(key string) {
	i.Lock()
	defer i.Unlock()
	if i.BTree == nil || i.LessFunction == nil {
		panic("uninitialized index")
	}
	i.BTree.Delete(btreeString{s: key, l: i.LessFunction})
}

// Keys yields a maximum of n keys in order. If the passed 'from' key is empty,
// Keys will return the first n keys. If the passed 'from' key is non-empty, the
// first key in the returned slice will be the key that immediately follows the
// passed key, in key order.
func (i *BTreeIndex) Keys(from string, n int) []string {
	i.RLock()
	defer i.RUnlock()

	if i.BTree == nil || i.LessFunction == nil {
		panic("uninitialized index")
	}

	if i.BTree.Len() <= 0 {
		return []string{}
	}

	btreeFrom := btreeString{s: from, l: i.LessFunction}
	skipFirst := true
	if len(from) <= 0 || !i.BTree.Has(btreeFrom) {
		// no such key, so fabricate an always-smallest item
		btreeFrom = btreeString{s: "", l: func(string, string) bool { return true }}
		skipFirst = false
	}

	keys := []string{}
	iterator := func(i btree.Item) bool {
		keys = append(keys, i.(btreeString).s)
		return len(keys) < n
	}
	i.BTree.AscendGreaterOrEqual(btreeFrom, iterator)

	if skipFirst && len(keys) > 0 {
		keys = keys[1:]
	}

	return keys
}

// rebuild does the work of regenerating the index
// with the given keys.
func rebuild(less LessFunction, keys <-chan string) *btree.BTree {
	tree := btree.New(2)
	for key := range keys {
		tree.ReplaceOrInsert(btreeString{s: key, l: less})
	}
	return tree
}
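A sketch of wiring the BTreeIndex into a store so ordered key queries become available (field values are illustrative; Options also accepts the IndexLess function that orders keys):

d := diskv.New(diskv.Options{
	BasePath:     "sorted-data", // illustrative path
	Transform:    func(s string) []string { return nil },
	CacheSizeMax: 1024 * 1024,
	Index:        &diskv.BTreeIndex{},
	IndexLess:    func(a, b string) bool { return a < b },
})

firstTen := d.Index.Keys("", 10)         // first 10 keys in order
nextTen := d.Index.Keys(firstTen[9], 10) // the 10 keys that follow (assumes >= 10 keys exist)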
@ -1,335 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package encoding defines an interface for character encodings, such as Shift
// JIS and Windows 1252, that can convert to and from UTF-8.
//
// Encoding implementations are provided in other packages, such as
// golang.org/x/text/encoding/charmap and
// golang.org/x/text/encoding/japanese.
package encoding // import "golang.org/x/text/encoding"

import (
	"errors"
	"io"
	"strconv"
	"unicode/utf8"

	"golang.org/x/text/encoding/internal/identifier"
	"golang.org/x/text/transform"
)

// TODO:
// - There seems to be some inconsistency in when decoders return errors
//   and when not. Also documentation seems to suggest they shouldn't return
//   errors at all (except for UTF-16).
// - Encoders seem to rely on or at least benefit from the input being in NFC
//   normal form. Perhaps add an example how users could prepare their output.

// Encoding is a character set encoding that can be transformed to and from
// UTF-8.
type Encoding interface {
	// NewDecoder returns a Decoder.
	NewDecoder() *Decoder

	// NewEncoder returns an Encoder.
	NewEncoder() *Encoder
}

// A Decoder converts bytes to UTF-8. It implements transform.Transformer.
//
// Transforming source bytes that are not of that encoding will not result in an
// error per se. Each byte that cannot be transcoded will be represented in the
// output by the UTF-8 encoding of '\uFFFD', the replacement rune.
type Decoder struct {
	transform.Transformer

	// This forces external creators of Decoders to use names in struct
	// initializers, allowing for future extendibility without having to break
	// code.
	_ struct{}
}

// Bytes converts the given encoded bytes to UTF-8. It returns the converted
// bytes or nil, err if any error occurred.
func (d *Decoder) Bytes(b []byte) ([]byte, error) {
	b, _, err := transform.Bytes(d, b)
	if err != nil {
		return nil, err
	}
	return b, nil
}

// String converts the given encoded string to UTF-8. It returns the converted
// string or "", err if any error occurred.
func (d *Decoder) String(s string) (string, error) {
	s, _, err := transform.String(d, s)
	if err != nil {
		return "", err
	}
	return s, nil
}

// Reader wraps another Reader to decode its bytes.
//
// The Decoder may not be used for any other operation as long as the returned
// Reader is in use.
func (d *Decoder) Reader(r io.Reader) io.Reader {
	return transform.NewReader(r, d)
}
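As a concrete illustration of the Decoder API above, a sketch that decodes Windows-1252 bytes to UTF-8 using the charmap package mentioned in the package comment (the input literal is illustrative):

import (
	"fmt"

	"golang.org/x/text/encoding/charmap"
)

func decodeExample() {
	dec := charmap.Windows1252.NewDecoder()
	s, err := dec.String("caf\xe9") // 0xE9 is 'é' in Windows-1252
	if err != nil {
		panic(err)
	}
	fmt.Println(s) // café
}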
// An Encoder converts bytes from UTF-8. It implements transform.Transformer.
//
// Each rune that cannot be transcoded will result in an error. In this case,
// the transform will consume all source bytes up to, not including the offending
// rune. Transforming source bytes that are not valid UTF-8 will be replaced by
// `\uFFFD`. To return early with an error instead, use transform.Chain to
// preprocess the data with a UTF8Validator.
type Encoder struct {
	transform.Transformer

	// This forces external creators of Encoders to use names in struct
	// initializers, allowing for future extendibility without having to break
	// code.
	_ struct{}
}

// Bytes converts bytes from UTF-8. It returns the converted bytes or nil, err if
// any error occurred.
func (e *Encoder) Bytes(b []byte) ([]byte, error) {
	b, _, err := transform.Bytes(e, b)
	if err != nil {
		return nil, err
	}
	return b, nil
}

// String converts a string from UTF-8. It returns the converted string or
// "", err if any error occurred.
func (e *Encoder) String(s string) (string, error) {
	s, _, err := transform.String(e, s)
	if err != nil {
		return "", err
	}
	return s, nil
}

// Writer wraps another Writer to encode its UTF-8 output.
//
// The Encoder may not be used for any other operation as long as the returned
// Writer is in use.
func (e *Encoder) Writer(w io.Writer) io.Writer {
	return transform.NewWriter(w, e)
}

// ASCIISub is the ASCII substitute character, as recommended by
// http://unicode.org/reports/tr36/#Text_Comparison
const ASCIISub = '\x1a'

// Nop is the nop encoding. Its transformed bytes are the same as the source
// bytes; it does not replace invalid UTF-8 sequences.
var Nop Encoding = nop{}

type nop struct{}

func (nop) NewDecoder() *Decoder {
	return &Decoder{Transformer: transform.Nop}
}
func (nop) NewEncoder() *Encoder {
	return &Encoder{Transformer: transform.Nop}
}

// Replacement is the replacement encoding. Decoding from the replacement
// encoding yields a single '\uFFFD' replacement rune. Encoding from UTF-8 to
// the replacement encoding yields the same as the source bytes except that
// invalid UTF-8 is converted to '\uFFFD'.
//
// It is defined at http://encoding.spec.whatwg.org/#replacement
var Replacement Encoding = replacement{}

type replacement struct{}

func (replacement) NewDecoder() *Decoder {
	return &Decoder{Transformer: replacementDecoder{}}
}

func (replacement) NewEncoder() *Encoder {
	return &Encoder{Transformer: replacementEncoder{}}
}

func (replacement) ID() (mib identifier.MIB, other string) {
	return identifier.Replacement, ""
}

type replacementDecoder struct{ transform.NopResetter }

func (replacementDecoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	if len(dst) < 3 {
		return 0, 0, transform.ErrShortDst
	}
	if atEOF {
		const fffd = "\ufffd"
		dst[0] = fffd[0]
		dst[1] = fffd[1]
		dst[2] = fffd[2]
		nDst = 3
	}
	return nDst, len(src), nil
}

type replacementEncoder struct{ transform.NopResetter }

func (replacementEncoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	r, size := rune(0), 0

	for ; nSrc < len(src); nSrc += size {
		r = rune(src[nSrc])

		// Decode a 1-byte rune.
		if r < utf8.RuneSelf {
			size = 1

		} else {
			// Decode a multi-byte rune.
			r, size = utf8.DecodeRune(src[nSrc:])
			if size == 1 {
				// All valid runes of size 1 (those below utf8.RuneSelf) were
				// handled above. We have invalid UTF-8 or we haven't seen the
				// full character yet.
				if !atEOF && !utf8.FullRune(src[nSrc:]) {
					err = transform.ErrShortSrc
					break
				}
				r = '\ufffd'
			}
		}

		if nDst+utf8.RuneLen(r) > len(dst) {
			err = transform.ErrShortDst
			break
		}
		nDst += utf8.EncodeRune(dst[nDst:], r)
	}
	return nDst, nSrc, err
}

// HTMLEscapeUnsupported wraps encoders to replace source runes outside the
// repertoire of the destination encoding with HTML escape sequences.
//
// This wrapper exists to comply with URL and HTML forms requiring a
// non-terminating legacy encoder. The produced sequences may lead to data
// loss as they are indistinguishable from legitimate input. To avoid this
// issue, use UTF-8 encodings whenever possible.
func HTMLEscapeUnsupported(e *Encoder) *Encoder {
	return &Encoder{Transformer: &errorHandler{e, errorToHTML}}
}

// ReplaceUnsupported wraps encoders to replace source runes outside the
// repertoire of the destination encoding with an encoding-specific
// replacement.
//
// This wrapper is only provided for backwards compatibility and legacy
// handling. Its use is strongly discouraged. Use UTF-8 whenever possible.
func ReplaceUnsupported(e *Encoder) *Encoder {
	return &Encoder{Transformer: &errorHandler{e, errorToReplacement}}
}
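The two wrappers compose with any Encoder. A brief sketch (rune and target encoding chosen for illustration) of how an out-of-repertoire rune is handled under each policy:

enc := charmap.Windows1252.NewEncoder()
if _, err := enc.String("snowman: ☃"); err != nil {
	fmt.Println("plain encoder fails on U+2603:", err)
}

// HTML-escape policy: the unsupported rune becomes "&#9731;".
escaped, _ := encoding.HTMLEscapeUnsupported(charmap.Windows1252.NewEncoder()).String("snowman: ☃")

// Replacement policy: the unsupported rune becomes the encoding's
// suggested substitute byte (typically ASCIISub for charmap encodings).
replaced, _ := encoding.ReplaceUnsupported(charmap.Windows1252.NewEncoder()).String("snowman: ☃")

fmt.Println(escaped, replaced)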
type errorHandler struct {
	*Encoder
	handler func(dst []byte, r rune, err repertoireError) (n int, ok bool)
}

// TODO: consider making this error public in some form.
type repertoireError interface {
	Replacement() byte
}

func (h errorHandler) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	nDst, nSrc, err = h.Transformer.Transform(dst, src, atEOF)
	for err != nil {
		rerr, ok := err.(repertoireError)
		if !ok {
			return nDst, nSrc, err
		}
		r, sz := utf8.DecodeRune(src[nSrc:])
		n, ok := h.handler(dst[nDst:], r, rerr)
		if !ok {
			return nDst, nSrc, transform.ErrShortDst
		}
		err = nil
		nDst += n
		if nSrc += sz; nSrc < len(src) {
			var dn, sn int
			dn, sn, err = h.Transformer.Transform(dst[nDst:], src[nSrc:], atEOF)
			nDst += dn
			nSrc += sn
		}
	}
	return nDst, nSrc, err
}

func errorToHTML(dst []byte, r rune, err repertoireError) (n int, ok bool) {
	buf := [8]byte{}
	b := strconv.AppendUint(buf[:0], uint64(r), 10)
	if n = len(b) + len("&#;"); n >= len(dst) {
		return 0, false
	}
	dst[0] = '&'
	dst[1] = '#'
	dst[copy(dst[2:], b)+2] = ';'
	return n, true
}

func errorToReplacement(dst []byte, r rune, err repertoireError) (n int, ok bool) {
	if len(dst) == 0 {
		return 0, false
	}
	dst[0] = err.Replacement()
	return 1, true
}

// ErrInvalidUTF8 means that a transformer encountered invalid UTF-8.
var ErrInvalidUTF8 = errors.New("encoding: invalid UTF-8")

// UTF8Validator is a transformer that returns ErrInvalidUTF8 on the first
// input byte that is not valid UTF-8.
var UTF8Validator transform.Transformer = utf8Validator{}

type utf8Validator struct{ transform.NopResetter }

func (utf8Validator) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	n := len(src)
	if n > len(dst) {
		n = len(dst)
	}
	for i := 0; i < n; {
		if c := src[i]; c < utf8.RuneSelf {
			dst[i] = c
			i++
			continue
		}
		_, size := utf8.DecodeRune(src[i:])
		if size == 1 {
			// All valid runes of size 1 (those below utf8.RuneSelf) were
			// handled above. We have invalid UTF-8 or we haven't seen the
			// full character yet.
			err = ErrInvalidUTF8
			if !atEOF && !utf8.FullRune(src[i:]) {
				err = transform.ErrShortSrc
			}
			return i, i, err
		}
		if i+size > len(dst) {
			return i, i, transform.ErrShortDst
		}
		for ; size > 0; size-- {
			dst[i] = src[i]
			i++
		}
	}
	if len(src) > len(dst) {
		err = transform.ErrShortDst
	}
	return n, n, err
}

@ -1,137 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build ignore

package main

import (
	"bytes"
	"encoding/xml"
	"fmt"
	"io"
	"log"
	"strings"

	"golang.org/x/text/internal/gen"
)

type registry struct {
	XMLName  xml.Name `xml:"registry"`
	Updated  string   `xml:"updated"`
	Registry []struct {
		ID     string `xml:"id,attr"`
		Record []struct {
			Name string `xml:"name"`
			Xref []struct {
				Type string `xml:"type,attr"`
				Data string `xml:"data,attr"`
			} `xml:"xref"`
			Desc struct {
				Data string `xml:",innerxml"`
				// Any []struct {
				// 	Data string `xml:",chardata"`
				// } `xml:",any"`
				// Data string `xml:",chardata"`
			} `xml:"description,"`
			MIB   string   `xml:"value"`
			Alias []string `xml:"alias"`
			MIME  string   `xml:"preferred_alias"`
		} `xml:"record"`
	} `xml:"registry"`
}

func main() {
	r := gen.OpenIANAFile("assignments/character-sets/character-sets.xml")
	reg := &registry{}
	if err := xml.NewDecoder(r).Decode(&reg); err != nil && err != io.EOF {
		log.Fatalf("Error decoding charset registry: %v", err)
	}
	if len(reg.Registry) == 0 || reg.Registry[0].ID != "character-sets-1" {
		log.Fatalf("Unexpected ID %s", reg.Registry[0].ID)
	}

	w := &bytes.Buffer{}
	fmt.Fprintf(w, "const (\n")
	for _, rec := range reg.Registry[0].Record {
		constName := ""
		for _, a := range rec.Alias {
			if strings.HasPrefix(a, "cs") && strings.IndexByte(a, '-') == -1 {
				// Some of the constant definitions have comments in them. Strip those.
				constName = strings.Title(strings.SplitN(a[2:], "\n", 2)[0])
			}
		}
		if constName == "" {
			switch rec.MIB {
			case "2085":
				constName = "HZGB2312" // Not listed as alias for some reason.
			default:
				log.Fatalf("No cs alias defined for %s.", rec.MIB)
			}
		}
		if rec.MIME != "" {
			rec.MIME = fmt.Sprintf(" (MIME: %s)", rec.MIME)
		}
		fmt.Fprintf(w, "// %s is the MIB identifier with IANA name %s%s.\n//\n", constName, rec.Name, rec.MIME)
		if len(rec.Desc.Data) > 0 {
			fmt.Fprint(w, "// ")
			d := xml.NewDecoder(strings.NewReader(rec.Desc.Data))
			inElem := true
			attr := ""
			for {
				t, err := d.Token()
				if err != nil {
					if err != io.EOF {
						log.Fatal(err)
					}
					break
				}
				switch x := t.(type) {
				case xml.CharData:
					attr = "" // Don't need attribute info.
					a := bytes.Split([]byte(x), []byte("\n"))
					for i, b := range a {
						if b = bytes.TrimSpace(b); len(b) != 0 {
							if !inElem && i > 0 {
								fmt.Fprint(w, "\n// ")
							}
							inElem = false
							fmt.Fprintf(w, "%s ", string(b))
						}
					}
				case xml.StartElement:
					if x.Name.Local == "xref" {
						inElem = true
						use := false
						for _, a := range x.Attr {
							if a.Name.Local == "type" {
								use = use || a.Value != "person"
							}
							if a.Name.Local == "data" && use {
								attr = a.Value + " "
							}
						}
					}
				case xml.EndElement:
					inElem = false
					fmt.Fprint(w, attr)
				}
			}
			fmt.Fprint(w, "\n")
		}
		for _, x := range rec.Xref {
			switch x.Type {
			case "rfc":
				fmt.Fprintf(w, "// Reference: %s\n", strings.ToUpper(x.Data))
			case "uri":
				fmt.Fprintf(w, "// Reference: %s\n", x.Data)
			}
		}
		fmt.Fprintf(w, "%s MIB = %s\n", constName, rec.MIB)
		fmt.Fprintln(w)
	}
	fmt.Fprintln(w, ")")

	gen.WriteGoFile("mib.go", "identifier", w.Bytes())
}

@ -1,81 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

//go:generate go run gen.go

// Package identifier defines the contract between implementations of Encoding
// and Index by defining identifiers that uniquely identify standardized coded
// character sets (CCS) and character encoding schemes (CES), which we will
// together refer to as encodings, for which Encoding implementations provide
// converters to and from UTF-8. This package is typically only of concern to
// implementers of Indexes and Encodings.
//
// One part of the identifier is the MIB code, which is defined by IANA and
// uniquely identifies a CCS or CES. Each code is associated with data that
// references authorities, official documentation as well as aliases and MIME
// names.
//
// Not all CESs are covered by the IANA registry. The "other" string that is
// returned by ID can be used to identify other character sets or versions of
// existing ones.
//
// It is recommended that each package that provides a set of Encodings provide
// the All and Common variables to reference all supported encodings and a
// commonly used subset. This allows Index implementations to include all
// available encodings without explicitly referencing or knowing about them.
package identifier

// Note: this package is internal, but could be made public if there is a need
// for writing third-party Indexes and Encodings.

// References:
// - http://source.icu-project.org/repos/icu/icu/trunk/source/data/mappings/convrtrs.txt
// - http://www.iana.org/assignments/character-sets/character-sets.xhtml
// - http://www.iana.org/assignments/ianacharset-mib/ianacharset-mib
// - http://www.ietf.org/rfc/rfc2978.txt
// - http://www.unicode.org/reports/tr22/
// - http://www.w3.org/TR/encoding/
// - https://encoding.spec.whatwg.org/
// - https://encoding.spec.whatwg.org/encodings.json
// - https://tools.ietf.org/html/rfc6657#section-5

// Interface can be implemented by Encodings to define the CCS or CES for which
// it implements conversions.
type Interface interface {
	// ID returns an encoding identifier. Exactly one of the mib and other
	// values should be non-zero.
	//
	// In the usual case it is only necessary to indicate the MIB code. The
	// other string can be used to specify encodings for which there is no MIB,
	// such as "x-mac-dingbat".
	//
	// The other string may only contain the characters a-z, A-Z, 0-9, - and _.
	ID() (mib MIB, other string)

	// NOTE: the restrictions on the encoding are to allow extending the syntax
	// with additional information such as versions, vendors and other variants.
}

// A MIB identifies an encoding. It is derived from the IANA MIB codes and adds
// some identifiers for some encodings that are not covered by the IANA
// standard.
//
// See http://www.iana.org/assignments/ianacharset-mib.
type MIB uint16

// These additional MIB types are not defined in IANA. They are added because
// they are common and defined within the text repo.
const (
	// Unofficial marks the start of encodings not registered by IANA.
	Unofficial MIB = 10000 + iota

	// Replacement is the WhatWG replacement encoding.
	Replacement

	// XUserDefined is the code for x-user-defined.
	XUserDefined

	// MacintoshCyrillic is the code for x-mac-cyrillic.
	MacintoshCyrillic
)

File diff suppressed because it is too large

@ -1,75 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package internal contains code that is shared among encoding implementations.
package internal

import (
	"golang.org/x/text/encoding"
	"golang.org/x/text/encoding/internal/identifier"
	"golang.org/x/text/transform"
)

// Encoding is an implementation of the Encoding interface that adds the String
// and ID methods to an existing encoding.
type Encoding struct {
	encoding.Encoding
	Name string
	MIB  identifier.MIB
}

// _ verifies that Encoding implements identifier.Interface.
var _ identifier.Interface = (*Encoding)(nil)

func (e *Encoding) String() string {
	return e.Name
}

func (e *Encoding) ID() (mib identifier.MIB, other string) {
	return e.MIB, ""
}

// SimpleEncoding is an Encoding that combines two Transformers.
type SimpleEncoding struct {
	Decoder transform.Transformer
	Encoder transform.Transformer
}

func (e *SimpleEncoding) NewDecoder() *encoding.Decoder {
	return &encoding.Decoder{Transformer: e.Decoder}
}

func (e *SimpleEncoding) NewEncoder() *encoding.Encoder {
	return &encoding.Encoder{Transformer: e.Encoder}
}

// FuncEncoding is an Encoding that combines two functions returning a new
// Transformer.
type FuncEncoding struct {
	Decoder func() transform.Transformer
	Encoder func() transform.Transformer
}

func (e FuncEncoding) NewDecoder() *encoding.Decoder {
	return &encoding.Decoder{Transformer: e.Decoder()}
}

func (e FuncEncoding) NewEncoder() *encoding.Encoder {
	return &encoding.Encoder{Transformer: e.Encoder()}
}

// A RepertoireError indicates a rune is not in the repertoire of a destination
// encoding. It is associated with an encoding-specific suggested replacement
// byte.
type RepertoireError byte

// Error implements the error interface.
func (r RepertoireError) Error() string {
	return "encoding: rune not supported by encoding."
}

// Replacement returns the replacement byte associated with this error.
func (r RepertoireError) Replacement() byte { return byte(r) }

var ErrASCIIReplacement = RepertoireError(encoding.ASCIISub)

@ -1,82 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package unicode

import (
	"golang.org/x/text/transform"
)

// BOMOverride returns a new decoder transformer that is identical to fallback,
// except that the presence of a Byte Order Mark at the start of the input
// causes it to switch to the corresponding Unicode decoding. It will only
// consider BOMs for UTF-8, UTF-16BE, and UTF-16LE.
//
// This differs from using ExpectBOM by allowing a BOM to switch to UTF-8, not
// just UTF-16 variants, and allowing falling back to any encoding scheme.
//
// This technique is recommended by the W3C for use in HTML 5: "For
// compatibility with deployed content, the byte order mark (also known as BOM)
// is considered more authoritative than anything else."
// http://www.w3.org/TR/encoding/#specification-hooks
//
// Using BOMOverride is mostly intended for use cases where the first characters
// of a fallback encoding are known to not be a BOM, for example, for valid HTML
// and most encodings.
func BOMOverride(fallback transform.Transformer) transform.Transformer {
	// TODO: possibly allow a variadic argument of unicode encodings to allow
	// specifying details of which fallbacks are supported as well as
	// specifying the details of the implementations. This would also allow for
	// support for UTF-32, which should not be supported by default.
	return &bomOverride{fallback: fallback}
}

type bomOverride struct {
	fallback transform.Transformer
	current  transform.Transformer
}

func (d *bomOverride) Reset() {
	d.current = nil
	d.fallback.Reset()
}

var (
	// TODO: we could use decode functions here, instead of allocating a new
	// decoder on every NewDecoder as IgnoreBOM decoders can be stateless.
	utf16le = UTF16(LittleEndian, IgnoreBOM)
	utf16be = UTF16(BigEndian, IgnoreBOM)
)

const utf8BOM = "\ufeff"

func (d *bomOverride) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	if d.current != nil {
		return d.current.Transform(dst, src, atEOF)
	}
	if len(src) < 3 && !atEOF {
		return 0, 0, transform.ErrShortSrc
	}
	d.current = d.fallback
	bomSize := 0
	if len(src) >= 2 {
		if src[0] == 0xFF && src[1] == 0xFE {
			d.current = utf16le.NewDecoder()
			bomSize = 2
		} else if src[0] == 0xFE && src[1] == 0xFF {
			d.current = utf16be.NewDecoder()
			bomSize = 2
		} else if len(src) >= 3 &&
			src[0] == utf8BOM[0] &&
			src[1] == utf8BOM[1] &&
			src[2] == utf8BOM[2] {
			d.current = transform.Nop
			bomSize = 3
		}
	}
	if bomSize < len(src) {
		nDst, nSrc, err = d.current.Transform(dst, src[bomSize:], atEOF)
	}
	return nDst, nSrc + bomSize, err
}
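A sketch of the intended use: wrap a legacy fallback decoder so that BOM-tagged UTF-8/UTF-16 input is handled transparently (the fallback choice and reader source are illustrative):

import (
	"os"

	"golang.org/x/text/encoding/charmap"
	"golang.org/x/text/encoding/unicode"
	"golang.org/x/text/transform"
)

func openDecoded(f *os.File) *transform.Reader {
	// If the file starts with a UTF-8 or UTF-16 BOM, decode accordingly;
	// otherwise fall back to Windows-1252.
	t := unicode.BOMOverride(charmap.Windows1252.NewDecoder())
	return transform.NewReader(f, t)
}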
@ -1,434 +0,0 @@
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package unicode provides Unicode encodings such as UTF-16.
package unicode // import "golang.org/x/text/encoding/unicode"

import (
	"errors"
	"unicode/utf16"
	"unicode/utf8"

	"golang.org/x/text/encoding"
	"golang.org/x/text/encoding/internal"
	"golang.org/x/text/encoding/internal/identifier"
	"golang.org/x/text/internal/utf8internal"
	"golang.org/x/text/runes"
	"golang.org/x/text/transform"
)

// TODO: I think the Transformers really should return errors on unmatched
// surrogate pairs and odd numbers of bytes. This is not required by RFC 2781,
// which leaves it open, but is suggested by WhatWG. It will allow for all error
// modes as defined by WhatWG: fatal, HTML and Replacement. This would require
// the introduction of some kind of error type for conveying the erroneous code
// point.

// UTF8 is the UTF-8 encoding.
var UTF8 encoding.Encoding = utf8enc

var utf8enc = &internal.Encoding{
	&internal.SimpleEncoding{utf8Decoder{}, runes.ReplaceIllFormed()},
	"UTF-8",
	identifier.UTF8,
}

type utf8Decoder struct{ transform.NopResetter }

func (utf8Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	var pSrc int // point from which to start copy in src
	var accept utf8internal.AcceptRange

	// The decoder can only make the input larger, not smaller.
	n := len(src)
	if len(dst) < n {
		err = transform.ErrShortDst
		n = len(dst)
		atEOF = false
	}
	for nSrc < n {
		c := src[nSrc]
		if c < utf8.RuneSelf {
			nSrc++
			continue
		}
		first := utf8internal.First[c]
		size := int(first & utf8internal.SizeMask)
		if first == utf8internal.FirstInvalid {
			goto handleInvalid // invalid starter byte
		}
		accept = utf8internal.AcceptRanges[first>>utf8internal.AcceptShift]
		if nSrc+size > n {
			if !atEOF {
				// We may stop earlier than necessary here if the short sequence
				// has invalid bytes. Not checking for this simplifies the code
				// and may avoid duplicate computations in certain conditions.
				if err == nil {
					err = transform.ErrShortSrc
				}
				break
			}
			// Determine the maximal subpart of an ill-formed subsequence.
			switch {
			case nSrc+1 >= n || src[nSrc+1] < accept.Lo || accept.Hi < src[nSrc+1]:
				size = 1
			case nSrc+2 >= n || src[nSrc+2] < utf8internal.LoCB || utf8internal.HiCB < src[nSrc+2]:
				size = 2
			default:
				size = 3 // As we are short, the maximum is 3.
			}
			goto handleInvalid
		}
		if c = src[nSrc+1]; c < accept.Lo || accept.Hi < c {
			size = 1
			goto handleInvalid // invalid continuation byte
		} else if size == 2 {
		} else if c = src[nSrc+2]; c < utf8internal.LoCB || utf8internal.HiCB < c {
			size = 2
			goto handleInvalid // invalid continuation byte
		} else if size == 3 {
		} else if c = src[nSrc+3]; c < utf8internal.LoCB || utf8internal.HiCB < c {
			size = 3
			goto handleInvalid // invalid continuation byte
		}
		nSrc += size
		continue

	handleInvalid:
		// Copy the scanned input so far.
		nDst += copy(dst[nDst:], src[pSrc:nSrc])

		// Append RuneError to the destination.
		const runeError = "\ufffd"
		if nDst+len(runeError) > len(dst) {
			return nDst, nSrc, transform.ErrShortDst
		}
		nDst += copy(dst[nDst:], runeError)

		// Skip the maximal subpart of an ill-formed subsequence according to
		// the W3C standard way instead of the Go way. This Transform is
		// probably the only place in the text repo where it is warranted.
		nSrc += size
		pSrc = nSrc

		// Recompute the maximum source length.
		if sz := len(dst) - nDst; sz < len(src)-nSrc {
			err = transform.ErrShortDst
			n = nSrc + sz
			atEOF = false
		}
	}
	return nDst + copy(dst[nDst:], src[pSrc:nSrc]), nSrc, err
}

// UTF16 returns a UTF-16 Encoding for the given default endianness and byte
// order mark (BOM) policy.
//
// When decoding from UTF-16 to UTF-8, if the BOMPolicy is IgnoreBOM then
// neither BOMs U+FEFF nor noncharacters U+FFFE in the input stream will affect
// the endianness used for decoding, and will instead be output as their
// standard UTF-8 encodings: "\xef\xbb\xbf" and "\xef\xbf\xbe". If the BOMPolicy
// is UseBOM or ExpectBOM a starting BOM is not written to the UTF-8 output.
// Instead, it overrides the default endianness e for the remainder of the
// transformation. Any subsequent BOMs U+FEFF or noncharacters U+FFFE will not
// affect the endianness used, and will instead be output as their standard
// UTF-8 encodings. For UseBOM, if there is no starting BOM, it will proceed
// with the default Endianness. For ExpectBOM, in that case, the transformation
// will return early with an ErrMissingBOM error.
//
// When encoding from UTF-8 to UTF-16, a BOM will be inserted at the start of
// the output if the BOMPolicy is UseBOM or ExpectBOM. Otherwise, a BOM will not
// be inserted. The UTF-8 input does not need to contain a BOM.
//
// There is no concept of a 'native' endianness. If the UTF-16 data is produced
// and consumed in a greater context that implies a certain endianness, use
// IgnoreBOM. Otherwise, use ExpectBOM and always produce and consume a BOM.
//
// In the language of http://www.unicode.org/faq/utf_bom.html#bom10, IgnoreBOM
// corresponds to "Where the precise type of the data stream is known... the
// BOM should not be used" and ExpectBOM corresponds to "A particular
// protocol... may require use of the BOM".
func UTF16(e Endianness, b BOMPolicy) encoding.Encoding {
	return utf16Encoding{config{e, b}, mibValue[e][b&bomMask]}
}
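To make the policy matrix above concrete, a small round-trip sketch (the string literal is illustrative): UseBOM writes a BOM on encode and honors one on decode.

enc := unicode.UTF16(unicode.LittleEndian, unicode.UseBOM)

utf16Bytes, _ := enc.NewEncoder().Bytes([]byte("héllo")) // output starts with FF FE
utf8Bytes, _ := enc.NewDecoder().Bytes(utf16Bytes)       // BOM is consumed, not emitted
fmt.Println(string(utf8Bytes))                           // héllo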
// mibValue maps Endianness and BOMPolicy settings to MIB constants. Note that
// some configurations map to the same MIB identifier. RFC 2781 has requirements
// and recommendations. Some of the "configurations" are merely recommendations,
// so multiple configurations could match.
var mibValue = map[Endianness][numBOMValues]identifier.MIB{
	BigEndian: [numBOMValues]identifier.MIB{
		IgnoreBOM: identifier.UTF16BE,
		UseBOM:    identifier.UTF16, // BigEndian default is preferred by RFC 2781.
		// TODO: acceptBOM | strictBOM would map to UTF16BE as well.
	},
	LittleEndian: [numBOMValues]identifier.MIB{
		IgnoreBOM: identifier.UTF16LE,
		UseBOM:    identifier.UTF16, // LittleEndian default is allowed and preferred on Windows.
		// TODO: acceptBOM | strictBOM would map to UTF16LE as well.
	},
	// ExpectBOM is not widely used and has no valid MIB identifier.
}

// All lists a configuration for each IANA-defined UTF-16 variant.
var All = []encoding.Encoding{
	UTF8,
	UTF16(BigEndian, UseBOM),
	UTF16(BigEndian, IgnoreBOM),
	UTF16(LittleEndian, IgnoreBOM),
}

// BOMPolicy is a UTF-16 encoding's byte order mark policy.
type BOMPolicy uint8

const (
	writeBOM   BOMPolicy = 0x01
	acceptBOM  BOMPolicy = 0x02
	requireBOM BOMPolicy = 0x04
	bomMask    BOMPolicy = 0x07

	// HACK: numBOMValues == 8 triggers a bug in the 1.4 compiler (cannot have a
	// map of an array of length 8 of a type that is also used as a key or value
	// in another map). See golang.org/issue/11354.
	// TODO: consider changing this value back to 8 if the use of 1.4.* has
	// been minimized.
	numBOMValues = 8 + 1

	// IgnoreBOM means to ignore any byte order marks.
	IgnoreBOM BOMPolicy = 0
	// Common and RFC 2781-compliant interpretation for UTF-16BE/LE.

	// UseBOM means that the UTF-16 form may start with a byte order mark, which
	// will be used to override the default encoding.
	UseBOM BOMPolicy = writeBOM | acceptBOM
	// Common and RFC 2781-compliant interpretation for UTF-16.

	// ExpectBOM means that the UTF-16 form must start with a byte order mark,
	// which will be used to override the default encoding.
	ExpectBOM BOMPolicy = writeBOM | acceptBOM | requireBOM
	// Used in Java as Unicode (not to be confused with Java's UTF-16) and
	// ICU's UTF-16,version=1. Not compliant with RFC 2781.

	// TODO (maybe): strictBOM: BOM must match Endianness. This would allow:
	// - UTF-16(B|L)E,version=1: writeBOM | acceptBOM | requireBOM | strictBOM
	//   (UnicodeBig and UnicodeLittle in Java)
	// - RFC 2781-compliant, but less common interpretation for UTF-16(B|L)E:
	//   acceptBOM | strictBOM (e.g. assigned to CheckBOM).
	// This addition would be consistent with supporting ExpectBOM.
)

// Endianness is a UTF-16 encoding's default endianness.
type Endianness bool

const (
	// BigEndian is UTF-16BE.
	BigEndian Endianness = false
	// LittleEndian is UTF-16LE.
	LittleEndian Endianness = true
)

// ErrMissingBOM means that decoding UTF-16 input with ExpectBOM did not find a
// starting byte order mark.
var ErrMissingBOM = errors.New("encoding: missing byte order mark")

type utf16Encoding struct {
	config
	mib identifier.MIB
}

type config struct {
	endianness Endianness
	bomPolicy  BOMPolicy
}

func (u utf16Encoding) NewDecoder() *encoding.Decoder {
	return &encoding.Decoder{Transformer: &utf16Decoder{
		initial: u.config,
		current: u.config,
	}}
}

func (u utf16Encoding) NewEncoder() *encoding.Encoder {
	return &encoding.Encoder{Transformer: &utf16Encoder{
		endianness:       u.endianness,
		initialBOMPolicy: u.bomPolicy,
		currentBOMPolicy: u.bomPolicy,
	}}
}

func (u utf16Encoding) ID() (mib identifier.MIB, other string) {
	return u.mib, ""
}

func (u utf16Encoding) String() string {
	e, b := "B", ""
	if u.endianness == LittleEndian {
		e = "L"
	}
	switch u.bomPolicy {
	case ExpectBOM:
		b = "Expect"
	case UseBOM:
		b = "Use"
	case IgnoreBOM:
		b = "Ignore"
	}
	return "UTF-16" + e + "E (" + b + " BOM)"
}

type utf16Decoder struct {
	initial config
	current config
}

func (u *utf16Decoder) Reset() {
	u.current = u.initial
}

func (u *utf16Decoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	if len(src) == 0 {
		if atEOF && u.current.bomPolicy&requireBOM != 0 {
			return 0, 0, ErrMissingBOM
		}
		return 0, 0, nil
	}
	if u.current.bomPolicy&acceptBOM != 0 {
		if len(src) < 2 {
			return 0, 0, transform.ErrShortSrc
		}
		switch {
		case src[0] == 0xfe && src[1] == 0xff:
			u.current.endianness = BigEndian
			nSrc = 2
		case src[0] == 0xff && src[1] == 0xfe:
			u.current.endianness = LittleEndian
			nSrc = 2
		default:
			if u.current.bomPolicy&requireBOM != 0 {
				return 0, 0, ErrMissingBOM
			}
		}
		u.current.bomPolicy = IgnoreBOM
	}

	var r rune
	var dSize, sSize int
	for nSrc < len(src) {
		if nSrc+1 < len(src) {
			x := uint16(src[nSrc+0])<<8 | uint16(src[nSrc+1])
			if u.current.endianness == LittleEndian {
				x = x>>8 | x<<8
			}
			r, sSize = rune(x), 2
			if utf16.IsSurrogate(r) {
				if nSrc+3 < len(src) {
					x = uint16(src[nSrc+2])<<8 | uint16(src[nSrc+3])
					if u.current.endianness == LittleEndian {
						x = x>>8 | x<<8
					}
					// Save for next iteration if it is not a high surrogate.
					if isHighSurrogate(rune(x)) {
						r, sSize = utf16.DecodeRune(r, rune(x)), 4
					}
				} else if !atEOF {
					err = transform.ErrShortSrc
					break
				}
			}
			if dSize = utf8.RuneLen(r); dSize < 0 {
				r, dSize = utf8.RuneError, 3
			}
		} else if atEOF {
			// Single trailing byte.
			r, dSize, sSize = utf8.RuneError, 3, 1
		} else {
			err = transform.ErrShortSrc
			break
		}
		if nDst+dSize > len(dst) {
			err = transform.ErrShortDst
			break
		}
		nDst += utf8.EncodeRune(dst[nDst:], r)
		nSrc += sSize
	}
	return nDst, nSrc, err
}

func isHighSurrogate(r rune) bool {
	return 0xDC00 <= r && r <= 0xDFFF
}

type utf16Encoder struct {
	endianness       Endianness
	initialBOMPolicy BOMPolicy
	currentBOMPolicy BOMPolicy
}

func (u *utf16Encoder) Reset() {
	u.currentBOMPolicy = u.initialBOMPolicy
}

func (u *utf16Encoder) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	if u.currentBOMPolicy&writeBOM != 0 {
		if len(dst) < 2 {
			return 0, 0, transform.ErrShortDst
		}
		dst[0], dst[1] = 0xfe, 0xff
		u.currentBOMPolicy = IgnoreBOM
		nDst = 2
	}

	r, size := rune(0), 0
	for nSrc < len(src) {
		r = rune(src[nSrc])

		// Decode a 1-byte rune.
		if r < utf8.RuneSelf {
			size = 1

		} else {
			// Decode a multi-byte rune.
			r, size = utf8.DecodeRune(src[nSrc:])
			if size == 1 {
				// All valid runes of size 1 (those below utf8.RuneSelf) were
				// handled above. We have invalid UTF-8 or we haven't seen the
				// full character yet.
				if !atEOF && !utf8.FullRune(src[nSrc:]) {
					err = transform.ErrShortSrc
					break
				}
			}
		}

		if r <= 0xffff {
			if nDst+2 > len(dst) {
				err = transform.ErrShortDst
				break
			}
			dst[nDst+0] = uint8(r >> 8)
			dst[nDst+1] = uint8(r)
			nDst += 2
		} else {
			if nDst+4 > len(dst) {
				err = transform.ErrShortDst
				break
			}
			r1, r2 := utf16.EncodeRune(r)
			dst[nDst+0] = uint8(r1 >> 8)
			dst[nDst+1] = uint8(r1)
			dst[nDst+2] = uint8(r2 >> 8)
			dst[nDst+3] = uint8(r2)
			nDst += 4
		}
		nSrc += size
	}

	if u.endianness == LittleEndian {
		for i := 0; i < nDst; i += 2 {
			dst[i], dst[i+1] = dst[i+1], dst[i]
		}
	}
	return nDst, nSrc, err
}

@ -1,87 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package utf8internal contains low-level utf8-related constants, tables, etc.
// that are used internally by the text package.
package utf8internal

// The default lowest and highest continuation byte.
const (
	LoCB = 0x80 // 1000 0000
	HiCB = 0xBF // 1011 1111
)

// Constants related to getting information of first bytes of UTF-8 sequences.
const (
	// ASCII identifies a UTF-8 byte as ASCII.
	ASCII = as

	// FirstInvalid indicates a byte is invalid as a first byte of a UTF-8
	// sequence.
	FirstInvalid = xx

	// SizeMask is a mask for the size bits. Use x&SizeMask to get the size.
	SizeMask = 7

	// AcceptShift is the right-shift count for the first byte info byte to get
	// the index into the AcceptRanges table. See AcceptRanges.
	AcceptShift = 4

	// The names of these constants are chosen to give nice alignment in the
	// table below. The first nibble is an index into acceptRanges or F for
	// special one-byte cases. The second nibble is the Rune length or the
	// Status for the special one-byte case.
	xx = 0xF1 // invalid: size 1
	as = 0xF0 // ASCII: size 1
	s1 = 0x02 // accept 0, size 2
	s2 = 0x13 // accept 1, size 3
	s3 = 0x03 // accept 0, size 3
	s4 = 0x23 // accept 2, size 3
	s5 = 0x34 // accept 3, size 4
	s6 = 0x04 // accept 0, size 4
	s7 = 0x44 // accept 4, size 4
)

// First is information about the first byte in a UTF-8 sequence.
var First = [256]uint8{
	//   1  2  3  4  5  6  7  8  9  A  B  C  D  E  F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x00-0x0F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x10-0x1F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x20-0x2F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x30-0x3F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x40-0x4F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x50-0x5F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x60-0x6F
	as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, as, // 0x70-0x7F
	//   1  2  3  4  5  6  7  8  9  A  B  C  D  E  F
	xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x80-0x8F
	xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0x90-0x9F
	xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xA0-0xAF
	xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xB0-0xBF
	xx, xx, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xC0-0xCF
	s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, s1, // 0xD0-0xDF
	s2, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s3, s4, s3, s3, // 0xE0-0xEF
	s5, s6, s6, s6, s7, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, xx, // 0xF0-0xFF
}

// AcceptRange gives the range of valid values for the second byte in a UTF-8
// sequence for any value for First that is not ASCII or FirstInvalid.
type AcceptRange struct {
	Lo uint8 // lowest value for second byte.
	Hi uint8 // highest value for second byte.
}

// AcceptRanges is a slice of AcceptRange values. For a given byte sequence b
//
//	AcceptRanges[First[b[0]]>>AcceptShift]
//
// will give the value of AcceptRange for the multi-byte UTF-8 sequence starting
// at b[0].
var AcceptRanges = [...]AcceptRange{
	0: {LoCB, HiCB},
	1: {0xA0, HiCB},
	2: {LoCB, 0x9F},
	3: {0x90, HiCB},
	4: {LoCB, 0x8F},
}
|
|
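As the AcceptRanges comment above spells out, First and AcceptRanges jointly classify the first two bytes of a sequence. A minimal sketch of that lookup (validFirstTwo is a hypothetical helper for illustration, not part of the vendored file):

// validFirstTwo reports whether b begins a well-formed multi-byte
// UTF-8 sequence, per the First and AcceptRanges tables above.
// Hypothetical helper; not in the vendored source.
func validFirstTwo(b []byte) bool {
	if len(b) < 2 {
		return false
	}
	x := First[b[0]]
	if x == ASCII || x == FirstInvalid {
		return false // ASCII or invalid first byte: no multi-byte sequence
	}
	ar := AcceptRanges[x>>AcceptShift]
	return ar.Lo <= b[1] && b[1] <= ar.Hi
}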
@ -1,187 +0,0 @@
// Copyright 2015 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runes

import (
	"unicode/utf8"

	"golang.org/x/text/transform"
)

// Note: below we pass invalid UTF-8 to the tIn and tNotIn transformers as is.
// This is done for various reasons:
// - To retain the semantics of the Nop transformer: if input is passed to a Nop
//   one would expect it to be unchanged.
// - It would be very expensive to pass a converted RuneError to a transformer:
//   a transformer might need more source bytes after RuneError, meaning that
//   the only way to pass it safely is to create a new buffer and manage the
//   intermingling of RuneErrors and normal input.
// - Many transformers leave ill-formed UTF-8 as is, so this is not
//   inconsistent. Generally ill-formed UTF-8 is only replaced if it is a
//   logical consequence of the operation (as for Map) or if it otherwise would
//   pose security concerns (as for Remove).
// - An alternative would be to return an error on ill-formed UTF-8, but this
//   would be inconsistent with other operations.

// If returns a transformer that applies tIn to consecutive runes for which
// s.Contains(r) and tNotIn to consecutive runes for which !s.Contains(r). Reset
// is called on tIn and tNotIn at the start of each run. A Nop transformer will
// substitute a nil value passed to tIn or tNotIn. Invalid UTF-8 is translated
// to RuneError to determine which transformer to apply, but is passed as is to
// the respective transformer.
func If(s Set, tIn, tNotIn transform.Transformer) Transformer {
	if tIn == nil && tNotIn == nil {
		return Transformer{transform.Nop}
	}
	if tIn == nil {
		tIn = transform.Nop
	}
	if tNotIn == nil {
		tNotIn = transform.Nop
	}
	sIn, ok := tIn.(transform.SpanningTransformer)
	if !ok {
		sIn = dummySpan{tIn}
	}
	sNotIn, ok := tNotIn.(transform.SpanningTransformer)
	if !ok {
		sNotIn = dummySpan{tNotIn}
	}

	a := &cond{
		tIn:    sIn,
		tNotIn: sNotIn,
		f:      s.Contains,
	}
	a.Reset()
	return Transformer{a}
}

type dummySpan struct{ transform.Transformer }

func (d dummySpan) Span(src []byte, atEOF bool) (n int, err error) {
	return 0, transform.ErrEndOfSpan
}

type cond struct {
	tIn, tNotIn transform.SpanningTransformer
	f           func(rune) bool
	check       func(rune) bool               // current check to perform
	t           transform.SpanningTransformer // current transformer to use
}

// Reset implements transform.Transformer.
func (t *cond) Reset() {
	t.check = t.is
	t.t = t.tIn
	t.t.Reset() // notIn will be reset on first usage.
}

func (t *cond) is(r rune) bool {
	if t.f(r) {
		return true
	}
	t.check = t.isNot
	t.t = t.tNotIn
	t.tNotIn.Reset()
	return false
}

func (t *cond) isNot(r rune) bool {
	if !t.f(r) {
		return true
	}
	t.check = t.is
	t.t = t.tIn
	t.tIn.Reset()
	return false
}

// This implementation of Span doesn't help all too much, but it needs to be
// there to satisfy this package's Transformer interface.
// TODO: there is certainly room for improvement, though. For example, if
// t.t == transform.Nop (which will be a common occurrence) it will save a
// bundle to special-case that loop.
func (t *cond) Span(src []byte, atEOF bool) (n int, err error) {
	p := 0
	for n < len(src) && err == nil {
		// Don't process too much at a time as the Spanner that will be
		// called on this block may terminate early.
		const maxChunk = 4096
		max := len(src)
		if v := n + maxChunk; v < max {
			max = v
		}
		atEnd := false
		size := 0
		current := t.t
		for ; p < max; p += size {
			r := rune(src[p])
			if r < utf8.RuneSelf {
				size = 1
			} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
				if !atEOF && !utf8.FullRune(src[p:]) {
					err = transform.ErrShortSrc
					break
				}
			}
			if !t.check(r) {
				// The next rune will be the start of a new run.
				atEnd = true
				break
			}
		}
		n2, err2 := current.Span(src[n:p], atEnd || (atEOF && p == len(src)))
		n += n2
		if err2 != nil {
			return n, err2
		}
		// At this point either err != nil or t.check will pass for the rune at p.
		p = n + size
	}
	return n, err
}

func (t *cond) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	p := 0
	for nSrc < len(src) && err == nil {
		// Don't process too much at a time, as the work might be wasted if the
		// destination buffer isn't large enough to hold the result or a
		// transform returns an error early.
		const maxChunk = 4096
		max := len(src)
		if n := nSrc + maxChunk; n < len(src) {
			max = n
		}
		atEnd := false
		size := 0
		current := t.t
		for ; p < max; p += size {
			r := rune(src[p])
			if r < utf8.RuneSelf {
				size = 1
			} else if r, size = utf8.DecodeRune(src[p:]); size == 1 {
				if !atEOF && !utf8.FullRune(src[p:]) {
					err = transform.ErrShortSrc
					break
				}
			}
			if !t.check(r) {
				// The next rune will be the start of a new run.
				atEnd = true
				break
			}
		}
		nDst2, nSrc2, err2 := current.Transform(dst[nDst:], src[nSrc:p], atEnd || (atEOF && p == len(src)))
		nDst += nDst2
		nSrc += nSrc2
		if err2 != nil {
			return nDst, nSrc, err2
		}
		// At this point either err != nil or t.check will pass for the rune at p.
		p = nSrc + size
	}
	return nDst, nSrc, err
}
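A short sketch of how If is typically used, relying on the In and Map helpers defined in runes.go below (illustrative, not from this diff):

package main

import (
	"fmt"
	"unicode"

	"golang.org/x/text/runes"
	"golang.org/x/text/transform"
)

func main() {
	// Uppercase only Greek runes; the nil tNotIn becomes a Nop.
	t := runes.If(runes.In(unicode.Greek), runes.Map(unicode.ToUpper), nil)
	s, _, err := transform.String(t, "abc αβγ")
	fmt.Println(s, err) // abc ΑΒΓ <nil>
}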
@ -1,355 +0,0 @@
// Copyright 2014 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Package runes provides transforms for UTF-8 encoded text.
package runes // import "golang.org/x/text/runes"

import (
	"unicode"
	"unicode/utf8"

	"golang.org/x/text/transform"
)

// A Set is a collection of runes.
type Set interface {
	// Contains returns true if r is contained in the set.
	Contains(r rune) bool
}

type setFunc func(rune) bool

func (s setFunc) Contains(r rune) bool {
	return s(r)
}

// Note: using funcs here instead of wrapping types results in cleaner
// documentation and a smaller API.

// In creates a Set with a Contains method that returns true for all runes in
// the given RangeTable.
func In(rt *unicode.RangeTable) Set {
	return setFunc(func(r rune) bool { return unicode.Is(rt, r) })
}

// NotIn creates a Set with a Contains method that returns true for all runes
// not in the given RangeTable.
func NotIn(rt *unicode.RangeTable) Set {
	return setFunc(func(r rune) bool { return !unicode.Is(rt, r) })
}

// Predicate creates a Set with a Contains method that returns f(r).
func Predicate(f func(rune) bool) Set {
	return setFunc(f)
}

// Transformer implements the transform.Transformer interface.
type Transformer struct {
	t transform.SpanningTransformer
}

func (t Transformer) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	return t.t.Transform(dst, src, atEOF)
}

func (t Transformer) Span(b []byte, atEOF bool) (n int, err error) {
	return t.t.Span(b, atEOF)
}

func (t Transformer) Reset() { t.t.Reset() }

// Bytes returns a new byte slice with the result of converting b using t. It
// calls Reset on t. It returns nil if any error was found. This can only happen
// if an error-producing Transformer is passed to If.
func (t Transformer) Bytes(b []byte) []byte {
	b, _, err := transform.Bytes(t, b)
	if err != nil {
		return nil
	}
	return b
}

// String returns a string with the result of converting s using t. It calls
// Reset on t. It returns the empty string if any error was found. This can only
// happen if an error-producing Transformer is passed to If.
func (t Transformer) String(s string) string {
	s, _, err := transform.String(t, s)
	if err != nil {
		return ""
	}
	return s
}

// TODO:
// - Copy: copying strings and bytes in whole-rune units.
// - Validation (maybe)
// - Well-formed-ness (maybe)

const runeErrorString = string(utf8.RuneError)

// Remove returns a Transformer that removes runes r for which s.Contains(r).
// Illegal input bytes are replaced by RuneError before being passed to
// s.Contains.
func Remove(s Set) Transformer {
	if f, ok := s.(setFunc); ok {
		// This little trick cuts the running time of BenchmarkRemove for sets
		// created by Predicate roughly in half.
		// TODO: special-case RangeTables as well.
		return Transformer{remove(f)}
	}
	return Transformer{remove(s.Contains)}
}

// TODO: remove transform.RemoveFunc.

type remove func(r rune) bool

func (remove) Reset() {}

// Span implements transform.Spanner.
func (t remove) Span(src []byte, atEOF bool) (n int, err error) {
	for r, size := rune(0), 0; n < len(src); {
		if r = rune(src[n]); r < utf8.RuneSelf {
			size = 1
		} else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
			// Invalid rune.
			if !atEOF && !utf8.FullRune(src[n:]) {
				err = transform.ErrShortSrc
			} else {
				err = transform.ErrEndOfSpan
			}
			break
		}
		if t(r) {
			err = transform.ErrEndOfSpan
			break
		}
		n += size
	}
	return
}

// Transform implements transform.Transformer.
func (t remove) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	for r, size := rune(0), 0; nSrc < len(src); {
		if r = rune(src[nSrc]); r < utf8.RuneSelf {
			size = 1
		} else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
			// Invalid rune.
			if !atEOF && !utf8.FullRune(src[nSrc:]) {
				err = transform.ErrShortSrc
				break
			}
			// We replace illegal bytes with RuneError. Not doing so might
			// otherwise turn a sequence of invalid UTF-8 into valid UTF-8.
			// The resulting byte sequence may subsequently contain runes
			// for which t(r) is true that were passed unnoticed.
			if !t(utf8.RuneError) {
				if nDst+3 > len(dst) {
					err = transform.ErrShortDst
					break
				}
				dst[nDst+0] = runeErrorString[0]
				dst[nDst+1] = runeErrorString[1]
				dst[nDst+2] = runeErrorString[2]
				nDst += 3
			}
			nSrc++
			continue
		}
		if t(r) {
			nSrc += size
			continue
		}
		if nDst+size > len(dst) {
			err = transform.ErrShortDst
			break
		}
		for i := 0; i < size; i++ {
			dst[nDst] = src[nSrc]
			nDst++
			nSrc++
		}
	}
	return
}

// Map returns a Transformer that maps the runes in the input using the given
// mapping. Illegal bytes in the input are converted to utf8.RuneError before
// being passed to the mapping func.
func Map(mapping func(rune) rune) Transformer {
	return Transformer{mapper(mapping)}
}

type mapper func(rune) rune

func (mapper) Reset() {}

// Span implements transform.Spanner.
func (t mapper) Span(src []byte, atEOF bool) (n int, err error) {
	for r, size := rune(0), 0; n < len(src); n += size {
		if r = rune(src[n]); r < utf8.RuneSelf {
			size = 1
		} else if r, size = utf8.DecodeRune(src[n:]); size == 1 {
			// Invalid rune.
			if !atEOF && !utf8.FullRune(src[n:]) {
				err = transform.ErrShortSrc
			} else {
				err = transform.ErrEndOfSpan
			}
			break
		}
		if t(r) != r {
			err = transform.ErrEndOfSpan
			break
		}
	}
	return n, err
}

// Transform implements transform.Transformer.
func (t mapper) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	var replacement rune
	var b [utf8.UTFMax]byte

	for r, size := rune(0), 0; nSrc < len(src); {
		if r = rune(src[nSrc]); r < utf8.RuneSelf {
			if replacement = t(r); replacement < utf8.RuneSelf {
				if nDst == len(dst) {
					err = transform.ErrShortDst
					break
				}
				dst[nDst] = byte(replacement)
				nDst++
				nSrc++
				continue
			}
			size = 1
		} else if r, size = utf8.DecodeRune(src[nSrc:]); size == 1 {
			// Invalid rune.
			if !atEOF && !utf8.FullRune(src[nSrc:]) {
				err = transform.ErrShortSrc
				break
			}

			if replacement = t(utf8.RuneError); replacement == utf8.RuneError {
				if nDst+3 > len(dst) {
					err = transform.ErrShortDst
					break
				}
				dst[nDst+0] = runeErrorString[0]
				dst[nDst+1] = runeErrorString[1]
				dst[nDst+2] = runeErrorString[2]
				nDst += 3
				nSrc++
				continue
			}
		} else if replacement = t(r); replacement == r {
			if nDst+size > len(dst) {
				err = transform.ErrShortDst
				break
			}
			for i := 0; i < size; i++ {
				dst[nDst] = src[nSrc]
				nDst++
				nSrc++
			}
			continue
		}

		n := utf8.EncodeRune(b[:], replacement)

		if nDst+n > len(dst) {
			err = transform.ErrShortDst
			break
		}
		for i := 0; i < n; i++ {
			dst[nDst] = b[i]
			nDst++
		}
		nSrc += size
	}
	return
}

// ReplaceIllFormed returns a transformer that replaces all input bytes that are
// not part of a well-formed UTF-8 code sequence with utf8.RuneError.
func ReplaceIllFormed() Transformer {
	return Transformer{&replaceIllFormed{}}
}

type replaceIllFormed struct{ transform.NopResetter }

func (t replaceIllFormed) Span(src []byte, atEOF bool) (n int, err error) {
	for n < len(src) {
		// ASCII fast path.
		if src[n] < utf8.RuneSelf {
			n++
			continue
		}

		r, size := utf8.DecodeRune(src[n:])

		// Look for a valid non-ASCII rune.
		if r != utf8.RuneError || size != 1 {
			n += size
			continue
		}

		// Look for short source data.
		if !atEOF && !utf8.FullRune(src[n:]) {
			err = transform.ErrShortSrc
			break
		}

		// We have an invalid rune.
		err = transform.ErrEndOfSpan
		break
	}
	return n, err
}

func (t replaceIllFormed) Transform(dst, src []byte, atEOF bool) (nDst, nSrc int, err error) {
	for nSrc < len(src) {
		// ASCII fast path.
		if r := src[nSrc]; r < utf8.RuneSelf {
			if nDst == len(dst) {
				err = transform.ErrShortDst
				break
			}
			dst[nDst] = r
			nDst++
			nSrc++
			continue
		}

		// Look for a valid non-ASCII rune.
		if _, size := utf8.DecodeRune(src[nSrc:]); size != 1 {
			if size != copy(dst[nDst:], src[nSrc:nSrc+size]) {
				err = transform.ErrShortDst
				break
			}
			nDst += size
			nSrc += size
			continue
		}

		// Look for short source data.
		if !atEOF && !utf8.FullRune(src[nSrc:]) {
			err = transform.ErrShortSrc
			break
		}

		// We have an invalid rune.
		if nDst+3 > len(dst) {
			err = transform.ErrShortDst
			break
		}
		dst[nDst+0] = runeErrorString[0]
		dst[nDst+1] = runeErrorString[1]
		dst[nDst+2] = runeErrorString[2]
		nDst += 3
		nSrc++
	}
	return nDst, nSrc, err
}
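A brief usage sketch for ReplaceIllFormed, using only names defined above (illustrative):

	// Scrub ill-formed UTF-8 from untrusted input: each invalid byte is
	// replaced by U+FFFD (the three bytes of runeErrorString).
	clean := ReplaceIllFormed().String(string([]byte{'a', 0xFF, 'b'}))
	// clean == "a\uFFFDb"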
File diff suppressed because it is too large
@ -1,127 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1beta1

import (
	authenticationv1 "k8s.io/api/authentication/v1"
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/apimachinery/pkg/runtime"
	"k8s.io/apimachinery/pkg/types"
)

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// AdmissionReview describes an admission review request/response.
type AdmissionReview struct {
	metav1.TypeMeta `json:",inline"`
	// Request describes the attributes for the admission request.
	// +optional
	Request *AdmissionRequest `json:"request,omitempty" protobuf:"bytes,1,opt,name=request"`
	// Response describes the attributes for the admission response.
	// +optional
	Response *AdmissionResponse `json:"response,omitempty" protobuf:"bytes,2,opt,name=response"`
}

// AdmissionRequest describes the admission.Attributes for the admission request.
type AdmissionRequest struct {
	// UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are
	// otherwise identical (parallel requests, requests when earlier requests did not modify, etc.).
	// The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request.
	// It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.
	UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"`
	// Kind is the type of object being manipulated. For example: Pod
	Kind metav1.GroupVersionKind `json:"kind" protobuf:"bytes,2,opt,name=kind"`
	// Resource is the name of the resource being requested. This is not the kind. For example: pods
	Resource metav1.GroupVersionResource `json:"resource" protobuf:"bytes,3,opt,name=resource"`
	// SubResource is the name of the subresource being requested. This is a different resource, scoped to the parent
	// resource, but it may have a different kind. For instance, /pods has the resource "pods" and the kind "Pod", while
	// /pods/foo/status has the resource "pods", the sub resource "status", and the kind "Pod" (because status operates on
	// pods). The binding resource for a pod though may be /pods/foo/binding, which has resource "pods", subresource
	// "binding", and kind "Binding".
	// +optional
	SubResource string `json:"subResource,omitempty" protobuf:"bytes,4,opt,name=subResource"`
	// Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and
	// rely on the server to generate the name. If that is the case, this field will be the empty string.
	// +optional
	Name string `json:"name,omitempty" protobuf:"bytes,5,opt,name=name"`
	// Namespace is the namespace associated with the request (if any).
	// +optional
	Namespace string `json:"namespace,omitempty" protobuf:"bytes,6,opt,name=namespace"`
	// Operation is the operation being performed.
	Operation Operation `json:"operation" protobuf:"bytes,7,opt,name=operation"`
	// UserInfo is information about the requesting user.
	UserInfo authenticationv1.UserInfo `json:"userInfo" protobuf:"bytes,8,opt,name=userInfo"`
	// Object is the object from the incoming request prior to default values being applied.
	// +optional
	Object runtime.RawExtension `json:"object,omitempty" protobuf:"bytes,9,opt,name=object"`
	// OldObject is the existing object. Only populated for UPDATE requests.
	// +optional
	OldObject runtime.RawExtension `json:"oldObject,omitempty" protobuf:"bytes,10,opt,name=oldObject"`
	// DryRun indicates that modifications will definitely not be persisted for this request.
	// Defaults to false.
	// +optional
	DryRun *bool `json:"dryRun,omitempty" protobuf:"varint,11,opt,name=dryRun"`
}

// AdmissionResponse describes an admission response.
type AdmissionResponse struct {
	// UID is an identifier for the individual request/response.
	// This should be copied over from the corresponding AdmissionRequest.
	UID types.UID `json:"uid" protobuf:"bytes,1,opt,name=uid"`

	// Allowed indicates whether or not the admission request was permitted.
	Allowed bool `json:"allowed" protobuf:"varint,2,opt,name=allowed"`

	// Result contains extra details into why an admission request was denied.
	// This field IS NOT consulted in any way if "Allowed" is "true".
	// +optional
	Result *metav1.Status `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`

	// The patch body. Currently we only support "JSONPatch" which implements RFC 6902.
	// +optional
	Patch []byte `json:"patch,omitempty" protobuf:"bytes,4,opt,name=patch"`

	// The type of Patch. Currently we only allow "JSONPatch".
	// +optional
	PatchType *PatchType `json:"patchType,omitempty" protobuf:"bytes,5,opt,name=patchType"`

	// AuditAnnotations is an unstructured key value map set by the remote admission controller (e.g. error=image-blacklisted).
	// The MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controllers will prefix the keys with the
	// admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by
	// the admission webhook to add additional context to the audit log for this request.
	// +optional
	AuditAnnotations map[string]string `json:"auditAnnotations,omitempty" protobuf:"bytes,6,opt,name=auditAnnotations"`
}

// PatchType is the type of patch being used to represent the mutated object.
type PatchType string

// PatchType constants.
const (
	PatchTypeJSONPatch PatchType = "JSONPatch"
)

// Operation is the type of resource operation being checked for admission control.
type Operation string

// Operation constants.
const (
	Create  Operation = "CREATE"
	Update  Operation = "UPDATE"
	Delete  Operation = "DELETE"
	Connect Operation = "CONNECT"
)
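For orientation, a sketch of how a webhook handler fills in these types (the review function and the admissionv1beta1 import alias are hypothetical, not part of this diff):

// review builds an AdmissionReview response for a request, echoing the
// request UID and attaching a JSONPatch mutation. Hypothetical example.
func review(req *admissionv1beta1.AdmissionRequest) *admissionv1beta1.AdmissionReview {
	pt := admissionv1beta1.PatchTypeJSONPatch
	return &admissionv1beta1.AdmissionReview{
		Response: &admissionv1beta1.AdmissionResponse{
			UID:       req.UID, // must be copied from the request
			Allowed:   true,
			Patch:     []byte(`[{"op":"add","path":"/metadata/labels/reviewed","value":"true"}]`),
			PatchType: &pt,
		},
	}
}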
@ -1,73 +0,0 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1beta1

// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-generated-swagger-docs.sh

// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_AdmissionRequest = map[string]string{
	"":            "AdmissionRequest describes the admission.Attributes for the admission request.",
	"uid":         "UID is an identifier for the individual request/response. It allows us to distinguish instances of requests which are otherwise identical (parallel requests, requests when earlier requests did not modify etc) The UID is meant to track the round trip (request/response) between the KAS and the WebHook, not the user request. It is suitable for correlating log entries between the webhook and apiserver, for either auditing or debugging.",
	"kind":        "Kind is the type of object being manipulated. For example: Pod",
	"resource":    "Resource is the name of the resource being requested. This is not the kind. For example: pods",
	"subResource": "SubResource is the name of the subresource being requested. This is a different resource, scoped to the parent resource, but it may have a different kind. For instance, /pods has the resource \"pods\" and the kind \"Pod\", while /pods/foo/status has the resource \"pods\", the sub resource \"status\", and the kind \"Pod\" (because status operates on pods). The binding resource for a pod though may be /pods/foo/binding, which has resource \"pods\", subresource \"binding\", and kind \"Binding\".",
	"name":        "Name is the name of the object as presented in the request. On a CREATE operation, the client may omit name and rely on the server to generate the name. If that is the case, this method will return the empty string.",
	"namespace":   "Namespace is the namespace associated with the request (if any).",
	"operation":   "Operation is the operation being performed",
	"userInfo":    "UserInfo is information about the requesting user",
	"object":      "Object is the object from the incoming request prior to default values being applied",
	"oldObject":   "OldObject is the existing object. Only populated for UPDATE requests.",
	"dryRun":      "DryRun indicates that modifications will definitely not be persisted for this request. Defaults to false.",
}

func (AdmissionRequest) SwaggerDoc() map[string]string {
	return map_AdmissionRequest
}

var map_AdmissionResponse = map[string]string{
	"":                 "AdmissionResponse describes an admission response.",
	"uid":              "UID is an identifier for the individual request/response. This should be copied over from the corresponding AdmissionRequest.",
	"allowed":          "Allowed indicates whether or not the admission request was permitted.",
	"status":           "Result contains extra details into why an admission request was denied. This field IS NOT consulted in any way if \"Allowed\" is \"true\".",
	"patch":            "The patch body. Currently we only support \"JSONPatch\" which implements RFC 6902.",
	"patchType":        "The type of Patch. Currently we only allow \"JSONPatch\".",
	"auditAnnotations": "AuditAnnotations is an unstructured key value map set by remote admission controller (e.g. error=image-blacklisted). MutatingAdmissionWebhook and ValidatingAdmissionWebhook admission controller will prefix the keys with admission webhook name (e.g. imagepolicy.example.com/error=image-blacklisted). AuditAnnotations will be provided by the admission webhook to add additional context to the audit log for this request.",
}

func (AdmissionResponse) SwaggerDoc() map[string]string {
	return map_AdmissionResponse
}

var map_AdmissionReview = map[string]string{
	"":         "AdmissionReview describes an admission review request/response.",
	"request":  "Request describes the attributes for the admission request.",
	"response": "Response describes the attributes for the admission response.",
}

func (AdmissionReview) SwaggerDoc() map[string]string {
	return map_AdmissionReview
}

// AUTO-GENERATED FUNCTIONS END HERE
@ -1,125 +0,0 @@
// +build !ignore_autogenerated

/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by deepcopy-gen. DO NOT EDIT.

package v1beta1

import (
	v1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	runtime "k8s.io/apimachinery/pkg/runtime"
)

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AdmissionRequest) DeepCopyInto(out *AdmissionRequest) {
	*out = *in
	out.Kind = in.Kind
	out.Resource = in.Resource
	in.UserInfo.DeepCopyInto(&out.UserInfo)
	in.Object.DeepCopyInto(&out.Object)
	in.OldObject.DeepCopyInto(&out.OldObject)
	if in.DryRun != nil {
		in, out := &in.DryRun, &out.DryRun
		*out = new(bool)
		**out = **in
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionRequest.
func (in *AdmissionRequest) DeepCopy() *AdmissionRequest {
	if in == nil {
		return nil
	}
	out := new(AdmissionRequest)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AdmissionResponse) DeepCopyInto(out *AdmissionResponse) {
	*out = *in
	if in.Result != nil {
		in, out := &in.Result, &out.Result
		*out = new(v1.Status)
		(*in).DeepCopyInto(*out)
	}
	if in.Patch != nil {
		in, out := &in.Patch, &out.Patch
		*out = make([]byte, len(*in))
		copy(*out, *in)
	}
	if in.PatchType != nil {
		in, out := &in.PatchType, &out.PatchType
		*out = new(PatchType)
		**out = **in
	}
	if in.AuditAnnotations != nil {
		in, out := &in.AuditAnnotations, &out.AuditAnnotations
		*out = make(map[string]string, len(*in))
		for key, val := range *in {
			(*out)[key] = val
		}
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionResponse.
func (in *AdmissionResponse) DeepCopy() *AdmissionResponse {
	if in == nil {
		return nil
	}
	out := new(AdmissionResponse)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyInto is an autogenerated deepcopy function, copying the receiver, writing into out. in must be non-nil.
func (in *AdmissionReview) DeepCopyInto(out *AdmissionReview) {
	*out = *in
	out.TypeMeta = in.TypeMeta
	if in.Request != nil {
		in, out := &in.Request, &out.Request
		*out = new(AdmissionRequest)
		(*in).DeepCopyInto(*out)
	}
	if in.Response != nil {
		in, out := &in.Response, &out.Response
		*out = new(AdmissionResponse)
		(*in).DeepCopyInto(*out)
	}
	return
}

// DeepCopy is an autogenerated deepcopy function, copying the receiver, creating a new AdmissionReview.
func (in *AdmissionReview) DeepCopy() *AdmissionReview {
	if in == nil {
		return nil
	}
	out := new(AdmissionReview)
	in.DeepCopyInto(out)
	return out
}

// DeepCopyObject is an autogenerated deepcopy function, copying the receiver, creating a new runtime.Object.
func (in *AdmissionReview) DeepCopyObject() runtime.Object {
	if c := in.DeepCopy(); c != nil {
		return c
	}
	return nil
}
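A small sketch of the deep-copy guarantee these generated functions provide (illustrative):

	// Mutating the copy's reference-typed fields leaves the original intact.
	orig := &AdmissionResponse{Allowed: true, AuditAnnotations: map[string]string{"k": "v"}}
	cp := orig.DeepCopy()
	cp.AuditAnnotations["k"] = "changed"
	// orig.AuditAnnotations["k"] is still "v"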
@ -1,25 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

// +k8s:deepcopy-gen=package
// +k8s:openapi-gen=true

// Package v1alpha1 is the v1alpha1 version of the API.
// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration
// InitializerConfiguration and validatingWebhookConfiguration is for the
// new dynamic admission controller configuration.
// +groupName=admissionregistration.k8s.io
package v1alpha1 // import "k8s.io/api/admissionregistration/v1alpha1"
@ -1,106 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)

// +genclient
// +genclient:nonNamespaced
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// InitializerConfiguration describes the configuration of initializers.
type InitializerConfiguration struct {
	metav1.TypeMeta `json:",inline"`
	// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
	// +optional
	metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// Initializers is a list of resources and their default initializers.
	// Order-sensitive.
	// When merging multiple InitializerConfigurations, we sort the initializers
	// from different InitializerConfigurations by the name of the
	// InitializerConfigurations; the order of the initializers from the same
	// InitializerConfiguration is preserved.
	// +patchMergeKey=name
	// +patchStrategy=merge
	// +optional
	Initializers []Initializer `json:"initializers,omitempty" patchStrategy:"merge" patchMergeKey:"name" protobuf:"bytes,2,rep,name=initializers"`
}

// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// InitializerConfigurationList is a list of InitializerConfiguration.
type InitializerConfigurationList struct {
	metav1.TypeMeta `json:",inline"`
	// Standard list metadata.
	// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
	// +optional
	metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

	// List of InitializerConfiguration.
	Items []InitializerConfiguration `json:"items" protobuf:"bytes,2,rep,name=items"`
}

// Initializer describes the name and the failure policy of an initializer, and
// what resources it applies to.
type Initializer struct {
	// Name is the identifier of the initializer. It will be added to the
	// object that needs to be initialized.
	// Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where
	// "alwayspullimages" is the name of the webhook, and kubernetes.io is the name
	// of the organization.
	// Required
	Name string `json:"name" protobuf:"bytes,1,opt,name=name"`

	// Rules describes what resources/subresources the initializer cares about.
	// The initializer cares about an operation if it matches _any_ Rule.
	// Rule.Resources must not include subresources.
	Rules []Rule `json:"rules,omitempty" protobuf:"bytes,2,rep,name=rules"`
}

// Rule is a tuple of APIGroups, APIVersion, and Resources. It is recommended
// to make sure that all the tuple expansions are valid.
type Rule struct {
	// APIGroups is the API groups the resources belong to. '*' is all groups.
	// If '*' is present, the length of the slice must be one.
	// Required.
	APIGroups []string `json:"apiGroups,omitempty" protobuf:"bytes,1,rep,name=apiGroups"`

	// APIVersions is the API versions the resources belong to. '*' is all versions.
	// If '*' is present, the length of the slice must be one.
	// Required.
	APIVersions []string `json:"apiVersions,omitempty" protobuf:"bytes,2,rep,name=apiVersions"`

	// Resources is a list of resources this rule applies to.
	//
	// For example:
	// 'pods' means pods.
	// 'pods/log' means the log subresource of pods.
	// '*' means all resources, but not subresources.
	// 'pods/*' means all subresources of pods.
	// '*/scale' means all scale subresources.
	// '*/*' means all resources and their subresources.
	//
	// If wildcard is present, the validation rule will ensure resources do not
	// overlap with each other.
	//
	// Depending on the enclosing object, subresources might not be allowed.
	// Required.
	Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"`
}
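An illustrative Rule literal matching the documentation above (hypothetical values):

	r := Rule{
		APIGroups:   []string{""},                 // the core API group
		APIVersions: []string{"v1"},
		Resources:   []string{"pods", "pods/log"}, // a resource and one subresource
	}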
71 vendor/k8s.io/api/admissionregistration/v1alpha1/types_swagger_doc_generated.go generated vendored
@ -1,71 +0,0 @@
/*
Copyright The Kubernetes Authors.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package v1alpha1

// This file contains a collection of methods that can be used from go-restful to
// generate Swagger API documentation for its models. Please read this PR for more
// information on the implementation: https://github.com/emicklei/go-restful/pull/215
//
// TODOs are ignored from the parser (e.g. TODO(andronat):... || TODO:...) if and only if
// they are on one line! For multiple line or blocks that you want to ignore use ---.
// Any context after a --- is ignored.
//
// Those methods can be generated by using hack/update-generated-swagger-docs.sh

// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_Initializer = map[string]string{
	"":      "Initializer describes the name and the failure policy of an initializer, and what resources it applies to.",
	"name":  "Name is the identifier of the initializer. It will be added to the object that needs to be initialized. Name should be fully qualified, e.g., alwayspullimages.kubernetes.io, where \"alwayspullimages\" is the name of the webhook, and kubernetes.io is the name of the organization. Required",
	"rules": "Rules describes what resources/subresources the initializer cares about. The initializer cares about an operation if it matches _any_ Rule. Rule.Resources must not include subresources.",
}

func (Initializer) SwaggerDoc() map[string]string {
	return map_Initializer
}

var map_InitializerConfiguration = map[string]string{
	"":             "InitializerConfiguration describes the configuration of initializers.",
	"metadata":     "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.",
	"initializers": "Initializers is a list of resources and their default initializers Order-sensitive. When merging multiple InitializerConfigurations, we sort the initializers from different InitializerConfigurations by the name of the InitializerConfigurations; the order of the initializers from the same InitializerConfiguration is preserved.",
}

func (InitializerConfiguration) SwaggerDoc() map[string]string {
	return map_InitializerConfiguration
}

var map_InitializerConfigurationList = map[string]string{
	"":         "InitializerConfigurationList is a list of InitializerConfiguration.",
	"metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
	"items":    "List of InitializerConfiguration.",
}

func (InitializerConfigurationList) SwaggerDoc() map[string]string {
	return map_InitializerConfigurationList
}

var map_Rule = map[string]string{
	"":            "Rule is a tuple of APIGroups, APIVersion, and Resources.It is recommended to make sure that all the tuple expansions are valid.",
	"apiGroups":   "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.",
	"apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.",
	"resources":   "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.",
}

func (Rule) SwaggerDoc() map[string]string {
	return map_Rule
}

// AUTO-GENERATED FUNCTIONS END HERE
@ -15,11 +15,12 @@ limitations under the License.
*/

// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true
// +groupName=admissionregistration.k8s.io

// Package v1beta1 is the v1beta1 version of the API.
// AdmissionConfiguration and AdmissionPluginConfiguration are legacy static admission plugin configuration
// InitializerConfiguration and validatingWebhookConfiguration is for the
// MutatingWebhookConfiguration and ValidatingWebhookConfiguration are for the
// new dynamic admission controller configuration.
// +groupName=admissionregistration.k8s.io
package v1beta1 // import "k8s.io/api/admissionregistration/v1beta1"
@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by protoc-gen-gogo.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/api/admissionregistration/v1beta1/generated.proto
// DO NOT EDIT!

/*
	Package v1beta1 is a generated protocol buffer package.
@ -250,6 +249,12 @@ func (m *Rule) MarshalTo(dAtA []byte) (int, error) {
			i += copy(dAtA[i:], s)
		}
	}
	if m.Scope != nil {
		dAtA[i] = 0x22
		i++
		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.Scope)))
		i += copy(dAtA[i:], *m.Scope)
	}
	return i, nil
}
@ -463,6 +468,26 @@ func (m *Webhook) MarshalTo(dAtA []byte) (int, error) {
		i = encodeVarintGenerated(dAtA, i, uint64(len(*m.SideEffects)))
		i += copy(dAtA[i:], *m.SideEffects)
	}
	if m.TimeoutSeconds != nil {
		dAtA[i] = 0x38
		i++
		i = encodeVarintGenerated(dAtA, i, uint64(*m.TimeoutSeconds))
	}
	if len(m.AdmissionReviewVersions) > 0 {
		for _, s := range m.AdmissionReviewVersions {
			dAtA[i] = 0x42
			i++
			l = len(s)
			for l >= 1<<7 {
				dAtA[i] = uint8(uint64(l)&0x7f | 0x80)
				l >>= 7
				i++
			}
			dAtA[i] = uint8(l)
			i++
			i += copy(dAtA[i:], s)
		}
	}
	return i, nil
}
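The inner loop above is the standard protobuf base-128 varint encoding: emit the low seven bits with the continuation bit set until the value fits in one byte. A standalone sketch of the same scheme (the standard library's binary.PutUvarint is equivalent; putUvarint here is illustrative, not from this diff):

// putUvarint writes v as a base-128 varint into buf and returns the
// number of bytes written. Mirrors the generated loop above.
func putUvarint(buf []byte, v uint64) int {
	n := 0
	for v >= 1<<7 {
		buf[n] = uint8(v&0x7f | 0x80) // low 7 bits, continuation bit set
		v >>= 7
		n++
	}
	buf[n] = uint8(v) // final byte, continuation bit clear
	return n + 1
}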
@ -506,24 +531,6 @@ func (m *WebhookClientConfig) MarshalTo(dAtA []byte) (int, error) {
	return i, nil
}

func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	dAtA[offset+4] = uint8(v >> 32)
	dAtA[offset+5] = uint8(v >> 40)
	dAtA[offset+6] = uint8(v >> 48)
	dAtA[offset+7] = uint8(v >> 56)
	return offset + 8
}
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
	dAtA[offset] = uint8(v)
	dAtA[offset+1] = uint8(v >> 8)
	dAtA[offset+2] = uint8(v >> 16)
	dAtA[offset+3] = uint8(v >> 24)
	return offset + 4
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dAtA[offset] = uint8(v&0x7f | 0x80)
@ -582,6 +589,10 @@ func (m *Rule) Size() (n int) {
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	if m.Scope != nil {
		l = len(*m.Scope)
		n += 1 + l + sovGenerated(uint64(l))
	}
	return n
}
@ -666,6 +677,15 @@ func (m *Webhook) Size() (n int) {
		l = len(*m.SideEffects)
		n += 1 + l + sovGenerated(uint64(l))
	}
	if m.TimeoutSeconds != nil {
		n += 1 + sovGenerated(uint64(*m.TimeoutSeconds))
	}
	if len(m.AdmissionReviewVersions) > 0 {
		for _, s := range m.AdmissionReviewVersions {
			l = len(s)
			n += 1 + l + sovGenerated(uint64(l))
		}
	}
	return n
}
@ -730,6 +750,7 @@ func (this *Rule) String() string {
		`APIGroups:` + fmt.Sprintf("%v", this.APIGroups) + `,`,
		`APIVersions:` + fmt.Sprintf("%v", this.APIVersions) + `,`,
		`Resources:` + fmt.Sprintf("%v", this.Resources) + `,`,
		`Scope:` + valueToStringGenerated(this.Scope) + `,`,
		`}`,
	}, "")
	return s
@ -790,6 +811,8 @@ func (this *Webhook) String() string {
		`FailurePolicy:` + valueToStringGenerated(this.FailurePolicy) + `,`,
		`NamespaceSelector:` + strings.Replace(fmt.Sprintf("%v", this.NamespaceSelector), "LabelSelector", "k8s_io_apimachinery_pkg_apis_meta_v1.LabelSelector", 1) + `,`,
		`SideEffects:` + valueToStringGenerated(this.SideEffects) + `,`,
		`TimeoutSeconds:` + valueToStringGenerated(this.TimeoutSeconds) + `,`,
		`AdmissionReviewVersions:` + fmt.Sprintf("%v", this.AdmissionReviewVersions) + `,`,
		`}`,
	}, "")
	return s
@ -1152,6 +1175,36 @@ func (m *Rule) Unmarshal(dAtA []byte) error {
			}
			m.Resources = append(m.Resources, string(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		case 4:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field Scope", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			s := ScopeType(dAtA[iNdEx:postIndex])
			m.Scope = &s
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
@ -1854,6 +1907,55 @@ func (m *Webhook) Unmarshal(dAtA []byte) error {
			s := SideEffectClass(dAtA[iNdEx:postIndex])
			m.SideEffects = &s
			iNdEx = postIndex
		case 7:
			if wireType != 0 {
				return fmt.Errorf("proto: wrong wireType = %d for field TimeoutSeconds", wireType)
			}
			var v int32
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				v |= (int32(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			m.TimeoutSeconds = &v
		case 8:
			if wireType != 2 {
				return fmt.Errorf("proto: wrong wireType = %d for field AdmissionReviewVersions", wireType)
			}
			var stringLen uint64
			for shift := uint(0); ; shift += 7 {
				if shift >= 64 {
					return ErrIntOverflowGenerated
				}
				if iNdEx >= l {
					return io.ErrUnexpectedEOF
				}
				b := dAtA[iNdEx]
				iNdEx++
				stringLen |= (uint64(b) & 0x7F) << shift
				if b < 0x80 {
					break
				}
			}
			intStringLen := int(stringLen)
			if intStringLen < 0 {
				return ErrInvalidLengthGenerated
			}
			postIndex := iNdEx + intStringLen
			if postIndex > l {
				return io.ErrUnexpectedEOF
			}
			m.AdmissionReviewVersions = append(m.AdmissionReviewVersions, string(dAtA[iNdEx:postIndex]))
			iNdEx = postIndex
		default:
			iNdEx = preIndex
			skippy, err := skipGenerated(dAtA[iNdEx:])
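The decode loops in these cases mirror the encoder: accumulate seven bits per byte until the continuation bit clears. A standalone sketch of the same scheme (binary.Uvarint in the standard library is the equivalent; readUvarint is illustrative and assumes the errors and io packages are imported):

// readUvarint reads a base-128 varint from dAtA starting at iNdEx and
// returns the value and the new offset. Mirrors the generated loops.
func readUvarint(dAtA []byte, iNdEx int) (uint64, int, error) {
	var v uint64
	for shift := uint(0); ; shift += 7 {
		if shift >= 64 {
			return 0, iNdEx, errors.New("integer overflow")
		}
		if iNdEx >= len(dAtA) {
			return 0, iNdEx, io.ErrUnexpectedEOF
		}
		b := dAtA[iNdEx]
		iNdEx++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			break
		}
	}
	return v, iNdEx, nil
}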
@ -2129,62 +2231,67 @@ func init() {
}

var fileDescriptorGenerated = []byte{
	// 906 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x54, 0xcf, 0x6f, 0xe3, 0x44,
	0x14, 0x8e, 0x37, 0x29, 0x49, 0x26, 0x89, 0x76, 0x3b, 0x80, 0x14, 0xaa, 0x95, 0x1d, 0xe5, 0x80,
	0x22, 0xa1, 0xb5, 0x49, 0x41, 0x08, 0x21, 0x10, 0xaa, 0x0b, 0x0b, 0x95, 0xba, 0xbb, 0x61, 0x0a,
	0xbb, 0x12, 0xe2, 0xc0, 0xc4, 0x79, 0x49, 0x86, 0xf8, 0x97, 0x66, 0xc6, 0x59, 0x7a, 0x43, 0xe2,
	0x1f, 0x40, 0x42, 0xfc, 0x0d, 0xfc, 0x15, 0xdc, 0x7b, 0xdc, 0x0b, 0x62, 0x4f, 0x16, 0x35, 0x67,
	0x0e, 0x5c, 0x7b, 0x42, 0x63, 0x3b, 0x71, 0xd2, 0x6c, 0xbb, 0xe9, 0x85, 0x03, 0x37, 0xcf, 0xf7,
	0xe6, 0xfb, 0xde, 0xfb, 0x9e, 0xdf, 0x1b, 0xf4, 0xc5, 0xec, 0x7d, 0x61, 0xb2, 0xc0, 0x9a, 0x45,
	0x43, 0xe0, 0x3e, 0x48, 0x10, 0xd6, 0x1c, 0xfc, 0x51, 0xc0, 0xad, 0x3c, 0x40, 0x43, 0x66, 0xd1,
	0x91, 0xc7, 0x84, 0x60, 0x81, 0xcf, 0x61, 0xc2, 0x84, 0xe4, 0x54, 0xb2, 0xc0, 0xb7, 0xe6, 0xfd,
	0x21, 0x48, 0xda, 0xb7, 0x26, 0xe0, 0x03, 0xa7, 0x12, 0x46, 0x66, 0xc8, 0x03, 0x19, 0xe0, 0x5e,
	0xc6, 0x34, 0x69, 0xc8, 0xcc, 0x17, 0x32, 0xcd, 0x9c, 0xb9, 0x77, 0x6f, 0xc2, 0xe4, 0x34, 0x1a,
	0x9a, 0x4e, 0xe0, 0x59, 0x93, 0x60, 0x12, 0x58, 0xa9, 0xc0, 0x30, 0x1a, 0xa7, 0xa7, 0xf4, 0x90,
	0x7e, 0x65, 0xc2, 0x7b, 0xef, 0x16, 0x25, 0x79, 0xd4, 0x99, 0x32, 0x1f, 0xf8, 0xa9, 0x15, 0xce,
	0x26, 0x0a, 0x10, 0x96, 0x07, 0x92, 0x5a, 0xf3, 0x8d, 0x72, 0xf6, 0xac, 0xab, 0x58, 0x3c, 0xf2,
	0x25, 0xf3, 0x60, 0x83, 0xf0, 0xde, 0xcb, 0x08, 0xc2, 0x99, 0x82, 0x47, 0x2f, 0xf3, 0xba, 0xbf,
	0x6b, 0xe8, 0xee, 0x83, 0x48, 0x52, 0xc9, 0xfc, 0xc9, 0x13, 0x18, 0x4e, 0x83, 0x60, 0x76, 0x18,
	0xf8, 0x63, 0x36, 0x89, 0x32, 0xdb, 0xf8, 0x5b, 0x54, 0x53, 0x45, 0x8e, 0xa8, 0xa4, 0x6d, 0xad,
	0xa3, 0xf5, 0x1a, 0xfb, 0x6f, 0x9b, 0x45, 0xaf, 0x96, 0xb9, 0xcc, 0x70, 0x36, 0x51, 0x80, 0x30,
	0xd5, 0x6d, 0x73, 0xde, 0x37, 0x1f, 0x0d, 0xbf, 0x03, 0x47, 0x3e, 0x00, 0x49, 0x6d, 0x7c, 0x16,
	0x1b, 0xa5, 0x24, 0x36, 0x50, 0x81, 0x91, 0xa5, 0x2a, 0x3e, 0x41, 0xb5, 0x3c, 0xb3, 0x68, 0xdf,
	0xea, 0x94, 0x7b, 0x8d, 0xfd, 0xbe, 0xb9, 0xed, 0xdf, 0x30, 0x73, 0xa6, 0x5d, 0x51, 0x29, 0x48,
	0xed, 0x69, 0x2e, 0xd4, 0xfd, 0x5b, 0x43, 0x9d, 0xeb, 0x7c, 0x1d, 0x33, 0x21, 0xf1, 0x37, 0x1b,
	0xde, 0xcc, 0xed, 0xbc, 0x29, 0x76, 0xea, 0xec, 0x4e, 0xee, 0xac, 0xb6, 0x40, 0x56, 0x7c, 0xcd,
	0xd0, 0x0e, 0x93, 0xe0, 0x2d, 0x4c, 0xdd, 0xdf, 0xde, 0xd4, 0x75, 0x85, 0xdb, 0xad, 0x3c, 0xe5,
	0xce, 0x91, 0x12, 0x27, 0x59, 0x8e, 0xee, 0xcf, 0x1a, 0xaa, 0x90, 0xc8, 0x05, 0xfc, 0x16, 0xaa,
	0xd3, 0x90, 0x7d, 0xc6, 0x83, 0x28, 0x14, 0x6d, 0xad, 0x53, 0xee, 0xd5, 0xed, 0x56, 0x12, 0x1b,
	0xf5, 0x83, 0xc1, 0x51, 0x06, 0x92, 0x22, 0x8e, 0xfb, 0xa8, 0x41, 0x43, 0xf6, 0x18, 0xb8, 0x2a,
	0x25, 0x2b, 0xb4, 0x6e, 0xdf, 0x4e, 0x62, 0xa3, 0x71, 0x30, 0x38, 0x5a, 0xc0, 0x64, 0xf5, 0x8e,
	0xd2, 0xe7, 0x20, 0x82, 0x88, 0x3b, 0x20, 0xda, 0xe5, 0x42, 0x9f, 0x2c, 0x40, 0x52, 0xc4, 0xbb,
	0xbf, 0x6a, 0x08, 0xab, 0xaa, 0x9e, 0x30, 0x39, 0x7d, 0x14, 0x42, 0xe6, 0x40, 0xe0, 0x8f, 0x11,
	0x0a, 0x96, 0xa7, 0xbc, 0x48, 0x23, 0x9d, 0x8f, 0x25, 0x7a, 0x11, 0x1b, 0xad, 0xe5, 0xe9, 0xcb,
	0xd3, 0x10, 0xc8, 0x0a, 0x05, 0x0f, 0x50, 0x85, 0x47, 0x2e, 0xb4, 0x6f, 0x6d, 0xfc, 0xb4, 0x97,
	0x74, 0x56, 0x15, 0x63, 0x37, 0xf3, 0x0e, 0xa6, 0x0d, 0x23, 0xa9, 0x52, 0xf7, 0x47, 0x0d, 0xdd,
	0x39, 0x01, 0x3e, 0x67, 0x0e, 0x10, 0x18, 0x03, 0x07, 0xdf, 0x01, 0x6c, 0xa1, 0xba, 0x4f, 0x3d,
	0x10, 0x21, 0x75, 0x20, 0x1d, 0x90, 0xba, 0xbd, 0x9b, 0x73, 0xeb, 0x0f, 0x17, 0x01, 0x52, 0xdc,
	0xc1, 0x1d, 0x54, 0x51, 0x87, 0xb4, 0xae, 0x7a, 0x91, 0x47, 0xdd, 0x25, 0x69, 0x04, 0xdf, 0x45,
	0x95, 0x90, 0xca, 0x69, 0xbb, 0x9c, 0xde, 0xa8, 0xa9, 0xe8, 0x80, 0xca, 0x29, 0x49, 0xd1, 0xee,
	0x1f, 0x1a, 0xd2, 0x1f, 0x53, 0x97, 0x8d, 0xfe, 0x77, 0xfb, 0xf8, 0x8f, 0x86, 0xba, 0xd7, 0x3b,
	0xfb, 0x0f, 0x36, 0xd2, 0x5b, 0xdf, 0xc8, 0xcf, 0xb7, 0xb7, 0x75, 0x7d, 0xe9, 0x57, 0xec, 0xe4,
	0x2f, 0x15, 0x54, 0xcd, 0xaf, 0x2f, 0x27, 0x43, 0xbb, 0x72, 0x32, 0x9e, 0xa2, 0xa6, 0xe3, 0x32,
	0xf0, 0x65, 0x26, 0x9d, 0xcf, 0xf6, 0x47, 0x37, 0x6e, 0xfd, 0xe1, 0x8a, 0x88, 0xfd, 0x5a, 0x9e,
	0xa8, 0xb9, 0x8a, 0x92, 0xb5, 0x44, 0x98, 0xa2, 0x1d, 0xb5, 0x02, 0xd9, 0x36, 0x37, 0xf6, 0x3f,
	0xbc, 0xd9, 0x36, 0xad, 0xaf, 0x76, 0xd1, 0x09, 0x15, 0x13, 0x24, 0x53, 0xc6, 0xc7, 0xa8, 0x35,
	0xa6, 0xcc, 0x8d, 0x38, 0x0c, 0x02, 0x97, 0x39, 0xa7, 0xed, 0x4a, 0xda, 0x86, 0x37, 0x93, 0xd8,
	0x68, 0xdd, 0x5f, 0x0d, 0x5c, 0xc4, 0xc6, 0xee, 0x1a, 0x90, 0xae, 0xfe, 0x3a, 0x19, 0x7f, 0x8f,
	0x76, 0x97, 0x2b, 0x77, 0x02, 0x2e, 0x38, 0x32, 0xe0, 0xed, 0x9d, 0xb4, 0x5d, 0xef, 0x6c, 0x39,
	0x2d, 0x74, 0x08, 0xee, 0x82, 0x6a, 0xbf, 0x9e, 0xc4, 0xc6, 0xee, 0xc3, 0xcb, 0x8a, 0x64, 0x33,
	0x09, 0xfe, 0x04, 0x35, 0x04, 0x1b, 0xc1, 0xa7, 0xe3, 0x31, 0x38, 0x52, 0xb4, 0x5f, 0x49, 0x5d,
	0x74, 0xd5, 0x7b, 0x79, 0x52, 0xc0, 0x17, 0xb1, 0x71, 0xbb, 0x38, 0x1e, 0xba, 0x54, 0x08, 0xb2,
	0x4a, 0xeb, 0xfe, 0xa6, 0xa1, 0x57, 0x5f, 0xf0, 0xb3, 0x30, 0x45, 0x55, 0x91, 0x3d, 0x41, 0xf9,
	0xec, 0x7f, 0xb0, 0xfd, 0xaf, 0xb8, 0xfc, 0x76, 0xd9, 0x8d, 0x24, 0x36, 0xaa, 0x0b, 0x74, 0xa1,
	0x8b, 0x7b, 0xa8, 0xe6, 0x50, 0x3b, 0xf2, 0x47, 0xf9, 0xe3, 0xd9, 0xb4, 0x9b, 0x6a, 0x57, 0x0e,
	0x0f, 0x32, 0x8c, 0x2c, 0xa3, 0xf8, 0x0d, 0x54, 0x8e, 0xb8, 0x9b, 0xbf, 0x53, 0xd5, 0x24, 0x36,
	0xca, 0x5f, 0x91, 0x63, 0xa2, 0x30, 0xfb, 0xde, 0xd9, 0xb9, 0x5e, 0x7a, 0x76, 0xae, 0x97, 0x9e,
	0x9f, 0xeb, 0xa5, 0x1f, 0x12, 0x5d, 0x3b, 0x4b, 0x74, 0xed, 0x59, 0xa2, 0x6b, 0xcf, 0x13, 0x5d,
	0xfb, 0x33, 0xd1, 0xb5, 0x9f, 0xfe, 0xd2, 0x4b, 0x5f, 0x57, 0xf3, 0xd2, 0xfe, 0x0d, 0x00, 0x00,
	0xff, 0xff, 0x85, 0x06, 0x8c, 0x7f, 0xae, 0x09, 0x00, 0x00,
	// 989 bytes of a gzipped FileDescriptorProto
	0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0xdc, 0x55, 0x4f, 0x6f, 0xe3, 0x44,
	0x14, 0xaf, 0x37, 0x09, 0x49, 0x26, 0xed, 0xee, 0x76, 0xf8, 0xb3, 0xa1, 0xac, 0xec, 0x28, 0x07,
	0x14, 0x09, 0xd6, 0xa6, 0x05, 0x21, 0xb4, 0x02, 0xa1, 0xba, 0xb0, 0x50, 0xa9, 0xbb, 0x5b, 0x26,
	0xfb, 0x47, 0x42, 0x1c, 0x98, 0x38, 0x2f, 0xc9, 0x10, 0xc7, 0x63, 0x79, 0xc6, 0x29, 0xbd, 0x21,
	0xf1, 0x05, 0xf8, 0x16, 0xf0, 0x25, 0x38, 0x70, 0xeb, 0x71, 0x2f, 0x88, 0x3d, 0x59, 0xd4, 0x9c,
	0x39, 0x70, 0xed, 0x09, 0x8d, 0xed, 0xd8, 0x49, 0xd3, 0x76, 0xb3, 0x17, 0x0e, 0xdc, 0x3c, 0xbf,
	0xf7, 0x7e, 0xef, 0xbd, 0xdf, 0xcc, 0x7b, 0xcf, 0xe8, 0xab, 0xf1, 0x47, 0xc2, 0x64, 0xdc, 0x1a,
	0x87, 0x3d, 0x08, 0x3c, 0x90, 0x20, 0xac, 0x29, 0x78, 0x7d, 0x1e, 0x58, 0x99, 0x81, 0xfa, 0xcc,
	0xa2, 0xfd, 0x09, 0x13, 0x82, 0x71, 0x2f, 0x80, 0x21, 0x13, 0x32, 0xa0, 0x92, 0x71, 0xcf, 0x9a,
	0x6e, 0xf7, 0x40, 0xd2, 0x6d, 0x6b, 0x08, 0x1e, 0x04, 0x54, 0x42, 0xdf, 0xf4, 0x03, 0x2e, 0x39,
	0xee, 0xa4, 0x4c, 0x93, 0xfa, 0xcc, 0xbc, 0x90, 0x69, 0x66, 0xcc, 0xad, 0x3b, 0x43, 0x26, 0x47,
	0x61, 0xcf, 0x74, 0xf8, 0xc4, 0x1a, 0xf2, 0x21, 0xb7, 0x92, 0x00, 0xbd, 0x70, 0x90, 0x9c, 0x92,
	0x43, 0xf2, 0x95, 0x06, 0xde, 0xfa, 0xa0, 0x28, 0x69, 0x42, 0x9d, 0x11, 0xf3, 0x20, 0x38, 0xb6,
	0xfc, 0xf1, 0x50, 0x01, 0xc2, 0x9a, 0x80, 0xa4, 0xd6, 0x74, 0xa9, 0x9c, 0x2d, 0xeb, 0x32, 0x56,
|
||||
0x10, 0x7a, 0x92, 0x4d, 0x60, 0x89, 0xf0, 0xe1, 0x8b, 0x08, 0xc2, 0x19, 0xc1, 0x84, 0x9e, 0xe7,
|
||||
0xb5, 0x7f, 0xd7, 0xd0, 0xed, 0xfb, 0xa1, 0xa4, 0x92, 0x79, 0xc3, 0xa7, 0xd0, 0x1b, 0x71, 0x3e,
|
||||
0xde, 0xe3, 0xde, 0x80, 0x0d, 0xc3, 0x54, 0x36, 0xfe, 0x16, 0xd5, 0x54, 0x91, 0x7d, 0x2a, 0x69,
|
||||
0x53, 0x6b, 0x69, 0x9d, 0xc6, 0xce, 0x7b, 0x66, 0x71, 0x57, 0x79, 0x2e, 0xd3, 0x1f, 0x0f, 0x15,
|
||||
0x20, 0x4c, 0xe5, 0x6d, 0x4e, 0xb7, 0xcd, 0x87, 0xbd, 0xef, 0xc0, 0x91, 0xf7, 0x41, 0x52, 0x1b,
|
||||
0x9f, 0x44, 0xc6, 0x5a, 0x1c, 0x19, 0xa8, 0xc0, 0x48, 0x1e, 0x15, 0x77, 0x51, 0x2d, 0xcb, 0x2c,
|
||||
0x9a, 0xd7, 0x5a, 0xa5, 0x4e, 0x63, 0x67, 0xdb, 0x5c, 0xf5, 0x35, 0xcc, 0x8c, 0x69, 0x97, 0x55,
|
||||
0x0a, 0x52, 0x3b, 0xca, 0x02, 0xb5, 0xff, 0xd6, 0x50, 0xeb, 0x2a, 0x5d, 0x07, 0x4c, 0x48, 0xfc,
|
||||
0xcd, 0x92, 0x36, 0x73, 0x35, 0x6d, 0x8a, 0x9d, 0x28, 0xbb, 0x99, 0x29, 0xab, 0xcd, 0x90, 0x39,
|
||||
0x5d, 0x63, 0x54, 0x61, 0x12, 0x26, 0x33, 0x51, 0xf7, 0x56, 0x17, 0x75, 0x55, 0xe1, 0xf6, 0x46,
|
||||
0x96, 0xb2, 0xb2, 0xaf, 0x82, 0x93, 0x34, 0x47, 0xfb, 0x37, 0x0d, 0x95, 0x49, 0xe8, 0x02, 0x7e,
|
||||
0x07, 0xd5, 0xa9, 0xcf, 0xbe, 0x08, 0x78, 0xe8, 0x8b, 0xa6, 0xd6, 0x2a, 0x75, 0xea, 0xf6, 0x46,
|
||||
0x1c, 0x19, 0xf5, 0xdd, 0xc3, 0xfd, 0x14, 0x24, 0x85, 0x1d, 0x6f, 0xa3, 0x06, 0xf5, 0xd9, 0x13,
|
||||
0x08, 0x54, 0x29, 0x69, 0xa1, 0x75, 0xfb, 0x46, 0x1c, 0x19, 0x8d, 0xdd, 0xc3, 0xfd, 0x19, 0x4c,
|
||||
0xe6, 0x7d, 0x54, 0xfc, 0x00, 0x04, 0x0f, 0x03, 0x07, 0x44, 0xb3, 0x54, 0xc4, 0x27, 0x33, 0x90,
|
||||
0x14, 0x76, 0xfc, 0x2e, 0xaa, 0x08, 0x87, 0xfb, 0xd0, 0x2c, 0xb7, 0xb4, 0x4e, 0xdd, 0x7e, 0x43,
|
||||
0x95, 0xdd, 0x55, 0xc0, 0x59, 0x64, 0xd4, 0x93, 0x8f, 0x47, 0xc7, 0x3e, 0x90, 0xd4, 0xa9, 0xfd,
|
||||
0xb3, 0x86, 0xb0, 0xd2, 0xf0, 0x94, 0xc9, 0xd1, 0x43, 0x1f, 0x52, 0xbd, 0x02, 0x7f, 0x8a, 0x10,
|
||||
0xcf, 0x4f, 0x99, 0x24, 0x23, 0xe9, 0xa6, 0x1c, 0x3d, 0x8b, 0x8c, 0x8d, 0xfc, 0x94, 0x84, 0x9c,
|
||||
0xa3, 0xe0, 0x43, 0x54, 0x0e, 0x42, 0x17, 0x9a, 0xd7, 0x96, 0x9e, 0xf8, 0x05, 0xef, 0xa0, 0x8a,
|
||||
0xb1, 0xd7, 0xb3, 0xfb, 0x4e, 0xae, 0x97, 0x24, 0x91, 0xda, 0x3f, 0x6a, 0xe8, 0x66, 0x17, 0x82,
|
||||
0x29, 0x73, 0x80, 0xc0, 0x00, 0x02, 0xf0, 0x1c, 0xc0, 0x16, 0xaa, 0x7b, 0x74, 0x02, 0xc2, 0xa7,
|
||||
0x0e, 0x24, 0xed, 0x54, 0xb7, 0x37, 0x33, 0x6e, 0xfd, 0xc1, 0xcc, 0x40, 0x0a, 0x1f, 0xdc, 0x42,
|
||||
0x65, 0x75, 0x48, 0xea, 0xaa, 0x17, 0x79, 0x94, 0x2f, 0x49, 0x2c, 0xf8, 0x36, 0x2a, 0xfb, 0x54,
|
||||
0x8e, 0x9a, 0xa5, 0xc4, 0xa3, 0xa6, 0xac, 0x87, 0x54, 0x8e, 0x48, 0x82, 0xb6, 0xff, 0xd0, 0x90,
|
||||
0xfe, 0x84, 0xba, 0xac, 0xff, 0xbf, 0x9b, 0xde, 0x7f, 0x34, 0xd4, 0xbe, 0x5a, 0xd9, 0x7f, 0x30,
|
||||
0xbf, 0x93, 0xc5, 0xf9, 0xfd, 0x72, 0x75, 0x59, 0x57, 0x97, 0x7e, 0xc9, 0x04, 0xff, 0x52, 0x41,
|
||||
0xd5, 0xcc, 0x3d, 0xef, 0x0c, 0xed, 0xd2, 0xce, 0x38, 0x42, 0xeb, 0x8e, 0xcb, 0xc0, 0x93, 0x69,
|
||||
0xe8, 0xac, 0xb7, 0x3f, 0x79, 0xe9, 0xab, 0xdf, 0x9b, 0x0b, 0x62, 0xbf, 0x96, 0x25, 0x5a, 0x9f,
|
||||
0x47, 0xc9, 0x42, 0x22, 0x4c, 0x51, 0x45, 0x8d, 0x40, 0x3a, 0xfb, 0x8d, 0x9d, 0x8f, 0x5f, 0x6e,
|
||||
0x9a, 0x16, 0x47, 0xbb, 0xb8, 0x09, 0x65, 0x13, 0x24, 0x8d, 0x8c, 0x0f, 0xd0, 0xc6, 0x80, 0x32,
|
||||
0x37, 0x0c, 0xe0, 0x90, 0xbb, 0xcc, 0x39, 0xce, 0xb6, 0xc7, 0xdb, 0x71, 0x64, 0x6c, 0xdc, 0x9b,
|
||||
0x37, 0x9c, 0x45, 0xc6, 0xe6, 0x02, 0x90, 0x8c, 0xfe, 0x22, 0x19, 0x7f, 0x8f, 0x36, 0xf3, 0x91,
|
||||
0xeb, 0x82, 0x0b, 0x8e, 0xe4, 0x41, 0xb3, 0x92, 0x5c, 0xd7, 0xfb, 0x2b, 0x76, 0x0b, 0xed, 0x81,
|
||||
0x3b, 0xa3, 0xda, 0xaf, 0xc7, 0x91, 0xb1, 0xf9, 0xe0, 0x7c, 0x44, 0xb2, 0x9c, 0x04, 0x7f, 0x86,
|
||||
0x1a, 0x82, 0xf5, 0xe1, 0xf3, 0xc1, 0x00, 0x1c, 0x29, 0x9a, 0xaf, 0x24, 0x2a, 0xda, 0x6a, 0xbb,
|
||||
0x76, 0x0b, 0xf8, 0x2c, 0x32, 0x6e, 0x14, 0xc7, 0x3d, 0x97, 0x0a, 0x41, 0xe6, 0x69, 0xf8, 0x2e,
|
||||
0xba, 0xae, 0x7e, 0xe0, 0x3c, 0x94, 0x5d, 0x70, 0xb8, 0xd7, 0x17, 0xcd, 0x6a, 0x4b, 0xeb, 0x54,
|
||||
0x6c, 0x1c, 0x47, 0xc6, 0xf5, 0x47, 0x0b, 0x16, 0x72, 0xce, 0x13, 0x3f, 0x46, 0xb7, 0xf2, 0x37,
|
||||
0x21, 0x30, 0x65, 0x70, 0x94, 0xef, 0xfa, 0x5a, 0xb2, 0x47, 0xdf, 0x8a, 0x23, 0xe3, 0xd6, 0xee,
|
||||
0xc5, 0x2e, 0xe4, 0x32, 0x6e, 0xfb, 0x57, 0x0d, 0xbd, 0x7a, 0x41, 0xff, 0x60, 0x8a, 0xaa, 0x22,
|
||||
0xdd, 0x8a, 0xd9, 0x38, 0xde, 0x5d, 0xbd, 0x3b, 0xce, 0xaf, 0x53, 0xbb, 0x11, 0x47, 0x46, 0x75,
|
||||
0x86, 0xce, 0xe2, 0xe2, 0x0e, 0xaa, 0x39, 0xd4, 0x0e, 0xbd, 0x7e, 0xb6, 0xcf, 0xd7, 0xed, 0x75,
|
||||
0x35, 0xbe, 0x7b, 0xbb, 0x29, 0x46, 0x72, 0x2b, 0x7e, 0x13, 0x95, 0xc2, 0xc0, 0xcd, 0x56, 0x67,
|
||||
0x35, 0x8e, 0x8c, 0xd2, 0x63, 0x72, 0x40, 0x14, 0x66, 0xdf, 0x39, 0x39, 0xd5, 0xd7, 0x9e, 0x9d,
|
||||
0xea, 0x6b, 0xcf, 0x4f, 0xf5, 0xb5, 0x1f, 0x62, 0x5d, 0x3b, 0x89, 0x75, 0xed, 0x59, 0xac, 0x6b,
|
||||
0xcf, 0x63, 0x5d, 0xfb, 0x33, 0xd6, 0xb5, 0x9f, 0xfe, 0xd2, 0xd7, 0xbe, 0xae, 0x66, 0xa5, 0xfd,
|
||||
0x1b, 0x00, 0x00, 0xff, 0xff, 0xbb, 0xc0, 0x7c, 0xc4, 0x6f, 0x0a, 0x00, 0x00,
|
||||
}
@@ -49,8 +49,32 @@ type Rule struct {
// Depending on the enclosing object, subresources might not be allowed.
// Required.
Resources []string `json:"resources,omitempty" protobuf:"bytes,3,rep,name=resources"`

// scope specifies the scope of this rule.
// Valid values are "Cluster", "Namespaced", and "*"
// "Cluster" means that only cluster-scoped resources will match this rule.
// Namespace API objects are cluster-scoped.
// "Namespaced" means that only namespaced resources will match this rule.
// "*" means that there are no scope restrictions.
// Subresources match the scope of their parent resource.
// Default is "*".
//
// +optional
Scope *ScopeType `json:"scope,omitempty" protobuf:"bytes,4,rep,name=scope"`
}

type ScopeType string

const (
// ClusterScope means that scope is limited to cluster-scoped objects.
// Namespace objects are cluster-scoped.
ClusterScope ScopeType = "Cluster"
// NamespacedScope means that scope is limited to namespaced objects.
NamespacedScope ScopeType = "Namespaced"
// AllScopes means that all scopes are included.
AllScopes ScopeType = "*"
)

type FailurePolicyType string

const (
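The scope field added above is the only new knob in Rule; everything else is context. A minimal sketch of how a caller might populate it, assuming the vendored admissionregistration/v1beta1 types (the function and values are illustrative, not part of this diff):

package example

import (
	arv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

// namespacedPodRule matches CREATE of namespaced pods only; leaving
// Scope nil would default to "*" (no scope restriction).
func namespacedPodRule() arv1beta1.RuleWithOperations {
	scope := arv1beta1.NamespacedScope
	return arv1beta1.RuleWithOperations{
		Operations: []arv1beta1.OperationType{arv1beta1.Create},
		Rule: arv1beta1.Rule{
			APIGroups:   []string{""},
			APIVersions: []string{"v1"},
			Resources:   []string{"pods"},
			Scope:       &scope,
		},
	}
}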
@@ -216,6 +240,25 @@ type Webhook struct {
// sideEffects == Unknown or Some. Defaults to Unknown.
// +optional
SideEffects *SideEffectClass `json:"sideEffects,omitempty" protobuf:"bytes,6,opt,name=sideEffects,casttype=SideEffectClass"`

// TimeoutSeconds specifies the timeout for this webhook. After the timeout passes,
// the webhook call will be ignored or the API call will fail based on the
// failure policy.
// The timeout value must be between 1 and 30 seconds.
// Defaults to 30 seconds.
// +optional
TimeoutSeconds *int32 `json:"timeoutSeconds,omitempty" protobuf:"varint,7,opt,name=timeoutSeconds"`

// AdmissionReviewVersions is an ordered list of preferred `AdmissionReview`
// versions the Webhook expects. The API server will try to use the first version in
// the list which it supports. If none of the versions specified in this list
// are supported by the API server, validation will fail for this object.
// If a persisted webhook configuration specifies allowed versions and does not
// include any versions known to the API Server, calls to the webhook will fail
// and be subject to the failure policy.
// Defaults to `['v1beta1']`.
// +optional
AdmissionReviewVersions []string `json:"admissionReviewVersions,omitempty" protobuf:"bytes,8,rep,name=admissionReviewVersions"`
}

// RuleWithOperations is a tuple of Operations and Resources. It is recommended to make
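Both new Webhook fields are plain value settings; a short hedged sketch of using them, with the bounds and defaults taken from the comments in the hunk above (names are illustrative):

package example

import (
	arv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

// tighten gives a webhook a 5-second timeout (allowed range is 1 to 30;
// the default is 30) and pins the AdmissionReview version to v1beta1,
// which the comment above documents as the default.
func tighten(w *arv1beta1.Webhook) {
	timeout := int32(5)
	w.TimeoutSeconds = &timeout
	w.AdmissionReviewVersions = []string{"v1beta1"}
}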
@@ -246,7 +289,7 @@ const (
// connection with the webhook
type WebhookClientConfig struct {
// `url` gives the location of the webhook, in standard URL form
// (`[scheme://]host:port/path`). Exactly one of `url` or `service`
// (`scheme://host:port/path`). Exactly one of `url` or `service`
// must be specified.
//
// The `host` should not refer to a service running in the cluster; use
@@ -282,12 +325,12 @@ type WebhookClientConfig struct {
// Port 443 will be used if it is open, otherwise it is an error.
//
// +optional
Service *ServiceReference `json:"service" protobuf:"bytes,1,opt,name=service"`
Service *ServiceReference `json:"service,omitempty" protobuf:"bytes,1,opt,name=service"`

// `caBundle` is a PEM encoded CA bundle which will be used to validate
// the webhook's server certificate.
// Required.
CABundle []byte `json:"caBundle" protobuf:"bytes,2,opt,name=caBundle"`
// `caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate.
// If unspecified, system trust roots on the apiserver are used.
// +optional
CABundle []byte `json:"caBundle,omitempty" protobuf:"bytes,2,opt,name=caBundle"`
}

// ServiceReference holds a reference to Service.legacy.k8s.io
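With service and caBundle now optional, a client config can point at an out-of-cluster endpoint and let the apiserver fall back to its system trust roots. A hedged sketch; the URL field name is assumed to match the upstream Go type, and the host is a placeholder:

package example

import (
	arv1beta1 "k8s.io/api/admissionregistration/v1beta1"
)

// externalConfig sets `url` only; exactly one of `url` or `service`
// may be set, and the scheme must be https.
func externalConfig() arv1beta1.WebhookClientConfig {
	url := "https://webhook.example.com:8443/validate" // illustrative host
	return arv1beta1.WebhookClientConfig{
		URL: &url,
		// CABundle omitted: system trust roots on the apiserver are used.
	}
}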
vendor/k8s.io/api/admissionregistration/v1beta1/types_swagger_doc_generated.go (generated, vendored; 21 changes)
@@ -52,6 +52,7 @@ var map_Rule = map[string]string{
"apiGroups": "APIGroups is the API groups the resources belong to. '*' is all groups. If '*' is present, the length of the slice must be one. Required.",
"apiVersions": "APIVersions is the API versions the resources belong to. '*' is all versions. If '*' is present, the length of the slice must be one. Required.",
"resources": "Resources is a list of resources this rule applies to.\n\nFor example: 'pods' means pods. 'pods/log' means the log subresource of pods. '*' means all resources, but not subresources. 'pods/*' means all subresources of pods. '*/scale' means all scale subresources. '*/*' means all resources and their subresources.\n\nIf wildcard is present, the validation rule will ensure resources do not overlap with each other.\n\nDepending on the enclosing object, subresources might not be allowed. Required.",
"scope": "scope specifies the scope of this rule. Valid values are \"Cluster\", \"Namespaced\", and \"*\" \"Cluster\" means that only cluster-scoped resources will match this rule. Namespace API objects are cluster-scoped. \"Namespaced\" means that only namespaced resources will match this rule. \"*\" means that there are no scope restrictions. Subresources match the scope of their parent resource. Default is \"*\".",
}

func (Rule) SwaggerDoc() map[string]string {
@@ -99,13 +100,15 @@ func (ValidatingWebhookConfigurationList) SwaggerDoc() map[string]string {
}

var map_Webhook = map[string]string{
"": "Webhook describes an admission webhook and the resources and operations it applies to.",
"name": "The name of the admission webhook. Name should be fully qualified, e.g., imagepolicy.kubernetes.io, where \"imagepolicy\" is the name of the webhook, and kubernetes.io is the name of the organization. Required.",
"clientConfig": "ClientConfig defines how to communicate with the hook. Required",
"rules": "Rules describes what operations on what resources/subresources the webhook cares about. The webhook cares about an operation if it matches _any_ Rule. However, in order to prevent ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks from putting the cluster in a state which cannot be recovered from without completely disabling the plugin, ValidatingAdmissionWebhooks and MutatingAdmissionWebhooks are never called on admission requests for ValidatingWebhookConfiguration and MutatingWebhookConfiguration objects.",
"failurePolicy": "FailurePolicy defines how unrecognized errors from the admission endpoint are handled - allowed values are Ignore or Fail. Defaults to Ignore.",
"namespaceSelector": "NamespaceSelector decides whether to run the webhook on an object based on whether the namespace for that object matches the selector. If the object itself is a namespace, the matching is performed on object.metadata.labels. If the object is another cluster scoped resource, it never skips the webhook.\n\nFor example, to run the webhook on any objects whose namespace is not associated with \"runlevel\" of \"0\" or \"1\"; you will set the selector as follows: \"namespaceSelector\": {\n  \"matchExpressions\": [\n    {\n      \"key\": \"runlevel\",\n      \"operator\": \"NotIn\",\n      \"values\": [\n        \"0\",\n        \"1\"\n      ]\n    }\n  ]\n}\n\nIf instead you want to only run the webhook on any objects whose namespace is associated with the \"environment\" of \"prod\" or \"staging\"; you will set the selector as follows: \"namespaceSelector\": {\n  \"matchExpressions\": [\n    {\n      \"key\": \"environment\",\n      \"operator\": \"In\",\n      \"values\": [\n        \"prod\",\n        \"staging\"\n      ]\n    }\n  ]\n}\n\nSee https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ for more examples of label selectors.\n\nDefaults to the empty LabelSelector, which matches everything.",
"sideEffects": "SideEffects states whether this webhook has side effects. Acceptable values are: Unknown, None, Some, NoneOnDryRun. Webhooks with side effects MUST implement a reconciliation system, since a request may be rejected by a future step in the admission chain and the side effects therefore need to be undone. Requests with the dryRun attribute will be auto-rejected if they match a webhook with sideEffects == Unknown or Some. Defaults to Unknown.",
"timeoutSeconds": "TimeoutSeconds specifies the timeout for this webhook. After the timeout passes, the webhook call will be ignored or the API call will fail based on the failure policy. The timeout value must be between 1 and 30 seconds. Defaults to 30 seconds.",
"admissionReviewVersions": "AdmissionReviewVersions is an ordered list of preferred `AdmissionReview` versions the Webhook expects. The API server will try to use the first version in the list which it supports. If none of the versions specified in this list are supported by the API server, validation will fail for this object. If a persisted webhook configuration specifies allowed versions and does not include any versions known to the API Server, calls to the webhook will fail and be subject to the failure policy. Defaults to `['v1beta1']`.",
}

func (Webhook) SwaggerDoc() map[string]string {
@@ -114,9 +117,9 @@ func (Webhook) SwaggerDoc() map[string]string {

var map_WebhookClientConfig = map[string]string{
"": "WebhookClientConfig contains the information to make a TLS connection with the webhook",
"url": "`url` gives the location of the webhook, in standard URL form (`[scheme://]host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.",
"url": "`url` gives the location of the webhook, in standard URL form (`scheme://host:port/path`). Exactly one of `url` or `service` must be specified.\n\nThe `host` should not refer to a service running in the cluster; use the `service` field instead. The host might be resolved via external DNS in some apiservers (e.g., `kube-apiserver` cannot resolve in-cluster DNS as that would be a layering violation). `host` may also be an IP address.\n\nPlease note that using `localhost` or `127.0.0.1` as a `host` is risky unless you take great care to run this webhook on all hosts which run an apiserver which might need to make calls to this webhook. Such installs are likely to be non-portable, i.e., not easy to turn up in a new cluster.\n\nThe scheme must be \"https\"; the URL must begin with \"https://\".\n\nA path is optional, and if present may be any string permissible in a URL. You may use the path to pass an arbitrary string to the webhook, for example, a cluster identifier.\n\nAttempting to use a user or basic auth e.g. \"user:password@\" is not allowed. Fragments (\"#...\") and query parameters (\"?...\") are not allowed, either.",
"service": "`service` is a reference to the service for this webhook. Either `service` or `url` must be specified.\n\nIf the webhook is running within the cluster, then you should use `service`.\n\nPort 443 will be used if it is open, otherwise it is an error.",
"caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. Required.",
"caBundle": "`caBundle` is a PEM encoded CA bundle which will be used to validate the webhook's server certificate. If unspecified, system trust roots on the apiserver are used.",
}

func (WebhookClientConfig) SwaggerDoc() map[string]string {
@@ -109,6 +109,11 @@ func (in *Rule) DeepCopyInto(out *Rule) {
*out = make([]string, len(*in))
copy(*out, *in)
}
if in.Scope != nil {
in, out := &in.Scope, &out.Scope
*out = new(ScopeType)
**out = **in
}
return
}

@@ -257,6 +262,16 @@ func (in *Webhook) DeepCopyInto(out *Webhook) {
*out = new(SideEffectClass)
**out = **in
}
if in.TimeoutSeconds != nil {
in, out := &in.TimeoutSeconds, &out.TimeoutSeconds
*out = new(int32)
**out = **in
}
if in.AdmissionReviewVersions != nil {
in, out := &in.AdmissionReviewVersions, &out.AdmissionReviewVersions
*out = make([]string, len(*in))
copy(*out, *in)
}
return
}

@@ -15,6 +15,7 @@ limitations under the License.
*/

// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true

package v1 // import "k8s.io/api/apps/v1"
@@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by protoc-gen-gogo.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1/generated.proto
// DO NOT EDIT!

/*
Package v1 is a generated protocol buffer package.
@@ -1440,24 +1439,6 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}

func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
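The deleted encodeFixed helpers were unused; the varint encoder kept above is what the generated code actually relies on. For reference, a self-contained sketch of the same base-128 scheme and its matching decode loop (not part of the diff):

package example

// putUvarint mirrors encodeVarintGenerated: emit seven bits per byte,
// setting the high bit on every byte except the last.
func putUvarint(dst []byte, offset int, v uint64) int {
	for v >= 1<<7 {
		dst[offset] = uint8(v&0x7f | 0x80)
		v >>= 7
		offset++
	}
	dst[offset] = uint8(v)
	return offset + 1
}

// getUvarint is the decode loop the Unmarshal hunks below repeat inline.
func getUvarint(src []byte) (v uint64, n int) {
	for shift := uint(0); ; shift += 7 {
		b := src[n]
		n++
		v |= (uint64(b) & 0x7F) << shift
		if b < 0x80 {
			return v, n
		}
	}
}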
@@ -32,6 +32,8 @@ const (
)

// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// StatefulSet represents a set of pods with consistent identities.
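The two +genclient:method markers ask the client generator to emit GetScale and UpdateScale on the typed StatefulSet client. A hedged usage sketch against a client-go of the same vintage (the clientset, namespace, and name are illustrative):

package example

import (
	metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
	"k8s.io/client-go/kubernetes"
)

// currentReplicas reads the scale subresource exposed by the
// generated GetScale method.
func currentReplicas(cs kubernetes.Interface) (int32, error) {
	scale, err := cs.AppsV1().StatefulSets("default").GetScale("web", metav1.GetOptions{})
	if err != nil {
		return 0, err
	}
	return scale.Spec.Replicas, nil
}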
@@ -67,7 +69,7 @@ const (
// ParallelPodManagement will create and delete pods as soon as the stateful set
// replica count is changed, and will not wait for pods to be ready or complete
// termination.
ParallelPodManagement = "Parallel"
ParallelPodManagement PodManagementPolicyType = "Parallel"
)

// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
@@ -244,6 +246,8 @@ type StatefulSetList struct {
}

// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Deployment enables declarative updates for Pods and ReplicaSets.
@@ -279,7 +283,8 @@ type DeploymentSpec struct {

// The deployment strategy to use to replace existing pods with new ones.
// +optional
Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"`
// +patchStrategy=retainKeys
Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"`

// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.
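The new patchStrategy=retainKeys marker changes strategic-merge-patch semantics for strategy: a patch carrying the $retainKeys directive keeps only the listed keys, so switching to Recreate can also clear a stale rollingUpdate block. A hedged sketch of such a patch body, per the documented directive (the exact shape is illustrative):

package example

// recreatePatch switches the strategy type and, via $retainKeys,
// drops any strategy keys not listed (such as rollingUpdate).
const recreatePatch = `{
  "spec": {
    "strategy": {
      "$retainKeys": ["type"],
      "type": "Recreate"
    }
  }
}`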
@@ -653,6 +658,8 @@ type DaemonSetList struct {
}

// +genclient
// +genclient:method=GetScale,verb=get,subresource=scale,result=k8s.io/api/autoscaling/v1.Scale
// +genclient:method=UpdateScale,verb=update,subresource=scale,input=k8s.io/api/autoscaling/v1.Scale,result=k8s.io/api/autoscaling/v1.Scale
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// ReplicaSet ensures that a specified number of pod replicas are running at any given time.
@@ -96,7 +96,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string {
}

var map_DaemonSetStatus = map[string]string{
"": "DaemonSetStatus represents the current status of a daemon set.",
"currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
"numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
"desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
@@ -15,6 +15,7 @@ limitations under the License.
*/

// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true

package v1beta1 // import "k8s.io/api/apps/v1beta1"
@@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by protoc-gen-gogo.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta1/generated.proto
// DO NOT EDIT!

/*
Package v1beta1 is a generated protocol buffer package.
@@ -1091,24 +1090,6 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}

func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -2552,51 +2533,14 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
var keykey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
keykey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
if m.UpdatedAnnotations == nil {
m.UpdatedAnnotations = make(map[string]string)
}
if iNdEx < postIndex {
var valuekey uint64
var mapkey string
var mapvalue string
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -2606,41 +2550,80 @@ func (m *DeploymentRollback) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
valuekey |= (uint64(b) & 0x7F) << shift
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if iNdEx >= l {
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
} else {
iNdEx = entryPreIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
m.UpdatedAnnotations[mapkey] = mapvalue
} else {
var mapvalue string
m.UpdatedAnnotations[mapkey] = mapvalue
}
m.UpdatedAnnotations[mapkey] = mapvalue
iNdEx = postIndex
case 3:
if wireType != 2 {
@@ -3833,51 +3816,14 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
var keykey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
keykey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
if m.Selector == nil {
m.Selector = make(map[string]string)
}
if iNdEx < postIndex {
var valuekey uint64
var mapkey string
var mapvalue string
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -3887,41 +3833,80 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
valuekey |= (uint64(b) & 0x7F) << shift
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if iNdEx >= l {
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
} else {
iNdEx = entryPreIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
m.Selector[mapkey] = mapvalue
} else {
var mapvalue string
m.Selector[mapkey] = mapvalue
}
m.Selector[mapkey] = mapvalue
iNdEx = postIndex
case 3:
if wireType != 2 {
@@ -17,7 +17,7 @@ limitations under the License.
package v1beta1

import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -55,8 +55,6 @@ type ScaleStatus struct {
TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"`
}

// +genclient
// +genclient:noVerbs
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Scale represents a scaling request for a resource.
@@ -113,7 +111,7 @@ const (
// ParallelPodManagement will create and delete pods as soon as the stateful set
// replica count is changed, and will not wait for pods to be ready or complete
// termination.
ParallelPodManagement = "Parallel"
ParallelPodManagement PodManagementPolicyType = "Parallel"
)

// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
@@ -323,7 +321,8 @@ type DeploymentSpec struct {

// The deployment strategy to use to replace existing pods with new ones.
// +optional
Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"`
// +patchStrategy=retainKeys
Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"`

// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.
@@ -434,7 +433,7 @@ type RollingUpdateDeployment struct {
// the rolling update starts, such that the total number of old and new pods do not exceed
// 130% of desired pods. Once old pods have been killed,
// new ReplicaSet can be scaled up further, ensuring that total number of pods running
// at any time during the update is atmost 130% of desired pods.
// at any time during the update is at most 130% of desired pods.
// +optional
MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"`
}
@@ -149,7 +149,7 @@ func (RollbackConfig) SwaggerDoc() map[string]string {
var map_RollingUpdateDeployment = map[string]string{
"": "Spec to control the desired behavior of rolling update.",
"maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.",
"maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.",
"maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.",
}

func (RollingUpdateDeployment) SwaggerDoc() map[string]string {
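As the two strings above spell out, maxSurge rounds its percentage up while maxUnavailable rounds down. A small worked sketch with the apimachinery intstr helper, assuming GetValueFromIntOrPercent as it existed in this dependency vintage:

package example

import (
	"k8s.io/apimachinery/pkg/util/intstr"
)

// surgeFor resolves a "30%" maxSurge against 10 desired replicas:
// ceil(10 * 0.30) = 3 extra pods, so at most 13 run during the update.
func surgeFor(replicas int) (int, error) {
	maxSurge := intstr.FromString("30%")
	return intstr.GetValueFromIntOrPercent(&maxSurge, replicas, true) // roundUp
}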
@@ -15,6 +15,7 @@ limitations under the License.
*/

// +k8s:deepcopy-gen=package
// +k8s:protobuf-gen=package
// +k8s:openapi-gen=true

package v1beta2 // import "k8s.io/api/apps/v1beta2"
@@ -14,9 +14,8 @@ See the License for the specific language governing permissions and
limitations under the License.
*/

// Code generated by protoc-gen-gogo.
// Code generated by protoc-gen-gogo. DO NOT EDIT.
// source: k8s.io/kubernetes/vendor/k8s.io/api/apps/v1beta2/generated.proto
// DO NOT EDIT!

/*
Package v1beta2 is a generated protocol buffer package.
@@ -1570,24 +1569,6 @@ func (m *StatefulSetUpdateStrategy) MarshalTo(dAtA []byte) (int, error) {
return i, nil
}

func encodeFixed64Generated(dAtA []byte, offset int, v uint64) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
dAtA[offset+4] = uint8(v >> 32)
dAtA[offset+5] = uint8(v >> 40)
dAtA[offset+6] = uint8(v >> 48)
dAtA[offset+7] = uint8(v >> 56)
return offset + 8
}
func encodeFixed32Generated(dAtA []byte, offset int, v uint32) int {
dAtA[offset] = uint8(v)
dAtA[offset+1] = uint8(v >> 8)
dAtA[offset+2] = uint8(v >> 16)
dAtA[offset+3] = uint8(v >> 24)
return offset + 4
}
func encodeVarintGenerated(dAtA []byte, offset int, v uint64) int {
for v >= 1<<7 {
dAtA[offset] = uint8(v&0x7f | 0x80)
@@ -6109,51 +6090,14 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error {
if postIndex > l {
return io.ErrUnexpectedEOF
}
var keykey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
keykey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
mapkey := string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
if m.Selector == nil {
m.Selector = make(map[string]string)
}
if iNdEx < postIndex {
var valuekey uint64
var mapkey string
var mapvalue string
for iNdEx < postIndex {
entryPreIndex := iNdEx
var wire uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
@@ -6163,41 +6107,80 @@ func (m *ScaleStatus) Unmarshal(dAtA []byte) error {
}
b := dAtA[iNdEx]
iNdEx++
valuekey |= (uint64(b) & 0x7F) << shift
wire |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
fieldNum := int32(wire >> 3)
if fieldNum == 1 {
var stringLenmapkey uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapkey |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
if iNdEx >= l {
intStringLenmapkey := int(stringLenmapkey)
if intStringLenmapkey < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapkey := iNdEx + intStringLenmapkey
if postStringIndexmapkey > l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
mapkey = string(dAtA[iNdEx:postStringIndexmapkey])
iNdEx = postStringIndexmapkey
} else if fieldNum == 2 {
var stringLenmapvalue uint64
for shift := uint(0); ; shift += 7 {
if shift >= 64 {
return ErrIntOverflowGenerated
}
if iNdEx >= l {
return io.ErrUnexpectedEOF
}
b := dAtA[iNdEx]
iNdEx++
stringLenmapvalue |= (uint64(b) & 0x7F) << shift
if b < 0x80 {
break
}
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue = string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
} else {
iNdEx = entryPreIndex
skippy, err := skipGenerated(dAtA[iNdEx:])
if err != nil {
return err
}
if skippy < 0 {
return ErrInvalidLengthGenerated
}
if (iNdEx + skippy) > postIndex {
return io.ErrUnexpectedEOF
}
iNdEx += skippy
}
intStringLenmapvalue := int(stringLenmapvalue)
if intStringLenmapvalue < 0 {
return ErrInvalidLengthGenerated
}
postStringIndexmapvalue := iNdEx + intStringLenmapvalue
if postStringIndexmapvalue > l {
return io.ErrUnexpectedEOF
}
mapvalue := string(dAtA[iNdEx:postStringIndexmapvalue])
iNdEx = postStringIndexmapvalue
m.Selector[mapkey] = mapvalue
} else {
var mapvalue string
m.Selector[mapkey] = mapvalue
}
m.Selector[mapkey] = mapvalue
iNdEx = postIndex
case 3:
if wireType != 2 {
@@ -17,7 +17,7 @@ limitations under the License.
package v1beta2

import (
"k8s.io/api/core/v1"
v1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
runtime "k8s.io/apimachinery/pkg/runtime"
"k8s.io/apimachinery/pkg/util/intstr"
@@ -57,22 +57,20 @@ type ScaleStatus struct {
TargetSelector string `json:"targetSelector,omitempty" protobuf:"bytes,3,opt,name=targetSelector"`
}

// +genclient
// +genclient:noVerbs
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object

// Scale represents a scaling request for a resource.
type Scale struct {
metav1.TypeMeta `json:",inline"`
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.
// Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

// defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.
// defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.
// +optional
Spec ScaleSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`

// current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.
// current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.
// +optional
Status ScaleStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
@@ -117,7 +115,7 @@ const (
// ParallelPodManagement will create and delete pods as soon as the stateful set
// replica count is changed, and will not wait for pods to be ready or complete
// termination.
ParallelPodManagement = "Parallel"
ParallelPodManagement PodManagementPolicyType = "Parallel"
)

// StatefulSetUpdateStrategy indicates the strategy that the StatefulSet
@@ -331,7 +329,8 @@ type DeploymentSpec struct {

// The deployment strategy to use to replace existing pods with new ones.
// +optional
Strategy DeploymentStrategy `json:"strategy,omitempty" protobuf:"bytes,4,opt,name=strategy"`
// +patchStrategy=retainKeys
Strategy DeploymentStrategy `json:"strategy,omitempty" patchStrategy:"retainKeys" protobuf:"bytes,4,opt,name=strategy"`

// Minimum number of seconds for which a newly created pod should be ready
// without any of its container crashing, for it to be considered available.
@@ -414,7 +413,7 @@ type RollingUpdateDeployment struct {
// the rolling update starts, such that the total number of old and new pods do not exceed
// 130% of desired pods. Once old pods have been killed,
// new ReplicaSet can be scaled up further, ensuring that total number of pods running
// at any time during the update is atmost 130% of desired pods.
// at any time during the update is at most 130% of desired pods.
// +optional
MaxSurge *intstr.IntOrString `json:"maxSurge,omitempty" protobuf:"bytes,2,opt,name=maxSurge"`
}
@@ -667,12 +666,12 @@ type DaemonSetCondition struct {
type DaemonSet struct {
metav1.TypeMeta `json:",inline"`
// Standard object's metadata.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
// +optional
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`

// The desired behavior of this daemon set.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Spec DaemonSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`

@@ -680,7 +679,7 @@ type DaemonSet struct {
// out of date by some window of time.
// Populated by the system.
// Read-only.
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
// +optional
Status DaemonSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
}
@ -698,7 +697,7 @@ const (
|
|||
type DaemonSetList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard list metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
|
@ -717,12 +716,12 @@ type ReplicaSet struct {
|
|||
|
||||
// If the Labels of a ReplicaSet are empty, they are defaulted to
|
||||
// be the same as the Pod(s) that the ReplicaSet manages.
|
||||
// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
|
||||
// Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
// Spec defines the specification of the desired behavior of the ReplicaSet.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
|
||||
// +optional
|
||||
Spec ReplicaSetSpec `json:"spec,omitempty" protobuf:"bytes,2,opt,name=spec"`
|
||||
|
||||
|
@ -730,7 +729,7 @@ type ReplicaSet struct {
|
|||
// This data may be out of date by some window of time.
|
||||
// Populated by the system.
|
||||
// Read-only.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status
|
||||
// +optional
|
||||
Status ReplicaSetStatus `json:"status,omitempty" protobuf:"bytes,3,opt,name=status"`
|
||||
}
|
||||
|
@ -741,7 +740,7 @@ type ReplicaSet struct {
|
|||
type ReplicaSetList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard list metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
|
@ -851,7 +850,7 @@ type ReplicaSetCondition struct {
|
|||
type ControllerRevision struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
// Standard object's metadata.
|
||||
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
metav1.ObjectMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
|
@ -868,7 +867,7 @@ type ControllerRevision struct {
|
|||
type ControllerRevisionList struct {
|
||||
metav1.TypeMeta `json:",inline"`
|
||||
|
||||
// More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata
|
||||
// More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata
|
||||
// +optional
|
||||
metav1.ListMeta `json:"metadata,omitempty" protobuf:"bytes,1,opt,name=metadata"`
|
||||
|
||||
|
|
|
@ -29,7 +29,7 @@ package v1beta2
// AUTO-GENERATED FUNCTIONS START HERE. DO NOT EDIT.
var map_ControllerRevision = map[string]string{
    "": "DEPRECATED - This group version of ControllerRevision is deprecated by apps/v1/ControllerRevision. See the release notes for more information. ControllerRevision implements an immutable snapshot of state data. Clients are responsible for serializing and deserializing the objects that contain their internal state. Once a ControllerRevision has been successfully created, it can not be updated. The API Server will fail validation of all requests that attempt to mutate the Data field. ControllerRevisions may, however, be deleted. Note that, due to its use by both the DaemonSet and StatefulSet controllers for update and rollback, this object is beta. However, it may be subject to name and representation changes in future releases, and clients should not depend on its stability. It is primarily for internal use by controllers.",
    "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
    "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    "data": "Data is the serialized representation of the state.",
    "revision": "Revision indicates the revision of the state represented by Data.",
}
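Aside (not part of the diff): these generated maps back each type's SwaggerDoc method, which the OpenAPI tooling reads; the empty key carries the type-level description. A minimal sketch of looking one up:

```go
package main

import (
	"fmt"

	appsv1beta2 "k8s.io/api/apps/v1beta2"
)

func main() {
	// SwaggerDoc returns map_ControllerRevision above, keyed by JSON field
	// name; docs[""] would be the deprecation notice for the whole type.
	docs := appsv1beta2.ControllerRevision{}.SwaggerDoc()
	fmt.Println(docs["metadata"])
}
```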
@ -40,7 +40,7 @@ func (ControllerRevision) SwaggerDoc() map[string]string {

var map_ControllerRevisionList = map[string]string{
    "": "ControllerRevisionList is a resource containing a list of ControllerRevision objects.",
    "metadata": "More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
    "metadata": "More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    "items": "Items is the list of ControllerRevisions",
}

@ -50,9 +50,9 @@ func (ControllerRevisionList) SwaggerDoc() map[string]string {

var map_DaemonSet = map[string]string{
    "": "DEPRECATED - This group version of DaemonSet is deprecated by apps/v1/DaemonSet. See the release notes for more information. DaemonSet represents the configuration of a daemon set.",
    "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
    "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
    "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
    "metadata": "Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    "spec": "The desired behavior of this daemon set. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
    "status": "The current status of this daemon set. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
}

func (DaemonSet) SwaggerDoc() map[string]string {

@ -74,7 +74,7 @@ func (DaemonSetCondition) SwaggerDoc() map[string]string {

var map_DaemonSetList = map[string]string{
    "": "DaemonSetList is a collection of daemon sets.",
    "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
    "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    "items": "A list of daemon sets.",
}

@ -96,7 +96,7 @@ func (DaemonSetSpec) SwaggerDoc() map[string]string {
}

var map_DaemonSetStatus = map[string]string{
    "": "DaemonSetStatus represents the current status of a daemon set.",
    "": "DaemonSetStatus represents the current status of a daemon set.",
    "currentNumberScheduled": "The number of nodes that are running at least 1 daemon pod and are supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
    "numberMisscheduled": "The number of nodes that are running the daemon pod, but are not supposed to run the daemon pod. More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",
    "desiredNumberScheduled": "The total number of nodes that should be running the daemon pod (including nodes correctly running the daemon pod). More info: https://kubernetes.io/docs/concepts/workloads/controllers/daemonset/",

@ -202,9 +202,9 @@ func (DeploymentStrategy) SwaggerDoc() map[string]string {

var map_ReplicaSet = map[string]string{
    "": "DEPRECATED - This group version of ReplicaSet is deprecated by apps/v1/ReplicaSet. See the release notes for more information. ReplicaSet ensures that a specified number of pod replicas are running at any given time.",
    "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata",
    "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
    "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status",
    "metadata": "If the Labels of a ReplicaSet are empty, they are defaulted to be the same as the Pod(s) that the ReplicaSet manages. Standard object's metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata",
    "spec": "Spec defines the specification of the desired behavior of the ReplicaSet. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
    "status": "Status is the most recently observed status of the ReplicaSet. This data may be out of date by some window of time. Populated by the system. Read-only. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status",
}

func (ReplicaSet) SwaggerDoc() map[string]string {

@ -226,7 +226,7 @@ func (ReplicaSetCondition) SwaggerDoc() map[string]string {

var map_ReplicaSetList = map[string]string{
    "": "ReplicaSetList is a collection of ReplicaSets.",
    "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds",
    "metadata": "Standard list metadata. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#types-kinds",
    "items": "List of ReplicaSets. More info: https://kubernetes.io/docs/concepts/workloads/controllers/replicationcontroller",
}

@ -272,7 +272,7 @@ func (RollingUpdateDaemonSet) SwaggerDoc() map[string]string {
var map_RollingUpdateDeployment = map[string]string{
    "": "Spec to control the desired behavior of rolling update.",
    "maxUnavailable": "The maximum number of pods that can be unavailable during the update. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). Absolute number is calculated from percentage by rounding down. This can not be 0 if MaxSurge is 0. Defaults to 25%. Example: when this is set to 30%, the old ReplicaSet can be scaled down to 70% of desired pods immediately when the rolling update starts. Once new pods are ready, old ReplicaSet can be scaled down further, followed by scaling up the new ReplicaSet, ensuring that the total number of pods available at all times during the update is at least 70% of desired pods.",
    "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is atmost 130% of desired pods.",
    "maxSurge": "The maximum number of pods that can be scheduled above the desired number of pods. Value can be an absolute number (ex: 5) or a percentage of desired pods (ex: 10%). This can not be 0 if MaxUnavailable is 0. Absolute number is calculated from percentage by rounding up. Defaults to 25%. Example: when this is set to 30%, the new ReplicaSet can be scaled up immediately when the rolling update starts, such that the total number of old and new pods do not exceed 130% of desired pods. Once old pods have been killed, new ReplicaSet can be scaled up further, ensuring that total number of pods running at any time during the update is at most 130% of desired pods.",
}

func (RollingUpdateDeployment) SwaggerDoc() map[string]string {

@ -290,9 +290,9 @@ func (RollingUpdateStatefulSetStrategy) SwaggerDoc() map[string]string {

var map_Scale = map[string]string{
    "": "Scale represents a scaling request for a resource.",
    "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#metadata.",
    "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status.",
    "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#spec-and-status. Read-only.",
    "metadata": "Standard object metadata; More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#metadata.",
    "spec": "defines the behavior of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status.",
    "status": "current status of the scale. More info: https://git.k8s.io/community/contributors/devel/sig-architecture/api-conventions.md#spec-and-status. Read-only.",
}

func (Scale) SwaggerDoc() map[string]string {
Some files were not shown because too many files have changed in this diff.