Merge pull request #22 from dlorenc/master
Cleanup unused godeps after removing localkubectl dependency.
commit 426ae543f3

@@ -6,79 +6,11 @@
"./..."
|
||||
],
|
||||
"Deps": [
|
||||
{
|
||||
"ImportPath": "bitbucket.org/ww/goautoneg",
|
||||
"Comment": "null-5",
|
||||
"Rev": "'75cd24fc2f2c2a2088577d12123ddee5f54e0675'"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/BurntSushi/toml",
|
||||
"Comment": "v0.2.0",
|
||||
"Rev": "bbd5bb678321a0d6e58f1099321dfa73391c1b6f"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/Sirupsen/logrus",
|
||||
"Comment": "v0.6.2-10-g51fe59a",
|
||||
"Rev": "51fe59aca108dc5680109e7b2051cbdcfa5a253c"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/beorn7/perks/quantile",
|
||||
"Rev": "b965b613227fddccbfffe13eae360ed3fa822f8d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/blang/semver",
|
||||
"Comment": "v3.0.1",
|
||||
"Rev": "31b736133b98f26d5e078ec9eb591666edfd091f"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/codegangsta/cli",
|
||||
"Comment": "1.2.0-235-gbc465be",
|
||||
"Rev": "bc465becccd1d527002fda095fc3c19d9c115029"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/cpuguy83/go-md2man/md2man",
|
||||
"Comment": "v1.0.4",
|
||||
"Rev": "71acacd42f85e5e82f70a55327789582a5200a90"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/davecgh/go-spew/spew",
|
||||
"Rev": "3e6e67c4dcea3ac2f25fd4731abc0e1deaf36216"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/docker/pkg/jsonmessage",
|
||||
"Comment": "v1.4.1-4831-g0f5c9d3",
|
||||
"Rev": "0f5c9d301b9b1cca66b3ea0f9dec3b5317d3686d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/docker/pkg/mount",
|
||||
"Comment": "v1.4.1-4831-g0f5c9d3",
|
||||
"Rev": "0f5c9d301b9b1cca66b3ea0f9dec3b5317d3686d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/docker/pkg/parsers",
|
||||
"Comment": "v1.4.1-4831-g0f5c9d3",
|
||||
"Rev": "0f5c9d301b9b1cca66b3ea0f9dec3b5317d3686d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/docker/pkg/term",
|
||||
"Comment": "v1.4.1-4831-g0f5c9d3",
|
||||
"Rev": "0f5c9d301b9b1cca66b3ea0f9dec3b5317d3686d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/docker/pkg/term/winconsole",
|
||||
"Comment": "v1.4.1-4831-g0f5c9d3",
|
||||
"Rev": "0f5c9d301b9b1cca66b3ea0f9dec3b5317d3686d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/docker/pkg/timeutils",
|
||||
"Comment": "v1.4.1-4831-g0f5c9d3",
|
||||
"Rev": "0f5c9d301b9b1cca66b3ea0f9dec3b5317d3686d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/docker/pkg/units",
|
||||
"Comment": "v1.4.1-4831-g0f5c9d3",
|
||||
"Rev": "0f5c9d301b9b1cca66b3ea0f9dec3b5317d3686d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/docker/go-units",
|
||||
"Comment": "v0.1.0-21-g0bbddae",
|
||||
|
@@ -224,280 +156,22 @@
|
|||
"Comment": "v0.7.0-10-gcbb8749",
|
||||
"Rev": "cbb87491e18986311ce166131ab410d892489eb0"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/emicklei/go-restful",
|
||||
"Comment": "v1.2",
|
||||
"Rev": "777bb3f19bcafe2575ffb2a3e46af92509ae9594"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/emicklei/go-restful/log",
|
||||
"Comment": "v1.2",
|
||||
"Rev": "777bb3f19bcafe2575ffb2a3e46af92509ae9594"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/emicklei/go-restful/swagger",
|
||||
"Comment": "v1.2",
|
||||
"Rev": "777bb3f19bcafe2575ffb2a3e46af92509ae9594"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/evanphx/json-patch",
|
||||
"Rev": "7dd4489c2eb6073e5a9d7746c3274c5b5f0387df"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient",
|
||||
"Rev": "0099401a7342ad77e71ca9f9a57c5e72fb80f6b2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/Sirupsen/logrus",
|
||||
"Rev": "0099401a7342ad77e71ca9f9a57c5e72fb80f6b2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/opts",
|
||||
"Rev": "0099401a7342ad77e71ca9f9a57c5e72fb80f6b2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/archive",
|
||||
"Rev": "0099401a7342ad77e71ca9f9a57c5e72fb80f6b2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/fileutils",
|
||||
"Rev": "0099401a7342ad77e71ca9f9a57c5e72fb80f6b2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/homedir",
|
||||
"Rev": "0099401a7342ad77e71ca9f9a57c5e72fb80f6b2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/idtools",
|
||||
"Rev": "0099401a7342ad77e71ca9f9a57c5e72fb80f6b2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/ioutils",
|
||||
"Rev": "0099401a7342ad77e71ca9f9a57c5e72fb80f6b2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/longpath",
|
||||
"Rev": "0099401a7342ad77e71ca9f9a57c5e72fb80f6b2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/pools",
|
||||
"Rev": "0099401a7342ad77e71ca9f9a57c5e72fb80f6b2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/promise",
|
||||
"Rev": "0099401a7342ad77e71ca9f9a57c5e72fb80f6b2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/stdcopy",
|
||||
"Rev": "0099401a7342ad77e71ca9f9a57c5e72fb80f6b2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/docker/pkg/system",
|
||||
"Rev": "0099401a7342ad77e71ca9f9a57c5e72fb80f6b2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/docker/go-units",
|
||||
"Rev": "0099401a7342ad77e71ca9f9a57c5e72fb80f6b2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/hashicorp/go-cleanhttp",
|
||||
"Rev": "0099401a7342ad77e71ca9f9a57c5e72fb80f6b2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/github.com/opencontainers/runc/libcontainer/user",
|
||||
"Rev": "0099401a7342ad77e71ca9f9a57c5e72fb80f6b2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/golang.org/x/net/context",
|
||||
"Rev": "0099401a7342ad77e71ca9f9a57c5e72fb80f6b2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/fsouza/go-dockerclient/external/golang.org/x/sys/unix",
|
||||
"Rev": "0099401a7342ad77e71ca9f9a57c5e72fb80f6b2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/ghodss/yaml",
|
||||
"Rev": "73d445a93680fa1a78ae23a5839bad48f32ba1ee"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/glog",
|
||||
"Rev": "44145f04b68cf362d9c4df2182967c2275eaefed"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/groupcache/lru",
|
||||
"Rev": "604ed5785183e59ae2789449d89e73f3a2a77987"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/golang/protobuf/proto",
|
||||
"Rev": "b982704f8bb716bb608144408cff30e15fbde841"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/cadvisor/info/v1",
|
||||
"Comment": "v0.22.2",
|
||||
"Rev": "546a3771589bdb356777c646c6eca24914fdd48b"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/google/gofuzz",
|
||||
"Rev": "bbcb9da2d746f8bdbd6a936686a0a6067ada0ec5"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/hcl",
|
||||
"Rev": "27a57f2605e04995c111273c263d51cee60d9bc4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/hcl/hcl/ast",
|
||||
"Rev": "27a57f2605e04995c111273c263d51cee60d9bc4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/hcl/hcl/parser",
|
||||
"Rev": "27a57f2605e04995c111273c263d51cee60d9bc4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/hcl/hcl/scanner",
|
||||
"Rev": "27a57f2605e04995c111273c263d51cee60d9bc4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/hcl/hcl/strconv",
|
||||
"Rev": "27a57f2605e04995c111273c263d51cee60d9bc4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/hcl/hcl/token",
|
||||
"Rev": "27a57f2605e04995c111273c263d51cee60d9bc4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/hcl/json/parser",
|
||||
"Rev": "27a57f2605e04995c111273c263d51cee60d9bc4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/hcl/json/scanner",
|
||||
"Rev": "27a57f2605e04995c111273c263d51cee60d9bc4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/hashicorp/hcl/json/token",
|
||||
"Rev": "27a57f2605e04995c111273c263d51cee60d9bc4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/imdario/mergo",
|
||||
"Comment": "0.1.3-8-g6633656",
|
||||
"Rev": "6633656539c1639d9d78127b7d47c622b5d7b6dc"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/inconshreveable/mousetrap",
|
||||
"Rev": "76626ae9c91c4f2a10f34cad8ce83ea42c93bb75"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/juju/ratelimit",
|
||||
"Rev": "77ed1c8a01217656d2080ad51981f6e99adaa177"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/kr/pretty",
|
||||
"Comment": "go.weekly.2011-12-22-29-gadd1dbc",
|
||||
"Rev": "add1dbc86daf0f983cd4a48ceb39deb95c729b67"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/kr/text",
|
||||
"Rev": "bb797dc4fb8320488f47bf11de07a733d7233e1f"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/magiconair/properties",
|
||||
"Comment": "v1.7.0",
|
||||
"Rev": "c265cfa48dda6474e208715ca93e987829f572f8"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/matttproud/golang_protobuf_extensions/pbutil",
|
||||
"Rev": "fc2b8d3a73c4867e51861bbdd5ae3c1f0869dd6a"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mitchellh/go-homedir",
|
||||
"Rev": "981ab348d865cf048eb7d17e78ac7192632d8415"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/mitchellh/mapstructure",
|
||||
"Rev": "d2dd0262208475919e1a362f675cfc0e7c10e905"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups",
|
||||
"Comment": "v0.0.7",
|
||||
"Rev": "7ca2aa4873aea7cb4265b1726acb24b90d8726c6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/cgroups/fs",
|
||||
"Comment": "v0.0.7",
|
||||
"Rev": "7ca2aa4873aea7cb4265b1726acb24b90d8726c6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/configs",
|
||||
"Comment": "v0.0.7",
|
||||
"Rev": "7ca2aa4873aea7cb4265b1726acb24b90d8726c6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/opencontainers/runc/libcontainer/system",
|
||||
"Comment": "v0.0.7",
|
||||
"Rev": "7ca2aa4873aea7cb4265b1726acb24b90d8726c6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/pborman/uuid",
|
||||
"Rev": "ca53cad383cad2479bbba7f7a1a05797ec1386e4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/client_golang/prometheus",
|
||||
"Comment": "0.7.0-39-g3b78d7a",
|
||||
"Rev": "3b78d7a77f51ccbc364d4bc170920153022cfd08"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/client_model/go",
|
||||
"Comment": "model-0.0.2-12-gfa8ad6f",
|
||||
"Rev": "fa8ad6fec33561be4280a8f0514318c79d7f6cb6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/common/expfmt",
|
||||
"Rev": "ef7a9a5fb138aa5d3a19988537606226869a0390"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/common/model",
|
||||
"Rev": "ef7a9a5fb138aa5d3a19988537606226869a0390"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/prometheus/procfs",
|
||||
"Rev": "490cc6eb5fa45bf8a8b7b73c8bc82a8160e8531d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/russross/blackfriday",
|
||||
"Comment": "v1.2-42-g77efab5",
|
||||
"Rev": "77efab57b2f74dd3f9051c79752b2e8995c8b789"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/samalba/dockerclient",
|
||||
"Rev": "91d7393ff85980ba3a8966405871a3d446ca28f2"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/shurcooL/sanitized_anchor_name",
|
||||
"Rev": "9a8b7d4e8f347bfa230879db9d7d4e4d9e19f962"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/spf13/cast",
|
||||
"Rev": "27b586b42e29bec072fe7379259cc719e1289da6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/spf13/cobra",
|
||||
"Rev": "4c05eb1145f16d0e6bb4a3e1b6d769f4713cb41f"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/spf13/jwalterweatherman",
|
||||
"Rev": "33c24e77fb80341fe7130ee7c594256ff08ccc46"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/spf13/pflag",
|
||||
"Rev": "8f6a28b0916586e7f22fe931ae2fcfc380b1c0e6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/spf13/viper",
|
||||
"Rev": "c975dc1b4eacf4ec7fdbf0873638de5d090ba323"
|
||||
},
|
||||
{
|
||||
"ImportPath": "github.com/ugorji/go/codec",
|
||||
"Rev": "f4485b318aadd133842532f841dc205a8e339d74"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/crypto/curve25519",
|
||||
"Rev": "c84e1f8e3a7e322d497cd16c0e8a13c7e127baf3"
|
||||
|
@@ -510,511 +184,9 @@
|
|||
"ImportPath": "golang.org/x/crypto/ssh/terminal",
|
||||
"Rev": "c84e1f8e3a7e322d497cd16c0e8a13c7e127baf3"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/net/context",
|
||||
"Rev": "c2528b2dd8352441850638a8bb678c2ad056fd3e"
|
||||
},
|
||||
{
|
||||
"ImportPath": "golang.org/x/sys/windows/registry",
|
||||
"Rev": "833a04a10549a95dc34458c195cbad61bbb6cb4d"
|
||||
},
|
||||
{
|
||||
"ImportPath": "gopkg.in/fsnotify.v1",
|
||||
"Comment": "v1.2.11",
|
||||
"Rev": "836bfd95fecc0f1511dd66bdbf2b5b61ab8b00b6"
|
||||
},
|
||||
{
|
||||
"ImportPath": "gopkg.in/yaml.v2",
|
||||
"Rev": "a83829b6f1293c91addabc89d0571c246397bbf4"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/api",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/api/endpoints",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/api/errors",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/api/install",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/api/meta",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/api/pod",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/api/resource",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/api/service",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/api/unversioned",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/api/util",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/api/v1",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/api/validation",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apimachinery",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apimachinery/registered",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/authorization",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/install",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/authorization/v1beta1",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/install",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/autoscaling/v1",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/batch",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/batch/install",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/batch/v1",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig/install",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/componentconfig/v1alpha1",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/extensions",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/install",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/extensions/v1beta1",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/metrics",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/metrics/install",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/apis/metrics/v1alpha1",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/auth/user",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/capabilities",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/cache",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/clientset_generated/internalclientset",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/metrics",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/record",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/restclient",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/transport",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/typed/discovery",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/typed/generated/core/unversioned",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/typed/generated/extensions/unversioned",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/unversioned",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/auth",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/latest",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/client/unversioned/clientcmd/api/v1",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/controller",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/controller/framework",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/conversion",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/conversion/queryparams",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/credentialprovider",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/fieldpath",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/fields",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/kubectl",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd/config",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/kubectl/cmd/util",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/kubectl/resource",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/kubelet/qos",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/kubelet/qos/util",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/labels",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/master/ports",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/runtime",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/json",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/recognizer",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/runtime/serializer/versioning",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/types",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/util",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/util/deployment",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/util/errors",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/util/hash",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/util/integer",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/util/intstr",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/util/jsonpath",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/util/labels",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/util/net",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/util/net/sets",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/util/parsers",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/util/pod",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/util/rand",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/util/runtime",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/util/sets",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/util/strategicpatch",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/util/validation",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/util/validation/field",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/util/wait",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/util/yaml",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/version",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/watch",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/pkg/watch/json",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/third_party/forked/json",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/third_party/forked/reflect",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "k8s.io/kubernetes/third_party/golang/template",
|
||||
"Comment": "v1.2.1",
|
||||
"Rev": "50809107cd47a1f62da362bccefdd9e6f7076145"
|
||||
},
|
||||
{
|
||||
"ImportPath": "rsprd.com/localkube/pkg/localkubectl",
|
||||
"Comment": "v1.2.1-v1-4-g6474c5b",
|
||||
"Rev": "6474c5bfa75d77f6a40054723d549eeed7bf2570"
|
||||
},
|
||||
{
|
||||
"ImportPath": "speter.net/go/exp/math/dec/inf",
|
||||
"Rev": "42ca6cd68aa922bc3f32f1e056e61b65945d9ad7"
|
||||
}
|
||||
]
|
||||
}
|
||||
|
|
|
@@ -1,13 +0,0 @@
include $(GOROOT)/src/Make.inc

TARG=bitbucket.org/ww/goautoneg
GOFILES=autoneg.go

include $(GOROOT)/src/Make.pkg

format:
	gofmt -w *.go

docs:
	gomake clean
	godoc ${TARG} > README.txt
@@ -1,67 +0,0 @@
PACKAGE

package goautoneg
import "bitbucket.org/ww/goautoneg"

HTTP Content-Type Autonegotiation.

The functions in this package implement the behaviour specified in
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html

Copyright (c) 2011, Open Knowledge Foundation Ltd.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.

Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.

Neither the name of the Open Knowledge Foundation Ltd. nor the
names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


FUNCTIONS

func Negotiate(header string, alternatives []string) (content_type string)
Negotiate the most appropriate content_type given the accept header
and a list of alternatives.

func ParseAccept(header string) (accept []Accept)
Parse an Accept Header string returning a sorted list
of clauses


TYPES

type Accept struct {
Type, SubType string
Q float32
Params map[string]string
}
Structure to represent a clause in an HTTP Accept Header


SUBDIRECTORIES

.hg

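For reference, a minimal usage sketch of the Negotiate and ParseAccept functions documented above; this example is illustrative only and is not part of the files touched by this diff, and it assumes the import path shown in the README:

```go
package main

import (
	"fmt"

	"bitbucket.org/ww/goautoneg"
)

func main() {
	// A typical browser Accept header.
	header := "text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8"

	// Negotiate picks the best match among the content types the server can produce.
	best := goautoneg.Negotiate(header, []string{"application/json", "text/html"})
	fmt.Println(best) // text/html

	// ParseAccept exposes the parsed, quality-sorted clauses directly.
	for _, clause := range goautoneg.ParseAccept(header) {
		fmt.Printf("%s/%s q=%v\n", clause.Type, clause.SubType, clause.Q)
	}
}
```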
@@ -1,162 +0,0 @@
/*
HTTP Content-Type Autonegotiation.

The functions in this package implement the behaviour specified in
http://www.w3.org/Protocols/rfc2616/rfc2616-sec14.html

Copyright (c) 2011, Open Knowledge Foundation Ltd.
All rights reserved.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:

Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.

Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in
the documentation and/or other materials provided with the
distribution.

Neither the name of the Open Knowledge Foundation Ltd. nor the
names of its contributors may be used to endorse or promote
products derived from this software without specific prior written
permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.


*/
package goautoneg

import (
	"sort"
	"strconv"
	"strings"
)

// Structure to represent a clause in an HTTP Accept Header
type Accept struct {
	Type, SubType string
	Q             float64
	Params        map[string]string
}

// For internal use, so that we can use the sort interface
type accept_slice []Accept

func (accept accept_slice) Len() int {
	slice := []Accept(accept)
	return len(slice)
}

func (accept accept_slice) Less(i, j int) bool {
	slice := []Accept(accept)
	ai, aj := slice[i], slice[j]
	if ai.Q > aj.Q {
		return true
	}
	if ai.Type != "*" && aj.Type == "*" {
		return true
	}
	if ai.SubType != "*" && aj.SubType == "*" {
		return true
	}
	return false
}

func (accept accept_slice) Swap(i, j int) {
	slice := []Accept(accept)
	slice[i], slice[j] = slice[j], slice[i]
}

// Parse an Accept Header string returning a sorted list
// of clauses
func ParseAccept(header string) (accept []Accept) {
	parts := strings.Split(header, ",")
	accept = make([]Accept, 0, len(parts))
	for _, part := range parts {
		part := strings.Trim(part, " ")

		a := Accept{}
		a.Params = make(map[string]string)
		a.Q = 1.0

		mrp := strings.Split(part, ";")

		media_range := mrp[0]
		sp := strings.Split(media_range, "/")
		a.Type = strings.Trim(sp[0], " ")

		switch {
		case len(sp) == 1 && a.Type == "*":
			a.SubType = "*"
		case len(sp) == 2:
			a.SubType = strings.Trim(sp[1], " ")
		default:
			continue
		}

		if len(mrp) == 1 {
			accept = append(accept, a)
			continue
		}

		for _, param := range mrp[1:] {
			sp := strings.SplitN(param, "=", 2)
			if len(sp) != 2 {
				continue
			}
			token := strings.Trim(sp[0], " ")
			if token == "q" {
				a.Q, _ = strconv.ParseFloat(sp[1], 32)
			} else {
				a.Params[token] = strings.Trim(sp[1], " ")
			}
		}

		accept = append(accept, a)
	}

	slice := accept_slice(accept)
	sort.Sort(slice)

	return
}

// Negotiate the most appropriate content_type given the accept header
// and a list of alternatives.
func Negotiate(header string, alternatives []string) (content_type string) {
	asp := make([][]string, 0, len(alternatives))
	for _, ctype := range alternatives {
		asp = append(asp, strings.SplitN(ctype, "/", 2))
	}
	for _, clause := range ParseAccept(header) {
		for i, ctsp := range asp {
			if clause.Type == ctsp[0] && clause.SubType == ctsp[1] {
				content_type = alternatives[i]
				return
			}
			if clause.Type == ctsp[0] && clause.SubType == "*" {
				content_type = alternatives[i]
				return
			}
			if clause.Type == "*" && clause.SubType == "*" {
				content_type = alternatives[i]
				return
			}
		}
	}
	return
}
@@ -1,5 +0,0 @@
TAGS
tags
.*.swp
tomlcheck/tomlcheck
toml.test
@@ -1,12 +0,0 @@
language: go
go:
- 1.1
- 1.2
- tip
install:
- go install ./...
- go get github.com/BurntSushi/toml-test
script:
- export PATH="$PATH:$HOME/gopath/bin"
- make test

@@ -1,3 +0,0 @@
Compatible with TOML version
[v0.2.0](https://github.com/mojombo/toml/blob/master/versions/toml-v0.2.0.md)

@@ -1,14 +0,0 @@
DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
Version 2, December 2004

Copyright (C) 2004 Sam Hocevar <sam@hocevar.net>

Everyone is permitted to copy and distribute verbatim or modified
copies of this license document, and changing it is allowed as long
as the name is changed.

DO WHAT THE FUCK YOU WANT TO PUBLIC LICENSE
TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION

0. You just DO WHAT THE FUCK YOU WANT TO.

@@ -1,19 +0,0 @@
install:
	go install ./...

test: install
	go test -v
	toml-test toml-test-decoder
	toml-test -encoder toml-test-encoder

fmt:
	gofmt -w *.go */*.go
	colcheck *.go */*.go

tags:
	find ./ -name '*.go' -print0 | xargs -0 gotags > TAGS

push:
	git push origin master
	git push github master

@@ -1,220 +0,0 @@
|
|||
## TOML parser and encoder for Go with reflection
|
||||
|
||||
TOML stands for Tom's Obvious, Minimal Language. This Go package provides a
|
||||
reflection interface similar to Go's standard library `json` and `xml`
|
||||
packages. This package also supports the `encoding.TextUnmarshaler` and
|
||||
`encoding.TextMarshaler` interfaces so that you can define custom data
|
||||
representations. (There is an example of this below.)
|
||||
|
||||
Spec: https://github.com/mojombo/toml
|
||||
|
||||
Compatible with TOML version
|
||||
[v0.2.0](https://github.com/toml-lang/toml/blob/master/versions/en/toml-v0.2.0.md)
|
||||
|
||||
Documentation: http://godoc.org/github.com/BurntSushi/toml
|
||||
|
||||
Installation:
|
||||
|
||||
```bash
|
||||
go get github.com/BurntSushi/toml
|
||||
```
|
||||
|
||||
Try the toml validator:
|
||||
|
||||
```bash
|
||||
go get github.com/BurntSushi/toml/cmd/tomlv
|
||||
tomlv some-toml-file.toml
|
||||
```
|
||||
|
||||
[](https://travis-ci.org/BurntSushi/toml)
|
||||
|
||||
|
||||
### Testing
|
||||
|
||||
This package passes all tests in
|
||||
[toml-test](https://github.com/BurntSushi/toml-test) for both the decoder
|
||||
and the encoder.
|
||||
|
||||
### Examples
|
||||
|
||||
This package works similarly to how the Go standard library handles `XML`
|
||||
and `JSON`. Namely, data is loaded into Go values via reflection.
|
||||
|
||||
For the simplest example, consider some TOML file as just a list of keys
|
||||
and values:
|
||||
|
||||
```toml
|
||||
Age = 25
|
||||
Cats = [ "Cauchy", "Plato" ]
|
||||
Pi = 3.14
|
||||
Perfection = [ 6, 28, 496, 8128 ]
|
||||
DOB = 1987-07-05T05:45:00Z
|
||||
```
|
||||
|
||||
Which could be defined in Go as:
|
||||
|
||||
```go
|
||||
type Config struct {
|
||||
Age int
|
||||
Cats []string
|
||||
Pi float64
|
||||
Perfection []int
|
||||
DOB time.Time // requires `import time`
|
||||
}
|
||||
```
|
||||
|
||||
And then decoded with:
|
||||
|
||||
```go
|
||||
var conf Config
|
||||
if _, err := toml.Decode(tomlData, &conf); err != nil {
|
||||
// handle error
|
||||
}
|
||||
```
|
||||
|
||||
You can also use struct tags if your struct field name doesn't map to a TOML
|
||||
key value directly:
|
||||
|
||||
```toml
|
||||
some_key_NAME = "wat"
|
||||
```
|
||||
|
||||
```go
|
||||
type TOML struct {
|
||||
ObscureKey string `toml:"some_key_NAME"`
|
||||
}
|
||||
```
|
||||
|
||||
### Using the `encoding.TextUnmarshaler` interface
|
||||
|
||||
Here's an example that automatically parses duration strings into
|
||||
`time.Duration` values:
|
||||
|
||||
```toml
|
||||
[[song]]
|
||||
name = "Thunder Road"
|
||||
duration = "4m49s"
|
||||
|
||||
[[song]]
|
||||
name = "Stairway to Heaven"
|
||||
duration = "8m03s"
|
||||
```
|
||||
|
||||
Which can be decoded with:
|
||||
|
||||
```go
|
||||
type song struct {
|
||||
Name string
|
||||
Duration duration
|
||||
}
|
||||
type songs struct {
|
||||
Song []song
|
||||
}
|
||||
var favorites songs
|
||||
if _, err := toml.Decode(blob, &favorites); err != nil {
|
||||
log.Fatal(err)
|
||||
}
|
||||
|
||||
for _, s := range favorites.Song {
|
||||
fmt.Printf("%s (%s)\n", s.Name, s.Duration)
|
||||
}
|
||||
```
|
||||
|
||||
And you'll also need a `duration` type that satisfies the
|
||||
`encoding.TextUnmarshaler` interface:
|
||||
|
||||
```go
|
||||
type duration struct {
|
||||
time.Duration
|
||||
}
|
||||
|
||||
func (d *duration) UnmarshalText(text []byte) error {
|
||||
var err error
|
||||
d.Duration, err = time.ParseDuration(string(text))
|
||||
return err
|
||||
}
|
||||
```
|
||||
|
||||
### More complex usage
|
||||
|
||||
Here's an example of how to load the example from the official spec page:
|
||||
|
||||
```toml
|
||||
# This is a TOML document. Boom.
|
||||
|
||||
title = "TOML Example"
|
||||
|
||||
[owner]
|
||||
name = "Tom Preston-Werner"
|
||||
organization = "GitHub"
|
||||
bio = "GitHub Cofounder & CEO\nLikes tater tots and beer."
|
||||
dob = 1979-05-27T07:32:00Z # First class dates? Why not?
|
||||
|
||||
[database]
|
||||
server = "192.168.1.1"
|
||||
ports = [ 8001, 8001, 8002 ]
|
||||
connection_max = 5000
|
||||
enabled = true
|
||||
|
||||
[servers]
|
||||
|
||||
# You can indent as you please. Tabs or spaces. TOML don't care.
|
||||
[servers.alpha]
|
||||
ip = "10.0.0.1"
|
||||
dc = "eqdc10"
|
||||
|
||||
[servers.beta]
|
||||
ip = "10.0.0.2"
|
||||
dc = "eqdc10"
|
||||
|
||||
[clients]
|
||||
data = [ ["gamma", "delta"], [1, 2] ] # just an update to make sure parsers support it
|
||||
|
||||
# Line breaks are OK when inside arrays
|
||||
hosts = [
|
||||
"alpha",
|
||||
"omega"
|
||||
]
|
||||
```
|
||||
|
||||
And the corresponding Go types are:
|
||||
|
||||
```go
|
||||
type tomlConfig struct {
|
||||
Title string
|
||||
Owner ownerInfo
|
||||
DB database `toml:"database"`
|
||||
Servers map[string]server
|
||||
Clients clients
|
||||
}
|
||||
|
||||
type ownerInfo struct {
|
||||
Name string
|
||||
Org string `toml:"organization"`
|
||||
Bio string
|
||||
DOB time.Time
|
||||
}
|
||||
|
||||
type database struct {
|
||||
Server string
|
||||
Ports []int
|
||||
ConnMax int `toml:"connection_max"`
|
||||
Enabled bool
|
||||
}
|
||||
|
||||
type server struct {
|
||||
IP string
|
||||
DC string
|
||||
}
|
||||
|
||||
type clients struct {
|
||||
Data [][]interface{}
|
||||
Hosts []string
|
||||
}
|
||||
```
|
||||
|
||||
Note that a case insensitive match will be tried if an exact match can't be
|
||||
found.
|
||||
|
||||
A working example of the above can be found in `_examples/example.{go,toml}`.
|
||||
|
|
@@ -1,505 +0,0 @@
|
|||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"math"
|
||||
"reflect"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
var e = fmt.Errorf
|
||||
|
||||
// Unmarshaler is the interface implemented by objects that can unmarshal a
|
||||
// TOML description of themselves.
|
||||
type Unmarshaler interface {
|
||||
UnmarshalTOML(interface{}) error
|
||||
}
|
||||
|
||||
// Unmarshal decodes the contents of `p` in TOML format into a pointer `v`.
|
||||
func Unmarshal(p []byte, v interface{}) error {
|
||||
_, err := Decode(string(p), v)
|
||||
return err
|
||||
}
|
||||
|
||||
// Primitive is a TOML value that hasn't been decoded into a Go value.
|
||||
// When using the various `Decode*` functions, the type `Primitive` may
|
||||
// be given to any value, and its decoding will be delayed.
|
||||
//
|
||||
// A `Primitive` value can be decoded using the `PrimitiveDecode` function.
|
||||
//
|
||||
// The underlying representation of a `Primitive` value is subject to change.
|
||||
// Do not rely on it.
|
||||
//
|
||||
// N.B. Primitive values are still parsed, so using them will only avoid
|
||||
// the overhead of reflection. They can be useful when you don't know the
|
||||
// exact type of TOML data until run time.
|
||||
type Primitive struct {
|
||||
undecoded interface{}
|
||||
context Key
|
||||
}
|
||||
|
||||
// DEPRECATED!
|
||||
//
|
||||
// Use MetaData.PrimitiveDecode instead.
|
||||
func PrimitiveDecode(primValue Primitive, v interface{}) error {
|
||||
md := MetaData{decoded: make(map[string]bool)}
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
}
|
||||
|
||||
// PrimitiveDecode is just like the other `Decode*` functions, except it
|
||||
// decodes a TOML value that has already been parsed. Valid primitive values
|
||||
// can *only* be obtained from values filled by the decoder functions,
|
||||
// including this method. (i.e., `v` may contain more `Primitive`
|
||||
// values.)
|
||||
//
|
||||
// Meta data for primitive values is included in the meta data returned by
|
||||
// the `Decode*` functions with one exception: keys returned by the Undecoded
|
||||
// method will only reflect keys that were decoded. Namely, any keys hidden
|
||||
// behind a Primitive will be considered undecoded. Executing this method will
|
||||
// update the undecoded keys in the meta data. (See the example.)
|
||||
func (md *MetaData) PrimitiveDecode(primValue Primitive, v interface{}) error {
|
||||
md.context = primValue.context
|
||||
defer func() { md.context = nil }()
|
||||
return md.unify(primValue.undecoded, rvalue(v))
|
||||
}
|
||||
|
||||
// Decode will decode the contents of `data` in TOML format into a pointer
|
||||
// `v`.
|
||||
//
|
||||
// TOML hashes correspond to Go structs or maps. (Dealer's choice. They can be
|
||||
// used interchangeably.)
|
||||
//
|
||||
// TOML arrays of tables correspond to either a slice of structs or a slice
|
||||
// of maps.
|
||||
//
|
||||
// TOML datetimes correspond to Go `time.Time` values.
|
||||
//
|
||||
// All other TOML types (float, string, int, bool and array) correspond
|
||||
// to the obvious Go types.
|
||||
//
|
||||
// An exception to the above rules is if a type implements the
|
||||
// encoding.TextUnmarshaler interface. In this case, any primitive TOML value
|
||||
// (floats, strings, integers, booleans and datetimes) will be converted to
|
||||
// a byte string and given to the value's UnmarshalText method. See the
|
||||
// Unmarshaler example for a demonstration with time duration strings.
|
||||
//
|
||||
// Key mapping
|
||||
//
|
||||
// TOML keys can map to either keys in a Go map or field names in a Go
|
||||
// struct. The special `toml` struct tag may be used to map TOML keys to
|
||||
// struct fields that don't match the key name exactly. (See the example.)
|
||||
// A case insensitive match to struct names will be tried if an exact match
|
||||
// can't be found.
|
||||
//
|
||||
// The mapping between TOML values and Go values is loose. That is, there
|
||||
// may exist TOML values that cannot be placed into your representation, and
|
||||
// there may be parts of your representation that do not correspond to
|
||||
// TOML values. This loose mapping can be made stricter by using the IsDefined
|
||||
// and/or Undecoded methods on the MetaData returned.
|
||||
//
|
||||
// This decoder will not handle cyclic types. If a cyclic type is passed,
|
||||
// `Decode` will not terminate.
|
||||
func Decode(data string, v interface{}) (MetaData, error) {
|
||||
p, err := parse(data)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
md := MetaData{
|
||||
p.mapping, p.types, p.ordered,
|
||||
make(map[string]bool, len(p.ordered)), nil,
|
||||
}
|
||||
return md, md.unify(p.mapping, rvalue(v))
|
||||
}
|
||||
|
||||
// DecodeFile is just like Decode, except it will automatically read the
|
||||
// contents of the file at `fpath` and decode it for you.
|
||||
func DecodeFile(fpath string, v interface{}) (MetaData, error) {
|
||||
bs, err := ioutil.ReadFile(fpath)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
return Decode(string(bs), v)
|
||||
}
|
||||
|
||||
// DecodeReader is just like Decode, except it will consume all bytes
|
||||
// from the reader and decode it for you.
|
||||
func DecodeReader(r io.Reader, v interface{}) (MetaData, error) {
|
||||
bs, err := ioutil.ReadAll(r)
|
||||
if err != nil {
|
||||
return MetaData{}, err
|
||||
}
|
||||
return Decode(string(bs), v)
|
||||
}
|
||||
|
||||
// unify performs a sort of type unification based on the structure of `rv`,
|
||||
// which is the client representation.
|
||||
//
|
||||
// Any type mismatch produces an error. Finding a type that we don't know
|
||||
// how to handle produces an unsupported type error.
|
||||
func (md *MetaData) unify(data interface{}, rv reflect.Value) error {
|
||||
|
||||
// Special case. Look for a `Primitive` value.
|
||||
if rv.Type() == reflect.TypeOf((*Primitive)(nil)).Elem() {
|
||||
// Save the undecoded data and the key context into the primitive
|
||||
// value.
|
||||
context := make(Key, len(md.context))
|
||||
copy(context, md.context)
|
||||
rv.Set(reflect.ValueOf(Primitive{
|
||||
undecoded: data,
|
||||
context: context,
|
||||
}))
|
||||
return nil
|
||||
}
|
||||
|
||||
// Special case. Unmarshaler Interface support.
|
||||
if rv.CanAddr() {
|
||||
if v, ok := rv.Addr().Interface().(Unmarshaler); ok {
|
||||
return v.UnmarshalTOML(data)
|
||||
}
|
||||
}
|
||||
|
||||
// Special case. Handle time.Time values specifically.
|
||||
// TODO: Remove this code when we decide to drop support for Go 1.1.
|
||||
// This isn't necessary in Go 1.2 because time.Time satisfies the encoding
|
||||
// interfaces.
|
||||
if rv.Type().AssignableTo(rvalue(time.Time{}).Type()) {
|
||||
return md.unifyDatetime(data, rv)
|
||||
}
|
||||
|
||||
// Special case. Look for a value satisfying the TextUnmarshaler interface.
|
||||
if v, ok := rv.Interface().(TextUnmarshaler); ok {
|
||||
return md.unifyText(data, v)
|
||||
}
|
||||
// BUG(burntsushi)
|
||||
// The behavior here is incorrect whenever a Go type satisfies the
|
||||
// encoding.TextUnmarshaler interface but also corresponds to a TOML
|
||||
// hash or array. In particular, the unmarshaler should only be applied
|
||||
// to primitive TOML values. But at this point, it will be applied to
|
||||
// all kinds of values and produce an incorrect error whenever those values
|
||||
// are hashes or arrays (including arrays of tables).
|
||||
|
||||
k := rv.Kind()
|
||||
|
||||
// laziness
|
||||
if k >= reflect.Int && k <= reflect.Uint64 {
|
||||
return md.unifyInt(data, rv)
|
||||
}
|
||||
switch k {
|
||||
case reflect.Ptr:
|
||||
elem := reflect.New(rv.Type().Elem())
|
||||
err := md.unify(data, reflect.Indirect(elem))
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
rv.Set(elem)
|
||||
return nil
|
||||
case reflect.Struct:
|
||||
return md.unifyStruct(data, rv)
|
||||
case reflect.Map:
|
||||
return md.unifyMap(data, rv)
|
||||
case reflect.Array:
|
||||
return md.unifyArray(data, rv)
|
||||
case reflect.Slice:
|
||||
return md.unifySlice(data, rv)
|
||||
case reflect.String:
|
||||
return md.unifyString(data, rv)
|
||||
case reflect.Bool:
|
||||
return md.unifyBool(data, rv)
|
||||
case reflect.Interface:
|
||||
// we only support empty interfaces.
|
||||
if rv.NumMethod() > 0 {
|
||||
return e("Unsupported type '%s'.", rv.Kind())
|
||||
}
|
||||
return md.unifyAnything(data, rv)
|
||||
case reflect.Float32:
|
||||
fallthrough
|
||||
case reflect.Float64:
|
||||
return md.unifyFloat64(data, rv)
|
||||
}
|
||||
return e("Unsupported type '%s'.", rv.Kind())
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyStruct(mapping interface{}, rv reflect.Value) error {
|
||||
tmap, ok := mapping.(map[string]interface{})
|
||||
if !ok {
|
||||
if mapping == nil {
|
||||
return nil
|
||||
}
|
||||
return mismatch(rv, "map", mapping)
|
||||
}
|
||||
|
||||
for key, datum := range tmap {
|
||||
var f *field
|
||||
fields := cachedTypeFields(rv.Type())
|
||||
for i := range fields {
|
||||
ff := &fields[i]
|
||||
if ff.name == key {
|
||||
f = ff
|
||||
break
|
||||
}
|
||||
if f == nil && strings.EqualFold(ff.name, key) {
|
||||
f = ff
|
||||
}
|
||||
}
|
||||
if f != nil {
|
||||
subv := rv
|
||||
for _, i := range f.index {
|
||||
subv = indirect(subv.Field(i))
|
||||
}
|
||||
if isUnifiable(subv) {
|
||||
md.decoded[md.context.add(key).String()] = true
|
||||
md.context = append(md.context, key)
|
||||
if err := md.unify(datum, subv); err != nil {
|
||||
return e("Type mismatch for '%s.%s': %s",
|
||||
rv.Type().String(), f.name, err)
|
||||
}
|
||||
md.context = md.context[0 : len(md.context)-1]
|
||||
} else if f.name != "" {
|
||||
// Bad user! No soup for you!
|
||||
return e("Field '%s.%s' is unexported, and therefore cannot "+
|
||||
"be loaded with reflection.", rv.Type().String(), f.name)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyMap(mapping interface{}, rv reflect.Value) error {
|
||||
tmap, ok := mapping.(map[string]interface{})
|
||||
if !ok {
|
||||
if tmap == nil {
|
||||
return nil
|
||||
}
|
||||
return badtype("map", mapping)
|
||||
}
|
||||
if rv.IsNil() {
|
||||
rv.Set(reflect.MakeMap(rv.Type()))
|
||||
}
|
||||
for k, v := range tmap {
|
||||
md.decoded[md.context.add(k).String()] = true
|
||||
md.context = append(md.context, k)
|
||||
|
||||
rvkey := indirect(reflect.New(rv.Type().Key()))
|
||||
rvval := reflect.Indirect(reflect.New(rv.Type().Elem()))
|
||||
if err := md.unify(v, rvval); err != nil {
|
||||
return err
|
||||
}
|
||||
md.context = md.context[0 : len(md.context)-1]
|
||||
|
||||
rvkey.SetString(k)
|
||||
rv.SetMapIndex(rvkey, rvval)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyArray(data interface{}, rv reflect.Value) error {
|
||||
datav := reflect.ValueOf(data)
|
||||
if datav.Kind() != reflect.Slice {
|
||||
if !datav.IsValid() {
|
||||
return nil
|
||||
}
|
||||
return badtype("slice", data)
|
||||
}
|
||||
sliceLen := datav.Len()
|
||||
if sliceLen != rv.Len() {
|
||||
return e("expected array length %d; got TOML array of length %d",
|
||||
rv.Len(), sliceLen)
|
||||
}
|
||||
return md.unifySliceArray(datav, rv)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifySlice(data interface{}, rv reflect.Value) error {
|
||||
datav := reflect.ValueOf(data)
|
||||
if datav.Kind() != reflect.Slice {
|
||||
if !datav.IsValid() {
|
||||
return nil
|
||||
}
|
||||
return badtype("slice", data)
|
||||
}
|
||||
n := datav.Len()
|
||||
if rv.IsNil() || rv.Cap() < n {
|
||||
rv.Set(reflect.MakeSlice(rv.Type(), n, n))
|
||||
}
|
||||
rv.SetLen(n)
|
||||
return md.unifySliceArray(datav, rv)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifySliceArray(data, rv reflect.Value) error {
|
||||
sliceLen := data.Len()
|
||||
for i := 0; i < sliceLen; i++ {
|
||||
v := data.Index(i).Interface()
|
||||
sliceval := indirect(rv.Index(i))
|
||||
if err := md.unify(v, sliceval); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyDatetime(data interface{}, rv reflect.Value) error {
|
||||
if _, ok := data.(time.Time); ok {
|
||||
rv.Set(reflect.ValueOf(data))
|
||||
return nil
|
||||
}
|
||||
return badtype("time.Time", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyString(data interface{}, rv reflect.Value) error {
|
||||
if s, ok := data.(string); ok {
|
||||
rv.SetString(s)
|
||||
return nil
|
||||
}
|
||||
return badtype("string", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyFloat64(data interface{}, rv reflect.Value) error {
|
||||
if num, ok := data.(float64); ok {
|
||||
switch rv.Kind() {
|
||||
case reflect.Float32:
|
||||
fallthrough
|
||||
case reflect.Float64:
|
||||
rv.SetFloat(num)
|
||||
default:
|
||||
panic("bug")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return badtype("float", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyInt(data interface{}, rv reflect.Value) error {
|
||||
if num, ok := data.(int64); ok {
|
||||
if rv.Kind() >= reflect.Int && rv.Kind() <= reflect.Int64 {
|
||||
switch rv.Kind() {
|
||||
case reflect.Int, reflect.Int64:
|
||||
// No bounds checking necessary.
|
||||
case reflect.Int8:
|
||||
if num < math.MinInt8 || num > math.MaxInt8 {
|
||||
return e("Value '%d' is out of range for int8.", num)
|
||||
}
|
||||
case reflect.Int16:
|
||||
if num < math.MinInt16 || num > math.MaxInt16 {
|
||||
return e("Value '%d' is out of range for int16.", num)
|
||||
}
|
||||
case reflect.Int32:
|
||||
if num < math.MinInt32 || num > math.MaxInt32 {
|
||||
return e("Value '%d' is out of range for int32.", num)
|
||||
}
|
||||
}
|
||||
rv.SetInt(num)
|
||||
} else if rv.Kind() >= reflect.Uint && rv.Kind() <= reflect.Uint64 {
|
||||
unum := uint64(num)
|
||||
switch rv.Kind() {
|
||||
case reflect.Uint, reflect.Uint64:
|
||||
// No bounds checking necessary.
|
||||
case reflect.Uint8:
|
||||
if num < 0 || unum > math.MaxUint8 {
|
||||
return e("Value '%d' is out of range for uint8.", num)
|
||||
}
|
||||
case reflect.Uint16:
|
||||
if num < 0 || unum > math.MaxUint16 {
|
||||
return e("Value '%d' is out of range for uint16.", num)
|
||||
}
|
||||
case reflect.Uint32:
|
||||
if num < 0 || unum > math.MaxUint32 {
|
||||
return e("Value '%d' is out of range for uint32.", num)
|
||||
}
|
||||
}
|
||||
rv.SetUint(unum)
|
||||
} else {
|
||||
panic("unreachable")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
return badtype("integer", data)
|
||||
}
|
||||
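The bounds checks in unifyInt mean a TOML integer that does not fit the destination field is reported as a decode error rather than being silently truncated. A small sketch, again assuming the package's Decode helper:

package main

import (
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	var cfg struct {
		Retries int8
	}
	// 300 does not fit in an int8, so Decode should return an
	// "out of range for int8" error instead of wrapping around.
	_, err := toml.Decode("retries = 300\n", &cfg)
	fmt.Println(err)
}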
|
||||
func (md *MetaData) unifyBool(data interface{}, rv reflect.Value) error {
|
||||
if b, ok := data.(bool); ok {
|
||||
rv.SetBool(b)
|
||||
return nil
|
||||
}
|
||||
return badtype("boolean", data)
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyAnything(data interface{}, rv reflect.Value) error {
|
||||
rv.Set(reflect.ValueOf(data))
|
||||
return nil
|
||||
}
|
||||
|
||||
func (md *MetaData) unifyText(data interface{}, v TextUnmarshaler) error {
|
||||
var s string
|
||||
switch sdata := data.(type) {
|
||||
case TextMarshaler:
|
||||
text, err := sdata.MarshalText()
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
s = string(text)
|
||||
case fmt.Stringer:
|
||||
s = sdata.String()
|
||||
case string:
|
||||
s = sdata
|
||||
case bool:
|
||||
s = fmt.Sprintf("%v", sdata)
|
||||
case int64:
|
||||
s = fmt.Sprintf("%d", sdata)
|
||||
case float64:
|
||||
s = fmt.Sprintf("%f", sdata)
|
||||
default:
|
||||
return badtype("primitive (string-like)", data)
|
||||
}
|
||||
if err := v.UnmarshalText([]byte(s)); err != nil {
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
}
|
||||
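unifyText is what lets a field whose type implements TextUnmarshaler consume a TOML primitive itself. A hypothetical Level type (not part of this diff), decoded with the assumed Decode helper:

package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

// Level is a hypothetical enum that parses itself from text.
type Level int

func (l *Level) UnmarshalText(text []byte) error {
	switch string(text) {
	case "debug":
		*l = 0
	case "info":
		*l = 1
	default:
		return fmt.Errorf("unknown level %q", text)
	}
	return nil
}

func main() {
	var cfg struct {
		Level Level
	}
	if _, err := toml.Decode("level = \"info\"\n", &cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Println(cfg.Level) // 1
}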
|
||||
// rvalue returns a reflect.Value of `v`. All pointers are resolved.
|
||||
func rvalue(v interface{}) reflect.Value {
|
||||
return indirect(reflect.ValueOf(v))
|
||||
}
|
||||
|
||||
// indirect returns the value pointed to by a pointer.
|
||||
// Pointers are followed until the value is not a pointer.
|
||||
// New values are allocated for each nil pointer.
|
||||
//
|
||||
// An exception to this rule is if the value satisfies an interface of
|
||||
// interest to us (like encoding.TextUnmarshaler).
|
||||
func indirect(v reflect.Value) reflect.Value {
|
||||
if v.Kind() != reflect.Ptr {
|
||||
if v.CanAddr() {
|
||||
pv := v.Addr()
|
||||
if _, ok := pv.Interface().(TextUnmarshaler); ok {
|
||||
return pv
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
if v.IsNil() {
|
||||
v.Set(reflect.New(v.Type().Elem()))
|
||||
}
|
||||
return indirect(reflect.Indirect(v))
|
||||
}
|
||||
|
||||
func isUnifiable(rv reflect.Value) bool {
|
||||
if rv.CanSet() {
|
||||
return true
|
||||
}
|
||||
if _, ok := rv.Interface().(TextUnmarshaler); ok {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func badtype(expected string, data interface{}) error {
|
||||
return e("Expected %s but found '%T'.", expected, data)
|
||||
}
|
||||
|
||||
func mismatch(user reflect.Value, expected string, data interface{}) error {
|
||||
return e("Type mismatch for %s. Expected %s but found '%T'.",
|
||||
user.Type().String(), expected, data)
|
||||
}
|
|
@ -1,122 +0,0 @@
|
|||
package toml
|
||||
|
||||
import "strings"
|
||||
|
||||
// MetaData allows access to meta information about TOML data that may not
|
||||
// be inferrable via reflection. In particular, whether a key has been defined
|
||||
// and the TOML type of a key.
|
||||
type MetaData struct {
|
||||
mapping map[string]interface{}
|
||||
types map[string]tomlType
|
||||
keys []Key
|
||||
decoded map[string]bool
|
||||
context Key // Used only during decoding.
|
||||
}
|
||||
|
||||
// IsDefined returns true if the key given exists in the TOML data. The key
|
||||
// should be specified hierarchically, e.g.,
|
||||
//
|
||||
// // access the TOML key 'a.b.c'
|
||||
// IsDefined("a", "b", "c")
|
||||
//
|
||||
// IsDefined will return false if an empty key is given. Keys are case sensitive.
|
||||
func (md *MetaData) IsDefined(key ...string) bool {
|
||||
if len(key) == 0 {
|
||||
return false
|
||||
}
|
||||
|
||||
var hash map[string]interface{}
|
||||
var ok bool
|
||||
var hashOrVal interface{} = md.mapping
|
||||
for _, k := range key {
|
||||
if hash, ok = hashOrVal.(map[string]interface{}); !ok {
|
||||
return false
|
||||
}
|
||||
if hashOrVal, ok = hash[k]; !ok {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
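A short usage sketch for IsDefined and Type, assuming the package's Decode function returns the MetaData described here (the example is illustrative, not part of this diff):

package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	blob := "[server]\nhost = \"example.com\"\n"
	var v map[string]interface{}
	md, err := toml.Decode(blob, &v)
	if err != nil {
		log.Fatal(err)
	}
	fmt.Println(md.IsDefined("server", "host")) // true
	fmt.Println(md.IsDefined("server", "port")) // false
	fmt.Println(md.Type("server", "host"))      // e.g. "String"
}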
|
||||
// Type returns a string representation of the type of the key specified.
|
||||
//
|
||||
// Type will return the empty string if given an empty key or a key that
|
||||
// does not exist. Keys are case sensitive.
|
||||
func (md *MetaData) Type(key ...string) string {
|
||||
fullkey := strings.Join(key, ".")
|
||||
if typ, ok := md.types[fullkey]; ok {
|
||||
return typ.typeString()
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Key is the type of any TOML key, including key groups. Use (MetaData).Keys
|
||||
// to get values of this type.
|
||||
type Key []string
|
||||
|
||||
func (k Key) String() string {
|
||||
return strings.Join(k, ".")
|
||||
}
|
||||
|
||||
func (k Key) maybeQuotedAll() string {
|
||||
var ss []string
|
||||
for i := range k {
|
||||
ss = append(ss, k.maybeQuoted(i))
|
||||
}
|
||||
return strings.Join(ss, ".")
|
||||
}
|
||||
|
||||
func (k Key) maybeQuoted(i int) string {
|
||||
quote := false
|
||||
for _, c := range k[i] {
|
||||
if !isBareKeyChar(c) {
|
||||
quote = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if quote {
|
||||
return "\"" + strings.Replace(k[i], "\"", "\\\"", -1) + "\""
|
||||
} else {
|
||||
return k[i]
|
||||
}
|
||||
}
|
||||
|
||||
func (k Key) add(piece string) Key {
|
||||
newKey := make(Key, len(k)+1)
|
||||
copy(newKey, k)
|
||||
newKey[len(k)] = piece
|
||||
return newKey
|
||||
}
|
||||
|
||||
// Keys returns a slice of every key in the TOML data, including key groups.
|
||||
// Each key is itself a slice, where the first element is the top of the
|
||||
// hierarchy and the last is the most specific.
|
||||
//
|
||||
// The list will have the same order as the keys appeared in the TOML data.
|
||||
//
|
||||
// All keys returned are non-empty.
|
||||
func (md *MetaData) Keys() []Key {
|
||||
return md.keys
|
||||
}
|
||||
|
||||
// Undecoded returns all keys that have not been decoded in the order in which
|
||||
// they appear in the original TOML document.
|
||||
//
|
||||
// This includes keys that haven't been decoded because of a Primitive value.
|
||||
// Once the Primitive value is decoded, the keys will be considered decoded.
|
||||
//
|
||||
// Also note that decoding into an empty interface will result in no decoding,
|
||||
// and so no keys will be considered decoded.
|
||||
//
|
||||
// In this sense, the Undecoded keys correspond to keys in the TOML document
|
||||
// that do not have a concrete type in your representation.
|
||||
func (md *MetaData) Undecoded() []Key {
|
||||
undecoded := make([]Key, 0, len(md.keys))
|
||||
for _, key := range md.keys {
|
||||
if !md.decoded[key.String()] {
|
||||
undecoded = append(undecoded, key)
|
||||
}
|
||||
}
|
||||
return undecoded
|
||||
}
|
|
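Undecoded pairs naturally with Keys when checking a configuration file for entries the program does not recognize. A sketch with the assumed Decode helper:

package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	blob := "name = \"app\"\ncolour = \"red\"\n"
	var cfg struct {
		Name string
	}
	md, err := toml.Decode(blob, &cfg)
	if err != nil {
		log.Fatal(err)
	}
	// "colour" has no corresponding struct field, so it is reported here.
	for _, key := range md.Undecoded() {
		fmt.Printf("unknown key: %s\n", key)
	}
}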
@ -1,27 +0,0 @@
|
|||
/*
|
||||
Package toml provides facilities for decoding and encoding TOML configuration
|
||||
files via reflection. There is also support for delaying decoding with
|
||||
the Primitive type, and querying the set of keys in a TOML document with the
|
||||
MetaData type.
|
||||
|
||||
The specification implemented: https://github.com/mojombo/toml
|
||||
|
||||
The sub-command github.com/BurntSushi/toml/cmd/tomlv can be used to verify
|
||||
whether a file is a valid TOML document. It can also be used to print the
|
||||
type of each key in a TOML document.
|
||||
|
||||
Testing
|
||||
|
||||
There are two important types of tests used for this package. The first is
|
||||
contained inside '*_test.go' files and uses the standard Go unit testing
|
||||
framework. These tests are primarily devoted to holistically testing the
|
||||
decoder and encoder.
|
||||
|
||||
The second type of testing is used to verify the implementation's adherence
|
||||
to the TOML specification. These tests have been factored into their own
|
||||
project: https://github.com/BurntSushi/toml-test
|
||||
|
||||
The reason the tests are in a separate project is so that they can be used by
|
||||
any implementation of TOML. Namely, it is language agnostic.
|
||||
*/
|
||||
package toml
|
|
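The package comment above is terse, so a minimal end-to-end decoding sketch may help. It assumes the exported Decode function from decode.go and the usual import path; nothing here is part of the diff itself.

package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

type config struct {
	Title   string
	Owner   owner
	Numbers []int
}

type owner struct {
	Name string
}

func main() {
	blob := `
title = "example"
numbers = [1, 2, 3]

[owner]
name = "Anonymous"
`
	var c config
	if _, err := toml.Decode(blob, &c); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", c)
}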
@ -1,549 +0,0 @@
|
|||
package toml
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"errors"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
type tomlEncodeError struct{ error }
|
||||
|
||||
var (
|
||||
errArrayMixedElementTypes = errors.New(
|
||||
"can't encode array with mixed element types")
|
||||
errArrayNilElement = errors.New(
|
||||
"can't encode array with nil element")
|
||||
errNonString = errors.New(
|
||||
"can't encode a map with non-string key type")
|
||||
errAnonNonStruct = errors.New(
|
||||
"can't encode an anonymous field that is not a struct")
|
||||
errArrayNoTable = errors.New(
|
||||
"TOML array element can't contain a table")
|
||||
errNoKey = errors.New(
|
||||
"top-level values must be a Go map or struct")
|
||||
errAnything = errors.New("") // used in testing
|
||||
)
|
||||
|
||||
var quotedReplacer = strings.NewReplacer(
|
||||
"\t", "\\t",
|
||||
"\n", "\\n",
|
||||
"\r", "\\r",
|
||||
"\"", "\\\"",
|
||||
"\\", "\\\\",
|
||||
)
|
||||
|
||||
// Encoder controls the encoding of Go values into a TOML document written
|
||||
// to some io.Writer.
|
||||
//
|
||||
// The indentation level can be controlled with the Indent field.
|
||||
type Encoder struct {
|
||||
// A single indentation level. By default it is two spaces.
|
||||
Indent string
|
||||
|
||||
// hasWritten is whether we have written any output to w yet.
|
||||
hasWritten bool
|
||||
w *bufio.Writer
|
||||
}
|
||||
|
||||
// NewEncoder returns a TOML encoder that encodes Go values to the io.Writer
|
||||
// given. By default, a single indentation level is 2 spaces.
|
||||
func NewEncoder(w io.Writer) *Encoder {
|
||||
return &Encoder{
|
||||
w: bufio.NewWriter(w),
|
||||
Indent: " ",
|
||||
}
|
||||
}
|
||||
|
||||
// Encode writes a TOML representation of the Go value to the underlying
|
||||
// io.Writer. If the value given cannot be encoded to a valid TOML document,
|
||||
// then an error is returned.
|
||||
//
|
||||
// The mapping between Go values and TOML values should be precisely the same
|
||||
// as for the Decode* functions. Similarly, the TextMarshaler interface is
|
||||
// supported by encoding the resulting bytes as strings. (If you want to write
|
||||
// arbitrary binary data then you will need to use something like base64 since
|
||||
// TOML does not have any binary types.)
|
||||
//
|
||||
// When encoding TOML hashes (i.e., Go maps or structs), keys without any
|
||||
// sub-hashes are encoded first.
|
||||
//
|
||||
// If a Go map is encoded, then its keys are sorted alphabetically for
|
||||
// deterministic output. More control over this behavior may be provided if
|
||||
// there is demand for it.
|
||||
//
|
||||
// Encoding Go values without a corresponding TOML representation---like map
|
||||
// types with non-string keys---will cause an error to be returned. Similarly
|
||||
// for mixed arrays/slices, arrays/slices with nil elements, embedded
|
||||
// non-struct types and nested slices containing maps or structs.
|
||||
// (e.g., [][]map[string]string is not allowed but []map[string]string is OK
|
||||
// and so is []map[string][]string.)
|
||||
func (enc *Encoder) Encode(v interface{}) error {
|
||||
rv := eindirect(reflect.ValueOf(v))
|
||||
if err := enc.safeEncode(Key([]string{}), rv); err != nil {
|
||||
return err
|
||||
}
|
||||
return enc.w.Flush()
|
||||
}
|
||||
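For symmetry, a sketch of the encoding direction using only the NewEncoder and Encode shown above:

package main

import (
	"bytes"
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	cfg := struct {
		Title   string
		Numbers []int
	}{
		Title:   "example",
		Numbers: []int{1, 2, 3},
	}

	var buf bytes.Buffer
	if err := toml.NewEncoder(&buf).Encode(cfg); err != nil {
		log.Fatal(err)
	}
	fmt.Print(buf.String())
	// Prints roughly:
	//   Title = "example"
	//   Numbers = [1, 2, 3]
}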
|
||||
func (enc *Encoder) safeEncode(key Key, rv reflect.Value) (err error) {
|
||||
defer func() {
|
||||
if r := recover(); r != nil {
|
||||
if terr, ok := r.(tomlEncodeError); ok {
|
||||
err = terr.error
|
||||
return
|
||||
}
|
||||
panic(r)
|
||||
}
|
||||
}()
|
||||
enc.encode(key, rv)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (enc *Encoder) encode(key Key, rv reflect.Value) {
|
||||
// Special case. Time needs to be in ISO8601 format.
|
||||
// Special case. If we can marshal the type to text, then we use that.
|
||||
// Basically, this prevents the encoder from handling these types as
|
||||
// generic structs (or whatever the underlying type of a TextMarshaler is).
|
||||
switch rv.Interface().(type) {
|
||||
case time.Time, TextMarshaler:
|
||||
enc.keyEqElement(key, rv)
|
||||
return
|
||||
}
|
||||
|
||||
k := rv.Kind()
|
||||
switch k {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
|
||||
reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
|
||||
reflect.Uint64,
|
||||
reflect.Float32, reflect.Float64, reflect.String, reflect.Bool:
|
||||
enc.keyEqElement(key, rv)
|
||||
case reflect.Array, reflect.Slice:
|
||||
if typeEqual(tomlArrayHash, tomlTypeOfGo(rv)) {
|
||||
enc.eArrayOfTables(key, rv)
|
||||
} else {
|
||||
enc.keyEqElement(key, rv)
|
||||
}
|
||||
case reflect.Interface:
|
||||
if rv.IsNil() {
|
||||
return
|
||||
}
|
||||
enc.encode(key, rv.Elem())
|
||||
case reflect.Map:
|
||||
if rv.IsNil() {
|
||||
return
|
||||
}
|
||||
enc.eTable(key, rv)
|
||||
case reflect.Ptr:
|
||||
if rv.IsNil() {
|
||||
return
|
||||
}
|
||||
enc.encode(key, rv.Elem())
|
||||
case reflect.Struct:
|
||||
enc.eTable(key, rv)
|
||||
default:
|
||||
panic(e("Unsupported type for key '%s': %s", key, k))
|
||||
}
|
||||
}
|
||||
|
||||
// eElement encodes any value that can be an array element (primitives and
|
||||
// arrays).
|
||||
func (enc *Encoder) eElement(rv reflect.Value) {
|
||||
switch v := rv.Interface().(type) {
|
||||
case time.Time:
|
||||
// Special case time.Time as a primitive. Has to come before
|
||||
// TextMarshaler below because time.Time implements
|
||||
// encoding.TextMarshaler, but we need to always use UTC.
|
||||
enc.wf(v.In(time.FixedZone("UTC", 0)).Format("2006-01-02T15:04:05Z"))
|
||||
return
|
||||
case TextMarshaler:
|
||||
// Special case. Use text marshaler if it's available for this value.
|
||||
if s, err := v.MarshalText(); err != nil {
|
||||
encPanic(err)
|
||||
} else {
|
||||
enc.writeQuoted(string(s))
|
||||
}
|
||||
return
|
||||
}
|
||||
switch rv.Kind() {
|
||||
case reflect.Bool:
|
||||
enc.wf(strconv.FormatBool(rv.Bool()))
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
|
||||
reflect.Int64:
|
||||
enc.wf(strconv.FormatInt(rv.Int(), 10))
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16,
|
||||
reflect.Uint32, reflect.Uint64:
|
||||
enc.wf(strconv.FormatUint(rv.Uint(), 10))
|
||||
case reflect.Float32:
|
||||
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 32)))
|
||||
case reflect.Float64:
|
||||
enc.wf(floatAddDecimal(strconv.FormatFloat(rv.Float(), 'f', -1, 64)))
|
||||
case reflect.Array, reflect.Slice:
|
||||
enc.eArrayOrSliceElement(rv)
|
||||
case reflect.Interface:
|
||||
enc.eElement(rv.Elem())
|
||||
case reflect.String:
|
||||
enc.writeQuoted(rv.String())
|
||||
default:
|
||||
panic(e("Unexpected primitive type: %s", rv.Kind()))
|
||||
}
|
||||
}
|
||||
|
||||
// By the TOML spec, all floats must have a decimal with at least one
|
||||
// number on either side.
|
||||
func floatAddDecimal(fstr string) string {
|
||||
if !strings.Contains(fstr, ".") {
|
||||
return fstr + ".0"
|
||||
}
|
||||
return fstr
|
||||
}
|
||||
|
||||
func (enc *Encoder) writeQuoted(s string) {
|
||||
enc.wf("\"%s\"", quotedReplacer.Replace(s))
|
||||
}
|
||||
|
||||
func (enc *Encoder) eArrayOrSliceElement(rv reflect.Value) {
|
||||
length := rv.Len()
|
||||
enc.wf("[")
|
||||
for i := 0; i < length; i++ {
|
||||
elem := rv.Index(i)
|
||||
enc.eElement(elem)
|
||||
if i != length-1 {
|
||||
enc.wf(", ")
|
||||
}
|
||||
}
|
||||
enc.wf("]")
|
||||
}
|
||||
|
||||
func (enc *Encoder) eArrayOfTables(key Key, rv reflect.Value) {
|
||||
if len(key) == 0 {
|
||||
encPanic(errNoKey)
|
||||
}
|
||||
for i := 0; i < rv.Len(); i++ {
|
||||
trv := rv.Index(i)
|
||||
if isNil(trv) {
|
||||
continue
|
||||
}
|
||||
panicIfInvalidKey(key)
|
||||
enc.newline()
|
||||
enc.wf("%s[[%s]]", enc.indentStr(key), key.maybeQuotedAll())
|
||||
enc.newline()
|
||||
enc.eMapOrStruct(key, trv)
|
||||
}
|
||||
}
|
||||
|
||||
func (enc *Encoder) eTable(key Key, rv reflect.Value) {
|
||||
panicIfInvalidKey(key)
|
||||
if len(key) == 1 {
|
||||
// Output an extra new line between top-level tables.
|
||||
// (The newline isn't written if nothing else has been written though.)
|
||||
enc.newline()
|
||||
}
|
||||
if len(key) > 0 {
|
||||
enc.wf("%s[%s]", enc.indentStr(key), key.maybeQuotedAll())
|
||||
enc.newline()
|
||||
}
|
||||
enc.eMapOrStruct(key, rv)
|
||||
}
|
||||
|
||||
func (enc *Encoder) eMapOrStruct(key Key, rv reflect.Value) {
|
||||
switch rv := eindirect(rv); rv.Kind() {
|
||||
case reflect.Map:
|
||||
enc.eMap(key, rv)
|
||||
case reflect.Struct:
|
||||
enc.eStruct(key, rv)
|
||||
default:
|
||||
panic("eTable: unhandled reflect.Value Kind: " + rv.Kind().String())
|
||||
}
|
||||
}
|
||||
|
||||
func (enc *Encoder) eMap(key Key, rv reflect.Value) {
|
||||
rt := rv.Type()
|
||||
if rt.Key().Kind() != reflect.String {
|
||||
encPanic(errNonString)
|
||||
}
|
||||
|
||||
// Sort keys so that we have deterministic output. And write keys directly
|
||||
// underneath this key first, before writing sub-structs or sub-maps.
|
||||
var mapKeysDirect, mapKeysSub []string
|
||||
for _, mapKey := range rv.MapKeys() {
|
||||
k := mapKey.String()
|
||||
if typeIsHash(tomlTypeOfGo(rv.MapIndex(mapKey))) {
|
||||
mapKeysSub = append(mapKeysSub, k)
|
||||
} else {
|
||||
mapKeysDirect = append(mapKeysDirect, k)
|
||||
}
|
||||
}
|
||||
|
||||
var writeMapKeys = func(mapKeys []string) {
|
||||
sort.Strings(mapKeys)
|
||||
for _, mapKey := range mapKeys {
|
||||
mrv := rv.MapIndex(reflect.ValueOf(mapKey))
|
||||
if isNil(mrv) {
|
||||
// Don't write anything for nil fields.
|
||||
continue
|
||||
}
|
||||
enc.encode(key.add(mapKey), mrv)
|
||||
}
|
||||
}
|
||||
writeMapKeys(mapKeysDirect)
|
||||
writeMapKeys(mapKeysSub)
|
||||
}
|
||||
|
||||
func (enc *Encoder) eStruct(key Key, rv reflect.Value) {
|
||||
// Write keys for fields directly under this key first, because if we write
|
||||
// a field that creates a new table, then all keys under it will be in that
|
||||
// table (not the one we're writing here).
|
||||
rt := rv.Type()
|
||||
var fieldsDirect, fieldsSub [][]int
|
||||
var addFields func(rt reflect.Type, rv reflect.Value, start []int)
|
||||
addFields = func(rt reflect.Type, rv reflect.Value, start []int) {
|
||||
for i := 0; i < rt.NumField(); i++ {
|
||||
f := rt.Field(i)
|
||||
// skip unexported fields
|
||||
if f.PkgPath != "" && !f.Anonymous {
|
||||
continue
|
||||
}
|
||||
frv := rv.Field(i)
|
||||
if f.Anonymous {
|
||||
t := f.Type
|
||||
switch t.Kind() {
|
||||
case reflect.Struct:
|
||||
addFields(t, frv, f.Index)
|
||||
continue
|
||||
case reflect.Ptr:
|
||||
if t.Elem().Kind() == reflect.Struct {
|
||||
if !frv.IsNil() {
|
||||
addFields(t.Elem(), frv.Elem(), f.Index)
|
||||
}
|
||||
continue
|
||||
}
|
||||
// Fall through to the normal field encoding logic below
|
||||
// for non-struct anonymous fields.
|
||||
}
|
||||
}
|
||||
|
||||
if typeIsHash(tomlTypeOfGo(frv)) {
|
||||
fieldsSub = append(fieldsSub, append(start, f.Index...))
|
||||
} else {
|
||||
fieldsDirect = append(fieldsDirect, append(start, f.Index...))
|
||||
}
|
||||
}
|
||||
}
|
||||
addFields(rt, rv, nil)
|
||||
|
||||
var writeFields = func(fields [][]int) {
|
||||
for _, fieldIndex := range fields {
|
||||
sft := rt.FieldByIndex(fieldIndex)
|
||||
sf := rv.FieldByIndex(fieldIndex)
|
||||
if isNil(sf) {
|
||||
// Don't write anything for nil fields.
|
||||
continue
|
||||
}
|
||||
|
||||
tag := sft.Tag.Get("toml")
|
||||
if tag == "-" {
|
||||
continue
|
||||
}
|
||||
keyName, opts := getOptions(tag)
|
||||
if keyName == "" {
|
||||
keyName = sft.Name
|
||||
}
|
||||
if _, ok := opts["omitempty"]; ok && isEmpty(sf) {
|
||||
continue
|
||||
} else if _, ok := opts["omitzero"]; ok && isZero(sf) {
|
||||
continue
|
||||
}
|
||||
|
||||
enc.encode(key.add(keyName), sf)
|
||||
}
|
||||
}
|
||||
writeFields(fieldsDirect)
|
||||
writeFields(fieldsSub)
|
||||
}
|
||||
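The tag handling in writeFields above recognizes a rename, the "-" skip marker, and the omitempty/omitzero options. A hypothetical struct showing all of them, encoded with the Encoder defined in this file (illustrative only):

package main

import (
	"log"
	"os"

	"github.com/BurntSushi/toml"
)

type account struct {
	Name     string   `toml:"name"`             // renamed key
	Password string   `toml:"-"`                // never encoded
	Tags     []string `toml:"tags,omitempty"`   // dropped when empty
	Retries  int      `toml:"retries,omitzero"` // dropped when zero
}

func main() {
	a := account{Name: "alice", Password: "secret"}
	if err := toml.NewEncoder(os.Stdout).Encode(a); err != nil {
		log.Fatal(err)
	}
	// Prints only: name = "alice"
}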
|
||||
// tomlTypeOfGo returns the TOML type of a Go value. The type may be `nil`,
|
||||
// which means no concrete TOML type could be found. It is also used to
|
||||
// determine whether the types of array elements are mixed (which is
|
||||
// forbidden).
|
||||
func tomlTypeOfGo(rv reflect.Value) tomlType {
|
||||
if isNil(rv) || !rv.IsValid() {
|
||||
return nil
|
||||
}
|
||||
switch rv.Kind() {
|
||||
case reflect.Bool:
|
||||
return tomlBool
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32,
|
||||
reflect.Int64,
|
||||
reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32,
|
||||
reflect.Uint64:
|
||||
return tomlInteger
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return tomlFloat
|
||||
case reflect.Array, reflect.Slice:
|
||||
if typeEqual(tomlHash, tomlArrayType(rv)) {
|
||||
return tomlArrayHash
|
||||
} else {
|
||||
return tomlArray
|
||||
}
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
return tomlTypeOfGo(rv.Elem())
|
||||
case reflect.String:
|
||||
return tomlString
|
||||
case reflect.Map:
|
||||
return tomlHash
|
||||
case reflect.Struct:
|
||||
switch rv.Interface().(type) {
|
||||
case time.Time:
|
||||
return tomlDatetime
|
||||
case TextMarshaler:
|
||||
return tomlString
|
||||
default:
|
||||
return tomlHash
|
||||
}
|
||||
default:
|
||||
panic("unexpected reflect.Kind: " + rv.Kind().String())
|
||||
}
|
||||
}
|
||||
|
||||
// tomlArrayType returns the element type of a TOML array. The type returned
|
||||
// may be nil if it cannot be determined (e.g., a nil slice or a zero length
|
||||
// slice). This function may also panic if it finds a type that cannot be
|
||||
// expressed in TOML (such as nil elements, heterogeneous arrays or directly
|
||||
// nested arrays of tables).
|
||||
func tomlArrayType(rv reflect.Value) tomlType {
|
||||
if isNil(rv) || !rv.IsValid() || rv.Len() == 0 {
|
||||
return nil
|
||||
}
|
||||
firstType := tomlTypeOfGo(rv.Index(0))
|
||||
if firstType == nil {
|
||||
encPanic(errArrayNilElement)
|
||||
}
|
||||
|
||||
rvlen := rv.Len()
|
||||
for i := 1; i < rvlen; i++ {
|
||||
elem := rv.Index(i)
|
||||
switch elemType := tomlTypeOfGo(elem); {
|
||||
case elemType == nil:
|
||||
encPanic(errArrayNilElement)
|
||||
case !typeEqual(firstType, elemType):
|
||||
encPanic(errArrayMixedElementTypes)
|
||||
}
|
||||
}
|
||||
// If we have a nested array, then we must make sure that the nested
|
||||
// array contains ONLY primitives.
|
||||
// This checks arbitrarily nested arrays.
|
||||
if typeEqual(firstType, tomlArray) || typeEqual(firstType, tomlArrayHash) {
|
||||
nest := tomlArrayType(eindirect(rv.Index(0)))
|
||||
if typeEqual(nest, tomlHash) || typeEqual(nest, tomlArrayHash) {
|
||||
encPanic(errArrayNoTable)
|
||||
}
|
||||
}
|
||||
return firstType
|
||||
}
|
||||
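Because tomlArrayType panics (via encPanic) on heterogeneous arrays, encoding a Go slice with mixed element types surfaces as an ordinary error from Encode. A small illustrative sketch:

package main

import (
	"bytes"
	"fmt"

	"github.com/BurntSushi/toml"
)

func main() {
	v := map[string]interface{}{
		"xs": []interface{}{1, "two"}, // int and string in one TOML array
	}
	var buf bytes.Buffer
	err := toml.NewEncoder(&buf).Encode(v)
	fmt.Println(err) // can't encode array with mixed element types
}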
|
||||
func getOptions(keyName string) (string, map[string]struct{}) {
|
||||
opts := make(map[string]struct{})
|
||||
ss := strings.Split(keyName, ",")
|
||||
name := ss[0]
|
||||
if len(ss) > 1 {
|
||||
for _, opt := range ss {
|
||||
opts[opt] = struct{}{}
|
||||
}
|
||||
}
|
||||
|
||||
return name, opts
|
||||
}
|
||||
|
||||
func isZero(rv reflect.Value) bool {
|
||||
switch rv.Kind() {
|
||||
case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
|
||||
return rv.Int() == 0
|
||||
case reflect.Uint, reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64:
|
||||
return rv.Uint() == 0
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return rv.Float() == 0.0
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func isEmpty(rv reflect.Value) bool {
|
||||
switch rv.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.Map, reflect.String:
|
||||
return rv.Len() == 0
|
||||
case reflect.Bool:
|
||||
return !rv.Bool()
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (enc *Encoder) newline() {
|
||||
if enc.hasWritten {
|
||||
enc.wf("\n")
|
||||
}
|
||||
}
|
||||
|
||||
func (enc *Encoder) keyEqElement(key Key, val reflect.Value) {
|
||||
if len(key) == 0 {
|
||||
encPanic(errNoKey)
|
||||
}
|
||||
panicIfInvalidKey(key)
|
||||
enc.wf("%s%s = ", enc.indentStr(key), key.maybeQuoted(len(key)-1))
|
||||
enc.eElement(val)
|
||||
enc.newline()
|
||||
}
|
||||
|
||||
func (enc *Encoder) wf(format string, v ...interface{}) {
|
||||
if _, err := fmt.Fprintf(enc.w, format, v...); err != nil {
|
||||
encPanic(err)
|
||||
}
|
||||
enc.hasWritten = true
|
||||
}
|
||||
|
||||
func (enc *Encoder) indentStr(key Key) string {
|
||||
return strings.Repeat(enc.Indent, len(key)-1)
|
||||
}
|
||||
|
||||
func encPanic(err error) {
|
||||
panic(tomlEncodeError{err})
|
||||
}
|
||||
|
||||
func eindirect(v reflect.Value) reflect.Value {
|
||||
switch v.Kind() {
|
||||
case reflect.Ptr, reflect.Interface:
|
||||
return eindirect(v.Elem())
|
||||
default:
|
||||
return v
|
||||
}
|
||||
}
|
||||
|
||||
func isNil(rv reflect.Value) bool {
|
||||
switch rv.Kind() {
|
||||
case reflect.Interface, reflect.Map, reflect.Ptr, reflect.Slice:
|
||||
return rv.IsNil()
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
func panicIfInvalidKey(key Key) {
|
||||
for _, k := range key {
|
||||
if len(k) == 0 {
|
||||
encPanic(e("Key '%s' is not a valid table name. Key names "+
|
||||
"cannot be empty.", key.maybeQuotedAll()))
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func isValidKeyName(s string) bool {
|
||||
return len(s) != 0
|
||||
}
|
|
@ -1,19 +0,0 @@
|
|||
// +build go1.2
|
||||
|
||||
package toml
|
||||
|
||||
// In order to support Go 1.1, we define our own TextMarshaler and
|
||||
// TextUnmarshaler types. For Go 1.2+, we just alias them with the
|
||||
// standard library interfaces.
|
||||
|
||||
import (
|
||||
"encoding"
|
||||
)
|
||||
|
||||
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
|
||||
// so that Go 1.1 can be supported.
|
||||
type TextMarshaler encoding.TextMarshaler
|
||||
|
||||
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
|
||||
// here so that Go 1.1 can be supported.
|
||||
type TextUnmarshaler encoding.TextUnmarshaler
|
|
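On the encoding side, any value implementing TextMarshaler (aliased above) is written as a TOML string. A hypothetical example type, not part of this diff:

package main

import (
	"log"
	"os"

	"github.com/BurntSushi/toml"
)

// Weekday is a hypothetical type that controls its own TOML representation.
type Weekday int

func (d Weekday) MarshalText() ([]byte, error) {
	names := []string{"Sun", "Mon", "Tue", "Wed", "Thu", "Fri", "Sat"}
	return []byte(names[d%7]), nil
}

func main() {
	v := struct {
		Start Weekday
	}{Start: 1}
	if err := toml.NewEncoder(os.Stdout).Encode(v); err != nil {
		log.Fatal(err)
	}
	// Prints: Start = "Mon"
}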
@ -1,18 +0,0 @@
|
|||
// +build !go1.2
|
||||
|
||||
package toml
|
||||
|
||||
// These interfaces were introduced in Go 1.2, so we add them manually when
|
||||
// compiling for Go 1.1.
|
||||
|
||||
// TextMarshaler is a synonym for encoding.TextMarshaler. It is defined here
|
||||
// so that Go 1.1 can be supported.
|
||||
type TextMarshaler interface {
|
||||
MarshalText() (text []byte, err error)
|
||||
}
|
||||
|
||||
// TextUnmarshaler is a synonym for encoding.TextUnmarshaler. It is defined
|
||||
// here so that Go 1.1 can be supported.
|
||||
type TextUnmarshaler interface {
|
||||
UnmarshalText(text []byte) error
|
||||
}
|
|
@ -1,871 +0,0 @@
|
|||
package toml
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"unicode/utf8"
|
||||
)
|
||||
|
||||
type itemType int
|
||||
|
||||
const (
|
||||
itemError itemType = iota
|
||||
itemNIL // used in the parser to indicate no type
|
||||
itemEOF
|
||||
itemText
|
||||
itemString
|
||||
itemRawString
|
||||
itemMultilineString
|
||||
itemRawMultilineString
|
||||
itemBool
|
||||
itemInteger
|
||||
itemFloat
|
||||
itemDatetime
|
||||
itemArray // the start of an array
|
||||
itemArrayEnd
|
||||
itemTableStart
|
||||
itemTableEnd
|
||||
itemArrayTableStart
|
||||
itemArrayTableEnd
|
||||
itemKeyStart
|
||||
itemCommentStart
|
||||
)
|
||||
|
||||
const (
|
||||
eof = 0
|
||||
tableStart = '['
|
||||
tableEnd = ']'
|
||||
arrayTableStart = '['
|
||||
arrayTableEnd = ']'
|
||||
tableSep = '.'
|
||||
keySep = '='
|
||||
arrayStart = '['
|
||||
arrayEnd = ']'
|
||||
arrayValTerm = ','
|
||||
commentStart = '#'
|
||||
stringStart = '"'
|
||||
stringEnd = '"'
|
||||
rawStringStart = '\''
|
||||
rawStringEnd = '\''
|
||||
)
|
||||
|
||||
type stateFn func(lx *lexer) stateFn
|
||||
|
||||
type lexer struct {
|
||||
input string
|
||||
start int
|
||||
pos int
|
||||
width int
|
||||
line int
|
||||
state stateFn
|
||||
items chan item
|
||||
|
||||
// A stack of state functions used to maintain context.
|
||||
// The idea is to reuse parts of the state machine in various places.
|
||||
// For example, values can appear at the top level or within arbitrarily
|
||||
// nested arrays. The last state on the stack is used after a value has
|
||||
// been lexed. Similarly for comments.
|
||||
stack []stateFn
|
||||
}
|
||||
|
||||
type item struct {
|
||||
typ itemType
|
||||
val string
|
||||
line int
|
||||
}
|
||||
|
||||
func (lx *lexer) nextItem() item {
|
||||
for {
|
||||
select {
|
||||
case item := <-lx.items:
|
||||
return item
|
||||
default:
|
||||
lx.state = lx.state(lx)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func lex(input string) *lexer {
|
||||
lx := &lexer{
|
||||
input: input + "\n",
|
||||
state: lexTop,
|
||||
line: 1,
|
||||
items: make(chan item, 10),
|
||||
stack: make([]stateFn, 0, 10),
|
||||
}
|
||||
return lx
|
||||
}
|
||||
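The lexer is demand-driven: each call to nextItem keeps running state functions until one of them emits an item. Inside the package, a consumer loop would look roughly like the sketch below; dumpItems is a hypothetical helper (lex, item and the item* constants are all unexported, so this only compiles as a file in package toml).

package toml

import "fmt"

// dumpItems is an illustrative helper that drains the lexer until an EOF or
// error item is produced, printing each item with its line number.
func dumpItems(input string) {
	lx := lex(input)
	for {
		it := lx.nextItem()
		fmt.Printf("%d: %s\n", it.line, it)
		if it.typ == itemEOF || it.typ == itemError {
			return
		}
	}
}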
|
||||
func (lx *lexer) push(state stateFn) {
|
||||
lx.stack = append(lx.stack, state)
|
||||
}
|
||||
|
||||
func (lx *lexer) pop() stateFn {
|
||||
if len(lx.stack) == 0 {
|
||||
return lx.errorf("BUG in lexer: no states to pop.")
|
||||
}
|
||||
last := lx.stack[len(lx.stack)-1]
|
||||
lx.stack = lx.stack[0 : len(lx.stack)-1]
|
||||
return last
|
||||
}
|
||||
|
||||
func (lx *lexer) current() string {
|
||||
return lx.input[lx.start:lx.pos]
|
||||
}
|
||||
|
||||
func (lx *lexer) emit(typ itemType) {
|
||||
lx.items <- item{typ, lx.current(), lx.line}
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
func (lx *lexer) emitTrim(typ itemType) {
|
||||
lx.items <- item{typ, strings.TrimSpace(lx.current()), lx.line}
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
func (lx *lexer) next() (r rune) {
|
||||
if lx.pos >= len(lx.input) {
|
||||
lx.width = 0
|
||||
return eof
|
||||
}
|
||||
|
||||
if lx.input[lx.pos] == '\n' {
|
||||
lx.line++
|
||||
}
|
||||
r, lx.width = utf8.DecodeRuneInString(lx.input[lx.pos:])
|
||||
lx.pos += lx.width
|
||||
return r
|
||||
}
|
||||
|
||||
// ignore skips over the pending input before this point.
|
||||
func (lx *lexer) ignore() {
|
||||
lx.start = lx.pos
|
||||
}
|
||||
|
||||
// backup steps back one rune. Can be called only once per call of next.
|
||||
func (lx *lexer) backup() {
|
||||
lx.pos -= lx.width
|
||||
if lx.pos < len(lx.input) && lx.input[lx.pos] == '\n' {
|
||||
lx.line--
|
||||
}
|
||||
}
|
||||
|
||||
// accept consumes the next rune if it's equal to `valid`.
|
||||
func (lx *lexer) accept(valid rune) bool {
|
||||
if lx.next() == valid {
|
||||
return true
|
||||
}
|
||||
lx.backup()
|
||||
return false
|
||||
}
|
||||
|
||||
// peek returns but does not consume the next rune in the input.
|
||||
func (lx *lexer) peek() rune {
|
||||
r := lx.next()
|
||||
lx.backup()
|
||||
return r
|
||||
}
|
||||
|
||||
// errorf stops all lexing by emitting an error and returning `nil`.
|
||||
// Note that any value that is a character is escaped if it's a special
|
||||
// character (new lines, tabs, etc.).
|
||||
func (lx *lexer) errorf(format string, values ...interface{}) stateFn {
|
||||
lx.items <- item{
|
||||
itemError,
|
||||
fmt.Sprintf(format, values...),
|
||||
lx.line,
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// lexTop consumes elements at the top level of TOML data.
|
||||
func lexTop(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isWhitespace(r) || isNL(r) {
|
||||
return lexSkip(lx, lexTop)
|
||||
}
|
||||
|
||||
switch r {
|
||||
case commentStart:
|
||||
lx.push(lexTop)
|
||||
return lexCommentStart
|
||||
case tableStart:
|
||||
return lexTableStart
|
||||
case eof:
|
||||
if lx.pos > lx.start {
|
||||
return lx.errorf("Unexpected EOF.")
|
||||
}
|
||||
lx.emit(itemEOF)
|
||||
return nil
|
||||
}
|
||||
|
||||
// At this point, the only valid item can be a key, so we back up
|
||||
// and let the key lexer do the rest.
|
||||
lx.backup()
|
||||
lx.push(lexTopEnd)
|
||||
return lexKeyStart
|
||||
}
|
||||
|
||||
// lexTopEnd is entered whenever a top-level item has been consumed. (A value
|
||||
// or a table.) It must see only whitespace, and will turn back to lexTop
|
||||
// upon a new line. If it sees EOF, it will quit the lexer successfully.
|
||||
func lexTopEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == commentStart:
|
||||
// a comment will read to a new line for us.
|
||||
lx.push(lexTop)
|
||||
return lexCommentStart
|
||||
case isWhitespace(r):
|
||||
return lexTopEnd
|
||||
case isNL(r):
|
||||
lx.ignore()
|
||||
return lexTop
|
||||
case r == eof:
|
||||
lx.ignore()
|
||||
return lexTop
|
||||
}
|
||||
return lx.errorf("Expected a top-level item to end with a new line, "+
|
||||
"comment or EOF, but got %q instead.", r)
|
||||
}
|
||||
|
||||
// lexTable lexes the beginning of a table. Namely, it makes sure that
|
||||
// it starts with a character other than '.' and ']'.
|
||||
// It assumes that '[' has already been consumed.
|
||||
// It also handles the case that this is an item in an array of tables.
|
||||
// e.g., '[[name]]'.
|
||||
func lexTableStart(lx *lexer) stateFn {
|
||||
if lx.peek() == arrayTableStart {
|
||||
lx.next()
|
||||
lx.emit(itemArrayTableStart)
|
||||
lx.push(lexArrayTableEnd)
|
||||
} else {
|
||||
lx.emit(itemTableStart)
|
||||
lx.push(lexTableEnd)
|
||||
}
|
||||
return lexTableNameStart
|
||||
}
|
||||
|
||||
func lexTableEnd(lx *lexer) stateFn {
|
||||
lx.emit(itemTableEnd)
|
||||
return lexTopEnd
|
||||
}
|
||||
|
||||
func lexArrayTableEnd(lx *lexer) stateFn {
|
||||
if r := lx.next(); r != arrayTableEnd {
|
||||
return lx.errorf("Expected end of table array name delimiter %q, "+
|
||||
"but got %q instead.", arrayTableEnd, r)
|
||||
}
|
||||
lx.emit(itemArrayTableEnd)
|
||||
return lexTopEnd
|
||||
}
|
||||
|
||||
func lexTableNameStart(lx *lexer) stateFn {
|
||||
switch r := lx.peek(); {
|
||||
case r == tableEnd || r == eof:
|
||||
return lx.errorf("Unexpected end of table name. (Table names cannot " +
|
||||
"be empty.)")
|
||||
case r == tableSep:
|
||||
return lx.errorf("Unexpected table separator. (Table names cannot " +
|
||||
"be empty.)")
|
||||
case r == stringStart || r == rawStringStart:
|
||||
lx.ignore()
|
||||
lx.push(lexTableNameEnd)
|
||||
return lexValue // reuse string lexing
|
||||
default:
|
||||
return lexBareTableName
|
||||
}
|
||||
}
|
||||
|
||||
// lexBareTableName lexes the name of a table. It assumes that at least one
|
||||
// valid character for the table has already been read.
|
||||
func lexBareTableName(lx *lexer) stateFn {
|
||||
switch r := lx.next(); {
|
||||
case isBareKeyChar(r):
|
||||
return lexBareTableName
|
||||
case r == tableSep || r == tableEnd:
|
||||
lx.backup()
|
||||
lx.emitTrim(itemText)
|
||||
return lexTableNameEnd
|
||||
default:
|
||||
return lx.errorf("Bare keys cannot contain %q.", r)
|
||||
}
|
||||
}
|
||||
|
||||
// lexTableNameEnd reads the end of a piece of a table name, optionally
|
||||
// consuming whitespace.
|
||||
func lexTableNameEnd(lx *lexer) stateFn {
|
||||
switch r := lx.next(); {
|
||||
case isWhitespace(r):
|
||||
return lexTableNameEnd
|
||||
case r == tableSep:
|
||||
lx.ignore()
|
||||
return lexTableNameStart
|
||||
case r == tableEnd:
|
||||
return lx.pop()
|
||||
default:
|
||||
return lx.errorf("Expected '.' or ']' to end table name, but got %q "+
|
||||
"instead.", r)
|
||||
}
|
||||
}
|
||||
|
||||
// lexKeyStart consumes a key name up until the first non-whitespace character.
|
||||
// lexKeyStart will ignore whitespace.
|
||||
func lexKeyStart(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
switch {
|
||||
case r == keySep:
|
||||
return lx.errorf("Unexpected key separator %q.", keySep)
|
||||
case isWhitespace(r) || isNL(r):
|
||||
lx.next()
|
||||
return lexSkip(lx, lexKeyStart)
|
||||
case r == stringStart || r == rawStringStart:
|
||||
lx.ignore()
|
||||
lx.emit(itemKeyStart)
|
||||
lx.push(lexKeyEnd)
|
||||
return lexValue // reuse string lexing
|
||||
default:
|
||||
lx.ignore()
|
||||
lx.emit(itemKeyStart)
|
||||
return lexBareKey
|
||||
}
|
||||
}
|
||||
|
||||
// lexBareKey consumes the text of a bare key. Assumes that the first character
|
||||
// (which is not whitespace) has not yet been consumed.
|
||||
func lexBareKey(lx *lexer) stateFn {
|
||||
switch r := lx.next(); {
|
||||
case isBareKeyChar(r):
|
||||
return lexBareKey
|
||||
case isWhitespace(r):
|
||||
lx.emitTrim(itemText)
|
||||
return lexKeyEnd
|
||||
case r == keySep:
|
||||
lx.backup()
|
||||
lx.emitTrim(itemText)
|
||||
return lexKeyEnd
|
||||
default:
|
||||
return lx.errorf("Bare keys cannot contain %q.", r)
|
||||
}
|
||||
}
|
||||
|
||||
// lexKeyEnd consumes the end of a key and trims whitespace (up to the key
|
||||
// separator).
|
||||
func lexKeyEnd(lx *lexer) stateFn {
|
||||
switch r := lx.next(); {
|
||||
case r == keySep:
|
||||
return lexSkip(lx, lexValue)
|
||||
case isWhitespace(r):
|
||||
return lexSkip(lx, lexKeyEnd)
|
||||
default:
|
||||
return lx.errorf("Expected key separator %q, but got %q instead.",
|
||||
keySep, r)
|
||||
}
|
||||
}
|
||||
|
||||
// lexValue starts the consumption of a value anywhere a value is expected.
|
||||
// lexValue will ignore whitespace.
|
||||
// After a value is lexed, the last state on the stack is popped and returned.
|
||||
func lexValue(lx *lexer) stateFn {
|
||||
// We allow whitespace to precede a value, but NOT new lines.
|
||||
// In array syntax, the array states are responsible for ignoring new
|
||||
// lines.
|
||||
r := lx.next()
|
||||
if isWhitespace(r) {
|
||||
return lexSkip(lx, lexValue)
|
||||
}
|
||||
|
||||
switch {
|
||||
case r == arrayStart:
|
||||
lx.ignore()
|
||||
lx.emit(itemArray)
|
||||
return lexArrayValue
|
||||
case r == stringStart:
|
||||
if lx.accept(stringStart) {
|
||||
if lx.accept(stringStart) {
|
||||
lx.ignore() // Ignore """
|
||||
return lexMultilineString
|
||||
}
|
||||
lx.backup()
|
||||
}
|
||||
lx.ignore() // ignore the '"'
|
||||
return lexString
|
||||
case r == rawStringStart:
|
||||
if lx.accept(rawStringStart) {
|
||||
if lx.accept(rawStringStart) {
|
||||
lx.ignore() // Ignore """
|
||||
return lexMultilineRawString
|
||||
}
|
||||
lx.backup()
|
||||
}
|
||||
lx.ignore() // ignore the "'"
|
||||
return lexRawString
|
||||
case r == 't':
|
||||
return lexTrue
|
||||
case r == 'f':
|
||||
return lexFalse
|
||||
case r == '-':
|
||||
return lexNumberStart
|
||||
case isDigit(r):
|
||||
lx.backup() // avoid an extra state and use the same as above
|
||||
return lexNumberOrDateStart
|
||||
case r == '.': // special error case, be kind to users
|
||||
return lx.errorf("Floats must start with a digit, not '.'.")
|
||||
}
|
||||
return lx.errorf("Expected value but found %q instead.", r)
|
||||
}
|
||||
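lexValue is the dispatch point for every value form the lexer understands: basic and raw strings (single-line and multiline), booleans, integers, floats, datetimes and arrays. A decode sketch touching most of them, again assuming the package's Decode helper:

package main

import (
	"fmt"
	"log"

	"github.com/BurntSushi/toml"
)

func main() {
	blob := `
basic = "a \t b"
raw = 'C:\temp'
multi = """
line one
line two"""
flag = true
answer = 42
pi = 3.14
xs = [1, 2, 3]
`
	var v map[string]interface{}
	if _, err := toml.Decode(blob, &v); err != nil {
		log.Fatal(err)
	}
	fmt.Println(v["raw"], v["answer"], v["xs"])
}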
|
||||
// lexArrayValue consumes one value in an array. It assumes that '[' or ','
|
||||
// have already been consumed. All whitespace and new lines are ignored.
|
||||
func lexArrayValue(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r) || isNL(r):
|
||||
return lexSkip(lx, lexArrayValue)
|
||||
case r == commentStart:
|
||||
lx.push(lexArrayValue)
|
||||
return lexCommentStart
|
||||
case r == arrayValTerm:
|
||||
return lx.errorf("Unexpected array value terminator %q.",
|
||||
arrayValTerm)
|
||||
case r == arrayEnd:
|
||||
return lexArrayEnd
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.push(lexArrayValueEnd)
|
||||
return lexValue
|
||||
}
|
||||
|
||||
// lexArrayValueEnd consumes the cruft between values of an array. Namely,
|
||||
// it ignores whitespace and expects either a ',' or a ']'.
|
||||
func lexArrayValueEnd(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isWhitespace(r) || isNL(r):
|
||||
return lexSkip(lx, lexArrayValueEnd)
|
||||
case r == commentStart:
|
||||
lx.push(lexArrayValueEnd)
|
||||
return lexCommentStart
|
||||
case r == arrayValTerm:
|
||||
lx.ignore()
|
||||
return lexArrayValue // move on to the next value
|
||||
case r == arrayEnd:
|
||||
return lexArrayEnd
|
||||
}
|
||||
return lx.errorf("Expected an array value terminator %q or an array "+
|
||||
"terminator %q, but got %q instead.", arrayValTerm, arrayEnd, r)
|
||||
}
|
||||
|
||||
// lexArrayEnd finishes the lexing of an array. It assumes that a ']' has
|
||||
// just been consumed.
|
||||
func lexArrayEnd(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
lx.emit(itemArrayEnd)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexString consumes the inner contents of a string. It assumes that the
|
||||
// beginning '"' has already been consumed and ignored.
|
||||
func lexString(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isNL(r):
|
||||
return lx.errorf("Strings cannot contain new lines.")
|
||||
case r == '\\':
|
||||
lx.push(lexString)
|
||||
return lexStringEscape
|
||||
case r == stringEnd:
|
||||
lx.backup()
|
||||
lx.emit(itemString)
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lx.pop()
|
||||
}
|
||||
return lexString
|
||||
}
|
||||
|
||||
// lexMultilineString consumes the inner contents of a string. It assumes that
|
||||
// the beginning '"""' has already been consumed and ignored.
|
||||
func lexMultilineString(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == '\\':
|
||||
return lexMultilineStringEscape
|
||||
case r == stringEnd:
|
||||
if lx.accept(stringEnd) {
|
||||
if lx.accept(stringEnd) {
|
||||
lx.backup()
|
||||
lx.backup()
|
||||
lx.backup()
|
||||
lx.emit(itemMultilineString)
|
||||
lx.next()
|
||||
lx.next()
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lx.pop()
|
||||
}
|
||||
lx.backup()
|
||||
}
|
||||
}
|
||||
return lexMultilineString
|
||||
}
|
||||
|
||||
// lexRawString consumes a raw string. Nothing can be escaped in such a string.
|
||||
// It assumes that the beginning "'" has already been consumed and ignored.
|
||||
func lexRawString(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isNL(r):
|
||||
return lx.errorf("Strings cannot contain new lines.")
|
||||
case r == rawStringEnd:
|
||||
lx.backup()
|
||||
lx.emit(itemRawString)
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lx.pop()
|
||||
}
|
||||
return lexRawString
|
||||
}
|
||||
|
||||
// lexMultilineRawString consumes a raw string. Nothing can be escaped in such
|
||||
// a string. It assumes that the beginning "'" has already been consumed and
|
||||
// ignored.
|
||||
func lexMultilineRawString(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == rawStringEnd:
|
||||
if lx.accept(rawStringEnd) {
|
||||
if lx.accept(rawStringEnd) {
|
||||
lx.backup()
|
||||
lx.backup()
|
||||
lx.backup()
|
||||
lx.emit(itemRawMultilineString)
|
||||
lx.next()
|
||||
lx.next()
|
||||
lx.next()
|
||||
lx.ignore()
|
||||
return lx.pop()
|
||||
}
|
||||
lx.backup()
|
||||
}
|
||||
}
|
||||
return lexMultilineRawString
|
||||
}
|
||||
|
||||
// lexMultilineStringEscape consumes an escaped character. It assumes that the
|
||||
// preceding '\\' has already been consumed.
|
||||
func lexMultilineStringEscape(lx *lexer) stateFn {
|
||||
// Handle the special case first:
|
||||
if isNL(lx.next()) {
|
||||
return lexMultilineString
|
||||
} else {
|
||||
lx.backup()
|
||||
lx.push(lexMultilineString)
|
||||
return lexStringEscape(lx)
|
||||
}
|
||||
}
|
||||
|
||||
func lexStringEscape(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch r {
|
||||
case 'b':
|
||||
fallthrough
|
||||
case 't':
|
||||
fallthrough
|
||||
case 'n':
|
||||
fallthrough
|
||||
case 'f':
|
||||
fallthrough
|
||||
case 'r':
|
||||
fallthrough
|
||||
case '"':
|
||||
fallthrough
|
||||
case '\\':
|
||||
return lx.pop()
|
||||
case 'u':
|
||||
return lexShortUnicodeEscape
|
||||
case 'U':
|
||||
return lexLongUnicodeEscape
|
||||
}
|
||||
return lx.errorf("Invalid escape character %q. Only the following "+
|
||||
"escape characters are allowed: "+
|
||||
"\\b, \\t, \\n, \\f, \\r, \\\", \\/, \\\\, "+
|
||||
"\\uXXXX and \\UXXXXXXXX.", r)
|
||||
}
|
||||
|
||||
func lexShortUnicodeEscape(lx *lexer) stateFn {
|
||||
var r rune
|
||||
for i := 0; i < 4; i++ {
|
||||
r = lx.next()
|
||||
if !isHexadecimal(r) {
|
||||
return lx.errorf("Expected four hexadecimal digits after '\\u', "+
|
||||
"but got '%s' instead.", lx.current())
|
||||
}
|
||||
}
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
func lexLongUnicodeEscape(lx *lexer) stateFn {
|
||||
var r rune
|
||||
for i := 0; i < 8; i++ {
|
||||
r = lx.next()
|
||||
if !isHexadecimal(r) {
|
||||
return lx.errorf("Expected eight hexadecimal digits after '\\U', "+
|
||||
"but got '%s' instead.", lx.current())
|
||||
}
|
||||
}
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexNumberOrDateStart consumes either a (positive) integer, float or
|
||||
// datetime. It assumes that NO negative sign has been consumed.
|
||||
func lexNumberOrDateStart(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if !isDigit(r) {
|
||||
if r == '.' {
|
||||
return lx.errorf("Floats must start with a digit, not '.'.")
|
||||
} else {
|
||||
return lx.errorf("Expected a digit but got %q.", r)
|
||||
}
|
||||
}
|
||||
return lexNumberOrDate
|
||||
}
|
||||
|
||||
// lexNumberOrDate consumes either a (positive) integer, float or datetime.
|
||||
func lexNumberOrDate(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case r == '-':
|
||||
if lx.pos-lx.start != 5 {
|
||||
return lx.errorf("All ISO8601 dates must be in full Zulu form.")
|
||||
}
|
||||
return lexDateAfterYear
|
||||
case isDigit(r):
|
||||
return lexNumberOrDate
|
||||
case r == '.':
|
||||
return lexFloatStart
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.emit(itemInteger)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexDateAfterYear consumes a full Zulu Datetime in ISO8601 format.
|
||||
// It assumes that "YYYY-" has already been consumed.
|
||||
func lexDateAfterYear(lx *lexer) stateFn {
|
||||
formats := []rune{
|
||||
// digits are '0'.
|
||||
// everything else is direct equality.
|
||||
'0', '0', '-', '0', '0',
|
||||
'T',
|
||||
'0', '0', ':', '0', '0', ':', '0', '0',
|
||||
'Z',
|
||||
}
|
||||
for _, f := range formats {
|
||||
r := lx.next()
|
||||
if f == '0' {
|
||||
if !isDigit(r) {
|
||||
return lx.errorf("Expected digit in ISO8601 datetime, "+
|
||||
"but found %q instead.", r)
|
||||
}
|
||||
} else if f != r {
|
||||
return lx.errorf("Expected %q in ISO8601 datetime, "+
|
||||
"but found %q instead.", f, r)
|
||||
}
|
||||
}
|
||||
lx.emit(itemDatetime)
|
||||
return lx.pop()
|
||||
}
|
||||
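Datetimes must be in the full Zulu form that lexDateAfterYear enforces (YYYY-MM-DDTHH:MM:SSZ); on the decode side they land in time.Time via unifyDatetime. A sketch with the assumed Decode helper:

package main

import (
	"fmt"
	"log"
	"time"

	"github.com/BurntSushi/toml"
)

func main() {
	var v struct {
		Created time.Time
	}
	if _, err := toml.Decode("created = 1979-05-27T07:32:00Z\n", &v); err != nil {
		log.Fatal(err)
	}
	fmt.Println(v.Created.UTC()) // prints roughly: 1979-05-27 07:32:00 +0000 UTC
}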
|
||||
// lexNumberStart consumes either an integer or a float. It assumes that
|
||||
// a negative sign has already been read, but that *no* digits have been
|
||||
// consumed. lexNumberStart will move to the appropriate integer or float
|
||||
// states.
|
||||
func lexNumberStart(lx *lexer) stateFn {
|
||||
// we MUST see a digit. Even floats have to start with a digit.
|
||||
r := lx.next()
|
||||
if !isDigit(r) {
|
||||
if r == '.' {
|
||||
return lx.errorf("Floats must start with a digit, not '.'.")
|
||||
} else {
|
||||
return lx.errorf("Expected a digit but got %q.", r)
|
||||
}
|
||||
}
|
||||
return lexNumber
|
||||
}
|
||||
|
||||
// lexNumber consumes an integer or a float after seeing the first digit.
|
||||
func lexNumber(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
switch {
|
||||
case isDigit(r):
|
||||
return lexNumber
|
||||
case r == '.':
|
||||
return lexFloatStart
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.emit(itemInteger)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexFloatStart starts the consumption of digits of a float after a '.'.
|
||||
// Namely, at least one digit is required.
|
||||
func lexFloatStart(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if !isDigit(r) {
|
||||
return lx.errorf("Floats must have a digit after the '.', but got "+
|
||||
"%q instead.", r)
|
||||
}
|
||||
return lexFloat
|
||||
}
|
||||
|
||||
// lexFloat consumes the digits of a float after a '.'.
|
||||
// Assumes that one digit has been consumed after a '.' already.
|
||||
func lexFloat(lx *lexer) stateFn {
|
||||
r := lx.next()
|
||||
if isDigit(r) {
|
||||
return lexFloat
|
||||
}
|
||||
|
||||
lx.backup()
|
||||
lx.emit(itemFloat)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexConst consumes the s[1:] in s. It assumes that s[0] has already been
|
||||
// consumed.
|
||||
func lexConst(lx *lexer, s string) stateFn {
|
||||
for i := range s[1:] {
|
||||
if r := lx.next(); r != rune(s[i+1]) {
|
||||
return lx.errorf("Expected %q, but found %q instead.", s[:i+1],
|
||||
s[:i]+string(r))
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// lexTrue consumes the "rue" in "true". It assumes that 't' has already
|
||||
// been consumed.
|
||||
func lexTrue(lx *lexer) stateFn {
|
||||
if fn := lexConst(lx, "true"); fn != nil {
|
||||
return fn
|
||||
}
|
||||
lx.emit(itemBool)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexFalse consumes the "alse" in "false". It assumes that 'f' has already
|
||||
// been consumed.
|
||||
func lexFalse(lx *lexer) stateFn {
|
||||
if fn := lexConst(lx, "false"); fn != nil {
|
||||
return fn
|
||||
}
|
||||
lx.emit(itemBool)
|
||||
return lx.pop()
|
||||
}
|
||||
|
||||
// lexCommentStart begins the lexing of a comment. It will emit
|
||||
// itemCommentStart and consume no characters, passing control to lexComment.
|
||||
func lexCommentStart(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
lx.emit(itemCommentStart)
|
||||
return lexComment
|
||||
}
|
||||
|
||||
// lexComment lexes an entire comment. It assumes that '#' has been consumed.
|
||||
// It will consume *up to* the first new line character, and pass control
|
||||
// back to the last state on the stack.
|
||||
func lexComment(lx *lexer) stateFn {
|
||||
r := lx.peek()
|
||||
if isNL(r) || r == eof {
|
||||
lx.emit(itemText)
|
||||
return lx.pop()
|
||||
}
|
||||
lx.next()
|
||||
return lexComment
|
||||
}
|
||||
|
||||
// lexSkip ignores all slurped input and moves on to the next state.
|
||||
func lexSkip(lx *lexer, nextState stateFn) stateFn {
|
||||
return func(lx *lexer) stateFn {
|
||||
lx.ignore()
|
||||
return nextState
|
||||
}
|
||||
}
|
||||
|
||||
// isWhitespace returns true if `r` is a whitespace character according
|
||||
// to the spec.
|
||||
func isWhitespace(r rune) bool {
|
||||
return r == '\t' || r == ' '
|
||||
}
|
||||
|
||||
func isNL(r rune) bool {
|
||||
return r == '\n' || r == '\r'
|
||||
}
|
||||
|
||||
func isDigit(r rune) bool {
|
||||
return r >= '0' && r <= '9'
|
||||
}
|
||||
|
||||
func isHexadecimal(r rune) bool {
|
||||
return (r >= '0' && r <= '9') ||
|
||||
(r >= 'a' && r <= 'f') ||
|
||||
(r >= 'A' && r <= 'F')
|
||||
}
|
||||
|
||||
func isBareKeyChar(r rune) bool {
|
||||
return (r >= 'A' && r <= 'Z') ||
|
||||
(r >= 'a' && r <= 'z') ||
|
||||
(r >= '0' && r <= '9') ||
|
||||
r == '_' ||
|
||||
r == '-'
|
||||
}
|
||||
|
||||
func (itype itemType) String() string {
|
||||
switch itype {
|
||||
case itemError:
|
||||
return "Error"
|
||||
case itemNIL:
|
||||
return "NIL"
|
||||
case itemEOF:
|
||||
return "EOF"
|
||||
case itemText:
|
||||
return "Text"
|
||||
case itemString:
|
||||
return "String"
|
||||
case itemRawString:
|
||||
return "String"
|
||||
case itemMultilineString:
|
||||
return "String"
|
||||
case itemRawMultilineString:
|
||||
return "String"
|
||||
case itemBool:
|
||||
return "Bool"
|
||||
case itemInteger:
|
||||
return "Integer"
|
||||
case itemFloat:
|
||||
return "Float"
|
||||
case itemDatetime:
|
||||
return "DateTime"
|
||||
case itemTableStart:
|
||||
return "TableStart"
|
||||
case itemTableEnd:
|
||||
return "TableEnd"
|
||||
case itemKeyStart:
|
||||
return "KeyStart"
|
||||
case itemArray:
|
||||
return "Array"
|
||||
case itemArrayEnd:
|
||||
return "ArrayEnd"
|
||||
case itemCommentStart:
|
||||
return "CommentStart"
|
||||
}
|
||||
panic(fmt.Sprintf("BUG: Unknown type '%d'.", int(itype)))
|
||||
}
|
||||
|
||||
func (item item) String() string {
|
||||
return fmt.Sprintf("(%s, %s)", item.typ.String(), item.val)
|
||||
}
|
|
@ -1,493 +0,0 @@
|
|||
package toml

import (
	"fmt"
	"log"
	"strconv"
	"strings"
	"time"
	"unicode"
	"unicode/utf8"
)

type parser struct {
	mapping map[string]interface{}
	types   map[string]tomlType
	lx      *lexer

	// A list of keys in the order that they appear in the TOML data.
	ordered []Key

	// the full key for the current hash in scope
	context Key

	// the base key name for everything except hashes
	currentKey string

	// rough approximation of line number
	approxLine int

	// A map of 'key.group.names' to whether they were created implicitly.
	implicits map[string]bool
}

type parseError string

func (pe parseError) Error() string {
	return string(pe)
}

func parse(data string) (p *parser, err error) {
	defer func() {
		if r := recover(); r != nil {
			var ok bool
			if err, ok = r.(parseError); ok {
				return
			}
			panic(r)
		}
	}()

	p = &parser{
		mapping:   make(map[string]interface{}),
		types:     make(map[string]tomlType),
		lx:        lex(data),
		ordered:   make([]Key, 0),
		implicits: make(map[string]bool),
	}
	for {
		item := p.next()
		if item.typ == itemEOF {
			break
		}
		p.topLevel(item)
	}

	return p, nil
}

func (p *parser) panicf(format string, v ...interface{}) {
	msg := fmt.Sprintf("Near line %d (last key parsed '%s'): %s",
		p.approxLine, p.current(), fmt.Sprintf(format, v...))
	panic(parseError(msg))
}

func (p *parser) next() item {
	it := p.lx.nextItem()
	if it.typ == itemError {
		p.panicf("%s", it.val)
	}
	return it
}

func (p *parser) bug(format string, v ...interface{}) {
	log.Panicf("BUG: %s\n\n", fmt.Sprintf(format, v...))
}

func (p *parser) expect(typ itemType) item {
	it := p.next()
	p.assertEqual(typ, it.typ)
	return it
}

func (p *parser) assertEqual(expected, got itemType) {
	if expected != got {
		p.bug("Expected '%s' but got '%s'.", expected, got)
	}
}

func (p *parser) topLevel(item item) {
	switch item.typ {
	case itemCommentStart:
		p.approxLine = item.line
		p.expect(itemText)
	case itemTableStart:
		kg := p.next()
		p.approxLine = kg.line

		var key Key
		for ; kg.typ != itemTableEnd && kg.typ != itemEOF; kg = p.next() {
			key = append(key, p.keyString(kg))
		}
		p.assertEqual(itemTableEnd, kg.typ)

		p.establishContext(key, false)
		p.setType("", tomlHash)
		p.ordered = append(p.ordered, key)
	case itemArrayTableStart:
		kg := p.next()
		p.approxLine = kg.line

		var key Key
		for ; kg.typ != itemArrayTableEnd && kg.typ != itemEOF; kg = p.next() {
			key = append(key, p.keyString(kg))
		}
		p.assertEqual(itemArrayTableEnd, kg.typ)

		p.establishContext(key, true)
		p.setType("", tomlArrayHash)
		p.ordered = append(p.ordered, key)
	case itemKeyStart:
		kname := p.next()
		p.approxLine = kname.line
		p.currentKey = p.keyString(kname)

		val, typ := p.value(p.next())
		p.setValue(p.currentKey, val)
		p.setType(p.currentKey, typ)
		p.ordered = append(p.ordered, p.context.add(p.currentKey))
		p.currentKey = ""
	default:
		p.bug("Unexpected type at top level: %s", item.typ)
	}
}

// Gets a string for a key (or part of a key in a table name).
func (p *parser) keyString(it item) string {
	switch it.typ {
	case itemText:
		return it.val
	case itemString, itemMultilineString,
		itemRawString, itemRawMultilineString:
		s, _ := p.value(it)
		return s.(string)
	default:
		p.bug("Unexpected key type: %s", it.typ)
		panic("unreachable")
	}
}

// value translates an expected value from the lexer into a Go value wrapped
// as an empty interface.
func (p *parser) value(it item) (interface{}, tomlType) {
	switch it.typ {
	case itemString:
		return p.replaceEscapes(it.val), p.typeOfPrimitive(it)
	case itemMultilineString:
		trimmed := stripFirstNewline(stripEscapedWhitespace(it.val))
		return p.replaceEscapes(trimmed), p.typeOfPrimitive(it)
	case itemRawString:
		return it.val, p.typeOfPrimitive(it)
	case itemRawMultilineString:
		return stripFirstNewline(it.val), p.typeOfPrimitive(it)
	case itemBool:
		switch it.val {
		case "true":
			return true, p.typeOfPrimitive(it)
		case "false":
			return false, p.typeOfPrimitive(it)
		}
		p.bug("Expected boolean value, but got '%s'.", it.val)
	case itemInteger:
		num, err := strconv.ParseInt(it.val, 10, 64)
		if err != nil {
			// See comment below for floats describing why we make a
			// distinction between a bug and a user error.
			if e, ok := err.(*strconv.NumError); ok &&
				e.Err == strconv.ErrRange {

				p.panicf("Integer '%s' is out of the range of 64-bit "+
					"signed integers.", it.val)
			} else {
				p.bug("Expected integer value, but got '%s'.", it.val)
			}
		}
		return num, p.typeOfPrimitive(it)
	case itemFloat:
		num, err := strconv.ParseFloat(it.val, 64)
		if err != nil {
			// Distinguish float values. Normally, it'd be a bug if the lexer
			// provides an invalid float, but it's possible that the float is
			// out of range of valid values (which the lexer cannot determine).
			// So mark the former as a bug but the latter as a legitimate user
			// error.
			//
			// This is also true for integers.
			if e, ok := err.(*strconv.NumError); ok &&
				e.Err == strconv.ErrRange {

				p.panicf("Float '%s' is out of the range of 64-bit "+
					"IEEE-754 floating-point numbers.", it.val)
			} else {
				p.bug("Expected float value, but got '%s'.", it.val)
			}
		}
		return num, p.typeOfPrimitive(it)
	case itemDatetime:
		t, err := time.Parse("2006-01-02T15:04:05Z", it.val)
		if err != nil {
			p.panicf("Invalid RFC3339 Zulu DateTime: '%s'.", it.val)
		}
		return t, p.typeOfPrimitive(it)
	case itemArray:
		array := make([]interface{}, 0)
		types := make([]tomlType, 0)

		for it = p.next(); it.typ != itemArrayEnd; it = p.next() {
			if it.typ == itemCommentStart {
				p.expect(itemText)
				continue
			}

			val, typ := p.value(it)
			array = append(array, val)
			types = append(types, typ)
		}
		return array, p.typeOfArray(types)
	}
	p.bug("Unexpected value type: %s", it.typ)
	panic("unreachable")
}

// establishContext sets the current context of the parser,
// where the context is either a hash or an array of hashes. Which one is
// set depends on the value of the `array` parameter.
//
// Establishing the context also makes sure that the key isn't a duplicate, and
// will create implicit hashes automatically.
func (p *parser) establishContext(key Key, array bool) {
	var ok bool

	// Always start at the top level and drill down for our context.
	hashContext := p.mapping
	keyContext := make(Key, 0)

	// We only need implicit hashes for key[0:-1]
	for _, k := range key[0 : len(key)-1] {
		_, ok = hashContext[k]
		keyContext = append(keyContext, k)

		// No key? Make an implicit hash and move on.
		if !ok {
			p.addImplicit(keyContext)
			hashContext[k] = make(map[string]interface{})
		}

		// If the hash context is actually an array of tables, then set
		// the hash context to the last element in that array.
		//
		// Otherwise, it better be a table, since this MUST be a key group (by
		// virtue of it not being the last element in a key).
		switch t := hashContext[k].(type) {
		case []map[string]interface{}:
			hashContext = t[len(t)-1]
		case map[string]interface{}:
			hashContext = t
		default:
			p.panicf("Key '%s' was already created as a hash.", keyContext)
		}
	}

	p.context = keyContext
	if array {
		// If this is the first element for this array, then allocate a new
		// list of tables for it.
		k := key[len(key)-1]
		if _, ok := hashContext[k]; !ok {
			hashContext[k] = make([]map[string]interface{}, 0, 5)
		}

		// Add a new table. But make sure the key hasn't already been used
		// for something else.
		if hash, ok := hashContext[k].([]map[string]interface{}); ok {
			hashContext[k] = append(hash, make(map[string]interface{}))
		} else {
			p.panicf("Key '%s' was already created and cannot be used as "+
				"an array.", keyContext)
		}
	} else {
		p.setValue(key[len(key)-1], make(map[string]interface{}))
	}
	p.context = append(p.context, key[len(key)-1])
}

// setValue sets the given key to the given value in the current context.
// It will make sure that the key hasn't already been defined, accounting for
// implicit key groups.
func (p *parser) setValue(key string, value interface{}) {
	var tmpHash interface{}
	var ok bool

	hash := p.mapping
	keyContext := make(Key, 0)
	for _, k := range p.context {
		keyContext = append(keyContext, k)
		if tmpHash, ok = hash[k]; !ok {
			p.bug("Context for key '%s' has not been established.", keyContext)
		}
		switch t := tmpHash.(type) {
		case []map[string]interface{}:
			// The context is a table of hashes. Pick the most recent table
			// defined as the current hash.
			hash = t[len(t)-1]
		case map[string]interface{}:
			hash = t
		default:
			p.bug("Expected hash to have type 'map[string]interface{}', but "+
				"it has '%T' instead.", tmpHash)
		}
	}
	keyContext = append(keyContext, key)

	if _, ok := hash[key]; ok {
		// Typically, if the given key has already been set, then we have
		// to raise an error since duplicate keys are disallowed. However,
		// it's possible that a key was previously defined implicitly. In this
		// case, it is allowed to be redefined concretely. (See the
		// `tests/valid/implicit-and-explicit-after.toml` test in `toml-test`.)
		//
		// But we have to make sure to stop marking it as an implicit. (So that
		// another redefinition provokes an error.)
		//
		// Note that since it has already been defined (as a hash), we don't
		// want to overwrite it. So our business is done.
		if p.isImplicit(keyContext) {
			p.removeImplicit(keyContext)
			return
		}

		// Otherwise, we have a concrete key trying to override a previous
		// key, which is *always* wrong.
		p.panicf("Key '%s' has already been defined.", keyContext)
	}
	hash[key] = value
}

// setType sets the type of a particular value at a given key.
// It should be called immediately AFTER setValue.
//
// Note that if `key` is empty, then the type given will be applied to the
// current context (which is either a table or an array of tables).
func (p *parser) setType(key string, typ tomlType) {
	keyContext := make(Key, 0, len(p.context)+1)
	for _, k := range p.context {
		keyContext = append(keyContext, k)
	}
	if len(key) > 0 { // allow type setting for hashes
		keyContext = append(keyContext, key)
	}
	p.types[keyContext.String()] = typ
}

// addImplicit sets the given Key as having been created implicitly.
func (p *parser) addImplicit(key Key) {
	p.implicits[key.String()] = true
}

// removeImplicit stops tagging the given key as having been implicitly
// created.
func (p *parser) removeImplicit(key Key) {
	p.implicits[key.String()] = false
}

// isImplicit returns true if the key group pointed to by the key was created
// implicitly.
func (p *parser) isImplicit(key Key) bool {
	return p.implicits[key.String()]
}

// current returns the full key name of the current context.
func (p *parser) current() string {
	if len(p.currentKey) == 0 {
		return p.context.String()
	}
	if len(p.context) == 0 {
		return p.currentKey
	}
	return fmt.Sprintf("%s.%s", p.context, p.currentKey)
}

func stripFirstNewline(s string) string {
	if len(s) == 0 || s[0] != '\n' {
		return s
	}
	return s[1:]
}

func stripEscapedWhitespace(s string) string {
	esc := strings.Split(s, "\\\n")
	if len(esc) > 1 {
		for i := 1; i < len(esc); i++ {
			esc[i] = strings.TrimLeftFunc(esc[i], unicode.IsSpace)
		}
	}
	return strings.Join(esc, "")
}

func (p *parser) replaceEscapes(str string) string {
	var replaced []rune
	s := []byte(str)
	r := 0
	for r < len(s) {
		if s[r] != '\\' {
			c, size := utf8.DecodeRune(s[r:])
			r += size
			replaced = append(replaced, c)
			continue
		}
		r += 1
		if r >= len(s) {
			p.bug("Escape sequence at end of string.")
			return ""
		}
		switch s[r] {
		default:
			p.bug("Expected valid escape code after \\, but got %q.", s[r])
			return ""
		case 'b':
			replaced = append(replaced, rune(0x0008))
			r += 1
		case 't':
			replaced = append(replaced, rune(0x0009))
			r += 1
		case 'n':
			replaced = append(replaced, rune(0x000A))
			r += 1
		case 'f':
			replaced = append(replaced, rune(0x000C))
			r += 1
		case 'r':
			replaced = append(replaced, rune(0x000D))
			r += 1
		case '"':
			replaced = append(replaced, rune(0x0022))
			r += 1
		case '\\':
			replaced = append(replaced, rune(0x005C))
			r += 1
		case 'u':
			// At this point, we know we have a Unicode escape of the form
			// `uXXXX` at [r, r+5). (Because the lexer guarantees this
			// for us.)
			escaped := p.asciiEscapeToUnicode(s[r+1 : r+5])
			replaced = append(replaced, escaped)
			r += 5
		case 'U':
			// At this point, we know we have a Unicode escape of the form
			// `UXXXXXXXX` at [r, r+9). (Because the lexer guarantees this
			// for us.)
			escaped := p.asciiEscapeToUnicode(s[r+1 : r+9])
			replaced = append(replaced, escaped)
			r += 9
		}
	}
	return string(replaced)
}

func (p *parser) asciiEscapeToUnicode(bs []byte) rune {
	s := string(bs)
	hex, err := strconv.ParseUint(strings.ToLower(s), 16, 32)
	if err != nil {
		p.bug("Could not parse '%s' as a hexadecimal number, but the "+
			"lexer claims it's OK: %s", s, err)
	}
	if !utf8.ValidRune(rune(hex)) {
		p.panicf("Escaped character '\\u%s' is not valid UTF-8.", s)
	}
	return rune(hex)
}

func isStringType(ty itemType) bool {
	return ty == itemString || ty == itemMultilineString ||
		ty == itemRawString || ty == itemRawMultilineString
}

@ -1 +0,0 @@
au BufWritePost *.go silent!make tags > /dev/null 2>&1

@ -1,91 +0,0 @@
package toml

// tomlType represents any Go type that corresponds to a TOML type.
// While the first draft of the TOML spec has a simplistic type system that
// probably doesn't need this level of sophistication, we seem to be militating
// toward adding real composite types.
type tomlType interface {
	typeString() string
}

// typeEqual accepts any two types and returns true if they are equal.
func typeEqual(t1, t2 tomlType) bool {
	if t1 == nil || t2 == nil {
		return false
	}
	return t1.typeString() == t2.typeString()
}

func typeIsHash(t tomlType) bool {
	return typeEqual(t, tomlHash) || typeEqual(t, tomlArrayHash)
}

type tomlBaseType string

func (btype tomlBaseType) typeString() string {
	return string(btype)
}

func (btype tomlBaseType) String() string {
	return btype.typeString()
}

var (
	tomlInteger   tomlBaseType = "Integer"
	tomlFloat     tomlBaseType = "Float"
	tomlDatetime  tomlBaseType = "Datetime"
	tomlString    tomlBaseType = "String"
	tomlBool      tomlBaseType = "Bool"
	tomlArray     tomlBaseType = "Array"
	tomlHash      tomlBaseType = "Hash"
	tomlArrayHash tomlBaseType = "ArrayHash"
)

// typeOfPrimitive returns a tomlType of any primitive value in TOML.
// Primitive values are: Integer, Float, Datetime, String and Bool.
//
// Passing a lexer item other than the following will cause a BUG message
// to occur: itemString, itemBool, itemInteger, itemFloat, itemDatetime.
func (p *parser) typeOfPrimitive(lexItem item) tomlType {
	switch lexItem.typ {
	case itemInteger:
		return tomlInteger
	case itemFloat:
		return tomlFloat
	case itemDatetime:
		return tomlDatetime
	case itemString:
		return tomlString
	case itemMultilineString:
		return tomlString
	case itemRawString:
		return tomlString
	case itemRawMultilineString:
		return tomlString
	case itemBool:
		return tomlBool
	}
	p.bug("Cannot infer primitive type of lex item '%s'.", lexItem)
	panic("unreachable")
}

// typeOfArray returns a tomlType for an array given a list of types of its
// values.
//
// In the current spec, if an array is homogeneous, then its type is always
// "Array". If the array is not homogeneous, an error is generated.
func (p *parser) typeOfArray(types []tomlType) tomlType {
	// Empty arrays are cool.
	if len(types) == 0 {
		return tomlArray
	}

	theType := types[0]
	for _, t := range types[1:] {
		if !typeEqual(theType, t) {
			p.panicf("Array contains values of type '%s' and '%s', but "+
				"arrays must be homogeneous.", theType, t)
		}
	}
	return tomlArray
}

@ -1,241 +0,0 @@
package toml

// Struct field handling is adapted from code in encoding/json:
//
// Copyright 2010 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the Go distribution.

import (
	"reflect"
	"sort"
	"sync"
)

// A field represents a single field found in a struct.
type field struct {
	name  string       // the name of the field (`toml` tag included)
	tag   bool         // whether field has a `toml` tag
	index []int        // represents the depth of an anonymous field
	typ   reflect.Type // the type of the field
}

// byName sorts field by name, breaking ties with depth,
// then breaking ties with "name came from toml tag", then
// breaking ties with index sequence.
type byName []field

func (x byName) Len() int { return len(x) }

func (x byName) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byName) Less(i, j int) bool {
	if x[i].name != x[j].name {
		return x[i].name < x[j].name
	}
	if len(x[i].index) != len(x[j].index) {
		return len(x[i].index) < len(x[j].index)
	}
	if x[i].tag != x[j].tag {
		return x[i].tag
	}
	return byIndex(x).Less(i, j)
}

// byIndex sorts field by index sequence.
type byIndex []field

func (x byIndex) Len() int { return len(x) }

func (x byIndex) Swap(i, j int) { x[i], x[j] = x[j], x[i] }

func (x byIndex) Less(i, j int) bool {
	for k, xik := range x[i].index {
		if k >= len(x[j].index) {
			return false
		}
		if xik != x[j].index[k] {
			return xik < x[j].index[k]
		}
	}
	return len(x[i].index) < len(x[j].index)
}

// typeFields returns a list of fields that TOML should recognize for the given
// type. The algorithm is breadth-first search over the set of structs to
// include - the top struct and then any reachable anonymous structs.
func typeFields(t reflect.Type) []field {
	// Anonymous fields to explore at the current level and the next.
	current := []field{}
	next := []field{{typ: t}}

	// Count of queued names for current level and the next.
	count := map[reflect.Type]int{}
	nextCount := map[reflect.Type]int{}

	// Types already visited at an earlier level.
	visited := map[reflect.Type]bool{}

	// Fields found.
	var fields []field

	for len(next) > 0 {
		current, next = next, current[:0]
		count, nextCount = nextCount, map[reflect.Type]int{}

		for _, f := range current {
			if visited[f.typ] {
				continue
			}
			visited[f.typ] = true

			// Scan f.typ for fields to include.
			for i := 0; i < f.typ.NumField(); i++ {
				sf := f.typ.Field(i)
				if sf.PkgPath != "" && !sf.Anonymous { // unexported
					continue
				}
				name, _ := getOptions(sf.Tag.Get("toml"))
				if name == "-" {
					continue
				}
				index := make([]int, len(f.index)+1)
				copy(index, f.index)
				index[len(f.index)] = i

				ft := sf.Type
				if ft.Name() == "" && ft.Kind() == reflect.Ptr {
					// Follow pointer.
					ft = ft.Elem()
				}

				// Record found field and index sequence.
				if name != "" || !sf.Anonymous || ft.Kind() != reflect.Struct {
					tagged := name != ""
					if name == "" {
						name = sf.Name
					}
					fields = append(fields, field{name, tagged, index, ft})
					if count[f.typ] > 1 {
						// If there were multiple instances, add a second,
						// so that the annihilation code will see a duplicate.
						// It only cares about the distinction between 1 or 2,
						// so don't bother generating any more copies.
						fields = append(fields, fields[len(fields)-1])
					}
					continue
				}

				// Record new anonymous struct to explore in next round.
				nextCount[ft]++
				if nextCount[ft] == 1 {
					f := field{name: ft.Name(), index: index, typ: ft}
					next = append(next, f)
				}
			}
		}
	}

	sort.Sort(byName(fields))

	// Delete all fields that are hidden by the Go rules for embedded fields,
	// except that fields with TOML tags are promoted.

	// The fields are sorted in primary order of name, secondary order
	// of field index length. Loop over names; for each name, delete
	// hidden fields by choosing the one dominant field that survives.
	out := fields[:0]
	for advance, i := 0, 0; i < len(fields); i += advance {
		// One iteration per name.
		// Find the sequence of fields with the name of this first field.
		fi := fields[i]
		name := fi.name
		for advance = 1; i+advance < len(fields); advance++ {
			fj := fields[i+advance]
			if fj.name != name {
				break
			}
		}
		if advance == 1 { // Only one field with this name
			out = append(out, fi)
			continue
		}
		dominant, ok := dominantField(fields[i : i+advance])
		if ok {
			out = append(out, dominant)
		}
	}

	fields = out
	sort.Sort(byIndex(fields))

	return fields
}

// dominantField looks through the fields, all of which are known to
// have the same name, to find the single field that dominates the
// others using Go's embedding rules, modified by the presence of
// TOML tags. If there are multiple top-level fields, the boolean
// will be false: This condition is an error in Go and we skip all
// the fields.
func dominantField(fields []field) (field, bool) {
	// The fields are sorted in increasing index-length order. The winner
	// must therefore be one with the shortest index length. Drop all
	// longer entries, which is easy: just truncate the slice.
	length := len(fields[0].index)
	tagged := -1 // Index of first tagged field.
	for i, f := range fields {
		if len(f.index) > length {
			fields = fields[:i]
			break
		}
		if f.tag {
			if tagged >= 0 {
				// Multiple tagged fields at the same level: conflict.
				// Return no field.
				return field{}, false
			}
			tagged = i
		}
	}
	if tagged >= 0 {
		return fields[tagged], true
	}
	// All remaining fields have the same length. If there's more than one,
	// we have a conflict (two fields named "X" at the same level) and we
	// return no field.
	if len(fields) > 1 {
		return field{}, false
	}
	return fields[0], true
}

var fieldCache struct {
	sync.RWMutex
	m map[reflect.Type][]field
}

// cachedTypeFields is like typeFields but uses a cache to avoid repeated work.
func cachedTypeFields(t reflect.Type) []field {
	fieldCache.RLock()
	f := fieldCache.m[t]
	fieldCache.RUnlock()
	if f != nil {
		return f
	}

	// Compute fields without lock.
	// Might duplicate effort but won't hold other computations back.
	f = typeFields(t)
	if f == nil {
		f = []field{}
	}

	fieldCache.Lock()
	if fieldCache.m == nil {
		fieldCache.m = map[reflect.Type][]field{}
	}
	fieldCache.m[t] = f
	fieldCache.Unlock()
	return f
}

@ -1 +0,0 @@
logrus

@ -1,10 +0,0 @@
language: go
go:
  - 1.2
  - 1.3
  - tip
install:
  - go get github.com/stretchr/testify
  - go get github.com/stvp/go-udp-testing
  - go get github.com/tobi/airbrake-go
  - go get github.com/getsentry/raven-go

@ -1,21 +0,0 @@
The MIT License (MIT)

Copyright (c) 2014 Simon Eskildsen

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.

@ -1,352 +0,0 @@
# Logrus <img src="http://i.imgur.com/hTeVwmJ.png" width="40" height="40" alt=":walrus:" class="emoji" title=":walrus:"/> [](https://travis-ci.org/Sirupsen/logrus)

Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
yet stable (pre 1.0); the core API is unlikely to change much, but please version
control your Logrus to make sure you aren't fetching latest `master` on every
build.**

Nicely color-coded in development (when a TTY is attached, otherwise just
plain text):



With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
or Splunk:

```json
{"animal":"walrus","level":"info","msg":"A group of walrus emerges from the
ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}

{"level":"warning","msg":"The group's number increased tremendously!",
"number":122,"omg":true,"time":"2014-03-10 19:57:38.562471297 -0400 EDT"}

{"animal":"walrus","level":"info","msg":"A giant walrus appears!",
"size":10,"time":"2014-03-10 19:57:38.562500591 -0400 EDT"}

{"animal":"walrus","level":"info","msg":"Tremendously sized cow enters the ocean.",
"size":9,"time":"2014-03-10 19:57:38.562527896 -0400 EDT"}

{"level":"fatal","msg":"The ice breaks!","number":100,"omg":true,
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
```

With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not
attached, the output is compatible with the
[l2met](http://r.32k.io/l2met-introduction) format:

```text
time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="A group of walrus emerges from the ocean" animal="walrus" size=10
time="2014-04-20 15:36:23.830584199 -0400 EDT" level="warning" msg="The group's number increased tremendously!" omg=true number=122
time="2014-04-20 15:36:23.830596521 -0400 EDT" level="info" msg="A giant walrus appears!" animal="walrus" size=10
time="2014-04-20 15:36:23.830611837 -0400 EDT" level="info" msg="Tremendously sized cow enters the ocean." animal="walrus" size=9
time="2014-04-20 15:36:23.830626464 -0400 EDT" level="fatal" msg="The ice breaks!" omg=true number=100
```

#### Example

The simplest way to use Logrus is simply the package-level exported logger:

```go
package main

import (
  log "github.com/Sirupsen/logrus"
)

func main() {
  log.WithFields(log.Fields{
    "animal": "walrus",
  }).Info("A walrus appears")
}
```

Note that it's completely API-compatible with the stdlib logger, so you can
replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
and you'll now have the flexibility of Logrus. You can customize it all you
want:

```go
package main

import (
  "os"
  log "github.com/Sirupsen/logrus"
  "github.com/Sirupsen/logrus/hooks/airbrake"
)

func init() {
  // Log as JSON instead of the default ASCII formatter.
  log.SetFormatter(&log.JSONFormatter{})

  // Use the Airbrake hook to report errors that have Error severity or above to
  // an exception tracker. You can create custom hooks, see the Hooks section.
  log.AddHook(&logrus_airbrake.AirbrakeHook{})

  // Output to stderr instead of stdout, could also be a file.
  log.SetOutput(os.Stderr)

  // Only log the warning severity or above.
  log.SetLevel(log.WarnLevel)
}

func main() {
  log.WithFields(log.Fields{
    "animal": "walrus",
    "size":   10,
  }).Info("A group of walrus emerges from the ocean")

  log.WithFields(log.Fields{
    "omg":    true,
    "number": 122,
  }).Warn("The group's number increased tremendously!")

  log.WithFields(log.Fields{
    "omg":    true,
    "number": 100,
  }).Fatal("The ice breaks!")
}
```

For more advanced usage such as logging to multiple locations from the same
application, you can also create an instance of the `logrus` Logger:

```go
package main

import (
  "os"

  "github.com/Sirupsen/logrus"
)

// Create a new instance of the logger. You can have any number of instances.
var log = logrus.New()

func main() {
  // The API for setting attributes is a little different than the package level
  // exported logger. See Godoc.
  log.Out = os.Stderr

  log.WithFields(logrus.Fields{
    "animal": "walrus",
    "size":   10,
  }).Info("A group of walrus emerges from the ocean")
}
```

#### Fields

Logrus encourages careful, structured logging through logging fields instead of
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
to send event %s to topic %s with key %d")`, you should log the much more
discoverable:

```go
log.WithFields(log.Fields{
  "event": event,
  "topic": topic,
  "key":   key,
}).Fatal("Failed to send event")
```

We've found this API forces you to think about logging in a way that produces
much more useful logging messages. We've been in countless situations where just
a single added field to a log statement that was already there would've saved us
hours. The `WithFields` call is optional.

In general, with Logrus using any of the `printf`-family functions should be
seen as a hint you should add a field; however, you can still use the
`printf`-family functions with Logrus.
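
A minimal sketch of that combination, using the documented `WithFields` and `Infof` calls (the `event`, `topic` and `attempt` variables below are illustrative placeholders, not part of the original example):

```go
// Structured data goes into fields; the formatted message stays short.
log.WithFields(log.Fields{
  "event": event, // placeholder variables for the sketch
  "topic": topic,
}).Infof("retrying publish (attempt %d)", attempt)
```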

#### Hooks

You can add hooks for logging levels. For example, to send errors to an exception
tracking service on `Error`, `Fatal` and `Panic`, send info to StatsD, or log to
multiple places simultaneously, e.g. syslog.

```go
// Not the real implementation of the Airbrake hook. Just a simple sample.
import (
  log "github.com/Sirupsen/logrus"
)

func init() {
  log.AddHook(new(AirbrakeHook))
}

type AirbrakeHook struct{}

// `Fire()` takes the entry that the hook is fired for. `entry.Data[]` contains
// the fields for the entry. See the Fields section of the README.
func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error {
  err := airbrake.Notify(entry.Data["error"].(error))
  if err != nil {
    log.WithFields(log.Fields{
      "source":   "airbrake",
      "endpoint": airbrake.Endpoint,
    }).Info("Failed to send error to Airbrake")
  }

  return nil
}

// `Levels()` returns a slice of `Levels` the hook is fired for.
func (hook *AirbrakeHook) Levels() []log.Level {
  return []log.Level{
    log.ErrorLevel,
    log.FatalLevel,
    log.PanicLevel,
  }
}
```

Logrus comes with built-in hooks. Add those, or your custom hook, in `init`:

```go
import (
  log "github.com/Sirupsen/logrus"
  "github.com/Sirupsen/logrus/hooks/airbrake"
  "github.com/Sirupsen/logrus/hooks/syslog"
  "log/syslog"
)

func init() {
  log.AddHook(new(logrus_airbrake.AirbrakeHook))

  hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
  if err != nil {
    log.Error("Unable to connect to local syslog daemon")
  } else {
    log.AddHook(hook)
  }
}
```

* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go)
  Send errors to an exception tracking service compatible with the Airbrake API.
  Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes.

* [`github.com/Sirupsen/logrus/hooks/papertrail`](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go)
  Send errors to the Papertrail hosted logging service via UDP.

* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go)
  Send errors to a remote syslog server.
  Uses the standard library `log/syslog` behind the scenes.

* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus)
  Send errors to a channel in HipChat.

* [`github.com/sebest/logrusly`](https://github.com/sebest/logrusly)
  Send logs to Loggly (https://www.loggly.com/)

#### Level logging

Logrus has six logging levels: Debug, Info, Warning, Error, Fatal and Panic.

```go
log.Debug("Useful debugging information.")
log.Info("Something noteworthy happened!")
log.Warn("You should probably take a look at this.")
log.Error("Something failed but I'm not quitting.")
// Calls os.Exit(1) after logging
log.Fatal("Bye.")
// Calls panic() after logging
log.Panic("I'm bailing.")
```

You can set the logging level on a `Logger`; it will then only log entries with
that severity or anything above it:

```go
// Will log anything that is info or above (warn, error, fatal, panic). Default.
log.SetLevel(log.InfoLevel)
```

It may be useful to set `log.Level = logrus.DebugLevel` in a debug or verbose
environment if your application has that.
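
One minimal way to wire that up is to key the level off an environment variable; the `DEBUG` variable name here is just an assumption for the sketch:

```go
// Sketch: enable verbose logging when DEBUG is set (variable name is illustrative).
if os.Getenv("DEBUG") != "" {
  log.SetLevel(log.DebugLevel)
}
```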

#### Entries

Besides the fields added with `WithField` or `WithFields`, some fields are
automatically added to all logging events:

1. `time`. The timestamp when the entry was created.
2. `msg`. The logging message passed to `{Info,Warn,Error,Fatal,Panic}` after
   the `WithFields` call. E.g. `Failed to send event.`
3. `level`. The logging level. E.g. `info`.
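
As a rough illustration with the `JSONFormatter`, a single call like the one below carries all three automatic fields alongside the user-supplied one (the output shown in the comment is approximate):

```go
log.WithField("animal", "walrus").Info("A walrus appears")
// ~> {"animal":"walrus","level":"info","msg":"A walrus appears","time":"..."}
```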

#### Environments

Logrus has no notion of environment.

If you wish for hooks and formatters to only be used in specific environments,
you should handle that yourself. For example, if your application has a global
variable `Environment`, which is a string representation of the environment, you
could do:

```go
import (
  log "github.com/Sirupsen/logrus"
)

func init() {
  // do something here to set environment depending on an environment variable
  // or command-line flag
  if Environment == "production" {
    log.SetFormatter(&logrus.JSONFormatter{})
  } else {
    // The TextFormatter is default, you don't actually have to do this.
    log.SetFormatter(&logrus.TextFormatter{})
  }
}
```

This configuration is how `logrus` was intended to be used, but JSON in
production is mostly only useful if you do log aggregation with tools like
Splunk or Logstash.

#### Formatters

The built-in logging formatters are:

* `logrus.TextFormatter`. Logs the event in colors if stdout is a tty, otherwise
  without colors.
  * *Note:* to force colored output when there is no TTY, set the `ForceColors`
    field to `true`. To force no colored output even if there is a TTY, set the
    `DisableColors` field to `true`.
* `logrus.JSONFormatter`. Logs fields as JSON.

Third party logging formatters:

* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.

You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
`Fields` type (`map[string]interface{}`) with all your fields as well as the
default ones (see Entries section above):

```go
type MyJSONFormatter struct {
}

log.SetFormatter(new(MyJSONFormatter))

func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
  // Note this doesn't include Time, Level and Message which are available on
  // the Entry. Consult `godoc` on information about those fields or read the
  // source of the official loggers.
  serialized, err := json.Marshal(entry.Data)
  if err != nil {
    return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
  }
  return append(serialized, '\n'), nil
}
```

#### Rotation

Log rotation is not provided with Logrus. Log rotation should be done by an
external program (like `logrotate(8)`) that can compress and delete old log
entries. It should not be a feature of the application-level logger.


[godoc]: https://godoc.org/github.com/Sirupsen/logrus

@ -1,248 +0,0 @@
package logrus

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"time"
)

// An entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
// passed around as much as you wish to avoid field duplication.
type Entry struct {
	Logger *Logger

	// Contains all the fields set by the user.
	Data Fields

	// Time at which the log entry was created
	Time time.Time

	// Level the log entry was logged at: Debug, Info, Warn, Error, Fatal or Panic
	Level Level

	// Message passed to Debug, Info, Warn, Error, Fatal or Panic
	Message string
}

func NewEntry(logger *Logger) *Entry {
	return &Entry{
		Logger: logger,
		// Default is three fields, give a little extra room
		Data: make(Fields, 5),
	}
}

// Returns a reader for the entry, which is a proxy to the formatter.
func (entry *Entry) Reader() (*bytes.Buffer, error) {
	serialized, err := entry.Logger.Formatter.Format(entry)
	return bytes.NewBuffer(serialized), err
}

// Returns the string representation from the reader and ultimately the
// formatter.
func (entry *Entry) String() (string, error) {
	reader, err := entry.Reader()
	if err != nil {
		return "", err
	}

	return reader.String(), err
}

// Add a single field to the Entry.
func (entry *Entry) WithField(key string, value interface{}) *Entry {
	return entry.WithFields(Fields{key: value})
}

// Add a map of fields to the Entry.
func (entry *Entry) WithFields(fields Fields) *Entry {
	data := Fields{}
	for k, v := range entry.Data {
		data[k] = v
	}
	for k, v := range fields {
		data[k] = v
	}
	return &Entry{Logger: entry.Logger, Data: data}
}

func (entry *Entry) log(level Level, msg string) {
	entry.Time = time.Now()
	entry.Level = level
	entry.Message = msg

	if err := entry.Logger.Hooks.Fire(level, entry); err != nil {
		entry.Logger.mu.Lock()
		fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
		entry.Logger.mu.Unlock()
	}

	reader, err := entry.Reader()
	if err != nil {
		entry.Logger.mu.Lock()
		fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
		entry.Logger.mu.Unlock()
	}

	entry.Logger.mu.Lock()
	defer entry.Logger.mu.Unlock()

	_, err = io.Copy(entry.Logger.Out, reader)
	if err != nil {
		fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
	}

	// To avoid Entry#log() returning a value that only would make sense for
	// panic() to use in Entry#Panic(), we avoid the allocation by checking
	// directly here.
	if level <= PanicLevel {
		panic(entry)
	}
}

func (entry *Entry) Debug(args ...interface{}) {
	if entry.Logger.Level >= DebugLevel {
		entry.log(DebugLevel, fmt.Sprint(args...))
	}
}

func (entry *Entry) Print(args ...interface{}) {
	entry.Info(args...)
}

func (entry *Entry) Info(args ...interface{}) {
	if entry.Logger.Level >= InfoLevel {
		entry.log(InfoLevel, fmt.Sprint(args...))
	}
}

func (entry *Entry) Warn(args ...interface{}) {
	if entry.Logger.Level >= WarnLevel {
		entry.log(WarnLevel, fmt.Sprint(args...))
	}
}

func (entry *Entry) Error(args ...interface{}) {
	if entry.Logger.Level >= ErrorLevel {
		entry.log(ErrorLevel, fmt.Sprint(args...))
	}
}

func (entry *Entry) Fatal(args ...interface{}) {
	if entry.Logger.Level >= FatalLevel {
		entry.log(FatalLevel, fmt.Sprint(args...))
	}
	os.Exit(1)
}

func (entry *Entry) Panic(args ...interface{}) {
	if entry.Logger.Level >= PanicLevel {
		entry.log(PanicLevel, fmt.Sprint(args...))
	}
	panic(fmt.Sprint(args...))
}

// Entry Printf family functions

func (entry *Entry) Debugf(format string, args ...interface{}) {
	if entry.Logger.Level >= DebugLevel {
		entry.Debug(fmt.Sprintf(format, args...))
	}
}

func (entry *Entry) Infof(format string, args ...interface{}) {
	if entry.Logger.Level >= InfoLevel {
		entry.Info(fmt.Sprintf(format, args...))
	}
}

func (entry *Entry) Printf(format string, args ...interface{}) {
	entry.Infof(format, args...)
}

func (entry *Entry) Warnf(format string, args ...interface{}) {
	if entry.Logger.Level >= WarnLevel {
		entry.Warn(fmt.Sprintf(format, args...))
	}
}

func (entry *Entry) Warningf(format string, args ...interface{}) {
	entry.Warnf(format, args...)
}

func (entry *Entry) Errorf(format string, args ...interface{}) {
	if entry.Logger.Level >= ErrorLevel {
		entry.Error(fmt.Sprintf(format, args...))
	}
}

func (entry *Entry) Fatalf(format string, args ...interface{}) {
	if entry.Logger.Level >= FatalLevel {
		entry.Fatal(fmt.Sprintf(format, args...))
	}
}

func (entry *Entry) Panicf(format string, args ...interface{}) {
	if entry.Logger.Level >= PanicLevel {
		entry.Panic(fmt.Sprintf(format, args...))
	}
}

// Entry Println family functions

func (entry *Entry) Debugln(args ...interface{}) {
	if entry.Logger.Level >= DebugLevel {
		entry.Debug(entry.sprintlnn(args...))
	}
}

func (entry *Entry) Infoln(args ...interface{}) {
	if entry.Logger.Level >= InfoLevel {
		entry.Info(entry.sprintlnn(args...))
	}
}

func (entry *Entry) Println(args ...interface{}) {
	entry.Infoln(args...)
}

func (entry *Entry) Warnln(args ...interface{}) {
	if entry.Logger.Level >= WarnLevel {
		entry.Warn(entry.sprintlnn(args...))
	}
}

func (entry *Entry) Warningln(args ...interface{}) {
	entry.Warnln(args...)
}

func (entry *Entry) Errorln(args ...interface{}) {
	if entry.Logger.Level >= ErrorLevel {
		entry.Error(entry.sprintlnn(args...))
	}
}

func (entry *Entry) Fatalln(args ...interface{}) {
	if entry.Logger.Level >= FatalLevel {
		entry.Fatal(entry.sprintlnn(args...))
	}
}

func (entry *Entry) Panicln(args ...interface{}) {
	if entry.Logger.Level >= PanicLevel {
		entry.Panic(entry.sprintlnn(args...))
	}
}

// Sprintlnn => Sprint no newline. This is to get the behavior of
// fmt.Sprintln, where spaces are always added between operands, regardless of
// their type. Instead of vendoring the Sprintln implementation to spare a
// string allocation, we do the simplest thing.
func (entry *Entry) sprintlnn(args ...interface{}) string {
	msg := fmt.Sprintln(args...)
	return msg[:len(msg)-1]
}

@ -1,182 +0,0 @@
package logrus

import (
	"io"
)

var (
	// std is the name of the standard logger in stdlib `log`
	std = New()
)

// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
	std.mu.Lock()
	defer std.mu.Unlock()
	std.Out = out
}

// SetFormatter sets the standard logger formatter.
func SetFormatter(formatter Formatter) {
	std.mu.Lock()
	defer std.mu.Unlock()
	std.Formatter = formatter
}

// SetLevel sets the standard logger level.
func SetLevel(level Level) {
	std.mu.Lock()
	defer std.mu.Unlock()
	std.Level = level
}

// GetLevel returns the standard logger level.
func GetLevel() Level {
	return std.Level
}

// AddHook adds a hook to the standard logger hooks.
func AddHook(hook Hook) {
	std.mu.Lock()
	defer std.mu.Unlock()
	std.Hooks.Add(hook)
}

// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithField(key string, value interface{}) *Entry {
	return std.WithField(key, value)
}

// WithFields creates an entry from the standard logger and adds multiple
// fields to it. This is simply a helper for `WithField`, invoking it
// once for each field.
//
// Note that it doesn't log until you call Debug, Print, Info, Warn, Fatal
// or Panic on the Entry it returns.
func WithFields(fields Fields) *Entry {
	return std.WithFields(fields)
}

// Debug logs a message at level Debug on the standard logger.
func Debug(args ...interface{}) {
	std.Debug(args...)
}

// Print logs a message at level Info on the standard logger.
func Print(args ...interface{}) {
	std.Print(args...)
}

// Info logs a message at level Info on the standard logger.
func Info(args ...interface{}) {
	std.Info(args...)
}

// Warn logs a message at level Warn on the standard logger.
func Warn(args ...interface{}) {
	std.Warn(args...)
}

// Warning logs a message at level Warn on the standard logger.
func Warning(args ...interface{}) {
	std.Warning(args...)
}

// Error logs a message at level Error on the standard logger.
func Error(args ...interface{}) {
	std.Error(args...)
}

// Panic logs a message at level Panic on the standard logger.
func Panic(args ...interface{}) {
	std.Panic(args...)
}

// Fatal logs a message at level Fatal on the standard logger.
func Fatal(args ...interface{}) {
	std.Fatal(args...)
}

// Debugf logs a message at level Debug on the standard logger.
func Debugf(format string, args ...interface{}) {
	std.Debugf(format, args...)
}

// Printf logs a message at level Info on the standard logger.
func Printf(format string, args ...interface{}) {
	std.Printf(format, args...)
}

// Infof logs a message at level Info on the standard logger.
func Infof(format string, args ...interface{}) {
	std.Infof(format, args...)
}

// Warnf logs a message at level Warn on the standard logger.
func Warnf(format string, args ...interface{}) {
	std.Warnf(format, args...)
}

// Warningf logs a message at level Warn on the standard logger.
func Warningf(format string, args ...interface{}) {
	std.Warningf(format, args...)
}

// Errorf logs a message at level Error on the standard logger.
func Errorf(format string, args ...interface{}) {
	std.Errorf(format, args...)
}

// Panicf logs a message at level Panic on the standard logger.
func Panicf(format string, args ...interface{}) {
	std.Panicf(format, args...)
}

// Fatalf logs a message at level Fatal on the standard logger.
func Fatalf(format string, args ...interface{}) {
	std.Fatalf(format, args...)
}

// Debugln logs a message at level Debug on the standard logger.
func Debugln(args ...interface{}) {
	std.Debugln(args...)
}

// Println logs a message at level Info on the standard logger.
func Println(args ...interface{}) {
	std.Println(args...)
}

// Infoln logs a message at level Info on the standard logger.
func Infoln(args ...interface{}) {
	std.Infoln(args...)
}

// Warnln logs a message at level Warn on the standard logger.
func Warnln(args ...interface{}) {
	std.Warnln(args...)
}

// Warningln logs a message at level Warn on the standard logger.
func Warningln(args ...interface{}) {
	std.Warningln(args...)
}

// Errorln logs a message at level Error on the standard logger.
func Errorln(args ...interface{}) {
	std.Errorln(args...)
}

// Panicln logs a message at level Panic on the standard logger.
func Panicln(args ...interface{}) {
	std.Panicln(args...)
}

// Fatalln logs a message at level Fatal on the standard logger.
func Fatalln(args ...interface{}) {
	std.Fatalln(args...)
}

@ -1,44 +0,0 @@
package logrus

// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
//
// * `entry.Data["msg"]`. The message passed from Info, Warn, Error ..
// * `entry.Data["time"]`. The timestamp.
// * `entry.Data["level"]`. The level the entry was logged at.
//
// Any additional fields added with `WithField` or `WithFields` are also in
// `entry.Data`. Format is expected to return an array of bytes which are then
// logged to `logger.Out`.
type Formatter interface {
	Format(*Entry) ([]byte, error)
}

// This is to not silently overwrite `time`, `msg` and `level` fields when
// dumping it. If this code wasn't there, then:
//
//	logrus.WithField("level", 1).Info("hello")
//
// would just silently drop the user-provided level. Instead, with this code,
// it'll be logged as:
//
//	{"level": "info", "fields.level": 1, "msg": "hello", "time": "..."}
//
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) {
	_, ok := data["time"]
	if ok {
		data["fields.time"] = data["time"]
	}

	_, ok = data["msg"]
	if ok {
		data["fields.msg"] = data["msg"]
	}

	_, ok = data["level"]
	if ok {
		data["fields.level"] = data["level"]
	}
}

@ -1,34 +0,0 @@
package logrus

// A hook to be fired when logging on the logging levels returned from
// `Levels()` on your implementation of the interface. Note that this is not
// fired in a goroutine or a channel with workers; you should handle such
// functionality yourself if your call is non-blocking and you don't wish for
// the logging calls for levels returned from `Levels()` to block.
type Hook interface {
	Levels() []Level
	Fire(*Entry) error
}

// Internal type for storing the hooks on a logger instance.
type levelHooks map[Level][]Hook

// Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
func (hooks levelHooks) Add(hook Hook) {
	for _, level := range hook.Levels() {
		hooks[level] = append(hooks[level], hook)
	}
}

// Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry.
func (hooks levelHooks) Fire(level Level, entry *Entry) error {
	for _, hook := range hooks[level] {
		if err := hook.Fire(entry); err != nil {
			return err
		}
	}

	return nil
}

@ -1,26 +0,0 @@
package logrus
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"time"
|
||||
)
|
||||
|
||||
type JSONFormatter struct{}
|
||||
|
||||
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
|
||||
data := make(Fields, len(entry.Data)+3)
|
||||
for k, v := range entry.Data {
|
||||
data[k] = v
|
||||
}
|
||||
prefixFieldClashes(data)
|
||||
data["time"] = entry.Time.Format(time.RFC3339)
|
||||
data["msg"] = entry.Message
|
||||
data["level"] = entry.Level.String()
|
||||
|
||||
serialized, err := json.Marshal(data)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("Failed to marshal fields to JSON, %v", err)
|
||||
}
|
||||
return append(serialized, '\n'), nil
|
||||
}
|
|
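Putting the built-in formatters in context: switching a logger to `JSONFormatter` yields one JSON object per line with `time`, `msg` and `level` keys, and `prefixFieldClashes` renames user fields that would collide with those keys. A usage sketch under those assumptions:

```go
package main

import (
    "os"

    "github.com/Sirupsen/logrus"
)

func main() {
    log := logrus.New()
    log.Out = os.Stderr
    log.Formatter = new(logrus.JSONFormatter)
    log.Level = logrus.DebugLevel

    // A user field named "msg" is kept as "fields.msg" by prefixFieldClashes,
    // so it does not clobber the message key in the JSON output.
    log.WithFields(logrus.Fields{"port": 8080, "msg": "user-supplied"}).Info("listening")
}
```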
@ -1,161 +0,0 @@
|
|||
package logrus
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type Logger struct {
|
||||
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
|
||||
// file, or leave it default which is `os.Stdout`. You can also set this to
|
||||
// something more adventurous, such as logging to Kafka.
|
||||
Out io.Writer
|
||||
// Hooks for the logger instance. These allow firing events based on logging
|
||||
// levels and log entries. For example, to send errors to an error tracking
|
||||
// service, log to StatsD or dump the core on fatal errors.
|
||||
Hooks levelHooks
|
||||
// All log entries pass through the formatter before logged to Out. The
|
||||
// included formatters are `TextFormatter` and `JSONFormatter` for which
|
||||
// TextFormatter is the default. In development (when a TTY is attached) it
|
||||
// logs with colors, but to a file it wouldn't. You can easily implement your
|
||||
// own that implements the `Formatter` interface, see the `README` or included
|
||||
// formatters for examples.
|
||||
Formatter Formatter
|
||||
// The logging level the logger should log at. This is typically (and defaults
|
||||
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
|
||||
// logged. `logrus.Debug` is useful in
|
||||
Level Level
|
||||
// Used to sync writing to the log.
|
||||
mu sync.Mutex
|
||||
}
|
||||
|
||||
// Creates a new logger. Configuration should be set by changing `Formatter`,
|
||||
// `Out` and `Hooks` directly on the default logger instance. You can also just
|
||||
// instantiate your own:
|
||||
//
|
||||
// var log = &Logger{
|
||||
// Out: os.Stderr,
|
||||
// Formatter: new(JSONFormatter),
|
||||
// Hooks: make(levelHooks),
|
||||
// Level: logrus.DebugLevel,
|
||||
// }
|
||||
//
|
||||
// It's recommended to make this a global instance called `log`.
|
||||
func New() *Logger {
|
||||
return &Logger{
|
||||
Out: os.Stdout,
|
||||
Formatter: new(TextFormatter),
|
||||
Hooks: make(levelHooks),
|
||||
Level: InfoLevel,
|
||||
}
|
||||
}
|
||||
|
||||
// Adds a field to the log entry. Note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
|
||||
return NewEntry(logger).WithField(key, value)
|
||||
}
|
||||
|
||||
// Adds a struct of fields to the log entry. All it does is call `WithField` for
|
||||
// each `Field`.
|
||||
func (logger *Logger) WithFields(fields Fields) *Entry {
|
||||
return NewEntry(logger).WithFields(fields)
|
||||
}
|
||||
|
||||
func (logger *Logger) Debugf(format string, args ...interface{}) {
|
||||
NewEntry(logger).Debugf(format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Infof(format string, args ...interface{}) {
|
||||
NewEntry(logger).Infof(format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Printf(format string, args ...interface{}) {
|
||||
NewEntry(logger).Printf(format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warnf(format string, args ...interface{}) {
|
||||
NewEntry(logger).Warnf(format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warningf(format string, args ...interface{}) {
|
||||
NewEntry(logger).Warnf(format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Errorf(format string, args ...interface{}) {
|
||||
NewEntry(logger).Errorf(format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Fatalf(format string, args ...interface{}) {
|
||||
NewEntry(logger).Fatalf(format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Panicf(format string, args ...interface{}) {
|
||||
NewEntry(logger).Panicf(format, args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Debug(args ...interface{}) {
|
||||
NewEntry(logger).Debug(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Info(args ...interface{}) {
|
||||
NewEntry(logger).Info(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Print(args ...interface{}) {
|
||||
NewEntry(logger).Info(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warn(args ...interface{}) {
|
||||
NewEntry(logger).Warn(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warning(args ...interface{}) {
|
||||
NewEntry(logger).Warn(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Error(args ...interface{}) {
|
||||
NewEntry(logger).Error(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Fatal(args ...interface{}) {
|
||||
NewEntry(logger).Fatal(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Panic(args ...interface{}) {
|
||||
NewEntry(logger).Panic(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Debugln(args ...interface{}) {
|
||||
NewEntry(logger).Debugln(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Infoln(args ...interface{}) {
|
||||
NewEntry(logger).Infoln(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Println(args ...interface{}) {
|
||||
NewEntry(logger).Println(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warnln(args ...interface{}) {
|
||||
NewEntry(logger).Warnln(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Warningln(args ...interface{}) {
|
||||
NewEntry(logger).Warnln(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Errorln(args ...interface{}) {
|
||||
NewEntry(logger).Errorln(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Fatalln(args ...interface{}) {
|
||||
NewEntry(logger).Fatalln(args...)
|
||||
}
|
||||
|
||||
func (logger *Logger) Panicln(args ...interface{}) {
|
||||
NewEntry(logger).Panicln(args...)
|
||||
}
|
|
@ -1,94 +0,0 @@
|
|||
package logrus
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
)
|
||||
|
||||
// Fields type, used to pass to `WithFields`.
|
||||
type Fields map[string]interface{}
|
||||
|
||||
// Level type
|
||||
type Level uint8
|
||||
|
||||
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
|
||||
func (level Level) String() string {
|
||||
switch level {
|
||||
case DebugLevel:
|
||||
return "debug"
|
||||
case InfoLevel:
|
||||
return "info"
|
||||
case WarnLevel:
|
||||
return "warning"
|
||||
case ErrorLevel:
|
||||
return "error"
|
||||
case FatalLevel:
|
||||
return "fatal"
|
||||
case PanicLevel:
|
||||
return "panic"
|
||||
}
|
||||
|
||||
return "unknown"
|
||||
}
|
||||
|
||||
// ParseLevel takes a string level and returns the Logrus log level constant.
|
||||
func ParseLevel(lvl string) (Level, error) {
|
||||
switch lvl {
|
||||
case "panic":
|
||||
return PanicLevel, nil
|
||||
case "fatal":
|
||||
return FatalLevel, nil
|
||||
case "error":
|
||||
return ErrorLevel, nil
|
||||
case "warn", "warning":
|
||||
return WarnLevel, nil
|
||||
case "info":
|
||||
return InfoLevel, nil
|
||||
case "debug":
|
||||
return DebugLevel, nil
|
||||
}
|
||||
|
||||
var l Level
|
||||
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
|
||||
}
|
||||
|
||||
// These are the different logging levels. You can set the logging level to log
|
||||
// on your instance of logger, obtained with `logrus.New()`.
|
||||
const (
|
||||
// PanicLevel level, highest level of severity. Logs and then calls panic with the
|
||||
// message passed to Debug, Info, ...
|
||||
PanicLevel Level = iota
|
||||
// FatalLevel level. Logs and then calls `os.Exit(1)`. It will exit even if the
|
||||
// logging level is set to Panic.
|
||||
FatalLevel
|
||||
// ErrorLevel level. Logs. Used for errors that should definitely be noted.
|
||||
// Commonly used for hooks to send errors to an error tracking service.
|
||||
ErrorLevel
|
||||
// WarnLevel level. Non-critical entries that deserve eyes.
|
||||
WarnLevel
|
||||
// InfoLevel level. General operational entries about what's going on inside the
|
||||
// application.
|
||||
InfoLevel
|
||||
// DebugLevel level. Usually only enabled when debugging. Very verbose logging.
|
||||
DebugLevel
|
||||
)
|
||||
|
||||
// Won't compile if StdLogger can't be realized by a log.Logger
|
||||
var _ StdLogger = &log.Logger{}
|
||||
|
||||
// StdLogger is what your logrus-enabled library should take, that way
|
||||
// it'll accept a stdlib logger and a logrus logger. There's no standard
|
||||
// interface, this is the closest we get, unfortunately.
|
||||
type StdLogger interface {
|
||||
Print(...interface{})
|
||||
Printf(string, ...interface{})
|
||||
Println(...interface{})
|
||||
|
||||
Fatal(...interface{})
|
||||
Fatalf(string, ...interface{})
|
||||
Fatalln(...interface{})
|
||||
|
||||
Panic(...interface{})
|
||||
Panicf(string, ...interface{})
|
||||
Panicln(...interface{})
|
||||
}
|
|
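Two removed helpers above are worth a usage note: `ParseLevel` turns a string such as a flag value into a `Level`, and `StdLogger` is the interface a logrus-aware library can accept so callers may pass either a `*log.Logger` or a `*Logger`. A sketch only; the flag name is made up:

```go
package main

import (
    "flag"
    "log"

    "github.com/Sirupsen/logrus"
)

// announce works with both the standard library logger and a logrus logger,
// because both satisfy logrus.StdLogger.
func announce(l logrus.StdLogger) {
    l.Println("starting up")
}

func main() {
    levelName := flag.String("log-level", "info", "panic|fatal|error|warn|info|debug")
    flag.Parse()

    logger := logrus.New()
    lvl, err := logrus.ParseLevel(*levelName)
    if err != nil {
        log.Fatal(err)
    }
    logger.Level = lvl

    announce(log.New(logger.Out, "", log.LstdFlags)) // stdlib logger
    announce(logger)                                 // logrus logger
}
```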
@ -1,12 +0,0 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package logrus

import "syscall"

const ioctlReadTermios = syscall.TIOCGETA

type Termios syscall.Termios
@ -1,20 +0,0 @@
/*
  Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
*/
package logrus

import (
    "syscall"
)

const ioctlReadTermios = syscall.TIOCGETA

type Termios struct {
    Iflag  uint32
    Oflag  uint32
    Cflag  uint32
    Lflag  uint32
    Cc     [20]uint8
    Ispeed uint32
    Ospeed uint32
}
@ -1,12 +0,0 @@
// Based on ssh/terminal:
// Copyright 2013 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package logrus

import "syscall"

const ioctlReadTermios = syscall.TCGETS

type Termios syscall.Termios
@ -1,21 +0,0 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build linux,!appengine darwin freebsd

package logrus

import (
    "syscall"
    "unsafe"
)

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal() bool {
    fd := syscall.Stdout
    var termios Termios
    _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
    return err == 0
}
@ -1,27 +0,0 @@
// Based on ssh/terminal:
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// +build windows

package logrus

import (
    "syscall"
    "unsafe"
)

var kernel32 = syscall.NewLazyDLL("kernel32.dll")

var (
    procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
)

// IsTerminal returns true if the given file descriptor is a terminal.
func IsTerminal() bool {
    fd := syscall.Stdout
    var st uint32
    r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
    return r != 0 && e == 0
}
@ -1,124 +0,0 @@
|
|||
package logrus
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
const (
|
||||
nocolor = 0
|
||||
red = 31
|
||||
green = 32
|
||||
yellow = 33
|
||||
blue = 34
|
||||
)
|
||||
|
||||
var (
|
||||
baseTimestamp time.Time
|
||||
isTerminal bool
|
||||
noQuoteNeeded *regexp.Regexp
|
||||
)
|
||||
|
||||
func init() {
|
||||
baseTimestamp = time.Now()
|
||||
isTerminal = IsTerminal()
|
||||
}
|
||||
|
||||
func miniTS() int {
|
||||
return int(time.Since(baseTimestamp) / time.Second)
|
||||
}
|
||||
|
||||
type TextFormatter struct {
|
||||
// Set to true to bypass checking for a TTY before outputting colors.
|
||||
ForceColors bool
|
||||
DisableColors bool
|
||||
// Set to true to disable timestamp logging (useful when the output
|
||||
// is redirected to a logging system already adding a timestamp)
|
||||
DisableTimestamp bool
|
||||
}
|
||||
|
||||
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
|
||||
|
||||
var keys []string
|
||||
for k := range entry.Data {
|
||||
keys = append(keys, k)
|
||||
}
|
||||
sort.Strings(keys)
|
||||
|
||||
b := &bytes.Buffer{}
|
||||
|
||||
prefixFieldClashes(entry.Data)
|
||||
|
||||
isColored := (f.ForceColors || isTerminal) && !f.DisableColors
|
||||
|
||||
if isColored {
|
||||
printColored(b, entry, keys)
|
||||
} else {
|
||||
if !f.DisableTimestamp {
|
||||
f.appendKeyValue(b, "time", entry.Time.Format(time.RFC3339))
|
||||
}
|
||||
f.appendKeyValue(b, "level", entry.Level.String())
|
||||
f.appendKeyValue(b, "msg", entry.Message)
|
||||
for _, key := range keys {
|
||||
f.appendKeyValue(b, key, entry.Data[key])
|
||||
}
|
||||
}
|
||||
|
||||
b.WriteByte('\n')
|
||||
return b.Bytes(), nil
|
||||
}
|
||||
|
||||
func printColored(b *bytes.Buffer, entry *Entry, keys []string) {
|
||||
var levelColor int
|
||||
switch entry.Level {
|
||||
case WarnLevel:
|
||||
levelColor = yellow
|
||||
case ErrorLevel, FatalLevel, PanicLevel:
|
||||
levelColor = red
|
||||
default:
|
||||
levelColor = blue
|
||||
}
|
||||
|
||||
levelText := strings.ToUpper(entry.Level.String())[0:4]
|
||||
|
||||
fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
|
||||
for _, k := range keys {
|
||||
v := entry.Data[k]
|
||||
fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v)
|
||||
}
|
||||
}
|
||||
|
||||
func needsQuoting(text string) bool {
|
||||
for _, ch := range text {
|
||||
if !((ch >= 'a' && ch <= 'z') ||
|
||||
(ch >= 'A' && ch <= 'Z') ||
|
||||
(ch >= '0' && ch < '9') ||
|
||||
ch == '-' || ch == '.') {
|
||||
return false
|
||||
}
|
||||
}
|
||||
return true
|
||||
}
|
||||
|
||||
func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) {
|
||||
switch value.(type) {
|
||||
case string:
|
||||
if needsQuoting(value.(string)) {
|
||||
fmt.Fprintf(b, "%v=%s ", key, value)
|
||||
} else {
|
||||
fmt.Fprintf(b, "%v=%q ", key, value)
|
||||
}
|
||||
case error:
|
||||
if needsQuoting(value.(error).Error()) {
|
||||
fmt.Fprintf(b, "%v=%s ", key, value)
|
||||
} else {
|
||||
fmt.Fprintf(b, "%v=%q ", key, value)
|
||||
}
|
||||
default:
|
||||
fmt.Fprintf(b, "%v=%v ", key, value)
|
||||
}
|
||||
}
|
File diff suppressed because it is too large
|
@ -1,292 +0,0 @@
|
|||
// Package quantile computes approximate quantiles over an unbounded data
|
||||
// stream within low memory and CPU bounds.
|
||||
//
|
||||
// A small amount of accuracy is traded to achieve the above properties.
|
||||
//
|
||||
// Multiple streams can be merged before calling Query to generate a single set
|
||||
// of results. This is meaningful when the streams represent the same type of
|
||||
// data. See Merge and Samples.
|
||||
//
|
||||
// For more detailed information about the algorithm used, see:
|
||||
//
|
||||
// Effective Computation of Biased Quantiles over Data Streams
|
||||
//
|
||||
// http://www.cs.rutgers.edu/~muthu/bquant.pdf
|
||||
package quantile
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// Sample holds an observed value and meta information for compression. JSON
|
||||
// tags have been added for convenience.
|
||||
type Sample struct {
|
||||
Value float64 `json:",string"`
|
||||
Width float64 `json:",string"`
|
||||
Delta float64 `json:",string"`
|
||||
}
|
||||
|
||||
// Samples represents a slice of samples. It implements sort.Interface.
|
||||
type Samples []Sample
|
||||
|
||||
func (a Samples) Len() int { return len(a) }
|
||||
func (a Samples) Less(i, j int) bool { return a[i].Value < a[j].Value }
|
||||
func (a Samples) Swap(i, j int) { a[i], a[j] = a[j], a[i] }
|
||||
|
||||
type invariant func(s *stream, r float64) float64
|
||||
|
||||
// NewLowBiased returns an initialized Stream for low-biased quantiles
|
||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
||||
// error guarantees can still be given even for the lower ranks of the data
|
||||
// distribution.
|
||||
//
|
||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
||||
// returned by a query is guaranteed to be within (1±Epsilon)*Quantile.
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
||||
// properties.
|
||||
func NewLowBiased(epsilon float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
return 2 * epsilon * r
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// NewHighBiased returns an initialized Stream for high-biased quantiles
|
||||
// (e.g. 0.01, 0.1, 0.5) where the needed quantiles are not known a priori, but
|
||||
// error guarantees can still be given even for the higher ranks of the data
|
||||
// distribution.
|
||||
//
|
||||
// The provided epsilon is a relative error, i.e. the true quantile of a value
|
||||
// returned by a query is guaranteed to be within 1-(1±Epsilon)*(1-Quantile).
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error
|
||||
// properties.
|
||||
func NewHighBiased(epsilon float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
return 2 * epsilon * (s.n - r)
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// NewTargeted returns an initialized Stream concerned with a particular set of
|
||||
// quantile values that are supplied a priori. Knowing these a priori reduces
|
||||
// space and computation time. The targets map maps the desired quantiles to
|
||||
// their absolute errors, i.e. the true quantile of a value returned by a query
|
||||
// is guaranteed to be within (Quantile±Epsilon).
|
||||
//
|
||||
// See http://www.cs.rutgers.edu/~muthu/bquant.pdf for time, space, and error properties.
|
||||
func NewTargeted(targets map[float64]float64) *Stream {
|
||||
ƒ := func(s *stream, r float64) float64 {
|
||||
var m = math.MaxFloat64
|
||||
var f float64
|
||||
for quantile, epsilon := range targets {
|
||||
if quantile*s.n <= r {
|
||||
f = (2 * epsilon * r) / quantile
|
||||
} else {
|
||||
f = (2 * epsilon * (s.n - r)) / (1 - quantile)
|
||||
}
|
||||
if f < m {
|
||||
m = f
|
||||
}
|
||||
}
|
||||
return m
|
||||
}
|
||||
return newStream(ƒ)
|
||||
}
|
||||
|
||||
// Stream computes quantiles for a stream of float64s. It is not thread-safe by
|
||||
// design. Take care when using across multiple goroutines.
|
||||
type Stream struct {
|
||||
*stream
|
||||
b Samples
|
||||
sorted bool
|
||||
}
|
||||
|
||||
func newStream(ƒ invariant) *Stream {
|
||||
x := &stream{ƒ: ƒ}
|
||||
return &Stream{x, make(Samples, 0, 500), true}
|
||||
}
|
||||
|
||||
// Insert inserts v into the stream.
|
||||
func (s *Stream) Insert(v float64) {
|
||||
s.insert(Sample{Value: v, Width: 1})
|
||||
}
|
||||
|
||||
func (s *Stream) insert(sample Sample) {
|
||||
s.b = append(s.b, sample)
|
||||
s.sorted = false
|
||||
if len(s.b) == cap(s.b) {
|
||||
s.flush()
|
||||
}
|
||||
}
|
||||
|
||||
// Query returns the computed qth percentiles value. If s was created with
|
||||
// NewTargeted, and q is not in the set of quantiles provided a priori, Query
|
||||
// will return an unspecified result.
|
||||
func (s *Stream) Query(q float64) float64 {
|
||||
if !s.flushed() {
|
||||
// Fast path when there hasn't been enough data for a flush;
|
||||
// this also yields better accuracy for small sets of data.
|
||||
l := len(s.b)
|
||||
if l == 0 {
|
||||
return 0
|
||||
}
|
||||
i := int(float64(l) * q)
|
||||
if i > 0 {
|
||||
i -= 1
|
||||
}
|
||||
s.maybeSort()
|
||||
return s.b[i].Value
|
||||
}
|
||||
s.flush()
|
||||
return s.stream.query(q)
|
||||
}
|
||||
|
||||
// Merge merges samples into the underlying streams samples. This is handy when
|
||||
// merging multiple streams from separate threads, database shards, etc.
|
||||
//
|
||||
// ATTENTION: This method is broken and does not yield correct results. The
|
||||
// underlying algorithm is not capable of merging streams correctly.
|
||||
func (s *Stream) Merge(samples Samples) {
|
||||
sort.Sort(samples)
|
||||
s.stream.merge(samples)
|
||||
}
|
||||
|
||||
// Reset reinitializes and clears the list reusing the samples buffer memory.
|
||||
func (s *Stream) Reset() {
|
||||
s.stream.reset()
|
||||
s.b = s.b[:0]
|
||||
}
|
||||
|
||||
// Samples returns stream samples held by s.
|
||||
func (s *Stream) Samples() Samples {
|
||||
if !s.flushed() {
|
||||
return s.b
|
||||
}
|
||||
s.flush()
|
||||
return s.stream.samples()
|
||||
}
|
||||
|
||||
// Count returns the total number of samples observed in the stream
|
||||
// since initialization.
|
||||
func (s *Stream) Count() int {
|
||||
return len(s.b) + s.stream.count()
|
||||
}
|
||||
|
||||
func (s *Stream) flush() {
|
||||
s.maybeSort()
|
||||
s.stream.merge(s.b)
|
||||
s.b = s.b[:0]
|
||||
}
|
||||
|
||||
func (s *Stream) maybeSort() {
|
||||
if !s.sorted {
|
||||
s.sorted = true
|
||||
sort.Sort(s.b)
|
||||
}
|
||||
}
|
||||
|
||||
func (s *Stream) flushed() bool {
|
||||
return len(s.stream.l) > 0
|
||||
}
|
||||
|
||||
type stream struct {
|
||||
n float64
|
||||
l []Sample
|
||||
ƒ invariant
|
||||
}
|
||||
|
||||
func (s *stream) reset() {
|
||||
s.l = s.l[:0]
|
||||
s.n = 0
|
||||
}
|
||||
|
||||
func (s *stream) insert(v float64) {
|
||||
s.merge(Samples{{v, 1, 0}})
|
||||
}
|
||||
|
||||
func (s *stream) merge(samples Samples) {
|
||||
// TODO(beorn7): This tries to merge not only individual samples, but
|
||||
// whole summaries. The paper doesn't mention merging summaries at
|
||||
// all. Unittests show that the merging is inaccurate. Find out how to
|
||||
// do merges properly.
|
||||
var r float64
|
||||
i := 0
|
||||
for _, sample := range samples {
|
||||
for ; i < len(s.l); i++ {
|
||||
c := s.l[i]
|
||||
if c.Value > sample.Value {
|
||||
// Insert at position i.
|
||||
s.l = append(s.l, Sample{})
|
||||
copy(s.l[i+1:], s.l[i:])
|
||||
s.l[i] = Sample{
|
||||
sample.Value,
|
||||
sample.Width,
|
||||
math.Max(sample.Delta, math.Floor(s.ƒ(s, r))-1),
|
||||
// TODO(beorn7): How to calculate delta correctly?
|
||||
}
|
||||
i++
|
||||
goto inserted
|
||||
}
|
||||
r += c.Width
|
||||
}
|
||||
s.l = append(s.l, Sample{sample.Value, sample.Width, 0})
|
||||
i++
|
||||
inserted:
|
||||
s.n += sample.Width
|
||||
r += sample.Width
|
||||
}
|
||||
s.compress()
|
||||
}
|
||||
|
||||
func (s *stream) count() int {
|
||||
return int(s.n)
|
||||
}
|
||||
|
||||
func (s *stream) query(q float64) float64 {
|
||||
t := math.Ceil(q * s.n)
|
||||
t += math.Ceil(s.ƒ(s, t) / 2)
|
||||
p := s.l[0]
|
||||
var r float64
|
||||
for _, c := range s.l[1:] {
|
||||
r += p.Width
|
||||
if r+c.Width+c.Delta > t {
|
||||
return p.Value
|
||||
}
|
||||
p = c
|
||||
}
|
||||
return p.Value
|
||||
}
|
||||
|
||||
func (s *stream) compress() {
|
||||
if len(s.l) < 2 {
|
||||
return
|
||||
}
|
||||
x := s.l[len(s.l)-1]
|
||||
xi := len(s.l) - 1
|
||||
r := s.n - 1 - x.Width
|
||||
|
||||
for i := len(s.l) - 2; i >= 0; i-- {
|
||||
c := s.l[i]
|
||||
if c.Width+x.Width+x.Delta <= s.ƒ(s, r) {
|
||||
x.Width += c.Width
|
||||
s.l[xi] = x
|
||||
// Remove element at i.
|
||||
copy(s.l[i:], s.l[i+1:])
|
||||
s.l = s.l[:len(s.l)-1]
|
||||
xi -= 1
|
||||
} else {
|
||||
x = c
|
||||
xi = i
|
||||
}
|
||||
r -= c.Width
|
||||
}
|
||||
}
|
||||
|
||||
func (s *stream) samples() Samples {
|
||||
samples := make(Samples, len(s.l))
|
||||
copy(samples, s.l)
|
||||
return samples
|
||||
}
|
|
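For reviewers unfamiliar with the quantile package being dropped: `NewTargeted` takes the quantiles you care about up front, trading the stated absolute error bound for less memory and CPU. A usage sketch with arbitrary example targets and fake data:

```go
package main

import (
    "fmt"
    "math/rand"

    "github.com/beorn7/perks/quantile"
)

func main() {
    // Desired quantiles mapped to their allowed absolute error (example values).
    targets := map[float64]float64{
        0.50: 0.05,  // median, ±0.05
        0.90: 0.01,  // 90th percentile, ±0.01
        0.99: 0.001, // 99th percentile, ±0.001
    }
    q := quantile.NewTargeted(targets)

    // Insert some fake latency-like samples.
    for i := 0; i < 10000; i++ {
        q.Insert(rand.NormFloat64()*10 + 100)
    }

    for _, p := range []float64{0.50, 0.90, 0.99} {
        fmt.Printf("p%02.0f = %.2f\n", p*100, q.Query(p))
    }
    fmt.Println("count =", q.Count())
}
```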
@ -1,22 +0,0 @@
The MIT License

Copyright (c) 2014 Benedikt Lang <github at benediktlang.de>

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@ -1,142 +0,0 @@
|
|||
semver for golang [](https://drone.io/github.com/blang/semver/latest) [](https://godoc.org/github.com/blang/semver) [](https://coveralls.io/r/blang/semver?branch=master)
|
||||
======
|
||||
|
||||
semver is a [Semantic Versioning](http://semver.org/) library written in golang. It fully covers spec version `2.0.0`.
|
||||
|
||||
Usage
|
||||
-----
|
||||
```bash
|
||||
$ go get github.com/blang/semver
|
||||
```
|
||||
Note: Always vendor your dependencies or fix on a specific version tag.
|
||||
|
||||
```go
|
||||
import "github.com/blang/semver"
|
||||
v1, err := semver.Make("1.0.0-beta")
|
||||
v2, err := semver.Make("2.0.0-beta")
|
||||
v1.Compare(v2)
|
||||
```
|
||||
|
||||
Also check the [GoDocs](http://godoc.org/github.com/blang/semver).
|
||||
|
||||
Why should I use this lib?
|
||||
-----
|
||||
|
||||
- Fully spec compatible
|
||||
- No reflection
|
||||
- No regex
|
||||
- Fully tested (Coverage >99%)
|
||||
- Readable parsing/validation errors
|
||||
- Fast (See [Benchmarks](#benchmarks))
|
||||
- Only Stdlib
|
||||
- Uses values instead of pointers
|
||||
- Many features, see below
|
||||
|
||||
|
||||
Features
|
||||
-----
|
||||
|
||||
- Parsing and validation at all levels
|
||||
- Comparator-like comparisons
|
||||
- Compare Helper Methods
|
||||
- InPlace manipulation
|
||||
- Sortable (implements sort.Interface)
|
||||
- database/sql compatible (sql.Scanner/Valuer)
|
||||
- encoding/json compatible (json.Marshaler/Unmarshaler)
|
||||
|
||||
|
||||
Example
|
||||
-----
|
||||
|
||||
Have a look at full examples in [examples/main.go](examples/main.go)
|
||||
|
||||
```go
|
||||
import "github.com/blang/semver"
|
||||
|
||||
v, err := semver.Make("0.0.1-alpha.preview+123.github")
|
||||
fmt.Printf("Major: %d\n", v.Major)
|
||||
fmt.Printf("Minor: %d\n", v.Minor)
|
||||
fmt.Printf("Patch: %d\n", v.Patch)
|
||||
fmt.Printf("Pre: %s\n", v.Pre)
|
||||
fmt.Printf("Build: %s\n", v.Build)
|
||||
|
||||
// Prerelease versions array
|
||||
if len(v.Pre) > 0 {
|
||||
fmt.Println("Prerelease versions:")
|
||||
for i, pre := range v.Pre {
|
||||
fmt.Printf("%d: %q\n", i, pre)
|
||||
}
|
||||
}
|
||||
|
||||
// Build meta data array
|
||||
if len(v.Build) > 0 {
|
||||
fmt.Println("Build meta data:")
|
||||
for i, build := range v.Build {
|
||||
fmt.Printf("%d: %q\n", i, build)
|
||||
}
|
||||
}
|
||||
|
||||
v001, err := semver.Make("0.0.1")
|
||||
// Compare using helpers: v.GT(v2), v.LT, v.GTE, v.LTE
|
||||
v001.GT(v) == true
|
||||
v.LT(v001) == true
|
||||
v.GTE(v) == true
|
||||
v.LTE(v) == true
|
||||
|
||||
// Or use v.Compare(v2) for comparisons (-1, 0, 1):
|
||||
v001.Compare(v) == 1
|
||||
v.Compare(v001) == -1
|
||||
v.Compare(v) == 0
|
||||
|
||||
// Manipulate Version in place:
|
||||
v.Pre[0], err = semver.NewPRVersion("beta")
|
||||
if err != nil {
|
||||
fmt.Printf("Error parsing pre release version: %q", err)
|
||||
}
|
||||
|
||||
fmt.Println("\nValidate versions:")
|
||||
v.Build[0] = "?"
|
||||
|
||||
err = v.Validate()
|
||||
if err != nil {
|
||||
fmt.Printf("Validation failed: %s\n", err)
|
||||
}
|
||||
```
|
||||
|
||||
Benchmarks
|
||||
-----
|
||||
|
||||
BenchmarkParseSimple 5000000 328 ns/op 49 B/op 1 allocs/op
|
||||
BenchmarkParseComplex 1000000 2105 ns/op 263 B/op 7 allocs/op
|
||||
BenchmarkParseAverage 1000000 1301 ns/op 168 B/op 4 allocs/op
|
||||
BenchmarkStringSimple 10000000 130 ns/op 5 B/op 1 allocs/op
|
||||
BenchmarkStringLarger 5000000 280 ns/op 32 B/op 2 allocs/op
|
||||
BenchmarkStringComplex 3000000 512 ns/op 80 B/op 3 allocs/op
|
||||
BenchmarkStringAverage 5000000 387 ns/op 47 B/op 2 allocs/op
|
||||
BenchmarkValidateSimple 500000000 7.92 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkValidateComplex 2000000 923 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkValidateAverage 5000000 452 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkCompareSimple 100000000 11.2 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkCompareComplex 50000000 40.9 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkCompareAverage 50000000 43.8 ns/op 0 B/op 0 allocs/op
|
||||
BenchmarkSort 5000000 436 ns/op 259 B/op 2 allocs/op
|
||||
|
||||
See benchmark cases at [semver_test.go](semver_test.go)
|
||||
|
||||
|
||||
Motivation
|
||||
-----
|
||||
|
||||
I simply couldn't find any lib supporting the full spec. Others were just wrong or used reflection and regex which i don't like.
|
||||
|
||||
|
||||
Contribution
|
||||
-----
|
||||
|
||||
Feel free to make a pull request. For bigger changes create a issue first to discuss about it.
|
||||
|
||||
|
||||
License
|
||||
-----
|
||||
|
||||
See [LICENSE](LICENSE) file.
|
|
@ -1,23 +0,0 @@
package semver

import (
    "encoding/json"
)

// MarshalJSON implements the encoding/json.Marshaler interface.
func (v Version) MarshalJSON() ([]byte, error) {
    return json.Marshal(v.String())
}

// UnmarshalJSON implements the encoding/json.Unmarshaler interface.
func (v *Version) UnmarshalJSON(data []byte) (err error) {
    var versionString string

    if err = json.Unmarshal(data, &versionString); err != nil {
        return
    }

    *v, err = Parse(versionString)

    return
}
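Because `MarshalJSON`/`UnmarshalJSON` above serialize a `Version` as its string form, a struct field of type `semver.Version` round-trips through JSON as a plain string such as `"0.1.0-alpha.1"`. A sketch; the `release` struct and its values are made up:

```go
package main

import (
    "encoding/json"
    "fmt"

    "github.com/blang/semver"
)

type release struct {
    Name    string         `json:"name"`
    Version semver.Version `json:"version"`
}

func main() {
    r := release{Name: "example", Version: semver.MustParse("0.1.0-alpha.1")}

    out, _ := json.Marshal(r)
    fmt.Println(string(out)) // {"name":"example","version":"0.1.0-alpha.1"}

    var back release
    if err := json.Unmarshal(out, &back); err != nil {
        panic(err)
    }
    fmt.Println(back.Version.String())
}
```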
@ -1,395 +0,0 @@
|
|||
package semver
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
numbers string = "0123456789"
|
||||
alphas = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ-"
|
||||
alphanum = alphas + numbers
|
||||
)
|
||||
|
||||
// SpecVersion is the latest fully supported spec version of semver
|
||||
var SpecVersion = Version{
|
||||
Major: 2,
|
||||
Minor: 0,
|
||||
Patch: 0,
|
||||
}
|
||||
|
||||
// Version represents a semver compatible version
|
||||
type Version struct {
|
||||
Major uint64
|
||||
Minor uint64
|
||||
Patch uint64
|
||||
Pre []PRVersion
|
||||
Build []string //No Precendence
|
||||
}
|
||||
|
||||
// Version to string
|
||||
func (v Version) String() string {
|
||||
b := make([]byte, 0, 5)
|
||||
b = strconv.AppendUint(b, v.Major, 10)
|
||||
b = append(b, '.')
|
||||
b = strconv.AppendUint(b, v.Minor, 10)
|
||||
b = append(b, '.')
|
||||
b = strconv.AppendUint(b, v.Patch, 10)
|
||||
|
||||
if len(v.Pre) > 0 {
|
||||
b = append(b, '-')
|
||||
b = append(b, v.Pre[0].String()...)
|
||||
|
||||
for _, pre := range v.Pre[1:] {
|
||||
b = append(b, '.')
|
||||
b = append(b, pre.String()...)
|
||||
}
|
||||
}
|
||||
|
||||
if len(v.Build) > 0 {
|
||||
b = append(b, '+')
|
||||
b = append(b, v.Build[0]...)
|
||||
|
||||
for _, build := range v.Build[1:] {
|
||||
b = append(b, '.')
|
||||
b = append(b, build...)
|
||||
}
|
||||
}
|
||||
|
||||
return string(b)
|
||||
}
|
||||
|
||||
// Equals checks if v is equal to o.
|
||||
func (v Version) Equals(o Version) bool {
|
||||
return (v.Compare(o) == 0)
|
||||
}
|
||||
|
||||
// EQ checks if v is equal to o.
|
||||
func (v Version) EQ(o Version) bool {
|
||||
return (v.Compare(o) == 0)
|
||||
}
|
||||
|
||||
// NE checks if v is not equal to o.
|
||||
func (v Version) NE(o Version) bool {
|
||||
return (v.Compare(o) != 0)
|
||||
}
|
||||
|
||||
// GT checks if v is greater than o.
|
||||
func (v Version) GT(o Version) bool {
|
||||
return (v.Compare(o) == 1)
|
||||
}
|
||||
|
||||
// GTE checks if v is greater than or equal to o.
|
||||
func (v Version) GTE(o Version) bool {
|
||||
return (v.Compare(o) >= 0)
|
||||
}
|
||||
|
||||
// GE checks if v is greater than or equal to o.
|
||||
func (v Version) GE(o Version) bool {
|
||||
return (v.Compare(o) >= 0)
|
||||
}
|
||||
|
||||
// LT checks if v is less than o.
|
||||
func (v Version) LT(o Version) bool {
|
||||
return (v.Compare(o) == -1)
|
||||
}
|
||||
|
||||
// LTE checks if v is less than or equal to o.
|
||||
func (v Version) LTE(o Version) bool {
|
||||
return (v.Compare(o) <= 0)
|
||||
}
|
||||
|
||||
// LE checks if v is less than or equal to o.
|
||||
func (v Version) LE(o Version) bool {
|
||||
return (v.Compare(o) <= 0)
|
||||
}
|
||||
|
||||
// Compare compares Versions v to o:
|
||||
// -1 == v is less than o
|
||||
// 0 == v is equal to o
|
||||
// 1 == v is greater than o
|
||||
func (v Version) Compare(o Version) int {
|
||||
if v.Major != o.Major {
|
||||
if v.Major > o.Major {
|
||||
return 1
|
||||
}
|
||||
return -1
|
||||
}
|
||||
if v.Minor != o.Minor {
|
||||
if v.Minor > o.Minor {
|
||||
return 1
|
||||
}
|
||||
return -1
|
||||
}
|
||||
if v.Patch != o.Patch {
|
||||
if v.Patch > o.Patch {
|
||||
return 1
|
||||
}
|
||||
return -1
|
||||
}
|
||||
|
||||
// Quick comparison if a version has no prerelease versions
|
||||
if len(v.Pre) == 0 && len(o.Pre) == 0 {
|
||||
return 0
|
||||
} else if len(v.Pre) == 0 && len(o.Pre) > 0 {
|
||||
return 1
|
||||
} else if len(v.Pre) > 0 && len(o.Pre) == 0 {
|
||||
return -1
|
||||
}
|
||||
|
||||
i := 0
|
||||
for ; i < len(v.Pre) && i < len(o.Pre); i++ {
|
||||
if comp := v.Pre[i].Compare(o.Pre[i]); comp == 0 {
|
||||
continue
|
||||
} else if comp == 1 {
|
||||
return 1
|
||||
} else {
|
||||
return -1
|
||||
}
|
||||
}
|
||||
|
||||
// If all compared prerelease identifiers are equal but one version has additional identifiers, that one is greater
|
||||
if i == len(v.Pre) && i == len(o.Pre) {
|
||||
return 0
|
||||
} else if i == len(v.Pre) && i < len(o.Pre) {
|
||||
return -1
|
||||
} else {
|
||||
return 1
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
// Validate validates v and returns error in case
|
||||
func (v Version) Validate() error {
|
||||
// Major, Minor, Patch already validated using uint64
|
||||
|
||||
for _, pre := range v.Pre {
|
||||
if !pre.IsNum { //Numeric prerelease versions already uint64
|
||||
if len(pre.VersionStr) == 0 {
|
||||
return fmt.Errorf("Prerelease can not be empty %q", pre.VersionStr)
|
||||
}
|
||||
if !containsOnly(pre.VersionStr, alphanum) {
|
||||
return fmt.Errorf("Invalid character(s) found in prerelease %q", pre.VersionStr)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
for _, build := range v.Build {
|
||||
if len(build) == 0 {
|
||||
return fmt.Errorf("Build meta data can not be empty %q", build)
|
||||
}
|
||||
if !containsOnly(build, alphanum) {
|
||||
return fmt.Errorf("Invalid character(s) found in build meta data %q", build)
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// New is an alias for Parse and returns a pointer, parses version string and returns a validated Version or error
|
||||
func New(s string) (vp *Version, err error) {
|
||||
v, err := Parse(s)
|
||||
vp = &v
|
||||
return
|
||||
}
|
||||
|
||||
// Make is an alias for Parse, parses version string and returns a validated Version or error
|
||||
func Make(s string) (Version, error) {
|
||||
return Parse(s)
|
||||
}
|
||||
|
||||
// Parse parses version string and returns a validated Version or error
|
||||
func Parse(s string) (Version, error) {
|
||||
if len(s) == 0 {
|
||||
return Version{}, errors.New("Version string empty")
|
||||
}
|
||||
|
||||
// Split into major.minor.(patch+pr+meta)
|
||||
parts := strings.SplitN(s, ".", 3)
|
||||
if len(parts) != 3 {
|
||||
return Version{}, errors.New("No Major.Minor.Patch elements found")
|
||||
}
|
||||
|
||||
// Major
|
||||
if !containsOnly(parts[0], numbers) {
|
||||
return Version{}, fmt.Errorf("Invalid character(s) found in major number %q", parts[0])
|
||||
}
|
||||
if hasLeadingZeroes(parts[0]) {
|
||||
return Version{}, fmt.Errorf("Major number must not contain leading zeroes %q", parts[0])
|
||||
}
|
||||
major, err := strconv.ParseUint(parts[0], 10, 64)
|
||||
if err != nil {
|
||||
return Version{}, err
|
||||
}
|
||||
|
||||
// Minor
|
||||
if !containsOnly(parts[1], numbers) {
|
||||
return Version{}, fmt.Errorf("Invalid character(s) found in minor number %q", parts[1])
|
||||
}
|
||||
if hasLeadingZeroes(parts[1]) {
|
||||
return Version{}, fmt.Errorf("Minor number must not contain leading zeroes %q", parts[1])
|
||||
}
|
||||
minor, err := strconv.ParseUint(parts[1], 10, 64)
|
||||
if err != nil {
|
||||
return Version{}, err
|
||||
}
|
||||
|
||||
v := Version{}
|
||||
v.Major = major
|
||||
v.Minor = minor
|
||||
|
||||
var build, prerelease []string
|
||||
patchStr := parts[2]
|
||||
|
||||
if buildIndex := strings.IndexRune(patchStr, '+'); buildIndex != -1 {
|
||||
build = strings.Split(patchStr[buildIndex+1:], ".")
|
||||
patchStr = patchStr[:buildIndex]
|
||||
}
|
||||
|
||||
if preIndex := strings.IndexRune(patchStr, '-'); preIndex != -1 {
|
||||
prerelease = strings.Split(patchStr[preIndex+1:], ".")
|
||||
patchStr = patchStr[:preIndex]
|
||||
}
|
||||
|
||||
if !containsOnly(patchStr, numbers) {
|
||||
return Version{}, fmt.Errorf("Invalid character(s) found in patch number %q", patchStr)
|
||||
}
|
||||
if hasLeadingZeroes(patchStr) {
|
||||
return Version{}, fmt.Errorf("Patch number must not contain leading zeroes %q", patchStr)
|
||||
}
|
||||
patch, err := strconv.ParseUint(patchStr, 10, 64)
|
||||
if err != nil {
|
||||
return Version{}, err
|
||||
}
|
||||
|
||||
v.Patch = patch
|
||||
|
||||
// Prerelease
|
||||
for _, prstr := range prerelease {
|
||||
parsedPR, err := NewPRVersion(prstr)
|
||||
if err != nil {
|
||||
return Version{}, err
|
||||
}
|
||||
v.Pre = append(v.Pre, parsedPR)
|
||||
}
|
||||
|
||||
// Build meta data
|
||||
for _, str := range build {
|
||||
if len(str) == 0 {
|
||||
return Version{}, errors.New("Build meta data is empty")
|
||||
}
|
||||
if !containsOnly(str, alphanum) {
|
||||
return Version{}, fmt.Errorf("Invalid character(s) found in build meta data %q", str)
|
||||
}
|
||||
v.Build = append(v.Build, str)
|
||||
}
|
||||
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// MustParse is like Parse but panics if the version cannot be parsed.
|
||||
func MustParse(s string) Version {
|
||||
v, err := Parse(s)
|
||||
if err != nil {
|
||||
panic(`semver: Parse(` + s + `): ` + err.Error())
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// PRVersion represents a PreRelease Version
|
||||
type PRVersion struct {
|
||||
VersionStr string
|
||||
VersionNum uint64
|
||||
IsNum bool
|
||||
}
|
||||
|
||||
// NewPRVersion creates a new valid prerelease version
|
||||
func NewPRVersion(s string) (PRVersion, error) {
|
||||
if len(s) == 0 {
|
||||
return PRVersion{}, errors.New("Prerelease is empty")
|
||||
}
|
||||
v := PRVersion{}
|
||||
if containsOnly(s, numbers) {
|
||||
if hasLeadingZeroes(s) {
|
||||
return PRVersion{}, fmt.Errorf("Numeric PreRelease version must not contain leading zeroes %q", s)
|
||||
}
|
||||
num, err := strconv.ParseUint(s, 10, 64)
|
||||
|
||||
// Might never be hit, but just in case
|
||||
if err != nil {
|
||||
return PRVersion{}, err
|
||||
}
|
||||
v.VersionNum = num
|
||||
v.IsNum = true
|
||||
} else if containsOnly(s, alphanum) {
|
||||
v.VersionStr = s
|
||||
v.IsNum = false
|
||||
} else {
|
||||
return PRVersion{}, fmt.Errorf("Invalid character(s) found in prerelease %q", s)
|
||||
}
|
||||
return v, nil
|
||||
}
|
||||
|
||||
// IsNumeric checks if prerelease-version is numeric
|
||||
func (v PRVersion) IsNumeric() bool {
|
||||
return v.IsNum
|
||||
}
|
||||
|
||||
// Compare compares two PreRelease Versions v and o:
|
||||
// -1 == v is less than o
|
||||
// 0 == v is equal to o
|
||||
// 1 == v is greater than o
|
||||
func (v PRVersion) Compare(o PRVersion) int {
|
||||
if v.IsNum && !o.IsNum {
|
||||
return -1
|
||||
} else if !v.IsNum && o.IsNum {
|
||||
return 1
|
||||
} else if v.IsNum && o.IsNum {
|
||||
if v.VersionNum == o.VersionNum {
|
||||
return 0
|
||||
} else if v.VersionNum > o.VersionNum {
|
||||
return 1
|
||||
} else {
|
||||
return -1
|
||||
}
|
||||
} else { // both are Alphas
|
||||
if v.VersionStr == o.VersionStr {
|
||||
return 0
|
||||
} else if v.VersionStr > o.VersionStr {
|
||||
return 1
|
||||
} else {
|
||||
return -1
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// PreRelease version to string
|
||||
func (v PRVersion) String() string {
|
||||
if v.IsNum {
|
||||
return strconv.FormatUint(v.VersionNum, 10)
|
||||
}
|
||||
return v.VersionStr
|
||||
}
|
||||
|
||||
func containsOnly(s string, set string) bool {
|
||||
return strings.IndexFunc(s, func(r rune) bool {
|
||||
return !strings.ContainsRune(set, r)
|
||||
}) == -1
|
||||
}
|
||||
|
||||
func hasLeadingZeroes(s string) bool {
|
||||
return len(s) > 1 && s[0] == '0'
|
||||
}
|
||||
|
||||
// NewBuildVersion creates a new valid build version
|
||||
func NewBuildVersion(s string) (string, error) {
|
||||
if len(s) == 0 {
|
||||
return "", errors.New("Buildversion is empty")
|
||||
}
|
||||
if !containsOnly(s, alphanum) {
|
||||
return "", fmt.Errorf("Invalid character(s) found in build meta data %q", s)
|
||||
}
|
||||
return s, nil
|
||||
}
|
|
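The `Compare` and `PRVersion.Compare` logic above encodes SemVer 2.0.0 precedence: numeric prerelease identifiers rank below alphanumeric ones, and a release outranks any of its prereleases. A small check of those rules, as a sketch:

```go
package main

import (
    "fmt"

    "github.com/blang/semver"
)

func main() {
    a := semver.MustParse("1.0.0-alpha.1")
    b := semver.MustParse("1.0.0-alpha.beta") // alphanumeric identifier > numeric identifier
    c := semver.MustParse("1.0.0")            // a release > any of its prereleases

    fmt.Println(a.LT(b)) // true
    fmt.Println(b.LT(c)) // true
    fmt.Println(c.GT(a)) // true
}
```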
@ -1,28 +0,0 @@
package semver

import (
    "sort"
)

// Versions represents multiple versions.
type Versions []Version

// Len returns length of version collection
func (s Versions) Len() int {
    return len(s)
}

// Swap swaps two versions inside the collection by its indices
func (s Versions) Swap(i, j int) {
    s[i], s[j] = s[j], s[i]
}

// Less checks if version at index i is less than version at index j
func (s Versions) Less(i, j int) bool {
    return s[i].LT(s[j])
}

// Sort sorts a slice of versions
func Sort(versions []Version) {
    sort.Sort(Versions(versions))
}
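Since `Versions` implements `sort.Interface`, ordering a slice by semantic-version precedence is a single `semver.Sort` call; a sketch:

```go
package main

import (
    "fmt"

    "github.com/blang/semver"
)

func main() {
    vs := []semver.Version{
        semver.MustParse("1.10.0"),
        semver.MustParse("1.2.0"),
        semver.MustParse("1.2.0-rc.1"),
    }
    semver.Sort(vs)
    fmt.Println(vs) // [1.2.0-rc.1 1.2.0 1.10.0]
}
```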
@ -1,30 +0,0 @@
package semver

import (
    "database/sql/driver"
    "fmt"
)

// Scan implements the database/sql.Scanner interface.
func (v *Version) Scan(src interface{}) (err error) {
    var str string
    switch src := src.(type) {
    case string:
        str = src
    case []byte:
        str = string(src)
    default:
        return fmt.Errorf("Version.Scan: cannot convert %T to string.", src)
    }

    if t, err := Parse(str); err == nil {
        *v = t
    }

    return
}

// Value implements the database/sql/driver.Valuer interface.
func (v Version) Value() (driver.Value, error) {
    return v.String(), nil
}
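The `Scan`/`Value` pair above lets a `Version` live in a text column via `database/sql`; the round-trip can be shown without a real database. A sketch:

```go
package main

import (
    "fmt"

    "github.com/blang/semver"
)

func main() {
    v := semver.MustParse("2.3.4+build.7")

    // What the driver would write to the column.
    val, _ := v.Value()
    fmt.Println(val) // 2.3.4+build.7

    // What scanning a []byte column value back into a Version looks like.
    var out semver.Version
    if err := out.Scan([]byte("2.3.4+build.7")); err != nil {
        panic(err)
    }
    fmt.Println(out.String())
}
```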
@ -1,18 +0,0 @@
language: go
sudo: false

go:
- 1.1.2
- 1.2.2
- 1.3.3
- 1.4.2
- 1.5.1
- tip

matrix:
  allow_failures:
  - go: tip

script:
- go vet ./...
- go test -v ./...
@ -1,21 +0,0 @@
Copyright (C) 2013 Jeremy Saenz
All Rights Reserved.

MIT LICENSE

Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in
the Software without restriction, including without limitation the rights to
use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
the Software, and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@ -1,434 +0,0 @@
|
|||
[](http://gocover.io/github.com/codegangsta/cli)
|
||||
[](https://travis-ci.org/codegangsta/cli)
|
||||
[](https://godoc.org/github.com/codegangsta/cli)
|
||||
[](https://codebeat.co/projects/github-com-codegangsta-cli)
|
||||
|
||||
# cli.go
|
||||
|
||||
`cli.go` is simple, fast, and fun package for building command line apps in Go. The goal is to enable developers to write fast and distributable command line applications in an expressive way.
|
||||
|
||||
## Overview
|
||||
|
||||
Command line apps are usually so tiny that there is absolutely no reason why your code should *not* be self-documenting. Things like generating help text and parsing command flags/options should not hinder productivity when writing a command line app.
|
||||
|
||||
**This is where `cli.go` comes into play.** `cli.go` makes command line programming fun, organized, and expressive!
|
||||
|
||||
## Installation
|
||||
|
||||
Make sure you have a working Go environment (go 1.1+ is *required*). [See the install instructions](http://golang.org/doc/install.html).
|
||||
|
||||
To install `cli.go`, simply run:
|
||||
```
|
||||
$ go get github.com/codegangsta/cli
|
||||
```
|
||||
|
||||
Make sure your `PATH` includes to the `$GOPATH/bin` directory so your commands can be easily used:
|
||||
```
|
||||
export PATH=$PATH:$GOPATH/bin
|
||||
```
|
||||
|
||||
## Getting Started
|
||||
|
||||
One of the philosophies behind `cli.go` is that an API should be playful and full of discovery. So a `cli.go` app can be as little as one line of code in `main()`.
|
||||
|
||||
``` go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"github.com/codegangsta/cli"
|
||||
)
|
||||
|
||||
func main() {
|
||||
cli.NewApp().Run(os.Args)
|
||||
}
|
||||
```
|
||||
|
||||
This app will run and show help text, but is not very useful. Let's give an action to execute and some help documentation:
|
||||
|
||||
``` go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"github.com/codegangsta/cli"
|
||||
)
|
||||
|
||||
func main() {
|
||||
app := cli.NewApp()
|
||||
app.Name = "boom"
|
||||
app.Usage = "make an explosive entrance"
|
||||
app.Action = func(c *cli.Context) {
|
||||
println("boom! I say!")
|
||||
}
|
||||
|
||||
app.Run(os.Args)
|
||||
}
|
||||
```
|
||||
|
||||
Running this already gives you a ton of functionality, plus support for things like subcommands and flags, which are covered below.
|
||||
|
||||
## Example
|
||||
|
||||
Being a programmer can be a lonely job. Thankfully by the power of automation that is not the case! Let's create a greeter app to fend off our demons of loneliness!
|
||||
|
||||
Start by creating a directory named `greet`, and within it, add a file, `greet.go` with the following code in it:
|
||||
|
||||
``` go
|
||||
package main
|
||||
|
||||
import (
|
||||
"os"
|
||||
"github.com/codegangsta/cli"
|
||||
)
|
||||
|
||||
func main() {
|
||||
app := cli.NewApp()
|
||||
app.Name = "greet"
|
||||
app.Usage = "fight the loneliness!"
|
||||
app.Action = func(c *cli.Context) {
|
||||
println("Hello friend!")
|
||||
}
|
||||
|
||||
app.Run(os.Args)
|
||||
}
|
||||
```
|
||||
|
||||
Install our command to the `$GOPATH/bin` directory:
|
||||
|
||||
```
|
||||
$ go install
|
||||
```
|
||||
|
||||
Finally run our new command:
|
||||
|
||||
```
|
||||
$ greet
|
||||
Hello friend!
|
||||
```
|
||||
|
||||
`cli.go` also generates neat help text:
|
||||
|
||||
```
|
||||
$ greet help
|
||||
NAME:
|
||||
greet - fight the loneliness!
|
||||
|
||||
USAGE:
|
||||
greet [global options] command [command options] [arguments...]
|
||||
|
||||
VERSION:
|
||||
0.0.0
|
||||
|
||||
COMMANDS:
|
||||
help, h Shows a list of commands or help for one command
|
||||
|
||||
GLOBAL OPTIONS
|
||||
--version Shows version information
|
||||
```
|
||||
|
||||
### Arguments
|
||||
|
||||
You can lookup arguments by calling the `Args` function on `cli.Context`.
|
||||
|
||||
``` go
|
||||
...
|
||||
app.Action = func(c *cli.Context) {
|
||||
println("Hello", c.Args()[0])
|
||||
}
|
||||
...
|
||||
```
|
||||
|
||||
### Flags
|
||||
|
||||
Setting and querying flags is simple.
|
||||
|
||||
``` go
|
||||
...
|
||||
app.Flags = []cli.Flag {
|
||||
cli.StringFlag{
|
||||
Name: "lang",
|
||||
Value: "english",
|
||||
Usage: "language for the greeting",
|
||||
},
|
||||
}
|
||||
app.Action = func(c *cli.Context) {
|
||||
name := "someone"
|
||||
if c.NArg() > 0 {
|
||||
name = c.Args()[0]
|
||||
}
|
||||
if c.String("lang") == "spanish" {
|
||||
println("Hola", name)
|
||||
} else {
|
||||
println("Hello", name)
|
||||
}
|
||||
}
|
||||
...
|
||||
```
|
||||
|
||||
You can also set a destination variable for a flag, to which the content will be scanned.
|
||||
|
||||
``` go
|
||||
...
|
||||
var language string
|
||||
app.Flags = []cli.Flag {
|
||||
cli.StringFlag{
|
||||
Name: "lang",
|
||||
Value: "english",
|
||||
Usage: "language for the greeting",
|
||||
Destination: &language,
|
||||
},
|
||||
}
|
||||
app.Action = func(c *cli.Context) {
|
||||
name := "someone"
|
||||
if c.NArg() > 0 {
|
||||
name = c.Args()[0]
|
||||
}
|
||||
if language == "spanish" {
|
||||
println("Hola", name)
|
||||
} else {
|
||||
println("Hello", name)
|
||||
}
|
||||
}
|
||||
...
|
||||
```
|
||||
|
||||
See full list of flags at http://godoc.org/github.com/codegangsta/cli
|
||||
|
||||
#### Alternate Names
|
||||
|
||||
You can set alternate (or short) names for flags by providing a comma-delimited list for the `Name`. e.g.
|
||||
|
||||
``` go
|
||||
app.Flags = []cli.Flag {
|
||||
cli.StringFlag{
|
||||
Name: "lang, l",
|
||||
Value: "english",
|
||||
Usage: "language for the greeting",
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
That flag can then be set with `--lang spanish` or `-l spanish`. Note that giving two different forms of the same flag in the same command invocation is an error.
|
||||
|
||||
#### Values from the Environment
|
||||
|
||||
You can also have the default value set from the environment via `EnvVar`. e.g.
|
||||
|
||||
``` go
|
||||
app.Flags = []cli.Flag {
|
||||
cli.StringFlag{
|
||||
Name: "lang, l",
|
||||
Value: "english",
|
||||
Usage: "language for the greeting",
|
||||
EnvVar: "APP_LANG",
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
The `EnvVar` may also be given as a comma-delimited "cascade", where the first environment variable that resolves is used as the default.
|
||||
|
||||
``` go
|
||||
app.Flags = []cli.Flag {
|
||||
cli.StringFlag{
|
||||
Name: "lang, l",
|
||||
Value: "english",
|
||||
Usage: "language for the greeting",
|
||||
EnvVar: "LEGACY_COMPAT_LANG,APP_LANG,LANG",
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
#### Values from alternate input sources (YAML and others)
|
||||
|
||||
There is a separate package altsrc that adds support for getting flag values from other input sources like YAML.
|
||||
|
||||
In order to get values for a flag from an alternate input source the following code would be added to wrap an existing cli.Flag like below:
|
||||
|
||||
``` go
|
||||
altsrc.NewIntFlag(cli.IntFlag{Name: "test"})
|
||||
```
|
||||
|
||||
Initialization must also occur for these flags. Below is an example initializing getting data from a yaml file below.
|
||||
|
||||
``` go
|
||||
command.Before = altsrc.InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load"))
|
||||
```
|
||||
|
||||
The code above will use the "load" string as a flag name to get the file name of a yaml file from the cli.Context.
|
||||
It will then use that file name to initialize the yaml input source for any flags that are defined on that command.
|
||||
As a note, the "load" flag used would also have to be defined on the command flags in order for this code snippet to work.
|
||||
|
||||
Currently only YAML files are supported but developers can add support for other input sources by implementing the
|
||||
altsrc.InputSourceContext for their given sources.
|
||||
|
||||
Here is a more complete sample of a command using YAML support:
|
||||
|
||||
``` go
|
||||
command := &cli.Command{
|
||||
Name: "test-cmd",
|
||||
Aliases: []string{"tc"},
|
||||
Usage: "this is for testing",
|
||||
Description: "testing",
|
||||
Action: func(c *cli.Context) {
|
||||
// Action to run
|
||||
},
|
||||
Flags: []cli.Flag{
|
||||
NewIntFlag(cli.IntFlag{Name: "test"}),
|
||||
cli.StringFlag{Name: "load"}},
|
||||
}
|
||||
command.Before = InitInputSourceWithContext(command.Flags, NewYamlSourceFromFlagFunc("load"))
|
||||
err := command.Run(c)
|
||||
```
|
||||
|
||||
### Subcommands
|
||||
|
||||
Subcommands can be defined for a more git-like command line app.
|
||||
|
||||
```go
|
||||
...
|
||||
app.Commands = []cli.Command{
|
||||
{
|
||||
Name: "add",
|
||||
Aliases: []string{"a"},
|
||||
Usage: "add a task to the list",
|
||||
Action: func(c *cli.Context) {
|
||||
println("added task: ", c.Args().First())
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "complete",
|
||||
Aliases: []string{"c"},
|
||||
Usage: "complete a task on the list",
|
||||
Action: func(c *cli.Context) {
|
||||
println("completed task: ", c.Args().First())
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "template",
|
||||
Aliases: []string{"r"},
|
||||
Usage: "options for task templates",
|
||||
Subcommands: []cli.Command{
|
||||
{
|
||||
Name: "add",
|
||||
Usage: "add a new template",
|
||||
Action: func(c *cli.Context) {
|
||||
println("new task template: ", c.Args().First())
|
||||
},
|
||||
},
|
||||
{
|
||||
Name: "remove",
|
||||
Usage: "remove an existing template",
|
||||
Action: func(c *cli.Context) {
|
||||
println("removed task template: ", c.Args().First())
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
...
|
||||
```
|
||||
|
||||
### Subcommands categories
|
||||
|
||||
For additional organization in apps that have many subcommands, you can
|
||||
associate a category for each command to group them together in the help
|
||||
output.
|
||||
|
||||
E.g.
|
||||
|
||||
```go
|
||||
...
|
||||
app.Commands = []cli.Command{
|
||||
{
|
||||
Name: "noop",
|
||||
},
|
||||
{
|
||||
Name: "add",
|
||||
Category: "template",
|
||||
},
|
||||
{
|
||||
Name: "remove",
|
||||
Category: "template",
|
||||
},
|
||||
}
|
||||
...
|
||||
```
|
||||
|
||||
Will include:
|
||||
|
||||
```
|
||||
...
|
||||
COMMANDS:
|
||||
noop
|
||||
|
||||
Template actions:
|
||||
add
|
||||
remove
|
||||
...
|
||||
```
|
||||
|
||||
### Bash Completion
|
||||
|
||||
You can enable completion commands by setting the `EnableBashCompletion`
|
||||
flag on the `App` object. By default, this setting will only auto-complete to
|
||||
show an app's subcommands, but you can write your own completion methods for
|
||||
the App or its subcommands.
|
||||
|
||||
```go
|
||||
...
|
||||
var tasks = []string{"cook", "clean", "laundry", "eat", "sleep", "code"}
|
||||
app := cli.NewApp()
|
||||
app.EnableBashCompletion = true
|
||||
app.Commands = []cli.Command{
|
||||
{
|
||||
Name: "complete",
|
||||
Aliases: []string{"c"},
|
||||
Usage: "complete a task on the list",
|
||||
Action: func(c *cli.Context) {
|
||||
println("completed task: ", c.Args().First())
|
||||
},
|
||||
BashComplete: func(c *cli.Context) {
|
||||
// This will complete if no args are passed
|
||||
if c.NArg() > 0 {
|
||||
return
|
||||
}
|
||||
for _, t := range tasks {
|
||||
fmt.Println(t)
|
||||
}
|
||||
},
|
||||
}
|
||||
}
|
||||
...
|
||||
```
|
||||
|
||||
#### To Enable
|
||||
|
||||
Source the `autocomplete/bash_autocomplete` file in your `.bashrc` file while
|
||||
setting the `PROG` variable to the name of your program:
|
||||
|
||||
`PROG=myprogram source /.../cli/autocomplete/bash_autocomplete`
|
||||
|
||||
#### To Distribute
|
||||
|
||||
Copy `autocomplete/bash_autocomplete` into `/etc/bash_completion.d/` and rename
|
||||
it to the name of the program you wish to add autocomplete support for (or
|
||||
automatically install it there if you are distributing a package). Don't forget
|
||||
to source the file to make it active in the current shell.
|
||||
|
||||
```
|
||||
sudo cp autocomplete/bash_autocomplete /etc/bash_completion.d/<myprogram>
|
||||
source /etc/bash_completion.d/<myprogram>
|
||||
```
|
||||
|
||||
Alternatively, you can just document that users should source the generic
|
||||
`autocomplete/bash_autocomplete` in their bash configuration with `$PROG` set
|
||||
to the name of their program (as above).
|
||||
|
||||
## Contribution Guidelines
|
||||
|
||||
Feel free to put up a pull request to fix a bug or maybe add a feature. I will give it a code review and make sure that it does not break backwards compatibility. If I or any other collaborators agree that it is in line with the vision of the project, we will work with you to get the code into a mergeable state and merge it into the master branch.
|
||||
|
||||
If you have contributed something significant to the project, I will most likely add you as a collaborator. As a collaborator you are given the ability to merge others' pull requests. It is very important that new code does not break existing code, so be careful about what code you do choose to merge. If you have any questions, feel free to link @codegangsta to the issue in question and we can review it together.
|
||||
|
||||
If you feel like you have contributed to the project but have not yet been added as a collaborator, I probably forgot to add you. Hit @codegangsta up over email and we will get it figured out.
|
|
@ -1,363 +0,0 @@
|
|||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"os"
|
||||
"path"
|
||||
"sort"
|
||||
"time"
|
||||
)
|
||||
|
||||
// App is the main structure of a cli application. It is recommended that
|
||||
// an app be created with the cli.NewApp() function
|
||||
type App struct {
|
||||
// The name of the program. Defaults to path.Base(os.Args[0])
|
||||
Name string
|
||||
// Full name of command for help, defaults to Name
|
||||
HelpName string
|
||||
// Description of the program.
|
||||
Usage string
|
||||
// Text to override the USAGE section of help
|
||||
UsageText string
|
||||
// Description of the program argument format.
|
||||
ArgsUsage string
|
||||
// Version of the program
|
||||
Version string
|
||||
// List of commands to execute
|
||||
Commands []Command
|
||||
// List of flags to parse
|
||||
Flags []Flag
|
||||
// Boolean to enable bash completion commands
|
||||
EnableBashCompletion bool
|
||||
// Boolean to hide built-in help command
|
||||
HideHelp bool
|
||||
// Boolean to hide built-in version flag and the VERSION section of help
|
||||
HideVersion bool
|
||||
// Populated on app startup, only gettable through the Categories() method
|
||||
categories CommandCategories
|
||||
// An action to execute when the bash-completion flag is set
|
||||
BashComplete func(context *Context)
|
||||
// An action to execute before any subcommands are run, but after the context is ready
|
||||
// If a non-nil error is returned, no subcommands are run
|
||||
Before func(context *Context) error
|
||||
// An action to execute after any subcommands are run, but after the subcommand has finished
|
||||
// It is run even if Action() panics
|
||||
After func(context *Context) error
|
||||
// The action to execute when no subcommands are specified
|
||||
Action func(context *Context)
|
||||
// Execute this function if the proper command cannot be found
|
||||
CommandNotFound func(context *Context, command string)
|
||||
// Execute this function if a usage error occurs. This is useful for displaying customized usage error messages.
|
||||
// This function is able to replace the original error messages.
|
||||
// If this function is not set, the "Incorrect usage" message is displayed and execution is interrupted.
|
||||
OnUsageError func(context *Context, err error, isSubcommand bool) error
|
||||
// Compilation date
|
||||
Compiled time.Time
|
||||
// List of all authors who contributed
|
||||
Authors []Author
|
||||
// Copyright of the binary if any
|
||||
Copyright string
|
||||
// Name of Author (Note: Use App.Authors, this is deprecated)
|
||||
Author string
|
||||
// Email of Author (Note: Use App.Authors, this is deprecated)
|
||||
Email string
|
||||
// Writer is the writer to write output to
|
||||
Writer io.Writer
|
||||
}
|
||||
|
||||
// Tries to find out when this binary was compiled.
|
||||
// Returns the current time if it fails to find it.
|
||||
func compileTime() time.Time {
|
||||
info, err := os.Stat(os.Args[0])
|
||||
if err != nil {
|
||||
return time.Now()
|
||||
}
|
||||
return info.ModTime()
|
||||
}
|
||||
|
||||
// Creates a new cli Application with some reasonable defaults for Name, Usage, Version and Action.
|
||||
func NewApp() *App {
|
||||
return &App{
|
||||
Name: path.Base(os.Args[0]),
|
||||
HelpName: path.Base(os.Args[0]),
|
||||
Usage: "A new cli application",
|
||||
UsageText: "",
|
||||
Version: "0.0.0",
|
||||
BashComplete: DefaultAppComplete,
|
||||
Action: helpCommand.Action,
|
||||
Compiled: compileTime(),
|
||||
Writer: os.Stdout,
|
||||
}
|
||||
}
|
||||
|
||||
// Entry point to the cli app. Parses the arguments slice and routes to the proper flag/args combination
|
||||
func (a *App) Run(arguments []string) (err error) {
|
||||
if a.Author != "" || a.Email != "" {
|
||||
a.Authors = append(a.Authors, Author{Name: a.Author, Email: a.Email})
|
||||
}
|
||||
|
||||
newCmds := []Command{}
|
||||
for _, c := range a.Commands {
|
||||
if c.HelpName == "" {
|
||||
c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name)
|
||||
}
|
||||
newCmds = append(newCmds, c)
|
||||
}
|
||||
a.Commands = newCmds
|
||||
|
||||
a.categories = CommandCategories{}
|
||||
for _, command := range a.Commands {
|
||||
a.categories = a.categories.AddCommand(command.Category, command)
|
||||
}
|
||||
sort.Sort(a.categories)
|
||||
|
||||
// append help to commands
|
||||
if a.Command(helpCommand.Name) == nil && !a.HideHelp {
|
||||
a.Commands = append(a.Commands, helpCommand)
|
||||
if (HelpFlag != BoolFlag{}) {
|
||||
a.appendFlag(HelpFlag)
|
||||
}
|
||||
}
|
||||
|
||||
// append bash-completion and version flags
|
||||
if a.EnableBashCompletion {
|
||||
a.appendFlag(BashCompletionFlag)
|
||||
}
|
||||
|
||||
if !a.HideVersion {
|
||||
a.appendFlag(VersionFlag)
|
||||
}
|
||||
|
||||
// parse flags
|
||||
set := flagSet(a.Name, a.Flags)
|
||||
set.SetOutput(ioutil.Discard)
|
||||
err = set.Parse(arguments[1:])
|
||||
nerr := normalizeFlags(a.Flags, set)
|
||||
context := NewContext(a, set, nil)
|
||||
if nerr != nil {
|
||||
fmt.Fprintln(a.Writer, nerr)
|
||||
ShowAppHelp(context)
|
||||
return nerr
|
||||
}
|
||||
|
||||
if checkCompletions(context) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if a.OnUsageError != nil {
|
||||
err := a.OnUsageError(context, err, false)
|
||||
return err
|
||||
} else {
|
||||
fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.")
|
||||
ShowAppHelp(context)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if !a.HideHelp && checkHelp(context) {
|
||||
ShowAppHelp(context)
|
||||
return nil
|
||||
}
|
||||
|
||||
if !a.HideVersion && checkVersion(context) {
|
||||
ShowVersion(context)
|
||||
return nil
|
||||
}
|
||||
|
||||
if a.After != nil {
|
||||
defer func() {
|
||||
if afterErr := a.After(context); afterErr != nil {
|
||||
if err != nil {
|
||||
err = NewMultiError(err, afterErr)
|
||||
} else {
|
||||
err = afterErr
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
if a.Before != nil {
|
||||
err = a.Before(context)
|
||||
if err != nil {
|
||||
fmt.Fprintf(a.Writer, "%v\n\n", err)
|
||||
ShowAppHelp(context)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
args := context.Args()
|
||||
if args.Present() {
|
||||
name := args.First()
|
||||
c := a.Command(name)
|
||||
if c != nil {
|
||||
return c.Run(context)
|
||||
}
|
||||
}
|
||||
|
||||
// Run default Action
|
||||
a.Action(context)
|
||||
return nil
|
||||
}
|
||||
|
||||
// Another entry point to the cli app, takes care of passing arguments and error handling
|
||||
func (a *App) RunAndExitOnError() {
|
||||
if err := a.Run(os.Args); err != nil {
|
||||
fmt.Fprintln(os.Stderr, err)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
|
||||
// Invokes the subcommand given the context, parses ctx.Args() to generate command-specific flags
|
||||
func (a *App) RunAsSubcommand(ctx *Context) (err error) {
|
||||
// append help to commands
|
||||
if len(a.Commands) > 0 {
|
||||
if a.Command(helpCommand.Name) == nil && !a.HideHelp {
|
||||
a.Commands = append(a.Commands, helpCommand)
|
||||
if (HelpFlag != BoolFlag{}) {
|
||||
a.appendFlag(HelpFlag)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
newCmds := []Command{}
|
||||
for _, c := range a.Commands {
|
||||
if c.HelpName == "" {
|
||||
c.HelpName = fmt.Sprintf("%s %s", a.HelpName, c.Name)
|
||||
}
|
||||
newCmds = append(newCmds, c)
|
||||
}
|
||||
a.Commands = newCmds
|
||||
|
||||
// append flags
|
||||
if a.EnableBashCompletion {
|
||||
a.appendFlag(BashCompletionFlag)
|
||||
}
|
||||
|
||||
// parse flags
|
||||
set := flagSet(a.Name, a.Flags)
|
||||
set.SetOutput(ioutil.Discard)
|
||||
err = set.Parse(ctx.Args().Tail())
|
||||
nerr := normalizeFlags(a.Flags, set)
|
||||
context := NewContext(a, set, ctx)
|
||||
|
||||
if nerr != nil {
|
||||
fmt.Fprintln(a.Writer, nerr)
|
||||
fmt.Fprintln(a.Writer)
|
||||
if len(a.Commands) > 0 {
|
||||
ShowSubcommandHelp(context)
|
||||
} else {
|
||||
ShowCommandHelp(ctx, context.Args().First())
|
||||
}
|
||||
return nerr
|
||||
}
|
||||
|
||||
if checkCompletions(context) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if a.OnUsageError != nil {
|
||||
err = a.OnUsageError(context, err, true)
|
||||
return err
|
||||
} else {
|
||||
fmt.Fprintf(a.Writer, "%s\n\n", "Incorrect Usage.")
|
||||
ShowSubcommandHelp(context)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
if len(a.Commands) > 0 {
|
||||
if checkSubcommandHelp(context) {
|
||||
return nil
|
||||
}
|
||||
} else {
|
||||
if checkCommandHelp(ctx, context.Args().First()) {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
|
||||
if a.After != nil {
|
||||
defer func() {
|
||||
afterErr := a.After(context)
|
||||
if afterErr != nil {
|
||||
if err != nil {
|
||||
err = NewMultiError(err, afterErr)
|
||||
} else {
|
||||
err = afterErr
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
if a.Before != nil {
|
||||
err := a.Before(context)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
args := context.Args()
|
||||
if args.Present() {
|
||||
name := args.First()
|
||||
c := a.Command(name)
|
||||
if c != nil {
|
||||
return c.Run(context)
|
||||
}
|
||||
}
|
||||
|
||||
// Run default Action
|
||||
a.Action(context)
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Returns the named command on App. Returns nil if the command does not exist
|
||||
func (a *App) Command(name string) *Command {
|
||||
for _, c := range a.Commands {
|
||||
if c.HasName(name) {
|
||||
return &c
|
||||
}
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// Returns the array containing all the categories with the commands they contain
|
||||
func (a *App) Categories() CommandCategories {
|
||||
return a.categories
|
||||
}
|
||||
|
||||
func (a *App) hasFlag(flag Flag) bool {
|
||||
for _, f := range a.Flags {
|
||||
if flag == f {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func (a *App) appendFlag(flag Flag) {
|
||||
if !a.hasFlag(flag) {
|
||||
a.Flags = append(a.Flags, flag)
|
||||
}
|
||||
}
|
||||
|
||||
// Author represents someone who has contributed to a cli project.
|
||||
type Author struct {
|
||||
Name string // The author's name
|
||||
Email string // The author's email
|
||||
}
|
||||
|
||||
// String makes Author satisfy the Stringer interface, so it can be printed easily in the templating process
|
||||
func (a Author) String() string {
|
||||
e := ""
|
||||
if a.Email != "" {
|
||||
e = "<" + a.Email + "> "
|
||||
}
|
||||
|
||||
return fmt.Sprintf("%v %v", a.Name, e)
|
||||
}
|
|
@ -1,16 +0,0 @@
|
|||
version: "{build}"
|
||||
|
||||
os: Windows Server 2012 R2
|
||||
|
||||
install:
|
||||
- go version
|
||||
- go env
|
||||
|
||||
build_script:
|
||||
- cd %APPVEYOR_BUILD_FOLDER%
|
||||
- go vet ./...
|
||||
- go test -v ./...
|
||||
|
||||
test: off
|
||||
|
||||
deploy: off
|
|
@ -1,30 +0,0 @@
|
|||
package cli
|
||||
|
||||
type CommandCategories []*CommandCategory
|
||||
|
||||
type CommandCategory struct {
|
||||
Name string
|
||||
Commands Commands
|
||||
}
|
||||
|
||||
func (c CommandCategories) Less(i, j int) bool {
|
||||
return c[i].Name < c[j].Name
|
||||
}
|
||||
|
||||
func (c CommandCategories) Len() int {
|
||||
return len(c)
|
||||
}
|
||||
|
||||
func (c CommandCategories) Swap(i, j int) {
|
||||
c[i], c[j] = c[j], c[i]
|
||||
}
|
||||
|
||||
func (c CommandCategories) AddCommand(category string, command Command) CommandCategories {
|
||||
for _, commandCategory := range c {
|
||||
if commandCategory.Name == category {
|
||||
commandCategory.Commands = append(commandCategory.Commands, command)
|
||||
return c
|
||||
}
|
||||
}
|
||||
return append(c, &CommandCategory{Name: category, Commands: []Command{command}})
|
||||
}
|
|
@ -1,40 +0,0 @@
|
|||
// Package cli provides a minimal framework for creating and organizing command line
|
||||
// Go applications. cli is designed to be easy to understand and write; the simplest
|
||||
// cli application can be written as follows:
|
||||
// func main() {
|
||||
// cli.NewApp().Run(os.Args)
|
||||
// }
|
||||
//
|
||||
// Of course this application does not do much, so let's make this an actual application:
|
||||
// func main() {
|
||||
// app := cli.NewApp()
|
||||
// app.Name = "greet"
|
||||
// app.Usage = "say a greeting"
|
||||
// app.Action = func(c *cli.Context) {
|
||||
// println("Greetings")
|
||||
// }
|
||||
//
|
||||
// app.Run(os.Args)
|
||||
// }
|
||||
package cli
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
type MultiError struct {
|
||||
Errors []error
|
||||
}
|
||||
|
||||
func NewMultiError(err ...error) MultiError {
|
||||
return MultiError{Errors: err}
|
||||
}
|
||||
|
||||
func (m MultiError) Error() string {
|
||||
errs := make([]string, len(m.Errors))
|
||||
for i, err := range m.Errors {
|
||||
errs[i] = err.Error()
|
||||
}
|
||||
|
||||
return strings.Join(errs, "\n")
|
||||
}
|
|
@ -1,262 +0,0 @@
|
|||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io/ioutil"
|
||||
"sort"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Command is a subcommand for a cli.App.
|
||||
type Command struct {
|
||||
// The name of the command
|
||||
Name string
|
||||
// short name of the command. Typically one character (deprecated, use `Aliases`)
|
||||
ShortName string
|
||||
// A list of aliases for the command
|
||||
Aliases []string
|
||||
// A short description of the usage of this command
|
||||
Usage string
|
||||
// Custom text to show on USAGE section of help
|
||||
UsageText string
|
||||
// A longer explanation of how the command works
|
||||
Description string
|
||||
// A short description of the arguments of this command
|
||||
ArgsUsage string
|
||||
// The category the command is part of
|
||||
Category string
|
||||
// The function to call when checking for bash command completions
|
||||
BashComplete func(context *Context)
|
||||
// An action to execute before any sub-subcommands are run, but after the context is ready
|
||||
// If a non-nil error is returned, no sub-subcommands are run
|
||||
Before func(context *Context) error
|
||||
// An action to execute after any subcommands are run, but before the subcommand has finished
|
||||
// It is run even if Action() panics
|
||||
After func(context *Context) error
|
||||
// The function to call when this command is invoked
|
||||
Action func(context *Context)
|
||||
// Execute this function if a usage error occurs. This is useful for displaying customized usage error messages.
|
||||
// This function is able to replace the original error messages.
|
||||
// If this function is not set, the "Incorrect usage" message is displayed and execution is interrupted.
|
||||
OnUsageError func(context *Context, err error) error
|
||||
// List of child commands
|
||||
Subcommands Commands
|
||||
// List of flags to parse
|
||||
Flags []Flag
|
||||
// Treat all flags as normal arguments if true
|
||||
SkipFlagParsing bool
|
||||
// Boolean to hide built-in help command
|
||||
HideHelp bool
|
||||
|
||||
// Full name of command for help, defaults to full command name, including parent commands.
|
||||
HelpName string
|
||||
commandNamePath []string
|
||||
}
|
||||
|
||||
// Returns the full name of the command.
|
||||
// For subcommands this ensures that parent commands are part of the command path
|
||||
func (c Command) FullName() string {
|
||||
if c.commandNamePath == nil {
|
||||
return c.Name
|
||||
}
|
||||
return strings.Join(c.commandNamePath, " ")
|
||||
}
|
||||
|
||||
type Commands []Command
|
||||
|
||||
// Invokes the command given the context, parses ctx.Args() to generate command-specific flags
|
||||
func (c Command) Run(ctx *Context) (err error) {
|
||||
if len(c.Subcommands) > 0 {
|
||||
return c.startApp(ctx)
|
||||
}
|
||||
|
||||
if !c.HideHelp && (HelpFlag != BoolFlag{}) {
|
||||
// append help to flags
|
||||
c.Flags = append(
|
||||
c.Flags,
|
||||
HelpFlag,
|
||||
)
|
||||
}
|
||||
|
||||
if ctx.App.EnableBashCompletion {
|
||||
c.Flags = append(c.Flags, BashCompletionFlag)
|
||||
}
|
||||
|
||||
set := flagSet(c.Name, c.Flags)
|
||||
set.SetOutput(ioutil.Discard)
|
||||
|
||||
if !c.SkipFlagParsing {
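// Find the first flag-like argument and any "--" terminator so that the
// flags can be reordered ahead of the positional arguments before parsing.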
|
||||
firstFlagIndex := -1
|
||||
terminatorIndex := -1
|
||||
for index, arg := range ctx.Args() {
|
||||
if arg == "--" {
|
||||
terminatorIndex = index
|
||||
break
|
||||
} else if arg == "-" {
|
||||
// Do nothing. A dash alone is not really a flag.
|
||||
continue
|
||||
} else if strings.HasPrefix(arg, "-") && firstFlagIndex == -1 {
|
||||
firstFlagIndex = index
|
||||
}
|
||||
}
|
||||
|
||||
if firstFlagIndex > -1 {
|
||||
args := ctx.Args()
|
||||
regularArgs := make([]string, len(args[1:firstFlagIndex]))
|
||||
copy(regularArgs, args[1:firstFlagIndex])
|
||||
|
||||
var flagArgs []string
|
||||
if terminatorIndex > -1 {
|
||||
flagArgs = args[firstFlagIndex:terminatorIndex]
|
||||
regularArgs = append(regularArgs, args[terminatorIndex:]...)
|
||||
} else {
|
||||
flagArgs = args[firstFlagIndex:]
|
||||
}
|
||||
|
||||
err = set.Parse(append(flagArgs, regularArgs...))
|
||||
} else {
|
||||
err = set.Parse(ctx.Args().Tail())
|
||||
}
|
||||
} else {
|
||||
if c.SkipFlagParsing {
|
||||
err = set.Parse(append([]string{"--"}, ctx.Args().Tail()...))
|
||||
}
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
if c.OnUsageError != nil {
|
||||
err := c.OnUsageError(ctx, err)
|
||||
return err
|
||||
} else {
|
||||
fmt.Fprintln(ctx.App.Writer, "Incorrect Usage.")
|
||||
fmt.Fprintln(ctx.App.Writer)
|
||||
ShowCommandHelp(ctx, c.Name)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
nerr := normalizeFlags(c.Flags, set)
|
||||
if nerr != nil {
|
||||
fmt.Fprintln(ctx.App.Writer, nerr)
|
||||
fmt.Fprintln(ctx.App.Writer)
|
||||
ShowCommandHelp(ctx, c.Name)
|
||||
return nerr
|
||||
}
|
||||
context := NewContext(ctx.App, set, ctx)
|
||||
|
||||
if checkCommandCompletions(context, c.Name) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if checkCommandHelp(context, c.Name) {
|
||||
return nil
|
||||
}
|
||||
|
||||
if c.After != nil {
|
||||
defer func() {
|
||||
afterErr := c.After(context)
|
||||
if afterErr != nil {
|
||||
if err != nil {
|
||||
err = NewMultiError(err, afterErr)
|
||||
} else {
|
||||
err = afterErr
|
||||
}
|
||||
}
|
||||
}()
|
||||
}
|
||||
|
||||
if c.Before != nil {
|
||||
err := c.Before(context)
|
||||
if err != nil {
|
||||
fmt.Fprintln(ctx.App.Writer, err)
|
||||
fmt.Fprintln(ctx.App.Writer)
|
||||
ShowCommandHelp(ctx, c.Name)
|
||||
return err
|
||||
}
|
||||
}
|
||||
|
||||
context.Command = c
|
||||
c.Action(context)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c Command) Names() []string {
|
||||
names := []string{c.Name}
|
||||
|
||||
if c.ShortName != "" {
|
||||
names = append(names, c.ShortName)
|
||||
}
|
||||
|
||||
return append(names, c.Aliases...)
|
||||
}
|
||||
|
||||
// Returns true if Command.Name, Command.ShortName or one of the Aliases matches the given name
|
||||
func (c Command) HasName(name string) bool {
|
||||
for _, n := range c.Names() {
|
||||
if n == name {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func (c Command) startApp(ctx *Context) error {
|
||||
app := NewApp()
|
||||
|
||||
// set the name and usage
|
||||
app.Name = fmt.Sprintf("%s %s", ctx.App.Name, c.Name)
|
||||
if c.HelpName == "" {
|
||||
app.HelpName = c.HelpName
|
||||
} else {
|
||||
app.HelpName = app.Name
|
||||
}
|
||||
|
||||
if c.Description != "" {
|
||||
app.Usage = c.Description
|
||||
} else {
|
||||
app.Usage = c.Usage
|
||||
}
|
||||
|
||||
// set CommandNotFound
|
||||
app.CommandNotFound = ctx.App.CommandNotFound
|
||||
|
||||
// set the flags and commands
|
||||
app.Commands = c.Subcommands
|
||||
app.Flags = c.Flags
|
||||
app.HideHelp = c.HideHelp
|
||||
|
||||
app.Version = ctx.App.Version
|
||||
app.HideVersion = ctx.App.HideVersion
|
||||
app.Compiled = ctx.App.Compiled
|
||||
app.Author = ctx.App.Author
|
||||
app.Email = ctx.App.Email
|
||||
app.Writer = ctx.App.Writer
|
||||
|
||||
app.categories = CommandCategories{}
|
||||
for _, command := range c.Subcommands {
|
||||
app.categories = app.categories.AddCommand(command.Category, command)
|
||||
}
|
||||
|
||||
sort.Sort(app.categories)
|
||||
|
||||
// bash completion
|
||||
app.EnableBashCompletion = ctx.App.EnableBashCompletion
|
||||
if c.BashComplete != nil {
|
||||
app.BashComplete = c.BashComplete
|
||||
}
|
||||
|
||||
// set the actions
|
||||
app.Before = c.Before
|
||||
app.After = c.After
|
||||
if c.Action != nil {
|
||||
app.Action = c.Action
|
||||
} else {
|
||||
app.Action = helpSubcommand.Action
|
||||
}
|
||||
|
||||
for index, cc := range app.Commands {
|
||||
app.Commands[index].commandNamePath = []string{c.Name, cc.Name}
|
||||
}
|
||||
|
||||
return app.RunAsSubcommand(ctx)
|
||||
}
|
|
@ -1,393 +0,0 @@
|
|||
package cli
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"flag"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// Context is a type that is passed through to
|
||||
// each Handler action in a cli application. Context
|
||||
// can be used to retrieve context-specific Args and
|
||||
// parsed command-line options.
|
||||
type Context struct {
|
||||
App *App
|
||||
Command Command
|
||||
flagSet *flag.FlagSet
|
||||
setFlags map[string]bool
|
||||
globalSetFlags map[string]bool
|
||||
parentContext *Context
|
||||
}
|
||||
|
||||
// Creates a new context. For use when invoking an App or Command action.
|
||||
func NewContext(app *App, set *flag.FlagSet, parentCtx *Context) *Context {
|
||||
return &Context{App: app, flagSet: set, parentContext: parentCtx}
|
||||
}
|
||||
|
||||
// Looks up the value of a local int flag, returns 0 if no int flag exists
|
||||
func (c *Context) Int(name string) int {
|
||||
return lookupInt(name, c.flagSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a local time.Duration flag, returns 0 if no time.Duration flag exists
|
||||
func (c *Context) Duration(name string) time.Duration {
|
||||
return lookupDuration(name, c.flagSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a local float64 flag, returns 0 if no float64 flag exists
|
||||
func (c *Context) Float64(name string) float64 {
|
||||
return lookupFloat64(name, c.flagSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a local bool flag, returns false if no bool flag exists
|
||||
func (c *Context) Bool(name string) bool {
|
||||
return lookupBool(name, c.flagSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a local boolT flag, returns false if no bool flag exists
|
||||
func (c *Context) BoolT(name string) bool {
|
||||
return lookupBoolT(name, c.flagSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a local string flag, returns "" if no string flag exists
|
||||
func (c *Context) String(name string) string {
|
||||
return lookupString(name, c.flagSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a local string slice flag, returns nil if no string slice flag exists
|
||||
func (c *Context) StringSlice(name string) []string {
|
||||
return lookupStringSlice(name, c.flagSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a local int slice flag, returns nil if no int slice flag exists
|
||||
func (c *Context) IntSlice(name string) []int {
|
||||
return lookupIntSlice(name, c.flagSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a local generic flag, returns nil if no generic flag exists
|
||||
func (c *Context) Generic(name string) interface{} {
|
||||
return lookupGeneric(name, c.flagSet)
|
||||
}
|
||||
|
||||
// Looks up the value of a global int flag, returns 0 if no int flag exists
|
||||
func (c *Context) GlobalInt(name string) int {
|
||||
if fs := lookupGlobalFlagSet(name, c); fs != nil {
|
||||
return lookupInt(name, fs)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Looks up the value of a global time.Duration flag, returns 0 if no time.Duration flag exists
|
||||
func (c *Context) GlobalDuration(name string) time.Duration {
|
||||
if fs := lookupGlobalFlagSet(name, c); fs != nil {
|
||||
return lookupDuration(name, fs)
|
||||
}
|
||||
return 0
|
||||
}
|
||||
|
||||
// Looks up the value of a global bool flag, returns false if no bool flag exists
|
||||
func (c *Context) GlobalBool(name string) bool {
|
||||
if fs := lookupGlobalFlagSet(name, c); fs != nil {
|
||||
return lookupBool(name, fs)
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Looks up the value of a global string flag, returns "" if no string flag exists
|
||||
func (c *Context) GlobalString(name string) string {
|
||||
if fs := lookupGlobalFlagSet(name, c); fs != nil {
|
||||
return lookupString(name, fs)
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Looks up the value of a global string slice flag, returns nil if no string slice flag exists
|
||||
func (c *Context) GlobalStringSlice(name string) []string {
|
||||
if fs := lookupGlobalFlagSet(name, c); fs != nil {
|
||||
return lookupStringSlice(name, fs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Looks up the value of a global int slice flag, returns nil if no int slice flag exists
|
||||
func (c *Context) GlobalIntSlice(name string) []int {
|
||||
if fs := lookupGlobalFlagSet(name, c); fs != nil {
|
||||
return lookupIntSlice(name, fs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Looks up the value of a global generic flag, returns nil if no generic flag exists
|
||||
func (c *Context) GlobalGeneric(name string) interface{} {
|
||||
if fs := lookupGlobalFlagSet(name, c); fs != nil {
|
||||
return lookupGeneric(name, fs)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Returns the number of flags set
|
||||
func (c *Context) NumFlags() int {
|
||||
return c.flagSet.NFlag()
|
||||
}
|
||||
|
||||
// Determines if the flag was actually set
|
||||
func (c *Context) IsSet(name string) bool {
|
||||
if c.setFlags == nil {
|
||||
c.setFlags = make(map[string]bool)
|
||||
c.flagSet.Visit(func(f *flag.Flag) {
|
||||
c.setFlags[f.Name] = true
|
||||
})
|
||||
}
|
||||
return c.setFlags[name] == true
|
||||
}
|
||||
|
||||
// Determines if the global flag was actually set
|
||||
func (c *Context) GlobalIsSet(name string) bool {
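// Lazily record every flag that was explicitly set, starting from the parent
// context (or this context when it has no parent) and walking up the chain.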
|
||||
if c.globalSetFlags == nil {
|
||||
c.globalSetFlags = make(map[string]bool)
|
||||
ctx := c
|
||||
if ctx.parentContext != nil {
|
||||
ctx = ctx.parentContext
|
||||
}
|
||||
for ; ctx != nil && c.globalSetFlags[name] == false; ctx = ctx.parentContext {
|
||||
ctx.flagSet.Visit(func(f *flag.Flag) {
|
||||
c.globalSetFlags[f.Name] = true
|
||||
})
|
||||
}
|
||||
}
|
||||
return c.globalSetFlags[name]
|
||||
}
|
||||
|
||||
// Returns a slice of flag names used in this context.
|
||||
func (c *Context) FlagNames() (names []string) {
|
||||
for _, flag := range c.Command.Flags {
|
||||
name := strings.Split(flag.GetName(), ",")[0]
|
||||
if name == "help" {
|
||||
continue
|
||||
}
|
||||
names = append(names, name)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Returns a slice of global flag names used by the app.
|
||||
func (c *Context) GlobalFlagNames() (names []string) {
|
||||
for _, flag := range c.App.Flags {
|
||||
name := strings.Split(flag.GetName(), ",")[0]
|
||||
if name == "help" || name == "version" {
|
||||
continue
|
||||
}
|
||||
names = append(names, name)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Returns the parent context, if any
|
||||
func (c *Context) Parent() *Context {
|
||||
return c.parentContext
|
||||
}
|
||||
|
||||
type Args []string
|
||||
|
||||
// Returns the command line arguments associated with the context.
|
||||
func (c *Context) Args() Args {
|
||||
args := Args(c.flagSet.Args())
|
||||
return args
|
||||
}
|
||||
|
||||
// Returns the number of command line arguments.
|
||||
func (c *Context) NArg() int {
|
||||
return len(c.Args())
|
||||
}
|
||||
|
||||
// Returns the nth argument, or else a blank string
|
||||
func (a Args) Get(n int) string {
|
||||
if len(a) > n {
|
||||
return a[n]
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// Returns the first argument, or else a blank string
|
||||
func (a Args) First() string {
|
||||
return a.Get(0)
|
||||
}
|
||||
|
||||
// Return the rest of the arguments (not the first one)
|
||||
// or else an empty string slice
|
||||
func (a Args) Tail() []string {
|
||||
if len(a) >= 2 {
|
||||
return []string(a)[1:]
|
||||
}
|
||||
return []string{}
|
||||
}
|
||||
|
||||
// Checks if there are any arguments present
|
||||
func (a Args) Present() bool {
|
||||
return len(a) != 0
|
||||
}
|
||||
|
||||
// Swaps arguments at the given indexes
|
||||
func (a Args) Swap(from, to int) error {
|
||||
if from >= len(a) || to >= len(a) {
|
||||
return errors.New("index out of range")
|
||||
}
|
||||
a[from], a[to] = a[to], a[from]
|
||||
return nil
|
||||
}
|
||||
|
||||
func lookupGlobalFlagSet(name string, ctx *Context) *flag.FlagSet {
|
||||
if ctx.parentContext != nil {
|
||||
ctx = ctx.parentContext
|
||||
}
|
||||
for ; ctx != nil; ctx = ctx.parentContext {
|
||||
if f := ctx.flagSet.Lookup(name); f != nil {
|
||||
return ctx.flagSet
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func lookupInt(name string, set *flag.FlagSet) int {
|
||||
f := set.Lookup(name)
|
||||
if f != nil {
|
||||
val, err := strconv.Atoi(f.Value.String())
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func lookupDuration(name string, set *flag.FlagSet) time.Duration {
|
||||
f := set.Lookup(name)
|
||||
if f != nil {
|
||||
val, err := time.ParseDuration(f.Value.String())
|
||||
if err == nil {
|
||||
return val
|
||||
}
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func lookupFloat64(name string, set *flag.FlagSet) float64 {
|
||||
f := set.Lookup(name)
|
||||
if f != nil {
|
||||
val, err := strconv.ParseFloat(f.Value.String(), 64)
|
||||
if err != nil {
|
||||
return 0
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
return 0
|
||||
}
|
||||
|
||||
func lookupString(name string, set *flag.FlagSet) string {
|
||||
f := set.Lookup(name)
|
||||
if f != nil {
|
||||
return f.Value.String()
|
||||
}
|
||||
|
||||
return ""
|
||||
}
|
||||
|
||||
func lookupStringSlice(name string, set *flag.FlagSet) []string {
|
||||
f := set.Lookup(name)
|
||||
if f != nil {
|
||||
return (f.Value.(*StringSlice)).Value()
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func lookupIntSlice(name string, set *flag.FlagSet) []int {
|
||||
f := set.Lookup(name)
|
||||
if f != nil {
|
||||
return (f.Value.(*IntSlice)).Value()
|
||||
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func lookupGeneric(name string, set *flag.FlagSet) interface{} {
|
||||
f := set.Lookup(name)
|
||||
if f != nil {
|
||||
return f.Value
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func lookupBool(name string, set *flag.FlagSet) bool {
|
||||
f := set.Lookup(name)
|
||||
if f != nil {
|
||||
val, err := strconv.ParseBool(f.Value.String())
|
||||
if err != nil {
|
||||
return false
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func lookupBoolT(name string, set *flag.FlagSet) bool {
|
||||
f := set.Lookup(name)
|
||||
if f != nil {
|
||||
val, err := strconv.ParseBool(f.Value.String())
|
||||
if err != nil {
|
||||
return true
|
||||
}
|
||||
return val
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func copyFlag(name string, ff *flag.Flag, set *flag.FlagSet) {
|
||||
switch ff.Value.(type) {
|
||||
case *StringSlice:
|
||||
default:
|
||||
set.Set(name, ff.Value.String())
|
||||
}
|
||||
}
|
||||
|
||||
func normalizeFlags(flags []Flag, set *flag.FlagSet) error {
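// For flags registered under several comma-separated names (e.g. "help, h"),
// copy the value set through one name over to the other names, and reject
// input that sets two different forms of the same flag.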
|
||||
visited := make(map[string]bool)
|
||||
set.Visit(func(f *flag.Flag) {
|
||||
visited[f.Name] = true
|
||||
})
|
||||
for _, f := range flags {
|
||||
parts := strings.Split(f.GetName(), ",")
|
||||
if len(parts) == 1 {
|
||||
continue
|
||||
}
|
||||
var ff *flag.Flag
|
||||
for _, name := range parts {
|
||||
name = strings.Trim(name, " ")
|
||||
if visited[name] {
|
||||
if ff != nil {
|
||||
return errors.New("Cannot use two forms of the same flag: " + name + " " + ff.Name)
|
||||
}
|
||||
ff = set.Lookup(name)
|
||||
}
|
||||
}
|
||||
if ff == nil {
|
||||
continue
|
||||
}
|
||||
for _, name := range parts {
|
||||
name = strings.Trim(name, " ")
|
||||
if !visited[name] {
|
||||
copyFlag(name, ff, set)
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
|
@ -1,546 +0,0 @@
|
|||
package cli
|
||||
|
||||
import (
|
||||
"flag"
|
||||
"fmt"
|
||||
"os"
|
||||
"runtime"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
)
|
||||
|
||||
// This flag enables bash-completion for all commands and subcommands
|
||||
var BashCompletionFlag = BoolFlag{
|
||||
Name: "generate-bash-completion",
|
||||
}
|
||||
|
||||
// This flag prints the version for the application
|
||||
var VersionFlag = BoolFlag{
|
||||
Name: "version, v",
|
||||
Usage: "print the version",
|
||||
}
|
||||
|
||||
// This flag prints the help for all commands and subcommands.
// Set it to the zero value (BoolFlag{}) to disable the flag; the help
// subcommand is still added unless HideHelp is set to true.
|
||||
var HelpFlag = BoolFlag{
|
||||
Name: "help, h",
|
||||
Usage: "show help",
|
||||
}
|
||||
|
||||
// Flag is a common interface related to parsing flags in cli.
|
||||
// For more advanced flag parsing techniques, it is recommended that
|
||||
// this interface be implemented.
|
||||
type Flag interface {
|
||||
fmt.Stringer
|
||||
// Apply Flag settings to the given flag set
|
||||
Apply(*flag.FlagSet)
|
||||
GetName() string
|
||||
}
|
||||
|
||||
func flagSet(name string, flags []Flag) *flag.FlagSet {
|
||||
set := flag.NewFlagSet(name, flag.ContinueOnError)
|
||||
|
||||
for _, f := range flags {
|
||||
f.Apply(set)
|
||||
}
|
||||
return set
|
||||
}
|
||||
|
||||
func eachName(longName string, fn func(string)) {
|
||||
parts := strings.Split(longName, ",")
|
||||
for _, name := range parts {
|
||||
name = strings.Trim(name, " ")
|
||||
fn(name)
|
||||
}
|
||||
}
|
||||
|
||||
// Generic is a generic parseable type identified by a specific flag
|
||||
type Generic interface {
|
||||
Set(value string) error
|
||||
String() string
|
||||
}
|
||||
|
||||
// GenericFlag is the flag type for types implementing Generic
|
||||
type GenericFlag struct {
|
||||
Name string
|
||||
Value Generic
|
||||
Usage string
|
||||
EnvVar string
|
||||
}
|
||||
|
||||
// String returns the string representation of the generic flag to display the
|
||||
// help text to the user (uses the String() method of the generic flag to show
|
||||
// the value)
|
||||
func (f GenericFlag) String() string {
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s %v\t%v", prefixedNames(f.Name), f.FormatValueHelp(), f.Usage))
|
||||
}
|
||||
|
||||
func (f GenericFlag) FormatValueHelp() string {
|
||||
if f.Value == nil {
|
||||
return ""
|
||||
}
|
||||
s := f.Value.String()
|
||||
if len(s) == 0 {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("\"%s\"", s)
|
||||
}
|
||||
|
||||
// Apply takes the flagset and calls Set on the generic flag with the value
|
||||
// provided by the user for parsing by the flag
|
||||
func (f GenericFlag) Apply(set *flag.FlagSet) {
|
||||
val := f.Value
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
envVar = strings.TrimSpace(envVar)
|
||||
if envVal := os.Getenv(envVar); envVal != "" {
|
||||
val.Set(envVal)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
set.Var(f.Value, name, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f GenericFlag) GetName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// StringSlice is an opaque type for []string to satisfy flag.Value
|
||||
type StringSlice []string
|
||||
|
||||
// Set appends the string value to the list of values
|
||||
func (f *StringSlice) Set(value string) error {
|
||||
*f = append(*f, value)
|
||||
return nil
|
||||
}
|
||||
|
||||
// String returns a readable representation of this value (for usage defaults)
|
||||
func (f *StringSlice) String() string {
|
||||
return fmt.Sprintf("%s", *f)
|
||||
}
|
||||
|
||||
// Value returns the slice of strings set by this flag
|
||||
func (f *StringSlice) Value() []string {
|
||||
return *f
|
||||
}
|
||||
|
||||
// StringSliceFlag is a string flag that can be specified multiple times on the
|
||||
// command-line
|
||||
type StringSliceFlag struct {
|
||||
Name string
|
||||
Value *StringSlice
|
||||
Usage string
|
||||
EnvVar string
|
||||
}
|
||||
|
||||
// String returns the usage
|
||||
func (f StringSliceFlag) String() string {
|
||||
firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ")
|
||||
pref := prefixFor(firstName)
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage))
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f StringSliceFlag) Apply(set *flag.FlagSet) {
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
envVar = strings.TrimSpace(envVar)
|
||||
if envVal := os.Getenv(envVar); envVal != "" {
|
||||
newVal := &StringSlice{}
|
||||
for _, s := range strings.Split(envVal, ",") {
|
||||
s = strings.TrimSpace(s)
|
||||
newVal.Set(s)
|
||||
}
|
||||
f.Value = newVal
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Value == nil {
|
||||
f.Value = &StringSlice{}
|
||||
}
|
||||
set.Var(f.Value, name, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f StringSliceFlag) GetName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// IntSlice is an opaque type for []int to satisfy flag.Value
|
||||
type IntSlice []int
|
||||
|
||||
// Set parses the value into an integer and appends it to the list of values
|
||||
func (f *IntSlice) Set(value string) error {
|
||||
tmp, err := strconv.Atoi(value)
|
||||
if err != nil {
|
||||
return err
|
||||
} else {
|
||||
*f = append(*f, tmp)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// String returns a readable representation of this value (for usage defaults)
|
||||
func (f *IntSlice) String() string {
|
||||
return fmt.Sprintf("%d", *f)
|
||||
}
|
||||
|
||||
// Value returns the slice of ints set by this flag
|
||||
func (f *IntSlice) Value() []int {
|
||||
return *f
|
||||
}
|
||||
|
||||
// IntSliceFlag is an int flag that can be specified multiple times on the
|
||||
// command-line
|
||||
type IntSliceFlag struct {
|
||||
Name string
|
||||
Value *IntSlice
|
||||
Usage string
|
||||
EnvVar string
|
||||
}
|
||||
|
||||
// String returns the usage
|
||||
func (f IntSliceFlag) String() string {
|
||||
firstName := strings.Trim(strings.Split(f.Name, ",")[0], " ")
|
||||
pref := prefixFor(firstName)
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s [%v]\t%v", prefixedNames(f.Name), pref+firstName+" option "+pref+firstName+" option", f.Usage))
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f IntSliceFlag) Apply(set *flag.FlagSet) {
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
envVar = strings.TrimSpace(envVar)
|
||||
if envVal := os.Getenv(envVar); envVal != "" {
|
||||
newVal := &IntSlice{}
|
||||
for _, s := range strings.Split(envVal, ",") {
|
||||
s = strings.TrimSpace(s)
|
||||
err := newVal.Set(s)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "%s", err.Error())
|
||||
}
|
||||
}
|
||||
f.Value = newVal
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Value == nil {
|
||||
f.Value = &IntSlice{}
|
||||
}
|
||||
set.Var(f.Value, name, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f IntSliceFlag) GetName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// BoolFlag is a switch that defaults to false
|
||||
type BoolFlag struct {
|
||||
Name string
|
||||
Usage string
|
||||
EnvVar string
|
||||
Destination *bool
|
||||
}
|
||||
|
||||
// String returns a readable representation of this value (for usage defaults)
|
||||
func (f BoolFlag) String() string {
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage))
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f BoolFlag) Apply(set *flag.FlagSet) {
|
||||
val := false
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
envVar = strings.TrimSpace(envVar)
|
||||
if envVal := os.Getenv(envVar); envVal != "" {
|
||||
envValBool, err := strconv.ParseBool(envVal)
|
||||
if err == nil {
|
||||
val = envValBool
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Destination != nil {
|
||||
set.BoolVar(f.Destination, name, val, f.Usage)
|
||||
return
|
||||
}
|
||||
set.Bool(name, val, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f BoolFlag) GetName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// BoolTFlag represents a boolean flag that is true by default, but can
|
||||
// still be set to false by --some-flag=false
|
||||
type BoolTFlag struct {
|
||||
Name string
|
||||
Usage string
|
||||
EnvVar string
|
||||
Destination *bool
|
||||
}
|
||||
|
||||
// String returns a readable representation of this value (for usage defaults)
|
||||
func (f BoolTFlag) String() string {
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s\t%v", prefixedNames(f.Name), f.Usage))
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f BoolTFlag) Apply(set *flag.FlagSet) {
|
||||
val := true
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
envVar = strings.TrimSpace(envVar)
|
||||
if envVal := os.Getenv(envVar); envVal != "" {
|
||||
envValBool, err := strconv.ParseBool(envVal)
|
||||
if err == nil {
|
||||
val = envValBool
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Destination != nil {
|
||||
set.BoolVar(f.Destination, name, val, f.Usage)
|
||||
return
|
||||
}
|
||||
set.Bool(name, val, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f BoolTFlag) GetName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// StringFlag represents a flag that takes a string value
|
||||
type StringFlag struct {
|
||||
Name string
|
||||
Value string
|
||||
Usage string
|
||||
EnvVar string
|
||||
Destination *string
|
||||
}
|
||||
|
||||
// String returns the usage
|
||||
func (f StringFlag) String() string {
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s %v\t%v", prefixedNames(f.Name), f.FormatValueHelp(), f.Usage))
|
||||
}
|
||||
|
||||
func (f StringFlag) FormatValueHelp() string {
|
||||
s := f.Value
|
||||
if len(s) == 0 {
|
||||
return ""
|
||||
}
|
||||
return fmt.Sprintf("\"%s\"", s)
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f StringFlag) Apply(set *flag.FlagSet) {
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
envVar = strings.TrimSpace(envVar)
|
||||
if envVal := os.Getenv(envVar); envVal != "" {
|
||||
f.Value = envVal
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Destination != nil {
|
||||
set.StringVar(f.Destination, name, f.Value, f.Usage)
|
||||
return
|
||||
}
|
||||
set.String(name, f.Value, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f StringFlag) GetName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// IntFlag is a flag that takes an integer
|
||||
// Errors if the value provided cannot be parsed
|
||||
type IntFlag struct {
|
||||
Name string
|
||||
Value int
|
||||
Usage string
|
||||
EnvVar string
|
||||
Destination *int
|
||||
}
|
||||
|
||||
// String returns the usage
|
||||
func (f IntFlag) String() string {
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage))
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f IntFlag) Apply(set *flag.FlagSet) {
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
envVar = strings.TrimSpace(envVar)
|
||||
if envVal := os.Getenv(envVar); envVal != "" {
|
||||
envValInt, err := strconv.ParseInt(envVal, 0, 64)
|
||||
if err == nil {
|
||||
f.Value = int(envValInt)
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Destination != nil {
|
||||
set.IntVar(f.Destination, name, f.Value, f.Usage)
|
||||
return
|
||||
}
|
||||
set.Int(name, f.Value, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f IntFlag) GetName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// DurationFlag is a flag that takes a duration specified in Go's duration
|
||||
// format: https://golang.org/pkg/time/#ParseDuration
|
||||
type DurationFlag struct {
|
||||
Name string
|
||||
Value time.Duration
|
||||
Usage string
|
||||
EnvVar string
|
||||
Destination *time.Duration
|
||||
}
|
||||
|
||||
// String returns a readable representation of this value (for usage defaults)
|
||||
func (f DurationFlag) String() string {
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage))
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f DurationFlag) Apply(set *flag.FlagSet) {
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
envVar = strings.TrimSpace(envVar)
|
||||
if envVal := os.Getenv(envVar); envVal != "" {
|
||||
envValDuration, err := time.ParseDuration(envVal)
|
||||
if err == nil {
|
||||
f.Value = envValDuration
|
||||
break
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Destination != nil {
|
||||
set.DurationVar(f.Destination, name, f.Value, f.Usage)
|
||||
return
|
||||
}
|
||||
set.Duration(name, f.Value, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f DurationFlag) GetName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
// Float64Flag is a flag that takes a float value
|
||||
// Errors if the value provided cannot be parsed
|
||||
type Float64Flag struct {
|
||||
Name string
|
||||
Value float64
|
||||
Usage string
|
||||
EnvVar string
|
||||
Destination *float64
|
||||
}
|
||||
|
||||
// String returns the usage
|
||||
func (f Float64Flag) String() string {
|
||||
return withEnvHint(f.EnvVar, fmt.Sprintf("%s \"%v\"\t%v", prefixedNames(f.Name), f.Value, f.Usage))
|
||||
}
|
||||
|
||||
// Apply populates the flag given the flag set and environment
|
||||
func (f Float64Flag) Apply(set *flag.FlagSet) {
|
||||
if f.EnvVar != "" {
|
||||
for _, envVar := range strings.Split(f.EnvVar, ",") {
|
||||
envVar = strings.TrimSpace(envVar)
|
||||
if envVal := os.Getenv(envVar); envVal != "" {
|
||||
envValFloat, err := strconv.ParseFloat(envVal, 64)
|
||||
if err == nil {
|
||||
f.Value = float64(envValFloat)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
eachName(f.Name, func(name string) {
|
||||
if f.Destination != nil {
|
||||
set.Float64Var(f.Destination, name, f.Value, f.Usage)
|
||||
return
|
||||
}
|
||||
set.Float64(name, f.Value, f.Usage)
|
||||
})
|
||||
}
|
||||
|
||||
func (f Float64Flag) GetName() string {
|
||||
return f.Name
|
||||
}
|
||||
|
||||
func prefixFor(name string) (prefix string) {
|
||||
if len(name) == 1 {
|
||||
prefix = "-"
|
||||
} else {
|
||||
prefix = "--"
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
func prefixedNames(fullName string) (prefixed string) {
|
||||
parts := strings.Split(fullName, ",")
|
||||
for i, name := range parts {
|
||||
name = strings.Trim(name, " ")
|
||||
prefixed += prefixFor(name) + name
|
||||
if i < len(parts)-1 {
|
||||
prefixed += ", "
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func withEnvHint(envVar, str string) string {
|
||||
envText := ""
|
||||
if envVar != "" {
|
||||
prefix := "$"
|
||||
suffix := ""
|
||||
sep := ", $"
|
||||
if runtime.GOOS == "windows" {
|
||||
prefix = "%"
|
||||
suffix = "%"
|
||||
sep = "%, %"
|
||||
}
|
||||
envText = fmt.Sprintf(" [%s%s%s]", prefix, strings.Join(strings.Split(envVar, ","), sep), suffix)
|
||||
}
|
||||
return str + envText
|
||||
}
|
|
@ -1,253 +0,0 @@
|
|||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"text/tabwriter"
|
||||
"text/template"
|
||||
)
|
||||
|
||||
// The text template for the Default help topic.
|
||||
// cli.go uses text/template to render templates. You can
|
||||
// render custom help text by setting this variable.
|
||||
var AppHelpTemplate = `NAME:
|
||||
{{.Name}} - {{.Usage}}
|
||||
|
||||
USAGE:
|
||||
{{if .UsageText}}{{.UsageText}}{{else}}{{.HelpName}} {{if .Flags}}[global options]{{end}}{{if .Commands}} command [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{end}}
|
||||
{{if .Version}}{{if not .HideVersion}}
|
||||
VERSION:
|
||||
{{.Version}}
|
||||
{{end}}{{end}}{{if len .Authors}}
|
||||
AUTHOR(S):
|
||||
{{range .Authors}}{{ . }}{{end}}
|
||||
{{end}}{{if .Commands}}
|
||||
COMMANDS:{{range .Categories}}{{if .Name}}
|
||||
{{.Name}}{{ ":" }}{{end}}{{range .Commands}}
|
||||
{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}{{end}}
|
||||
{{end}}{{end}}{{if .Flags}}
|
||||
GLOBAL OPTIONS:
|
||||
{{range .Flags}}{{.}}
|
||||
{{end}}{{end}}{{if .Copyright }}
|
||||
COPYRIGHT:
|
||||
{{.Copyright}}
|
||||
{{end}}
|
||||
`
|
||||
|
||||
// The text template for the command help topic.
|
||||
// cli.go uses text/template to render templates. You can
|
||||
// render custom help text by setting this variable.
|
||||
var CommandHelpTemplate = `NAME:
|
||||
{{.HelpName}} - {{.Usage}}
|
||||
|
||||
USAGE:
|
||||
{{.HelpName}}{{if .Flags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}{{if .Category}}
|
||||
|
||||
CATEGORY:
|
||||
{{.Category}}{{end}}{{if .Description}}
|
||||
|
||||
DESCRIPTION:
|
||||
{{.Description}}{{end}}{{if .Flags}}
|
||||
|
||||
OPTIONS:
|
||||
{{range .Flags}}{{.}}
|
||||
{{end}}{{ end }}
|
||||
`
|
||||
|
||||
// The text template for the subcommand help topic.
|
||||
// cli.go uses text/template to render templates. You can
|
||||
// render custom help text by setting this variable.
|
||||
var SubcommandHelpTemplate = `NAME:
|
||||
{{.HelpName}} - {{.Usage}}
|
||||
|
||||
USAGE:
|
||||
{{.HelpName}} command{{if .Flags}} [command options]{{end}} {{if .ArgsUsage}}{{.ArgsUsage}}{{else}}[arguments...]{{end}}
|
||||
|
||||
COMMANDS:{{range .Categories}}{{if .Name}}
|
||||
{{.Name}}{{ ":" }}{{end}}{{range .Commands}}
|
||||
{{.Name}}{{with .ShortName}}, {{.}}{{end}}{{ "\t" }}{{.Usage}}{{end}}
|
||||
{{end}}{{if .Flags}}
|
||||
OPTIONS:
|
||||
{{range .Flags}}{{.}}
|
||||
{{end}}{{end}}
|
||||
`
|
||||
|
||||
var helpCommand = Command{
|
||||
Name: "help",
|
||||
Aliases: []string{"h"},
|
||||
Usage: "Shows a list of commands or help for one command",
|
||||
ArgsUsage: "[command]",
|
||||
Action: func(c *Context) {
|
||||
args := c.Args()
|
||||
if args.Present() {
|
||||
ShowCommandHelp(c, args.First())
|
||||
} else {
|
||||
ShowAppHelp(c)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
var helpSubcommand = Command{
|
||||
Name: "help",
|
||||
Aliases: []string{"h"},
|
||||
Usage: "Shows a list of commands or help for one command",
|
||||
ArgsUsage: "[command]",
|
||||
Action: func(c *Context) {
|
||||
args := c.Args()
|
||||
if args.Present() {
|
||||
ShowCommandHelp(c, args.First())
|
||||
} else {
|
||||
ShowSubcommandHelp(c)
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
// Prints help for the App or Command
|
||||
type helpPrinter func(w io.Writer, templ string, data interface{})
|
||||
|
||||
var HelpPrinter helpPrinter = printHelp
|
||||
|
||||
// Prints version for the App
|
||||
var VersionPrinter = printVersion
|
||||
|
||||
func ShowAppHelp(c *Context) {
|
||||
HelpPrinter(c.App.Writer, AppHelpTemplate, c.App)
|
||||
}
|
||||
|
||||
// Prints the list of subcommands as the default app completion method
|
||||
func DefaultAppComplete(c *Context) {
|
||||
for _, command := range c.App.Commands {
|
||||
for _, name := range command.Names() {
|
||||
fmt.Fprintln(c.App.Writer, name)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Prints help for the given command
|
||||
func ShowCommandHelp(ctx *Context, command string) {
|
||||
// show the subcommand help for a command with subcommands
|
||||
if command == "" {
|
||||
HelpPrinter(ctx.App.Writer, SubcommandHelpTemplate, ctx.App)
|
||||
return
|
||||
}
|
||||
|
||||
for _, c := range ctx.App.Commands {
|
||||
if c.HasName(command) {
|
||||
HelpPrinter(ctx.App.Writer, CommandHelpTemplate, c)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if ctx.App.CommandNotFound != nil {
|
||||
ctx.App.CommandNotFound(ctx, command)
|
||||
} else {
|
||||
fmt.Fprintf(ctx.App.Writer, "No help topic for '%v'\n", command)
|
||||
}
|
||||
}
|
||||
|
||||
// Prints help for the given subcommand
|
||||
func ShowSubcommandHelp(c *Context) {
|
||||
ShowCommandHelp(c, c.Command.Name)
|
||||
}
|
||||
|
||||
// Prints the version number of the App
|
||||
func ShowVersion(c *Context) {
|
||||
VersionPrinter(c)
|
||||
}
|
||||
|
||||
func printVersion(c *Context) {
|
||||
fmt.Fprintf(c.App.Writer, "%v version %v\n", c.App.Name, c.App.Version)
|
||||
}
|
||||
|
||||
// Prints the lists of commands within a given context
|
||||
func ShowCompletions(c *Context) {
|
||||
a := c.App
|
||||
if a != nil && a.BashComplete != nil {
|
||||
a.BashComplete(c)
|
||||
}
|
||||
}
|
||||
|
||||
// Prints the custom completions for a given command
|
||||
func ShowCommandCompletions(ctx *Context, command string) {
|
||||
c := ctx.App.Command(command)
|
||||
if c != nil && c.BashComplete != nil {
|
||||
c.BashComplete(ctx)
|
||||
}
|
||||
}
|
||||
|
||||
func printHelp(out io.Writer, templ string, data interface{}) {
|
||||
funcMap := template.FuncMap{
|
||||
"join": strings.Join,
|
||||
}
|
||||
|
||||
w := tabwriter.NewWriter(out, 0, 8, 1, '\t', 0)
|
||||
t := template.Must(template.New("help").Funcs(funcMap).Parse(templ))
|
||||
err := t.Execute(w, data)
|
||||
if err != nil {
|
||||
// If the writer is closed, t.Execute will fail, and there's nothing
|
||||
// we can do to recover. We could send this to os.Stderr if we need.
|
||||
return
|
||||
}
|
||||
w.Flush()
|
||||
}
|
||||
|
||||
func checkVersion(c *Context) bool {
|
||||
found := false
|
||||
if VersionFlag.Name != "" {
|
||||
eachName(VersionFlag.Name, func(name string) {
|
||||
if c.GlobalBool(name) || c.Bool(name) {
|
||||
found = true
|
||||
}
|
||||
})
|
||||
}
|
||||
return found
|
||||
}
|
||||
|
||||
func checkHelp(c *Context) bool {
|
||||
found := false
|
||||
if HelpFlag.Name != "" {
|
||||
eachName(HelpFlag.Name, func(name string) {
|
||||
if c.GlobalBool(name) || c.Bool(name) {
|
||||
found = true
|
||||
}
|
||||
})
|
||||
}
|
||||
return found
|
||||
}
|
||||
|
||||
func checkCommandHelp(c *Context, name string) bool {
|
||||
if c.Bool("h") || c.Bool("help") {
|
||||
ShowCommandHelp(c, name)
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func checkSubcommandHelp(c *Context) bool {
|
||||
if c.GlobalBool("h") || c.GlobalBool("help") {
|
||||
ShowSubcommandHelp(c)
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func checkCompletions(c *Context) bool {
|
||||
if (c.GlobalBool(BashCompletionFlag.Name) || c.Bool(BashCompletionFlag.Name)) && c.App.EnableBashCompletion {
|
||||
ShowCompletions(c)
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
func checkCommandCompletions(c *Context, name string) bool {
|
||||
if c.Bool(BashCompletionFlag.Name) && c.App.EnableBashCompletion {
|
||||
ShowCommandCompletions(c, name)
|
||||
return true
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
|
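Illustration only, not part of the diff above: a minimal sketch of how the help hooks in the removed help.go are meant to be wired up by an application, assuming the codegangsta/cli API exactly as vendored here (HelpPrinter, NewApp, App.Run); the app name and help text are made up.

package main

import (
	"fmt"
	"io"
	"os"

	"github.com/codegangsta/cli"
)

func main() {
	// HelpPrinter is the package-level hook defined above; replacing it
	// routes all template-based help output through this function.
	cli.HelpPrinter = func(w io.Writer, templ string, data interface{}) {
		fmt.Fprintln(w, "custom help output")
	}

	app := cli.NewApp()
	app.Name = "example"
	app.Usage = "demonstrate overriding cli.HelpPrinter"
	app.Run(os.Args)
}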
@ -1,21 +0,0 @@
The MIT License (MIT)

Copyright (c) 2014 Brian Goff

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
|
|
@ -1,19 +0,0 @@
package md2man

import (
	"github.com/russross/blackfriday"
)

func Render(doc []byte) []byte {
	renderer := RoffRenderer(0)
	extensions := 0
	extensions |= blackfriday.EXTENSION_NO_INTRA_EMPHASIS
	extensions |= blackfriday.EXTENSION_TABLES
	extensions |= blackfriday.EXTENSION_FENCED_CODE
	extensions |= blackfriday.EXTENSION_AUTOLINK
	extensions |= blackfriday.EXTENSION_SPACE_HEADERS
	extensions |= blackfriday.EXTENSION_FOOTNOTES
	extensions |= blackfriday.EXTENSION_TITLEBLOCK

	return blackfriday.Markdown(doc, renderer, extensions)
}
|
|
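Illustration only, not part of the diff: a minimal sketch of calling the Render function removed above, assuming the go-md2man import path as vendored here; the markdown snippet is made up.

package main

import (
	"fmt"

	"github.com/cpuguy83/go-md2man/md2man"
)

func main() {
	// Render pushes the markdown through blackfriday with the extensions
	// enabled above and returns roff output suitable for man(1).
	page := md2man.Render([]byte("# NAME\n\nexample \\- a demo page\n"))
	fmt.Println(string(page))
}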
@ -1,269 +0,0 @@
|
|||
package md2man
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"html"
|
||||
"strings"
|
||||
|
||||
"github.com/russross/blackfriday"
|
||||
)
|
||||
|
||||
type roffRenderer struct{}
|
||||
|
||||
func RoffRenderer(flags int) blackfriday.Renderer {
|
||||
return &roffRenderer{}
|
||||
}
|
||||
|
||||
func (r *roffRenderer) GetFlags() int {
|
||||
return 0
|
||||
}
|
||||
|
||||
func (r *roffRenderer) TitleBlock(out *bytes.Buffer, text []byte) {
|
||||
out.WriteString(".TH ")
|
||||
|
||||
splitText := bytes.Split(text, []byte("\n"))
|
||||
for i, line := range splitText {
|
||||
line = bytes.TrimPrefix(line, []byte("% "))
|
||||
if i == 0 {
|
||||
line = bytes.Replace(line, []byte("("), []byte("\" \""), 1)
|
||||
line = bytes.Replace(line, []byte(")"), []byte("\" \""), 1)
|
||||
}
|
||||
line = append([]byte("\""), line...)
|
||||
line = append(line, []byte("\" ")...)
|
||||
out.Write(line)
|
||||
}
|
||||
|
||||
out.WriteString(" \"\"\n")
|
||||
}
|
||||
|
||||
func (r *roffRenderer) BlockCode(out *bytes.Buffer, text []byte, lang string) {
|
||||
out.WriteString("\n.PP\n.RS\n\n.nf\n")
|
||||
escapeSpecialChars(out, text)
|
||||
out.WriteString("\n.fi\n.RE\n")
|
||||
}
|
||||
|
||||
func (r *roffRenderer) BlockQuote(out *bytes.Buffer, text []byte) {
|
||||
out.WriteString("\n.PP\n.RS\n")
|
||||
out.Write(text)
|
||||
out.WriteString("\n.RE\n")
|
||||
}
|
||||
|
||||
func (r *roffRenderer) BlockHtml(out *bytes.Buffer, text []byte) {
|
||||
out.Write(text)
|
||||
}
|
||||
|
||||
func (r *roffRenderer) Header(out *bytes.Buffer, text func() bool, level int, id string) {
|
||||
marker := out.Len()
|
||||
|
||||
switch {
|
||||
case marker == 0:
|
||||
// This is the doc header
|
||||
out.WriteString(".TH ")
|
||||
case level == 1:
|
||||
out.WriteString("\n\n.SH ")
|
||||
case level == 2:
|
||||
out.WriteString("\n.SH ")
|
||||
default:
|
||||
out.WriteString("\n.SS ")
|
||||
}
|
||||
|
||||
if !text() {
|
||||
out.Truncate(marker)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
func (r *roffRenderer) HRule(out *bytes.Buffer) {
|
||||
out.WriteString("\n.ti 0\n\\l'\\n(.lu'\n")
|
||||
}
|
||||
|
||||
func (r *roffRenderer) List(out *bytes.Buffer, text func() bool, flags int) {
|
||||
marker := out.Len()
|
||||
out.WriteString(".IP ")
|
||||
if flags&blackfriday.LIST_TYPE_ORDERED != 0 {
|
||||
out.WriteString("\\(bu 2")
|
||||
} else {
|
||||
out.WriteString("\\n+[step" + string(flags) + "]")
|
||||
}
|
||||
out.WriteString("\n")
|
||||
if !text() {
|
||||
out.Truncate(marker)
|
||||
return
|
||||
}
|
||||
|
||||
}
|
||||
|
||||
func (r *roffRenderer) ListItem(out *bytes.Buffer, text []byte, flags int) {
|
||||
out.WriteString("\n\\item ")
|
||||
out.Write(text)
|
||||
}
|
||||
|
||||
func (r *roffRenderer) Paragraph(out *bytes.Buffer, text func() bool) {
|
||||
marker := out.Len()
|
||||
out.WriteString("\n.PP\n")
|
||||
if !text() {
|
||||
out.Truncate(marker)
|
||||
return
|
||||
}
|
||||
if marker != 0 {
|
||||
out.WriteString("\n")
|
||||
}
|
||||
}
|
||||
|
||||
// TODO: This might not work
|
||||
func (r *roffRenderer) Table(out *bytes.Buffer, header []byte, body []byte, columnData []int) {
|
||||
out.WriteString(".TS\nallbox;\n")
|
||||
|
||||
out.Write(header)
|
||||
out.Write(body)
|
||||
out.WriteString("\n.TE\n")
|
||||
}
|
||||
|
||||
func (r *roffRenderer) TableRow(out *bytes.Buffer, text []byte) {
|
||||
if out.Len() > 0 {
|
||||
out.WriteString("\n")
|
||||
}
|
||||
out.Write(text)
|
||||
out.WriteString("\n")
|
||||
}
|
||||
|
||||
func (r *roffRenderer) TableHeaderCell(out *bytes.Buffer, text []byte, align int) {
|
||||
if out.Len() > 0 {
|
||||
out.WriteString(" ")
|
||||
}
|
||||
out.Write(text)
|
||||
out.WriteString(" ")
|
||||
}
|
||||
|
||||
// TODO: This is probably broken
|
||||
func (r *roffRenderer) TableCell(out *bytes.Buffer, text []byte, align int) {
|
||||
if out.Len() > 0 {
|
||||
out.WriteString("\t")
|
||||
}
|
||||
out.Write(text)
|
||||
out.WriteString("\t")
|
||||
}
|
||||
|
||||
func (r *roffRenderer) Footnotes(out *bytes.Buffer, text func() bool) {
|
||||
|
||||
}
|
||||
|
||||
func (r *roffRenderer) FootnoteItem(out *bytes.Buffer, name, text []byte, flags int) {
|
||||
|
||||
}
|
||||
|
||||
func (r *roffRenderer) AutoLink(out *bytes.Buffer, link []byte, kind int) {
|
||||
out.WriteString("\n\\[la]")
|
||||
out.Write(link)
|
||||
out.WriteString("\\[ra]")
|
||||
}
|
||||
|
||||
func (r *roffRenderer) CodeSpan(out *bytes.Buffer, text []byte) {
|
||||
out.WriteString("\\fB\\fC")
|
||||
escapeSpecialChars(out, text)
|
||||
out.WriteString("\\fR")
|
||||
}
|
||||
|
||||
func (r *roffRenderer) DoubleEmphasis(out *bytes.Buffer, text []byte) {
|
||||
out.WriteString("\\fB")
|
||||
out.Write(text)
|
||||
out.WriteString("\\fP")
|
||||
}
|
||||
|
||||
func (r *roffRenderer) Emphasis(out *bytes.Buffer, text []byte) {
|
||||
out.WriteString("\\fI")
|
||||
out.Write(text)
|
||||
out.WriteString("\\fP")
|
||||
}
|
||||
|
||||
func (r *roffRenderer) Image(out *bytes.Buffer, link []byte, title []byte, alt []byte) {
|
||||
}
|
||||
|
||||
func (r *roffRenderer) LineBreak(out *bytes.Buffer) {
|
||||
out.WriteString("\n.br\n")
|
||||
}
|
||||
|
||||
func (r *roffRenderer) Link(out *bytes.Buffer, link []byte, title []byte, content []byte) {
|
||||
r.AutoLink(out, link, 0)
|
||||
}
|
||||
|
||||
func (r *roffRenderer) RawHtmlTag(out *bytes.Buffer, tag []byte) {
|
||||
out.Write(tag)
|
||||
}
|
||||
|
||||
func (r *roffRenderer) TripleEmphasis(out *bytes.Buffer, text []byte) {
|
||||
out.WriteString("\\s+2")
|
||||
out.Write(text)
|
||||
out.WriteString("\\s-2")
|
||||
}
|
||||
|
||||
func (r *roffRenderer) StrikeThrough(out *bytes.Buffer, text []byte) {
|
||||
}
|
||||
|
||||
func (r *roffRenderer) FootnoteRef(out *bytes.Buffer, ref []byte, id int) {
|
||||
|
||||
}
|
||||
|
||||
func (r *roffRenderer) Entity(out *bytes.Buffer, entity []byte) {
|
||||
out.WriteString(html.UnescapeString(string(entity)))
|
||||
}
|
||||
|
||||
func processFooterText(text []byte) []byte {
|
||||
text = bytes.TrimPrefix(text, []byte("% "))
|
||||
newText := []byte{}
|
||||
textArr := strings.Split(string(text), ") ")
|
||||
|
||||
for i, w := range textArr {
|
||||
if i == 0 {
|
||||
w = strings.Replace(w, "(", "\" \"", 1)
|
||||
w = fmt.Sprintf("\"%s\"", w)
|
||||
} else {
|
||||
w = fmt.Sprintf(" \"%s\"", w)
|
||||
}
|
||||
newText = append(newText, []byte(w)...)
|
||||
}
|
||||
newText = append(newText, []byte(" \"\"")...)
|
||||
|
||||
return newText
|
||||
}
|
||||
|
||||
func (r *roffRenderer) NormalText(out *bytes.Buffer, text []byte) {
|
||||
escapeSpecialChars(out, text)
|
||||
}
|
||||
|
||||
func (r *roffRenderer) DocumentHeader(out *bytes.Buffer) {
|
||||
}
|
||||
|
||||
func (r *roffRenderer) DocumentFooter(out *bytes.Buffer) {
|
||||
}
|
||||
|
||||
func needsBackslash(c byte) bool {
|
||||
for _, r := range []byte("-_&\\~") {
|
||||
if c == r {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func escapeSpecialChars(out *bytes.Buffer, text []byte) {
|
||||
for i := 0; i < len(text); i++ {
|
||||
// directly copy normal characters
|
||||
org := i
|
||||
|
||||
for i < len(text) && !needsBackslash(text[i]) {
|
||||
i++
|
||||
}
|
||||
if i > org {
|
||||
out.Write(text[org:i])
|
||||
}
|
||||
|
||||
// escape a character
|
||||
if i >= len(text) {
|
||||
break
|
||||
}
|
||||
out.WriteByte('\\')
|
||||
out.WriteByte(text[i])
|
||||
}
|
||||
}
|
|
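Illustration only, not part of the diff: a sketch of driving the roff renderer above directly through blackfriday, which is essentially what Render does; the blackfriday v1 Markdown signature is assumed from the vendored revision, and the input string is made up.

package main

import (
	"fmt"

	"github.com/cpuguy83/go-md2man/md2man"
	"github.com/russross/blackfriday"
)

func main() {
	// RoffRenderer returns a blackfriday.Renderer, so it can be handed to
	// blackfriday.Markdown with whatever extensions the caller wants.
	renderer := md2man.RoffRenderer(0)
	out := blackfriday.Markdown([]byte("**bold** and *italic*\n"), renderer, 0)
	fmt.Println(string(out))
}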
@ -1,13 +0,0 @@
Copyright (c) 2012-2013 Dave Collins <dave@davec.name>

Permission to use, copy, modify, and distribute this software for any
purpose with or without fee is hereby granted, provided that the above
copyright notice and this permission notice appear in all copies.

THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
|
@ -1,450 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"reflect"
|
||||
"sort"
|
||||
"strconv"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
const (
|
||||
// ptrSize is the size of a pointer on the current arch.
|
||||
ptrSize = unsafe.Sizeof((*byte)(nil))
|
||||
)
|
||||
|
||||
var (
|
||||
// offsetPtr, offsetScalar, and offsetFlag are the offsets for the
|
||||
// internal reflect.Value fields. These values are valid before golang
|
||||
// commit ecccf07e7f9d which changed the format. They are also valid
|
||||
// after commit 82f48826c6c7 which changed the format again to mirror
|
||||
// the original format. Code in the init function updates these offsets
|
||||
// as necessary.
|
||||
offsetPtr = uintptr(ptrSize)
|
||||
offsetScalar = uintptr(0)
|
||||
offsetFlag = uintptr(ptrSize * 2)
|
||||
|
||||
// flagKindWidth and flagKindShift indicate various bits that the
|
||||
// reflect package uses internally to track kind information.
|
||||
//
|
||||
// flagRO indicates whether or not the value field of a reflect.Value is
|
||||
// read-only.
|
||||
//
|
||||
// flagIndir indicates whether the value field of a reflect.Value is
|
||||
// the actual data or a pointer to the data.
|
||||
//
|
||||
// These values are valid before golang commit 90a7c3c86944 which
|
||||
// changed their positions. Code in the init function updates these
|
||||
// flags as necessary.
|
||||
flagKindWidth = uintptr(5)
|
||||
flagKindShift = uintptr(flagKindWidth - 1)
|
||||
flagRO = uintptr(1 << 0)
|
||||
flagIndir = uintptr(1 << 1)
|
||||
)
|
||||
|
||||
func init() {
|
||||
// Older versions of reflect.Value stored small integers directly in the
|
||||
// ptr field (which is named val in the older versions). Versions
|
||||
// between commits ecccf07e7f9d and 82f48826c6c7 added a new field named
|
||||
// scalar for this purpose which unfortunately came before the flag
|
||||
// field, so the offset of the flag field is different for those
|
||||
// versions.
|
||||
//
|
||||
// This code constructs a new reflect.Value from a known small integer
|
||||
// and checks if the size of the reflect.Value struct indicates it has
|
||||
// the scalar field. When it does, the offsets are updated accordingly.
|
||||
vv := reflect.ValueOf(0xf00)
|
||||
if unsafe.Sizeof(vv) == (ptrSize * 4) {
|
||||
offsetScalar = ptrSize * 2
|
||||
offsetFlag = ptrSize * 3
|
||||
}
|
||||
|
||||
// Commit 90a7c3c86944 changed the flag positions such that the low
|
||||
// order bits are the kind. This code extracts the kind from the flags
|
||||
// field and ensures it's the correct type. When it's not, the flag
|
||||
// order has been changed to the newer format, so the flags are updated
|
||||
// accordingly.
|
||||
upf := unsafe.Pointer(uintptr(unsafe.Pointer(&vv)) + offsetFlag)
|
||||
upfv := *(*uintptr)(upf)
|
||||
flagKindMask := uintptr((1<<flagKindWidth - 1) << flagKindShift)
|
||||
if (upfv&flagKindMask)>>flagKindShift != uintptr(reflect.Int) {
|
||||
flagKindShift = 0
|
||||
flagRO = 1 << 5
|
||||
flagIndir = 1 << 6
|
||||
}
|
||||
}
|
||||
|
||||
// unsafeReflectValue converts the passed reflect.Value into one that bypasses
|
||||
// the typical safety restrictions preventing access to unaddressable and
|
||||
// unexported data. It works by digging the raw pointer to the underlying
|
||||
// value out of the protected value and generating a new unprotected (unsafe)
|
||||
// reflect.Value to it.
|
||||
//
|
||||
// This allows us to check for implementations of the Stringer and error
|
||||
// interfaces to be used for pretty printing ordinarily unaddressable and
|
||||
// inaccessible values such as unexported struct fields.
|
||||
func unsafeReflectValue(v reflect.Value) (rv reflect.Value) {
|
||||
indirects := 1
|
||||
vt := v.Type()
|
||||
upv := unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetPtr)
|
||||
rvf := *(*uintptr)(unsafe.Pointer(uintptr(unsafe.Pointer(&v)) + offsetFlag))
|
||||
if rvf&flagIndir != 0 {
|
||||
vt = reflect.PtrTo(v.Type())
|
||||
indirects++
|
||||
} else if offsetScalar != 0 {
|
||||
// The value is in the scalar field when it's not one of the
|
||||
// reference types.
|
||||
switch vt.Kind() {
|
||||
case reflect.Uintptr:
|
||||
case reflect.Chan:
|
||||
case reflect.Func:
|
||||
case reflect.Map:
|
||||
case reflect.Ptr:
|
||||
case reflect.UnsafePointer:
|
||||
default:
|
||||
upv = unsafe.Pointer(uintptr(unsafe.Pointer(&v)) +
|
||||
offsetScalar)
|
||||
}
|
||||
}
|
||||
|
||||
pv := reflect.NewAt(vt, upv)
|
||||
rv = pv
|
||||
for i := 0; i < indirects; i++ {
|
||||
rv = rv.Elem()
|
||||
}
|
||||
return rv
|
||||
}
|
||||
|
||||
// Some constants in the form of bytes to avoid string overhead. This mirrors
|
||||
// the technique used in the fmt package.
|
||||
var (
|
||||
panicBytes = []byte("(PANIC=")
|
||||
plusBytes = []byte("+")
|
||||
iBytes = []byte("i")
|
||||
trueBytes = []byte("true")
|
||||
falseBytes = []byte("false")
|
||||
interfaceBytes = []byte("(interface {})")
|
||||
commaNewlineBytes = []byte(",\n")
|
||||
newlineBytes = []byte("\n")
|
||||
openBraceBytes = []byte("{")
|
||||
openBraceNewlineBytes = []byte("{\n")
|
||||
closeBraceBytes = []byte("}")
|
||||
asteriskBytes = []byte("*")
|
||||
colonBytes = []byte(":")
|
||||
colonSpaceBytes = []byte(": ")
|
||||
openParenBytes = []byte("(")
|
||||
closeParenBytes = []byte(")")
|
||||
spaceBytes = []byte(" ")
|
||||
pointerChainBytes = []byte("->")
|
||||
nilAngleBytes = []byte("<nil>")
|
||||
maxNewlineBytes = []byte("<max depth reached>\n")
|
||||
maxShortBytes = []byte("<max>")
|
||||
circularBytes = []byte("<already shown>")
|
||||
circularShortBytes = []byte("<shown>")
|
||||
invalidAngleBytes = []byte("<invalid>")
|
||||
openBracketBytes = []byte("[")
|
||||
closeBracketBytes = []byte("]")
|
||||
percentBytes = []byte("%")
|
||||
precisionBytes = []byte(".")
|
||||
openAngleBytes = []byte("<")
|
||||
closeAngleBytes = []byte(">")
|
||||
openMapBytes = []byte("map[")
|
||||
closeMapBytes = []byte("]")
|
||||
lenEqualsBytes = []byte("len=")
|
||||
capEqualsBytes = []byte("cap=")
|
||||
)
|
||||
|
||||
// hexDigits is used to map a decimal value to a hex digit.
|
||||
var hexDigits = "0123456789abcdef"
|
||||
|
||||
// catchPanic handles any panics that might occur during the handleMethods
|
||||
// calls.
|
||||
func catchPanic(w io.Writer, v reflect.Value) {
|
||||
if err := recover(); err != nil {
|
||||
w.Write(panicBytes)
|
||||
fmt.Fprintf(w, "%v", err)
|
||||
w.Write(closeParenBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// handleMethods attempts to call the Error and String methods on the underlying
|
||||
// type the passed reflect.Value represents and outputs the result to Writer w.
|
||||
//
|
||||
// It handles panics in any called methods by catching and displaying the error
|
||||
// as the formatted value.
|
||||
func handleMethods(cs *ConfigState, w io.Writer, v reflect.Value) (handled bool) {
|
||||
// We need an interface to check if the type implements the error or
|
||||
// Stringer interface. However, the reflect package won't give us an
|
||||
// interface on certain things like unexported struct fields in order
|
||||
// to enforce visibility rules. We use unsafe to bypass these restrictions
|
||||
// since this package does not mutate the values.
|
||||
if !v.CanInterface() {
|
||||
v = unsafeReflectValue(v)
|
||||
}
|
||||
|
||||
// Choose whether or not to do error and Stringer interface lookups against
|
||||
// the base type or a pointer to the base type depending on settings.
|
||||
// Technically calling one of these methods with a pointer receiver can
|
||||
// mutate the value, however, types which choose to satisfy an error or
|
||||
// Stringer interface with a pointer receiver should not be mutating their
|
||||
// state inside these interface methods.
|
||||
var viface interface{}
|
||||
if !cs.DisablePointerMethods {
|
||||
if !v.CanAddr() {
|
||||
v = unsafeReflectValue(v)
|
||||
}
|
||||
viface = v.Addr().Interface()
|
||||
} else {
|
||||
if v.CanAddr() {
|
||||
v = v.Addr()
|
||||
}
|
||||
viface = v.Interface()
|
||||
}
|
||||
|
||||
// Is it an error or Stringer?
|
||||
switch iface := viface.(type) {
|
||||
case error:
|
||||
defer catchPanic(w, v)
|
||||
if cs.ContinueOnMethod {
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(iface.Error()))
|
||||
w.Write(closeParenBytes)
|
||||
w.Write(spaceBytes)
|
||||
return false
|
||||
}
|
||||
|
||||
w.Write([]byte(iface.Error()))
|
||||
return true
|
||||
|
||||
case fmt.Stringer:
|
||||
defer catchPanic(w, v)
|
||||
if cs.ContinueOnMethod {
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(iface.String()))
|
||||
w.Write(closeParenBytes)
|
||||
w.Write(spaceBytes)
|
||||
return false
|
||||
}
|
||||
w.Write([]byte(iface.String()))
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// printBool outputs a boolean value as true or false to Writer w.
|
||||
func printBool(w io.Writer, val bool) {
|
||||
if val {
|
||||
w.Write(trueBytes)
|
||||
} else {
|
||||
w.Write(falseBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// printInt outputs a signed integer value to Writer w.
|
||||
func printInt(w io.Writer, val int64, base int) {
|
||||
w.Write([]byte(strconv.FormatInt(val, base)))
|
||||
}
|
||||
|
||||
// printUint outputs an unsigned integer value to Writer w.
|
||||
func printUint(w io.Writer, val uint64, base int) {
|
||||
w.Write([]byte(strconv.FormatUint(val, base)))
|
||||
}
|
||||
|
||||
// printFloat outputs a floating point value using the specified precision,
|
||||
// which is expected to be 32 or 64bit, to Writer w.
|
||||
func printFloat(w io.Writer, val float64, precision int) {
|
||||
w.Write([]byte(strconv.FormatFloat(val, 'g', -1, precision)))
|
||||
}
|
||||
|
||||
// printComplex outputs a complex value using the specified float precision
|
||||
// for the real and imaginary parts to Writer w.
|
||||
func printComplex(w io.Writer, c complex128, floatPrecision int) {
|
||||
r := real(c)
|
||||
w.Write(openParenBytes)
|
||||
w.Write([]byte(strconv.FormatFloat(r, 'g', -1, floatPrecision)))
|
||||
i := imag(c)
|
||||
if i >= 0 {
|
||||
w.Write(plusBytes)
|
||||
}
|
||||
w.Write([]byte(strconv.FormatFloat(i, 'g', -1, floatPrecision)))
|
||||
w.Write(iBytes)
|
||||
w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// printHexPtr outputs a uintptr formatted as hexadecimal with a leading '0x'
|
||||
// prefix to Writer w.
|
||||
func printHexPtr(w io.Writer, p uintptr) {
|
||||
// Null pointer.
|
||||
num := uint64(p)
|
||||
if num == 0 {
|
||||
w.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Max uint64 is 16 bytes in hex + 2 bytes for '0x' prefix
|
||||
buf := make([]byte, 18)
|
||||
|
||||
// It's simpler to construct the hex string right to left.
|
||||
base := uint64(16)
|
||||
i := len(buf) - 1
|
||||
for num >= base {
|
||||
buf[i] = hexDigits[num%base]
|
||||
num /= base
|
||||
i--
|
||||
}
|
||||
buf[i] = hexDigits[num]
|
||||
|
||||
// Add '0x' prefix.
|
||||
i--
|
||||
buf[i] = 'x'
|
||||
i--
|
||||
buf[i] = '0'
|
||||
|
||||
// Strip unused leading bytes.
|
||||
buf = buf[i:]
|
||||
w.Write(buf)
|
||||
}
|
||||
|
||||
// valuesSorter implements sort.Interface to allow a slice of reflect.Value
|
||||
// elements to be sorted.
|
||||
type valuesSorter struct {
|
||||
values []reflect.Value
|
||||
strings []string // either nil or same len as values
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// newValuesSorter initializes a valuesSorter instance, which holds a set of
|
||||
// surrogate keys on which the data should be sorted. It uses flags in
|
||||
// ConfigState to decide if and how to populate those surrogate keys.
|
||||
func newValuesSorter(values []reflect.Value, cs *ConfigState) sort.Interface {
|
||||
vs := &valuesSorter{values: values, cs: cs}
|
||||
if canSortSimply(vs.values[0].Kind()) {
|
||||
return vs
|
||||
}
|
||||
if !cs.DisableMethods {
|
||||
vs.strings = make([]string, len(values))
|
||||
for i := range vs.values {
|
||||
b := bytes.Buffer{}
|
||||
if !handleMethods(cs, &b, vs.values[i]) {
|
||||
vs.strings = nil
|
||||
break
|
||||
}
|
||||
vs.strings[i] = b.String()
|
||||
}
|
||||
}
|
||||
if vs.strings == nil && cs.SpewKeys {
|
||||
vs.strings = make([]string, len(values))
|
||||
for i := range vs.values {
|
||||
vs.strings[i] = Sprintf("%#v", vs.values[i].Interface())
|
||||
}
|
||||
}
|
||||
return vs
|
||||
}
|
||||
|
||||
// canSortSimply tests whether a reflect.Kind is a primitive that can be sorted
|
||||
// directly, or whether it should be considered for sorting by surrogate keys
|
||||
// (if the ConfigState allows it).
|
||||
func canSortSimply(kind reflect.Kind) bool {
|
||||
// This switch parallels valueSortLess, except for the default case.
|
||||
switch kind {
|
||||
case reflect.Bool:
|
||||
return true
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
return true
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
return true
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return true
|
||||
case reflect.String:
|
||||
return true
|
||||
case reflect.Uintptr:
|
||||
return true
|
||||
case reflect.Array:
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// Len returns the number of values in the slice. It is part of the
|
||||
// sort.Interface implementation.
|
||||
func (s *valuesSorter) Len() int {
|
||||
return len(s.values)
|
||||
}
|
||||
|
||||
// Swap swaps the values at the passed indices. It is part of the
|
||||
// sort.Interface implementation.
|
||||
func (s *valuesSorter) Swap(i, j int) {
|
||||
s.values[i], s.values[j] = s.values[j], s.values[i]
|
||||
if s.strings != nil {
|
||||
s.strings[i], s.strings[j] = s.strings[j], s.strings[i]
|
||||
}
|
||||
}
|
||||
|
||||
// valueSortLess returns whether the first value should sort before the second
|
||||
// value. It is used by valueSorter.Less as part of the sort.Interface
|
||||
// implementation.
|
||||
func valueSortLess(a, b reflect.Value) bool {
|
||||
switch a.Kind() {
|
||||
case reflect.Bool:
|
||||
return !a.Bool() && b.Bool()
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
return a.Int() < b.Int()
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
return a.Uint() < b.Uint()
|
||||
case reflect.Float32, reflect.Float64:
|
||||
return a.Float() < b.Float()
|
||||
case reflect.String:
|
||||
return a.String() < b.String()
|
||||
case reflect.Uintptr:
|
||||
return a.Uint() < b.Uint()
|
||||
case reflect.Array:
|
||||
// Compare the contents of both arrays.
|
||||
l := a.Len()
|
||||
for i := 0; i < l; i++ {
|
||||
av := a.Index(i)
|
||||
bv := b.Index(i)
|
||||
if av.Interface() == bv.Interface() {
|
||||
continue
|
||||
}
|
||||
return valueSortLess(av, bv)
|
||||
}
|
||||
}
|
||||
return a.String() < b.String()
|
||||
}
|
||||
|
||||
// Less returns whether the value at index i should sort before the
|
||||
// value at index j. It is part of the sort.Interface implementation.
|
||||
func (s *valuesSorter) Less(i, j int) bool {
|
||||
if s.strings == nil {
|
||||
return valueSortLess(s.values[i], s.values[j])
|
||||
}
|
||||
return s.strings[i] < s.strings[j]
|
||||
}
|
||||
|
||||
// sortValues is a sort function that handles both native types and any type that
|
||||
// can be converted to error or Stringer. Other inputs are sorted according to
|
||||
// their Value.String() value to ensure display stability.
|
||||
func sortValues(values []reflect.Value, cs *ConfigState) {
|
||||
if len(values) == 0 {
|
||||
return
|
||||
}
|
||||
sort.Sort(newValuesSorter(values, cs))
|
||||
}
|
|
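Illustration only, not part of the diff: a sketch of the behaviour that handleMethods and unsafeReflectValue above provide, assuming the go-spew import path as vendored here; the stamp type is made up for the example.

package main

import (
	"fmt"

	"github.com/davecgh/go-spew/spew"
)

// stamp implements fmt.Stringer with a pointer receiver only, the case that
// handleMethods reaches via v.Addr() (or unsafeReflectValue when needed).
type stamp struct{ n int }

func (s *stamp) String() string { return fmt.Sprintf("stamp#%d", s.n) }

func main() {
	// A non-pointer value is passed, yet the pointer-receiver String method
	// is still invoked because DisablePointerMethods defaults to false.
	spew.Dump(stamp{n: 7})
}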
@ -1,294 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
)
|
||||
|
||||
// ConfigState houses the configuration options used by spew to format and
|
||||
// display values. There is a global instance, Config, that is used to control
|
||||
// all top-level Formatter and Dump functionality. Each ConfigState instance
|
||||
// provides methods equivalent to the top-level functions.
|
||||
//
|
||||
// The zero value for ConfigState provides no indentation. You would typically
|
||||
// want to set it to a space or a tab.
|
||||
//
|
||||
// Alternatively, you can use NewDefaultConfig to get a ConfigState instance
|
||||
// with default settings. See the documentation of NewDefaultConfig for default
|
||||
// values.
|
||||
type ConfigState struct {
|
||||
// Indent specifies the string to use for each indentation level. The
|
||||
// global config instance that all top-level functions use sets this to a
|
||||
// single space by default. If you would like more indentation, you might
|
||||
// set this to a tab with "\t" or perhaps two spaces with " ".
|
||||
Indent string
|
||||
|
||||
// MaxDepth controls the maximum number of levels to descend into nested
|
||||
// data structures. The default, 0, means there is no limit.
|
||||
//
|
||||
// NOTE: Circular data structures are properly detected, so it is not
|
||||
// necessary to set this value unless you specifically want to limit deeply
|
||||
// nested data structures.
|
||||
MaxDepth int
|
||||
|
||||
// DisableMethods specifies whether or not error and Stringer interfaces are
|
||||
// invoked for types that implement them.
|
||||
DisableMethods bool
|
||||
|
||||
// DisablePointerMethods specifies whether or not to check for and invoke
|
||||
// error and Stringer interfaces on types which only accept a pointer
|
||||
// receiver when the current type is not a pointer.
|
||||
//
|
||||
// NOTE: This might be an unsafe action since calling one of these methods
|
||||
// with a pointer receiver could technically mutate the value, however,
|
||||
// in practice, types which choose to satisfy an error or Stringer
|
||||
// interface with a pointer receiver should not be mutating their state
|
||||
// inside these interface methods.
|
||||
DisablePointerMethods bool
|
||||
|
||||
// ContinueOnMethod specifies whether or not recursion should continue once
|
||||
// a custom error or Stringer interface is invoked. The default, false,
|
||||
// means it will print the results of invoking the custom error or Stringer
|
||||
// interface and return immediately instead of continuing to recurse into
|
||||
// the internals of the data type.
|
||||
//
|
||||
// NOTE: This flag does not have any effect if method invocation is disabled
|
||||
// via the DisableMethods or DisablePointerMethods options.
|
||||
ContinueOnMethod bool
|
||||
|
||||
// SortKeys specifies map keys should be sorted before being printed. Use
|
||||
// this to have a more deterministic, diffable output. Note that only
|
||||
// native types (bool, int, uint, floats, uintptr and string) and types
|
||||
// that support the error or Stringer interfaces (if methods are
|
||||
// enabled) are supported, with other types sorted according to the
|
||||
// reflect.Value.String() output which guarantees display stability.
|
||||
SortKeys bool
|
||||
|
||||
// SpewKeys specifies that, as a last resort attempt, map keys should
|
||||
// be spewed to strings and sorted by those strings. This is only
|
||||
// considered if SortKeys is true.
|
||||
SpewKeys bool
|
||||
}
|
||||
|
||||
// Config is the active configuration of the top-level functions.
|
||||
// The configuration can be changed by modifying the contents of spew.Config.
|
||||
var Config = ConfigState{Indent: " "}
|
||||
|
||||
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the formatted string as a value that satisfies error. See NewFormatter
|
||||
// for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Errorf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Errorf(format string, a ...interface{}) (err error) {
|
||||
return fmt.Errorf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprint(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprint(w, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintf(w, format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintf(w, format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
||||
// passed with a Formatter interface returned by c.NewFormatter. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintln(w, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintln(w, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Print(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Print(a ...interface{}) (n int, err error) {
|
||||
return fmt.Print(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Printf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Printf(format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Printf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Println(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Println(a ...interface{}) (n int, err error) {
|
||||
return fmt.Println(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprint(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprint(a ...interface{}) string {
|
||||
return fmt.Sprint(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
||||
// passed with a Formatter interface returned by c.NewFormatter. It returns
|
||||
// the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintf(format, c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprintf(format string, a ...interface{}) string {
|
||||
return fmt.Sprintf(format, c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
||||
// were passed with a Formatter interface returned by c.NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintln(c.NewFormatter(a), c.NewFormatter(b))
|
||||
func (c *ConfigState) Sprintln(a ...interface{}) string {
|
||||
return fmt.Sprintln(c.convertArgs(a)...)
|
||||
}
|
||||
|
||||
/*
|
||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||
interface. As a result, it integrates cleanly with standard fmt package
|
||||
printing functions. The formatter is useful for inline printing of smaller data
|
||||
types similar to the standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), and %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Typically this function shouldn't be called directly. It is much easier to make
|
||||
use of the custom formatter by calling one of the convenience functions such as
|
||||
c.Printf, c.Println, or c.Fprintf.
|
||||
*/
|
||||
func (c *ConfigState) NewFormatter(v interface{}) fmt.Formatter {
|
||||
return newFormatter(c, v)
|
||||
}
|
||||
|
||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||
// exactly the same as Dump.
|
||||
func (c *ConfigState) Fdump(w io.Writer, a ...interface{}) {
|
||||
fdump(c, w, a...)
|
||||
}
|
||||
|
||||
/*
|
||||
Dump displays the passed parameters to standard out with newlines, customizable
|
||||
indentation, and additional debug information such as complete types and all
|
||||
pointer addresses used to indirect to the final value. It provides the
|
||||
following features over the built-in printing facilities provided by the fmt
|
||||
package:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output
|
||||
|
||||
The configuration options are controlled by modifying the public members
|
||||
of c. See ConfigState for options documentation.
|
||||
|
||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||
get the formatted result as a string.
|
||||
*/
|
||||
func (c *ConfigState) Dump(a ...interface{}) {
|
||||
fdump(c, os.Stdout, a...)
|
||||
}
|
||||
|
||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||
// as Dump.
|
||||
func (c *ConfigState) Sdump(a ...interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
fdump(c, &buf, a...)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||
// length with each argument converted to a spew Formatter interface using
|
||||
// the ConfigState associated with c.
|
||||
func (c *ConfigState) convertArgs(args []interface{}) (formatters []interface{}) {
|
||||
formatters = make([]interface{}, len(args))
|
||||
for index, arg := range args {
|
||||
formatters[index] = newFormatter(c, arg)
|
||||
}
|
||||
return formatters
|
||||
}
|
||||
|
||||
// NewDefaultConfig returns a ConfigState with the following default settings.
|
||||
//
|
||||
// Indent: " "
|
||||
// MaxDepth: 0
|
||||
// DisableMethods: false
|
||||
// DisablePointerMethods: false
|
||||
// ContinueOnMethod: false
|
||||
// SortKeys: false
|
||||
func NewDefaultConfig() *ConfigState {
|
||||
return &ConfigState{Indent: " "}
|
||||
}
|
|
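Illustration only, not part of the diff: a minimal sketch of using the ConfigState methods defined above instead of the global spew.Config, assuming the go-spew import path as vendored here; the map literal is made up.

package main

import "github.com/davecgh/go-spew/spew"

func main() {
	// NewDefaultConfig returns an independent *ConfigState, so these
	// settings do not affect other callers of the global spew.Config.
	cs := spew.NewDefaultConfig()
	cs.MaxDepth = 2
	cs.SortKeys = true // deterministic, diffable map output
	cs.Dump(map[string]int{"b": 2, "a": 1})
}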
@ -1,202 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
/*
|
||||
Package spew implements a deep pretty printer for Go data structures to aid in
|
||||
debugging.
|
||||
|
||||
A quick overview of the additional features spew provides over the built-in
|
||||
printing facilities for Go data types is as follows:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output (only when using
|
||||
Dump style)
|
||||
|
||||
There are two different approaches spew allows for dumping Go data structures:
|
||||
|
||||
* Dump style which prints with newlines, customizable indentation,
|
||||
and additional debug information such as types and all pointer addresses
|
||||
used to indirect to the final value
|
||||
* A custom Formatter interface that integrates cleanly with the standard fmt
|
||||
package and replaces %v, %+v, %#v, and %#+v to provide inline printing
|
||||
similar to the default %v while providing the additional functionality
|
||||
outlined above and passing unsupported format verbs such as %x and %q
|
||||
along to fmt
|
||||
|
||||
Quick Start
|
||||
|
||||
This section demonstrates how to quickly get started with spew. See the
|
||||
sections below for further details on formatting and configuration options.
|
||||
|
||||
To dump a variable with full newlines, indentation, type, and pointer
|
||||
information use Dump, Fdump, or Sdump:
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
spew.Fdump(someWriter, myVar1, myVar2, ...)
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
|
||||
Alternatively, if you would prefer to use format strings with a compacted inline
|
||||
printing style, use the convenience wrappers Printf, Fprintf, etc with
|
||||
%v (most compact), %+v (adds pointer addresses), %#v (adds types), or
|
||||
%#+v (adds types and pointer addresses):
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Fprintf(someWriter, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(someWriter, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
|
||||
Configuration Options
|
||||
|
||||
Configuration of spew is handled by fields in the ConfigState type. For
|
||||
convenience, all of the top-level functions use a global state available
|
||||
via the spew.Config global.
|
||||
|
||||
It is also possible to create a ConfigState instance that provides methods
|
||||
equivalent to the top-level functions. This allows concurrent configuration
|
||||
options. See the ConfigState documentation for more details.
|
||||
|
||||
The following configuration options are available:
|
||||
* Indent
|
||||
String to use for each indentation level for Dump functions.
|
||||
It is a single space by default. A popular alternative is "\t".
|
||||
|
||||
* MaxDepth
|
||||
Maximum number of levels to descend into nested data structures.
|
||||
There is no limit by default.
|
||||
|
||||
* DisableMethods
|
||||
Disables invocation of error and Stringer interface methods.
|
||||
Method invocation is enabled by default.
|
||||
|
||||
* DisablePointerMethods
|
||||
Disables invocation of error and Stringer interface methods on types
|
||||
which only accept pointer receivers from non-pointer variables.
|
||||
Pointer method invocation is enabled by default.
|
||||
|
||||
* ContinueOnMethod
|
||||
Enables recursion into types after invoking error and Stringer interface
|
||||
methods. Recursion after method invocation is disabled by default.
|
||||
|
||||
* SortKeys
|
||||
Specifies map keys should be sorted before being printed. Use
|
||||
this to have a more deterministic, diffable output. Note that
|
||||
only native types (bool, int, uint, floats, uintptr and string)
|
||||
and types which implement error or Stringer interfaces are
|
||||
supported with other types sorted according to the
|
||||
reflect.Value.String() output which guarantees display
|
||||
stability. Natural map order is used by default.
|
||||
|
||||
* SpewKeys
|
||||
Specifies that, as a last resort attempt, map keys should be
|
||||
spewed to strings and sorted by those strings. This is only
|
||||
considered if SortKeys is true.
|
||||
|
||||
Dump Usage
|
||||
|
||||
Simply call spew.Dump with a list of variables you want to dump:
|
||||
|
||||
spew.Dump(myVar1, myVar2, ...)
|
||||
|
||||
You may also call spew.Fdump if you would prefer to output to an arbitrary
|
||||
io.Writer. For example, to dump to standard error:
|
||||
|
||||
spew.Fdump(os.Stderr, myVar1, myVar2, ...)
|
||||
|
||||
A third option is to call spew.Sdump to get the formatted output as a string:
|
||||
|
||||
str := spew.Sdump(myVar1, myVar2, ...)
|
||||
|
||||
Sample Dump Output
|
||||
|
||||
See the Dump example for details on the setup of the types and variables being
|
||||
shown here.
|
||||
|
||||
(main.Foo) {
|
||||
unexportedField: (*main.Bar)(0xf84002e210)({
|
||||
flag: (main.Flag) flagTwo,
|
||||
data: (uintptr) <nil>
|
||||
}),
|
||||
ExportedField: (map[interface {}]interface {}) (len=1) {
|
||||
(string) (len=3) "one": (bool) true
|
||||
}
|
||||
}
|
||||
|
||||
Byte (and uint8) arrays and slices are displayed uniquely like the hexdump -C
|
||||
command as shown.
|
||||
([]uint8) (len=32 cap=32) {
|
||||
00000000 11 12 13 14 15 16 17 18 19 1a 1b 1c 1d 1e 1f 20 |............... |
|
||||
00000010 21 22 23 24 25 26 27 28 29 2a 2b 2c 2d 2e 2f 30 |!"#$%&'()*+,-./0|
|
||||
00000020 31 32 |12|
|
||||
}
|
||||
|
||||
Custom Formatter
|
||||
|
||||
Spew provides a custom formatter that implements the fmt.Formatter interface
|
||||
so that it integrates cleanly with standard fmt package printing functions. The
|
||||
formatter is useful for inline printing of smaller data types similar to the
|
||||
standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Custom Formatter Usage
|
||||
|
||||
The simplest way to make use of the spew custom formatter is to call one of the
|
||||
convenience functions such as spew.Printf, spew.Println, or spew.Fprintf. The
|
||||
functions have syntax you are most likely already familiar with:
|
||||
|
||||
spew.Printf("myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Printf("myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
spew.Println(myVar, myVar2)
|
||||
spew.Fprintf(os.Stderr, "myVar1: %v -- myVar2: %+v", myVar1, myVar2)
|
||||
spew.Fprintf(os.Stderr, "myVar3: %#v -- myVar4: %#+v", myVar3, myVar4)
|
||||
|
||||
See the Index for the full list of convenience functions.
|
||||
|
||||
Sample Formatter Output
|
||||
|
||||
Double pointer to a uint8:
|
||||
%v: <**>5
|
||||
%+v: <**>(0xf8400420d0->0xf8400420c8)5
|
||||
%#v: (**uint8)5
|
||||
%#+v: (**uint8)(0xf8400420d0->0xf8400420c8)5
|
||||
|
||||
Pointer to circular struct with a uint8 field and a pointer to itself:
|
||||
%v: <*>{1 <*><shown>}
|
||||
%+v: <*>(0xf84003e260){ui8:1 c:<*>(0xf84003e260)<shown>}
|
||||
%#v: (*main.circular){ui8:(uint8)1 c:(*main.circular)<shown>}
|
||||
%#+v: (*main.circular)(0xf84003e260){ui8:(uint8)1 c:(*main.circular)(0xf84003e260)<shown>}
|
||||
|
||||
See the Printf example for details on the setup of variables being shown
|
||||
here.
|
||||
|
||||
Errors
|
||||
|
||||
Since it is possible for custom Stringer/error interfaces to panic, spew
|
||||
detects them and handles them internally by printing the panic information
|
||||
inline with the output. Since spew is intended to provide deep pretty printing
|
||||
capabilities on structures, it intentionally does not return any errors.
|
||||
*/
|
||||
package spew
|
|
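Illustration only, not part of the diff: a tiny end-to-end sketch of the two styles described in the package documentation above (Dump versus the %+v Formatter), assuming the go-spew import path as vendored here; the foo/bar types are made up.

package main

import "github.com/davecgh/go-spew/spew"

type bar struct{ flag int }

type foo struct {
	b *bar
	M map[string]bool
}

func main() {
	v := foo{b: &bar{flag: 2}, M: map[string]bool{"one": true}}

	// Dump style: newlines, indentation, full types, and pointer addresses.
	spew.Dump(v)

	// Formatter style: inline, with %+v adding pointer addresses.
	spew.Printf("v: %+v\n", v)
}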
@ -1,506 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"encoding/hex"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
var (
|
||||
// uint8Type is a reflect.Type representing a uint8. It is used to
|
||||
// convert cgo types to uint8 slices for hexdumping.
|
||||
uint8Type = reflect.TypeOf(uint8(0))
|
||||
|
||||
// cCharRE is a regular expression that matches a cgo char.
|
||||
// It is used to detect character arrays to hexdump them.
|
||||
cCharRE = regexp.MustCompile("^.*\\._Ctype_char$")
|
||||
|
||||
// cUnsignedCharRE is a regular expression that matches a cgo unsigned
|
||||
// char. It is used to detect unsigned character arrays to hexdump
|
||||
// them.
|
||||
cUnsignedCharRE = regexp.MustCompile("^.*\\._Ctype_unsignedchar$")
|
||||
|
||||
// cUint8tCharRE is a regular expression that matches a cgo uint8_t.
|
||||
// It is used to detect uint8_t arrays to hexdump them.
|
||||
cUint8tCharRE = regexp.MustCompile("^.*\\._Ctype_uint8_t$")
|
||||
)
|
||||
|
||||
// dumpState contains information about the state of a dump operation.
|
||||
type dumpState struct {
|
||||
w io.Writer
|
||||
depth int
|
||||
pointers map[uintptr]int
|
||||
ignoreNextType bool
|
||||
ignoreNextIndent bool
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// indent performs indentation according to the depth level and cs.Indent
|
||||
// option.
|
||||
func (d *dumpState) indent() {
|
||||
if d.ignoreNextIndent {
|
||||
d.ignoreNextIndent = false
|
||||
return
|
||||
}
|
||||
d.w.Write(bytes.Repeat([]byte(d.cs.Indent), d.depth))
|
||||
}
|
||||
|
||||
// unpackValue returns values inside of non-nil interfaces when possible.
|
||||
// This is useful for data types like structs, arrays, slices, and maps which
|
||||
// can contain varying types packed inside an interface.
|
||||
func (d *dumpState) unpackValue(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Interface && !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// dumpPtr handles formatting of pointers by indirecting them as necessary.
|
||||
func (d *dumpState) dumpPtr(v reflect.Value) {
|
||||
// Remove pointers at or below the current depth from map used to detect
|
||||
// circular refs.
|
||||
for k, depth := range d.pointers {
|
||||
if depth >= d.depth {
|
||||
delete(d.pointers, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Keep list of all dereferenced pointers to show later.
|
||||
pointerChain := make([]uintptr, 0)
|
||||
|
||||
// Figure out how many levels of indirection there are by dereferencing
|
||||
// pointers and unpacking interfaces down the chain while detecting circular
|
||||
// references.
|
||||
nilFound := false
|
||||
cycleFound := false
|
||||
indirects := 0
|
||||
ve := v
|
||||
for ve.Kind() == reflect.Ptr {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
indirects++
|
||||
addr := ve.Pointer()
|
||||
pointerChain = append(pointerChain, addr)
|
||||
if pd, ok := d.pointers[addr]; ok && pd < d.depth {
|
||||
cycleFound = true
|
||||
indirects--
|
||||
break
|
||||
}
|
||||
d.pointers[addr] = d.depth
|
||||
|
||||
ve = ve.Elem()
|
||||
if ve.Kind() == reflect.Interface {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
ve = ve.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
// Display type information.
|
||||
d.w.Write(openParenBytes)
|
||||
d.w.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||
d.w.Write([]byte(ve.Type().String()))
|
||||
d.w.Write(closeParenBytes)
|
||||
|
||||
// Display pointer information.
|
||||
if len(pointerChain) > 0 {
|
||||
d.w.Write(openParenBytes)
|
||||
for i, addr := range pointerChain {
|
||||
if i > 0 {
|
||||
d.w.Write(pointerChainBytes)
|
||||
}
|
||||
printHexPtr(d.w, addr)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// Display dereferenced value.
|
||||
d.w.Write(openParenBytes)
|
||||
switch {
|
||||
case nilFound == true:
|
||||
d.w.Write(nilAngleBytes)
|
||||
|
||||
case cycleFound == true:
|
||||
d.w.Write(circularBytes)
|
||||
|
||||
default:
|
||||
d.ignoreNextType = true
|
||||
d.dump(ve)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// dumpSlice handles formatting of arrays and slices. Byte (uint8 under
|
||||
// reflection) arrays and slices are dumped in hexdump -C fashion.
|
||||
func (d *dumpState) dumpSlice(v reflect.Value) {
|
||||
// Determine whether this type should be hex dumped or not. Also,
|
||||
// for types which should be hexdumped, try to use the underlying data
|
||||
// first, then fall back to trying to convert them to a uint8 slice.
|
||||
var buf []uint8
|
||||
doConvert := false
|
||||
doHexDump := false
|
||||
numEntries := v.Len()
|
||||
if numEntries > 0 {
|
||||
vt := v.Index(0).Type()
|
||||
vts := vt.String()
|
||||
switch {
|
||||
// C types that need to be converted.
|
||||
case cCharRE.MatchString(vts):
|
||||
fallthrough
|
||||
case cUnsignedCharRE.MatchString(vts):
|
||||
fallthrough
|
||||
case cUint8tCharRE.MatchString(vts):
|
||||
doConvert = true
|
||||
|
||||
// Try to use existing uint8 slices and fall back to converting
|
||||
// and copying if that fails.
|
||||
case vt.Kind() == reflect.Uint8:
|
||||
// We need an addressable interface to convert the type back
|
||||
// into a byte slice. However, the reflect package won't give
|
||||
// us an interface on certain things like unexported struct
|
||||
// fields in order to enforce visibility rules. We use unsafe
|
||||
// to bypass these restrictions since this package does not
|
||||
// mutate the values.
|
||||
vs := v
|
||||
if !vs.CanInterface() || !vs.CanAddr() {
|
||||
vs = unsafeReflectValue(vs)
|
||||
}
|
||||
vs = vs.Slice(0, numEntries)
|
||||
|
||||
// Use the existing uint8 slice if it can be type
|
||||
// asserted.
|
||||
iface := vs.Interface()
|
||||
if slice, ok := iface.([]uint8); ok {
|
||||
buf = slice
|
||||
doHexDump = true
|
||||
break
|
||||
}
|
||||
|
||||
// The underlying data needs to be converted if it can't
|
||||
// be type asserted to a uint8 slice.
|
||||
doConvert = true
|
||||
}
|
||||
|
||||
// Copy and convert the underlying type if needed.
|
||||
if doConvert && vt.ConvertibleTo(uint8Type) {
|
||||
// Convert and copy each element into a uint8 byte
|
||||
// slice.
|
||||
buf = make([]uint8, numEntries)
|
||||
for i := 0; i < numEntries; i++ {
|
||||
vv := v.Index(i)
|
||||
buf[i] = uint8(vv.Convert(uint8Type).Uint())
|
||||
}
|
||||
doHexDump = true
|
||||
}
|
||||
}
|
||||
|
||||
// Hexdump the entire slice as needed.
|
||||
if doHexDump {
|
||||
indent := strings.Repeat(d.cs.Indent, d.depth)
|
||||
str := indent + hex.Dump(buf)
|
||||
str = strings.Replace(str, "\n", "\n"+indent, -1)
|
||||
str = strings.TrimRight(str, d.cs.Indent)
|
||||
d.w.Write([]byte(str))
|
||||
return
|
||||
}
|
||||
|
||||
// Recursively call dump for each item.
|
||||
for i := 0; i < numEntries; i++ {
|
||||
d.dump(d.unpackValue(v.Index(i)))
|
||||
if i < (numEntries - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
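// Editor's illustration (not part of the original file): for a byte slice the
// hexdump path above yields output roughly like the following with the default
// single-space Indent (the cap value depends on the runtime allocation):
//
//	spew.Dump([]byte("spew"))
//
//	([]uint8) (len=4 cap=8) {
//	 00000000  73 70 65 77                                       |spew|
//	}
//
// Slices of any other element kind fall through to the per-element dump loop.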
|
||||
|
||||
// dump is the main workhorse for dumping a value. It uses the passed reflect
|
||||
// value to figure out what kind of object we are dealing with and formats it
|
||||
// appropriately. It is a recursive function, however circular data structures
|
||||
// are detected and handled properly.
|
||||
func (d *dumpState) dump(v reflect.Value) {
|
||||
// Handle invalid reflect values immediately.
|
||||
kind := v.Kind()
|
||||
if kind == reflect.Invalid {
|
||||
d.w.Write(invalidAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Handle pointers specially.
|
||||
if kind == reflect.Ptr {
|
||||
d.indent()
|
||||
d.dumpPtr(v)
|
||||
return
|
||||
}
|
||||
|
||||
// Print type information unless already handled elsewhere.
|
||||
if !d.ignoreNextType {
|
||||
d.indent()
|
||||
d.w.Write(openParenBytes)
|
||||
d.w.Write([]byte(v.Type().String()))
|
||||
d.w.Write(closeParenBytes)
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
d.ignoreNextType = false
|
||||
|
||||
// Display length and capacity if the built-in len and cap functions
|
||||
// work with the value's kind and the len/cap itself is non-zero.
|
||||
valueLen, valueCap := 0, 0
|
||||
switch v.Kind() {
|
||||
case reflect.Array, reflect.Slice, reflect.Chan:
|
||||
valueLen, valueCap = v.Len(), v.Cap()
|
||||
case reflect.Map, reflect.String:
|
||||
valueLen = v.Len()
|
||||
}
|
||||
if valueLen != 0 || valueCap != 0 {
|
||||
d.w.Write(openParenBytes)
|
||||
if valueLen != 0 {
|
||||
d.w.Write(lenEqualsBytes)
|
||||
printInt(d.w, int64(valueLen), 10)
|
||||
}
|
||||
if valueCap != 0 {
|
||||
if valueLen != 0 {
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
d.w.Write(capEqualsBytes)
|
||||
printInt(d.w, int64(valueCap), 10)
|
||||
}
|
||||
d.w.Write(closeParenBytes)
|
||||
d.w.Write(spaceBytes)
|
||||
}
|
||||
|
||||
// Call Stringer/error interfaces if they exist and the handle methods flag
|
||||
// is enabled
|
||||
if !d.cs.DisableMethods {
|
||||
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
||||
if handled := handleMethods(d.cs, d.w, v); handled {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch kind {
|
||||
case reflect.Invalid:
|
||||
// Do nothing. We should never get here since invalid has already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Bool:
|
||||
printBool(d.w, v.Bool())
|
||||
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
printInt(d.w, v.Int(), 10)
|
||||
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
printUint(d.w, v.Uint(), 10)
|
||||
|
||||
case reflect.Float32:
|
||||
printFloat(d.w, v.Float(), 32)
|
||||
|
||||
case reflect.Float64:
|
||||
printFloat(d.w, v.Float(), 64)
|
||||
|
||||
case reflect.Complex64:
|
||||
printComplex(d.w, v.Complex(), 32)
|
||||
|
||||
case reflect.Complex128:
|
||||
printComplex(d.w, v.Complex(), 64)
|
||||
|
||||
case reflect.Slice:
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case reflect.Array:
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
d.dumpSlice(v)
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.String:
|
||||
d.w.Write([]byte(strconv.Quote(v.String())))
|
||||
|
||||
case reflect.Interface:
|
||||
// The only time we should get here is for nil interfaces due to
|
||||
// unpackValue calls.
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// Do nothing. We should never get here since pointers have already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Map:
|
||||
// nil maps should be indicated as different than empty maps
|
||||
if v.IsNil() {
|
||||
d.w.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
numEntries := v.Len()
|
||||
keys := v.MapKeys()
|
||||
if d.cs.SortKeys {
|
||||
sortValues(keys, d.cs)
|
||||
}
|
||||
for i, key := range keys {
|
||||
d.dump(d.unpackValue(key))
|
||||
d.w.Write(colonSpaceBytes)
|
||||
d.ignoreNextIndent = true
|
||||
d.dump(d.unpackValue(v.MapIndex(key)))
|
||||
if i < (numEntries - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Struct:
|
||||
d.w.Write(openBraceNewlineBytes)
|
||||
d.depth++
|
||||
if (d.cs.MaxDepth != 0) && (d.depth > d.cs.MaxDepth) {
|
||||
d.indent()
|
||||
d.w.Write(maxNewlineBytes)
|
||||
} else {
|
||||
vt := v.Type()
|
||||
numFields := v.NumField()
|
||||
for i := 0; i < numFields; i++ {
|
||||
d.indent()
|
||||
vtf := vt.Field(i)
|
||||
d.w.Write([]byte(vtf.Name))
|
||||
d.w.Write(colonSpaceBytes)
|
||||
d.ignoreNextIndent = true
|
||||
d.dump(d.unpackValue(v.Field(i)))
|
||||
if i < (numFields - 1) {
|
||||
d.w.Write(commaNewlineBytes)
|
||||
} else {
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
}
|
||||
d.depth--
|
||||
d.indent()
|
||||
d.w.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Uintptr:
|
||||
printHexPtr(d.w, uintptr(v.Uint()))
|
||||
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
printHexPtr(d.w, v.Pointer())
|
||||
|
||||
// There were not any other types at the time this code was written, but
|
||||
// fall back to letting the default fmt package handle it in case any new
|
||||
// types are added.
|
||||
default:
|
||||
if v.CanInterface() {
|
||||
fmt.Fprintf(d.w, "%v", v.Interface())
|
||||
} else {
|
||||
fmt.Fprintf(d.w, "%v", v.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// fdump is a helper function to consolidate the logic from the various public
|
||||
// methods which take varying writers and config states.
|
||||
func fdump(cs *ConfigState, w io.Writer, a ...interface{}) {
|
||||
for _, arg := range a {
|
||||
if arg == nil {
|
||||
w.Write(interfaceBytes)
|
||||
w.Write(spaceBytes)
|
||||
w.Write(nilAngleBytes)
|
||||
w.Write(newlineBytes)
|
||||
continue
|
||||
}
|
||||
|
||||
d := dumpState{w: w, cs: cs}
|
||||
d.pointers = make(map[uintptr]int)
|
||||
d.dump(reflect.ValueOf(arg))
|
||||
d.w.Write(newlineBytes)
|
||||
}
|
||||
}
|
||||
|
||||
// Fdump formats and displays the passed arguments to io.Writer w. It formats
|
||||
// exactly the same as Dump.
|
||||
func Fdump(w io.Writer, a ...interface{}) {
|
||||
fdump(&Config, w, a...)
|
||||
}
|
||||
|
||||
// Sdump returns a string with the passed arguments formatted exactly the same
|
||||
// as Dump.
|
||||
func Sdump(a ...interface{}) string {
|
||||
var buf bytes.Buffer
|
||||
fdump(&Config, &buf, a...)
|
||||
return buf.String()
|
||||
}
|
||||
|
||||
/*
|
||||
Dump displays the passed parameters to standard out with newlines, customizable
|
||||
indentation, and additional debug information such as complete types and all
|
||||
pointer addresses used to indirect to the final value. It provides the
|
||||
following features over the built-in printing facilities provided by the fmt
|
||||
package:
|
||||
|
||||
* Pointers are dereferenced and followed
|
||||
* Circular data structures are detected and handled properly
|
||||
* Custom Stringer/error interfaces are optionally invoked, including
|
||||
on unexported types
|
||||
* Custom types which only implement the Stringer/error interfaces via
|
||||
a pointer receiver are optionally invoked when passing non-pointer
|
||||
variables
|
||||
* Byte arrays and slices are dumped like the hexdump -C command which
|
||||
includes offsets, byte values in hex, and ASCII output
|
||||
|
||||
The configuration options are controlled by an exported package global,
|
||||
spew.Config. See ConfigState for options documentation.
|
||||
|
||||
See Fdump if you would prefer dumping to an arbitrary io.Writer or Sdump to
|
||||
get the formatted result as a string.
|
||||
*/
|
||||
func Dump(a ...interface{}) {
|
||||
fdump(&Config, os.Stdout, a...)
|
||||
}
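// Editor's sketch (not in the original source): a minimal use of the three
// dump entry points; the Stooge type and its values are hypothetical.
//
//	type Stooge struct {
//		Name string
//		Age  int
//	}
//	s := &Stooge{"Moe", 60}
//	spew.Dump(s)             // writes to os.Stdout
//	spew.Fdump(os.Stderr, s) // writes to an arbitrary io.Writer
//	str := spew.Sdump(s)     // returns the same output as a string
//
// The output includes the pointer address, the full type, and len/cap
// annotations, e.g. something along the lines of
// (*main.Stooge)(0xc000010030)({ Name: (string) (len=3) "Moe", Age: (int) 60 }).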
|
|
@ -1,419 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"reflect"
|
||||
"strconv"
|
||||
"strings"
|
||||
)
|
||||
|
||||
// supportedFlags is a list of all the character flags supported by fmt package.
|
||||
const supportedFlags = "0-+# "
|
||||
|
||||
// formatState implements the fmt.Formatter interface and contains information
|
||||
// about the state of a formatting operation. The NewFormatter function can
|
||||
// be used to get a new Formatter which can be used directly as arguments
|
||||
// in standard fmt package printing calls.
|
||||
type formatState struct {
|
||||
value interface{}
|
||||
fs fmt.State
|
||||
depth int
|
||||
pointers map[uintptr]int
|
||||
ignoreNextType bool
|
||||
cs *ConfigState
|
||||
}
|
||||
|
||||
// buildDefaultFormat recreates the original format string without precision
|
||||
// and width information to pass in to fmt.Sprintf in the case of an
|
||||
// unrecognized type. Unless new types are added to the language, this
|
||||
// function won't ever be called.
|
||||
func (f *formatState) buildDefaultFormat() (format string) {
|
||||
buf := bytes.NewBuffer(percentBytes)
|
||||
|
||||
for _, flag := range supportedFlags {
|
||||
if f.fs.Flag(int(flag)) {
|
||||
buf.WriteRune(flag)
|
||||
}
|
||||
}
|
||||
|
||||
buf.WriteRune('v')
|
||||
|
||||
format = buf.String()
|
||||
return format
|
||||
}
|
||||
|
||||
// constructOrigFormat recreates the original format string including precision
|
||||
// and width information to pass along to the standard fmt package. This allows
|
||||
// automatic deferral of all format strings this package doesn't support.
|
||||
func (f *formatState) constructOrigFormat(verb rune) (format string) {
|
||||
buf := bytes.NewBuffer(percentBytes)
|
||||
|
||||
for _, flag := range supportedFlags {
|
||||
if f.fs.Flag(int(flag)) {
|
||||
buf.WriteRune(flag)
|
||||
}
|
||||
}
|
||||
|
||||
if width, ok := f.fs.Width(); ok {
|
||||
buf.WriteString(strconv.Itoa(width))
|
||||
}
|
||||
|
||||
if precision, ok := f.fs.Precision(); ok {
|
||||
buf.Write(precisionBytes)
|
||||
buf.WriteString(strconv.Itoa(precision))
|
||||
}
|
||||
|
||||
buf.WriteRune(verb)
|
||||
|
||||
format = buf.String()
|
||||
return format
|
||||
}
|
||||
|
||||
// unpackValue returns values inside of non-nil interfaces when possible and
|
||||
// ensures that types for values which have been unpacked from an interface
|
||||
// are displayed when the show types flag is also set.
|
||||
// This is useful for data types like structs, arrays, slices, and maps which
|
||||
// can contain varying types packed inside an interface.
|
||||
func (f *formatState) unpackValue(v reflect.Value) reflect.Value {
|
||||
if v.Kind() == reflect.Interface {
|
||||
f.ignoreNextType = false
|
||||
if !v.IsNil() {
|
||||
v = v.Elem()
|
||||
}
|
||||
}
|
||||
return v
|
||||
}
|
||||
|
||||
// formatPtr handles formatting of pointers by indirecting them as necessary.
|
||||
func (f *formatState) formatPtr(v reflect.Value) {
|
||||
// Display nil if top level pointer is nil.
|
||||
showTypes := f.fs.Flag('#')
|
||||
if v.IsNil() && (!showTypes || f.ignoreNextType) {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Remove pointers at or below the current depth from map used to detect
|
||||
// circular refs.
|
||||
for k, depth := range f.pointers {
|
||||
if depth >= f.depth {
|
||||
delete(f.pointers, k)
|
||||
}
|
||||
}
|
||||
|
||||
// Keep list of all dereferenced pointers to possibly show later.
|
||||
pointerChain := make([]uintptr, 0)
|
||||
|
||||
// Figure out how many levels of indirection there are by dereferencing
|
||||
// pointers and unpacking interfaces down the chain while detecting circular
|
||||
// references.
|
||||
nilFound := false
|
||||
cycleFound := false
|
||||
indirects := 0
|
||||
ve := v
|
||||
for ve.Kind() == reflect.Ptr {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
indirects++
|
||||
addr := ve.Pointer()
|
||||
pointerChain = append(pointerChain, addr)
|
||||
if pd, ok := f.pointers[addr]; ok && pd < f.depth {
|
||||
cycleFound = true
|
||||
indirects--
|
||||
break
|
||||
}
|
||||
f.pointers[addr] = f.depth
|
||||
|
||||
ve = ve.Elem()
|
||||
if ve.Kind() == reflect.Interface {
|
||||
if ve.IsNil() {
|
||||
nilFound = true
|
||||
break
|
||||
}
|
||||
ve = ve.Elem()
|
||||
}
|
||||
}
|
||||
|
||||
// Display type or indirection level depending on flags.
|
||||
if showTypes && !f.ignoreNextType {
|
||||
f.fs.Write(openParenBytes)
|
||||
f.fs.Write(bytes.Repeat(asteriskBytes, indirects))
|
||||
f.fs.Write([]byte(ve.Type().String()))
|
||||
f.fs.Write(closeParenBytes)
|
||||
} else {
|
||||
if nilFound || cycleFound {
|
||||
indirects += strings.Count(ve.Type().String(), "*")
|
||||
}
|
||||
f.fs.Write(openAngleBytes)
|
||||
f.fs.Write([]byte(strings.Repeat("*", indirects)))
|
||||
f.fs.Write(closeAngleBytes)
|
||||
}
|
||||
|
||||
// Display pointer information depending on flags.
|
||||
if f.fs.Flag('+') && (len(pointerChain) > 0) {
|
||||
f.fs.Write(openParenBytes)
|
||||
for i, addr := range pointerChain {
|
||||
if i > 0 {
|
||||
f.fs.Write(pointerChainBytes)
|
||||
}
|
||||
printHexPtr(f.fs, addr)
|
||||
}
|
||||
f.fs.Write(closeParenBytes)
|
||||
}
|
||||
|
||||
// Display dereferenced value.
|
||||
switch {
|
||||
case nilFound == true:
|
||||
f.fs.Write(nilAngleBytes)
|
||||
|
||||
case cycleFound == true:
|
||||
f.fs.Write(circularShortBytes)
|
||||
|
||||
default:
|
||||
f.ignoreNextType = true
|
||||
f.format(ve)
|
||||
}
|
||||
}
|
||||
|
||||
// format is the main workhorse for providing the Formatter interface. It
|
||||
// uses the passed reflect value to figure out what kind of object we are
|
||||
// dealing with and formats it appropriately. It is a recursive function,
|
||||
// however circular data structures are detected and handled properly.
|
||||
func (f *formatState) format(v reflect.Value) {
|
||||
// Handle invalid reflect values immediately.
|
||||
kind := v.Kind()
|
||||
if kind == reflect.Invalid {
|
||||
f.fs.Write(invalidAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
// Handle pointers specially.
|
||||
if kind == reflect.Ptr {
|
||||
f.formatPtr(v)
|
||||
return
|
||||
}
|
||||
|
||||
// Print type information unless already handled elsewhere.
|
||||
if !f.ignoreNextType && f.fs.Flag('#') {
|
||||
f.fs.Write(openParenBytes)
|
||||
f.fs.Write([]byte(v.Type().String()))
|
||||
f.fs.Write(closeParenBytes)
|
||||
}
|
||||
f.ignoreNextType = false
|
||||
|
||||
// Call Stringer/error interfaces if they exist and the handle methods
|
||||
// flag is enabled.
|
||||
if !f.cs.DisableMethods {
|
||||
if (kind != reflect.Invalid) && (kind != reflect.Interface) {
|
||||
if handled := handleMethods(f.cs, f.fs, v); handled {
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
switch kind {
|
||||
case reflect.Invalid:
|
||||
// Do nothing. We should never get here since invalid has already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Bool:
|
||||
printBool(f.fs, v.Bool())
|
||||
|
||||
case reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64, reflect.Int:
|
||||
printInt(f.fs, v.Int(), 10)
|
||||
|
||||
case reflect.Uint8, reflect.Uint16, reflect.Uint32, reflect.Uint64, reflect.Uint:
|
||||
printUint(f.fs, v.Uint(), 10)
|
||||
|
||||
case reflect.Float32:
|
||||
printFloat(f.fs, v.Float(), 32)
|
||||
|
||||
case reflect.Float64:
|
||||
printFloat(f.fs, v.Float(), 64)
|
||||
|
||||
case reflect.Complex64:
|
||||
printComplex(f.fs, v.Complex(), 32)
|
||||
|
||||
case reflect.Complex128:
|
||||
printComplex(f.fs, v.Complex(), 64)
|
||||
|
||||
case reflect.Slice:
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
fallthrough
|
||||
|
||||
case reflect.Array:
|
||||
f.fs.Write(openBracketBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
numEntries := v.Len()
|
||||
for i := 0; i < numEntries; i++ {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(v.Index(i)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeBracketBytes)
|
||||
|
||||
case reflect.String:
|
||||
f.fs.Write([]byte(v.String()))
|
||||
|
||||
case reflect.Interface:
|
||||
// The only time we should get here is for nil interfaces due to
|
||||
// unpackValue calls.
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
}
|
||||
|
||||
case reflect.Ptr:
|
||||
// Do nothing. We should never get here since pointers have already
|
||||
// been handled above.
|
||||
|
||||
case reflect.Map:
|
||||
// nil maps should be indicated as different than empty maps
|
||||
if v.IsNil() {
|
||||
f.fs.Write(nilAngleBytes)
|
||||
break
|
||||
}
|
||||
|
||||
f.fs.Write(openMapBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
keys := v.MapKeys()
|
||||
if f.cs.SortKeys {
|
||||
sortValues(keys, f.cs)
|
||||
}
|
||||
for i, key := range keys {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(key))
|
||||
f.fs.Write(colonBytes)
|
||||
f.ignoreNextType = true
|
||||
f.format(f.unpackValue(v.MapIndex(key)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeMapBytes)
|
||||
|
||||
case reflect.Struct:
|
||||
numFields := v.NumField()
|
||||
f.fs.Write(openBraceBytes)
|
||||
f.depth++
|
||||
if (f.cs.MaxDepth != 0) && (f.depth > f.cs.MaxDepth) {
|
||||
f.fs.Write(maxShortBytes)
|
||||
} else {
|
||||
vt := v.Type()
|
||||
for i := 0; i < numFields; i++ {
|
||||
if i > 0 {
|
||||
f.fs.Write(spaceBytes)
|
||||
}
|
||||
vtf := vt.Field(i)
|
||||
if f.fs.Flag('+') || f.fs.Flag('#') {
|
||||
f.fs.Write([]byte(vtf.Name))
|
||||
f.fs.Write(colonBytes)
|
||||
}
|
||||
f.format(f.unpackValue(v.Field(i)))
|
||||
}
|
||||
}
|
||||
f.depth--
|
||||
f.fs.Write(closeBraceBytes)
|
||||
|
||||
case reflect.Uintptr:
|
||||
printHexPtr(f.fs, uintptr(v.Uint()))
|
||||
|
||||
case reflect.UnsafePointer, reflect.Chan, reflect.Func:
|
||||
printHexPtr(f.fs, v.Pointer())
|
||||
|
||||
// There were not any other types at the time this code was written, but
|
||||
// fall back to letting the default fmt package handle it if any get added.
|
||||
default:
|
||||
format := f.buildDefaultFormat()
|
||||
if v.CanInterface() {
|
||||
fmt.Fprintf(f.fs, format, v.Interface())
|
||||
} else {
|
||||
fmt.Fprintf(f.fs, format, v.String())
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Format satisfies the fmt.Formatter interface. See NewFormatter for usage
|
||||
// details.
|
||||
func (f *formatState) Format(fs fmt.State, verb rune) {
|
||||
f.fs = fs
|
||||
|
||||
// Use standard formatting for verbs that are not v.
|
||||
if verb != 'v' {
|
||||
format := f.constructOrigFormat(verb)
|
||||
fmt.Fprintf(fs, format, f.value)
|
||||
return
|
||||
}
|
||||
|
||||
if f.value == nil {
|
||||
if fs.Flag('#') {
|
||||
fs.Write(interfaceBytes)
|
||||
}
|
||||
fs.Write(nilAngleBytes)
|
||||
return
|
||||
}
|
||||
|
||||
f.format(reflect.ValueOf(f.value))
|
||||
}
|
||||
|
||||
// newFormatter is a helper function to consolidate the logic from the various
|
||||
// public methods which take varying config states.
|
||||
func newFormatter(cs *ConfigState, v interface{}) fmt.Formatter {
|
||||
fs := &formatState{value: v, cs: cs}
|
||||
fs.pointers = make(map[uintptr]int)
|
||||
return fs
|
||||
}
|
||||
|
||||
/*
|
||||
NewFormatter returns a custom formatter that satisfies the fmt.Formatter
|
||||
interface. As a result, it integrates cleanly with standard fmt package
|
||||
printing functions. The formatter is useful for inline printing of smaller data
|
||||
types similar to the standard %v format specifier.
|
||||
|
||||
The custom formatter only responds to the %v (most compact), %+v (adds pointer
|
||||
addresses), %#v (adds types), or %#+v (adds types and pointer addresses) verb
|
||||
combinations. Any other verbs such as %x and %q will be sent to the
|
||||
standard fmt package for formatting. In addition, the custom formatter ignores
|
||||
the width and precision arguments (however they will still work on the format
|
||||
specifiers not handled by the custom formatter).
|
||||
|
||||
Typically this function shouldn't be called directly. It is much easier to make
|
||||
use of the custom formatter by calling one of the convenience functions such as
|
||||
Printf, Println, or Fprintf.
|
||||
*/
|
||||
func NewFormatter(v interface{}) fmt.Formatter {
|
||||
return newFormatter(&Config, v)
|
||||
}
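// Editor's sketch (not in the original source): the formatter is normally used
// through the fmt verbs described above; the value below is hypothetical.
//
//	v := map[string]int{"answer": 42}
//	fmt.Printf("%v\n", spew.NewFormatter(v))   // most compact form
//	fmt.Printf("%+v\n", spew.NewFormatter(v))  // adds pointer addresses
//	fmt.Printf("%#v\n", spew.NewFormatter(v))  // adds types
//	fmt.Printf("%#+v\n", spew.NewFormatter(v)) // adds types and addresses
//
// The spew.Printf family of wrappers runs every argument through this
// formatter automatically.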
|
|
@ -1,148 +0,0 @@
|
|||
/*
|
||||
* Copyright (c) 2013 Dave Collins <dave@davec.name>
|
||||
*
|
||||
* Permission to use, copy, modify, and distribute this software for any
|
||||
* purpose with or without fee is hereby granted, provided that the above
|
||||
* copyright notice and this permission notice appear in all copies.
|
||||
*
|
||||
* THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
|
||||
* WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
|
||||
* MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
|
||||
* ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
|
||||
* WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
|
||||
* ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
|
||||
* OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
|
||||
*/
|
||||
|
||||
package spew
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
)
|
||||
|
||||
// Errorf is a wrapper for fmt.Errorf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the formatted string as a value that satisfies error. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Errorf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Errorf(format string, a ...interface{}) (err error) {
|
||||
return fmt.Errorf(format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprint is a wrapper for fmt.Fprint that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprint(w, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Fprint(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprint(w, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintf is a wrapper for fmt.Fprintf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintf(w, format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Fprintf(w io.Writer, format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintf(w, format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Fprintln is a wrapper for fmt.Fprintln that treats each argument as if it
|
||||
// were passed with a default Formatter interface returned by NewFormatter. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Fprintln(w, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Fprintln(w io.Writer, a ...interface{}) (n int, err error) {
|
||||
return fmt.Fprintln(w, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Print is a wrapper for fmt.Print that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Print(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Print(a ...interface{}) (n int, err error) {
|
||||
return fmt.Print(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Printf is a wrapper for fmt.Printf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Printf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Printf(format string, a ...interface{}) (n int, err error) {
|
||||
return fmt.Printf(format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Println is a wrapper for fmt.Println that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the number of bytes written and any write error encountered. See
|
||||
// NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Println(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Println(a ...interface{}) (n int, err error) {
|
||||
return fmt.Println(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprint is a wrapper for fmt.Sprint that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprint(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Sprint(a ...interface{}) string {
|
||||
return fmt.Sprint(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintf is a wrapper for fmt.Sprintf that treats each argument as if it were
|
||||
// passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintf(format, spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Sprintf(format string, a ...interface{}) string {
|
||||
return fmt.Sprintf(format, convertArgs(a)...)
|
||||
}
|
||||
|
||||
// Sprintln is a wrapper for fmt.Sprintln that treats each argument as if it
|
||||
// were passed with a default Formatter interface returned by NewFormatter. It
|
||||
// returns the resulting string. See NewFormatter for formatting details.
|
||||
//
|
||||
// This function is shorthand for the following syntax:
|
||||
//
|
||||
// fmt.Sprintln(spew.NewFormatter(a), spew.NewFormatter(b))
|
||||
func Sprintln(a ...interface{}) string {
|
||||
return fmt.Sprintln(convertArgs(a)...)
|
||||
}
|
||||
|
||||
// convertArgs accepts a slice of arguments and returns a slice of the same
|
||||
// length with each argument converted to a default spew Formatter interface.
|
||||
func convertArgs(args []interface{}) (formatters []interface{}) {
|
||||
formatters = make([]interface{}, len(args))
|
||||
for index, arg := range args {
|
||||
formatters[index] = NewFormatter(arg)
|
||||
}
|
||||
return formatters
|
||||
}
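// Editor's note (not in the original source): because every wrapper above runs
// its arguments through convertArgs, the two calls below are equivalent; the
// value is hypothetical.
//
//	spew.Printf("state: %+v\n", someValue)
//	fmt.Printf("state: %+v\n", spew.NewFormatter(someValue))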
|
|
@ -1,172 +0,0 @@
|
|||
package jsonmessage
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/docker/docker/pkg/term"
|
||||
"github.com/docker/docker/pkg/timeutils"
|
||||
"github.com/docker/docker/pkg/units"
|
||||
)
|
||||
|
||||
type JSONError struct {
|
||||
Code int `json:"code,omitempty"`
|
||||
Message string `json:"message,omitempty"`
|
||||
}
|
||||
|
||||
func (e *JSONError) Error() string {
|
||||
return e.Message
|
||||
}
|
||||
|
||||
type JSONProgress struct {
|
||||
terminalFd uintptr
|
||||
Current int `json:"current,omitempty"`
|
||||
Total int `json:"total,omitempty"`
|
||||
Start int64 `json:"start,omitempty"`
|
||||
}
|
||||
|
||||
func (p *JSONProgress) String() string {
|
||||
var (
|
||||
width = 200
|
||||
pbBox string
|
||||
numbersBox string
|
||||
timeLeftBox string
|
||||
)
|
||||
|
||||
ws, err := term.GetWinsize(p.terminalFd)
|
||||
if err == nil {
|
||||
width = int(ws.Width)
|
||||
}
|
||||
|
||||
if p.Current <= 0 && p.Total <= 0 {
|
||||
return ""
|
||||
}
|
||||
current := units.HumanSize(float64(p.Current))
|
||||
if p.Total <= 0 {
|
||||
return fmt.Sprintf("%8v", current)
|
||||
}
|
||||
total := units.HumanSize(float64(p.Total))
|
||||
percentage := int(float64(p.Current)/float64(p.Total)*100) / 2
|
||||
if percentage > 50 {
|
||||
percentage = 50
|
||||
}
|
||||
if width > 110 {
|
||||
// this number can't be negative gh#7136
|
||||
numSpaces := 0
|
||||
if 50-percentage > 0 {
|
||||
numSpaces = 50 - percentage
|
||||
}
|
||||
pbBox = fmt.Sprintf("[%s>%s] ", strings.Repeat("=", percentage), strings.Repeat(" ", numSpaces))
|
||||
}
|
||||
numbersBox = fmt.Sprintf("%8v/%v", current, total)
|
||||
|
||||
if p.Current > 0 && p.Start > 0 && percentage < 50 {
|
||||
fromStart := time.Now().UTC().Sub(time.Unix(int64(p.Start), 0))
|
||||
perEntry := fromStart / time.Duration(p.Current)
|
||||
left := time.Duration(p.Total-p.Current) * perEntry
|
||||
left = (left / time.Second) * time.Second
|
||||
|
||||
if width > 50 {
|
||||
timeLeftBox = " " + left.String()
|
||||
}
|
||||
}
|
||||
return pbBox + numbersBox + timeLeftBox
|
||||
}
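// Editor's worked example (not in the original source): with hypothetical
// values Current=50, Total=100 and a terminal wider than 110 columns,
// percentage = int(50.0/100.0*100)/2 = 25, so the bar gets 25 "=" characters
// followed by 50-25 = 25 spaces, and the returned string looks roughly like
//
//	[=========================>                         ]     50 B/100 B
//
// plus a trailing time estimate when Start is set and the bar is under 50%.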
|
||||
|
||||
type JSONMessage struct {
|
||||
Stream string `json:"stream,omitempty"`
|
||||
Status string `json:"status,omitempty"`
|
||||
Progress *JSONProgress `json:"progressDetail,omitempty"`
|
||||
ProgressMessage string `json:"progress,omitempty"` //deprecated
|
||||
ID string `json:"id,omitempty"`
|
||||
From string `json:"from,omitempty"`
|
||||
Time int64 `json:"time,omitempty"`
|
||||
Error *JSONError `json:"errorDetail,omitempty"`
|
||||
ErrorMessage string `json:"error,omitempty"` //deprecated
|
||||
}
|
||||
|
||||
func (jm *JSONMessage) Display(out io.Writer, isTerminal bool) error {
|
||||
if jm.Error != nil {
|
||||
if jm.Error.Code == 401 {
|
||||
return fmt.Errorf("Authentication is required.")
|
||||
}
|
||||
return jm.Error
|
||||
}
|
||||
var endl string
|
||||
if isTerminal && jm.Stream == "" && jm.Progress != nil {
|
||||
// <ESC>[2K = erase entire current line
|
||||
fmt.Fprintf(out, "%c[2K\r", 27)
|
||||
endl = "\r"
|
||||
} else if jm.Progress != nil && jm.Progress.String() != "" { //disable progressbar in non-terminal
|
||||
return nil
|
||||
}
|
||||
if jm.Time != 0 {
|
||||
fmt.Fprintf(out, "%s ", time.Unix(jm.Time, 0).Format(timeutils.RFC3339NanoFixed))
|
||||
}
|
||||
if jm.ID != "" {
|
||||
fmt.Fprintf(out, "%s: ", jm.ID)
|
||||
}
|
||||
if jm.From != "" {
|
||||
fmt.Fprintf(out, "(from %s) ", jm.From)
|
||||
}
|
||||
if jm.Progress != nil && isTerminal {
|
||||
fmt.Fprintf(out, "%s %s%s", jm.Status, jm.Progress.String(), endl)
|
||||
} else if jm.ProgressMessage != "" { //deprecated
|
||||
fmt.Fprintf(out, "%s %s%s", jm.Status, jm.ProgressMessage, endl)
|
||||
} else if jm.Stream != "" {
|
||||
fmt.Fprintf(out, "%s%s", jm.Stream, endl)
|
||||
} else {
|
||||
fmt.Fprintf(out, "%s%s\n", jm.Status, endl)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func DisplayJSONMessagesStream(in io.Reader, out io.Writer, terminalFd uintptr, isTerminal bool) error {
|
||||
var (
|
||||
dec = json.NewDecoder(in)
|
||||
ids = make(map[string]int)
|
||||
diff = 0
|
||||
)
|
||||
for {
|
||||
var jm JSONMessage
|
||||
if err := dec.Decode(&jm); err != nil {
|
||||
if err == io.EOF {
|
||||
break
|
||||
}
|
||||
return err
|
||||
}
|
||||
|
||||
if jm.Progress != nil {
|
||||
jm.Progress.terminalFd = terminalFd
|
||||
}
|
||||
if jm.ID != "" && (jm.Progress != nil || jm.ProgressMessage != "") {
|
||||
line, ok := ids[jm.ID]
|
||||
if !ok {
|
||||
line = len(ids)
|
||||
ids[jm.ID] = line
|
||||
if isTerminal {
|
||||
fmt.Fprintf(out, "\n")
|
||||
}
|
||||
diff = 0
|
||||
} else {
|
||||
diff = len(ids) - line
|
||||
}
|
||||
if jm.ID != "" && isTerminal {
|
||||
// <ESC>[{diff}A = move cursor up diff rows
|
||||
fmt.Fprintf(out, "%c[%dA", 27, diff)
|
||||
}
|
||||
}
|
||||
err := jm.Display(out, isTerminal)
|
||||
if jm.ID != "" && isTerminal {
|
||||
// <ESC>[{diff}B = move cursor down diff rows
|
||||
fmt.Fprintf(out, "%c[%dB", 27, diff)
|
||||
}
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
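// Editor's sketch (not in the original source): typical use is to stream a
// Docker API response body to the terminal; resp is a hypothetical
// *http.Response from an image pull.
//
//	fd := os.Stdout.Fd()
//	isTerm := term.IsTerminal(fd)
//	if err := jsonmessage.DisplayJSONMessagesStream(resp.Body, os.Stdout, fd, isTerm); err != nil {
//		log.Fatal(err)
//	}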
|
|
@ -1,69 +0,0 @@
|
|||
package mount
|
||||
|
||||
import (
|
||||
"strings"
|
||||
)
|
||||
|
||||
// Parse fstab type mount options into mount() flags
|
||||
// and device specific data
|
||||
func parseOptions(options string) (int, string) {
|
||||
var (
|
||||
flag int
|
||||
data []string
|
||||
)
|
||||
|
||||
flags := map[string]struct {
|
||||
clear bool
|
||||
flag int
|
||||
}{
|
||||
"defaults": {false, 0},
|
||||
"ro": {false, RDONLY},
|
||||
"rw": {true, RDONLY},
|
||||
"suid": {true, NOSUID},
|
||||
"nosuid": {false, NOSUID},
|
||||
"dev": {true, NODEV},
|
||||
"nodev": {false, NODEV},
|
||||
"exec": {true, NOEXEC},
|
||||
"noexec": {false, NOEXEC},
|
||||
"sync": {false, SYNCHRONOUS},
|
||||
"async": {true, SYNCHRONOUS},
|
||||
"dirsync": {false, DIRSYNC},
|
||||
"remount": {false, REMOUNT},
|
||||
"mand": {false, MANDLOCK},
|
||||
"nomand": {true, MANDLOCK},
|
||||
"atime": {true, NOATIME},
|
||||
"noatime": {false, NOATIME},
|
||||
"diratime": {true, NODIRATIME},
|
||||
"nodiratime": {false, NODIRATIME},
|
||||
"bind": {false, BIND},
|
||||
"rbind": {false, RBIND},
|
||||
"unbindable": {false, UNBINDABLE},
|
||||
"runbindable": {false, RUNBINDABLE},
|
||||
"private": {false, PRIVATE},
|
||||
"rprivate": {false, RPRIVATE},
|
||||
"shared": {false, SHARED},
|
||||
"rshared": {false, RSHARED},
|
||||
"slave": {false, SLAVE},
|
||||
"rslave": {false, RSLAVE},
|
||||
"relatime": {false, RELATIME},
|
||||
"norelatime": {true, RELATIME},
|
||||
"strictatime": {false, STRICTATIME},
|
||||
"nostrictatime": {true, STRICTATIME},
|
||||
}
|
||||
|
||||
for _, o := range strings.Split(options, ",") {
|
||||
// If the option does not exist in the flags table or the flag
|
||||
// is not supported on the platform,
|
||||
// then it is a data value for a specific fs type
|
||||
if f, exists := flags[o]; exists && f.flag != 0 {
|
||||
if f.clear {
|
||||
flag &= ^f.flag
|
||||
} else {
|
||||
flag |= f.flag
|
||||
}
|
||||
} else {
|
||||
data = append(data, o)
|
||||
}
|
||||
}
|
||||
return flag, strings.Join(data, ",")
|
||||
}
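// Editor's worked example (not in the original source): on Linux, given the
// option string "ro,noatime,size=65536k", the loop above ORs in RDONLY and
// NOATIME and treats the unknown entry as filesystem data, so the call
//
//	flag, data := parseOptions("ro,noatime,size=65536k")
//
// returns flag == RDONLY|NOATIME and data == "size=65536k".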
|
|
@ -1,48 +0,0 @@
|
|||
// +build freebsd,cgo
|
||||
|
||||
package mount
|
||||
|
||||
/*
|
||||
#include <sys/mount.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
const (
|
||||
// RDONLY will mount the filesystem as read-only.
|
||||
RDONLY = C.MNT_RDONLY
|
||||
|
||||
// NOSUID will not allow set-user-identifier or set-group-identifier bits to
|
||||
// take effect.
|
||||
NOSUID = C.MNT_NOSUID
|
||||
|
||||
// NOEXEC will not allow execution of any binaries on the mounted file system.
|
||||
NOEXEC = C.MNT_NOEXEC
|
||||
|
||||
// SYNCHRONOUS will allow any I/O to the file system to be done synchronously.
|
||||
SYNCHRONOUS = C.MNT_SYNCHRONOUS
|
||||
|
||||
// NOATIME will not update the file access time when reading from a file.
|
||||
NOATIME = C.MNT_NOATIME
|
||||
)
|
||||
|
||||
// These flags are unsupported.
|
||||
const (
|
||||
BIND = 0
|
||||
DIRSYNC = 0
|
||||
MANDLOCK = 0
|
||||
NODEV = 0
|
||||
NODIRATIME = 0
|
||||
UNBINDABLE = 0
|
||||
RUNBINDABLE = 0
|
||||
PRIVATE = 0
|
||||
RPRIVATE = 0
|
||||
SHARED = 0
|
||||
RSHARED = 0
|
||||
SLAVE = 0
|
||||
RSLAVE = 0
|
||||
RBIND = 0
|
||||
RELATIVE = 0
|
||||
RELATIME = 0
|
||||
REMOUNT = 0
|
||||
STRICTATIME = 0
|
||||
)
|
|
@ -1,85 +0,0 @@
|
|||
package mount
|
||||
|
||||
import (
|
||||
"syscall"
|
||||
)
|
||||
|
||||
const (
|
||||
// RDONLY will mount the file system read-only.
|
||||
RDONLY = syscall.MS_RDONLY
|
||||
|
||||
// NOSUID will not allow set-user-identifier or set-group-identifier bits to
|
||||
// take effect.
|
||||
NOSUID = syscall.MS_NOSUID
|
||||
|
||||
// NODEV will not interpret character or block special devices on the file
|
||||
// system.
|
||||
NODEV = syscall.MS_NODEV
|
||||
|
||||
// NOEXEC will not allow execution of any binaries on the mounted file system.
|
||||
NOEXEC = syscall.MS_NOEXEC
|
||||
|
||||
// SYNCHRONOUS will allow I/O to the file system to be done synchronously.
|
||||
SYNCHRONOUS = syscall.MS_SYNCHRONOUS
|
||||
|
||||
// DIRSYNC will force all directory updates within the file system to be done
|
||||
// synchronously. This affects the following system calls: creat, link,
|
||||
// unlink, symlink, mkdir, rmdir, mknod and rename.
|
||||
DIRSYNC = syscall.MS_DIRSYNC
|
||||
|
||||
// REMOUNT will attempt to remount an already-mounted file system. This is
|
||||
// commonly used to change the mount flags for a file system, especially to
|
||||
// make a readonly file system writeable. It does not change device or mount
|
||||
// point.
|
||||
REMOUNT = syscall.MS_REMOUNT
|
||||
|
||||
// MANDLOCK will force mandatory locks on a filesystem.
|
||||
MANDLOCK = syscall.MS_MANDLOCK
|
||||
|
||||
// NOATIME will not update the file access time when reading from a file.
|
||||
NOATIME = syscall.MS_NOATIME
|
||||
|
||||
// NODIRATIME will not update the directory access time.
|
||||
NODIRATIME = syscall.MS_NODIRATIME
|
||||
|
||||
// BIND remounts a subtree somewhere else.
|
||||
BIND = syscall.MS_BIND
|
||||
|
||||
// RBIND remounts a subtree and all possible submounts somewhere else.
|
||||
RBIND = syscall.MS_BIND | syscall.MS_REC
|
||||
|
||||
// UNBINDABLE creates a mount which cannot be cloned through a bind operation.
|
||||
UNBINDABLE = syscall.MS_UNBINDABLE
|
||||
|
||||
// RUNBINDABLE marks the entire mount tree as UNBINDABLE.
|
||||
RUNBINDABLE = syscall.MS_UNBINDABLE | syscall.MS_REC
|
||||
|
||||
// PRIVATE creates a mount which carries no propagation abilities.
|
||||
PRIVATE = syscall.MS_PRIVATE
|
||||
|
||||
// RPRIVATE marks the entire mount tree as PRIVATE.
|
||||
RPRIVATE = syscall.MS_PRIVATE | syscall.MS_REC
|
||||
|
||||
// SLAVE creates a mount which receives propagation from its master, but not
|
||||
// vice versa.
|
||||
SLAVE = syscall.MS_SLAVE
|
||||
|
||||
// RSLAVE marks the entire mount tree as SLAVE.
|
||||
RSLAVE = syscall.MS_SLAVE | syscall.MS_REC
|
||||
|
||||
// SHARED creates a mount which provides the ability to create mirrors of
|
||||
// that mount such that mounts and unmounts within any of the mirrors
|
||||
// propagate to the other mirrors.
|
||||
SHARED = syscall.MS_SHARED
|
||||
|
||||
// RSHARED marks the entire mount tree as SHARED.
|
||||
RSHARED = syscall.MS_SHARED | syscall.MS_REC
|
||||
|
||||
// RELATIME updates inode access times relative to modify or change time.
|
||||
RELATIME = syscall.MS_RELATIME
|
||||
|
||||
// STRICTATIME allows to explicitly request full atime updates. This makes
|
||||
// it possible for the kernel to default to relatime or noatime but still
|
||||
// allow userspace to override it.
|
||||
STRICTATIME = syscall.MS_STRICTATIME
|
||||
)
|
|
@ -1,30 +0,0 @@
|
|||
// +build !linux,!freebsd freebsd,!cgo

package mount

// These flags are unsupported.
const (
	BIND        = 0
	DIRSYNC     = 0
	MANDLOCK    = 0
	NOATIME     = 0
	NODEV       = 0
	NODIRATIME  = 0
	NOEXEC      = 0
	NOSUID      = 0
	UNBINDABLE  = 0
	RUNBINDABLE = 0
	PRIVATE     = 0
	RPRIVATE    = 0
	SHARED      = 0
	RSHARED     = 0
	SLAVE       = 0
	RSLAVE      = 0
	RBIND       = 0
	RELATIME    = 0
	RELATIVE    = 0
	REMOUNT     = 0
	STRICTATIME = 0
	SYNCHRONOUS = 0
	RDONLY      = 0
)
|
|
@ -1,74 +0,0 @@
|
|||
package mount

import (
	"time"
)

// GetMounts retrieves a list of mounts for the current running process.
func GetMounts() ([]*Info, error) {
	return parseMountTable()
}

// Mounted looks at /proc/self/mountinfo to determine whether the specified
// mountpoint has been mounted.
func Mounted(mountpoint string) (bool, error) {
	entries, err := parseMountTable()
	if err != nil {
		return false, err
	}

	// Search the table for the mountpoint
	for _, e := range entries {
		if e.Mountpoint == mountpoint {
			return true, nil
		}
	}
	return false, nil
}

// Mount will mount a filesystem according to the specified configuration, on the
// condition that the target path is *not* already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
func Mount(device, target, mType, options string) error {
	flag, _ := parseOptions(options)
	if flag&REMOUNT != REMOUNT {
		if mounted, err := Mounted(target); err != nil || mounted {
			return err
		}
	}
	return ForceMount(device, target, mType, options)
}

// ForceMount will mount a filesystem according to the specified configuration,
// *regardless* of whether the target path is already mounted. Options must be
// specified like the mount or fstab unix commands: "opt1=val1,opt2=val2". See
// flags.go for supported option flags.
func ForceMount(device, target, mType, options string) error {
	flag, data := parseOptions(options)
	if err := mount(device, target, mType, uintptr(flag), data); err != nil {
		return err
	}
	return nil
}

// Unmount will unmount the target filesystem, so long as it is mounted.
func Unmount(target string) error {
	if mounted, err := Mounted(target); err != nil || !mounted {
		return err
	}
	return ForceUnmount(target)
}

// ForceUnmount will force an unmount of the target filesystem, regardless of
// whether it is mounted or not.
func ForceUnmount(target string) (err error) {
	// Simple retry logic for unmount
	for i := 0; i < 10; i++ {
		if err = unmount(target, 0); err == nil {
			return nil
		}
		time.Sleep(100 * time.Millisecond)
	}
	return
}
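// Editor's sketch (not in the original source): mounting and unmounting with
// this package; the device and target paths are hypothetical.
//
//	if err := mount.Mount("/dev/sdb1", "/mnt/data", "ext4", "noatime"); err != nil {
//		log.Fatal(err)
//	}
//	defer mount.ForceUnmount("/mnt/data")
//
// Mount is a no-op (returning nil) when the target is already mounted and
// REMOUNT was not requested, which is why ForceMount exists alongside it.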
|
|
@ -1,59 +0,0 @@
|
|||
package mount
|
||||
|
||||
/*
|
||||
#include <errno.h>
|
||||
#include <stdlib.h>
|
||||
#include <string.h>
|
||||
#include <sys/_iovec.h>
|
||||
#include <sys/mount.h>
|
||||
#include <sys/param.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"strings"
|
||||
"syscall"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
func allocateIOVecs(options []string) []C.struct_iovec {
|
||||
out := make([]C.struct_iovec, len(options))
|
||||
for i, option := range options {
|
||||
out[i].iov_base = unsafe.Pointer(C.CString(option))
|
||||
out[i].iov_len = C.size_t(len(option) + 1)
|
||||
}
|
||||
return out
|
||||
}
|
||||
|
||||
func mount(device, target, mType string, flag uintptr, data string) error {
|
||||
isNullFS := false
|
||||
|
||||
xs := strings.Split(data, ",")
|
||||
for _, x := range xs {
|
||||
if x == "bind" {
|
||||
isNullFS = true
|
||||
}
|
||||
}
|
||||
|
||||
options := []string{"fspath", target}
|
||||
if isNullFS {
|
||||
options = append(options, "fstype", "nullfs", "target", device)
|
||||
} else {
|
||||
options = append(options, "fstype", mType, "from", device)
|
||||
}
|
||||
rawOptions := allocateIOVecs(options)
|
||||
for _, rawOption := range rawOptions {
|
||||
defer C.free(rawOption.iov_base)
|
||||
}
|
||||
|
||||
if errno := C.nmount(&rawOptions[0], C.uint(len(options)), C.int(flag)); errno != 0 {
|
||||
reason := C.GoString(C.strerror(*C.__error()))
|
||||
return fmt.Errorf("Failed to call nmount: %s", reason)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func unmount(target string, flag int) error {
|
||||
return syscall.Unmount(target, flag)
|
||||
}
|
|
@ -1,21 +0,0 @@
|
|||
package mount

import (
	"syscall"
)

func mount(device, target, mType string, flag uintptr, data string) error {
	if err := syscall.Mount(device, target, mType, flag, data); err != nil {
		return err
	}

	// If we have a bind mount or remount, remount...
	if flag&syscall.MS_BIND == syscall.MS_BIND && flag&syscall.MS_RDONLY == syscall.MS_RDONLY {
		return syscall.Mount(device, target, mType, flag|syscall.MS_REMOUNT, data)
	}
	return nil
}

func unmount(target string, flag int) error {
	return syscall.Unmount(target, flag)
}
|
|
@ -1,11 +0,0 @@
|
|||
// +build !linux,!freebsd freebsd,!cgo

package mount

func mount(device, target, mType string, flag uintptr, data string) error {
	panic("Not implemented")
}

func unmount(target string, flag int) error {
	panic("Not implemented")
}
|
|
@ -1,40 +0,0 @@
|
|||
package mount

// Info reveals information about a particular mounted filesystem. This
// struct is populated from the content in the /proc/<pid>/mountinfo file.
type Info struct {
	// ID is a unique identifier of the mount (may be reused after umount).
	ID int

	// Parent indicates the ID of the mount parent (or of self for the top of the
	// mount tree).
	Parent int

	// Major indicates one half of the device ID which identifies the device class.
	Major int

	// Minor indicates one half of the device ID which identifies a specific
	// instance of device.
	Minor int

	// Root of the mount within the filesystem.
	Root string

	// Mountpoint indicates the mount point relative to the process's root.
	Mountpoint string

	// Opts represents mount-specific options.
	Opts string

	// Optional represents optional fields.
	Optional string

	// Fstype indicates the type of filesystem, such as EXT3.
	Fstype string

	// Source indicates filesystem specific information or "none".
	Source string

	// VfsOpts represents per super block options.
	VfsOpts string
}
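// Editor's sketch (not in the original source): listing the current mounts via
// GetMounts from mount.go and printing a few Info fields.
//
//	infos, err := mount.GetMounts()
//	if err != nil {
//		log.Fatal(err)
//	}
//	for _, m := range infos {
//		fmt.Printf("%s on %s type %s (%s)\n", m.Source, m.Mountpoint, m.Fstype, m.Opts)
//	}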
|
|
@ -1,41 +0,0 @@
|
|||
package mount
|
||||
|
||||
/*
|
||||
#include <sys/param.h>
|
||||
#include <sys/ucred.h>
|
||||
#include <sys/mount.h>
|
||||
*/
|
||||
import "C"
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"reflect"
|
||||
"unsafe"
|
||||
)
|
||||
|
||||
// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
|
||||
// bind mounts.
|
||||
func parseMountTable() ([]*Info, error) {
|
||||
var rawEntries *C.struct_statfs
|
||||
|
||||
count := int(C.getmntinfo(&rawEntries, C.MNT_WAIT))
|
||||
if count == 0 {
|
||||
return nil, fmt.Errorf("Failed to call getmntinfo")
|
||||
}
|
||||
|
||||
var entries []C.struct_statfs
|
||||
header := (*reflect.SliceHeader)(unsafe.Pointer(&entries))
|
||||
header.Cap = count
|
||||
header.Len = count
|
||||
header.Data = uintptr(unsafe.Pointer(rawEntries))
|
||||
|
||||
var out []*Info
|
||||
for _, entry := range entries {
|
||||
var mountinfo Info
|
||||
mountinfo.Mountpoint = C.GoString(&entry.f_mntonname[0])
|
||||
mountinfo.Source = C.GoString(&entry.f_mntfromname[0])
|
||||
mountinfo.Fstype = C.GoString(&entry.f_fstypename[0])
|
||||
out = append(out, &mountinfo)
|
||||
}
|
||||
return out, nil
|
||||
}
|
|
@ -1,95 +0,0 @@
|
|||
// +build linux
|
||||
|
||||
package mount
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
)
|
||||
|
||||
const (
|
||||
/* 36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
|
||||
(1)(2)(3) (4) (5) (6) (7) (8) (9) (10) (11)
|
||||
|
||||
(1) mount ID: unique identifier of the mount (may be reused after umount)
|
||||
(2) parent ID: ID of parent (or of self for the top of the mount tree)
|
||||
(3) major:minor: value of st_dev for files on filesystem
|
||||
(4) root: root of the mount within the filesystem
|
||||
(5) mount point: mount point relative to the process's root
|
||||
(6) mount options: per mount options
|
||||
(7) optional fields: zero or more fields of the form "tag[:value]"
|
||||
(8) separator: marks the end of the optional fields
|
||||
(9) filesystem type: name of filesystem of the form "type[.subtype]"
|
||||
(10) mount source: filesystem specific information or "none"
|
||||
(11) super options: per super block options*/
|
||||
mountinfoFormat = "%d %d %d:%d %s %s %s %s"
|
||||
)
|
||||
|
||||
// Parse /proc/self/mountinfo because comparing Dev and ino does not work from
|
||||
// bind mounts
|
||||
func parseMountTable() ([]*Info, error) {
|
||||
f, err := os.Open("/proc/self/mountinfo")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return parseInfoFile(f)
|
||||
}
|
||||
|
||||
func parseInfoFile(r io.Reader) ([]*Info, error) {
|
||||
var (
|
||||
s = bufio.NewScanner(r)
|
||||
out = []*Info{}
|
||||
)
|
||||
|
||||
for s.Scan() {
|
||||
if err := s.Err(); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
var (
|
||||
p = &Info{}
|
||||
text = s.Text()
|
||||
optionalFields string
|
||||
)
|
||||
|
||||
if _, err := fmt.Sscanf(text, mountinfoFormat,
|
||||
&p.ID, &p.Parent, &p.Major, &p.Minor,
|
||||
&p.Root, &p.Mountpoint, &p.Opts, &optionalFields); err != nil {
|
||||
return nil, fmt.Errorf("Scanning '%s' failed: %s", text, err)
|
||||
}
|
||||
// Safe as mountinfo encodes mountpoints with spaces as \040.
|
||||
index := strings.Index(text, " - ")
|
||||
postSeparatorFields := strings.Fields(text[index+3:])
|
||||
if len(postSeparatorFields) < 3 {
|
||||
return nil, fmt.Errorf("Error found less than 3 fields post '-' in %q", text)
|
||||
}
|
||||
|
||||
if optionalFields != "-" {
|
||||
p.Optional = optionalFields
|
||||
}
|
||||
|
||||
p.Fstype = postSeparatorFields[0]
|
||||
p.Source = postSeparatorFields[1]
|
||||
p.VfsOpts = strings.Join(postSeparatorFields[2:], " ")
|
||||
out = append(out, p)
|
||||
}
|
||||
return out, nil
|
||||
}
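// Editor's worked example (not in the original source): for the sample
// mountinfo line shown in the format comment above,
//
//	36 35 98:0 /mnt1 /mnt2 rw,noatime master:1 - ext3 /dev/root rw,errors=continue
//
// parseInfoFile fills in ID=36, Parent=35, Major=98, Minor=0, Root="/mnt1",
// Mountpoint="/mnt2", Opts="rw,noatime", Optional="master:1", Fstype="ext3",
// Source="/dev/root" and VfsOpts="rw,errors=continue".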
|
||||
|
||||
// PidMountInfo collects the mounts for a specific process ID. If the process
|
||||
// ID is unknown, it is better to use `GetMounts` which will inspect
|
||||
// "/proc/self/mountinfo" instead.
|
||||
func PidMountInfo(pid int) ([]*Info, error) {
|
||||
f, err := os.Open(fmt.Sprintf("/proc/%d/mountinfo", pid))
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
defer f.Close()
|
||||
|
||||
return parseInfoFile(f)
|
||||
}
|
|
@ -1,12 +0,0 @@
|
|||
// +build !linux,!freebsd freebsd,!cgo

package mount

import (
	"fmt"
	"runtime"
)

func parseMountTable() ([]*Info, error) {
	return nil, fmt.Errorf("mount.parseMountTable is not implemented on %s/%s", runtime.GOOS, runtime.GOARCH)
}
|
|
@ -1,70 +0,0 @@
// +build linux

package mount

// MakeShared ensures a mounted filesystem has the SHARED mount option enabled.
// See the supported options in flags.go for further reference.
func MakeShared(mountPoint string) error {
	return ensureMountedAs(mountPoint, "shared")
}

// MakeRShared ensures a mounted filesystem has the RSHARED mount option enabled.
// See the supported options in flags.go for further reference.
func MakeRShared(mountPoint string) error {
	return ensureMountedAs(mountPoint, "rshared")
}

// MakePrivate ensures a mounted filesystem has the PRIVATE mount option enabled.
// See the supported options in flags.go for further reference.
func MakePrivate(mountPoint string) error {
	return ensureMountedAs(mountPoint, "private")
}

// MakeRPrivate ensures a mounted filesystem has the RPRIVATE mount option
// enabled. See the supported options in flags.go for further reference.
func MakeRPrivate(mountPoint string) error {
	return ensureMountedAs(mountPoint, "rprivate")
}

// MakeSlave ensures a mounted filesystem has the SLAVE mount option enabled.
// See the supported options in flags.go for further reference.
func MakeSlave(mountPoint string) error {
	return ensureMountedAs(mountPoint, "slave")
}

// MakeRSlave ensures a mounted filesystem has the RSLAVE mount option enabled.
// See the supported options in flags.go for further reference.
func MakeRSlave(mountPoint string) error {
	return ensureMountedAs(mountPoint, "rslave")
}

// MakeUnbindable ensures a mounted filesystem has the UNBINDABLE mount option
// enabled. See the supported options in flags.go for further reference.
func MakeUnbindable(mountPoint string) error {
	return ensureMountedAs(mountPoint, "unbindable")
}

// MakeRUnbindable ensures a mounted filesystem has the RUNBINDABLE mount
// option enabled. See the supported options in flags.go for further reference.
func MakeRUnbindable(mountPoint string) error {
	return ensureMountedAs(mountPoint, "runbindable")
}

func ensureMountedAs(mountPoint, options string) error {
	mounted, err := Mounted(mountPoint)
	if err != nil {
		return err
	}

	if !mounted {
		if err := Mount(mountPoint, mountPoint, "none", "bind,rw"); err != nil {
			return err
		}
	}
	mounted, err = Mounted(mountPoint)
	if err != nil {
		return err
	}

	return ForceMount("", mountPoint, "none", options)
}
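For context, a minimal sketch of how a caller would typically use the propagation helpers removed above; the vendored import path `github.com/docker/docker/pkg/mount` is assumed, and the snippet is illustrative rather than part of the diff.

```Go
package main

import (
	"log"

	// assumed vendored import path of the package deleted above
	"github.com/docker/docker/pkg/mount"
)

func main() {
	// Make /mnt/data recursively shared so later mounts propagate
	// into other mount namespaces (e.g. containers).
	if err := mount.MakeRShared("/mnt/data"); err != nil {
		log.Fatalf("making /mnt/data rshared: %v", err)
	}
}
```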
@ -1,170 +0,0 @@
package parsers

import (
	"fmt"
	"net/url"
	"path"
	"runtime"
	"strconv"
	"strings"
)

// FIXME: Change this not to receive default value as parameter
func ParseHost(defaultTCPAddr, defaultUnixAddr, addr string) (string, error) {
	addr = strings.TrimSpace(addr)
	if addr == "" {
		if runtime.GOOS != "windows" {
			addr = fmt.Sprintf("unix://%s", defaultUnixAddr)
		} else {
			// Note - defaultTCPAddr already includes tcp:// prefix
			addr = fmt.Sprintf("%s", defaultTCPAddr)
		}
	}
	addrParts := strings.Split(addr, "://")
	if len(addrParts) == 1 {
		addrParts = []string{"tcp", addrParts[0]}
	}

	switch addrParts[0] {
	case "tcp":
		return ParseTCPAddr(addrParts[1], defaultTCPAddr)
	case "unix":
		return ParseUnixAddr(addrParts[1], defaultUnixAddr)
	case "fd":
		return addr, nil
	default:
		return "", fmt.Errorf("Invalid bind address format: %s", addr)
	}
}

func ParseUnixAddr(addr string, defaultAddr string) (string, error) {
	addr = strings.TrimPrefix(addr, "unix://")
	if strings.Contains(addr, "://") {
		return "", fmt.Errorf("Invalid proto, expected unix: %s", addr)
	}
	if addr == "" {
		addr = defaultAddr
	}
	return fmt.Sprintf("unix://%s", addr), nil
}

func ParseTCPAddr(addr string, defaultAddr string) (string, error) {
	addr = strings.TrimPrefix(addr, "tcp://")
	if strings.Contains(addr, "://") || addr == "" {
		return "", fmt.Errorf("Invalid proto, expected tcp: %s", addr)
	}

	u, err := url.Parse("tcp://" + addr)
	if err != nil {
		return "", err
	}
	hostParts := strings.Split(u.Host, ":")
	if len(hostParts) != 2 {
		return "", fmt.Errorf("Invalid bind address format: %s", addr)
	}
	host := hostParts[0]
	if host == "" {
		host = defaultAddr
	}

	p, err := strconv.Atoi(hostParts[1])
	if err != nil && p == 0 {
		return "", fmt.Errorf("Invalid bind address format: %s", addr)
	}
	return fmt.Sprintf("tcp://%s:%d%s", host, p, u.Path), nil
}

// ParseRepositoryTag gets a repository name and returns the right reposName + tag|digest.
// The tag can be confusing because of a port in a repository name.
// Ex: localhost.localdomain:5000/samalba/hipache:latest
// Digest ex: localhost:5000/foo/bar@sha256:bc8813ea7b3603864987522f02a76101c17ad122e1c46d790efc0fca78ca7bfb
func ParseRepositoryTag(repos string) (string, string) {
	n := strings.Index(repos, "@")
	if n >= 0 {
		parts := strings.Split(repos, "@")
		return parts[0], parts[1]
	}
	n = strings.LastIndex(repos, ":")
	if n < 0 {
		return repos, ""
	}
	if tag := repos[n+1:]; !strings.Contains(tag, "/") {
		return repos[:n], tag
	}
	return repos, ""
}

func PartParser(template, data string) (map[string]string, error) {
	// ip:public:private
	var (
		templateParts = strings.Split(template, ":")
		parts         = strings.Split(data, ":")
		out           = make(map[string]string, len(templateParts))
	)
	if len(parts) != len(templateParts) {
		return nil, fmt.Errorf("Invalid format to parse. %s should match template %s", data, template)
	}

	for i, t := range templateParts {
		value := ""
		if len(parts) > i {
			value = parts[i]
		}
		out[t] = value
	}
	return out, nil
}

func ParseKeyValueOpt(opt string) (string, string, error) {
	parts := strings.SplitN(opt, "=", 2)
	if len(parts) != 2 {
		return "", "", fmt.Errorf("Unable to parse key/value option: %s", opt)
	}
	return strings.TrimSpace(parts[0]), strings.TrimSpace(parts[1]), nil
}

func ParsePortRange(ports string) (uint64, uint64, error) {
	if ports == "" {
		return 0, 0, fmt.Errorf("Empty string specified for ports.")
	}
	if !strings.Contains(ports, "-") {
		start, err := strconv.ParseUint(ports, 10, 16)
		end := start
		return start, end, err
	}

	parts := strings.Split(ports, "-")
	start, err := strconv.ParseUint(parts[0], 10, 16)
	if err != nil {
		return 0, 0, err
	}
	end, err := strconv.ParseUint(parts[1], 10, 16)
	if err != nil {
		return 0, 0, err
	}
	if end < start {
		return 0, 0, fmt.Errorf("Invalid range specified for the Port: %s", ports)
	}
	return start, end, nil
}

func ParseLink(val string) (string, string, error) {
	if val == "" {
		return "", "", fmt.Errorf("empty string specified for links")
	}
	arr := strings.Split(val, ":")
	if len(arr) > 2 {
		return "", "", fmt.Errorf("bad format for links: %s", val)
	}
	if len(arr) == 1 {
		return val, val, nil
	}
	// This is kept because we can actually get a HostConfig with links
	// from an already created container and the format is not `foo:bar`
	// but `/foo:/c1/bar`
	if strings.HasPrefix(arr[0], "/") {
		_, alias := path.Split(arr[1])
		return arr[0][1:], alias, nil
	}
	return arr[0], arr[1], nil
}
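As a quick reference for the parsing helpers deleted above, a small sketch of their observable behavior; the vendored import path `github.com/docker/docker/pkg/parsers` is assumed.

```Go
package main

import (
	"fmt"

	// assumed vendored import path of the package deleted above
	"github.com/docker/docker/pkg/parsers"
)

func main() {
	// The last ":" only splits off a tag when no "/" follows it,
	// so registry ports are left intact.
	repo, tag := parsers.ParseRepositoryTag("localhost:5000/foo/bar:latest")
	fmt.Println(repo, tag) // localhost:5000/foo/bar latest

	start, end, err := parsers.ParsePortRange("8000-8080")
	fmt.Println(start, end, err) // 8000 8080 <nil>
}
```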
File diff suppressed because it is too large
@ -1,234 +0,0 @@
package winconsole

import (
	"fmt"
	"io"
	"strconv"
	"strings"
)

// http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html
const (
	ANSI_ESCAPE_PRIMARY   = 0x1B
	ANSI_ESCAPE_SECONDARY = 0x5B
	ANSI_COMMAND_FIRST    = 0x40
	ANSI_COMMAND_LAST     = 0x7E
	ANSI_PARAMETER_SEP    = ";"
	ANSI_CMD_G0           = '('
	ANSI_CMD_G1           = ')'
	ANSI_CMD_G2           = '*'
	ANSI_CMD_G3           = '+'
	ANSI_CMD_DECPNM       = '>'
	ANSI_CMD_DECPAM       = '='
	ANSI_CMD_OSC          = ']'
	ANSI_CMD_STR_TERM     = '\\'
	ANSI_BEL              = 0x07
	KEY_EVENT             = 1
)

// Interface that implements terminal handling
type terminalEmulator interface {
	HandleOutputCommand(fd uintptr, command []byte) (n int, err error)
	HandleInputSequence(fd uintptr, command []byte) (n int, err error)
	WriteChars(fd uintptr, w io.Writer, p []byte) (n int, err error)
	ReadChars(fd uintptr, w io.Reader, p []byte) (n int, err error)
}

type terminalWriter struct {
	wrappedWriter io.Writer
	emulator      terminalEmulator
	command       []byte
	inSequence    bool
	fd            uintptr
}

type terminalReader struct {
	wrappedReader io.ReadCloser
	emulator      terminalEmulator
	command       []byte
	inSequence    bool
	fd            uintptr
}

// http://manpages.ubuntu.com/manpages/intrepid/man4/console_codes.4.html
func isAnsiCommandChar(b byte) bool {
	switch {
	case ANSI_COMMAND_FIRST <= b && b <= ANSI_COMMAND_LAST && b != ANSI_ESCAPE_SECONDARY:
		return true
	case b == ANSI_CMD_G1 || b == ANSI_CMD_OSC || b == ANSI_CMD_DECPAM || b == ANSI_CMD_DECPNM:
		// non-CSI escape sequence terminator
		return true
	case b == ANSI_CMD_STR_TERM || b == ANSI_BEL:
		// String escape sequence terminator
		return true
	}
	return false
}

func isCharacterSelectionCmdChar(b byte) bool {
	return (b == ANSI_CMD_G0 || b == ANSI_CMD_G1 || b == ANSI_CMD_G2 || b == ANSI_CMD_G3)
}

func isXtermOscSequence(command []byte, current byte) bool {
	return (len(command) >= 2 && command[0] == ANSI_ESCAPE_PRIMARY && command[1] == ANSI_CMD_OSC && current != ANSI_BEL)
}

// Write writes len(p) bytes from p to the underlying data stream.
// http://golang.org/pkg/io/#Writer
func (tw *terminalWriter) Write(p []byte) (n int, err error) {
	if len(p) == 0 {
		return 0, nil
	}
	if tw.emulator == nil {
		return tw.wrappedWriter.Write(p)
	}
	// Emulate terminal by extracting commands and executing them
	totalWritten := 0
	start := 0 // indicates start of the next chunk
	end := len(p)
	for current := 0; current < end; current++ {
		if tw.inSequence {
			// inside escape sequence
			tw.command = append(tw.command, p[current])
			if isAnsiCommandChar(p[current]) {
				if !isXtermOscSequence(tw.command, p[current]) {
					// found the last command character.
					// Now we have a complete command.
					nchar, err := tw.emulator.HandleOutputCommand(tw.fd, tw.command)
					totalWritten += nchar
					if err != nil {
						return totalWritten, err
					}

					// clear the command
					// don't include current character again
					tw.command = tw.command[:0]
					start = current + 1
					tw.inSequence = false
				}
			}
		} else {
			if p[current] == ANSI_ESCAPE_PRIMARY {
				// entering escape sequence
				tw.inSequence = true
				// indicates end of "normal sequence", write whatever you have so far
				if len(p[start:current]) > 0 {
					nw, err := tw.emulator.WriteChars(tw.fd, tw.wrappedWriter, p[start:current])
					totalWritten += nw
					if err != nil {
						return totalWritten, err
					}
				}
				// include the current character as part of the next sequence
				tw.command = append(tw.command, p[current])
			}
		}
	}
	// note that so far, start of the escape sequence triggers writing out of bytes to console.
	// For the part _after_ the end of last escape sequence, it is not written out yet. So write it out
	if !tw.inSequence {
		// assumption is that we can't be inside sequence and therefore command should be empty
		if len(p[start:]) > 0 {
			nw, err := tw.emulator.WriteChars(tw.fd, tw.wrappedWriter, p[start:])
			totalWritten += nw
			if err != nil {
				return totalWritten, err
			}
		}
	}
	return totalWritten, nil

}

// Read reads up to len(p) bytes into p.
// http://golang.org/pkg/io/#Reader
func (tr *terminalReader) Read(p []byte) (n int, err error) {
	// Implementations of Read are discouraged from returning a zero byte count
	// with a nil error, except when len(p) == 0.
	if len(p) == 0 {
		return 0, nil
	}
	if nil == tr.emulator {
		return tr.readFromWrappedReader(p)
	}
	return tr.emulator.ReadChars(tr.fd, tr.wrappedReader, p)
}

// Close the underlying stream
func (tr *terminalReader) Close() (err error) {
	return tr.wrappedReader.Close()
}

func (tr *terminalReader) readFromWrappedReader(p []byte) (n int, err error) {
	return tr.wrappedReader.Read(p)
}

type ansiCommand struct {
	CommandBytes []byte
	Command      string
	Parameters   []string
	IsSpecial    bool
}

func parseAnsiCommand(command []byte) *ansiCommand {
	if isCharacterSelectionCmdChar(command[1]) {
		// Is Character Set Selection commands
		return &ansiCommand{
			CommandBytes: command,
			Command:      string(command),
			IsSpecial:    true,
		}
	}
	// last char is command character
	lastCharIndex := len(command) - 1

	retValue := &ansiCommand{
		CommandBytes: command,
		Command:      string(command[lastCharIndex]),
		IsSpecial:    false,
	}
	// more than a single escape
	if lastCharIndex != 0 {
		start := 1
		// skip if double char escape sequence
		if command[0] == ANSI_ESCAPE_PRIMARY && command[1] == ANSI_ESCAPE_SECONDARY {
			start++
		}
		// convert this to GetNextParam method
		retValue.Parameters = strings.Split(string(command[start:lastCharIndex]), ANSI_PARAMETER_SEP)
	}
	return retValue
}

func (c *ansiCommand) getParam(index int) string {
	if len(c.Parameters) > index {
		return c.Parameters[index]
	}
	return ""
}

func (ac *ansiCommand) String() string {
	return fmt.Sprintf("0x%v \"%v\" (\"%v\")",
		bytesToHex(ac.CommandBytes),
		ac.Command,
		strings.Join(ac.Parameters, "\",\""))
}

func bytesToHex(b []byte) string {
	hex := make([]string, len(b))
	for i, ch := range b {
		hex[i] = fmt.Sprintf("%X", ch)
	}
	return strings.Join(hex, "")
}

func parseInt16OrDefault(s string, defaultValue int16) (n int16, err error) {
	if s == "" {
		return defaultValue, nil
	}
	parsedValue, err := strconv.ParseInt(s, 10, 16)
	if err != nil {
		return defaultValue, err
	}
	return int16(parsedValue), nil
}
@ -1,26 +0,0 @@
package timeutils

import (
	"errors"
	"time"
)

const (
	// RFC3339NanoFixed is our own version of RFC3339Nano because we want one
	// that pads the nano seconds part with zeros to ensure
	// the timestamps are aligned in the logs.
	RFC3339NanoFixed = "2006-01-02T15:04:05.000000000Z07:00"
	// JSONFormat is the format used by FastMarshalJSON
	JSONFormat = `"` + time.RFC3339Nano + `"`
)

// FastMarshalJSON avoids one of the extra allocations that
// time.MarshalJSON is making.
func FastMarshalJSON(t time.Time) (string, error) {
	if y := t.Year(); y < 0 || y >= 10000 {
		// RFC 3339 is clear that years are 4 digits exactly.
		// See golang.org/issue/4556#c15 for more discussion.
		return "", errors.New("time.MarshalJSON: year outside of range [0,9999]")
	}
	return t.Format(JSONFormat), nil
}
@ -1,36 +0,0 @@
package timeutils

import (
	"strconv"
	"strings"
	"time"
)

// GetTimestamp tries to parse given string as golang duration,
// then RFC3339 time and finally as a Unix timestamp. If
// any of these were successful, it returns a Unix timestamp
// as string otherwise returns the given value back.
// In case of duration input, the returned timestamp is computed
// as the given reference time minus the amount of the duration.
func GetTimestamp(value string, reference time.Time) string {
	if d, err := time.ParseDuration(value); value != "0" && err == nil {
		return strconv.FormatInt(reference.Add(-d).Unix(), 10)
	}

	var format string
	if strings.Contains(value, ".") {
		format = time.RFC3339Nano
	} else {
		format = time.RFC3339
	}

	loc := time.FixedZone(time.Now().Zone())
	if len(value) < len(format) {
		format = format[:len(value)]
	}
	t, err := time.ParseInLocation(format, value, loc)
	if err != nil {
		return value
	}
	return strconv.FormatInt(t.Unix(), 10)
}
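A brief illustration of the GetTimestamp fallback chain described in the comment above; the vendored import path `github.com/docker/docker/pkg/timeutils` is assumed.

```Go
package main

import (
	"fmt"
	"time"

	// assumed vendored import path of the package deleted above
	"github.com/docker/docker/pkg/timeutils"
)

func main() {
	ref := time.Now()

	// A duration is interpreted as "that long before the reference time".
	fmt.Println(timeutils.GetTimestamp("10m", ref)) // Unix seconds of ref minus 10 minutes

	// Anything that fails to parse is returned unchanged.
	fmt.Println(timeutils.GetTimestamp("not-a-time", ref)) // "not-a-time"
}
```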
@ -1,31 +0,0 @@
package units

import (
	"fmt"
	"time"
)

// HumanDuration returns a human-readable approximation of a duration
// (eg. "About a minute", "4 hours ago", etc.)
func HumanDuration(d time.Duration) string {
	if seconds := int(d.Seconds()); seconds < 1 {
		return "Less than a second"
	} else if seconds < 60 {
		return fmt.Sprintf("%d seconds", seconds)
	} else if minutes := int(d.Minutes()); minutes == 1 {
		return "About a minute"
	} else if minutes < 60 {
		return fmt.Sprintf("%d minutes", minutes)
	} else if hours := int(d.Hours()); hours == 1 {
		return "About an hour"
	} else if hours < 48 {
		return fmt.Sprintf("%d hours", hours)
	} else if hours < 24*7*2 {
		return fmt.Sprintf("%d days", hours/24)
	} else if hours < 24*30*3 {
		return fmt.Sprintf("%d weeks", hours/24/7)
	} else if hours < 24*365*2 {
		return fmt.Sprintf("%d months", hours/24/30)
	}
	return fmt.Sprintf("%d years", int(d.Hours())/24/365)
}
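For reference, the bucketing above produces output like the following; the vendored import path `github.com/docker/docker/pkg/units` is assumed.

```Go
package main

import (
	"fmt"
	"time"

	// assumed vendored import path of the package deleted above
	"github.com/docker/docker/pkg/units"
)

func main() {
	fmt.Println(units.HumanDuration(90 * time.Second)) // "About a minute"
	fmt.Println(units.HumanDuration(47 * time.Hour))   // "47 hours"
	fmt.Println(units.HumanDuration(72 * time.Hour))   // "3 days"
}
```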
@ -1,93 +0,0 @@
package units

import (
	"fmt"
	"regexp"
	"strconv"
	"strings"
)

// See: http://en.wikipedia.org/wiki/Binary_prefix
const (
	// Decimal

	KB = 1000
	MB = 1000 * KB
	GB = 1000 * MB
	TB = 1000 * GB
	PB = 1000 * TB

	// Binary

	KiB = 1024
	MiB = 1024 * KiB
	GiB = 1024 * MiB
	TiB = 1024 * GiB
	PiB = 1024 * TiB
)

type unitMap map[string]int64

var (
	decimalMap = unitMap{"k": KB, "m": MB, "g": GB, "t": TB, "p": PB}
	binaryMap  = unitMap{"k": KiB, "m": MiB, "g": GiB, "t": TiB, "p": PiB}
	sizeRegex  = regexp.MustCompile(`^(\d+)([kKmMgGtTpP])?[bB]?$`)
)

var decimapAbbrs = []string{"B", "kB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB"}
var binaryAbbrs = []string{"B", "KiB", "MiB", "GiB", "TiB", "PiB", "EiB", "ZiB", "YiB"}

// CustomSize returns a human-readable approximation of a size
// using custom format
func CustomSize(format string, size float64, base float64, _map []string) string {
	i := 0
	for size >= base {
		size = size / base
		i++
	}
	return fmt.Sprintf(format, size, _map[i])
}

// HumanSize returns a human-readable approximation of a size
// using SI standard (eg. "44kB", "17MB")
func HumanSize(size float64) string {
	return CustomSize("%.4g %s", size, 1000.0, decimapAbbrs)
}

func BytesSize(size float64) string {
	return CustomSize("%.4g %s", size, 1024.0, binaryAbbrs)
}

// FromHumanSize returns an integer from a human-readable specification of a
// size using SI standard (eg. "44kB", "17MB")
func FromHumanSize(size string) (int64, error) {
	return parseSize(size, decimalMap)
}

// RAMInBytes parses a human-readable string representing an amount of RAM
// in bytes, kibibytes, mebibytes, gibibytes, or tebibytes and
// returns the number of bytes, or -1 if the string is unparseable.
// Units are case-insensitive, and the 'b' suffix is optional.
func RAMInBytes(size string) (int64, error) {
	return parseSize(size, binaryMap)
}

// Parses the human-readable size string into the amount it represents
func parseSize(sizeStr string, uMap unitMap) (int64, error) {
	matches := sizeRegex.FindStringSubmatch(sizeStr)
	if len(matches) != 3 {
		return -1, fmt.Errorf("invalid size: '%s'", sizeStr)
	}

	size, err := strconv.ParseInt(matches[1], 10, 0)
	if err != nil {
		return -1, err
	}

	unitPrefix := strings.ToLower(matches[2])
	if mul, ok := uMap[unitPrefix]; ok {
		size *= mul
	}

	return size, nil
}
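A short sketch contrasting the decimal and binary entry points above; the vendored import path `github.com/docker/docker/pkg/units` is assumed.

```Go
package main

import (
	"fmt"

	// assumed vendored import path of the package deleted above
	"github.com/docker/docker/pkg/units"
)

func main() {
	fmt.Println(units.HumanSize(1000000)) // "1 MB" (SI, base 1000)

	n, err := units.RAMInBytes("64m") // binary units, base 1024
	fmt.Println(n, err)               // 67108864 <nil>
}
```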
@ -1,70 +0,0 @@
# Compiled Object files, Static and Dynamic libs (Shared Objects)
*.o
*.a
*.so

# Folders
_obj
_test

# Architecture specific extensions/prefixes
*.[568vq]
[568vq].out

*.cgo1.go
*.cgo2.c
_cgo_defun.c
_cgo_gotypes.go
_cgo_export.*

_testmain.go

*.exe

restful.html

*.out

tmp.prof

go-restful.test

examples/restful-basic-authentication

examples/restful-encoding-filter

examples/restful-filters

examples/restful-hello-world

examples/restful-resource-functions

examples/restful-serve-static

examples/restful-user-service

*.DS_Store
examples/restful-user-resource

examples/restful-multi-containers

examples/restful-form-handling

examples/restful-CORS-filter

examples/restful-options-filter

examples/restful-curly-router

examples/restful-cpuprofiler-service

examples/restful-pre-post-filters

curly.prof

examples/restful-NCSA-logging

examples/restful-html-template

s.html
restful-path-tail
@ -1,159 +0,0 @@
Change history of go-restful
=
2015-09-27
- rename new WriteStatusAnd... to WriteHeaderAnd... for consistency

2015-09-25
- fixed problem with changing Header after WriteHeader (issue 235)

2015-09-14
- changed behavior of WriteHeader (immediate write) and WriteEntity (no status write)
- added support for custom EntityReaderWriters.

2015-08-06
- add support for reading entities from compressed request content
- use sync.Pool for compressors of http response and request body
- add Description to Parameter for documentation in Swagger UI

2015-03-20
- add configurable logging

2015-03-18
- if not specified, the Operation is derived from the Route function

2015-03-17
- expose Parameter creation functions
- make trace logger an interface
- fix OPTIONSFilter
- customize rendering of ServiceError
- JSR311 router now handles wildcards
- add Notes to Route

2014-11-27
- (api add) PrettyPrint per response. (as proposed in #167)

2014-11-12
- (api add) ApiVersion(.) for documentation in Swagger UI

2014-11-10
- (api change) struct fields tagged with "description" show up in Swagger UI

2014-10-31
- (api change) ReturnsError -> Returns
- (api add) RouteBuilder.Do(aBuilder) for DRY use of RouteBuilder
- fix swagger nested structs
- sort Swagger response messages by code

2014-10-23
- (api add) ReturnsError allows you to document Http codes in swagger
- fixed problem with greedy CurlyRouter
- (api add) Access-Control-Max-Age in CORS
- add tracing functionality (injectable) for debugging purposes
- support JSON parse 64bit int
- fix empty parameters for swagger
- WebServicesUrl is now optional for swagger
- fixed duplicate AccessControlAllowOrigin in CORS
- (api change) expose ServeMux in container
- (api add) added AllowedDomains in CORS
- (api add) ParameterNamed for detailed documentation

2014-04-16
- (api add) expose constructor of Request for testing.

2014-06-27
- (api add) ParameterNamed gives access to a Parameter definition and its data (for further specification).
- (api add) SetCacheReadEntity allows control over whether or not the request body is being cached (default true for compatibility reasons).

2014-07-03
- (api add) CORS can be configured with a list of allowed domains

2014-03-12
- (api add) Route path parameters can use wildcard or regular expressions. (requires CurlyRouter)

2014-02-26
- (api add) Request now provides information about the matched Route, see method SelectedRoutePath

2014-02-17
- (api change) renamed parameter constants (go-lint checks)

2014-01-10
- (api add) support for CloseNotify, see http://golang.org/pkg/net/http/#CloseNotifier

2014-01-07
- (api change) Write* methods in Response now return the error or nil.
- added example of serving HTML from a Go template.
- fixed comparing Allowed headers in CORS (is now case-insensitive)

2013-11-13
- (api add) Response knows how many bytes are written to the response body.

2013-10-29
- (api add) RecoverHandler(handler RecoverHandleFunction) to change how panic recovery is handled. Default behavior is to log and return a stacktrace. This may be a security issue as it exposes sourcecode information.

2013-10-04
- (api add) Response knows what HTTP status has been written
- (api add) Request can have attributes (map of string->interface, also called request-scoped variables)

2013-09-12
- (api change) Router interface simplified
- Implemented CurlyRouter, a Router that does not use/allow regular expressions in paths

2013-08-05
- add OPTIONS support
- add CORS support

2013-08-27
- fixed some reported issues (see github)
- (api change) deprecated use of WriteError; use WriteErrorString instead

2014-04-15
- (fix) v1.0.1 tag: fix Issue 111: WriteErrorString

2013-08-08
- (api add) Added implementation Container: a WebServices collection with its own http.ServeMux allowing multiple endpoints per program. Existing uses of go-restful will register their services to the DefaultContainer.
- (api add) the swagger package has been extended to have a UI per container.
- if panic is detected then a small stack trace is printed (thanks to runner-mei)
- (api add) WriteErrorString to Response

Important API changes:

- (api remove) package variable DoNotRecover no longer works ; use restful.DefaultContainer.DoNotRecover(true) instead.
- (api remove) package variable EnableContentEncoding no longer works ; use restful.DefaultContainer.EnableContentEncoding(true) instead.

2013-07-06

- (api add) Added support for response encoding (gzip and deflate(zlib)). This feature is disabled on default (for backwards compatibility). Use restful.EnableContentEncoding = true in your initialization to enable this feature.

2013-06-19

- (improve) DoNotRecover option, moved request body closer, improved ReadEntity

2013-06-03

- (api change) removed Dispatcher interface, hide PathExpression
- changed receiver names of type functions to be more idiomatic Go

2013-06-02

- (optimize) Cache the RegExp compilation of Paths.

2013-05-22

- (api add) Added support for request/response filter functions

2013-05-18

- (api add) Added feature to change the default Http Request Dispatch function (travis cline)
- (api change) Moved Swagger Webservice to swagger package (see example restful-user)

[2012-11-14 .. 2013-05-18>

- See https://github.com/emicklei/go-restful/commits

2012-11-14

- Initial commit
@ -1,22 +0,0 @@
Copyright (c) 2012,2013 Ernest Micklei

MIT License

Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:

The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
@ -1,74 +0,0 @@
go-restful
==========

package for building REST-style Web Services using Google Go

REST asks developers to use HTTP methods explicitly and in a way that's consistent with the protocol definition. This basic REST design principle establishes a one-to-one mapping between create, read, update, and delete (CRUD) operations and HTTP methods. According to this mapping:

- GET = Retrieve a representation of a resource
- POST = Create if you are sending content to the server to create a subordinate of the specified resource collection, using some server-side algorithm.
- PUT = Create if you are sending the full content of the specified resource (URI).
- PUT = Update if you are updating the full content of the specified resource.
- DELETE = Delete if you are requesting the server to delete the resource
- PATCH = Update partial content of a resource
- OPTIONS = Get information about the communication options for the request URI

### Example

```Go
ws := new(restful.WebService)
ws.
	Path("/users").
	Consumes(restful.MIME_XML, restful.MIME_JSON).
	Produces(restful.MIME_JSON, restful.MIME_XML)

ws.Route(ws.GET("/{user-id}").To(u.findUser).
	Doc("get a user").
	Param(ws.PathParameter("user-id", "identifier of the user").DataType("string")).
	Writes(User{}))
...

func (u UserResource) findUser(request *restful.Request, response *restful.Response) {
	id := request.PathParameter("user-id")
	...
}
```

[Full API of a UserResource](https://github.com/emicklei/go-restful/tree/master/examples/restful-user-resource.go)

### Features

- Routes for request → function mapping with path parameter (e.g. {id}) support
- Configurable router:
  - Routing algorithm after [JSR311](http://jsr311.java.net/nonav/releases/1.1/spec/spec.html) that is implemented using (but does **not** accept) regular expressions (See RouterJSR311 which is used by default)
  - Fast routing algorithm that allows static elements, regular expressions and dynamic parameters in the URL path (e.g. /meetings/{id} or /static/{subpath:*}, See CurlyRouter)
- Request API for reading structs from JSON/XML and accessing parameters (path, query, header)
- Response API for writing structs to JSON/XML and setting headers
- Filters for intercepting the request → response flow on Service or Route level
- Request-scoped variables using attributes
- Containers for WebServices on different HTTP endpoints
- Content encoding (gzip, deflate) of request and response payloads
- Automatic responses on OPTIONS (using a filter)
- Automatic CORS request handling (using a filter)
- API declaration for Swagger UI (see swagger package)
- Panic recovery to produce HTTP 500, customizable using RecoverHandler(...)
- Route errors produce HTTP 404/405/406/415 errors, customizable using ServiceErrorHandler(...)
- Configurable (trace) logging
- Customizable encoding using EntityReaderWriter registration
- Customizable gzip/deflate readers and writers using CompressorProvider registration

### Resources

- [Documentation on godoc.org](http://godoc.org/github.com/emicklei/go-restful)
- [Code examples](https://github.com/emicklei/go-restful/tree/master/examples)
- [Example posted on blog](http://ernestmicklei.com/2012/11/24/go-restful-first-working-example/)
- [Design explained on blog](http://ernestmicklei.com/2012/11/11/go-restful-api-design/)
- [sourcegraph](https://sourcegraph.com/github.com/emicklei/go-restful)
- [gopkg.in](https://gopkg.in/emicklei/go-restful.v1)
- [showcase: Mora - MongoDB REST Api server](https://github.com/emicklei/mora)

[](https://drone.io/github.com/emicklei/go-restful/latest)

(c) 2012 - 2015, http://ernestmicklei.com. MIT License

Type ```git shortlog -s``` for a full list of contributors.
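To complement the README excerpt above, a minimal end-to-end sketch of a go-restful v1 service. It assumes the package-level `restful.Add` registers the WebService on the DefaultContainer, which serves through `http.DefaultServeMux`; treat it as illustrative rather than canonical.

```Go
package main

import (
	"io"
	"net/http"

	"github.com/emicklei/go-restful"
)

func hello(req *restful.Request, resp *restful.Response) {
	io.WriteString(resp, "hello, world")
}

func main() {
	ws := new(restful.WebService)
	ws.Route(ws.GET("/hello").To(hello))
	restful.Add(ws) // assumed: registers the WebService on the DefaultContainer
	http.ListenAndServe(":8080", nil)
}
```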
@ -1 +0,0 @@
{"SkipDirs": ["examples"]}
@ -1,10 +0,0 @@
#go test -run=none -file bench_test.go -test.bench . -cpuprofile=bench_test.out

go test -c
./go-restful.test -test.run=none -test.cpuprofile=tmp.prof -test.bench=BenchmarkMany
./go-restful.test -test.run=none -test.cpuprofile=curly.prof -test.bench=BenchmarkManyCurly

#go tool pprof go-restful.test tmp.prof
go tool pprof go-restful.test curly.prof
@ -1,110 +0,0 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
	"compress/gzip"
	"compress/zlib"
	"errors"
	"io"
	"net/http"
	"strings"
)

// OBSOLETE : use restful.DefaultContainer.EnableContentEncoding(true) to change this setting.
var EnableContentEncoding = false

// CompressingResponseWriter is a http.ResponseWriter that can perform content encoding (gzip and zlib)
type CompressingResponseWriter struct {
	writer     http.ResponseWriter
	compressor io.WriteCloser
	encoding   string
}

// Header is part of http.ResponseWriter interface
func (c *CompressingResponseWriter) Header() http.Header {
	return c.writer.Header()
}

// WriteHeader is part of http.ResponseWriter interface
func (c *CompressingResponseWriter) WriteHeader(status int) {
	c.writer.WriteHeader(status)
}

// Write is part of http.ResponseWriter interface
// It is passed through the compressor
func (c *CompressingResponseWriter) Write(bytes []byte) (int, error) {
	if c.isCompressorClosed() {
		return -1, errors.New("Compressing error: tried to write data using closed compressor")
	}
	return c.compressor.Write(bytes)
}

// CloseNotify is part of http.CloseNotifier interface
func (c *CompressingResponseWriter) CloseNotify() <-chan bool {
	return c.writer.(http.CloseNotifier).CloseNotify()
}

// Close the underlying compressor
func (c *CompressingResponseWriter) Close() error {
	if c.isCompressorClosed() {
		return errors.New("Compressing error: tried to close already closed compressor")
	}

	c.compressor.Close()
	if ENCODING_GZIP == c.encoding {
		currentCompressorProvider.ReleaseGzipWriter(c.compressor.(*gzip.Writer))
	}
	if ENCODING_DEFLATE == c.encoding {
		currentCompressorProvider.ReleaseZlibWriter(c.compressor.(*zlib.Writer))
	}
	// gc hint needed?
	c.compressor = nil
	return nil
}

func (c *CompressingResponseWriter) isCompressorClosed() bool {
	return nil == c.compressor
}

// WantsCompressedResponse reads the Accept-Encoding header to see if and which encoding is requested.
func wantsCompressedResponse(httpRequest *http.Request) (bool, string) {
	header := httpRequest.Header.Get(HEADER_AcceptEncoding)
	gi := strings.Index(header, ENCODING_GZIP)
	zi := strings.Index(header, ENCODING_DEFLATE)
	// use in order of appearance
	if gi == -1 {
		return zi != -1, ENCODING_DEFLATE
	} else if zi == -1 {
		return gi != -1, ENCODING_GZIP
	} else {
		if gi < zi {
			return true, ENCODING_GZIP
		}
		return true, ENCODING_DEFLATE
	}
}

// NewCompressingResponseWriter create a CompressingResponseWriter for a known encoding = {gzip,deflate}
func NewCompressingResponseWriter(httpWriter http.ResponseWriter, encoding string) (*CompressingResponseWriter, error) {
	httpWriter.Header().Set(HEADER_ContentEncoding, encoding)
	c := new(CompressingResponseWriter)
	c.writer = httpWriter
	var err error
	if ENCODING_GZIP == encoding {
		w := currentCompressorProvider.AcquireGzipWriter()
		w.Reset(httpWriter)
		c.compressor = w
		c.encoding = ENCODING_GZIP
	} else if ENCODING_DEFLATE == encoding {
		w := currentCompressorProvider.AcquireZlibWriter()
		w.Reset(httpWriter)
		c.compressor = w
		c.encoding = ENCODING_DEFLATE
	} else {
		return nil, errors.New("Unknown encoding:" + encoding)
	}
	return c, err
}
@ -1,103 +0,0 @@
package restful

// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
	"compress/gzip"
	"compress/zlib"
)

// BoundedCachedCompressors is a CompressorProvider that uses a cache with a fixed amount
// of writers and readers (resources).
// If a new resource is acquired and all are in use, it will return a new unmanaged resource.
type BoundedCachedCompressors struct {
	gzipWriters     chan *gzip.Writer
	gzipReaders     chan *gzip.Reader
	zlibWriters     chan *zlib.Writer
	writersCapacity int
	readersCapacity int
}

// NewBoundedCachedCompressors returns a new, with filled cache, BoundedCachedCompressors.
func NewBoundedCachedCompressors(writersCapacity, readersCapacity int) *BoundedCachedCompressors {
	b := &BoundedCachedCompressors{
		gzipWriters:     make(chan *gzip.Writer, writersCapacity),
		gzipReaders:     make(chan *gzip.Reader, readersCapacity),
		zlibWriters:     make(chan *zlib.Writer, writersCapacity),
		writersCapacity: writersCapacity,
		readersCapacity: readersCapacity,
	}
	for ix := 0; ix < writersCapacity; ix++ {
		b.gzipWriters <- newGzipWriter()
		b.zlibWriters <- newZlibWriter()
	}
	for ix := 0; ix < readersCapacity; ix++ {
		b.gzipReaders <- newGzipReader()
	}
	return b
}

// AcquireGzipWriter returns a resettable *gzip.Writer. Needs to be released.
func (b *BoundedCachedCompressors) AcquireGzipWriter() *gzip.Writer {
	var writer *gzip.Writer
	select {
	case writer, _ = <-b.gzipWriters:
	default:
		// return a new unmanaged one
		writer = newGzipWriter()
	}
	return writer
}

// ReleaseGzipWriter accepts a writer (does not have to be one that was cached)
// only when the cache has room for it. It will ignore it otherwise.
func (b *BoundedCachedCompressors) ReleaseGzipWriter(w *gzip.Writer) {
	// forget the unmanaged ones
	if len(b.gzipWriters) < b.writersCapacity {
		b.gzipWriters <- w
	}
}

// AcquireGzipReader returns a *gzip.Reader. Needs to be released.
func (b *BoundedCachedCompressors) AcquireGzipReader() *gzip.Reader {
	var reader *gzip.Reader
	select {
	case reader, _ = <-b.gzipReaders:
	default:
		// return a new unmanaged one
		reader = newGzipReader()
	}
	return reader
}

// ReleaseGzipReader accepts a reader (does not have to be one that was cached)
// only when the cache has room for it. It will ignore it otherwise.
func (b *BoundedCachedCompressors) ReleaseGzipReader(r *gzip.Reader) {
	// forget the unmanaged ones
	if len(b.gzipReaders) < b.readersCapacity {
		b.gzipReaders <- r
	}
}

// AcquireZlibWriter returns a resettable *zlib.Writer. Needs to be released.
func (b *BoundedCachedCompressors) AcquireZlibWriter() *zlib.Writer {
	var writer *zlib.Writer
	select {
	case writer, _ = <-b.zlibWriters:
	default:
		// return a new unmanaged one
		writer = newZlibWriter()
	}
	return writer
}

// ReleaseZlibWriter accepts a writer (does not have to be one that was cached)
// only when the cache has room for it. It will ignore it otherwise.
func (b *BoundedCachedCompressors) ReleaseZlibWriter(w *zlib.Writer) {
	// forget the unmanaged ones
	if len(b.zlibWriters) < b.writersCapacity {
		b.zlibWriters <- w
	}
}
@ -1,91 +0,0 @@
package restful

// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
	"bytes"
	"compress/gzip"
	"compress/zlib"
	"sync"
)

// SyncPoolCompessors is a CompressorProvider that uses the standard sync.Pool.
type SyncPoolCompessors struct {
	GzipWriterPool *sync.Pool
	GzipReaderPool *sync.Pool
	ZlibWriterPool *sync.Pool
}

// NewSyncPoolCompessors returns a new ("empty") SyncPoolCompessors.
func NewSyncPoolCompessors() *SyncPoolCompessors {
	return &SyncPoolCompessors{
		GzipWriterPool: &sync.Pool{
			New: func() interface{} { return newGzipWriter() },
		},
		GzipReaderPool: &sync.Pool{
			New: func() interface{} { return newGzipReader() },
		},
		ZlibWriterPool: &sync.Pool{
			New: func() interface{} { return newZlibWriter() },
		},
	}
}

func (s *SyncPoolCompessors) AcquireGzipWriter() *gzip.Writer {
	return s.GzipWriterPool.Get().(*gzip.Writer)
}

func (s *SyncPoolCompessors) ReleaseGzipWriter(w *gzip.Writer) {
	s.GzipWriterPool.Put(w)
}

func (s *SyncPoolCompessors) AcquireGzipReader() *gzip.Reader {
	return s.GzipReaderPool.Get().(*gzip.Reader)
}

func (s *SyncPoolCompessors) ReleaseGzipReader(r *gzip.Reader) {
	s.GzipReaderPool.Put(r)
}

func (s *SyncPoolCompessors) AcquireZlibWriter() *zlib.Writer {
	return s.ZlibWriterPool.Get().(*zlib.Writer)
}

func (s *SyncPoolCompessors) ReleaseZlibWriter(w *zlib.Writer) {
	s.ZlibWriterPool.Put(w)
}

func newGzipWriter() *gzip.Writer {
	// create with an empty bytes writer; it will be replaced before using the gzipWriter
	writer, err := gzip.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
	if err != nil {
		panic(err.Error())
	}
	return writer
}

func newGzipReader() *gzip.Reader {
	// create with an empty reader (but with GZIP header); it will be replaced before using the gzipReader
	// we can safely use currentCompressorProvider because it is set on package initialization.
	w := currentCompressorProvider.AcquireGzipWriter()
	defer currentCompressorProvider.ReleaseGzipWriter(w)
	b := new(bytes.Buffer)
	w.Reset(b)
	w.Flush()
	w.Close()
	reader, err := gzip.NewReader(bytes.NewReader(b.Bytes()))
	if err != nil {
		panic(err.Error())
	}
	return reader
}

func newZlibWriter() *zlib.Writer {
	writer, err := zlib.NewWriterLevel(new(bytes.Buffer), gzip.BestSpeed)
	if err != nil {
		panic(err.Error())
	}
	return writer
}
@ -1,53 +0,0 @@
package restful

// Copyright 2015 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

import (
	"compress/gzip"
	"compress/zlib"
)

type CompressorProvider interface {
	// Returns a *gzip.Writer which needs to be released later.
	// Before using it, call Reset().
	AcquireGzipWriter() *gzip.Writer

	// Releases an acquired *gzip.Writer.
	ReleaseGzipWriter(w *gzip.Writer)

	// Returns a *gzip.Reader which needs to be released later.
	AcquireGzipReader() *gzip.Reader

	// Releases an acquired *gzip.Reader.
	ReleaseGzipReader(w *gzip.Reader)

	// Returns a *zlib.Writer which needs to be released later.
	// Before using it, call Reset().
	AcquireZlibWriter() *zlib.Writer

	// Releases an acquired *zlib.Writer.
	ReleaseZlibWriter(w *zlib.Writer)
}

// currentCompressorProvider is the actual provider of compressors (zlib or gzip).
var currentCompressorProvider CompressorProvider

func init() {
	currentCompressorProvider = NewSyncPoolCompessors()
}

// CurrentCompressorProvider returns the current CompressorProvider.
// It is initialized using a SyncPoolCompessors.
func CurrentCompressorProvider() CompressorProvider {
	return currentCompressorProvider
}

// SetCompressorProvider sets the actual provider of compressors (zlib or gzip).
func SetCompressorProvider(p CompressorProvider) {
	if p == nil {
		panic("cannot set compressor provider to nil")
	}
	currentCompressorProvider = p
}
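The interface above is the extension point for custom compressor management. A hedged sketch of swapping in the bounded cache variant defined earlier in this diff; `restful.DefaultContainer` is assumed from the CHANGES notes.

```Go
package main

import (
	"net/http"

	"github.com/emicklei/go-restful"
)

func main() {
	// Keep at most 16 cached gzip/zlib writers and 16 gzip readers;
	// extra acquisitions fall back to unmanaged instances.
	restful.SetCompressorProvider(restful.NewBoundedCachedCompressors(16, 16))
	restful.DefaultContainer.EnableContentEncoding(true)

	http.ListenAndServe(":8080", restful.DefaultContainer)
}
```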
@ -1,30 +0,0 @@
package restful

// Copyright 2013 Ernest Micklei. All rights reserved.
// Use of this source code is governed by a license
// that can be found in the LICENSE file.

const (
	MIME_XML   = "application/xml"          // Accept or Content-Type used in Consumes() and/or Produces()
	MIME_JSON  = "application/json"         // Accept or Content-Type used in Consumes() and/or Produces()
	MIME_OCTET = "application/octet-stream" // If Content-Type is not present in request, use the default

	HEADER_Allow                         = "Allow"
	HEADER_Accept                        = "Accept"
	HEADER_Origin                        = "Origin"
	HEADER_ContentType                   = "Content-Type"
	HEADER_LastModified                  = "Last-Modified"
	HEADER_AcceptEncoding                = "Accept-Encoding"
	HEADER_ContentEncoding               = "Content-Encoding"
	HEADER_AccessControlExposeHeaders    = "Access-Control-Expose-Headers"
	HEADER_AccessControlRequestMethod    = "Access-Control-Request-Method"
	HEADER_AccessControlRequestHeaders   = "Access-Control-Request-Headers"
	HEADER_AccessControlAllowMethods     = "Access-Control-Allow-Methods"
	HEADER_AccessControlAllowOrigin      = "Access-Control-Allow-Origin"
	HEADER_AccessControlAllowCredentials = "Access-Control-Allow-Credentials"
	HEADER_AccessControlAllowHeaders     = "Access-Control-Allow-Headers"
	HEADER_AccessControlMaxAge           = "Access-Control-Max-Age"

	ENCODING_GZIP    = "gzip"
	ENCODING_DEFLATE = "deflate"
)
@ -1,338 +0,0 @@
|
|||
package restful
|
||||
|
||||
// Copyright 2013 Ernest Micklei. All rights reserved.
|
||||
// Use of this source code is governed by a license
|
||||
// that can be found in the LICENSE file.
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"runtime"
|
||||
"strings"
|
||||
"sync"
|
||||
|
||||
"github.com/emicklei/go-restful/log"
|
||||
)
|
||||
|
||||
// Container holds a collection of WebServices and a http.ServeMux to dispatch http requests.
|
||||
// The requests are further dispatched to routes of WebServices using a RouteSelector
|
||||
type Container struct {
|
||||
webServicesLock sync.RWMutex
|
||||
webServices []*WebService
|
||||
ServeMux *http.ServeMux
|
||||
isRegisteredOnRoot bool
|
||||
containerFilters []FilterFunction
|
||||
doNotRecover bool // default is false
|
||||
recoverHandleFunc RecoverHandleFunction
|
||||
serviceErrorHandleFunc ServiceErrorHandleFunction
|
||||
router RouteSelector // default is a RouterJSR311, CurlyRouter is the faster alternative
|
||||
contentEncodingEnabled bool // default is false
|
||||
}
|
||||
|
||||
// NewContainer creates a new Container using a new ServeMux and default router (RouterJSR311)
|
||||
func NewContainer() *Container {
|
||||
return &Container{
|
||||
webServices: []*WebService{},
|
||||
ServeMux: http.NewServeMux(),
|
||||
isRegisteredOnRoot: false,
|
||||
containerFilters: []FilterFunction{},
|
||||
doNotRecover: false,
|
||||
recoverHandleFunc: logStackOnRecover,
|
||||
serviceErrorHandleFunc: writeServiceError,
|
||||
router: RouterJSR311{},
|
||||
contentEncodingEnabled: false}
|
||||
}
|
||||
|
||||
// RecoverHandleFunction declares functions that can be used to handle a panic situation.
|
||||
// The first argument is what recover() returns. The second must be used to communicate an error response.
|
||||
type RecoverHandleFunction func(interface{}, http.ResponseWriter)
|
||||
|
||||
// RecoverHandler changes the default function (logStackOnRecover) to be called
|
||||
// when a panic is detected. DoNotRecover must be have its default value (=false).
|
||||
func (c *Container) RecoverHandler(handler RecoverHandleFunction) {
|
||||
c.recoverHandleFunc = handler
|
||||
}
|
||||
|
||||
// ServiceErrorHandleFunction declares functions that can be used to handle a service error situation.
|
||||
// The first argument is the service error, the second is the request that resulted in the error and
|
||||
// the third must be used to communicate an error response.
|
||||
type ServiceErrorHandleFunction func(ServiceError, *Request, *Response)
|
||||
|
||||
// ServiceErrorHandler changes the default function (writeServiceError) to be called
|
||||
// when a ServiceError is detected.
|
||||
func (c *Container) ServiceErrorHandler(handler ServiceErrorHandleFunction) {
|
||||
c.serviceErrorHandleFunc = handler
|
||||
}
|
||||
|
||||
// DoNotRecover controls whether panics will be caught to return HTTP 500.
|
||||
// If set to true, Route functions are responsible for handling any error situation.
|
||||
// Default value is false = recover from panics. This has performance implications.
|
||||
func (c *Container) DoNotRecover(doNot bool) {
|
||||
c.doNotRecover = doNot
|
||||
}
|
||||
|
||||
// Router changes the default Router (currently RouterJSR311)
|
||||
func (c *Container) Router(aRouter RouteSelector) {
|
||||
c.router = aRouter
|
||||
}
|
||||
|
||||
// EnableContentEncoding (default=false) allows for GZIP or DEFLATE encoding of responses.
|
||||
func (c *Container) EnableContentEncoding(enabled bool) {
|
||||
c.contentEncodingEnabled = enabled
|
||||
}
|
||||
|
||||
// Add a WebService to the Container. It will detect duplicate root paths and panic in that case.
|
||||
func (c *Container) Add(service *WebService) *Container {
|
||||
c.webServicesLock.Lock()
|
||||
defer c.webServicesLock.Unlock()
|
||||
// If registered on root then no additional specific mapping is needed
|
||||
if !c.isRegisteredOnRoot {
|
||||
pattern := c.fixedPrefixPath(service.RootPath())
|
||||
// check if root path registration is needed
|
||||
if "/" == pattern || "" == pattern {
|
||||
c.ServeMux.HandleFunc("/", c.dispatch)
|
||||
c.isRegisteredOnRoot = true
|
||||
} else {
|
||||
// detect if registration already exists
|
||||
alreadyMapped := false
|
||||
for _, each := range c.webServices {
|
||||
if each.RootPath() == service.RootPath() {
|
||||
alreadyMapped = true
|
||||
break
|
||||
}
|
||||
}
|
||||
if !alreadyMapped {
|
||||
c.ServeMux.HandleFunc(pattern, c.dispatch)
|
||||
if !strings.HasSuffix(pattern, "/") {
|
||||
c.ServeMux.HandleFunc(pattern+"/", c.dispatch)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
// cannot have duplicate root paths
|
||||
for _, each := range c.webServices {
|
||||
if each.RootPath() == service.RootPath() {
|
||||
log.Printf("[restful] WebService with duplicate root path detected:['%v']", each)
|
||||
os.Exit(1)
|
||||
}
|
||||
}
|
||||
// if rootPath was not set then lazy initialize it
|
||||
if len(service.rootPath) == 0 {
|
||||
service.Path("/")
|
||||
}
|
||||
c.webServices = append(c.webServices, service)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Container) Remove(ws *WebService) error {
|
||||
c.webServicesLock.Lock()
|
||||
defer c.webServicesLock.Unlock()
|
||||
newServices := []*WebService{}
|
||||
for ix := range c.webServices {
|
||||
if c.webServices[ix].rootPath != ws.rootPath {
|
||||
newServices = append(newServices, c.webServices[ix])
|
||||
}
|
||||
}
|
||||
c.webServices = newServices
|
||||
return nil
|
||||
}
|
||||
|
||||
// logStackOnRecover is the default RecoverHandleFunction and is called
|
||||
// when DoNotRecover is false and the recoverHandleFunc is not set for the container.
|
||||
// Default implementation logs the stacktrace and writes the stacktrace on the response.
|
||||
// This may be a security issue as it exposes sourcecode information.
|
||||
func logStackOnRecover(panicReason interface{}, httpWriter http.ResponseWriter) {
|
||||
var buffer bytes.Buffer
|
||||
buffer.WriteString(fmt.Sprintf("[restful] recover from panic situation: - %v\r\n", panicReason))
|
||||
for i := 2; ; i += 1 {
|
||||
_, file, line, ok := runtime.Caller(i)
|
||||
if !ok {
|
||||
break
|
||||
}
|
||||
buffer.WriteString(fmt.Sprintf(" %s:%d\r\n", file, line))
|
||||
}
|
||||
log.Print(buffer.String())
|
||||
httpWriter.WriteHeader(http.StatusInternalServerError)
|
||||
httpWriter.Write(buffer.Bytes())
|
||||
}
|
||||
|
||||
// writeServiceError is the default ServiceErrorHandleFunction and is called
|
||||
// when a ServiceError is returned during route selection. Default implementation
|
||||
// calls resp.WriteErrorString(err.Code, err.Message)
|
||||
func writeServiceError(err ServiceError, req *Request, resp *Response) {
|
||||
resp.WriteErrorString(err.Code, err.Message)
|
||||
}
|
||||
|
// Dispatch the incoming HTTP Request to a matching WebService.
func (c *Container) dispatch(httpWriter http.ResponseWriter, httpRequest *http.Request) {
	writer := httpWriter

	// CompressingResponseWriter should be closed after all operations are done
	defer func() {
		if compressWriter, ok := writer.(*CompressingResponseWriter); ok {
			compressWriter.Close()
		}
	}()

	// Install panic recovery unless told otherwise
	if !c.doNotRecover { // catch-all for a 500 response
		defer func() {
			if r := recover(); r != nil {
				c.recoverHandleFunc(r, writer)
				return
			}
		}()
	}
	// Ensure the request body (if any) is closed
	defer func() {
		if httpRequest.Body != nil {
			httpRequest.Body.Close()
		}
	}()

	// Detect whether compression is needed;
	// assume no compression, then test for an override
	if c.contentEncodingEnabled {
		doCompress, encoding := wantsCompressedResponse(httpRequest)
		if doCompress {
			var err error
			writer, err = NewCompressingResponseWriter(httpWriter, encoding)
			if err != nil {
				log.Print("[restful] unable to install compressor: ", err)
				httpWriter.WriteHeader(http.StatusInternalServerError)
				return
			}
		}
	}
	// Find the best matching Route; err is non-nil if no match was found
	var webService *WebService
	var route *Route
	var err error
	func() {
		c.webServicesLock.RLock()
		defer c.webServicesLock.RUnlock()
		webService, route, err = c.router.SelectRoute(
			c.webServices,
			httpRequest)
	}()
	if err != nil {
		// a non-200 response has already been written
		// run container filters anyway; they should not touch the response...
		chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
			switch err.(type) {
			case ServiceError:
				ser := err.(ServiceError)
				c.serviceErrorHandleFunc(ser, req, resp)
			}
			// TODO
		}}
		chain.ProcessFilter(NewRequest(httpRequest), NewResponse(writer))
		return
	}
	wrappedRequest, wrappedResponse := route.wrapRequestResponse(writer, httpRequest)
	// pass through filters (if any)
	if len(c.containerFilters)+len(webService.filters)+len(route.Filters) > 0 {
		// compose the filter chain: container filters first, then WebService filters, then Route filters
		allFilters := []FilterFunction{}
		allFilters = append(allFilters, c.containerFilters...)
		allFilters = append(allFilters, webService.filters...)
		allFilters = append(allFilters, route.Filters...)
		chain := FilterChain{Filters: allFilters, Target: func(req *Request, resp *Response) {
			// handle the request by the route after passing all filters
			route.Function(wrappedRequest, wrappedResponse)
		}}
		chain.ProcessFilter(wrappedRequest, wrappedResponse)
	} else {
		// no filters, handle the request by the route directly
		route.Function(wrappedRequest, wrappedResponse)
	}
}

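// A minimal usage sketch of the dispatch path: a Container registered as the
// handler of an http.Server, so every request flows through the recovery,
// compression, filter-chain and route-selection steps above. The route path
// and port are illustrative only.
//
//	ws := new(restful.WebService)
//	ws.Path("/hello")
//	ws.Route(ws.GET("").To(func(req *restful.Request, resp *restful.Response) {
//		resp.WriteEntity("hello, world")
//	}))
//
//	container := restful.NewContainer()
//	container.Add(ws)
//	log.Fatal(http.ListenAndServe(":8080", container))
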
// fixedPrefixPath returns the fixed part of the pathspec; the pathspec may include template vars {}
func (c Container) fixedPrefixPath(pathspec string) string {
	varBegin := strings.Index(pathspec, "{")
	if varBegin == -1 {
		return pathspec
	}
	return pathspec[:varBegin]
}

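// For illustration: fixedPrefixPath("/users/{id}/orders") returns "/users/",
// while a pathspec without template vars, e.g. "/healthz", is returned unchanged.
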
// ServeHTTP implements net/http.Handler, therefore a Container can be used as a Handler in an http.Server
func (c Container) ServeHTTP(httpwriter http.ResponseWriter, httpRequest *http.Request) {
	c.ServeMux.ServeHTTP(httpwriter, httpRequest)
}

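// Because a Container satisfies http.Handler, it can also be mounted on an
// outer mux next to plain handlers; the paths below are illustrative. Note
// that routes are matched against the full request path, so the WebServices
// added to the container would need root paths under "/api" for this to match.
//
//	mux := http.NewServeMux()
//	mux.Handle("/api/", container)
//	mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) {
//		w.WriteHeader(http.StatusOK)
//	})
//	log.Fatal(http.ListenAndServe(":8080", mux))
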
// Handle registers the handler for the given pattern. If a handler already exists for pattern, Handle panics.
func (c Container) Handle(pattern string, handler http.Handler) {
	c.ServeMux.Handle(pattern, handler)
}

// HandleWithFilter registers the handler for the given pattern.
// Container's filter chain is applied for handler.
// If a handler already exists for pattern, HandleWithFilter panics.
func (c *Container) HandleWithFilter(pattern string, handler http.Handler) {
	f := func(httpResponse http.ResponseWriter, httpRequest *http.Request) {
		if len(c.containerFilters) == 0 {
			handler.ServeHTTP(httpResponse, httpRequest)
			return
		}

		chain := FilterChain{Filters: c.containerFilters, Target: func(req *Request, resp *Response) {
			handler.ServeHTTP(httpResponse, httpRequest)
		}}
		chain.ProcessFilter(NewRequest(httpRequest), NewResponse(httpResponse))
	}

	c.Handle(pattern, http.HandlerFunc(f))
}

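// A minimal usage sketch: exposing a plain http.Handler through the container
// so the container's filter chain still runs for it (compare with Handle,
// which bypasses the filters). The pattern and directory are illustrative.
//
//	container.HandleWithFilter("/static/", http.StripPrefix("/static/",
//		http.FileServer(http.Dir("./static"))))
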
// Filter appends a container FilterFunction. These are called before dispatching
// an http.Request to a WebService from the container
func (c *Container) Filter(filter FilterFunction) {
	c.containerFilters = append(c.containerFilters, filter)
}

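// A minimal usage sketch of a container-level FilterFunction: it runs before
// dispatching to any WebService and must call chain.ProcessFilter to continue.
//
//	container.Filter(func(req *restful.Request, resp *restful.Response, chain *restful.FilterChain) {
//		log.Printf("[restful] %s %s", req.Request.Method, req.Request.URL)
//		chain.ProcessFilter(req, resp)
//	})
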
// RegisteredWebServices returns the collection of added WebServices
func (c Container) RegisteredWebServices() []*WebService {
	c.webServicesLock.RLock()
	defer c.webServicesLock.RUnlock()
	result := make([]*WebService, len(c.webServices))
	for ix := range c.webServices {
		result[ix] = c.webServices[ix]
	}
	return result
}

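// For illustration, the snapshot returned here can be inspected without
// holding the container's lock, e.g. to list all registered root paths:
//
//	for _, each := range container.RegisteredWebServices() {
//		log.Println("registered:", each.RootPath())
//	}
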
// computeAllowedMethods returns a list of HTTP methods that are valid for a Request
func (c Container) computeAllowedMethods(req *Request) []string {
	// Go through all RegisteredWebServices() and all their Routes to collect the options
	methods := []string{}
	requestPath := req.Request.URL.Path
	for _, ws := range c.RegisteredWebServices() {
		matches := ws.pathExpr.Matcher.FindStringSubmatch(requestPath)
		if matches != nil {
			finalMatch := matches[len(matches)-1]
			for _, rt := range ws.Routes() {
				matches := rt.pathExpr.Matcher.FindStringSubmatch(finalMatch)
				if matches != nil {
					lastMatch := matches[len(matches)-1]
					if lastMatch == "" || lastMatch == "/" { // include the method only when the remaining match is empty or "/"
						methods = append(methods, rt.Method)
					}
				}
			}
		}
	}
	// methods = append(methods, "OPTIONS") not sure about this
	return methods
}

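// computeAllowedMethods is what backs OPTIONS/Allow-header style responses.
// A minimal sketch, assuming this version ships the OPTIONSFilter helper
// that is built on top of it; verify its presence before relying on it.
//
//	// answer OPTIONS pre-flight requests with an Allow header derived from
//	// the methods computed above
//	container.Filter(container.OPTIONSFilter)
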
// newBasicRequestResponse creates a pair of Request, Response from their http versions.
// It is basic because no parameter or (produces) content-type information is given.
func newBasicRequestResponse(httpWriter http.ResponseWriter, httpRequest *http.Request) (*Request, *Response) {
	resp := NewResponse(httpWriter)
	resp.requestAccept = httpRequest.Header.Get(HEADER_Accept)
	return NewRequest(httpRequest), resp
}