diff --git a/Godeps/Godeps.json b/Godeps/Godeps.json
index 00a29df287..a79da162ea 100644
--- a/Godeps/Godeps.json
+++ b/Godeps/Godeps.json
@@ -105,8 +105,8 @@
},
{
"ImportPath": "github.com/Sirupsen/logrus",
- "Comment": "v0.6.2-10-g51fe59a",
- "Rev": "51fe59aca108dc5680109e7b2051cbdcfa5a253c"
+ "Comment": "v1.0.0",
+ "Rev": "202f25545ea4cf9b191ff7f846df5d87c9382c2b"
},
{
"ImportPath": "github.com/abbot/go-http-auth",
@@ -415,6 +415,66 @@
"Comment": "spec-v0.3.1",
"Rev": "137b4975ecab6e1f0c24c1e3c228a50a3cfba75e"
},
+ {
+ "ImportPath": "github.com/containers/image/copy",
+ "Rev": "dbd0a4cee2480da39048095a326506ae114d635a"
+ },
+ {
+ "ImportPath": "github.com/containers/image/docker",
+ "Rev": "dbd0a4cee2480da39048095a326506ae114d635a"
+ },
+ {
+ "ImportPath": "github.com/containers/image/docker/archive",
+ "Rev": "dbd0a4cee2480da39048095a326506ae114d635a"
+ },
+ {
+ "ImportPath": "github.com/containers/image/docker/policyconfiguration",
+ "Rev": "dbd0a4cee2480da39048095a326506ae114d635a"
+ },
+ {
+ "ImportPath": "github.com/containers/image/docker/reference",
+ "Rev": "dbd0a4cee2480da39048095a326506ae114d635a"
+ },
+ {
+ "ImportPath": "github.com/containers/image/docker/tarfile",
+ "Rev": "dbd0a4cee2480da39048095a326506ae114d635a"
+ },
+ {
+ "ImportPath": "github.com/containers/image/image",
+ "Rev": "dbd0a4cee2480da39048095a326506ae114d635a"
+ },
+ {
+ "ImportPath": "github.com/containers/image/manifest",
+ "Rev": "dbd0a4cee2480da39048095a326506ae114d635a"
+ },
+ {
+ "ImportPath": "github.com/containers/image/pkg/compression",
+ "Rev": "dbd0a4cee2480da39048095a326506ae114d635a"
+ },
+ {
+ "ImportPath": "github.com/containers/image/pkg/strslice",
+ "Rev": "dbd0a4cee2480da39048095a326506ae114d635a"
+ },
+ {
+ "ImportPath": "github.com/containers/image/signature",
+ "Rev": "dbd0a4cee2480da39048095a326506ae114d635a"
+ },
+ {
+ "ImportPath": "github.com/containers/image/transports",
+ "Rev": "dbd0a4cee2480da39048095a326506ae114d635a"
+ },
+ {
+ "ImportPath": "github.com/containers/image/types",
+ "Rev": "dbd0a4cee2480da39048095a326506ae114d635a"
+ },
+ {
+ "ImportPath": "github.com/containers/image/version",
+ "Rev": "dbd0a4cee2480da39048095a326506ae114d635a"
+ },
+ {
+ "ImportPath": "github.com/containers/storage/pkg/homedir",
+ "Rev": "87cb51ae7a688abb71dbe6e4ac5c962d9121b862"
+ },
{
"ImportPath": "github.com/coreos/etcd/alarm",
"Comment": "v3.1.5",
@@ -778,6 +838,16 @@
"Comment": "v3.0.0-4-g01aeca5",
"Rev": "01aeca54ebda6e0fbfafd0a524d234159c05ec20"
},
+ {
+ "ImportPath": "github.com/docker/distribution",
+ "Comment": "v2.4.0-rc.1-38-gcd27f179",
+ "Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51"
+ },
+ {
+ "ImportPath": "github.com/docker/distribution/context",
+ "Comment": "v2.4.0-rc.1-38-gcd27f179",
+ "Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51"
+ },
{
"ImportPath": "github.com/docker/distribution/digest",
"Comment": "v2.4.0-rc.1-38-gcd27f179",
@@ -788,6 +858,41 @@
"Comment": "v2.4.0-rc.1-38-gcd27f179",
"Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51"
},
+ {
+ "ImportPath": "github.com/docker/distribution/registry/api/errcode",
+ "Comment": "v2.4.0-rc.1-38-gcd27f179",
+ "Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51"
+ },
+ {
+ "ImportPath": "github.com/docker/distribution/registry/api/v2",
+ "Comment": "v2.4.0-rc.1-38-gcd27f179",
+ "Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51"
+ },
+ {
+ "ImportPath": "github.com/docker/distribution/registry/client",
+ "Comment": "v2.4.0-rc.1-38-gcd27f179",
+ "Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51"
+ },
+ {
+ "ImportPath": "github.com/docker/distribution/registry/client/transport",
+ "Comment": "v2.4.0-rc.1-38-gcd27f179",
+ "Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51"
+ },
+ {
+ "ImportPath": "github.com/docker/distribution/registry/storage/cache",
+ "Comment": "v2.4.0-rc.1-38-gcd27f179",
+ "Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51"
+ },
+ {
+ "ImportPath": "github.com/docker/distribution/registry/storage/cache/memory",
+ "Comment": "v2.4.0-rc.1-38-gcd27f179",
+ "Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51"
+ },
+ {
+ "ImportPath": "github.com/docker/distribution/uuid",
+ "Comment": "v2.4.0-rc.1-38-gcd27f179",
+ "Rev": "cd27f179f2c10c5d300e6d09025b538c475b0d51"
+ },
{
"ImportPath": "github.com/docker/docker/pkg/jsonlog",
"Comment": "v1.11.2",
@@ -900,24 +1005,28 @@
},
{
"ImportPath": "github.com/docker/go-connections/nat",
- "Comment": "v0.2.0-2-gf549a93",
- "Rev": "f549a9393d05688dff0992ef3efd8bbe6c628aeb"
+ "Comment": "v0.2.1-5-g55aadc3",
+ "Rev": "55aadc3cc561684699edcdd0921b9293c3ee6b49"
},
{
"ImportPath": "github.com/docker/go-connections/sockets",
- "Comment": "v0.2.0-2-gf549a93",
- "Rev": "f549a9393d05688dff0992ef3efd8bbe6c628aeb"
+ "Comment": "v0.2.1-5-g55aadc3",
+ "Rev": "55aadc3cc561684699edcdd0921b9293c3ee6b49"
},
{
"ImportPath": "github.com/docker/go-connections/tlsconfig",
- "Comment": "v0.2.0-2-gf549a93",
- "Rev": "f549a9393d05688dff0992ef3efd8bbe6c628aeb"
+ "Comment": "v0.2.1-5-g55aadc3",
+ "Rev": "55aadc3cc561684699edcdd0921b9293c3ee6b49"
},
{
"ImportPath": "github.com/docker/go-units",
"Comment": "v0.3.1-10-ge30f1e7",
"Rev": "e30f1e79f3cd72542f2026ceec18d3bd67ab859c"
},
+ {
+ "ImportPath": "github.com/docker/libtrust",
+ "Rev": "aabc10ec26b754e797f9028f4589c5b7bd90dc20"
+ },
{
"ImportPath": "github.com/docker/machine/commands/mcndirs",
"Comment": "docs-v0.8.2-2016-09-26-183-g07d63b6f",
@@ -1578,6 +1687,14 @@
"ImportPath": "github.com/gophercloud/gophercloud/pagination",
"Rev": "ed590d9afe113c6107cd60717b196155e6579e78"
},
+ {
+ "ImportPath": "github.com/gorilla/context",
+ "Rev": "215affda49addc4c8ef7e2534915df2c8c35c6cd"
+ },
+ {
+ "ImportPath": "github.com/gorilla/mux",
+ "Rev": "8096f47503459bcc74d1f4c487b7e6e42e5746b5"
+ },
{
"ImportPath": "github.com/gorilla/websocket",
"Rev": "6eb6ad425a89d9da7a5549bc6da8f79ba5c17844"
@@ -1849,6 +1966,10 @@
"ImportPath": "github.com/mreiferson/go-httpclient",
"Rev": "31f0106b4474f14bc441575c19d3a5fa21aa1f6c"
},
+ {
+ "ImportPath": "github.com/mtrmac/gpgme",
+ "Rev": "b2432428689ca58c2b8e8dea9449d3295cf96fc9"
+ },
{
"ImportPath": "github.com/mxk/go-flowrate/flowrate",
"Rev": "cca7078d478f8520f85629ad7c68962d31ed7682"
@@ -1857,6 +1978,21 @@
"ImportPath": "github.com/olekukonko/tablewriter",
"Rev": "bdcc175572fd7abece6c831e643891b9331bc9e7"
},
+ {
+ "ImportPath": "github.com/opencontainers/go-digest",
+ "Comment": "v1.0.0-rc0-6-g279bed9",
+ "Rev": "279bed98673dd5bef374d3b6e4b09e2af76183bf"
+ },
+ {
+ "ImportPath": "github.com/opencontainers/image-spec/specs-go",
+ "Comment": "v1.0.0-6-g7100382",
+ "Rev": "710038243d857231f17df1c3f4c10850154bd1f7"
+ },
+ {
+ "ImportPath": "github.com/opencontainers/image-spec/specs-go/v1",
+ "Comment": "v1.0.0-6-g7100382",
+ "Rev": "710038243d857231f17df1c3f4c10850154bd1f7"
+ },
{
"ImportPath": "github.com/opencontainers/runc/libcontainer",
"Comment": "v1.0.0-rc2-49-gd223e2ad",
@@ -2392,6 +2528,10 @@
"ImportPath": "golang.org/x/crypto/blowfish",
"Rev": "d172538b2cfce0c13cee31e647d0367aa8cd2486"
},
+ {
+ "ImportPath": "golang.org/x/crypto/cast5",
+ "Rev": "d172538b2cfce0c13cee31e647d0367aa8cd2486"
+ },
{
"ImportPath": "golang.org/x/crypto/curve25519",
"Rev": "d172538b2cfce0c13cee31e647d0367aa8cd2486"
@@ -2408,6 +2548,30 @@
"ImportPath": "golang.org/x/crypto/nacl/secretbox",
"Rev": "d172538b2cfce0c13cee31e647d0367aa8cd2486"
},
+ {
+ "ImportPath": "golang.org/x/crypto/openpgp",
+ "Rev": "d172538b2cfce0c13cee31e647d0367aa8cd2486"
+ },
+ {
+ "ImportPath": "golang.org/x/crypto/openpgp/armor",
+ "Rev": "d172538b2cfce0c13cee31e647d0367aa8cd2486"
+ },
+ {
+ "ImportPath": "golang.org/x/crypto/openpgp/elgamal",
+ "Rev": "d172538b2cfce0c13cee31e647d0367aa8cd2486"
+ },
+ {
+ "ImportPath": "golang.org/x/crypto/openpgp/errors",
+ "Rev": "d172538b2cfce0c13cee31e647d0367aa8cd2486"
+ },
+ {
+ "ImportPath": "golang.org/x/crypto/openpgp/packet",
+ "Rev": "d172538b2cfce0c13cee31e647d0367aa8cd2486"
+ },
+ {
+ "ImportPath": "golang.org/x/crypto/openpgp/s2k",
+ "Rev": "d172538b2cfce0c13cee31e647d0367aa8cd2486"
+ },
{
"ImportPath": "golang.org/x/crypto/pkcs12",
"Rev": "d172538b2cfce0c13cee31e647d0367aa8cd2486"
@@ -2508,6 +2672,10 @@
"ImportPath": "golang.org/x/sync/syncmap",
"Rev": "f52d1811a62927559de87708c8913c1650ce4f26"
},
+ {
+ "ImportPath": "golang.org/x/sync/errgroup",
+ "Rev": "f52d1811a62927559de87708c8913c1650ce4f26"
+ },
{
"ImportPath": "golang.org/x/sys/unix",
"Rev": "8f0908ab3b2457e2e15403d3697c9ef5cb4b57a9"
diff --git a/Makefile b/Makefile
index 9e7d4b6013..1844d55bce 100644
--- a/Makefile
+++ b/Makefile
@@ -67,7 +67,7 @@ KUBE_CROSS_DOCKER_CMD := docker run -w /go/src/$(REPOPATH) --user $(shell id -u)
 # $(call MINIKUBE_GO_BUILD_CMD, output file, OS)
 define MINIKUBE_GO_BUILD_CMD
-	$(MINIKUBE_ENV_$(2)) go build --installsuffix cgo -ldflags="$(MINIKUBE_LDFLAGS) $(K8S_VERSION_LDFLAGS)" -a -o $(1) k8s.io/minikube/cmd/minikube
+	$(MINIKUBE_ENV_$(2)) go build -tags "container_image_ostree_stub containers_image_openpgp" --installsuffix cgo -ldflags="$(MINIKUBE_LDFLAGS) $(K8S_VERSION_LDFLAGS)" -a -o $(1) k8s.io/minikube/cmd/minikube
 endef
ifeq ($(BUILD_IN_DOCKER),y)
diff --git a/vendor/github.com/Sirupsen/logrus/.travis.yml b/vendor/github.com/Sirupsen/logrus/.travis.yml
index c3af3ce27c..924f3c46be 100644
--- a/vendor/github.com/Sirupsen/logrus/.travis.yml
+++ b/vendor/github.com/Sirupsen/logrus/.travis.yml
@@ -1,10 +1,13 @@
language: go
go:
- - 1.2
- - 1.3
+ - 1.6.x
+ - 1.7.x
+ - 1.8.x
- tip
+env:
+ - GOMAXPROCS=4 GORACE=halt_on_error=1
install:
- - go get github.com/stretchr/testify
- - go get github.com/stvp/go-udp-testing
- - go get github.com/tobi/airbrake-go
- - go get github.com/getsentry/raven-go
+ - go get github.com/stretchr/testify/assert
+script:
+ - go test -race -v .
+ - cd hooks/null && go test -race -v .
diff --git a/vendor/github.com/Sirupsen/logrus/CHANGELOG.md b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
new file mode 100644
index 0000000000..63d415e12d
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/CHANGELOG.md
@@ -0,0 +1,100 @@
+# 1.0.0
+
+* Officially changed name to lower-case
+* bug: colors on Windows 10 (#541)
+* bug: fix race in accessing level (#512)
+
+# 0.11.5
+
+* feature: add writer and writerlevel to entry (#372)
+
+# 0.11.4
+
+* bug: fix undefined variable on solaris (#493)
+
+# 0.11.3
+
+* formatter: configure quoting of empty values (#484)
+* formatter: configure quoting character (default is `"`) (#484)
+* bug: fix not importing io correctly in non-linux environments (#481)
+
+# 0.11.2
+
+* bug: fix windows terminal detection (#476)
+
+# 0.11.1
+
+* bug: fix tty detection with custom out (#471)
+
+# 0.11.0
+
+* performance: Use bufferpool to allocate (#370)
+* terminal: terminal detection for app-engine (#343)
+* feature: exit handler (#375)
+
+# 0.10.0
+
+* feature: Add a test hook (#180)
+* feature: `ParseLevel` is now case-insensitive (#326)
+* feature: `FieldLogger` interface that generalizes `Logger` and `Entry` (#308)
+* performance: avoid re-allocations on `WithFields` (#335)
+
+# 0.9.0
+
+* logrus/text_formatter: don't emit empty msg
+* logrus/hooks/airbrake: move out of main repository
+* logrus/hooks/sentry: move out of main repository
+* logrus/hooks/papertrail: move out of main repository
+* logrus/hooks/bugsnag: move out of main repository
+* logrus/core: run tests with `-race`
+* logrus/core: detect TTY based on `stderr`
+* logrus/core: support `WithError` on logger
+* logrus/core: Solaris support
+
+# 0.8.7
+
+* logrus/core: fix possible race (#216)
+* logrus/doc: small typo fixes and doc improvements
+
+
+# 0.8.6
+
+* hooks/raven: allow passing an initialized client
+
+# 0.8.5
+
+* logrus/core: revert #208
+
+# 0.8.4
+
+* formatter/text: fix data race (#218)
+
+# 0.8.3
+
+* logrus/core: fix entry log level (#208)
+* logrus/core: improve performance of text formatter by 40%
+* logrus/core: expose `LevelHooks` type
+* logrus/core: add support for DragonflyBSD and NetBSD
+* formatter/text: print structs more verbosely
+
+# 0.8.2
+
+* logrus: fix more Fatal family functions
+
+# 0.8.1
+
+* logrus: fix not exiting on `Fatalf` and `Fatalln`
+
+# 0.8.0
+
+* logrus: defaults to stderr instead of stdout
+* hooks/sentry: add special field for `*http.Request`
+* formatter/text: ignore Windows for colors
+
+# 0.7.3
+
+* formatter/\*: allow configuration of timestamp layout
+
+# 0.7.2
+
+* formatter/text: Add configuration option for time format (#158)
diff --git a/vendor/github.com/Sirupsen/logrus/README.md b/vendor/github.com/Sirupsen/logrus/README.md
index b6aa84c987..cbe8b69625 100644
--- a/vendor/github.com/Sirupsen/logrus/README.md
+++ b/vendor/github.com/Sirupsen/logrus/README.md
@@ -1,17 +1,29 @@
-# Logrus
[](https://travis-ci.org/Sirupsen/logrus)
+# Logrus
[](https://travis-ci.org/sirupsen/logrus) [](https://godoc.org/github.com/sirupsen/logrus)
Logrus is a structured logger for Go (golang), completely API compatible with
the standard library logger. [Godoc][godoc]. **Please note the Logrus API is not
-yet stable (pre 1.0), the core API is unlikely change much but please version
-control your Logrus to make sure you aren't fetching latest `master` on every
-build.**
+yet stable (pre 1.0). Logrus itself is completely stable and has been used in
+many large deployments. The core API is unlikely to change much but please
+version control your Logrus to make sure you aren't fetching latest `master` on
+every build.**
+
+**Seeing weird case-sensitive problems?** Unfortunately, the author failed to
+realize the consequences of renaming to lower-case. Due to the Go package
+environment, this caused issues. Regretfully, there's no turning back now.
+Everything using `logrus` will need to use the lower-case:
+`github.com/sirupsen/logrus`. Any package that isn't, should be changed.
+
+I am terribly sorry for this inconvenience. Logrus strives hard for backwards
+compatibility, and the author failed to realize the cascading consequences of
+such a name-change. To fix Glide, see [these
+comments](https://github.com/sirupsen/logrus/issues/553#issuecomment-306591437).
Nicely color-coded in development (when a TTY is attached, otherwise just
plain text):

-With `log.Formatter = new(logrus.JSONFormatter)`, for easy parsing by logstash
+With `log.SetFormatter(&log.JSONFormatter{})`, for easy parsing by logstash
or Splunk:
```json
@@ -31,18 +43,26 @@ ocean","size":10,"time":"2014-03-10 19:57:38.562264131 -0400 EDT"}
"time":"2014-03-10 19:57:38.562543128 -0400 EDT"}
```
-With the default `log.Formatter = new(logrus.TextFormatter)` when a TTY is not
+With the default `log.SetFormatter(&log.TextFormatter{})` when a TTY is not
attached, the output is compatible with the
-[l2met](http://r.32k.io/l2met-introduction) format:
+[logfmt](http://godoc.org/github.com/kr/logfmt) format:
```text
-time="2014-04-20 15:36:23.830442383 -0400 EDT" level="info" msg="A group of walrus emerges from the ocean" animal="walrus" size=10
-time="2014-04-20 15:36:23.830584199 -0400 EDT" level="warning" msg="The group's number increased tremendously!" omg=true number=122
-time="2014-04-20 15:36:23.830596521 -0400 EDT" level="info" msg="A giant walrus appears!" animal="walrus" size=10
-time="2014-04-20 15:36:23.830611837 -0400 EDT" level="info" msg="Tremendously sized cow enters the ocean." animal="walrus" size=9
-time="2014-04-20 15:36:23.830626464 -0400 EDT" level="fatal" msg="The ice breaks!" omg=true number=100
+time="2015-03-26T01:27:38-04:00" level=debug msg="Started observing beach" animal=walrus number=8
+time="2015-03-26T01:27:38-04:00" level=info msg="A group of walrus emerges from the ocean" animal=walrus size=10
+time="2015-03-26T01:27:38-04:00" level=warning msg="The group's number increased tremendously!" number=122 omg=true
+time="2015-03-26T01:27:38-04:00" level=debug msg="Temperature changes" temperature=-4
+time="2015-03-26T01:27:38-04:00" level=panic msg="It's over 9000!" animal=orca size=9009
+time="2015-03-26T01:27:38-04:00" level=fatal msg="The ice breaks!" err=&{0x2082280c0 map[animal:orca size:9009] 2015-03-26 01:27:38.441574009 -0400 EDT panic It's over 9000!} number=100 omg=true
+exit status 1
```
+#### Case-sensitivity
+
+The organization's name was changed to lower-case--and this will not be changed
+back. If you are getting import conflicts due to case sensitivity, please use
+the lower-case import: `github.com/sirupsen/logrus`.
+
#### Example
The simplest way to use Logrus is simply the package-level exported logger:
@@ -51,7 +71,7 @@ The simplest way to use Logrus is simply the package-level exported logger:
package main
import (
- log "github.com/Sirupsen/logrus"
+ log "github.com/sirupsen/logrus"
)
func main() {
@@ -62,7 +82,7 @@ func main() {
```
Note that it's completely api-compatible with the stdlib logger, so you can
-replace your `log` imports everywhere with `log "github.com/Sirupsen/logrus"`
+replace your `log` imports everywhere with `log "github.com/sirupsen/logrus"`
and you'll now have the flexibility of Logrus. You can customize it all you
want:
@@ -71,20 +91,16 @@ package main
import (
"os"
- log "github.com/Sirupsen/logrus"
- "github.com/Sirupsen/logrus/hooks/airbrake"
+ log "github.com/sirupsen/logrus"
)
func init() {
// Log as JSON instead of the default ASCII formatter.
log.SetFormatter(&log.JSONFormatter{})
- // Use the Airbrake hook to report errors that have Error severity or above to
- // an exception tracker. You can create custom hooks, see the Hooks section.
- log.AddHook(&logrus_airbrake.AirbrakeHook{})
-
- // Output to stderr instead of stdout, could also be a file.
- log.SetOutput(os.Stderr)
+ // Output to stdout instead of the default stderr
+ // Can be any io.Writer, see below for File example
+ log.SetOutput(os.Stdout)
// Only log the warning severity or above.
log.SetLevel(log.WarnLevel)
@@ -105,6 +121,16 @@ func main() {
"omg": true,
"number": 100,
}).Fatal("The ice breaks!")
+
+ // A common pattern is to re-use fields between logging statements by re-using
+ // the logrus.Entry returned from WithFields()
+ contextLogger := log.WithFields(log.Fields{
+ "common": "this is a common field",
+ "other": "I also should be logged always",
+ })
+
+ contextLogger.Info("I'll be logged with common and other field")
+ contextLogger.Info("Me too")
}
```
@@ -115,7 +141,8 @@ application, you can also create an instance of the `logrus` Logger:
package main
import (
- "github.com/Sirupsen/logrus"
+ "os"
+ "github.com/sirupsen/logrus"
)
// Create a new instance of the logger. You can have any number of instances.
@@ -124,7 +151,15 @@ var log = logrus.New()
func main() {
// The API for setting attributes is a little different than the package level
// exported logger. See Godoc.
- log.Out = os.Stderr
+ log.Out = os.Stdout
+
+ // You could set this to any `io.Writer` such as a file
+ // file, err := os.OpenFile("logrus.log", os.O_CREATE|os.O_WRONLY, 0666)
+ // if err == nil {
+ // log.Out = file
+ // } else {
+ // log.Info("Failed to log to file, using default stderr")
+ // }
log.WithFields(logrus.Fields{
"animal": "walrus",
@@ -135,7 +170,7 @@ func main() {
#### Fields
-Logrus encourages careful, structured logging though logging fields instead of
+Logrus encourages careful, structured logging through logging fields instead of
long, unparseable error messages. For example, instead of: `log.Fatalf("Failed
to send event %s to topic %s with key %d")`, you should log the much more
discoverable:
@@ -157,60 +192,42 @@ In general, with Logrus using any of the `printf`-family functions should be
seen as a hint you should add a field, however, you can still use the
`printf`-family functions with Logrus.
+#### Default Fields
+
+Often it's helpful to have fields _always_ attached to log statements in an
+application or parts of one. For example, you may want to always log the
+`request_id` and `user_ip` in the context of a request. Instead of writing
+`log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})` on
+every line, you can create a `logrus.Entry` to pass around instead:
+
+```go
+requestLogger := log.WithFields(log.Fields{"request_id": request_id, "user_ip": user_ip})
+requestLogger.Info("something happened on that request") # will log request_id and user_ip
+requestLogger.Warn("something not great happened")
+```
+
#### Hooks
You can add hooks for logging levels. For example to send errors to an exception
tracking service on `Error`, `Fatal` and `Panic`, info to StatsD or log to
multiple places simultaneously, e.g. syslog.
-```go
-// Not the real implementation of the Airbrake hook. Just a simple sample.
-import (
- log "github.com/Sirupsen/logrus"
-)
-
-func init() {
- log.AddHook(new(AirbrakeHook))
-}
-
-type AirbrakeHook struct{}
-
-// `Fire()` takes the entry that the hook is fired for. `entry.Data[]` contains
-// the fields for the entry. See the Fields section of the README.
-func (hook *AirbrakeHook) Fire(entry *logrus.Entry) error {
- err := airbrake.Notify(entry.Data["error"].(error))
- if err != nil {
- log.WithFields(log.Fields{
- "source": "airbrake",
- "endpoint": airbrake.Endpoint,
- }).Info("Failed to send error to Airbrake")
- }
-
- return nil
-}
-
-// `Levels()` returns a slice of `Levels` the hook is fired for.
-func (hook *AirbrakeHook) Levels() []log.Level {
- return []log.Level{
- log.ErrorLevel,
- log.FatalLevel,
- log.PanicLevel,
- }
-}
-```
-
-Logrus comes with built-in hooks. Add those, or your custom hook, in `init`:
+Logrus comes with [built-in hooks](hooks/). Add those, or your custom hook, in
+`init`:
```go
import (
- log "github.com/Sirupsen/logrus"
- "github.com/Sirupsen/logrus/hooks/airbrake"
- "github.com/Sirupsen/logrus/hooks/syslog"
+ log "github.com/sirupsen/logrus"
+ "gopkg.in/gemnasium/logrus-airbrake-hook.v2" // the package is named "aibrake"
+ logrus_syslog "github.com/sirupsen/logrus/hooks/syslog"
"log/syslog"
)
func init() {
- log.AddHook(new(logrus_airbrake.AirbrakeHook))
+
+ // Use the Airbrake hook to report errors that have Error severity or above to
+ // an exception tracker. You can create custom hooks, see the Hooks section.
+ log.AddHook(airbrake.NewHook(123, "xyz", "production"))
hook, err := logrus_syslog.NewSyslogHook("udp", "localhost:514", syslog.LOG_INFO, "")
if err != nil {
@@ -220,23 +237,55 @@ func init() {
}
}
```
+Note: Syslog hook also support connecting to local syslog (Ex. "/dev/log" or "/var/run/syslog" or "/var/run/log"). For the detail, please check the [syslog hook README](hooks/syslog/README.md).
-* [`github.com/Sirupsen/logrus/hooks/airbrake`](https://github.com/Sirupsen/logrus/blob/master/hooks/airbrake/airbrake.go)
- Send errors to an exception tracking service compatible with the Airbrake API.
- Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes.
-
-* [`github.com/Sirupsen/logrus/hooks/papertrail`](https://github.com/Sirupsen/logrus/blob/master/hooks/papertrail/papertrail.go)
- Send errors to the Papertrail hosted logging service via UDP.
-
-* [`github.com/Sirupsen/logrus/hooks/syslog`](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go)
- Send errors to remote syslog server.
- Uses standard library `log/syslog` behind the scenes.
-
-* [`github.com/nubo/hiprus`](https://github.com/nubo/hiprus)
- Send errors to a channel in hipchat.
-
-* [`github.com/sebest/logrusly`](https://github.com/sebest/logrusly)
- Send logs to Loggly (https://www.loggly.com/)
+| Hook | Description |
+| ----- | ----------- |
+| [Airbrake "legacy"](https://github.com/gemnasium/logrus-airbrake-legacy-hook) | Send errors to an exception tracking service compatible with the Airbrake API V2. Uses [`airbrake-go`](https://github.com/tobi/airbrake-go) behind the scenes. |
+| [Airbrake](https://github.com/gemnasium/logrus-airbrake-hook) | Send errors to the Airbrake API V3. Uses the official [`gobrake`](https://github.com/airbrake/gobrake) behind the scenes. |
+| [Amazon Kinesis](https://github.com/evalphobia/logrus_kinesis) | Hook for logging to [Amazon Kinesis](https://aws.amazon.com/kinesis/) |
+| [Amqp-Hook](https://github.com/vladoatanasov/logrus_amqp) | Hook for logging to Amqp broker (Like RabbitMQ) |
+| [Bugsnag](https://github.com/Shopify/logrus-bugsnag/blob/master/bugsnag.go) | Send errors to the Bugsnag exception tracking service. |
+| [DeferPanic](https://github.com/deferpanic/dp-logrus) | Hook for logging to DeferPanic |
+| [Discordrus](https://github.com/kz/discordrus) | Hook for logging to [Discord](https://discordapp.com/) |
+| [ElasticSearch](https://github.com/sohlich/elogrus) | Hook for logging to ElasticSearch|
+| [Firehose](https://github.com/beaubrewer/logrus_firehose) | Hook for logging to [Amazon Firehose](https://aws.amazon.com/kinesis/firehose/)
+| [Fluentd](https://github.com/evalphobia/logrus_fluent) | Hook for logging to fluentd |
+| [Go-Slack](https://github.com/multiplay/go-slack) | Hook for logging to [Slack](https://slack.com) |
+| [Graylog](https://github.com/gemnasium/logrus-graylog-hook) | Hook for logging to [Graylog](http://graylog2.org/) |
+| [Hiprus](https://github.com/nubo/hiprus) | Send errors to a channel in hipchat. |
+| [Honeybadger](https://github.com/agonzalezro/logrus_honeybadger) | Hook for sending exceptions to Honeybadger |
+| [InfluxDB](https://github.com/Abramovic/logrus_influxdb) | Hook for logging to influxdb |
+| [Influxus](http://github.com/vlad-doru/influxus) | Hook for concurrently logging to [InfluxDB](http://influxdata.com/) |
+| [Journalhook](https://github.com/wercker/journalhook) | Hook for logging to `systemd-journald` |
+| [KafkaLogrus](https://github.com/goibibo/KafkaLogrus) | Hook for logging to kafka |
+| [LFShook](https://github.com/rifflock/lfshook) | Hook for logging to the local filesystem |
+| [Logentries](https://github.com/jcftang/logentriesrus) | Hook for logging to [Logentries](https://logentries.com/) |
+| [Logentrus](https://github.com/puddingfactory/logentrus) | Hook for logging to [Logentries](https://logentries.com/) |
+| [Logmatic.io](https://github.com/logmatic/logmatic-go) | Hook for logging to [Logmatic.io](http://logmatic.io/) |
+| [Logrusly](https://github.com/sebest/logrusly) | Send logs to [Loggly](https://www.loggly.com/) |
+| [Logstash](https://github.com/bshuster-repo/logrus-logstash-hook) | Hook for logging to [Logstash](https://www.elastic.co/products/logstash) |
+| [Mail](https://github.com/zbindenren/logrus_mail) | Hook for sending exceptions via mail |
+| [Mongodb](https://github.com/weekface/mgorus) | Hook for logging to mongodb |
+| [NATS-Hook](https://github.com/rybit/nats_logrus_hook) | Hook for logging to [NATS](https://nats.io) |
+| [Octokit](https://github.com/dorajistyle/logrus-octokit-hook) | Hook for logging to github via octokit |
+| [Papertrail](https://github.com/polds/logrus-papertrail-hook) | Send errors to the [Papertrail](https://papertrailapp.com) hosted logging service via UDP. |
+| [PostgreSQL](https://github.com/gemnasium/logrus-postgresql-hook) | Send logs to [PostgreSQL](http://postgresql.org) |
+| [Pushover](https://github.com/toorop/logrus_pushover) | Send error via [Pushover](https://pushover.net) |
+| [Raygun](https://github.com/squirkle/logrus-raygun-hook) | Hook for logging to [Raygun.io](http://raygun.io/) |
+| [Redis-Hook](https://github.com/rogierlommers/logrus-redis-hook) | Hook for logging to a ELK stack (through Redis) |
+| [Rollrus](https://github.com/heroku/rollrus) | Hook for sending errors to rollbar |
+| [Scribe](https://github.com/sagar8192/logrus-scribe-hook) | Hook for logging to [Scribe](https://github.com/facebookarchive/scribe)|
+| [Sentry](https://github.com/evalphobia/logrus_sentry) | Send errors to the Sentry error logging and aggregation service. |
+| [Slackrus](https://github.com/johntdyer/slackrus) | Hook for Slack chat. |
+| [Stackdriver](https://github.com/knq/sdhook) | Hook for logging to [Google Stackdriver](https://cloud.google.com/logging/) |
+| [Sumorus](https://github.com/doublefree/sumorus) | Hook for logging to [SumoLogic](https://www.sumologic.com/)|
+| [Syslog](https://github.com/Sirupsen/logrus/blob/master/hooks/syslog/syslog.go) | Send errors to remote syslog server. Uses standard library `log/syslog` behind the scenes. |
+| [Syslog TLS](https://github.com/shinji62/logrus-syslog-ng) | Send errors to remote syslog server with TLS support. |
+| [TraceView](https://github.com/evalphobia/logrus_appneta) | Hook for logging to [AppNeta TraceView](https://www.appneta.com/products/traceview/) |
+| [Typetalk](https://github.com/dragon3/logrus-typetalk-hook) | Hook for logging to [Typetalk](https://www.typetalk.in/) |
+| [logz.io](https://github.com/ripcurld00d/logrus-logzio-hook) | Hook for logging to [logz.io](https://logz.io), a Log as a Service using Logstash |
+| [SQS-Hook](https://github.com/tsarpaul/logrus_sqs) | Hook for logging to [Amazon Simple Queue Service (SQS)](https://aws.amazon.com/sqs/) |
#### Level logging
@@ -285,17 +334,17 @@ could do:
```go
import (
- log "github.com/Sirupsen/logrus"
+ log "github.com/sirupsen/logrus"
)
init() {
// do something here to set environment depending on an environment variable
// or command-line flag
if Environment == "production" {
- log.SetFormatter(logrus.JSONFormatter)
+ log.SetFormatter(&log.JSONFormatter{})
} else {
// The TextFormatter is default, you don't actually have to do this.
- log.SetFormatter(logrus.TextFormatter)
+ log.SetFormatter(&log.TextFormatter{})
}
}
```
@@ -312,12 +361,17 @@ The built-in logging formatters are:
without colors.
* *Note:* to force colored output when there is no TTY, set the `ForceColors`
field to `true`. To force no colored output even if there is a TTY set the
- `DisableColors` field to `true`
+ `DisableColors` field to `true`. For Windows, see
+ [github.com/mattn/go-colorable](https://github.com/mattn/go-colorable).
+ * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#TextFormatter).
* `logrus.JSONFormatter`. Logs fields as JSON.
+ * All options are listed in the [generated docs](https://godoc.org/github.com/sirupsen/logrus#JSONFormatter).
Third party logging formatters:
-* [`zalgo`](https://github.com/aybabtme/logzalgo): invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
+* [`logstash`](https://github.com/bshuster-repo/logrus-logstash-hook). Logs fields as [Logstash](http://logstash.net) Events.
+* [`prefixed`](https://github.com/x-cray/logrus-prefixed-formatter). Displays log entry source along with alternative layout.
+* [`zalgo`](https://github.com/aybabtme/logzalgo). Invoking the P͉̫o̳̼̊w̖͈̰͎e̬͔̭͂r͚̼̹̲ ̫͓͉̳͈ō̠͕͖̚f̝͍̠ ͕̲̞͖͑Z̖̫̤̫ͪa͉̬͈̗l͖͎g̳̥o̰̥̅!̣͔̲̻͊̄ ̙̘̦̹̦.
You can define your formatter by implementing the `Formatter` interface,
requiring a `Format` method. `Format` takes an `*Entry`. `entry.Data` is a
@@ -330,7 +384,7 @@ type MyJSONFormatter struct {
log.SetFormatter(new(MyJSONFormatter))
-func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
+func (f *MyJSONFormatter) Format(entry *Entry) ([]byte, error) {
// Note this doesn't include Time, Level and Message which are available on
// the Entry. Consult `godoc` on information about those fields or read the
// source of the official loggers.
@@ -342,11 +396,106 @@ func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
}
```
+#### Logger as an `io.Writer`
+
+Logrus can be transformed into an `io.Writer`. That writer is the end of an `io.Pipe` and it is your responsibility to close it.
+
+```go
+w := logger.Writer()
+defer w.Close()
+
+srv := http.Server{
+ // create a stdlib log.Logger that writes to
+ // logrus.Logger.
+ ErrorLog: log.New(w, "", 0),
+}
+```
+
+Each line written to that writer will be printed the usual way, using formatters
+and hooks. The level for those entries is `info`.
+
+This means that we can override the standard library logger easily:
+
+```go
+logger := logrus.New()
+logger.Formatter = &logrus.JSONFormatter{}
+
+// Use logrus for standard log output
+// Note that `log` here references stdlib's log
+// Not logrus imported under the name `log`.
+log.SetOutput(logger.Writer())
+```
+
#### Rotation
Log rotation is not provided with Logrus. Log rotation should be done by an
-external program (like `logrotated(8)`) that can compress and delete old log
+external program (like `logrotate(8)`) that can compress and delete old log
entries. It should not be a feature of the application-level logger.
+#### Tools
-[godoc]: https://godoc.org/github.com/Sirupsen/logrus
+| Tool | Description |
+| ---- | ----------- |
+|[Logrus Mate](https://github.com/gogap/logrus_mate)|Logrus mate is a tool for Logrus to manage loggers, you can initial logger's level, hook and formatter by config file, the logger will generated with different config at different environment.|
+|[Logrus Viper Helper](https://github.com/heirko/go-contrib/tree/master/logrusHelper)|An Helper around Logrus to wrap with spf13/Viper to load configuration with fangs! And to simplify Logrus configuration use some behavior of [Logrus Mate](https://github.com/gogap/logrus_mate). [sample](https://github.com/heirko/iris-contrib/blob/master/middleware/logrus-logger/example) |
+
+#### Testing
+
+Logrus has a built in facility for asserting the presence of log messages. This is implemented through the `test` hook and provides:
+
+* decorators for existing logger (`test.NewLocal` and `test.NewGlobal`) which basically just add the `test` hook
+* a test logger (`test.NewNullLogger`) that just records log messages (and does not output any):
+
+```go
+import(
+ "github.com/sirupsen/logrus"
+ "github.com/sirupsen/logrus/hooks/null"
+ "github.com/stretchr/testify/assert"
+ "testing"
+)
+
+func TestSomething(t*testing.T){
+ logger, hook := null.NewNullLogger()
+ logger.Error("Helloerror")
+
+ assert.Equal(t, 1, len(hook.Entries))
+ assert.Equal(t, logrus.ErrorLevel, hook.LastEntry().Level)
+ assert.Equal(t, "Helloerror", hook.LastEntry().Message)
+
+ hook.Reset()
+ assert.Nil(t, hook.LastEntry())
+}
+```
+
+#### Fatal handlers
+
+Logrus can register one or more functions that will be called when any `fatal`
+level message is logged. The registered handlers will be executed before
+logrus performs a `os.Exit(1)`. This behavior may be helpful if callers need
+to gracefully shutdown. Unlike a `panic("Something went wrong...")` call which can be intercepted with a deferred `recover` a call to `os.Exit(1)` can not be intercepted.
+
+```
+...
+handler := func() {
+ // gracefully shutdown something...
+}
+logrus.RegisterExitHandler(handler)
+...
+```
+
+#### Thread safety
+
+By default Logger is protected by mutex for concurrent writes, this mutex is invoked when calling hooks and writing logs.
+If you are sure such locking is not needed, you can call logger.SetNoLock() to disable the locking.
+
+Situation when locking is not needed includes:
+
+* You have no hooks registered, or hooks calling is already thread-safe.
+
+* Writing to logger.Out is already thread-safe, for example:
+
+ 1) logger.Out is protected by locks.
+
+ 2) logger.Out is a os.File handler opened with `O_APPEND` flag, and every write is smaller than 4k. (This allow multi-thread/multi-process writing)
+
+ (Refer to http://www.notthewizard.com/2014/06/17/are-files-appends-really-atomic/)
diff --git a/vendor/github.com/Sirupsen/logrus/alt_exit.go b/vendor/github.com/Sirupsen/logrus/alt_exit.go
new file mode 100644
index 0000000000..8af90637a9
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/alt_exit.go
@@ -0,0 +1,64 @@
+package logrus
+
+// The following code was sourced and modified from the
+// https://github.com/tebeka/atexit package governed by the following license:
+//
+// Copyright (c) 2012 Miki Tebeka .
+//
+// Permission is hereby granted, free of charge, to any person obtaining a copy of
+// this software and associated documentation files (the "Software"), to deal in
+// the Software without restriction, including without limitation the rights to
+// use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of
+// the Software, and to permit persons to whom the Software is furnished to do so,
+// subject to the following conditions:
+//
+// The above copyright notice and this permission notice shall be included in all
+// copies or substantial portions of the Software.
+//
+// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS
+// FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR
+// COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER
+// IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
+// CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+
+import (
+ "fmt"
+ "os"
+)
+
+var handlers = []func(){}
+
+func runHandler(handler func()) {
+ defer func() {
+ if err := recover(); err != nil {
+ fmt.Fprintln(os.Stderr, "Error: Logrus exit handler error:", err)
+ }
+ }()
+
+ handler()
+}
+
+func runHandlers() {
+ for _, handler := range handlers {
+ runHandler(handler)
+ }
+}
+
+// Exit runs all the Logrus atexit handlers and then terminates the program using os.Exit(code)
+func Exit(code int) {
+ runHandlers()
+ os.Exit(code)
+}
+
+// RegisterExitHandler adds a Logrus Exit handler, call logrus.Exit to invoke
+// all handlers. The handlers will also be invoked when any Fatal log entry is
+// made.
+//
+// This method is useful when a caller wishes to use logrus to log a fatal
+// message but also needs to gracefully shutdown. An example usecase could be
+// closing database connections, or sending a alert that the application is
+// closing.
+func RegisterExitHandler(handler func()) {
+ handlers = append(handlers, handler)
+}
diff --git a/vendor/github.com/Sirupsen/logrus/doc.go b/vendor/github.com/Sirupsen/logrus/doc.go
new file mode 100644
index 0000000000..da67aba06d
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/doc.go
@@ -0,0 +1,26 @@
+/*
+Package logrus is a structured logger for Go, completely API compatible with the standard library logger.
+
+
+The simplest way to use Logrus is simply the package-level exported logger:
+
+ package main
+
+ import (
+ log "github.com/sirupsen/logrus"
+ )
+
+ func main() {
+ log.WithFields(log.Fields{
+ "animal": "walrus",
+ "number": 1,
+ "size": 10,
+ }).Info("A walrus appears")
+ }
+
+Output:
+ time="2015-09-07T08:48:33Z" level=info msg="A walrus appears" animal=walrus number=1 size=10
+
+For a full guide visit https://github.com/sirupsen/logrus
+*/
+package logrus
diff --git a/vendor/github.com/Sirupsen/logrus/entry.go b/vendor/github.com/Sirupsen/logrus/entry.go
index e164eecb5f..320e5d5b8b 100644
--- a/vendor/github.com/Sirupsen/logrus/entry.go
+++ b/vendor/github.com/Sirupsen/logrus/entry.go
@@ -3,11 +3,24 @@ package logrus
import (
"bytes"
"fmt"
- "io"
"os"
+ "sync"
"time"
)
+var bufferPool *sync.Pool
+
+func init() {
+ bufferPool = &sync.Pool{
+ New: func() interface{} {
+ return new(bytes.Buffer)
+ },
+ }
+}
+
+// Defines the key when adding errors using WithError.
+var ErrorKey = "error"
+
// An entry is the final or intermediate Logrus logging entry. It contains all
// the fields passed with WithField{,s}. It's finally logged when Debug, Info,
// Warn, Error, Fatal or Panic is called on it. These objects can be reused and
@@ -26,6 +39,9 @@ type Entry struct {
// Message passed to Debug, Info, Warn, Error, Fatal or Panic
Message string
+
+ // When formatter is called in entry.log(), an Buffer may be set to entry
+ Buffer *bytes.Buffer
}
func NewEntry(logger *Logger) *Entry {
@@ -36,21 +52,20 @@ func NewEntry(logger *Logger) *Entry {
}
}
-// Returns a reader for the entry, which is a proxy to the formatter.
-func (entry *Entry) Reader() (*bytes.Buffer, error) {
- serialized, err := entry.Logger.Formatter.Format(entry)
- return bytes.NewBuffer(serialized), err
-}
-
// Returns the string representation from the reader and ultimately the
// formatter.
func (entry *Entry) String() (string, error) {
- reader, err := entry.Reader()
+ serialized, err := entry.Logger.Formatter.Format(entry)
if err != nil {
return "", err
}
+ str := string(serialized)
+ return str, nil
+}
- return reader.String(), err
+// Add an error as single field (using the key defined in ErrorKey) to the Entry.
+func (entry *Entry) WithError(err error) *Entry {
+ return entry.WithField(ErrorKey, err)
}
// Add a single field to the Entry.
@@ -60,7 +75,7 @@ func (entry *Entry) WithField(key string, value interface{}) *Entry {
// Add a map of fields to the Entry.
func (entry *Entry) WithFields(fields Fields) *Entry {
- data := Fields{}
+ data := make(Fields, len(entry.Data)+len(fields))
for k, v := range entry.Data {
data[k] = v
}
@@ -70,42 +85,48 @@ func (entry *Entry) WithFields(fields Fields) *Entry {
return &Entry{Logger: entry.Logger, Data: data}
}
-func (entry *Entry) log(level Level, msg string) {
+// This function is not declared with a pointer value because otherwise
+// race conditions will occur when using multiple goroutines
+func (entry Entry) log(level Level, msg string) {
+ var buffer *bytes.Buffer
entry.Time = time.Now()
entry.Level = level
entry.Message = msg
- if err := entry.Logger.Hooks.Fire(level, entry); err != nil {
+ if err := entry.Logger.Hooks.Fire(level, &entry); err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to fire hook: %v\n", err)
entry.Logger.mu.Unlock()
}
-
- reader, err := entry.Reader()
+ buffer = bufferPool.Get().(*bytes.Buffer)
+ buffer.Reset()
+ defer bufferPool.Put(buffer)
+ entry.Buffer = buffer
+ serialized, err := entry.Logger.Formatter.Format(&entry)
+ entry.Buffer = nil
if err != nil {
entry.Logger.mu.Lock()
fmt.Fprintf(os.Stderr, "Failed to obtain reader, %v\n", err)
entry.Logger.mu.Unlock()
- }
-
- entry.Logger.mu.Lock()
- defer entry.Logger.mu.Unlock()
-
- _, err = io.Copy(entry.Logger.Out, reader)
- if err != nil {
- fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+ } else {
+ entry.Logger.mu.Lock()
+ _, err = entry.Logger.Out.Write(serialized)
+ if err != nil {
+ fmt.Fprintf(os.Stderr, "Failed to write to log, %v\n", err)
+ }
+ entry.Logger.mu.Unlock()
}
// To avoid Entry#log() returning a value that only would make sense for
// panic() to use in Entry#Panic(), we avoid the allocation by checking
// directly here.
if level <= PanicLevel {
- panic(entry)
+ panic(&entry)
}
}
func (entry *Entry) Debug(args ...interface{}) {
- if entry.Logger.Level >= DebugLevel {
+ if entry.Logger.level() >= DebugLevel {
entry.log(DebugLevel, fmt.Sprint(args...))
}
}
@@ -115,32 +136,36 @@ func (entry *Entry) Print(args ...interface{}) {
}
func (entry *Entry) Info(args ...interface{}) {
- if entry.Logger.Level >= InfoLevel {
+ if entry.Logger.level() >= InfoLevel {
entry.log(InfoLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Warn(args ...interface{}) {
- if entry.Logger.Level >= WarnLevel {
+ if entry.Logger.level() >= WarnLevel {
entry.log(WarnLevel, fmt.Sprint(args...))
}
}
+func (entry *Entry) Warning(args ...interface{}) {
+ entry.Warn(args...)
+}
+
func (entry *Entry) Error(args ...interface{}) {
- if entry.Logger.Level >= ErrorLevel {
+ if entry.Logger.level() >= ErrorLevel {
entry.log(ErrorLevel, fmt.Sprint(args...))
}
}
func (entry *Entry) Fatal(args ...interface{}) {
- if entry.Logger.Level >= FatalLevel {
+ if entry.Logger.level() >= FatalLevel {
entry.log(FatalLevel, fmt.Sprint(args...))
}
- os.Exit(1)
+ Exit(1)
}
func (entry *Entry) Panic(args ...interface{}) {
- if entry.Logger.Level >= PanicLevel {
+ if entry.Logger.level() >= PanicLevel {
entry.log(PanicLevel, fmt.Sprint(args...))
}
panic(fmt.Sprint(args...))
@@ -149,13 +174,13 @@ func (entry *Entry) Panic(args ...interface{}) {
// Entry Printf family functions
func (entry *Entry) Debugf(format string, args ...interface{}) {
- if entry.Logger.Level >= DebugLevel {
+ if entry.Logger.level() >= DebugLevel {
entry.Debug(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Infof(format string, args ...interface{}) {
- if entry.Logger.Level >= InfoLevel {
+ if entry.Logger.level() >= InfoLevel {
entry.Info(fmt.Sprintf(format, args...))
}
}
@@ -165,7 +190,7 @@ func (entry *Entry) Printf(format string, args ...interface{}) {
}
func (entry *Entry) Warnf(format string, args ...interface{}) {
- if entry.Logger.Level >= WarnLevel {
+ if entry.Logger.level() >= WarnLevel {
entry.Warn(fmt.Sprintf(format, args...))
}
}
@@ -175,19 +200,20 @@ func (entry *Entry) Warningf(format string, args ...interface{}) {
}
func (entry *Entry) Errorf(format string, args ...interface{}) {
- if entry.Logger.Level >= ErrorLevel {
+ if entry.Logger.level() >= ErrorLevel {
entry.Error(fmt.Sprintf(format, args...))
}
}
func (entry *Entry) Fatalf(format string, args ...interface{}) {
- if entry.Logger.Level >= FatalLevel {
+ if entry.Logger.level() >= FatalLevel {
entry.Fatal(fmt.Sprintf(format, args...))
}
+ Exit(1)
}
func (entry *Entry) Panicf(format string, args ...interface{}) {
- if entry.Logger.Level >= PanicLevel {
+ if entry.Logger.level() >= PanicLevel {
entry.Panic(fmt.Sprintf(format, args...))
}
}
@@ -195,13 +221,13 @@ func (entry *Entry) Panicf(format string, args ...interface{}) {
// Entry Println family functions
func (entry *Entry) Debugln(args ...interface{}) {
- if entry.Logger.Level >= DebugLevel {
+ if entry.Logger.level() >= DebugLevel {
entry.Debug(entry.sprintlnn(args...))
}
}
func (entry *Entry) Infoln(args ...interface{}) {
- if entry.Logger.Level >= InfoLevel {
+ if entry.Logger.level() >= InfoLevel {
entry.Info(entry.sprintlnn(args...))
}
}
@@ -211,7 +237,7 @@ func (entry *Entry) Println(args ...interface{}) {
}
func (entry *Entry) Warnln(args ...interface{}) {
- if entry.Logger.Level >= WarnLevel {
+ if entry.Logger.level() >= WarnLevel {
entry.Warn(entry.sprintlnn(args...))
}
}
@@ -221,19 +247,20 @@ func (entry *Entry) Warningln(args ...interface{}) {
}
func (entry *Entry) Errorln(args ...interface{}) {
- if entry.Logger.Level >= ErrorLevel {
+ if entry.Logger.level() >= ErrorLevel {
entry.Error(entry.sprintlnn(args...))
}
}
func (entry *Entry) Fatalln(args ...interface{}) {
- if entry.Logger.Level >= FatalLevel {
+ if entry.Logger.level() >= FatalLevel {
entry.Fatal(entry.sprintlnn(args...))
}
+ Exit(1)
}
func (entry *Entry) Panicln(args ...interface{}) {
- if entry.Logger.Level >= PanicLevel {
+ if entry.Logger.level() >= PanicLevel {
entry.Panic(entry.sprintlnn(args...))
}
}
diff --git a/vendor/github.com/Sirupsen/logrus/exported.go b/vendor/github.com/Sirupsen/logrus/exported.go
index d087124481..1aeaa90ba2 100644
--- a/vendor/github.com/Sirupsen/logrus/exported.go
+++ b/vendor/github.com/Sirupsen/logrus/exported.go
@@ -9,6 +9,10 @@ var (
std = New()
)
+func StandardLogger() *Logger {
+ return std
+}
+
// SetOutput sets the standard logger output.
func SetOutput(out io.Writer) {
std.mu.Lock()
@@ -27,12 +31,14 @@ func SetFormatter(formatter Formatter) {
func SetLevel(level Level) {
std.mu.Lock()
defer std.mu.Unlock()
- std.Level = level
+ std.setLevel(level)
}
// GetLevel returns the standard logger level.
func GetLevel() Level {
- return std.Level
+ std.mu.Lock()
+ defer std.mu.Unlock()
+ return std.level()
}
// AddHook adds a hook to the standard logger hooks.
@@ -42,6 +48,11 @@ func AddHook(hook Hook) {
std.Hooks.Add(hook)
}
+// WithError creates an entry from the standard logger and adds an error to it, using the value defined in ErrorKey as key.
+func WithError(err error) *Entry {
+ return std.WithField(ErrorKey, err)
+}
+
// WithField creates an entry from the standard logger and adds a field to
// it. If you want multiple fields, use `WithFields`.
//
diff --git a/vendor/github.com/Sirupsen/logrus/formatter.go b/vendor/github.com/Sirupsen/logrus/formatter.go
index 038ce9fd29..b5fbe934d1 100644
--- a/vendor/github.com/Sirupsen/logrus/formatter.go
+++ b/vendor/github.com/Sirupsen/logrus/formatter.go
@@ -1,5 +1,9 @@
package logrus
+import "time"
+
+const DefaultTimestampFormat = time.RFC3339
+
// The Formatter interface is used to implement a custom Formatter. It takes an
// `Entry`. It exposes all the fields, including the default ones:
//
@@ -27,18 +31,15 @@ type Formatter interface {
// It's not exported because it's still using Data in an opinionated way. It's to
// avoid code duplication between the two default formatters.
func prefixFieldClashes(data Fields) {
- _, ok := data["time"]
- if ok {
- data["fields.time"] = data["time"]
+ if t, ok := data["time"]; ok {
+ data["fields.time"] = t
}
- _, ok = data["msg"]
- if ok {
- data["fields.msg"] = data["msg"]
+ if m, ok := data["msg"]; ok {
+ data["fields.msg"] = m
}
- _, ok = data["level"]
- if ok {
- data["fields.level"] = data["level"]
+ if l, ok := data["level"]; ok {
+ data["fields.level"] = l
}
}
diff --git a/vendor/github.com/Sirupsen/logrus/hooks.go b/vendor/github.com/Sirupsen/logrus/hooks.go
index 0da2b3653f..3f151cdc39 100644
--- a/vendor/github.com/Sirupsen/logrus/hooks.go
+++ b/vendor/github.com/Sirupsen/logrus/hooks.go
@@ -11,11 +11,11 @@ type Hook interface {
}
// Internal type for storing the hooks on a logger instance.
-type levelHooks map[Level][]Hook
+type LevelHooks map[Level][]Hook
// Add a hook to an instance of logger. This is called with
// `log.Hooks.Add(new(MyHook))` where `MyHook` implements the `Hook` interface.
-func (hooks levelHooks) Add(hook Hook) {
+func (hooks LevelHooks) Add(hook Hook) {
for _, level := range hook.Levels() {
hooks[level] = append(hooks[level], hook)
}
@@ -23,7 +23,7 @@ func (hooks levelHooks) Add(hook Hook) {
// Fire all the hooks for the passed level. Used by `entry.log` to fire
// appropriate hooks for a log entry.
-func (hooks levelHooks) Fire(level Level, entry *Entry) error {
+func (hooks LevelHooks) Fire(level Level, entry *Entry) error {
for _, hook := range hooks[level] {
if err := hook.Fire(entry); err != nil {
return err
diff --git a/vendor/github.com/Sirupsen/logrus/json_formatter.go b/vendor/github.com/Sirupsen/logrus/json_formatter.go
index b09227c2b5..e787ea1750 100644
--- a/vendor/github.com/Sirupsen/logrus/json_formatter.go
+++ b/vendor/github.com/Sirupsen/logrus/json_formatter.go
@@ -3,20 +3,68 @@ package logrus
import (
"encoding/json"
"fmt"
- "time"
)
-type JSONFormatter struct{}
+type fieldKey string
+type FieldMap map[fieldKey]string
+
+const (
+ FieldKeyMsg = "msg"
+ FieldKeyLevel = "level"
+ FieldKeyTime = "time"
+)
+
+func (f FieldMap) resolve(key fieldKey) string {
+ if k, ok := f[key]; ok {
+ return k
+ }
+
+ return string(key)
+}
+
+type JSONFormatter struct {
+ // TimestampFormat sets the format used for marshaling timestamps.
+ TimestampFormat string
+
+ // DisableTimestamp allows disabling automatic timestamps in output
+ DisableTimestamp bool
+
+ // FieldMap allows users to customize the names of keys for various fields.
+ // As an example:
+ // formatter := &JSONFormatter{
+ // FieldMap: FieldMap{
+ // FieldKeyTime: "@timestamp",
+ // FieldKeyLevel: "@level",
+ // FieldKeyMsg: "@message",
+ // },
+ // }
+ FieldMap FieldMap
+}
func (f *JSONFormatter) Format(entry *Entry) ([]byte, error) {
data := make(Fields, len(entry.Data)+3)
for k, v := range entry.Data {
- data[k] = v
+ switch v := v.(type) {
+ case error:
+ // Otherwise errors are ignored by `encoding/json`
+ // https://github.com/sirupsen/logrus/issues/137
+ data[k] = v.Error()
+ default:
+ data[k] = v
+ }
}
prefixFieldClashes(data)
- data["time"] = entry.Time.Format(time.RFC3339)
- data["msg"] = entry.Message
- data["level"] = entry.Level.String()
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
+
+ if !f.DisableTimestamp {
+ data[f.FieldMap.resolve(FieldKeyTime)] = entry.Time.Format(timestampFormat)
+ }
+ data[f.FieldMap.resolve(FieldKeyMsg)] = entry.Message
+ data[f.FieldMap.resolve(FieldKeyLevel)] = entry.Level.String()
serialized, err := json.Marshal(data)
if err != nil {
diff --git a/vendor/github.com/Sirupsen/logrus/logger.go b/vendor/github.com/Sirupsen/logrus/logger.go
index b392e547a7..370fff5d1b 100644
--- a/vendor/github.com/Sirupsen/logrus/logger.go
+++ b/vendor/github.com/Sirupsen/logrus/logger.go
@@ -4,17 +4,18 @@ import (
"io"
"os"
"sync"
+ "sync/atomic"
)
type Logger struct {
// The logs are `io.Copy`'d to this in a mutex. It's common to set this to a
- // file, or leave it default which is `os.Stdout`. You can also set this to
+ // file, or leave it default which is `os.Stderr`. You can also set this to
// something more adventorous, such as logging to Kafka.
Out io.Writer
// Hooks for the logger instance. These allow firing events based on logging
// levels and log entries. For example, to send errors to an error tracking
// service, log to StatsD or dump the core on fatal errors.
- Hooks levelHooks
+ Hooks LevelHooks
// All log entries pass through the formatter before logged to Out. The
// included formatters are `TextFormatter` and `JSONFormatter` for which
// TextFormatter is the default. In development (when a TTY is attached) it
@@ -26,8 +27,31 @@ type Logger struct {
// to) `logrus.Info`, which allows Info(), Warn(), Error() and Fatal() to be
// logged. `logrus.Debug` is useful in
Level Level
- // Used to sync writing to the log.
- mu sync.Mutex
+ // Used to sync writing to the log. Locking is enabled by Default
+ mu MutexWrap
+ // Reusable empty entry
+ entryPool sync.Pool
+}
+
+type MutexWrap struct {
+ lock sync.Mutex
+ disabled bool
+}
+
+func (mw *MutexWrap) Lock() {
+ if !mw.disabled {
+ mw.lock.Lock()
+ }
+}
+
+func (mw *MutexWrap) Unlock() {
+ if !mw.disabled {
+ mw.lock.Unlock()
+ }
+}
+
+func (mw *MutexWrap) Disable() {
+ mw.disabled = true
}
// Creates a new logger. Configuration should be set by changing `Formatter`,
@@ -37,125 +61,257 @@ type Logger struct {
// var log = &Logger{
// Out: os.Stderr,
// Formatter: new(JSONFormatter),
-// Hooks: make(levelHooks),
+// Hooks: make(LevelHooks),
// Level: logrus.DebugLevel,
// }
//
// It's recommended to make this a global instance called `log`.
func New() *Logger {
return &Logger{
- Out: os.Stdout,
+ Out: os.Stderr,
Formatter: new(TextFormatter),
- Hooks: make(levelHooks),
+ Hooks: make(LevelHooks),
Level: InfoLevel,
}
}
-// Adds a field to the log entry, note that you it doesn't log until you call
+func (logger *Logger) newEntry() *Entry {
+ entry, ok := logger.entryPool.Get().(*Entry)
+ if ok {
+ return entry
+ }
+ return NewEntry(logger)
+}
+
+func (logger *Logger) releaseEntry(entry *Entry) {
+ logger.entryPool.Put(entry)
+}
+
+// Adds a field to the log entry, note that it doesn't log until you call
// Debug, Print, Info, Warn, Fatal or Panic. It only creates a log entry.
-// Ff you want multiple fields, use `WithFields`.
+// If you want multiple fields, use `WithFields`.
func (logger *Logger) WithField(key string, value interface{}) *Entry {
- return NewEntry(logger).WithField(key, value)
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithField(key, value)
}
// Adds a struct of fields to the log entry. All it does is call `WithField` for
// each `Field`.
func (logger *Logger) WithFields(fields Fields) *Entry {
- return NewEntry(logger).WithFields(fields)
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithFields(fields)
+}
+
+// Add an error as single field to the log entry. All it does is call
+// `WithError` for the given `error`.
+func (logger *Logger) WithError(err error) *Entry {
+ entry := logger.newEntry()
+ defer logger.releaseEntry(entry)
+ return entry.WithError(err)
}
func (logger *Logger) Debugf(format string, args ...interface{}) {
- NewEntry(logger).Debugf(format, args...)
+ if logger.level() >= DebugLevel {
+ entry := logger.newEntry()
+ entry.Debugf(format, args...)
+ logger.releaseEntry(entry)
+ }
}
func (logger *Logger) Infof(format string, args ...interface{}) {
- NewEntry(logger).Infof(format, args...)
+ if logger.level() >= InfoLevel {
+ entry := logger.newEntry()
+ entry.Infof(format, args...)
+ logger.releaseEntry(entry)
+ }
}
func (logger *Logger) Printf(format string, args ...interface{}) {
- NewEntry(logger).Printf(format, args...)
+ entry := logger.newEntry()
+ entry.Printf(format, args...)
+ logger.releaseEntry(entry)
}
func (logger *Logger) Warnf(format string, args ...interface{}) {
- NewEntry(logger).Warnf(format, args...)
+ if logger.level() >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnf(format, args...)
+ logger.releaseEntry(entry)
+ }
}
func (logger *Logger) Warningf(format string, args ...interface{}) {
- NewEntry(logger).Warnf(format, args...)
+ if logger.level() >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnf(format, args...)
+ logger.releaseEntry(entry)
+ }
}
func (logger *Logger) Errorf(format string, args ...interface{}) {
- NewEntry(logger).Errorf(format, args...)
+ if logger.level() >= ErrorLevel {
+ entry := logger.newEntry()
+ entry.Errorf(format, args...)
+ logger.releaseEntry(entry)
+ }
}
func (logger *Logger) Fatalf(format string, args ...interface{}) {
- NewEntry(logger).Fatalf(format, args...)
+ if logger.level() >= FatalLevel {
+ entry := logger.newEntry()
+ entry.Fatalf(format, args...)
+ logger.releaseEntry(entry)
+ }
+ Exit(1)
}
func (logger *Logger) Panicf(format string, args ...interface{}) {
- NewEntry(logger).Panicf(format, args...)
+ if logger.level() >= PanicLevel {
+ entry := logger.newEntry()
+ entry.Panicf(format, args...)
+ logger.releaseEntry(entry)
+ }
}
func (logger *Logger) Debug(args ...interface{}) {
- NewEntry(logger).Debug(args...)
+ if logger.level() >= DebugLevel {
+ entry := logger.newEntry()
+ entry.Debug(args...)
+ logger.releaseEntry(entry)
+ }
}
func (logger *Logger) Info(args ...interface{}) {
- NewEntry(logger).Info(args...)
+ if logger.level() >= InfoLevel {
+ entry := logger.newEntry()
+ entry.Info(args...)
+ logger.releaseEntry(entry)
+ }
}
func (logger *Logger) Print(args ...interface{}) {
- NewEntry(logger).Info(args...)
+ entry := logger.newEntry()
+ entry.Info(args...)
+ logger.releaseEntry(entry)
}
func (logger *Logger) Warn(args ...interface{}) {
- NewEntry(logger).Warn(args...)
+ if logger.level() >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warn(args...)
+ logger.releaseEntry(entry)
+ }
}
func (logger *Logger) Warning(args ...interface{}) {
- NewEntry(logger).Warn(args...)
+ if logger.level() >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warn(args...)
+ logger.releaseEntry(entry)
+ }
}
func (logger *Logger) Error(args ...interface{}) {
- NewEntry(logger).Error(args...)
+ if logger.level() >= ErrorLevel {
+ entry := logger.newEntry()
+ entry.Error(args...)
+ logger.releaseEntry(entry)
+ }
}
func (logger *Logger) Fatal(args ...interface{}) {
- NewEntry(logger).Fatal(args...)
+ if logger.level() >= FatalLevel {
+ entry := logger.newEntry()
+ entry.Fatal(args...)
+ logger.releaseEntry(entry)
+ }
+ Exit(1)
}
func (logger *Logger) Panic(args ...interface{}) {
- NewEntry(logger).Panic(args...)
+ if logger.level() >= PanicLevel {
+ entry := logger.newEntry()
+ entry.Panic(args...)
+ logger.releaseEntry(entry)
+ }
}
func (logger *Logger) Debugln(args ...interface{}) {
- NewEntry(logger).Debugln(args...)
+ if logger.level() >= DebugLevel {
+ entry := logger.newEntry()
+ entry.Debugln(args...)
+ logger.releaseEntry(entry)
+ }
}
func (logger *Logger) Infoln(args ...interface{}) {
- NewEntry(logger).Infoln(args...)
+ if logger.level() >= InfoLevel {
+ entry := logger.newEntry()
+ entry.Infoln(args...)
+ logger.releaseEntry(entry)
+ }
}
func (logger *Logger) Println(args ...interface{}) {
- NewEntry(logger).Println(args...)
+ entry := logger.newEntry()
+ entry.Println(args...)
+ logger.releaseEntry(entry)
}
func (logger *Logger) Warnln(args ...interface{}) {
- NewEntry(logger).Warnln(args...)
+ if logger.level() >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnln(args...)
+ logger.releaseEntry(entry)
+ }
}
func (logger *Logger) Warningln(args ...interface{}) {
- NewEntry(logger).Warnln(args...)
+ if logger.level() >= WarnLevel {
+ entry := logger.newEntry()
+ entry.Warnln(args...)
+ logger.releaseEntry(entry)
+ }
}
func (logger *Logger) Errorln(args ...interface{}) {
- NewEntry(logger).Errorln(args...)
+ if logger.level() >= ErrorLevel {
+ entry := logger.newEntry()
+ entry.Errorln(args...)
+ logger.releaseEntry(entry)
+ }
}
func (logger *Logger) Fatalln(args ...interface{}) {
- NewEntry(logger).Fatalln(args...)
+ if logger.level() >= FatalLevel {
+ entry := logger.newEntry()
+ entry.Fatalln(args...)
+ logger.releaseEntry(entry)
+ }
+ Exit(1)
}
func (logger *Logger) Panicln(args ...interface{}) {
- NewEntry(logger).Panicln(args...)
+ if logger.level() >= PanicLevel {
+ entry := logger.newEntry()
+ entry.Panicln(args...)
+ logger.releaseEntry(entry)
+ }
+}
+
+//When file is opened with appending mode, it's safe to
+//write concurrently to a file (within 4k message on Linux).
+//In these cases user can choose to disable the lock.
+func (logger *Logger) SetNoLock() {
+ logger.mu.Disable()
+}
+
+func (logger *Logger) level() Level {
+ return Level(atomic.LoadUint32((*uint32)(&logger.Level)))
+}
+
+func (logger *Logger) setLevel(level Level) {
+ atomic.StoreUint32((*uint32)(&logger.Level), uint32(level))
}
diff --git a/vendor/github.com/Sirupsen/logrus/logrus.go b/vendor/github.com/Sirupsen/logrus/logrus.go
index 43ee12e90e..dd38999741 100644
--- a/vendor/github.com/Sirupsen/logrus/logrus.go
+++ b/vendor/github.com/Sirupsen/logrus/logrus.go
@@ -3,13 +3,14 @@ package logrus
import (
"fmt"
"log"
+ "strings"
)
// Fields type, used to pass to `WithFields`.
type Fields map[string]interface{}
// Level type
-type Level uint8
+type Level uint32
// Convert the Level to a string. E.g. PanicLevel becomes "panic".
func (level Level) String() string {
@@ -33,7 +34,7 @@ func (level Level) String() string {
// ParseLevel takes a string level and returns the Logrus log level constant.
func ParseLevel(lvl string) (Level, error) {
- switch lvl {
+ switch strings.ToLower(lvl) {
case "panic":
return PanicLevel, nil
case "fatal":
@@ -52,6 +53,16 @@ func ParseLevel(lvl string) (Level, error) {
return l, fmt.Errorf("not a valid logrus Level: %q", lvl)
}
+// A constant exposing all logging levels
+var AllLevels = []Level{
+ PanicLevel,
+ FatalLevel,
+ ErrorLevel,
+ WarnLevel,
+ InfoLevel,
+ DebugLevel,
+}
+
// These are the different logging levels. You can set the logging level to log
// on your instance of logger, obtained with `logrus.New()`.
const (
@@ -74,7 +85,11 @@ const (
)
// Won't compile if StdLogger can't be realized by a log.Logger
-var _ StdLogger = &log.Logger{}
+var (
+ _ StdLogger = &log.Logger{}
+ _ StdLogger = &Entry{}
+ _ StdLogger = &Logger{}
+)
// StdLogger is what your logrus-enabled library should take, that way
// it'll accept a stdlib logger and a logrus logger. There's no standard
@@ -92,3 +107,37 @@ type StdLogger interface {
Panicf(string, ...interface{})
Panicln(...interface{})
}
+
+// The FieldLogger interface generalizes the Entry and Logger types
+type FieldLogger interface {
+ WithField(key string, value interface{}) *Entry
+ WithFields(fields Fields) *Entry
+ WithError(err error) *Entry
+
+ Debugf(format string, args ...interface{})
+ Infof(format string, args ...interface{})
+ Printf(format string, args ...interface{})
+ Warnf(format string, args ...interface{})
+ Warningf(format string, args ...interface{})
+ Errorf(format string, args ...interface{})
+ Fatalf(format string, args ...interface{})
+ Panicf(format string, args ...interface{})
+
+ Debug(args ...interface{})
+ Info(args ...interface{})
+ Print(args ...interface{})
+ Warn(args ...interface{})
+ Warning(args ...interface{})
+ Error(args ...interface{})
+ Fatal(args ...interface{})
+ Panic(args ...interface{})
+
+ Debugln(args ...interface{})
+ Infoln(args ...interface{})
+ Println(args ...interface{})
+ Warnln(args ...interface{})
+ Warningln(args ...interface{})
+ Errorln(args ...interface{})
+ Fatalln(args ...interface{})
+ Panicln(args ...interface{})
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_appengine.go b/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
new file mode 100644
index 0000000000..e011a86945
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_appengine.go
@@ -0,0 +1,10 @@
+// +build appengine
+
+package logrus
+
+import "io"
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal(f io.Writer) bool {
+ return true
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_bsd.go b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
new file mode 100644
index 0000000000..5f6be4d3c0
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_bsd.go
@@ -0,0 +1,10 @@
+// +build darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
+
+package logrus
+
+import "syscall"
+
+const ioctlReadTermios = syscall.TIOCGETA
+
+type Termios syscall.Termios
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_darwin.go b/vendor/github.com/Sirupsen/logrus/terminal_darwin.go
deleted file mode 100644
index 8fe02a4aec..0000000000
--- a/vendor/github.com/Sirupsen/logrus/terminal_darwin.go
+++ /dev/null
@@ -1,12 +0,0 @@
-// Based on ssh/terminal:
-// Copyright 2013 The Go Authors. All rights reserved.
-// Use of this source code is governed by a BSD-style
-// license that can be found in the LICENSE file.
-
-package logrus
-
-import "syscall"
-
-const ioctlReadTermios = syscall.TIOCGETA
-
-type Termios syscall.Termios
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go b/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go
deleted file mode 100644
index 0428ee5d52..0000000000
--- a/vendor/github.com/Sirupsen/logrus/terminal_freebsd.go
+++ /dev/null
@@ -1,20 +0,0 @@
-/*
- Go 1.2 doesn't include Termios for FreeBSD. This should be added in 1.3 and this could be merged with terminal_darwin.
-*/
-package logrus
-
-import (
- "syscall"
-)
-
-const ioctlReadTermios = syscall.TIOCGETA
-
-type Termios struct {
- Iflag uint32
- Oflag uint32
- Cflag uint32
- Lflag uint32
- Cc [20]uint8
- Ispeed uint32
- Ospeed uint32
-}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_linux.go b/vendor/github.com/Sirupsen/logrus/terminal_linux.go
index a2c0b40db6..308160ca80 100644
--- a/vendor/github.com/Sirupsen/logrus/terminal_linux.go
+++ b/vendor/github.com/Sirupsen/logrus/terminal_linux.go
@@ -3,6 +3,8 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
+// +build !appengine
+
package logrus
import "syscall"
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
index 276447bd5c..190297abf3 100644
--- a/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
+++ b/vendor/github.com/Sirupsen/logrus/terminal_notwindows.go
@@ -3,19 +3,26 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build linux,!appengine darwin freebsd
+// +build linux darwin freebsd openbsd netbsd dragonfly
+// +build !appengine
package logrus
import (
+ "io"
+ "os"
"syscall"
"unsafe"
)
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal() bool {
- fd := syscall.Stdout
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal(f io.Writer) bool {
var termios Termios
- _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(fd), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
- return err == 0
+ switch v := f.(type) {
+ case *os.File:
+ _, _, err := syscall.Syscall6(syscall.SYS_IOCTL, uintptr(v.Fd()), ioctlReadTermios, uintptr(unsafe.Pointer(&termios)), 0, 0, 0)
+ return err == 0
+ default:
+ return false
+ }
}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_solaris.go b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
new file mode 100644
index 0000000000..3c86b1abee
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/terminal_solaris.go
@@ -0,0 +1,21 @@
+// +build solaris,!appengine
+
+package logrus
+
+import (
+ "io"
+ "os"
+
+ "golang.org/x/sys/unix"
+)
+
+// IsTerminal returns true if the given file descriptor is a terminal.
+func IsTerminal(f io.Writer) bool {
+ switch v := f.(type) {
+ case *os.File:
+ _, err := unix.IoctlGetTermios(int(v.Fd()), unix.TCGETA)
+ return err == nil
+ default:
+ return false
+ }
+}
diff --git a/vendor/github.com/Sirupsen/logrus/terminal_windows.go b/vendor/github.com/Sirupsen/logrus/terminal_windows.go
index 2e09f6f7e3..7a336307e5 100644
--- a/vendor/github.com/Sirupsen/logrus/terminal_windows.go
+++ b/vendor/github.com/Sirupsen/logrus/terminal_windows.go
@@ -3,11 +3,18 @@
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
-// +build windows
+// +build windows,!appengine
package logrus
import (
+ "bytes"
+ "errors"
+ "io"
+ "os"
+ "os/exec"
+ "strconv"
+ "strings"
"syscall"
"unsafe"
)
@@ -16,12 +23,60 @@ var kernel32 = syscall.NewLazyDLL("kernel32.dll")
var (
procGetConsoleMode = kernel32.NewProc("GetConsoleMode")
+ procSetConsoleMode = kernel32.NewProc("SetConsoleMode")
)
-// IsTerminal returns true if the given file descriptor is a terminal.
-func IsTerminal() bool {
- fd := syscall.Stdout
- var st uint32
- r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(fd), uintptr(unsafe.Pointer(&st)), 0)
- return r != 0 && e == 0
+const (
+ enableProcessedOutput = 0x0001
+ enableWrapAtEolOutput = 0x0002
+ enableVirtualTerminalProcessing = 0x0004
+)
+
+func getVersion() (float64, error) {
+ stdout, stderr := &bytes.Buffer{}, &bytes.Buffer{}
+ cmd := exec.Command("cmd", "ver")
+ cmd.Stdout = stdout
+ cmd.Stderr = stderr
+ err := cmd.Run()
+ if err != nil {
+ return -1, err
+ }
+
+ // The output should be like "Microsoft Windows [Version XX.X.XXXXXX]"
+ version := strings.Replace(stdout.String(), "\n", "", -1)
+ version = strings.Replace(version, "\r\n", "", -1)
+
+ x1 := strings.Index(version, "[Version")
+
+ if x1 == -1 || strings.Index(version, "]") == -1 {
+ return -1, errors.New("Can't determine Windows version")
+ }
+
+ return strconv.ParseFloat(version[x1+9:x1+13], 64)
+}
+
+func init() {
+ ver, err := getVersion()
+ if err != nil {
+ return
+ }
+
+ // Activate Virtual Processing for Windows CMD
+ // Info: https://msdn.microsoft.com/en-us/library/windows/desktop/ms686033(v=vs.85).aspx
+ if ver >= 10 {
+ handle := syscall.Handle(os.Stderr.Fd())
+ procSetConsoleMode.Call(uintptr(handle), enableProcessedOutput|enableWrapAtEolOutput|enableVirtualTerminalProcessing)
+ }
+}
+
+// IsTerminal returns true if stderr's file descriptor is a terminal.
+func IsTerminal(f io.Writer) bool {
+ switch v := f.(type) {
+ case *os.File:
+ var st uint32
+ r, _, e := syscall.Syscall(procGetConsoleMode.Addr(), 2, uintptr(v.Fd()), uintptr(unsafe.Pointer(&st)), 0)
+ return r != 0 && e == 0
+ default:
+ return false
+ }
}
diff --git a/vendor/github.com/Sirupsen/logrus/text_formatter.go b/vendor/github.com/Sirupsen/logrus/text_formatter.go
index 78e7889356..ba88854061 100644
--- a/vendor/github.com/Sirupsen/logrus/text_formatter.go
+++ b/vendor/github.com/Sirupsen/logrus/text_formatter.go
@@ -3,9 +3,9 @@ package logrus
import (
"bytes"
"fmt"
- "regexp"
"sort"
"strings"
+ "sync"
"time"
)
@@ -15,54 +15,98 @@ const (
green = 32
yellow = 33
blue = 34
+ gray = 37
)
var (
baseTimestamp time.Time
- isTerminal bool
- noQuoteNeeded *regexp.Regexp
)
func init() {
baseTimestamp = time.Now()
- isTerminal = IsTerminal()
-}
-
-func miniTS() int {
- return int(time.Since(baseTimestamp) / time.Second)
}
type TextFormatter struct {
// Set to true to bypass checking for a TTY before outputting colors.
- ForceColors bool
+ ForceColors bool
+
+ // Force disabling colors.
DisableColors bool
- // Set to true to disable timestamp logging (useful when the output
- // is redirected to a logging system already adding a timestamp)
+
+ // Disable timestamp logging. useful when output is redirected to logging
+ // system that already adds timestamps.
DisableTimestamp bool
+
+ // Enable logging the full timestamp when a TTY is attached instead of just
+ // the time passed since beginning of execution.
+ FullTimestamp bool
+
+ // TimestampFormat to use for display when a full timestamp is printed
+ TimestampFormat string
+
+ // The fields are sorted by default for a consistent output. For applications
+ // that log extremely frequently and don't use the JSON formatter this may not
+ // be desired.
+ DisableSorting bool
+
+ // QuoteEmptyFields will wrap empty fields in quotes if true
+ QuoteEmptyFields bool
+
+ // QuoteCharacter can be set to the override the default quoting character "
+ // with something else. For example: ', or `.
+ QuoteCharacter string
+
+ // Whether the logger's out is to a terminal
+ isTerminal bool
+
+ sync.Once
+}
+
+func (f *TextFormatter) init(entry *Entry) {
+ if len(f.QuoteCharacter) == 0 {
+ f.QuoteCharacter = "\""
+ }
+ if entry.Logger != nil {
+ f.isTerminal = IsTerminal(entry.Logger.Out)
+ }
}
func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
-
- var keys []string
+ var b *bytes.Buffer
+ keys := make([]string, 0, len(entry.Data))
for k := range entry.Data {
keys = append(keys, k)
}
- sort.Strings(keys)
- b := &bytes.Buffer{}
+ if !f.DisableSorting {
+ sort.Strings(keys)
+ }
+ if entry.Buffer != nil {
+ b = entry.Buffer
+ } else {
+ b = &bytes.Buffer{}
+ }
prefixFieldClashes(entry.Data)
- isColored := (f.ForceColors || isTerminal) && !f.DisableColors
+ f.Do(func() { f.init(entry) })
+ isColored := (f.ForceColors || f.isTerminal) && !f.DisableColors
+
+ timestampFormat := f.TimestampFormat
+ if timestampFormat == "" {
+ timestampFormat = DefaultTimestampFormat
+ }
if isColored {
- printColored(b, entry, keys)
+ f.printColored(b, entry, keys, timestampFormat)
} else {
if !f.DisableTimestamp {
- f.appendKeyValue(b, "time", entry.Time.Format(time.RFC3339))
+ f.appendKeyValue(b, "time", entry.Time.Format(timestampFormat))
}
f.appendKeyValue(b, "level", entry.Level.String())
- f.appendKeyValue(b, "msg", entry.Message)
+ if entry.Message != "" {
+ f.appendKeyValue(b, "msg", entry.Message)
+ }
for _, key := range keys {
f.appendKeyValue(b, key, entry.Data[key])
}
@@ -72,9 +116,11 @@ func (f *TextFormatter) Format(entry *Entry) ([]byte, error) {
return b.Bytes(), nil
}
-func printColored(b *bytes.Buffer, entry *Entry, keys []string) {
+func (f *TextFormatter) printColored(b *bytes.Buffer, entry *Entry, keys []string, timestampFormat string) {
var levelColor int
switch entry.Level {
+ case DebugLevel:
+ levelColor = gray
case WarnLevel:
levelColor = yellow
case ErrorLevel, FatalLevel, PanicLevel:
@@ -85,40 +131,59 @@ func printColored(b *bytes.Buffer, entry *Entry, keys []string) {
levelText := strings.ToUpper(entry.Level.String())[0:4]
- fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, miniTS(), entry.Message)
+ if f.DisableTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m %-44s ", levelColor, levelText, entry.Message)
+ } else if !f.FullTimestamp {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%04d] %-44s ", levelColor, levelText, int(entry.Time.Sub(baseTimestamp)/time.Second), entry.Message)
+ } else {
+ fmt.Fprintf(b, "\x1b[%dm%s\x1b[0m[%s] %-44s ", levelColor, levelText, entry.Time.Format(timestampFormat), entry.Message)
+ }
for _, k := range keys {
v := entry.Data[k]
- fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=%v", levelColor, k, v)
+ fmt.Fprintf(b, " \x1b[%dm%s\x1b[0m=", levelColor, k)
+ f.appendValue(b, v)
}
}
-func needsQuoting(text string) bool {
+func (f *TextFormatter) needsQuoting(text string) bool {
+ if f.QuoteEmptyFields && len(text) == 0 {
+ return true
+ }
for _, ch := range text {
if !((ch >= 'a' && ch <= 'z') ||
(ch >= 'A' && ch <= 'Z') ||
- (ch >= '0' && ch < '9') ||
+ (ch >= '0' && ch <= '9') ||
ch == '-' || ch == '.') {
- return false
+ return true
}
}
- return true
+ return false
}
-func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key, value interface{}) {
- switch value.(type) {
+func (f *TextFormatter) appendKeyValue(b *bytes.Buffer, key string, value interface{}) {
+
+ b.WriteString(key)
+ b.WriteByte('=')
+ f.appendValue(b, value)
+ b.WriteByte(' ')
+}
+
+func (f *TextFormatter) appendValue(b *bytes.Buffer, value interface{}) {
+ switch value := value.(type) {
case string:
- if needsQuoting(value.(string)) {
- fmt.Fprintf(b, "%v=%s ", key, value)
+ if !f.needsQuoting(value) {
+ b.WriteString(value)
} else {
- fmt.Fprintf(b, "%v=%q ", key, value)
+ fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, value, f.QuoteCharacter)
}
case error:
- if needsQuoting(value.(error).Error()) {
- fmt.Fprintf(b, "%v=%s ", key, value)
+ errmsg := value.Error()
+ if !f.needsQuoting(errmsg) {
+ b.WriteString(errmsg)
} else {
- fmt.Fprintf(b, "%v=%q ", key, value)
+ fmt.Fprintf(b, "%s%v%s", f.QuoteCharacter, errmsg, f.QuoteCharacter)
}
default:
- fmt.Fprintf(b, "%v=%v ", key, value)
+ fmt.Fprint(b, value)
}
}
diff --git a/vendor/github.com/Sirupsen/logrus/writer.go b/vendor/github.com/Sirupsen/logrus/writer.go
new file mode 100644
index 0000000000..7bdebedc60
--- /dev/null
+++ b/vendor/github.com/Sirupsen/logrus/writer.go
@@ -0,0 +1,62 @@
+package logrus
+
+import (
+ "bufio"
+ "io"
+ "runtime"
+)
+
+func (logger *Logger) Writer() *io.PipeWriter {
+ return logger.WriterLevel(InfoLevel)
+}
+
+func (logger *Logger) WriterLevel(level Level) *io.PipeWriter {
+ return NewEntry(logger).WriterLevel(level)
+}
+
+func (entry *Entry) Writer() *io.PipeWriter {
+ return entry.WriterLevel(InfoLevel)
+}
+
+func (entry *Entry) WriterLevel(level Level) *io.PipeWriter {
+ reader, writer := io.Pipe()
+
+ var printFunc func(args ...interface{})
+
+ switch level {
+ case DebugLevel:
+ printFunc = entry.Debug
+ case InfoLevel:
+ printFunc = entry.Info
+ case WarnLevel:
+ printFunc = entry.Warn
+ case ErrorLevel:
+ printFunc = entry.Error
+ case FatalLevel:
+ printFunc = entry.Fatal
+ case PanicLevel:
+ printFunc = entry.Panic
+ default:
+ printFunc = entry.Print
+ }
+
+ go entry.writerScanner(reader, printFunc)
+ runtime.SetFinalizer(writer, writerFinalizer)
+
+ return writer
+}
+
+func (entry *Entry) writerScanner(reader *io.PipeReader, printFunc func(args ...interface{})) {
+ scanner := bufio.NewScanner(reader)
+ for scanner.Scan() {
+ printFunc(scanner.Text())
+ }
+ if err := scanner.Err(); err != nil {
+ entry.Errorf("Error while reading from Writer: %s", err)
+ }
+ reader.Close()
+}
+
+func writerFinalizer(writer *io.PipeWriter) {
+ writer.Close()
+}
diff --git a/vendor/github.com/containers/image/LICENSE b/vendor/github.com/containers/image/LICENSE
new file mode 100644
index 0000000000..9535635306
--- /dev/null
+++ b/vendor/github.com/containers/image/LICENSE
@@ -0,0 +1,189 @@
+
+ Apache License
+ Version 2.0, January 2004
+ https://www.apache.org/licenses/
+
+ TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
+
+ 1. Definitions.
+
+ "License" shall mean the terms and conditions for use, reproduction,
+ and distribution as defined by Sections 1 through 9 of this document.
+
+ "Licensor" shall mean the copyright owner or entity authorized by
+ the copyright owner that is granting the License.
+
+ "Legal Entity" shall mean the union of the acting entity and all
+ other entities that control, are controlled by, or are under common
+ control with that entity. For the purposes of this definition,
+ "control" means (i) the power, direct or indirect, to cause the
+ direction or management of such entity, whether by contract or
+ otherwise, or (ii) ownership of fifty percent (50%) or more of the
+ outstanding shares, or (iii) beneficial ownership of such entity.
+
+ "You" (or "Your") shall mean an individual or Legal Entity
+ exercising permissions granted by this License.
+
+ "Source" form shall mean the preferred form for making modifications,
+ including but not limited to software source code, documentation
+ source, and configuration files.
+
+ "Object" form shall mean any form resulting from mechanical
+ transformation or translation of a Source form, including but
+ not limited to compiled object code, generated documentation,
+ and conversions to other media types.
+
+ "Work" shall mean the work of authorship, whether in Source or
+ Object form, made available under the License, as indicated by a
+ copyright notice that is included in or attached to the work
+ (an example is provided in the Appendix below).
+
+ "Derivative Works" shall mean any work, whether in Source or Object
+ form, that is based on (or derived from) the Work and for which the
+ editorial revisions, annotations, elaborations, or other modifications
+ represent, as a whole, an original work of authorship. For the purposes
+ of this License, Derivative Works shall not include works that remain
+ separable from, or merely link (or bind by name) to the interfaces of,
+ the Work and Derivative Works thereof.
+
+ "Contribution" shall mean any work of authorship, including
+ the original version of the Work and any modifications or additions
+ to that Work or Derivative Works thereof, that is intentionally
+ submitted to Licensor for inclusion in the Work by the copyright owner
+ or by an individual or Legal Entity authorized to submit on behalf of
+ the copyright owner. For the purposes of this definition, "submitted"
+ means any form of electronic, verbal, or written communication sent
+ to the Licensor or its representatives, including but not limited to
+ communication on electronic mailing lists, source code control systems,
+ and issue tracking systems that are managed by, or on behalf of, the
+ Licensor for the purpose of discussing and improving the Work, but
+ excluding communication that is conspicuously marked or otherwise
+ designated in writing by the copyright owner as "Not a Contribution."
+
+ "Contributor" shall mean Licensor and any individual or Legal Entity
+ on behalf of whom a Contribution has been received by Licensor and
+ subsequently incorporated within the Work.
+
+ 2. Grant of Copyright License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ copyright license to reproduce, prepare Derivative Works of,
+ publicly display, publicly perform, sublicense, and distribute the
+ Work and such Derivative Works in Source or Object form.
+
+ 3. Grant of Patent License. Subject to the terms and conditions of
+ this License, each Contributor hereby grants to You a perpetual,
+ worldwide, non-exclusive, no-charge, royalty-free, irrevocable
+ (except as stated in this section) patent license to make, have made,
+ use, offer to sell, sell, import, and otherwise transfer the Work,
+ where such license applies only to those patent claims licensable
+ by such Contributor that are necessarily infringed by their
+ Contribution(s) alone or by combination of their Contribution(s)
+ with the Work to which such Contribution(s) was submitted. If You
+ institute patent litigation against any entity (including a
+ cross-claim or counterclaim in a lawsuit) alleging that the Work
+ or a Contribution incorporated within the Work constitutes direct
+ or contributory patent infringement, then any patent licenses
+ granted to You under this License for that Work shall terminate
+ as of the date such litigation is filed.
+
+ 4. Redistribution. You may reproduce and distribute copies of the
+ Work or Derivative Works thereof in any medium, with or without
+ modifications, and in Source or Object form, provided that You
+ meet the following conditions:
+
+ (a) You must give any other recipients of the Work or
+ Derivative Works a copy of this License; and
+
+ (b) You must cause any modified files to carry prominent notices
+ stating that You changed the files; and
+
+ (c) You must retain, in the Source form of any Derivative Works
+ that You distribute, all copyright, patent, trademark, and
+ attribution notices from the Source form of the Work,
+ excluding those notices that do not pertain to any part of
+ the Derivative Works; and
+
+ (d) If the Work includes a "NOTICE" text file as part of its
+ distribution, then any Derivative Works that You distribute must
+ include a readable copy of the attribution notices contained
+ within such NOTICE file, excluding those notices that do not
+ pertain to any part of the Derivative Works, in at least one
+ of the following places: within a NOTICE text file distributed
+ as part of the Derivative Works; within the Source form or
+ documentation, if provided along with the Derivative Works; or,
+ within a display generated by the Derivative Works, if and
+ wherever such third-party notices normally appear. The contents
+ of the NOTICE file are for informational purposes only and
+ do not modify the License. You may add Your own attribution
+ notices within Derivative Works that You distribute, alongside
+ or as an addendum to the NOTICE text from the Work, provided
+ that such additional attribution notices cannot be construed
+ as modifying the License.
+
+ You may add Your own copyright statement to Your modifications and
+ may provide additional or different license terms and conditions
+ for use, reproduction, or distribution of Your modifications, or
+ for any such Derivative Works as a whole, provided Your use,
+ reproduction, and distribution of the Work otherwise complies with
+ the conditions stated in this License.
+
+ 5. Submission of Contributions. Unless You explicitly state otherwise,
+ any Contribution intentionally submitted for inclusion in the Work
+ by You to the Licensor shall be under the terms and conditions of
+ this License, without any additional terms or conditions.
+ Notwithstanding the above, nothing herein shall supersede or modify
+ the terms of any separate license agreement you may have executed
+ with Licensor regarding such Contributions.
+
+ 6. Trademarks. This License does not grant permission to use the trade
+ names, trademarks, service marks, or product names of the Licensor,
+ except as required for reasonable and customary use in describing the
+ origin of the Work and reproducing the content of the NOTICE file.
+
+ 7. Disclaimer of Warranty. Unless required by applicable law or
+ agreed to in writing, Licensor provides the Work (and each
+ Contributor provides its Contributions) on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
+ implied, including, without limitation, any warranties or conditions
+ of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
+ PARTICULAR PURPOSE. You are solely responsible for determining the
+ appropriateness of using or redistributing the Work and assume any
+ risks associated with Your exercise of permissions under this License.
+
+ 8. Limitation of Liability. In no event and under no legal theory,
+ whether in tort (including negligence), contract, or otherwise,
+ unless required by applicable law (such as deliberate and grossly
+ negligent acts) or agreed to in writing, shall any Contributor be
+ liable to You for damages, including any direct, indirect, special,
+ incidental, or consequential damages of any character arising as a
+ result of this License or out of the use or inability to use the
+ Work (including but not limited to damages for loss of goodwill,
+ work stoppage, computer failure or malfunction, or any and all
+ other commercial damages or losses), even if such Contributor
+ has been advised of the possibility of such damages.
+
+ 9. Accepting Warranty or Additional Liability. While redistributing
+ the Work or Derivative Works thereof, You may choose to offer,
+ and charge a fee for, acceptance of support, warranty, indemnity,
+ or other liability obligations and/or rights consistent with this
+ License. However, in accepting such obligations, You may act only
+ on Your own behalf and on Your sole responsibility, not on behalf
+ of any other Contributor, and only if You agree to indemnify,
+ defend, and hold each Contributor harmless for any liability
+ incurred by, or claims asserted against, such Contributor by reason
+ of your accepting any such warranty or additional liability.
+
+ END OF TERMS AND CONDITIONS
+
+ Licensed under the Apache License, Version 2.0 (the "License");
+ you may not use this file except in compliance with the License.
+ You may obtain a copy of the License at
+
+ https://www.apache.org/licenses/LICENSE-2.0
+
+ Unless required by applicable law or agreed to in writing, software
+ distributed under the License is distributed on an "AS IS" BASIS,
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+ See the License for the specific language governing permissions and
+ limitations under the License.
diff --git a/vendor/github.com/containers/image/copy/copy.go b/vendor/github.com/containers/image/copy/copy.go
new file mode 100644
index 0000000000..13d35e7083
--- /dev/null
+++ b/vendor/github.com/containers/image/copy/copy.go
@@ -0,0 +1,663 @@
+package copy
+
+import (
+ "bytes"
+ "compress/gzip"
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "reflect"
+ "runtime"
+ "strings"
+ "time"
+
+ pb "gopkg.in/cheggaaa/pb.v1"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/containers/image/image"
+ "github.com/containers/image/pkg/compression"
+ "github.com/containers/image/signature"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// digestingReader wraps source, feeding everything read through it into
+// digester, and fails the read at EOF if the accumulated digest does not
+// match expectedDigest.
+type digestingReader struct {
+	source           io.Reader
+	digester         digest.Digester
+	expectedDigest   digest.Digest
+	validationFailed bool // set to true when the digest check at EOF failed
+}
+
+// imageCopier allows us to keep track of diffID values for blobs, and other
+// data, that we're copying between images, and cache other information that
+// might allow us to take some shortcuts
+type imageCopier struct {
+	copiedBlobs       map[digest.Digest]digest.Digest
+	cachedDiffIDs     map[digest.Digest]digest.Digest // uncompressed DiffIDs already computed, keyed by (compressed) layer digest
+	manifestUpdates   *types.ManifestUpdateOptions
+	dest              types.ImageDestination
+	src               types.Image
+	rawSource         types.ImageSource
+	diffIDsAreNeeded  bool
+	canModifyManifest bool // false when pre-existing signatures must be preserved byte-for-byte
+	reportWriter      io.Writer
+	progressInterval  time.Duration
+	progress          chan types.ProgressProperties
+}
+
+// newDigestingReader returns an io.Reader implementation with contents of source, which will eventually return a non-EOF error
+// and set validationFailed to true if the source stream does not match expectedDigest.
+func newDigestingReader(source io.Reader, expectedDigest digest.Digest) (*digestingReader, error) {
+	// Fail early if the expected digest is malformed or uses an algorithm we cannot compute.
+	if err := expectedDigest.Validate(); err != nil {
+		return nil, errors.Errorf("Invalid digest specification %s", expectedDigest)
+	}
+	digestAlgorithm := expectedDigest.Algorithm()
+	if !digestAlgorithm.Available() {
+		return nil, errors.Errorf("Invalid digest specification %s: unsupported digest algorithm %s", expectedDigest, digestAlgorithm)
+	}
+	return &digestingReader{
+		source:           source,
+		digester:         digestAlgorithm.Digester(),
+		expectedDigest:   expectedDigest,
+		validationFailed: false,
+	}, nil
+}
+
+// Read implements io.Reader: it forwards to d.source, hashes whatever was
+// read, and verifies the accumulated digest against d.expectedDigest once
+// the underlying stream reports io.EOF.
+func (d *digestingReader) Read(p []byte) (int, error) {
+	n, err := d.source.Read(p)
+	if n > 0 {
+		if n2, err := d.digester.Hash().Write(p[:n]); n2 != n || err != nil {
+			// Coverage: This should not happen, the hash.Hash interface requires
+			// d.digest.Write to never return an error, and the io.Writer interface
+			// requires n2 == len(input) if no error is returned.
+			return 0, errors.Wrapf(err, "Error updating digest during verification: %d vs. %d", n2, n)
+		}
+	}
+	if err == io.EOF {
+		actualDigest := d.digester.Digest()
+		if actualDigest != d.expectedDigest {
+			d.validationFailed = true
+			return 0, errors.Errorf("Digest did not match, expected %s, got %s", d.expectedDigest, actualDigest)
+		}
+	}
+	return n, err
+}
+
+// Options allows supplying non-default configuration modifying the behavior of CopyImage.
+type Options struct {
+	RemoveSignatures bool                          // Remove any pre-existing signatures. SignBy will still add a new signature.
+	SignBy           string                        // If non-empty, asks for a signature to be added during the copy, and specifies a key ID, as accepted by signature.NewGPGSigningMechanism().SignDockerManifest(),
+	ReportWriter     io.Writer                     // If non-nil, receives human-readable progress messages; defaults to ioutil.Discard.
+	SourceCtx        *types.SystemContext          // Context used when accessing the source image.
+	DestinationCtx   *types.SystemContext          // Context used when accessing the destination image.
+	ProgressInterval time.Duration                 // time to wait between reports to signal the progress channel
+	Progress         chan types.ProgressProperties // Reported to when ProgressInterval has arrived for a single artifact+offset.
+}
+
+// Image copies image from srcRef to destRef, using policyContext to validate
+// source image admissibility.
+// options may be nil, in which case default Options are used.
+func Image(policyContext *signature.PolicyContext, destRef, srcRef types.ImageReference, options *Options) (retErr error) {
+	// NOTE this function uses an output parameter for the error return value.
+	// Setting this and returning is the ideal way to return an error.
+	//
+	// the defers in this routine will wrap the error return with its own errors
+	// which can be valuable context in the middle of a multi-streamed copy.
+	if options == nil {
+		options = &Options{}
+	}
+
+	reportWriter := ioutil.Discard
+
+	if options.ReportWriter != nil {
+		reportWriter = options.ReportWriter
+	}
+
+	writeReport := func(f string, a ...interface{}) {
+		fmt.Fprintf(reportWriter, f, a...)
+	}
+
+	dest, err := destRef.NewImageDestination(options.DestinationCtx)
+	if err != nil {
+		return errors.Wrapf(err, "Error initializing destination %s", transports.ImageName(destRef))
+	}
+	defer func() {
+		if err := dest.Close(); err != nil {
+			retErr = errors.Wrapf(retErr, " (dest: %v)", err)
+		}
+	}()
+
+	destSupportedManifestMIMETypes := dest.SupportedManifestMIMETypes()
+
+	rawSource, err := srcRef.NewImageSource(options.SourceCtx, destSupportedManifestMIMETypes)
+	if err != nil {
+		return errors.Wrapf(err, "Error initializing source %s", transports.ImageName(srcRef))
+	}
+	unparsedImage := image.UnparsedFromSource(rawSource)
+	defer func() {
+		if unparsedImage != nil {
+			if err := unparsedImage.Close(); err != nil {
+				retErr = errors.Wrapf(retErr, " (unparsed: %v)", err)
+			}
+		}
+	}()
+
+	// Please keep this policy check BEFORE reading any other information about the image.
+	if allowed, err := policyContext.IsRunningImageAllowed(unparsedImage); !allowed || err != nil { // Be paranoid and fail if either return value indicates so.
+		return errors.Wrap(err, "Source image rejected")
+	}
+	src, err := image.FromUnparsedImage(unparsedImage)
+	if err != nil {
+		return errors.Wrapf(err, "Error initializing image from source %s", transports.ImageName(srcRef))
+	}
+	// Presumably src now owns unparsedImage (TODO confirm against image.FromUnparsedImage docs);
+	// clearing the variable prevents the deferred Close above from closing it a second time.
+	unparsedImage = nil
+	defer func() {
+		if err := src.Close(); err != nil {
+			retErr = errors.Wrapf(retErr, " (source: %v)", err)
+		}
+	}()
+
+	if err := checkImageDestinationForCurrentRuntimeOS(src, dest); err != nil {
+		return err
+	}
+
+	if src.IsMultiImage() {
+		return errors.Errorf("can not copy %s: manifest contains multiple images", transports.ImageName(srcRef))
+	}
+
+	var sigs [][]byte
+	if options.RemoveSignatures {
+		sigs = [][]byte{}
+	} else {
+		writeReport("Getting image source signatures\n")
+		s, err := src.Signatures(context.TODO())
+		if err != nil {
+			return errors.Wrap(err, "Error reading signatures")
+		}
+		sigs = s
+	}
+	if len(sigs) != 0 {
+		writeReport("Checking if image destination supports signatures\n")
+		if err := dest.SupportsSignatures(); err != nil {
+			return errors.Wrap(err, "Can not copy signatures")
+		}
+	}
+
+	// Existing signatures cover the exact manifest bytes, so the manifest may only be modified when there are none.
+	canModifyManifest := len(sigs) == 0
+	manifestUpdates := types.ManifestUpdateOptions{}
+	manifestUpdates.InformationOnly.Destination = dest
+
+	if err := updateEmbeddedDockerReference(&manifestUpdates, dest, src, canModifyManifest); err != nil {
+		return err
+	}
+
+	// We compute preferredManifestMIMEType only to show it in error messages.
+	// Without having to add this context in an error message, we would be happy enough to know only that no conversion is needed.
+	preferredManifestMIMEType, otherManifestMIMETypeCandidates, err := determineManifestConversion(&manifestUpdates, src, destSupportedManifestMIMETypes, canModifyManifest)
+	if err != nil {
+		return err
+	}
+
+	// If src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates) will be true, it needs to be true by the time we get here.
+	ic := imageCopier{
+		copiedBlobs:       make(map[digest.Digest]digest.Digest),
+		cachedDiffIDs:     make(map[digest.Digest]digest.Digest),
+		manifestUpdates:   &manifestUpdates,
+		dest:              dest,
+		src:               src,
+		rawSource:         rawSource,
+		diffIDsAreNeeded:  src.UpdatedImageNeedsLayerDiffIDs(manifestUpdates),
+		canModifyManifest: canModifyManifest,
+		reportWriter:      reportWriter,
+		progressInterval:  options.ProgressInterval,
+		progress:          options.Progress,
+	}
+
+	if err := ic.copyLayers(); err != nil {
+		return err
+	}
+
+	// With docker/distribution registries we do not know whether the registry accepts schema2 or schema1 only;
+	// and at least with the OpenShift registry "acceptschema2" option, there is no way to detect the support
+	// without actually trying to upload something and getting a types.ManifestTypeRejectedError.
+	// So, try the preferred manifest MIME type. If the process succeeds, fine…
+	manifest, err := ic.copyUpdatedConfigAndManifest()
+	if err != nil {
+		logrus.Debugf("Writing manifest using preferred type %s failed: %v", preferredManifestMIMEType, err)
+		// … if it fails, _and_ the failure is because the manifest is rejected, we may have other options.
+		if _, isManifestRejected := errors.Cause(err).(types.ManifestTypeRejectedError); !isManifestRejected || len(otherManifestMIMETypeCandidates) == 0 {
+			// We don’t have other options.
+			// In principle the code below would handle this as well, but the resulting error message is fairly ugly.
+			// Don’t bother the user with MIME types if we have no choice.
+			return err
+		}
+		// If the original MIME type is acceptable, determineManifestConversion always uses it as preferredManifestMIMEType.
+		// So if we are here, we will definitely be trying to convert the manifest.
+		// With !canModifyManifest, that would just be a string of repeated failures for the same reason,
+		// so let’s bail out early and with a better error message.
+		if !canModifyManifest {
+			return errors.Wrap(err, "Writing manifest failed (and converting it is not possible)")
+		}
+
+		// errs is a list of errors when trying various manifest types. Also serves as an "upload succeeded" flag when set to nil.
+		errs := []string{fmt.Sprintf("%s(%v)", preferredManifestMIMEType, err)}
+		for _, manifestMIMEType := range otherManifestMIMETypeCandidates {
+			logrus.Debugf("Trying to use manifest type %s…", manifestMIMEType)
+			manifestUpdates.ManifestMIMEType = manifestMIMEType
+			attemptedManifest, err := ic.copyUpdatedConfigAndManifest()
+			if err != nil {
+				logrus.Debugf("Upload of manifest type %s failed: %v", manifestMIMEType, err)
+				errs = append(errs, fmt.Sprintf("%s(%v)", manifestMIMEType, err))
+				continue
+			}
+
+			// We have successfully uploaded a manifest.
+			manifest = attemptedManifest
+			errs = nil // Mark this as a success so that we don't abort below.
+			break
+		}
+		if errs != nil {
+			return fmt.Errorf("Uploading manifest failed, attempted the following formats: %s", strings.Join(errs, ", "))
+		}
+	}
+
+	if options.SignBy != "" {
+		newSig, err := createSignature(dest, manifest, options.SignBy, reportWriter)
+		if err != nil {
+			return err
+		}
+		sigs = append(sigs, newSig)
+	}
+
+	writeReport("Storing signatures\n")
+	if err := dest.PutSignatures(sigs); err != nil {
+		return errors.Wrap(err, "Error writing signatures")
+	}
+
+	if err := dest.Commit(); err != nil {
+		return errors.Wrap(err, "Error committing the finished image")
+	}
+
+	return nil
+}
+
+// checkImageDestinationForCurrentRuntimeOS rejects the copy when dest requires
+// images runnable on the current host (MustMatchRuntimeOS) and the image's
+// configured OS conflicts with runtime.GOOS; only windows<->linux mismatches
+// are treated as conflicts.
+func checkImageDestinationForCurrentRuntimeOS(src types.Image, dest types.ImageDestination) error {
+	if dest.MustMatchRuntimeOS() {
+		c, err := src.OCIConfig()
+		if err != nil {
+			return errors.Wrapf(err, "Error parsing image configuration")
+		}
+		osErr := fmt.Errorf("image operating system %q cannot be used on %q", c.OS, runtime.GOOS)
+		if runtime.GOOS == "windows" && c.OS == "linux" {
+			return osErr
+		} else if runtime.GOOS != "windows" && c.OS == "windows" {
+			return osErr
+		}
+	}
+	return nil
+}
+
+// updateEmbeddedDockerReference handles the Docker reference embedded in Docker schema1 manifests.
+// If dest expects a Docker reference and the one embedded in src conflicts with it, the new
+// reference is recorded in manifestUpdates — or the copy fails if the manifest may not be modified.
+func updateEmbeddedDockerReference(manifestUpdates *types.ManifestUpdateOptions, dest types.ImageDestination, src types.Image, canModifyManifest bool) error {
+	destRef := dest.Reference().DockerReference()
+	if destRef == nil {
+		return nil // Destination does not care about Docker references
+	}
+	if !src.EmbeddedDockerReferenceConflicts(destRef) {
+		return nil // No reference embedded in the manifest, or it matches destRef already.
+	}
+
+	if !canModifyManifest {
+		return errors.Errorf("Copying a schema1 image with an embedded Docker reference to %s (Docker reference %s) would invalidate existing signatures. Explicitly enable signature removal to proceed anyway",
+			transports.ImageName(dest.Reference()), destRef.String())
+	}
+	manifestUpdates.EmbeddedDockerReference = destRef
+	return nil
+}
+
+// copyLayers copies layers from src/rawSource to dest, using and updating ic.manifestUpdates if necessary and ic.canModifyManifest.
+func (ic *imageCopier) copyLayers() error {
+	srcInfos := ic.src.LayerInfos()
+	destInfos := []types.BlobInfo{}
+	diffIDs := []digest.Digest{}
+	for _, srcLayer := range srcInfos {
+		var (
+			destInfo types.BlobInfo
+			diffID   digest.Digest
+			err      error
+		)
+		if ic.dest.AcceptsForeignLayerURLs() && len(srcLayer.URLs) != 0 {
+			// DiffIDs are, currently, needed only when converting from schema1.
+			// In which case src.LayerInfos will not have URLs because schema1
+			// does not support them.
+			if ic.diffIDsAreNeeded {
+				return errors.New("getting DiffID for foreign layers is unimplemented")
+			}
+			// Pass the foreign layer through unmodified; nothing is fetched or uploaded.
+			destInfo = srcLayer
+			fmt.Fprintf(ic.reportWriter, "Skipping foreign layer %q copy to %s\n", destInfo.Digest, ic.dest.Reference().Transport().Name())
+		} else {
+			destInfo, diffID, err = ic.copyLayer(srcLayer)
+			if err != nil {
+				return err
+			}
+		}
+		// diffID stays "" for skipped foreign layers; the slice is only consumed when ic.diffIDsAreNeeded.
+		destInfos = append(destInfos, destInfo)
+		diffIDs = append(diffIDs, diffID)
+	}
+	ic.manifestUpdates.InformationOnly.LayerInfos = destInfos
+	if ic.diffIDsAreNeeded {
+		ic.manifestUpdates.InformationOnly.LayerDiffIDs = diffIDs
+	}
+	if layerDigestsDiffer(srcInfos, destInfos) {
+		ic.manifestUpdates.LayerInfos = destInfos
+	}
+	return nil
+}
+
+// layerDigestsDiffer return true iff the digests in a and b differ (ignoring sizes and possible other fields)
+func layerDigestsDiffer(a, b []types.BlobInfo) bool {
+	// A different number of layers is trivially a difference.
+	if len(a) != len(b) {
+		return true
+	}
+	// Compare pairwise by Digest only; Size and other fields are deliberately ignored.
+	for i := range a {
+		if a[i].Digest != b[i].Digest {
+			return true
+		}
+	}
+	return false
+}
+
+// copyUpdatedConfigAndManifest updates the image per ic.manifestUpdates, if necessary,
+// stores the resulting config and manifest to the destination, and returns the stored manifest.
+func (ic *imageCopier) copyUpdatedConfigAndManifest() ([]byte, error) {
+	pendingImage := ic.src
+	// Only build an updated image if manifestUpdates carries more than the InformationOnly data.
+	if !reflect.DeepEqual(*ic.manifestUpdates, types.ManifestUpdateOptions{InformationOnly: ic.manifestUpdates.InformationOnly}) {
+		if !ic.canModifyManifest {
+			return nil, errors.Errorf("Internal error: copy needs an updated manifest but that was known to be forbidden")
+		}
+		if !ic.diffIDsAreNeeded && ic.src.UpdatedImageNeedsLayerDiffIDs(*ic.manifestUpdates) {
+			// We have set ic.diffIDsAreNeeded based on the preferred MIME type returned by determineManifestConversion.
+			// So, this can only happen if we are trying to upload using one of the other MIME type candidates.
+			// Because UpdatedImageNeedsLayerDiffIDs is true only when converting from s1 to s2, this case should only arise
+			// when ic.dest.SupportedManifestMIMETypes() includes both s1 and s2, the upload using s1 failed, and we are now trying s2.
+			// Supposedly s2-only registries do not exist or are extremely rare, so failing with this error message is good enough for now.
+			// If handling such registries turns out to be necessary, we could compute ic.diffIDsAreNeeded based on the full list of manifest MIME type candidates.
+			return nil, errors.Errorf("Can not convert image to %s, preparing DiffIDs for this case is not supported", ic.manifestUpdates.ManifestMIMEType)
+		}
+		pi, err := ic.src.UpdatedImage(*ic.manifestUpdates)
+		if err != nil {
+			return nil, errors.Wrap(err, "Error creating an updated image manifest")
+		}
+		pendingImage = pi
+	}
+	manifest, _, err := pendingImage.Manifest()
+	if err != nil {
+		return nil, errors.Wrap(err, "Error reading manifest")
+	}
+
+	if err := ic.copyConfig(pendingImage); err != nil {
+		return nil, err
+	}
+
+	fmt.Fprintf(ic.reportWriter, "Writing manifest to image destination\n")
+	if err := ic.dest.PutManifest(manifest); err != nil {
+		return nil, errors.Wrap(err, "Error writing manifest")
+	}
+	return manifest, nil
+}
+
+// copyConfig copies config.json, if any, from src to dest.
+func (ic *imageCopier) copyConfig(src types.Image) error {
+	srcInfo := src.ConfigInfo()
+	// An empty digest presumably means the image format has no separate config blob
+	// (NOTE(review): confirm against types.Image.ConfigInfo documentation).
+	if srcInfo.Digest != "" {
+		fmt.Fprintf(ic.reportWriter, "Copying config %s\n", srcInfo.Digest)
+		configBlob, err := src.ConfigBlob()
+		if err != nil {
+			return errors.Wrapf(err, "Error reading config blob %s", srcInfo.Digest)
+		}
+		// canCompress=false: the config must be stored as-is so its digest stays valid.
+		destInfo, err := ic.copyBlobFromStream(bytes.NewReader(configBlob), srcInfo, nil, false)
+		if err != nil {
+			return err
+		}
+		if destInfo.Digest != srcInfo.Digest {
+			return errors.Errorf("Internal error: copying uncompressed config blob %s changed digest to %s", srcInfo.Digest, destInfo.Digest)
+		}
+	}
+	return nil
+}
+
+// diffIDResult contains both a digest value and an error from diffIDComputationGoroutine.
+// We could also send the error through the pipeReader, but this more cleanly separates the copying of the layer and the DiffID computation.
+type diffIDResult struct {
+	digest digest.Digest // uncompressed DiffID; "" until successfully computed
+	err    error
+}
+
+// copyLayer copies a layer with srcInfo (with known Digest and possibly known Size) in src to dest, perhaps compressing it if canCompress,
+// and returns a complete blobInfo of the copied layer, and a value for LayerDiffIDs if diffIDIsNeeded
+// (the returned digest is "" when no DiffID was needed or previously cached).
+func (ic *imageCopier) copyLayer(srcInfo types.BlobInfo) (types.BlobInfo, digest.Digest, error) {
+	// Check if we already have a blob with this digest
+	haveBlob, extantBlobSize, err := ic.dest.HasBlob(srcInfo)
+	if err != nil {
+		return types.BlobInfo{}, "", errors.Wrapf(err, "Error checking for blob %s at destination", srcInfo.Digest)
+	}
+	// If we already have a cached diffID for this blob, we don't need to compute it
+	diffIDIsNeeded := ic.diffIDsAreNeeded && (ic.cachedDiffIDs[srcInfo.Digest] == "")
+	// If we already have the blob, and we don't need to recompute the diffID, then we might be able to avoid reading it again
+	if haveBlob && !diffIDIsNeeded {
+		// Check the blob sizes match, if we were given a size this time
+		if srcInfo.Size != -1 && srcInfo.Size != extantBlobSize {
+			return types.BlobInfo{}, "", errors.Errorf("Error: blob %s is already present, but with size %d instead of %d", srcInfo.Digest, extantBlobSize, srcInfo.Size)
+		}
+		srcInfo.Size = extantBlobSize
+		// Tell the image destination that this blob's delta is being applied again. For some image destinations, this can be faster than using GetBlob/PutBlob
+		blobinfo, err := ic.dest.ReapplyBlob(srcInfo)
+		if err != nil {
+			return types.BlobInfo{}, "", errors.Wrapf(err, "Error reapplying blob %s at destination", srcInfo.Digest)
+		}
+		fmt.Fprintf(ic.reportWriter, "Skipping fetch of repeat blob %s\n", srcInfo.Digest)
+		return blobinfo, ic.cachedDiffIDs[srcInfo.Digest], err // err is necessarily nil at this point
+	}
+
+	// Fallback: copy the layer, computing the diffID if we need to do so
+	fmt.Fprintf(ic.reportWriter, "Copying blob %s\n", srcInfo.Digest)
+	srcStream, srcBlobSize, err := ic.rawSource.GetBlob(srcInfo)
+	if err != nil {
+		return types.BlobInfo{}, "", errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
+	}
+	defer srcStream.Close()
+
+	blobInfo, diffIDChan, err := ic.copyLayerFromStream(srcStream, types.BlobInfo{Digest: srcInfo.Digest, Size: srcBlobSize},
+		diffIDIsNeeded)
+	if err != nil {
+		return types.BlobInfo{}, "", err
+	}
+	var diffIDResult diffIDResult // = {digest:""}
+	if diffIDIsNeeded {
+		// Block until the DiffID goroutine finishes; diffIDComputationGoroutine always sends exactly one value.
+		diffIDResult = <-diffIDChan
+		if diffIDResult.err != nil {
+			return types.BlobInfo{}, "", errors.Wrap(diffIDResult.err, "Error computing layer DiffID")
+		}
+		logrus.Debugf("Computed DiffID %s for layer %s", diffIDResult.digest, srcInfo.Digest)
+		ic.cachedDiffIDs[srcInfo.Digest] = diffIDResult.digest
+	}
+	return blobInfo, diffIDResult.digest, nil
+}
+
+// copyLayerFromStream is an implementation detail of copyLayer; mostly providing a separate “defer” scope.
+// it copies a blob with srcInfo (with known Digest and possibly known Size) from srcStream to dest,
+// perhaps compressing the stream if canCompress,
+// and returns a complete blobInfo of the copied blob and perhaps a <-chan diffIDResult if diffIDIsNeeded, to be read by the caller.
+func (ic *imageCopier) copyLayerFromStream(srcStream io.Reader, srcInfo types.BlobInfo,
+	diffIDIsNeeded bool) (types.BlobInfo, <-chan diffIDResult, error) {
+	var getDiffIDRecorder func(compression.DecompressorFunc) io.Writer // = nil
+	var diffIDChan chan diffIDResult
+
+	err := errors.New("Internal error: unexpected panic in copyLayer") // For pipeWriter.CloseWithError below
+	if diffIDIsNeeded {
+		diffIDChan = make(chan diffIDResult, 1) // Buffered, so that sending a value after this or our caller has failed and exited does not block.
+		pipeReader, pipeWriter := io.Pipe()
+		defer func() { // Note that this is not the same as {defer pipeWriter.CloseWithError(err)}; we need err to be evaluated lazily.
+			pipeWriter.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
+		}()
+
+		getDiffIDRecorder = func(decompressor compression.DecompressorFunc) io.Writer {
+			// If this fails, e.g. because we have exited and due to pipeWriter.CloseWithError() above further
+			// reading from the pipe has failed, we don’t really care.
+			// We only read from diffIDChan if the rest of the flow has succeeded, and when we do read from it,
+			// the return value includes an error indication, which we do check.
+			//
+			// If this gets never called, pipeReader will not be used anywhere, but pipeWriter will only be
+			// closed above, so we are happy enough with both pipeReader and pipeWriter to just get collected by GC.
+			go diffIDComputationGoroutine(diffIDChan, pipeReader, decompressor) // Closes pipeReader
+			return pipeWriter
+		}
+	}
+	// ic.canModifyManifest doubles as canCompress: compressing a layer changes its digest, and hence the manifest.
+	blobInfo, err := ic.copyBlobFromStream(srcStream, srcInfo, getDiffIDRecorder, ic.canModifyManifest) // Sets err to nil on success
+	return blobInfo, diffIDChan, err
+	// We need the defer … pipeWriter.CloseWithError() to happen HERE so that the caller can block on reading from diffIDChan
+}
+
+// diffIDComputationGoroutine reads all input from layerStream, uncompresses using decompressor if necessary, and sends its digest, and status, if any, to dest.
+// Exactly one diffIDResult is always sent on dest: the deferred send fires even on a panic,
+// in which case the placeholder "unexpected panic" error below is what gets delivered.
+func diffIDComputationGoroutine(dest chan<- diffIDResult, layerStream io.ReadCloser, decompressor compression.DecompressorFunc) {
+	result := diffIDResult{
+		digest: "",
+		err:    errors.New("Internal error: unexpected panic in diffIDComputationGoroutine"),
+	}
+	defer func() { dest <- result }()
+	defer layerStream.Close() // We do not care to bother the other end of the pipe with other failures; we send them to dest instead.
+
+	result.digest, result.err = computeDiffID(layerStream, decompressor)
+}
+
+// computeDiffID reads all input from layerStream, uncompresses it using decompressor if necessary, and returns its digest.
+func computeDiffID(stream io.Reader, decompressor compression.DecompressorFunc) (digest.Digest, error) {
+	if decompressor != nil {
+		s, err := decompressor(stream)
+		if err != nil {
+			return "", err
+		}
+		stream = s
+	}
+
+	// The DiffID is the digest of the uncompressed content, using the canonical algorithm.
+	return digest.Canonical.FromReader(stream)
+}
+
+// copyBlobFromStream copies a blob with srcInfo (with known Digest and possibly known Size) from srcStream to dest,
+// perhaps sending a copy to an io.Writer if getOriginalLayerCopyWriter != nil,
+// perhaps compressing it if canCompress,
+// and returns a complete blobInfo of the copied blob.
+func (ic *imageCopier) copyBlobFromStream(srcStream io.Reader, srcInfo types.BlobInfo,
+	getOriginalLayerCopyWriter func(decompressor compression.DecompressorFunc) io.Writer,
+	canCompress bool) (types.BlobInfo, error) {
+	// The copying happens through a pipeline of connected io.Readers.
+	// === Input: srcStream
+
+	// === Process input through digestingReader to validate against the expected digest.
+	// Be paranoid; in case PutBlob somehow managed to ignore an error from digestingReader,
+	// use a separate validation failure indicator.
+	// Note that we don't use a stronger "validationSucceeded" indicator, because
+	// dest.PutBlob may detect that the layer already exists, in which case we don't
+	// read stream to the end, and validation does not happen.
+	digestingReader, err := newDigestingReader(srcStream, srcInfo.Digest)
+	if err != nil {
+		return types.BlobInfo{}, errors.Wrapf(err, "Error preparing to verify blob %s", srcInfo.Digest)
+	}
+	var destStream io.Reader = digestingReader
+
+	// === Detect compression of the input stream.
+	// This requires us to “peek ahead” into the stream to read the initial part, which requires us to chain through another io.Reader returned by DetectCompression.
+	decompressor, destStream, err := compression.DetectCompression(destStream) // We could skip this in some cases, but let's keep the code path uniform
+	if err != nil {
+		return types.BlobInfo{}, errors.Wrapf(err, "Error reading blob %s", srcInfo.Digest)
+	}
+	isCompressed := decompressor != nil
+
+	// === Report progress using a pb.Reader.
+	// NOTE(review): srcInfo.Size may be -1 (size unknown); presumably pb copes with a negative total — confirm.
+	bar := pb.New(int(srcInfo.Size)).SetUnits(pb.U_BYTES)
+	bar.Output = ic.reportWriter
+	bar.SetMaxWidth(80)
+	bar.ShowTimeLeft = false
+	bar.ShowPercent = false
+	bar.Start()
+	destStream = bar.NewProxyReader(destStream)
+	defer fmt.Fprint(ic.reportWriter, "\n")
+
+	// === Send a copy of the original, uncompressed, stream, to a separate path if necessary.
+	var originalLayerReader io.Reader // DO NOT USE this other than to drain the input if no other consumer in the pipeline has done so.
+	if getOriginalLayerCopyWriter != nil {
+		destStream = io.TeeReader(destStream, getOriginalLayerCopyWriter(decompressor))
+		originalLayerReader = destStream
+	}
+
+	// === Compress the layer if it is uncompressed and compression is desired
+	var inputInfo types.BlobInfo
+	if !canCompress || isCompressed || !ic.dest.ShouldCompressLayers() {
+		logrus.Debugf("Using original blob without modification")
+		inputInfo = srcInfo
+	} else {
+		logrus.Debugf("Compressing blob on the fly")
+		pipeReader, pipeWriter := io.Pipe()
+		defer pipeReader.Close()
+
+		// If this fails while writing data, it will do pipeWriter.CloseWithError(); if it fails otherwise,
+		// e.g. because we have exited and due to pipeReader.Close() above further writing to the pipe has failed,
+		// we don’t care.
+		go compressGoroutine(pipeWriter, destStream) // Closes pipeWriter
+		destStream = pipeReader
+		// Digest and size of the compressed result are unknown until the upload completes.
+		inputInfo.Digest = ""
+		inputInfo.Size = -1
+	}
+
+	// === Report progress using the ic.progress channel, if required.
+	if ic.progress != nil && ic.progressInterval > 0 {
+		destStream = &progressReader{
+			source:   destStream,
+			channel:  ic.progress,
+			interval: ic.progressInterval,
+			artifact: srcInfo,
+			lastTime: time.Now(),
+		}
+	}
+
+	// === Finally, send the layer stream to dest.
+	uploadedInfo, err := ic.dest.PutBlob(destStream, inputInfo)
+	if err != nil {
+		return types.BlobInfo{}, errors.Wrap(err, "Error writing blob")
+	}
+
+	// This is fairly horrible: the writer from getOriginalLayerCopyWriter wants to consume
+	// all of the input (to compute DiffIDs), even if dest.PutBlob does not need it.
+	// So, read everything from originalLayerReader, which will cause the rest to be
+	// sent there if we are not already at EOF.
+	if getOriginalLayerCopyWriter != nil {
+		logrus.Debugf("Consuming rest of the original blob to satisfy getOriginalLayerCopyWriter")
+		_, err := io.Copy(ioutil.Discard, originalLayerReader)
+		if err != nil {
+			return types.BlobInfo{}, errors.Wrapf(err, "Error reading input blob %s", srcInfo.Digest)
+		}
+	}
+
+	if digestingReader.validationFailed { // Coverage: This should never happen.
+		return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, digest verification failed but was ignored", srcInfo.Digest)
+	}
+	if inputInfo.Digest != "" && uploadedInfo.Digest != inputInfo.Digest {
+		return types.BlobInfo{}, errors.Errorf("Internal error writing blob %s, blob with digest %s saved with digest %s", srcInfo.Digest, inputInfo.Digest, uploadedInfo.Digest)
+	}
+	return uploadedInfo, nil
+}
+
+// compressGoroutine reads all input from src and writes its compressed equivalent to dest.
+// dest is always closed before returning: with the io.Copy error (or nil) on the normal path,
+// or with the placeholder "unexpected panic" error if the copy panics before assigning err.
+func compressGoroutine(dest *io.PipeWriter, src io.Reader) {
+	err := errors.New("Internal error: unexpected panic in compressGoroutine")
+	defer func() { // Note that this is not the same as {defer dest.CloseWithError(err)}; we need err to be evaluated lazily.
+		dest.CloseWithError(err) // CloseWithError(nil) is equivalent to Close()
+	}()
+
+	zipper := gzip.NewWriter(dest)
+	defer zipper.Close()
+
+	_, err = io.Copy(zipper, src) // Sets err to nil, i.e. causes dest.Close()
+}
diff --git a/vendor/github.com/containers/image/copy/manifest.go b/vendor/github.com/containers/image/copy/manifest.go
new file mode 100644
index 0000000000..6cb7517d20
--- /dev/null
+++ b/vendor/github.com/containers/image/copy/manifest.go
@@ -0,0 +1,102 @@
+package copy
+
+import (
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+)
+
+// preferredManifestMIMETypes lists manifest MIME types in order of our preference, if we can't use the original manifest and need to convert.
+// Prefer v2s2 to v2s1 because v2s2 does not need to be changed when uploading to a different location.
+// Include v2s1 signed but not v2s1 unsigned, because docker/distribution requires a signature even if the unsigned MIME type is used.
+// Types not listed here are still attempted last, in destination order (see determineManifestConversion).
+var preferredManifestMIMETypes = []string{manifest.DockerV2Schema2MediaType, manifest.DockerV2Schema1SignedMediaType}
+
+// orderedSet is a list of strings (MIME types in our case), with each string appearing at most once.
+type orderedSet struct {
+	list     []string
+	included map[string]struct{} // membership index over list
+}
+
+// newOrderedSet creates a correctly initialized orderedSet.
+// [Sometimes it would be really nice if Golang had constructors…]
+func newOrderedSet() *orderedSet {
+	return &orderedSet{
+		list:     []string{},
+		included: map[string]struct{}{},
+	}
+}
+
+// append adds s to the end of os, only if it is not included already.
+// (Note: this is a method on orderedSet, not the built-in append.)
+func (os *orderedSet) append(s string) {
+	if _, ok := os.included[s]; !ok {
+		os.list = append(os.list, s)
+		os.included[s] = struct{}{}
+	}
+}
+
+// determineManifestConversion updates manifestUpdates to convert manifest to a supported MIME type, if necessary and canModifyManifest.
+// Note that the conversion will only happen later, through src.UpdatedImage
+// Returns the preferred manifest MIME type (whether we are converting to it or using it unmodified),
+// and a list of other possible alternatives, in order.
+func determineManifestConversion(manifestUpdates *types.ManifestUpdateOptions, src types.Image, destSupportedManifestMIMETypes []string, canModifyManifest bool) (string, []string, error) {
+	_, srcType, err := src.Manifest()
+	if err != nil { // This should have been cached?!
+		return "", nil, errors.Wrap(err, "Error reading manifest")
+	}
+
+	if len(destSupportedManifestMIMETypes) == 0 {
+		return srcType, []string{}, nil // Anything goes; just use the original as is, do not try any conversions.
+	}
+	// Build a membership set for the "does dest support this type" checks below.
+	supportedByDest := map[string]struct{}{}
+	for _, t := range destSupportedManifestMIMETypes {
+		supportedByDest[t] = struct{}{}
+	}
+
+	// destSupportedManifestMIMETypes is a static guess; a particular registry may still only support a subset of the types.
+	// So, build a list of types to try in order of decreasing preference.
+	// FIXME? This treats manifest.DockerV2Schema1SignedMediaType and manifest.DockerV2Schema1MediaType as distinct,
+	// although we are not really making any conversion, and it is very unlikely that a destination would support one but not the other.
+	// In practice, schema1 is probably the lowest common denominator, so we would expect to try the first one of the MIME types
+	// and never attempt the other one.
+	prioritizedTypes := newOrderedSet()
+
+	// First of all, prefer to keep the original manifest unmodified.
+	if _, ok := supportedByDest[srcType]; ok {
+		prioritizedTypes.append(srcType)
+	}
+	if !canModifyManifest {
+		// We could also drop the !canModifyManifest parameter and have the caller
+		// make the choice; it is already doing that to an extent, to improve error
+		// messages. But it is nice to hide the “if !canModifyManifest, do no conversion”
+		// special case in here; the caller can then worry (or not) only about a good UI.
+		logrus.Debugf("We can't modify the manifest, hoping for the best...")
+		return srcType, []string{}, nil // Take our chances - FIXME? Or should we fail without trying?
+	}
+
+	// Then use our list of preferred types.
+	for _, t := range preferredManifestMIMETypes {
+		if _, ok := supportedByDest[t]; ok {
+			prioritizedTypes.append(t)
+		}
+	}
+
+	// Finally, try anything else the destination supports.
+	// (orderedSet.append deduplicates, so types already added above are not repeated.)
+	for _, t := range destSupportedManifestMIMETypes {
+		prioritizedTypes.append(t)
+	}
+
+	logrus.Debugf("Manifest has MIME type %s, ordered candidate list [%s]", srcType, strings.Join(prioritizedTypes.list, ", "))
+	if len(prioritizedTypes.list) == 0 { // Coverage: destSupportedManifestMIMETypes is not empty (or we would have exited in the “Anything goes” case above), so this should never happen.
+		return "", nil, errors.New("Internal error: no candidate MIME types")
+	}
+	preferredType := prioritizedTypes.list[0]
+	if preferredType != srcType {
+		manifestUpdates.ManifestMIMEType = preferredType
+	} else {
+		logrus.Debugf("... will first try using the original manifest unmodified")
+	}
+	return preferredType, prioritizedTypes.list[1:], nil
+}
diff --git a/vendor/github.com/containers/image/copy/progress_reader.go b/vendor/github.com/containers/image/copy/progress_reader.go
new file mode 100644
index 0000000000..b670ee59f1
--- /dev/null
+++ b/vendor/github.com/containers/image/copy/progress_reader.go
@@ -0,0 +1,28 @@
+package copy
+
+import (
+ "io"
+ "time"
+
+ "github.com/containers/image/types"
+)
+
+// progressReader is a reader that reports its progress on an interval.
+type progressReader struct {
+ source io.Reader
+ channel chan types.ProgressProperties
+ interval time.Duration
+ artifact types.BlobInfo
+ lastTime time.Time
+ offset uint64
+}
+
+func (r *progressReader) Read(p []byte) (int, error) {
+ n, err := r.source.Read(p)
+ r.offset += uint64(n)
+ if time.Since(r.lastTime) > r.interval {
+ r.channel <- types.ProgressProperties{Artifact: r.artifact, Offset: r.offset}
+ r.lastTime = time.Now()
+ }
+ return n, err
+}
diff --git a/vendor/github.com/containers/image/copy/sign.go b/vendor/github.com/containers/image/copy/sign.go
new file mode 100644
index 0000000000..9187d70b33
--- /dev/null
+++ b/vendor/github.com/containers/image/copy/sign.go
@@ -0,0 +1,35 @@
+package copy
+
+import (
+ "fmt"
+ "io"
+
+ "github.com/containers/image/signature"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+)
+
+// createSignature creates a new signature of manifest at (identified by) dest using keyIdentity.
+func createSignature(dest types.ImageDestination, manifest []byte, keyIdentity string, reportWriter io.Writer) ([]byte, error) {
+ mech, err := signature.NewGPGSigningMechanism()
+ if err != nil {
+ return nil, errors.Wrap(err, "Error initializing GPG")
+ }
+ defer mech.Close()
+ if err := mech.SupportsSigning(); err != nil {
+ return nil, errors.Wrap(err, "Signing not supported")
+ }
+
+ dockerReference := dest.Reference().DockerReference()
+ if dockerReference == nil {
+ return nil, errors.Errorf("Cannot determine canonical Docker reference for destination %s", transports.ImageName(dest.Reference()))
+ }
+
+ fmt.Fprintf(reportWriter, "Signing manifest\n")
+ newSig, err := signature.SignDockerManifest(manifest, dockerReference.String(), mech, keyIdentity)
+ if err != nil {
+ return nil, errors.Wrap(err, "Error creating signature")
+ }
+ return newSig, nil
+}
diff --git a/vendor/github.com/containers/image/docker/archive/dest.go b/vendor/github.com/containers/image/docker/archive/dest.go
new file mode 100644
index 0000000000..9fc85bd85b
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/archive/dest.go
@@ -0,0 +1,66 @@
+package archive
+
+import (
+ "io"
+ "os"
+
+ "github.com/containers/image/docker/tarfile"
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+)
+
+type archiveImageDestination struct {
+ *tarfile.Destination // Implements most of types.ImageDestination
+ ref archiveReference
+ writer io.Closer
+}
+
+func newImageDestination(ctx *types.SystemContext, ref archiveReference) (types.ImageDestination, error) {
+ if ref.destinationRef == nil {
+ return nil, errors.Errorf("docker-archive: destination reference not supplied (must be of form <path>:<reference:tag>)")
+ }
+
+ // ref.path can be either a pipe or a regular file
+ // in the case of a pipe, we require that we can open it for write
+ // in the case of a regular file, we don't want to overwrite any pre-existing file
+ // so we check for Size() == 0 below (This is racy, but using O_EXCL would also be racy,
+ // only in a different way. Either way, it’s up to the user to not have two writers to the same path.)
+ fh, err := os.OpenFile(ref.path, os.O_WRONLY|os.O_CREATE, 0644)
+ if err != nil {
+ return nil, errors.Wrapf(err, "error opening file %q", ref.path)
+ }
+
+ fhStat, err := fh.Stat()
+ if err != nil {
+ return nil, errors.Wrapf(err, "error statting file %q", ref.path)
+ }
+
+ if fhStat.Mode().IsRegular() && fhStat.Size() != 0 {
+ return nil, errors.New("docker-archive doesn't support modifying existing images")
+ }
+
+ return &archiveImageDestination{
+ Destination: tarfile.NewDestination(fh, ref.destinationRef),
+ ref: ref,
+ writer: fh,
+ }, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *archiveImageDestination) Reference() types.ImageReference {
+ return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *archiveImageDestination) Close() error {
+ return d.writer.Close()
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *archiveImageDestination) Commit() error {
+ return d.Destination.Commit()
+}
diff --git a/vendor/github.com/containers/image/docker/archive/src.go b/vendor/github.com/containers/image/docker/archive/src.go
new file mode 100644
index 0000000000..5c5267a3a2
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/archive/src.go
@@ -0,0 +1,36 @@
+package archive
+
+import (
+ "github.com/Sirupsen/logrus"
+ "github.com/containers/image/docker/tarfile"
+ "github.com/containers/image/types"
+)
+
+type archiveImageSource struct {
+ *tarfile.Source // Implements most of types.ImageSource
+ ref archiveReference
+}
+
+// newImageSource returns a types.ImageSource for the specified image reference.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSource(ctx *types.SystemContext, ref archiveReference) types.ImageSource {
+ if ref.destinationRef != nil {
+ logrus.Warnf("docker-archive: references are not supported for sources (ignoring)")
+ }
+ src := tarfile.NewSource(ref.path)
+ return &archiveImageSource{
+ Source: src,
+ ref: ref,
+ }
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *archiveImageSource) Reference() types.ImageReference {
+ return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *archiveImageSource) Close() error {
+ return nil
+}
diff --git a/vendor/github.com/containers/image/docker/archive/transport.go b/vendor/github.com/containers/image/docker/archive/transport.go
new file mode 100644
index 0000000000..59c68c3beb
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/archive/transport.go
@@ -0,0 +1,155 @@
+package archive
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/docker/reference"
+ ctrImage "github.com/containers/image/image"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for local Docker archives.
+var Transport = archiveTransport{}
+
+type archiveTransport struct{}
+
+func (t archiveTransport) Name() string {
+ return "docker-archive"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t archiveTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t archiveTransport) ValidatePolicyConfigurationScope(scope string) error {
+ // See the explanation in archiveReference.PolicyConfigurationIdentity.
+ return errors.New(`docker-archive: does not support any scopes except the default "" one`)
+}
+
+// archiveReference is an ImageReference for Docker images.
+type archiveReference struct {
+ destinationRef reference.NamedTagged // only used for destinations
+ path string
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into a Docker ImageReference.
+func ParseReference(refString string) (types.ImageReference, error) {
+ if refString == "" {
+ return nil, errors.Errorf("docker-archive reference %s isn't of the form <path>[:<reference>]", refString)
+ }
+
+ parts := strings.SplitN(refString, ":", 2)
+ path := parts[0]
+ var destinationRef reference.NamedTagged
+
+ // A :tag was specified, which is only necessary for destinations.
+ if len(parts) == 2 {
+ ref, err := reference.ParseNormalizedNamed(parts[1])
+ if err != nil {
+ return nil, errors.Wrapf(err, "docker-archive parsing reference")
+ }
+ ref = reference.TagNameOnly(ref)
+
+ if _, isDigest := ref.(reference.Canonical); isDigest {
+ return nil, errors.Errorf("docker-archive doesn't support digest references: %s", refString)
+ }
+
+ refTagged, isTagged := ref.(reference.NamedTagged)
+ if !isTagged {
+ // Really shouldn't be hit...
+ return nil, errors.Errorf("internal error: reference is not tagged even after reference.TagNameOnly: %s", refString)
+ }
+ destinationRef = refTagged
+ }
+
+ return archiveReference{
+ destinationRef: destinationRef,
+ path: path,
+ }, nil
+}
+
+func (ref archiveReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref archiveReference) StringWithinTransport() string {
+ if ref.destinationRef == nil {
+ return ref.path
+ }
+ return fmt.Sprintf("%s:%s", ref.path, ref.destinationRef.String())
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref archiveReference) DockerReference() reference.Named {
+ return ref.destinationRef
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref archiveReference) PolicyConfigurationIdentity() string {
+ // Punt, the justification is similar to dockerReference.PolicyConfigurationIdentity.
+ return ""
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref archiveReference) PolicyConfigurationNamespaces() []string {
+ // TODO
+ return []string{}
+}
+
+// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned Image.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+func (ref archiveReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+ src := newImageSource(ctx, ref)
+ return ctrImage.FromSource(src)
+}
+
+// NewImageSource returns a types.ImageSource for this reference,
+// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
+// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
+// The caller must call .Close() on the returned ImageSource.
+func (ref archiveReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
+ return newImageSource(ctx, ref), nil
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref archiveReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
+ return newImageDestination(ctx, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref archiveReference) DeleteImage(ctx *types.SystemContext) error {
+ // Not really supported, for safety reasons.
+ return errors.New("Deleting images not implemented for docker-archive: images")
+}
diff --git a/vendor/github.com/containers/image/docker/docker_client.go b/vendor/github.com/containers/image/docker/docker_client.go
new file mode 100644
index 0000000000..ca442828b9
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/docker_client.go
@@ -0,0 +1,584 @@
+package docker
+
+import (
+ "context"
+ "crypto/tls"
+ "encoding/base64"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net"
+ "net/http"
+ "os"
+ "path/filepath"
+ "strings"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/types"
+ "github.com/containers/storage/pkg/homedir"
+ "github.com/docker/distribution/registry/client"
+ "github.com/docker/go-connections/sockets"
+ "github.com/docker/go-connections/tlsconfig"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+const (
+ dockerHostname = "docker.io"
+ dockerRegistry = "registry-1.docker.io"
+ dockerAuthRegistry = "https://index.docker.io/v1/"
+
+ dockerCfg = ".docker"
+ dockerCfgFileName = "config.json"
+ dockerCfgObsolete = ".dockercfg"
+
+ systemPerHostCertDirPath = "/etc/docker/certs.d"
+
+ resolvedPingV2URL = "%s://%s/v2/"
+ resolvedPingV1URL = "%s://%s/v1/_ping"
+ tagsPath = "/v2/%s/tags/list"
+ manifestPath = "/v2/%s/manifests/%s"
+ blobsPath = "/v2/%s/blobs/%s"
+ blobUploadPath = "/v2/%s/blobs/uploads/"
+ extensionsSignaturePath = "/extensions/v2/%s/signatures/%s"
+
+ minimumTokenLifetimeSeconds = 60
+
+ extensionSignatureSchemaVersion = 2 // extensionSignature.Version
+ extensionSignatureTypeAtomic = "atomic" // extensionSignature.Type
+)
+
+// ErrV1NotSupported is returned when we're trying to talk to a
+// docker V1 registry.
+var ErrV1NotSupported = errors.New("can't talk to a V1 docker registry")
+
+// extensionSignature and extensionSignatureList come from github.com/openshift/origin/pkg/dockerregistry/server/signaturedispatcher.go:
+// signature represents a Docker image signature.
+type extensionSignature struct {
+ Version int `json:"schemaVersion"` // Version specifies the schema version
+ Name string `json:"name"` // Name must be in "sha256:@signatureName" format
+ Type string `json:"type"` // Type is optional, if not set it will be defaulted to "AtomicImageV1"
+ Content []byte `json:"content"` // Content contains the signature
+}
+
+// signatureList represents list of Docker image signatures.
+type extensionSignatureList struct {
+ Signatures []extensionSignature `json:"signatures"`
+}
+
+type bearerToken struct {
+ Token string `json:"token"`
+ ExpiresIn int `json:"expires_in"`
+ IssuedAt time.Time `json:"issued_at"`
+}
+
+// dockerClient is configuration for dealing with a single Docker registry.
+type dockerClient struct {
+ // The following members are set by newDockerClient and do not change afterwards.
+ ctx *types.SystemContext
+ registry string
+ username string
+ password string
+ client *http.Client
+ signatureBase signatureStorageBase
+ scope authScope
+ // The following members are detected registry properties:
+ // They are set after a successful detectProperties(), and never change afterwards.
+ scheme string // Empty value also used to indicate detectProperties() has not yet succeeded.
+ challenges []challenge
+ supportsSignatures bool
+ // The following members are private state for setupRequestAuth, both are valid if token != nil.
+ token *bearerToken
+ tokenExpiration time.Time
+}
+
+type authScope struct {
+ remoteName string
+ actions string
+}
+
+// this is cloned from docker/go-connections because upstream docker has changed
+// it and make deps here fails otherwise.
+// We'll drop this once we upgrade to docker 1.13.x deps.
+func serverDefault() *tls.Config {
+ return &tls.Config{
+ // Avoid fallback to SSL protocols < TLS1.0
+ MinVersion: tls.VersionTLS10,
+ PreferServerCipherSuites: true,
+ CipherSuites: tlsconfig.DefaultServerAcceptedCiphers,
+ }
+}
+
+func newTransport() *http.Transport {
+ direct := &net.Dialer{
+ Timeout: 30 * time.Second,
+ KeepAlive: 30 * time.Second,
+ DualStack: true,
+ }
+ tr := &http.Transport{
+ Proxy: http.ProxyFromEnvironment,
+ Dial: direct.Dial,
+ TLSHandshakeTimeout: 10 * time.Second,
+ // TODO(dmcgowan): Call close idle connections when complete and use keep alive
+ DisableKeepAlives: true,
+ }
+ proxyDialer, err := sockets.DialerFromEnvironment(direct)
+ if err == nil {
+ tr.Dial = proxyDialer.Dial
+ }
+ return tr
+}
+
+// dockerCertDir returns a path to a directory to be consumed by setupCertificates() depending on ctx and hostPort.
+func dockerCertDir(ctx *types.SystemContext, hostPort string) string {
+ if ctx != nil && ctx.DockerCertPath != "" {
+ return ctx.DockerCertPath
+ }
+ var hostCertDir string
+ if ctx != nil && ctx.DockerPerHostCertDirPath != "" {
+ hostCertDir = ctx.DockerPerHostCertDirPath
+ } else if ctx != nil && ctx.RootForImplicitAbsolutePaths != "" {
+ hostCertDir = filepath.Join(ctx.RootForImplicitAbsolutePaths, systemPerHostCertDirPath)
+ } else {
+ hostCertDir = systemPerHostCertDirPath
+ }
+ return filepath.Join(hostCertDir, hostPort)
+}
+
+func setupCertificates(dir string, tlsc *tls.Config) error {
+ logrus.Debugf("Looking for TLS certificates and private keys in %s", dir)
+ fs, err := ioutil.ReadDir(dir)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil
+ }
+ return err
+ }
+
+ for _, f := range fs {
+ fullPath := filepath.Join(dir, f.Name())
+ if strings.HasSuffix(f.Name(), ".crt") {
+ systemPool, err := tlsconfig.SystemCertPool()
+ if err != nil {
+ return errors.Wrap(err, "unable to get system cert pool")
+ }
+ tlsc.RootCAs = systemPool
+ logrus.Debugf(" crt: %s", fullPath)
+ data, err := ioutil.ReadFile(fullPath)
+ if err != nil {
+ return err
+ }
+ tlsc.RootCAs.AppendCertsFromPEM(data)
+ }
+ if strings.HasSuffix(f.Name(), ".cert") {
+ certName := f.Name()
+ keyName := certName[:len(certName)-5] + ".key"
+ logrus.Debugf(" cert: %s", fullPath)
+ if !hasFile(fs, keyName) {
+ return errors.Errorf("missing key %s for client certificate %s. Note that CA certificates should use the extension .crt", keyName, certName)
+ }
+ cert, err := tls.LoadX509KeyPair(filepath.Join(dir, certName), filepath.Join(dir, keyName))
+ if err != nil {
+ return err
+ }
+ tlsc.Certificates = append(tlsc.Certificates, cert)
+ }
+ if strings.HasSuffix(f.Name(), ".key") {
+ keyName := f.Name()
+ certName := keyName[:len(keyName)-4] + ".cert"
+ logrus.Debugf(" key: %s", fullPath)
+ if !hasFile(fs, certName) {
+ return errors.Errorf("missing client certificate %s for key %s", certName, keyName)
+ }
+ }
+ }
+ return nil
+}
+
+func hasFile(files []os.FileInfo, name string) bool {
+ for _, f := range files {
+ if f.Name() == name {
+ return true
+ }
+ }
+ return false
+}
+
+// newDockerClient returns a new dockerClient instance for refHostname (a host as specified in the Docker image reference, not canonicalized to dockerRegistry)
+// “write” specifies whether the client will be used for "write" access (in particular passed to lookaside.go:toplevelFromSection)
+func newDockerClient(ctx *types.SystemContext, ref dockerReference, write bool, actions string) (*dockerClient, error) {
+ registry := reference.Domain(ref.ref)
+ if registry == dockerHostname {
+ registry = dockerRegistry
+ }
+ username, password, err := getAuth(ctx, reference.Domain(ref.ref))
+ if err != nil {
+ return nil, err
+ }
+ tr := newTransport()
+ tr.TLSClientConfig = serverDefault()
+ // It is undefined whether the host[:port] string for dockerHostname should be dockerHostname or dockerRegistry,
+ // because docker/docker does not read the certs.d subdirectory at all in that case. We use the user-visible
+ // dockerHostname here, because it is more symmetrical to read the configuration in that case as well, and because
+ // generally the UI hides the existence of the different dockerRegistry. But note that this behavior is
+ // undocumented and may change if docker/docker changes.
+ certDir := dockerCertDir(ctx, reference.Domain(ref.ref))
+ if err := setupCertificates(certDir, tr.TLSClientConfig); err != nil {
+ return nil, err
+ }
+ if ctx != nil && ctx.DockerInsecureSkipTLSVerify {
+ tr.TLSClientConfig.InsecureSkipVerify = true
+ }
+ client := &http.Client{Transport: tr}
+
+ sigBase, err := configuredSignatureStorageBase(ctx, ref, write)
+ if err != nil {
+ return nil, err
+ }
+
+ return &dockerClient{
+ ctx: ctx,
+ registry: registry,
+ username: username,
+ password: password,
+ client: client,
+ signatureBase: sigBase,
+ scope: authScope{
+ actions: actions,
+ remoteName: reference.Path(ref.ref),
+ },
+ }, nil
+}
+
+// makeRequest creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
+// The host name and schema is taken from the client or autodetected, and the path is relative to it, i.e. the path usually starts with /v2/.
+func (c *dockerClient) makeRequest(ctx context.Context, method, path string, headers map[string][]string, stream io.Reader) (*http.Response, error) {
+ if err := c.detectProperties(ctx); err != nil {
+ return nil, err
+ }
+
+ url := fmt.Sprintf("%s://%s%s", c.scheme, c.registry, path)
+ return c.makeRequestToResolvedURL(ctx, method, url, headers, stream, -1, true)
+}
+
+// makeRequestToResolvedURL creates and executes a http.Request with the specified parameters, adding authentication and TLS options for the Docker client.
+// streamLen, if not -1, specifies the length of the data expected on stream.
+// makeRequest should generally be preferred.
+// TODO(runcom): too many arguments here, use a struct
+func (c *dockerClient) makeRequestToResolvedURL(ctx context.Context, method, url string, headers map[string][]string, stream io.Reader, streamLen int64, sendAuth bool) (*http.Response, error) {
+ req, err := http.NewRequest(method, url, stream)
+ if err != nil {
+ return nil, err
+ }
+ req = req.WithContext(ctx)
+ if streamLen != -1 { // Do not blindly overwrite if streamLen == -1, http.NewRequest above can figure out the length of bytes.Reader and similar objects without us having to compute it.
+ req.ContentLength = streamLen
+ }
+ req.Header.Set("Docker-Distribution-API-Version", "registry/2.0")
+ for n, h := range headers {
+ for _, hh := range h {
+ req.Header.Add(n, hh)
+ }
+ }
+ if c.ctx != nil && c.ctx.DockerRegistryUserAgent != "" {
+ req.Header.Add("User-Agent", c.ctx.DockerRegistryUserAgent)
+ }
+ if sendAuth {
+ if err := c.setupRequestAuth(req); err != nil {
+ return nil, err
+ }
+ }
+ logrus.Debugf("%s %s", method, url)
+ res, err := c.client.Do(req)
+ if err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
+// we're using the challenges from the /v2/ ping response and not the one from the destination
+// URL in this request because:
+//
+// 1) docker does that as well
+// 2) gcr.io is sending 401 without a WWW-Authenticate header in the real request
+//
+// debugging: https://github.com/containers/image/pull/211#issuecomment-273426236 and follows up
+func (c *dockerClient) setupRequestAuth(req *http.Request) error {
+ if len(c.challenges) == 0 {
+ return nil
+ }
+ schemeNames := make([]string, 0, len(c.challenges))
+ for _, challenge := range c.challenges {
+ schemeNames = append(schemeNames, challenge.Scheme)
+ switch challenge.Scheme {
+ case "basic":
+ req.SetBasicAuth(c.username, c.password)
+ return nil
+ case "bearer":
+ if c.token == nil || time.Now().After(c.tokenExpiration) {
+ realm, ok := challenge.Parameters["realm"]
+ if !ok {
+ return errors.Errorf("missing realm in bearer auth challenge")
+ }
+ service, _ := challenge.Parameters["service"] // Will be "" if not present
+ scope := fmt.Sprintf("repository:%s:%s", c.scope.remoteName, c.scope.actions)
+ token, err := c.getBearerToken(req.Context(), realm, service, scope)
+ if err != nil {
+ return err
+ }
+ c.token = token
+ c.tokenExpiration = token.IssuedAt.Add(time.Duration(token.ExpiresIn) * time.Second)
+ }
+ req.Header.Set("Authorization", fmt.Sprintf("Bearer %s", c.token.Token))
+ return nil
+ default:
+ logrus.Debugf("no handler for %s authentication", challenge.Scheme)
+ }
+ }
+ logrus.Infof("None of the challenges sent by server (%s) are supported, trying an unauthenticated request anyway", strings.Join(schemeNames, ", "))
+ return nil
+}
+
+func (c *dockerClient) getBearerToken(ctx context.Context, realm, service, scope string) (*bearerToken, error) {
+ authReq, err := http.NewRequest("GET", realm, nil)
+ if err != nil {
+ return nil, err
+ }
+ authReq = authReq.WithContext(ctx)
+ getParams := authReq.URL.Query()
+ if service != "" {
+ getParams.Add("service", service)
+ }
+ if scope != "" {
+ getParams.Add("scope", scope)
+ }
+ authReq.URL.RawQuery = getParams.Encode()
+ if c.username != "" && c.password != "" {
+ authReq.SetBasicAuth(c.username, c.password)
+ }
+ tr := newTransport()
+ // TODO(runcom): insecure for now to contact the external token service
+ tr.TLSClientConfig = &tls.Config{InsecureSkipVerify: true}
+ client := &http.Client{Transport: tr}
+ res, err := client.Do(authReq)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ switch res.StatusCode {
+ case http.StatusUnauthorized:
+ return nil, errors.Errorf("unable to retrieve auth token: 401 unauthorized")
+ case http.StatusOK:
+ break
+ default:
+ return nil, errors.Errorf("unexpected http code: %d, URL: %s", res.StatusCode, authReq.URL)
+ }
+ tokenBlob, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+ var token bearerToken
+ if err := json.Unmarshal(tokenBlob, &token); err != nil {
+ return nil, err
+ }
+ if token.ExpiresIn < minimumTokenLifetimeSeconds {
+ token.ExpiresIn = minimumTokenLifetimeSeconds
+ logrus.Debugf("Increasing token expiration to: %d seconds", token.ExpiresIn)
+ }
+ if token.IssuedAt.IsZero() {
+ token.IssuedAt = time.Now().UTC()
+ }
+ return &token, nil
+}
+
+func getAuth(ctx *types.SystemContext, registry string) (string, string, error) {
+ if ctx != nil && ctx.DockerAuthConfig != nil {
+ return ctx.DockerAuthConfig.Username, ctx.DockerAuthConfig.Password, nil
+ }
+ var dockerAuth dockerConfigFile
+ dockerCfgPath := filepath.Join(getDefaultConfigDir(".docker"), dockerCfgFileName)
+ if _, err := os.Stat(dockerCfgPath); err == nil {
+ j, err := ioutil.ReadFile(dockerCfgPath)
+ if err != nil {
+ return "", "", err
+ }
+ if err := json.Unmarshal(j, &dockerAuth); err != nil {
+ return "", "", err
+ }
+
+ } else if os.IsNotExist(err) {
+ // try old config path
+ oldDockerCfgPath := filepath.Join(getDefaultConfigDir(dockerCfgObsolete))
+ if _, err := os.Stat(oldDockerCfgPath); err != nil {
+ if os.IsNotExist(err) {
+ return "", "", nil
+ }
+ return "", "", errors.Wrap(err, oldDockerCfgPath)
+ }
+
+ j, err := ioutil.ReadFile(oldDockerCfgPath)
+ if err != nil {
+ return "", "", err
+ }
+ if err := json.Unmarshal(j, &dockerAuth.AuthConfigs); err != nil {
+ return "", "", err
+ }
+
+ } else if err != nil {
+ return "", "", errors.Wrap(err, dockerCfgPath)
+ }
+
+ // I'm feeling lucky
+ if c, exists := dockerAuth.AuthConfigs[registry]; exists {
+ return decodeDockerAuth(c.Auth)
+ }
+
+ // bad luck; let's normalize the entries first
+ registry = normalizeRegistry(registry)
+ normalizedAuths := map[string]dockerAuthConfig{}
+ for k, v := range dockerAuth.AuthConfigs {
+ normalizedAuths[normalizeRegistry(k)] = v
+ }
+ if c, exists := normalizedAuths[registry]; exists {
+ return decodeDockerAuth(c.Auth)
+ }
+ return "", "", nil
+}
+
+// detectProperties detects various properties of the registry.
+// See the dockerClient documentation for members which are affected by this.
+func (c *dockerClient) detectProperties(ctx context.Context) error {
+ if c.scheme != "" {
+ return nil
+ }
+
+ ping := func(scheme string) error {
+ url := fmt.Sprintf(resolvedPingV2URL, scheme, c.registry)
+ resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, true)
+ logrus.Debugf("Ping %s err %#v", url, err)
+ if err != nil {
+ return err
+ }
+ defer resp.Body.Close()
+ logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
+ return errors.Errorf("error pinging repository, response code %d", resp.StatusCode)
+ }
+ c.challenges = parseAuthHeader(resp.Header)
+ c.scheme = scheme
+ c.supportsSignatures = resp.Header.Get("X-Registry-Supports-Signatures") == "1"
+ return nil
+ }
+ err := ping("https")
+ if err != nil && c.ctx != nil && c.ctx.DockerInsecureSkipTLSVerify {
+ err = ping("http")
+ }
+ if err != nil {
+ err = errors.Wrap(err, "pinging docker registry returned")
+ if c.ctx != nil && c.ctx.DockerDisableV1Ping {
+ return err
+ }
+ // best effort to understand if we're talking to a V1 registry
+ pingV1 := func(scheme string) bool {
+ url := fmt.Sprintf(resolvedPingV1URL, scheme, c.registry)
+ resp, err := c.makeRequestToResolvedURL(ctx, "GET", url, nil, nil, -1, true)
+ logrus.Debugf("Ping %s err %#v", url, err)
+ if err != nil {
+ return false
+ }
+ defer resp.Body.Close()
+ logrus.Debugf("Ping %s status %d", url, resp.StatusCode)
+ if resp.StatusCode != http.StatusOK && resp.StatusCode != http.StatusUnauthorized {
+ return false
+ }
+ return true
+ }
+ isV1 := pingV1("https")
+ if !isV1 && c.ctx != nil && c.ctx.DockerInsecureSkipTLSVerify {
+ isV1 = pingV1("http")
+ }
+ if isV1 {
+ err = ErrV1NotSupported
+ }
+ }
+ return err
+}
+
+// getExtensionsSignatures returns signatures from the X-Registry-Supports-Signatures API extension,
+// using the original data structures.
+func (c *dockerClient) getExtensionsSignatures(ctx context.Context, ref dockerReference, manifestDigest digest.Digest) (*extensionSignatureList, error) {
+ path := fmt.Sprintf(extensionsSignaturePath, reference.Path(ref.ref), manifestDigest)
+ res, err := c.makeRequest(ctx, "GET", path, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusOK {
+ return nil, client.HandleErrorResponse(res)
+ }
+ body, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, err
+ }
+
+ var parsedBody extensionSignatureList
+ if err := json.Unmarshal(body, &parsedBody); err != nil {
+ return nil, errors.Wrapf(err, "Error decoding signature list")
+ }
+ return &parsedBody, nil
+}
+
+func getDefaultConfigDir(confPath string) string {
+ return filepath.Join(homedir.Get(), confPath)
+}
+
+type dockerAuthConfig struct {
+ Auth string `json:"auth,omitempty"`
+}
+
+type dockerConfigFile struct {
+ AuthConfigs map[string]dockerAuthConfig `json:"auths"`
+}
+
+func decodeDockerAuth(s string) (string, string, error) {
+ decoded, err := base64.StdEncoding.DecodeString(s)
+ if err != nil {
+ return "", "", err
+ }
+ parts := strings.SplitN(string(decoded), ":", 2)
+ if len(parts) != 2 {
+ // if it's invalid just skip, as docker does
+ return "", "", nil
+ }
+ user := parts[0]
+ password := strings.Trim(parts[1], "\x00")
+ return user, password, nil
+}
+
+// convertToHostname converts a registry url which has http|https prepended
+// to just a hostname.
+// Copied from github.com/docker/docker/registry/auth.go
+func convertToHostname(url string) string {
+ stripped := url
+ if strings.HasPrefix(url, "http://") {
+ stripped = strings.TrimPrefix(url, "http://")
+ } else if strings.HasPrefix(url, "https://") {
+ stripped = strings.TrimPrefix(url, "https://")
+ }
+
+ nameParts := strings.SplitN(stripped, "/", 2)
+
+ return nameParts[0]
+}
+
+func normalizeRegistry(registry string) string {
+ normalized := convertToHostname(registry)
+ switch normalized {
+ case "registry-1.docker.io", "docker.io":
+ return "index.docker.io"
+ }
+ return normalized
+}
diff --git a/vendor/github.com/containers/image/docker/docker_image.go b/vendor/github.com/containers/image/docker/docker_image.go
new file mode 100644
index 0000000000..992d920354
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/docker_image.go
@@ -0,0 +1,63 @@
+package docker
+
+import (
+ "context"
+ "encoding/json"
+ "fmt"
+ "net/http"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/image"
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+)
+
+// Image is a Docker-specific implementation of types.Image with a few extra methods
+// which are specific to Docker.
+type Image struct {
+ types.Image
+ src *dockerImageSource
+}
+
+// newImage returns a new Image interface type after setting up
+// a client to the registry hosting the given image.
+// The caller must call .Close() on the returned Image.
+func newImage(ctx *types.SystemContext, ref dockerReference) (types.Image, error) {
+ s, err := newImageSource(ctx, ref, nil)
+ if err != nil {
+ return nil, err
+ }
+ img, err := image.FromSource(s)
+ if err != nil {
+ return nil, err
+ }
+ return &Image{Image: img, src: s}, nil
+}
+
+// SourceRefFullName returns a fully expanded name for the repository this image is in.
+func (i *Image) SourceRefFullName() string {
+ return i.src.ref.ref.Name()
+}
+
+// GetRepositoryTags list all tags available in the repository. Note that this has no connection with the tag(s) used for this specific image, if any.
+func (i *Image) GetRepositoryTags() ([]string, error) {
+ path := fmt.Sprintf(tagsPath, reference.Path(i.src.ref.ref))
+ // FIXME: Pass the context.Context
+ res, err := i.src.c.makeRequest(context.TODO(), "GET", path, nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusOK {
+ // print url also
+ return nil, errors.Errorf("Invalid status code returned when fetching tags list %d", res.StatusCode)
+ }
+ type tagsRes struct {
+ Tags []string
+ }
+ tags := &tagsRes{}
+ if err := json.NewDecoder(res.Body).Decode(tags); err != nil {
+ return nil, err
+ }
+ return tags.Tags, nil
+}
diff --git a/vendor/github.com/containers/image/docker/docker_image_dest.go b/vendor/github.com/containers/image/docker/docker_image_dest.go
new file mode 100644
index 0000000000..427e358a02
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/docker_image_dest.go
@@ -0,0 +1,466 @@
+package docker
+
+import (
+ "bytes"
+ "context"
+ "crypto/rand"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "net/http"
+ "net/url"
+ "os"
+ "path/filepath"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/docker/distribution/registry/api/errcode"
+ "github.com/docker/distribution/registry/api/v2"
+ "github.com/docker/distribution/registry/client"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+var manifestMIMETypes = []string{
+ // TODO(runcom): we'll add OCI as part of another PR here
+ manifest.DockerV2Schema2MediaType,
+ manifest.DockerV2Schema1SignedMediaType,
+ manifest.DockerV2Schema1MediaType,
+}
+
+func supportedManifestMIMETypesMap() map[string]bool {
+ m := make(map[string]bool, len(manifestMIMETypes))
+ for _, mt := range manifestMIMETypes {
+ m[mt] = true
+ }
+ return m
+}
+
+type dockerImageDestination struct {
+ ref dockerReference
+ c *dockerClient
+ // State
+ manifestDigest digest.Digest // or "" if not yet known.
+}
+
+// newImageDestination creates a new ImageDestination for the specified image reference.
+func newImageDestination(ctx *types.SystemContext, ref dockerReference) (types.ImageDestination, error) {
+ c, err := newDockerClient(ctx, ref, true, "pull,push")
+ if err != nil {
+ return nil, err
+ }
+ return &dockerImageDestination{
+ ref: ref,
+ c: c,
+ }, nil
+}
+
+// Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+// e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+func (d *dockerImageDestination) Reference() types.ImageReference {
+ return d.ref
+}
+
+// Close removes resources associated with an initialized ImageDestination, if any.
+func (d *dockerImageDestination) Close() error {
+ return nil
+}
+
+func (d *dockerImageDestination) SupportedManifestMIMETypes() []string {
+ return manifestMIMETypes
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *dockerImageDestination) SupportsSignatures() error {
+ if err := d.c.detectProperties(context.TODO()); err != nil {
+ return err
+ }
+ switch {
+ case d.c.signatureBase != nil:
+ return nil
+ case d.c.supportsSignatures:
+ return nil
+ default:
+ return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
+ }
+}
+
+// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+func (d *dockerImageDestination) ShouldCompressLayers() bool {
+ return true
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *dockerImageDestination) AcceptsForeignLayerURLs() bool {
+ return true
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+func (d *dockerImageDestination) MustMatchRuntimeOS() bool {
+ return false
+}
+
+// sizeCounter is an io.Writer which only counts the total size of its input.
+type sizeCounter struct{ size int64 }
+
+func (c *sizeCounter) Write(p []byte) (n int, err error) {
+ c.size += int64(len(p))
+ return len(p), nil
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *dockerImageDestination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
+ if inputInfo.Digest.String() != "" {
+ haveBlob, size, err := d.HasBlob(inputInfo)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ if haveBlob {
+ return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
+ }
+ }
+
+ // FIXME? Chunked upload, progress reporting, etc.
+ uploadPath := fmt.Sprintf(blobUploadPath, reference.Path(d.ref.ref))
+ logrus.Debugf("Uploading %s", uploadPath)
+ res, err := d.c.makeRequest(context.TODO(), "POST", uploadPath, nil, nil)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusAccepted {
+ logrus.Debugf("Error initiating layer upload, response %#v", *res)
+ return types.BlobInfo{}, errors.Errorf("Error initiating layer upload to %s, status %d", uploadPath, res.StatusCode)
+ }
+ uploadLocation, err := res.Location()
+ if err != nil {
+ return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
+ }
+
+ digester := digest.Canonical.Digester()
+ sizeCounter := &sizeCounter{}
+ tee := io.TeeReader(stream, io.MultiWriter(digester.Hash(), sizeCounter))
+ res, err = d.c.makeRequestToResolvedURL(context.TODO(), "PATCH", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, tee, inputInfo.Size, true)
+ if err != nil {
+ logrus.Debugf("Error uploading layer chunked, response %#v", res)
+ return types.BlobInfo{}, err
+ }
+ defer res.Body.Close()
+ computedDigest := digester.Digest()
+
+ uploadLocation, err = res.Location()
+ if err != nil {
+ return types.BlobInfo{}, errors.Wrap(err, "Error determining upload URL")
+ }
+
+ // FIXME: DELETE uploadLocation on failure
+
+ locationQuery := uploadLocation.Query()
+ // TODO: check inputInfo.Digest == computedDigest https://github.com/containers/image/pull/70#discussion_r77646717
+ locationQuery.Set("digest", computedDigest.String())
+ uploadLocation.RawQuery = locationQuery.Encode()
+ res, err = d.c.makeRequestToResolvedURL(context.TODO(), "PUT", uploadLocation.String(), map[string][]string{"Content-Type": {"application/octet-stream"}}, nil, -1, true)
+ if err != nil {
+ return types.BlobInfo{}, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusCreated {
+ logrus.Debugf("Error uploading layer, response %#v", *res)
+ return types.BlobInfo{}, errors.Errorf("Error uploading layer to %s, status %d", uploadLocation, res.StatusCode)
+ }
+
+ logrus.Debugf("Upload of layer %s complete", computedDigest)
+ return types.BlobInfo{Digest: computedDigest, Size: sizeCounter.size}, nil
+}
+
+// HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
+// Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
+// If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
+// it returns a non-nil error only on an unexpected failure.
+func (d *dockerImageDestination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+ if info.Digest == "" {
+ return false, -1, errors.Errorf(`"Can not check for a blob with unknown digest`)
+ }
+ checkPath := fmt.Sprintf(blobsPath, reference.Path(d.ref.ref), info.Digest.String())
+
+ logrus.Debugf("Checking %s", checkPath)
+ res, err := d.c.makeRequest(context.TODO(), "HEAD", checkPath, nil, nil)
+ if err != nil {
+ return false, -1, err
+ }
+ defer res.Body.Close()
+ switch res.StatusCode {
+ case http.StatusOK:
+ logrus.Debugf("... already exists")
+ return true, getBlobSize(res), nil
+ case http.StatusUnauthorized:
+ logrus.Debugf("... not authorized")
+ return false, -1, errors.Errorf("not authorized to read from destination repository %s", reference.Path(d.ref.ref))
+ case http.StatusNotFound:
+ logrus.Debugf("... not present")
+ return false, -1, nil
+ default:
+ return false, -1, errors.Errorf("failed to read from destination repository %s: %v", reference.Path(d.ref.ref), http.StatusText(res.StatusCode))
+ }
+}
+
+func (d *dockerImageDestination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+ return info, nil
+}
+
+// PutManifest writes manifest to the destination.
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
+func (d *dockerImageDestination) PutManifest(m []byte) error {
+ digest, err := manifest.Digest(m)
+ if err != nil {
+ return err
+ }
+ d.manifestDigest = digest
+
+ refTail, err := d.ref.tagOrDigest()
+ if err != nil {
+ return err
+ }
+ path := fmt.Sprintf(manifestPath, reference.Path(d.ref.ref), refTail)
+
+ headers := map[string][]string{}
+ mimeType := manifest.GuessMIMEType(m)
+ if mimeType != "" {
+ headers["Content-Type"] = []string{mimeType}
+ }
+ res, err := d.c.makeRequest(context.TODO(), "PUT", path, headers, bytes.NewReader(m))
+ if err != nil {
+ return err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusCreated {
+ err = errors.Wrapf(client.HandleErrorResponse(res), "Error uploading manifest to %s", path)
+ if isManifestInvalidError(errors.Cause(err)) {
+ err = types.ManifestTypeRejectedError{Err: err}
+ }
+ return err
+ }
+ return nil
+}
+
+// isManifestInvalidError returns true iff err from client.HandleErrorReponse is a “manifest invalid” error.
+func isManifestInvalidError(err error) bool {
+ errors, ok := err.(errcode.Errors)
+ if !ok || len(errors) == 0 {
+ return false
+ }
+ ec, ok := errors[0].(errcode.ErrorCoder)
+ if !ok {
+ return false
+ }
+ // ErrorCodeManifestInvalid is returned by OpenShift with acceptschema2=false.
+ // ErrorCodeTagInvalid is returned by docker/distribution (at least as of commit ec87e9b6971d831f0eff752ddb54fb64693e51cd)
+ // when uploading to a tag (because it can’t find a matching tag inside the manifest)
+ return ec.ErrorCode() == v2.ErrorCodeManifestInvalid || ec.ErrorCode() == v2.ErrorCodeTagInvalid
+}
+
+func (d *dockerImageDestination) PutSignatures(signatures [][]byte) error {
+ // Do not fail if we don’t really need to support signatures.
+ if len(signatures) == 0 {
+ return nil
+ }
+ if err := d.c.detectProperties(context.TODO()); err != nil {
+ return err
+ }
+ switch {
+ case d.c.signatureBase != nil:
+ return d.putSignaturesToLookaside(signatures)
+ case d.c.supportsSignatures:
+ return d.putSignaturesToAPIExtension(signatures)
+ default:
+ return errors.Errorf("X-Registry-Supports-Signatures extension not supported, and lookaside is not configured")
+ }
+}
+
+// putSignaturesToLookaside implements PutSignatures() from the lookaside location configured in s.c.signatureBase,
+// which is not nil.
+func (d *dockerImageDestination) putSignaturesToLookaside(signatures [][]byte) error {
+ // FIXME? This overwrites files one at a time, definitely not atomic.
+ // A failure when updating signatures with a reordered copy could lose some of them.
+
+ // Skip dealing with the manifest digest if not necessary.
+ if len(signatures) == 0 {
+ return nil
+ }
+
+ if d.manifestDigest.String() == "" {
+ // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
+ return errors.Errorf("Unknown manifest digest, can't add signatures")
+ }
+
+ // NOTE: Keep this in sync with docs/signature-protocols.md!
+ for i, signature := range signatures {
+ url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
+ if url == nil {
+ return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
+ }
+ err := d.putOneSignature(url, signature)
+ if err != nil {
+ return err
+ }
+ }
+ // Remove any other signatures, if present.
+ // We stop at the first missing signature; if a previous deleting loop aborted
+ // prematurely, this may not clean up all of them, but one missing signature
+ // is enough for dockerImageSource to stop looking for other signatures, so that
+ // is sufficient.
+ for i := len(signatures); ; i++ {
+ url := signatureStorageURL(d.c.signatureBase, d.manifestDigest, i)
+ if url == nil {
+ return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
+ }
+ missing, err := d.c.deleteOneSignature(url)
+ if err != nil {
+ return err
+ }
+ if missing {
+ break
+ }
+ }
+
+ return nil
+}
+
+// putOneSignature stores one signature to url.
+// NOTE: Keep this in sync with docs/signature-protocols.md!
+func (d *dockerImageDestination) putOneSignature(url *url.URL, signature []byte) error {
+ switch url.Scheme {
+ case "file":
+ logrus.Debugf("Writing to %s", url.Path)
+ err := os.MkdirAll(filepath.Dir(url.Path), 0755)
+ if err != nil {
+ return err
+ }
+ err = ioutil.WriteFile(url.Path, signature, 0644)
+ if err != nil {
+ return err
+ }
+ return nil
+
+ case "http", "https":
+ return errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
+ default:
+ return errors.Errorf("Unsupported scheme when writing signature to %s", url.String())
+ }
+}
+
+// deleteOneSignature deletes a signature from url, if it exists.
+// If it successfully determines that the signature does not exist, returns (true, nil)
+// NOTE: Keep this in sync with docs/signature-protocols.md!
+func (c *dockerClient) deleteOneSignature(url *url.URL) (missing bool, err error) {
+ switch url.Scheme {
+ case "file":
+ logrus.Debugf("Deleting %s", url.Path)
+ err := os.Remove(url.Path)
+ if err != nil && os.IsNotExist(err) {
+ return true, nil
+ }
+ return false, err
+
+ case "http", "https":
+ return false, errors.Errorf("Writing directly to a %s sigstore %s is not supported. Configure a sigstore-staging: location", url.Scheme, url.String())
+ default:
+ return false, errors.Errorf("Unsupported scheme when deleting signature from %s", url.String())
+ }
+}
+
+// putSignaturesToAPIExtension implements PutSignatures() using the X-Registry-Supports-Signatures API extension.
+func (d *dockerImageDestination) putSignaturesToAPIExtension(signatures [][]byte) error {
+ // Skip dealing with the manifest digest, or reading the old state, if not necessary.
+ if len(signatures) == 0 {
+ return nil
+ }
+
+ if d.manifestDigest.String() == "" {
+ // This shouldn’t happen, ImageDestination users are required to call PutManifest before PutSignatures
+ return errors.Errorf("Unknown manifest digest, can't add signatures")
+ }
+
+ // Because image signatures are a shared resource in Atomic Registry, the default upload
+ // always adds signatures. Eventually we should also allow removing signatures,
+ // but the X-Registry-Supports-Signatures API extension does not support that yet.
+
+ existingSignatures, err := d.c.getExtensionsSignatures(context.TODO(), d.ref, d.manifestDigest)
+ if err != nil {
+ return err
+ }
+ existingSigNames := map[string]struct{}{}
+ for _, sig := range existingSignatures.Signatures {
+ existingSigNames[sig.Name] = struct{}{}
+ }
+
+sigExists:
+ for _, newSig := range signatures {
+ for _, existingSig := range existingSignatures.Signatures {
+ if existingSig.Version == extensionSignatureSchemaVersion && existingSig.Type == extensionSignatureTypeAtomic && bytes.Equal(existingSig.Content, newSig) {
+ continue sigExists
+ }
+ }
+
+ // The API expect us to invent a new unique name. This is racy, but hopefully good enough.
+ var signatureName string
+ for {
+ randBytes := make([]byte, 16)
+ n, err := rand.Read(randBytes)
+ if err != nil || n != 16 {
+ return errors.Wrapf(err, "Error generating random signature len %d", n)
+ }
+ signatureName = fmt.Sprintf("%s@%032x", d.manifestDigest.String(), randBytes)
+ if _, ok := existingSigNames[signatureName]; !ok {
+ break
+ }
+ }
+ sig := extensionSignature{
+ Version: extensionSignatureSchemaVersion,
+ Name: signatureName,
+ Type: extensionSignatureTypeAtomic,
+ Content: newSig,
+ }
+ body, err := json.Marshal(sig)
+ if err != nil {
+ return err
+ }
+
+ path := fmt.Sprintf(extensionsSignaturePath, reference.Path(d.ref.ref), d.manifestDigest.String())
+ res, err := d.c.makeRequest(context.TODO(), "PUT", path, nil, bytes.NewReader(body))
+ if err != nil {
+ return err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusCreated {
+ body, err := ioutil.ReadAll(res.Body)
+ if err == nil {
+ logrus.Debugf("Error body %s", string(body))
+ }
+ logrus.Debugf("Error uploading signature, status %d, %#v", res.StatusCode, res)
+ return errors.Errorf("Error uploading signature to %s, status %d", path, res.StatusCode)
+ }
+ }
+
+ return nil
+}
+
+// Commit marks the process of storing the image as successful and asks for the image to be persisted.
+// WARNING: This does not have any transactional semantics:
+// - Uploaded data MAY be visible to others before Commit() is called
+// - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+func (d *dockerImageDestination) Commit() error {
+ return nil
+}
diff --git a/vendor/github.com/containers/image/docker/docker_image_src.go b/vendor/github.com/containers/image/docker/docker_image_src.go
new file mode 100644
index 0000000000..d6edb50c04
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/docker_image_src.go
@@ -0,0 +1,391 @@
+package docker
+
+import (
+ "context"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "mime"
+ "net/http"
+ "net/url"
+ "os"
+ "strconv"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/docker/distribution/registry/client"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+type dockerImageSource struct {
+ ref dockerReference
+ requestedManifestMIMETypes []string
+ c *dockerClient
+ // State
+ cachedManifest []byte // nil if not loaded yet
+ cachedManifestMIMEType string // Only valid if cachedManifest != nil
+}
+
+// newImageSource creates a new ImageSource for the specified image reference,
+// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
+// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
+// The caller must call .Close() on the returned ImageSource.
+func newImageSource(ctx *types.SystemContext, ref dockerReference, requestedManifestMIMETypes []string) (*dockerImageSource, error) {
+ c, err := newDockerClient(ctx, ref, false, "pull")
+ if err != nil {
+ return nil, err
+ }
+ if requestedManifestMIMETypes == nil {
+ requestedManifestMIMETypes = manifest.DefaultRequestedManifestMIMETypes
+ }
+ supportedMIMEs := supportedManifestMIMETypesMap()
+ acceptableRequestedMIMEs := false
+ for _, mtrequested := range requestedManifestMIMETypes {
+ if supportedMIMEs[mtrequested] {
+ acceptableRequestedMIMEs = true
+ break
+ }
+ }
+ if !acceptableRequestedMIMEs {
+ requestedManifestMIMETypes = manifest.DefaultRequestedManifestMIMETypes
+ }
+ return &dockerImageSource{
+ ref: ref,
+ requestedManifestMIMETypes: requestedManifestMIMETypes,
+ c: c,
+ }, nil
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (s *dockerImageSource) Reference() types.ImageReference {
+ return s.ref
+}
+
+// Close removes resources associated with an initialized ImageSource, if any.
+func (s *dockerImageSource) Close() error {
+ return nil
+}
+
+// simplifyContentType drops parameters from a HTTP media type (see https://tools.ietf.org/html/rfc7231#section-3.1.1.1)
+// Alternatively, an empty string is returned unchanged, and invalid values are "simplified" to an empty string.
+func simplifyContentType(contentType string) string {
+ if contentType == "" {
+ return contentType
+ }
+ mimeType, _, err := mime.ParseMediaType(contentType)
+ if err != nil {
+ return ""
+ }
+ return mimeType
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+func (s *dockerImageSource) GetManifest() ([]byte, string, error) {
+ err := s.ensureManifestIsLoaded(context.TODO())
+ if err != nil {
+ return nil, "", err
+ }
+ return s.cachedManifest, s.cachedManifestMIMEType, nil
+}
+
+func (s *dockerImageSource) fetchManifest(ctx context.Context, tagOrDigest string) ([]byte, string, error) {
+ path := fmt.Sprintf(manifestPath, reference.Path(s.ref.ref), tagOrDigest)
+ headers := make(map[string][]string)
+ headers["Accept"] = s.requestedManifestMIMETypes
+ res, err := s.c.makeRequest(ctx, "GET", path, headers, nil)
+ if err != nil {
+ return nil, "", err
+ }
+ defer res.Body.Close()
+ if res.StatusCode != http.StatusOK {
+ return nil, "", client.HandleErrorResponse(res)
+ }
+ manblob, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, "", err
+ }
+ return manblob, simplifyContentType(res.Header.Get("Content-Type")), nil
+}
+
+// GetTargetManifest returns an image's manifest given a digest.
+// This is mainly used to retrieve a single image's manifest out of a manifest list.
+func (s *dockerImageSource) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
+ return s.fetchManifest(context.TODO(), digest.String())
+}
+
+// ensureManifestIsLoaded sets s.cachedManifest and s.cachedManifestMIMEType
+//
+// ImageSource implementations are not required or expected to do any caching,
+// but because our signatures are “attached” to the manifest digest,
+// we need to ensure that the digest of the manifest returned by GetManifest
+// and used by GetSignatures are consistent, otherwise we would get spurious
+// signature verification failures when pulling while a tag is being updated.
+func (s *dockerImageSource) ensureManifestIsLoaded(ctx context.Context) error {
+ if s.cachedManifest != nil {
+ return nil
+ }
+
+ reference, err := s.ref.tagOrDigest()
+ if err != nil {
+ return err
+ }
+
+ manblob, mt, err := s.fetchManifest(ctx, reference)
+ if err != nil {
+ return err
+ }
+ // We might validate manblob against the Docker-Content-Digest header here to protect against transport errors.
+ s.cachedManifest = manblob
+ s.cachedManifestMIMEType = mt
+ return nil
+}
+
+func (s *dockerImageSource) getExternalBlob(urls []string) (io.ReadCloser, int64, error) {
+ var (
+ resp *http.Response
+ err error
+ )
+ for _, url := range urls {
+ resp, err = s.c.makeRequestToResolvedURL(context.TODO(), "GET", url, nil, nil, -1, false)
+ if err == nil {
+ if resp.StatusCode != http.StatusOK {
+ err = errors.Errorf("error fetching external blob from %q: %d", url, resp.StatusCode)
+ logrus.Debug(err)
+ continue
+ }
+ }
+ }
+ if resp.Body != nil && err == nil {
+ return resp.Body, getBlobSize(resp), nil
+ }
+ return nil, 0, err
+}
+
+func getBlobSize(resp *http.Response) int64 {
+ size, err := strconv.ParseInt(resp.Header.Get("Content-Length"), 10, 64)
+ if err != nil {
+ size = -1
+ }
+ return size
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+func (s *dockerImageSource) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
+ if len(info.URLs) != 0 {
+ return s.getExternalBlob(info.URLs)
+ }
+
+ path := fmt.Sprintf(blobsPath, reference.Path(s.ref.ref), info.Digest.String())
+ logrus.Debugf("Downloading %s", path)
+ res, err := s.c.makeRequest(context.TODO(), "GET", path, nil, nil)
+ if err != nil {
+ return nil, 0, err
+ }
+ if res.StatusCode != http.StatusOK {
+ // print url also
+ return nil, 0, errors.Errorf("Invalid status code returned when fetching blob %d", res.StatusCode)
+ }
+ return res.Body, getBlobSize(res), nil
+}
+
+func (s *dockerImageSource) GetSignatures(ctx context.Context) ([][]byte, error) {
+ if err := s.c.detectProperties(ctx); err != nil {
+ return nil, err
+ }
+ switch {
+ case s.c.signatureBase != nil:
+ return s.getSignaturesFromLookaside(ctx)
+ case s.c.supportsSignatures:
+ return s.getSignaturesFromAPIExtension(ctx)
+ default:
+ return [][]byte{}, nil
+ }
+}
+
+// manifestDigest returns a digest of the manifest, either from the supplied reference or from a fetched manifest.
+func (s *dockerImageSource) manifestDigest(ctx context.Context) (digest.Digest, error) {
+ if digested, ok := s.ref.ref.(reference.Digested); ok {
+ d := digested.Digest()
+ if d.Algorithm() == digest.Canonical {
+ return d, nil
+ }
+ }
+ if err := s.ensureManifestIsLoaded(ctx); err != nil {
+ return "", err
+ }
+ return manifest.Digest(s.cachedManifest)
+}
+
+// getSignaturesFromLookaside implements GetSignatures() from the lookaside location configured in s.c.signatureBase,
+// which is not nil.
+func (s *dockerImageSource) getSignaturesFromLookaside(ctx context.Context) ([][]byte, error) {
+ manifestDigest, err := s.manifestDigest(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ // NOTE: Keep this in sync with docs/signature-protocols.md!
+ signatures := [][]byte{}
+ for i := 0; ; i++ {
+ url := signatureStorageURL(s.c.signatureBase, manifestDigest, i)
+ if url == nil {
+ return nil, errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
+ }
+ signature, missing, err := s.getOneSignature(ctx, url)
+ if err != nil {
+ return nil, err
+ }
+ if missing {
+ break
+ }
+ signatures = append(signatures, signature)
+ }
+ return signatures, nil
+}
+
+// getOneSignature downloads one signature from url.
+// If it successfully determines that the signature does not exist, returns with missing set to true and error set to nil.
+// NOTE: Keep this in sync with docs/signature-protocols.md!
+func (s *dockerImageSource) getOneSignature(ctx context.Context, url *url.URL) (signature []byte, missing bool, err error) {
+ switch url.Scheme {
+ case "file":
+ logrus.Debugf("Reading %s", url.Path)
+ sig, err := ioutil.ReadFile(url.Path)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return nil, true, nil
+ }
+ return nil, false, err
+ }
+ return sig, false, nil
+
+ case "http", "https":
+ logrus.Debugf("GET %s", url)
+ req, err := http.NewRequest("GET", url.String(), nil)
+ if err != nil {
+ return nil, false, err
+ }
+ req = req.WithContext(ctx)
+ res, err := s.c.client.Do(req)
+ if err != nil {
+ return nil, false, err
+ }
+ defer res.Body.Close()
+ if res.StatusCode == http.StatusNotFound {
+ return nil, true, nil
+ } else if res.StatusCode != http.StatusOK {
+ return nil, false, errors.Errorf("Error reading signature from %s: status %d", url.String(), res.StatusCode)
+ }
+ sig, err := ioutil.ReadAll(res.Body)
+ if err != nil {
+ return nil, false, err
+ }
+ return sig, false, nil
+
+ default:
+ return nil, false, errors.Errorf("Unsupported scheme when reading signature from %s", url.String())
+ }
+}
+
+// getSignaturesFromAPIExtension implements GetSignatures() using the X-Registry-Supports-Signatures API extension.
+func (s *dockerImageSource) getSignaturesFromAPIExtension(ctx context.Context) ([][]byte, error) {
+ manifestDigest, err := s.manifestDigest(ctx)
+ if err != nil {
+ return nil, err
+ }
+
+ parsedBody, err := s.c.getExtensionsSignatures(ctx, s.ref, manifestDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ var sigs [][]byte
+ for _, sig := range parsedBody.Signatures {
+ if sig.Version == extensionSignatureSchemaVersion && sig.Type == extensionSignatureTypeAtomic {
+ sigs = append(sigs, sig.Content)
+ }
+ }
+ return sigs, nil
+}
+
+// deleteImage deletes the named image from the registry, if supported.
+func deleteImage(ctx *types.SystemContext, ref dockerReference) error {
+ c, err := newDockerClient(ctx, ref, true, "push")
+ if err != nil {
+ return err
+ }
+
+ // When retrieving the digest from a registry >= 2.3 use the following header:
+ // "Accept": "application/vnd.docker.distribution.manifest.v2+json"
+ headers := make(map[string][]string)
+ headers["Accept"] = []string{manifest.DockerV2Schema2MediaType}
+
+ refTail, err := ref.tagOrDigest()
+ if err != nil {
+ return err
+ }
+ getPath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), refTail)
+ get, err := c.makeRequest(context.TODO(), "GET", getPath, headers, nil)
+ if err != nil {
+ return err
+ }
+ defer get.Body.Close()
+ manifestBody, err := ioutil.ReadAll(get.Body)
+ if err != nil {
+ return err
+ }
+ switch get.StatusCode {
+ case http.StatusOK:
+ case http.StatusNotFound:
+ return errors.Errorf("Unable to delete %v. Image may not exist or is not stored with a v2 Schema in a v2 registry", ref.ref)
+ default:
+ return errors.Errorf("Failed to delete %v: %s (%v)", ref.ref, manifestBody, get.Status)
+ }
+
+ digest := get.Header.Get("Docker-Content-Digest")
+ deletePath := fmt.Sprintf(manifestPath, reference.Path(ref.ref), digest)
+
+ // When retrieving the digest from a registry >= 2.3 use the following header:
+ // "Accept": "application/vnd.docker.distribution.manifest.v2+json"
+ delete, err := c.makeRequest(context.TODO(), "DELETE", deletePath, headers, nil)
+ if err != nil {
+ return err
+ }
+ defer delete.Body.Close()
+
+ body, err := ioutil.ReadAll(delete.Body)
+ if err != nil {
+ return err
+ }
+ if delete.StatusCode != http.StatusAccepted {
+ return errors.Errorf("Failed to delete %v: %s (%v)", deletePath, string(body), delete.Status)
+ }
+
+ if c.signatureBase != nil {
+ manifestDigest, err := manifest.Digest(manifestBody)
+ if err != nil {
+ return err
+ }
+
+ for i := 0; ; i++ {
+ url := signatureStorageURL(c.signatureBase, manifestDigest, i)
+ if url == nil {
+ return errors.Errorf("Internal error: signatureStorageURL with non-nil base returned nil")
+ }
+ missing, err := c.deleteOneSignature(url)
+ if err != nil {
+ return err
+ }
+ if missing {
+ break
+ }
+ }
+ }
+
+ return nil
+}
diff --git a/vendor/github.com/containers/image/docker/docker_transport.go b/vendor/github.com/containers/image/docker/docker_transport.go
new file mode 100644
index 0000000000..15d68e993c
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/docker_transport.go
@@ -0,0 +1,162 @@
+package docker
+
+import (
+ "fmt"
+ "strings"
+
+ "github.com/containers/image/docker/policyconfiguration"
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+)
+
+func init() {
+ transports.Register(Transport)
+}
+
+// Transport is an ImageTransport for Docker registry-hosted images.
+var Transport = dockerTransport{}
+
+type dockerTransport struct{}
+
+func (t dockerTransport) Name() string {
+ return "docker"
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (t dockerTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return ParseReference(reference)
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (t dockerTransport) ValidatePolicyConfigurationScope(scope string) error {
+ // FIXME? We could be verifying the various character set and length restrictions
+ // from docker/distribution/reference.regexp.go, but other than that there
+ // are few semantically invalid strings.
+ return nil
+}
+
+// dockerReference is an ImageReference for Docker images.
+type dockerReference struct {
+ ref reference.Named // By construction we know that !reference.IsNameOnly(ref)
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an Docker ImageReference.
+func ParseReference(refString string) (types.ImageReference, error) {
+ if !strings.HasPrefix(refString, "//") {
+ return nil, errors.Errorf("docker: image reference %s does not start with //", refString)
+ }
+ ref, err := reference.ParseNormalizedNamed(strings.TrimPrefix(refString, "//"))
+ if err != nil {
+ return nil, err
+ }
+ ref = reference.TagNameOnly(ref)
+ return NewReference(ref)
+}
+
+// NewReference returns a Docker reference for a named reference. The reference must satisfy !reference.IsNameOnly().
+func NewReference(ref reference.Named) (types.ImageReference, error) {
+ if reference.IsNameOnly(ref) {
+ return nil, errors.Errorf("Docker reference %s has neither a tag nor a digest", reference.FamiliarString(ref))
+ }
+ // A github.com/distribution/reference value can have a tag and a digest at the same time!
+ // The docker/distribution API does not really support that (we can’t ask for an image with a specific
+ // tag and digest), so fail. This MAY be accepted in the future.
+ // (Even if it were supported, the semantics of policy namespaces are unclear - should we drop
+ // the tag or the digest first?)
+ _, isTagged := ref.(reference.NamedTagged)
+ _, isDigested := ref.(reference.Canonical)
+ if isTagged && isDigested {
+ return nil, errors.Errorf("Docker references with both a tag and digest are currently not supported")
+ }
+ return dockerReference{
+ ref: ref,
+ }, nil
+}
+
+func (ref dockerReference) Transport() types.ImageTransport {
+ return Transport
+}
+
+// StringWithinTransport returns a string representation of the reference, which MUST be such that
+// reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+// NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+// WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix.
+func (ref dockerReference) StringWithinTransport() string {
+ return "//" + reference.FamiliarString(ref.ref)
+}
+
+// DockerReference returns a Docker reference associated with this reference
+// (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+// not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+func (ref dockerReference) DockerReference() reference.Named {
+ return ref.ref
+}
+
+// PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+// This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+// The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+// (i.e. various references with exactly the same semantics should return the same configuration identity)
+// It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+// not required/guaranteed that it will be a valid input to Transport().ParseReference().
+// Returns "" if configuration identities for these references are not supported.
+func (ref dockerReference) PolicyConfigurationIdentity() string {
+ res, err := policyconfiguration.DockerReferenceIdentity(ref.ref)
+ if res == "" || err != nil { // Coverage: Should never happen, NewReference above should refuse values which could cause a failure.
+ panic(fmt.Sprintf("Internal inconsistency: policyconfiguration.DockerReferenceIdentity returned %#v, %v", res, err))
+ }
+ return res
+}
+
+// PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+// for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+// in order, terminating on first match, and an implicit "" is always checked at the end.
+// It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+// and each following element to be a prefix of the element preceding it.
+func (ref dockerReference) PolicyConfigurationNamespaces() []string {
+ return policyconfiguration.DockerReferenceNamespaces(ref.ref)
+}
+
+// NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
+// The caller must call .Close() on the returned Image.
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+func (ref dockerReference) NewImage(ctx *types.SystemContext) (types.Image, error) {
+ return newImage(ctx, ref)
+}
+
+// NewImageSource returns a types.ImageSource for this reference,
+// asking the backend to use a manifest from requestedManifestMIMETypes if possible.
+// nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
+// The caller must call .Close() on the returned ImageSource.
+func (ref dockerReference) NewImageSource(ctx *types.SystemContext, requestedManifestMIMETypes []string) (types.ImageSource, error) {
+ return newImageSource(ctx, ref, requestedManifestMIMETypes)
+}
+
+// NewImageDestination returns a types.ImageDestination for this reference.
+// The caller must call .Close() on the returned ImageDestination.
+func (ref dockerReference) NewImageDestination(ctx *types.SystemContext) (types.ImageDestination, error) {
+ return newImageDestination(ctx, ref)
+}
+
+// DeleteImage deletes the named image from the registry, if supported.
+func (ref dockerReference) DeleteImage(ctx *types.SystemContext) error {
+ return deleteImage(ctx, ref)
+}
+
+// tagOrDigest returns a tag or digest from the reference.
+func (ref dockerReference) tagOrDigest() (string, error) {
+ if ref, ok := ref.ref.(reference.Canonical); ok {
+ return ref.Digest().String(), nil
+ }
+ if ref, ok := ref.ref.(reference.NamedTagged); ok {
+ return ref.Tag(), nil
+ }
+ // This should not happen, NewReference above refuses reference.IsNameOnly values.
+ return "", errors.Errorf("Internal inconsistency: Reference %s unexpectedly has neither a digest nor a tag", reference.FamiliarString(ref.ref))
+}
diff --git a/vendor/github.com/containers/image/docker/lookaside.go b/vendor/github.com/containers/image/docker/lookaside.go
new file mode 100644
index 0000000000..c6dca5e44d
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/lookaside.go
@@ -0,0 +1,202 @@
+package docker
+
+import (
+ "fmt"
+ "io/ioutil"
+ "net/url"
+ "os"
+ "path"
+ "path/filepath"
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/types"
+ "github.com/ghodss/yaml"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// systemRegistriesDirPath is the path to registries.d, used for locating lookaside Docker signature storage.
+// You can override this at build time with
+// -ldflags '-X github.com/containers/image/docker.systemRegistriesDirPath=$your_path'
+var systemRegistriesDirPath = builtinRegistriesDirPath
+
+// builtinRegistriesDirPath is the path to registries.d.
+// DO NOT change this, instead see systemRegistriesDirPath above.
+const builtinRegistriesDirPath = "/etc/containers/registries.d"
+
+// registryConfiguration is one of the files in registriesDirPath configuring lookaside locations, or the result of merging them all.
+// NOTE: Keep this in sync with docs/registries.d.md!
+type registryConfiguration struct {
+ DefaultDocker *registryNamespace `json:"default-docker"`
+ // The key is a namespace, using fully-expanded Docker reference format or parent namespaces (per dockerReference.PolicyConfiguration*),
+ Docker map[string]registryNamespace `json:"docker"`
+}
+
+// registryNamespace defines lookaside locations for a single namespace.
+type registryNamespace struct {
+ SigStore string `json:"sigstore"` // For reading, and if SigStoreStaging is not present, for writing.
+ SigStoreStaging string `json:"sigstore-staging"` // For writing only.
+}
+
+// signatureStorageBase is an "opaque" type representing a lookaside Docker signature storage.
+// Users outside of this file should use configuredSignatureStorageBase and signatureStorageURL below.
+type signatureStorageBase *url.URL // The only documented value is nil, meaning storage is not supported.
+
+// configuredSignatureStorageBase reads configuration to find an appropriate signature storage URL for ref, for write access if “write”.
+func configuredSignatureStorageBase(ctx *types.SystemContext, ref dockerReference, write bool) (signatureStorageBase, error) {
+ // FIXME? Loading and parsing the config could be cached across calls.
+ dirPath := registriesDirPath(ctx)
+ logrus.Debugf(`Using registries.d directory %s for sigstore configuration`, dirPath)
+ config, err := loadAndMergeConfig(dirPath)
+ if err != nil {
+ return nil, err
+ }
+
+ topLevel := config.signatureTopLevel(ref, write)
+ if topLevel == "" {
+ return nil, nil
+ }
+
+ url, err := url.Parse(topLevel)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Invalid signature storage URL %s", topLevel)
+ }
+ // NOTE: Keep this in sync with docs/signature-protocols.md!
+ // FIXME? Restrict to explicitly supported schemes?
+ repo := reference.Path(ref.ref) // Note that this is without a tag or digest.
+ if path.Clean(repo) != repo { // Coverage: This should not be reachable because /./ and /../ components are not valid in docker references
+ return nil, errors.Errorf("Unexpected path elements in Docker reference %s for signature storage", ref.ref.String())
+ }
+ url.Path = url.Path + "/" + repo
+ return url, nil
+}
+
+// registriesDirPath returns a path to registries.d
+func registriesDirPath(ctx *types.SystemContext) string {
+ if ctx != nil {
+ if ctx.RegistriesDirPath != "" {
+ return ctx.RegistriesDirPath
+ }
+ if ctx.RootForImplicitAbsolutePaths != "" {
+ return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemRegistriesDirPath)
+ }
+ }
+ return systemRegistriesDirPath
+}
+
+// loadAndMergeConfig loads configuration files in dirPath
+func loadAndMergeConfig(dirPath string) (*registryConfiguration, error) {
+ mergedConfig := registryConfiguration{Docker: map[string]registryNamespace{}}
+ dockerDefaultMergedFrom := ""
+ nsMergedFrom := map[string]string{}
+
+ dir, err := os.Open(dirPath)
+ if err != nil {
+ if os.IsNotExist(err) {
+ return &mergedConfig, nil
+ }
+ return nil, err
+ }
+ configNames, err := dir.Readdirnames(0)
+ if err != nil {
+ return nil, err
+ }
+ for _, configName := range configNames {
+ if !strings.HasSuffix(configName, ".yaml") {
+ continue
+ }
+ configPath := filepath.Join(dirPath, configName)
+ configBytes, err := ioutil.ReadFile(configPath)
+ if err != nil {
+ return nil, err
+ }
+
+ var config registryConfiguration
+ err = yaml.Unmarshal(configBytes, &config)
+ if err != nil {
+ return nil, errors.Wrapf(err, "Error parsing %s", configPath)
+ }
+
+ if config.DefaultDocker != nil {
+ if mergedConfig.DefaultDocker != nil {
+ return nil, errors.Errorf(`Error parsing signature storage configuration: "default-docker" defined both in "%s" and "%s"`,
+ dockerDefaultMergedFrom, configPath)
+ }
+ mergedConfig.DefaultDocker = config.DefaultDocker
+ dockerDefaultMergedFrom = configPath
+ }
+
+ for nsName, nsConfig := range config.Docker { // includes config.Docker == nil
+ if _, ok := mergedConfig.Docker[nsName]; ok {
+ return nil, errors.Errorf(`Error parsing signature storage configuration: "docker" namespace "%s" defined both in "%s" and "%s"`,
+ nsName, nsMergedFrom[nsName], configPath)
+ }
+ mergedConfig.Docker[nsName] = nsConfig
+ nsMergedFrom[nsName] = configPath
+ }
+ }
+
+ return &mergedConfig, nil
+}
+
+// config.signatureTopLevel returns an URL string configured in config for ref, for write access if “write”.
+// (the top level of the storage, namespaced by repo.FullName etc.), or "" if no signature storage should be used.
+func (config *registryConfiguration) signatureTopLevel(ref dockerReference, write bool) string {
+ if config.Docker != nil {
+ // Look for a full match.
+ identity := ref.PolicyConfigurationIdentity()
+ if ns, ok := config.Docker[identity]; ok {
+ logrus.Debugf(` Using "docker" namespace %s`, identity)
+ if url := ns.signatureTopLevel(write); url != "" {
+ return url
+ }
+ }
+
+ // Look for a match of the possible parent namespaces.
+ for _, name := range ref.PolicyConfigurationNamespaces() {
+ if ns, ok := config.Docker[name]; ok {
+ logrus.Debugf(` Using "docker" namespace %s`, name)
+ if url := ns.signatureTopLevel(write); url != "" {
+ return url
+ }
+ }
+ }
+ }
+ // Look for a default location
+ if config.DefaultDocker != nil {
+ logrus.Debugf(` Using "default-docker" configuration`)
+ if url := config.DefaultDocker.signatureTopLevel(write); url != "" {
+ return url
+ }
+ }
+ logrus.Debugf(" No signature storage configuration found for %s", ref.PolicyConfigurationIdentity())
+ return ""
+}
+
+// ns.signatureTopLevel returns an URL string configured in ns for ref, for write access if “write”.
+// or "" if nothing has been configured.
+func (ns registryNamespace) signatureTopLevel(write bool) string {
+ if write && ns.SigStoreStaging != "" {
+ logrus.Debugf(` Using %s`, ns.SigStoreStaging)
+ return ns.SigStoreStaging
+ }
+ if ns.SigStore != "" {
+ logrus.Debugf(` Using %s`, ns.SigStore)
+ return ns.SigStore
+ }
+ return ""
+}
+
+// signatureStorageURL returns an URL usable for acessing signature index in base with known manifestDigest, or nil if not applicable.
+// Returns nil iff base == nil.
+// NOTE: Keep this in sync with docs/signature-protocols.md!
+func signatureStorageURL(base signatureStorageBase, manifestDigest digest.Digest, index int) *url.URL {
+ if base == nil {
+ return nil
+ }
+ url := *base
+ url.Path = fmt.Sprintf("%s@%s=%s/signature-%d", url.Path, manifestDigest.Algorithm(), manifestDigest.Hex(), index+1)
+ return &url
+}
diff --git a/vendor/github.com/containers/image/docker/policyconfiguration/naming.go b/vendor/github.com/containers/image/docker/policyconfiguration/naming.go
new file mode 100644
index 0000000000..31bbb544c6
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/policyconfiguration/naming.go
@@ -0,0 +1,56 @@
+package policyconfiguration
+
+import (
+ "strings"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/pkg/errors"
+)
+
+// DockerReferenceIdentity returns a string representation of the reference, suitable for policy lookup,
+// as a backend for ImageReference.PolicyConfigurationIdentity.
+// The reference must satisfy !reference.IsNameOnly().
+func DockerReferenceIdentity(ref reference.Named) (string, error) {
+ res := ref.Name()
+ tagged, isTagged := ref.(reference.NamedTagged)
+ digested, isDigested := ref.(reference.Canonical)
+ switch {
+ case isTagged && isDigested: // Note that this CAN actually happen.
+ return "", errors.Errorf("Unexpected Docker reference %s with both a name and a digest", reference.FamiliarString(ref))
+ case !isTagged && !isDigested: // This should not happen, the caller is expected to ensure !reference.IsNameOnly()
+ return "", errors.Errorf("Internal inconsistency: Docker reference %s with neither a tag nor a digest", reference.FamiliarString(ref))
+ case isTagged:
+ res = res + ":" + tagged.Tag()
+ case isDigested:
+ res = res + "@" + digested.Digest().String()
+ default: // Coverage: The above was supposed to be exhaustive.
+ return "", errors.New("Internal inconsistency, unexpected default branch")
+ }
+ return res, nil
+}
+
+// DockerReferenceNamespaces returns a list of other policy configuration namespaces to search,
+// as a backend for ImageReference.PolicyConfigurationIdentity.
+// The reference must satisfy !reference.IsNameOnly().
+func DockerReferenceNamespaces(ref reference.Named) []string {
+ // Look for a match of the repository, and then of the possible parent
+ // namespaces. Note that this only happens on the expanded host names
+ // and repository names, i.e. "busybox" is looked up as "docker.io/library/busybox",
+ // then in its parent "docker.io/library"; in none of "busybox",
+ // un-namespaced "library" nor in "" supposedly implicitly representing "library/".
+ //
+ // ref.FullName() == ref.Hostname() + "/" + ref.RemoteName(), so the last
+ // iteration matches the host name (for any namespace).
+ res := []string{}
+ name := ref.Name()
+ for {
+ res = append(res, name)
+
+ lastSlash := strings.LastIndex(name, "/")
+ if lastSlash == -1 {
+ break
+ }
+ name = name[:lastSlash]
+ }
+ return res
+}
diff --git a/vendor/github.com/containers/image/docker/reference/README.md b/vendor/github.com/containers/image/docker/reference/README.md
new file mode 100644
index 0000000000..53a88de826
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/reference/README.md
@@ -0,0 +1,2 @@
+This is a copy of github.com/docker/distribution/reference as of commit fb0bebc4b64e3881cc52a2478d749845ed76d2a8,
+except that ParseAnyReferenceWithSet has been removed to drop the dependency on github.com/docker/distribution/digestset.
\ No newline at end of file
diff --git a/vendor/github.com/containers/image/docker/reference/helpers.go b/vendor/github.com/containers/image/docker/reference/helpers.go
new file mode 100644
index 0000000000..978df7eabb
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/reference/helpers.go
@@ -0,0 +1,42 @@
+package reference
+
+import "path"
+
+// IsNameOnly returns true if reference only contains a repo name.
+func IsNameOnly(ref Named) bool {
+ if _, ok := ref.(NamedTagged); ok {
+ return false
+ }
+ if _, ok := ref.(Canonical); ok {
+ return false
+ }
+ return true
+}
+
+// FamiliarName returns the familiar name string
+// for the given named, familiarizing if needed.
+func FamiliarName(ref Named) string {
+ if nn, ok := ref.(normalizedNamed); ok {
+ return nn.Familiar().Name()
+ }
+ return ref.Name()
+}
+
+// FamiliarString returns the familiar string representation
+// for the given reference, familiarizing if needed.
+func FamiliarString(ref Reference) string {
+ if nn, ok := ref.(normalizedNamed); ok {
+ return nn.Familiar().String()
+ }
+ return ref.String()
+}
+
+// FamiliarMatch reports whether ref matches the specified pattern.
+// See https://godoc.org/path#Match for supported patterns.
+func FamiliarMatch(pattern string, ref Reference) (bool, error) {
+ matched, err := path.Match(pattern, FamiliarString(ref))
+ if namedRef, isNamed := ref.(Named); isNamed && !matched {
+ matched, _ = path.Match(pattern, FamiliarName(namedRef))
+ }
+ return matched, err
+}
diff --git a/vendor/github.com/containers/image/docker/reference/normalize.go b/vendor/github.com/containers/image/docker/reference/normalize.go
new file mode 100644
index 0000000000..fcc436a395
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/reference/normalize.go
@@ -0,0 +1,152 @@
+package reference
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/opencontainers/go-digest"
+)
+
+var (
+ legacyDefaultDomain = "index.docker.io"
+ defaultDomain = "docker.io"
+ officialRepoName = "library"
+ defaultTag = "latest"
+)
+
+// normalizedNamed represents a name which has been
+// normalized and has a familiar form. A familiar name
+// is what is used in Docker UI. An example normalized
+// name is "docker.io/library/ubuntu" and corresponding
+// familiar name of "ubuntu".
+type normalizedNamed interface {
+ Named
+ Familiar() Named
+}
+
+// ParseNormalizedNamed parses a string into a named reference
+// transforming a familiar name from Docker UI to a fully
+// qualified reference. If the value may be an identifier
+// use ParseAnyReference.
+func ParseNormalizedNamed(s string) (Named, error) {
+ if ok := anchoredIdentifierRegexp.MatchString(s); ok {
+ return nil, fmt.Errorf("invalid repository name (%s), cannot specify 64-byte hexadecimal strings", s)
+ }
+ domain, remainder := splitDockerDomain(s)
+ var remoteName string
+ if tagSep := strings.IndexRune(remainder, ':'); tagSep > -1 {
+ remoteName = remainder[:tagSep]
+ } else {
+ remoteName = remainder
+ }
+ if strings.ToLower(remoteName) != remoteName {
+ return nil, errors.New("invalid reference format: repository name must be lowercase")
+ }
+
+ ref, err := Parse(domain + "/" + remainder)
+ if err != nil {
+ return nil, err
+ }
+ named, isNamed := ref.(Named)
+ if !isNamed {
+ return nil, fmt.Errorf("reference %s has no name", ref.String())
+ }
+ return named, nil
+}
+
+// splitDockerDomain splits a repository name to domain and remotename string.
+// If no valid domain is found, the default domain is used. Repository name
+// needs to be already validated before.
+func splitDockerDomain(name string) (domain, remainder string) {
+ i := strings.IndexRune(name, '/')
+ if i == -1 || (!strings.ContainsAny(name[:i], ".:") && name[:i] != "localhost") {
+ domain, remainder = defaultDomain, name
+ } else {
+ domain, remainder = name[:i], name[i+1:]
+ }
+ if domain == legacyDefaultDomain {
+ domain = defaultDomain
+ }
+ if domain == defaultDomain && !strings.ContainsRune(remainder, '/') {
+ remainder = officialRepoName + "/" + remainder
+ }
+ return
+}
+
+// familiarizeName returns a shortened version of the name familiar
+// to to the Docker UI. Familiar names have the default domain
+// "docker.io" and "library/" repository prefix removed.
+// For example, "docker.io/library/redis" will have the familiar
+// name "redis" and "docker.io/dmcgowan/myapp" will be "dmcgowan/myapp".
+// Returns a familiarized named only reference.
+func familiarizeName(named namedRepository) repository {
+ repo := repository{
+ domain: named.Domain(),
+ path: named.Path(),
+ }
+
+ if repo.domain == defaultDomain {
+ repo.domain = ""
+ // Handle official repositories which have the pattern "library/"
+ if split := strings.Split(repo.path, "/"); len(split) == 2 && split[0] == officialRepoName {
+ repo.path = split[1]
+ }
+ }
+ return repo
+}
+
+func (r reference) Familiar() Named {
+ return reference{
+ namedRepository: familiarizeName(r.namedRepository),
+ tag: r.tag,
+ digest: r.digest,
+ }
+}
+
+func (r repository) Familiar() Named {
+ return familiarizeName(r)
+}
+
+func (t taggedReference) Familiar() Named {
+ return taggedReference{
+ namedRepository: familiarizeName(t.namedRepository),
+ tag: t.tag,
+ }
+}
+
+func (c canonicalReference) Familiar() Named {
+ return canonicalReference{
+ namedRepository: familiarizeName(c.namedRepository),
+ digest: c.digest,
+ }
+}
+
+// TagNameOnly adds the default tag "latest" to a reference if it only has
+// a repo name.
+func TagNameOnly(ref Named) Named {
+ if IsNameOnly(ref) {
+ namedTagged, err := WithTag(ref, defaultTag)
+ if err != nil {
+ // Default tag must be valid, to create a NamedTagged
+ // type with non-validated input the WithTag function
+ // should be used instead
+ panic(err)
+ }
+ return namedTagged
+ }
+ return ref
+}
+
+// ParseAnyReference parses a reference string as a possible identifier,
+// full digest, or familiar name.
+func ParseAnyReference(ref string) (Reference, error) {
+ if ok := anchoredIdentifierRegexp.MatchString(ref); ok {
+ return digestReference("sha256:" + ref), nil
+ }
+ if dgst, err := digest.Parse(ref); err == nil {
+ return digestReference(dgst), nil
+ }
+
+ return ParseNormalizedNamed(ref)
+}
diff --git a/vendor/github.com/containers/image/docker/reference/reference.go b/vendor/github.com/containers/image/docker/reference/reference.go
new file mode 100644
index 0000000000..fd3510e9ee
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/reference/reference.go
@@ -0,0 +1,433 @@
+// Package reference provides a general type to represent any way of referencing images within the registry.
+// Its main purpose is to abstract tags and digests (content-addressable hash).
+//
+// Grammar
+//
+// reference := name [ ":" tag ] [ "@" digest ]
+// name := [domain '/'] path-component ['/' path-component]*
+// domain := domain-component ['.' domain-component]* [':' port-number]
+// domain-component := /([a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])/
+// port-number := /[0-9]+/
+// path-component := alpha-numeric [separator alpha-numeric]*
+// alpha-numeric := /[a-z0-9]+/
+// separator := /[_.]|__|[-]*/
+//
+// tag := /[\w][\w.-]{0,127}/
+//
+// digest := digest-algorithm ":" digest-hex
+// digest-algorithm := digest-algorithm-component [ digest-algorithm-separator digest-algorithm-component ]
+// digest-algorithm-separator := /[+.-_]/
+// digest-algorithm-component := /[A-Za-z][A-Za-z0-9]*/
+// digest-hex := /[0-9a-fA-F]{32,}/ ; At least 128 bit digest value
+//
+// identifier := /[a-f0-9]{64}/
+// short-identifier := /[a-f0-9]{6,64}/
+package reference
+
+import (
+ "errors"
+ "fmt"
+ "strings"
+
+ "github.com/opencontainers/go-digest"
+)
+
+const (
+ // NameTotalLengthMax is the maximum total number of characters in a repository name.
+ NameTotalLengthMax = 255
+)
+
+var (
+ // ErrReferenceInvalidFormat represents an error while trying to parse a string as a reference.
+ ErrReferenceInvalidFormat = errors.New("invalid reference format")
+
+ // ErrTagInvalidFormat represents an error while trying to parse a string as a tag.
+ ErrTagInvalidFormat = errors.New("invalid tag format")
+
+ // ErrDigestInvalidFormat represents an error while trying to parse a string as a tag.
+ ErrDigestInvalidFormat = errors.New("invalid digest format")
+
+ // ErrNameContainsUppercase is returned for invalid repository names that contain uppercase characters.
+ ErrNameContainsUppercase = errors.New("repository name must be lowercase")
+
+ // ErrNameEmpty is returned for empty, invalid repository names.
+ ErrNameEmpty = errors.New("repository name must have at least one component")
+
+ // ErrNameTooLong is returned when a repository name is longer than NameTotalLengthMax.
+ ErrNameTooLong = fmt.Errorf("repository name must not be more than %v characters", NameTotalLengthMax)
+
+ // ErrNameNotCanonical is returned when a name is not canonical.
+ ErrNameNotCanonical = errors.New("repository name must be canonical")
+)
+
+// Reference is an opaque object reference identifier that may include
+// modifiers such as a hostname, name, tag, and digest.
+type Reference interface {
+ // String returns the full reference
+ String() string
+}
+
+// Field provides a wrapper type for resolving correct reference types when
+// working with encoding.
+type Field struct {
+ reference Reference
+}
+
+// AsField wraps a reference in a Field for encoding.
+func AsField(reference Reference) Field {
+ return Field{reference}
+}
+
+// Reference unwraps the reference type from the field to
+// return the Reference object. This object should be
+// of the appropriate type to further check for different
+// reference types.
+func (f Field) Reference() Reference {
+ return f.reference
+}
+
+// MarshalText serializes the field to byte text which
+// is the string of the reference.
+func (f Field) MarshalText() (p []byte, err error) {
+ return []byte(f.reference.String()), nil
+}
+
+// UnmarshalText parses text bytes by invoking the
+// reference parser to ensure the appropriately
+// typed reference object is wrapped by field.
+func (f *Field) UnmarshalText(p []byte) error {
+ r, err := Parse(string(p))
+ if err != nil {
+ return err
+ }
+
+ f.reference = r
+ return nil
+}
+
+// Named is an object with a full name
+type Named interface {
+ Reference
+ Name() string
+}
+
+// Tagged is an object which has a tag
+type Tagged interface {
+ Reference
+ Tag() string
+}
+
+// NamedTagged is an object including a name and tag.
+type NamedTagged interface {
+ Named
+ Tag() string
+}
+
+// Digested is an object which has a digest
+// in which it can be referenced by
+type Digested interface {
+ Reference
+ Digest() digest.Digest
+}
+
+// Canonical reference is an object with a fully unique
+// name including a name with domain and digest
+type Canonical interface {
+ Named
+ Digest() digest.Digest
+}
+
+// namedRepository is a reference to a repository with a name.
+// A namedRepository has both domain and path components.
+type namedRepository interface {
+ Named
+ Domain() string
+ Path() string
+}
+
+// Domain returns the domain part of the Named reference
+func Domain(named Named) string {
+ if r, ok := named.(namedRepository); ok {
+ return r.Domain()
+ }
+ domain, _ := splitDomain(named.Name())
+ return domain
+}
+
+// Path returns the name without the domain part of the Named reference
+func Path(named Named) (name string) {
+ if r, ok := named.(namedRepository); ok {
+ return r.Path()
+ }
+ _, path := splitDomain(named.Name())
+ return path
+}
+
+func splitDomain(name string) (string, string) {
+ match := anchoredNameRegexp.FindStringSubmatch(name)
+ if len(match) != 3 {
+ return "", name
+ }
+ return match[1], match[2]
+}
+
+// SplitHostname splits a named reference into a
+// hostname and name string. If no valid hostname is
+// found, the hostname is empty and the full value
+// is returned as name
+// DEPRECATED: Use Domain or Path
+func SplitHostname(named Named) (string, string) {
+ if r, ok := named.(namedRepository); ok {
+ return r.Domain(), r.Path()
+ }
+ return splitDomain(named.Name())
+}
+
+// Parse parses s and returns a syntactically valid Reference.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: Parse will not handle short digests.
+func Parse(s string) (Reference, error) {
+	matches := ReferenceRegexp.FindStringSubmatch(s)
+	if matches == nil {
+		// Distinguish the failure modes so callers get a precise error.
+		if s == "" {
+			return nil, ErrNameEmpty
+		}
+		// If lowercasing the input makes it parse, case was the only problem.
+		if ReferenceRegexp.FindStringSubmatch(strings.ToLower(s)) != nil {
+			return nil, ErrNameContainsUppercase
+		}
+		return nil, ErrReferenceInvalidFormat
+	}
+
+	if len(matches[1]) > NameTotalLengthMax {
+		return nil, ErrNameTooLong
+	}
+
+	var repo repository
+
+	// Split the name component (matches[1]) into domain and path.
+	nameMatch := anchoredNameRegexp.FindStringSubmatch(matches[1])
+	if nameMatch != nil && len(nameMatch) == 3 {
+		repo.domain = nameMatch[1]
+		repo.path = nameMatch[2]
+	} else {
+		repo.domain = ""
+		repo.path = matches[1]
+	}
+
+	ref := reference{
+		namedRepository: repo,
+		tag:             matches[2],
+	}
+	if matches[3] != "" {
+		var err error
+		ref.digest, err = digest.Parse(matches[3])
+		if err != nil {
+			return nil, err
+		}
+	}
+
+	// Narrow the generic reference down to the most specific type matching
+	// the components actually present (name/tag/digest).
+	r := getBestReferenceType(ref)
+	if r == nil {
+		return nil, ErrNameEmpty
+	}
+
+	return r, nil
+}
+
+// ParseNamed parses s and returns a syntactically valid reference implementing
+// the Named interface. The reference must have a name and be in the canonical
+// form, otherwise an error is returned.
+// If an error was encountered it is returned, along with a nil Reference.
+// NOTE: ParseNamed will not handle short digests.
+func ParseNamed(s string) (Named, error) {
+	named, err := ParseNormalizedNamed(s)
+	if err != nil {
+		return nil, err
+	}
+	// Only accept input that round-trips unchanged, i.e. is already canonical.
+	if named.String() != s {
+		return nil, ErrNameNotCanonical
+	}
+	return named, nil
+}
+
+// WithName returns a named object representing the given string. If the input
+// is invalid ErrReferenceInvalidFormat will be returned.
+func WithName(name string) (Named, error) {
+	if len(name) > NameTotalLengthMax {
+		return nil, ErrNameTooLong
+	}
+
+	// len(nil) == 0, so a nil match is rejected by the length test as well.
+	m := anchoredNameRegexp.FindStringSubmatch(name)
+	if len(m) != 3 {
+		return nil, ErrReferenceInvalidFormat
+	}
+	return repository{domain: m[1], path: m[2]}, nil
+}
+
+// WithTag combines the name from "name" and the tag from "tag" to form a
+// reference incorporating both the name and the tag.
+func WithTag(name Named, tag string) (NamedTagged, error) {
+	if !anchoredTagRegexp.MatchString(tag) {
+		return nil, ErrTagInvalidFormat
+	}
+	var repo repository
+	if r, ok := name.(namedRepository); ok {
+		repo.domain = r.Domain()
+		repo.path = r.Path()
+	} else {
+		// No pre-split available; treat the full name as the path.
+		repo.path = name.Name()
+	}
+	// If the input already carries a digest, preserve it: the result is then
+	// a full reference (name+tag+digest) rather than just name+tag.
+	if canonical, ok := name.(Canonical); ok {
+		return reference{
+			namedRepository: repo,
+			tag:             tag,
+			digest:          canonical.Digest(),
+		}, nil
+	}
+	return taggedReference{
+		namedRepository: repo,
+		tag:             tag,
+	}, nil
+}
+
+// WithDigest combines the name from "name" and the digest from "digest" to form
+// a reference incorporating both the name and the digest.
+func WithDigest(name Named, digest digest.Digest) (Canonical, error) {
+	if !anchoredDigestRegexp.MatchString(digest.String()) {
+		return nil, ErrDigestInvalidFormat
+	}
+	var repo repository
+	if r, ok := name.(namedRepository); ok {
+		repo.domain = r.Domain()
+		repo.path = r.Path()
+	} else {
+		// No pre-split available; treat the full name as the path.
+		repo.path = name.Name()
+	}
+	// If the input already carries a tag, preserve it: the result is then a
+	// full reference (name+tag+digest) rather than just name+digest.
+	if tagged, ok := name.(Tagged); ok {
+		return reference{
+			namedRepository: repo,
+			tag:             tagged.Tag(),
+			digest:          digest,
+		}, nil
+	}
+	return canonicalReference{
+		namedRepository: repo,
+		digest:          digest,
+	}, nil
+}
+
+// TrimNamed removes any tag or digest from the named reference.
+func TrimNamed(ref Named) Named {
+	d, p := SplitHostname(ref)
+	return repository{domain: d, path: p}
+}
+
+// getBestReferenceType narrows ref down to the most specific concrete type
+// for the components it actually carries; returns nil for an empty reference.
+func getBestReferenceType(ref reference) Reference {
+	if ref.Name() == "" {
+		// Allow digest only references
+		if ref.digest != "" {
+			return digestReference(ref.digest)
+		}
+		return nil
+	}
+	if ref.tag == "" {
+		// Name without tag: either name+digest, or a bare repository.
+		if ref.digest != "" {
+			return canonicalReference{
+				namedRepository: ref.namedRepository,
+				digest:          ref.digest,
+			}
+		}
+		return ref.namedRepository
+	}
+	if ref.digest == "" {
+		// Name with tag but no digest.
+		return taggedReference{
+			namedRepository: ref.namedRepository,
+			tag:             ref.tag,
+		}
+	}
+
+	// All three components present: keep the full reference.
+	return ref
+}
+
+// reference is a fully-qualified reference carrying name, tag and digest.
+type reference struct {
+	namedRepository
+	tag    string
+	digest digest.Digest
+}
+
+// String renders the reference as name:tag@digest.
+func (r reference) String() string {
+	return r.Name() + ":" + r.tag + "@" + r.digest.String()
+}
+
+// Tag returns the tag component.
+func (r reference) Tag() string {
+	return r.tag
+}
+
+// Digest returns the digest component.
+func (r reference) Digest() digest.Digest {
+	return r.digest
+}
+
+// repository holds the domain and path components of a repository name.
+type repository struct {
+	domain string
+	path   string
+}
+
+// String renders the repository as its full name.
+func (r repository) String() string {
+	return r.Name()
+}
+
+// Name returns the full repository name, prefixing the domain when present.
+func (r repository) Name() string {
+	if r.domain == "" {
+		return r.path
+	}
+	return r.domain + "/" + r.path
+}
+
+// Domain returns the domain component (may be empty).
+func (r repository) Domain() string {
+	return r.domain
+}
+
+// Path returns the path component.
+func (r repository) Path() string {
+	return r.path
+}
+
+// digestReference is a digest-only reference (no name).
+type digestReference digest.Digest
+
+// String renders the bare digest.
+func (d digestReference) String() string {
+	return digest.Digest(d).String()
+}
+
+// Digest returns the underlying digest.
+func (d digestReference) Digest() digest.Digest {
+	return digest.Digest(d)
+}
+
+// taggedReference is a name+tag reference without a digest.
+type taggedReference struct {
+	namedRepository
+	tag string
+}
+
+// String renders the reference as name:tag.
+func (t taggedReference) String() string {
+	return t.Name() + ":" + t.tag
+}
+
+// Tag returns the tag component.
+func (t taggedReference) Tag() string {
+	return t.tag
+}
+
+// canonicalReference is a name+digest reference without a tag.
+type canonicalReference struct {
+	namedRepository
+	digest digest.Digest
+}
+
+// String renders the reference as name@digest.
+func (c canonicalReference) String() string {
+	return c.Name() + "@" + c.digest.String()
+}
+
+// Digest returns the digest component.
+func (c canonicalReference) Digest() digest.Digest {
+	return c.digest
+}
diff --git a/vendor/github.com/containers/image/docker/reference/regexp.go b/vendor/github.com/containers/image/docker/reference/regexp.go
new file mode 100644
index 0000000000..405e995db9
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/reference/regexp.go
@@ -0,0 +1,143 @@
+package reference
+
+import "regexp"
+
+var (
+	// alphaNumericRegexp defines the alpha numeric atom, typically a
+	// component of names. This only allows lower case characters and digits.
+	alphaNumericRegexp = match(`[a-z0-9]+`)
+
+	// separatorRegexp defines the separators allowed to be embedded in name
+	// components. This allows one period, one or two underscore and multiple
+	// dashes.
+	separatorRegexp = match(`(?:[._]|__|[-]*)`)
+
+	// nameComponentRegexp restricts registry path component names to start
+	// with at least one letter or number, with following parts able to be
+	// separated by one period, one or two underscore and multiple dashes.
+	nameComponentRegexp = expression(
+		alphaNumericRegexp,
+		optional(repeated(separatorRegexp, alphaNumericRegexp)))
+
+	// domainComponentRegexp restricts the registry domain component of a
+	// repository name to start with a component as defined by domainRegexp
+	// and followed by an optional port.
+	domainComponentRegexp = match(`(?:[a-zA-Z0-9]|[a-zA-Z0-9][a-zA-Z0-9-]*[a-zA-Z0-9])`)
+
+	// domainRegexp defines the structure of potential domain components
+	// that may be part of image names. This is purposely a subset of what is
+	// allowed by DNS to ensure backwards compatibility with Docker image
+	// names.
+	domainRegexp = expression(
+		domainComponentRegexp,
+		optional(repeated(literal(`.`), domainComponentRegexp)),
+		optional(literal(`:`), match(`[0-9]+`)))
+
+	// TagRegexp matches valid tag names. From docker/docker:graph/tags.go.
+	TagRegexp = match(`[\w][\w.-]{0,127}`)
+
+	// anchoredTagRegexp matches valid tag names, anchored at the start and
+	// end of the matched string.
+	anchoredTagRegexp = anchored(TagRegexp)
+
+	// DigestRegexp matches valid digests.
+	DigestRegexp = match(`[A-Za-z][A-Za-z0-9]*(?:[-_+.][A-Za-z][A-Za-z0-9]*)*[:][[:xdigit:]]{32,}`)
+
+	// anchoredDigestRegexp matches valid digests, anchored at the start and
+	// end of the matched string.
+	anchoredDigestRegexp = anchored(DigestRegexp)
+
+	// NameRegexp is the format for the name component of references. The
+	// regexp has capturing groups for the domain and name part omitting
+	// the separating forward slash from either.
+	NameRegexp = expression(
+		optional(domainRegexp, literal(`/`)),
+		nameComponentRegexp,
+		optional(repeated(literal(`/`), nameComponentRegexp)))
+
+	// anchoredNameRegexp is used to parse a name value, capturing the
+	// domain and trailing components.
+	// Submatch indices: 1 = domain (may be empty), 2 = path.
+	anchoredNameRegexp = anchored(
+		optional(capture(domainRegexp), literal(`/`)),
+		capture(nameComponentRegexp,
+			optional(repeated(literal(`/`), nameComponentRegexp))))
+
+	// ReferenceRegexp is the full supported format of a reference. The regexp
+	// is anchored and has capturing groups for name, tag, and digest
+	// components.
+	// Submatch indices: 1 = name, 2 = tag, 3 = digest.
+	ReferenceRegexp = anchored(capture(NameRegexp),
+		optional(literal(":"), capture(TagRegexp)),
+		optional(literal("@"), capture(DigestRegexp)))
+
+	// IdentifierRegexp is the format for string identifier used as a
+	// content addressable identifier using sha256. These identifiers
+	// are like digests without the algorithm, since sha256 is used.
+	IdentifierRegexp = match(`([a-f0-9]{64})`)
+
+	// ShortIdentifierRegexp is the format used to represent a prefix
+	// of an identifier. A prefix may be used to match a sha256 identifier
+	// within a list of trusted identifiers.
+	ShortIdentifierRegexp = match(`([a-f0-9]{6,64})`)
+
+	// anchoredIdentifierRegexp is used to check or match an
+	// identifier value, anchored at start and end of string.
+	anchoredIdentifierRegexp = anchored(IdentifierRegexp)
+
+	// anchoredShortIdentifierRegexp is used to check if a value
+	// is a possible identifier prefix, anchored at start and end
+	// of string.
+	anchoredShortIdentifierRegexp = anchored(ShortIdentifierRegexp)
+)
+
+// match compiles the string to a regular expression.
+var match = regexp.MustCompile
+
+// literal compiles s into a literal regular expression, escaping any regexp
+// reserved characters.
+func literal(s string) *regexp.Regexp {
+	re := match(regexp.QuoteMeta(s))
+
+	// QuoteMeta should always yield a pure literal; treat anything else as a
+	// programmer error.
+	if _, complete := re.LiteralPrefix(); !complete {
+		panic("must be a literal")
+	}
+
+	return re
+}
+
+// expression defines a full expression, where each regular expression must
+// follow the previous.
+func expression(res ...*regexp.Regexp) *regexp.Regexp {
+	var s string
+	for _, re := range res {
+		s += re.String()
+	}
+
+	return match(s)
+}
+
+// optional wraps the expression in a non-capturing group and makes the
+// production optional.
+func optional(res ...*regexp.Regexp) *regexp.Regexp {
+	return match(group(expression(res...)).String() + `?`)
+}
+
+// repeated wraps the regexp in a non-capturing group to get one or more
+// matches.
+func repeated(res ...*regexp.Regexp) *regexp.Regexp {
+	return match(group(expression(res...)).String() + `+`)
+}
+
+// group wraps the regexp in a non-capturing group.
+func group(res ...*regexp.Regexp) *regexp.Regexp {
+	return match(`(?:` + expression(res...).String() + `)`)
+}
+
+// capture wraps the expression in a capturing group.
+func capture(res ...*regexp.Regexp) *regexp.Regexp {
+	return match(`(` + expression(res...).String() + `)`)
+}
+
+// anchored anchors the regular expression by adding start and end delimiters.
+func anchored(res ...*regexp.Regexp) *regexp.Regexp {
+	return match(`^` + expression(res...).String() + `$`)
+}
diff --git a/vendor/github.com/containers/image/docker/tarfile/dest.go b/vendor/github.com/containers/image/docker/tarfile/dest.go
new file mode 100644
index 0000000000..d0b78d637e
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/tarfile/dest.go
@@ -0,0 +1,258 @@
+package tarfile
+
+import (
+ "archive/tar"
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+ "io/ioutil"
+ "os"
+ "time"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+const temporaryDirectoryForBigFiles = "/var/tmp" // Do not use the system default of os.TempDir(), usually /tmp, because with systemd it could be a tmpfs.
+
+// Destination is a partial implementation of types.ImageDestination for writing to an io.Writer.
+type Destination struct {
+	writer  io.Writer
+	tar     *tar.Writer // tar stream layered over writer; finished by Commit()
+	repoTag string      // "name:tag" string recorded in manifest.json RepoTags
+	// Other state.
+	blobs map[digest.Digest]types.BlobInfo // list of already-sent blobs
+}
+
+// NewDestination returns a tarfile.Destination for the specified io.Writer.
+// The ref determines the single RepoTags entry written into manifest.json.
+func NewDestination(dest io.Writer, ref reference.NamedTagged) *Destination {
+	// For github.com/docker/docker consumers, this works just as well as
+	//   refString := ref.String()
+	// because when reading the RepoTags strings, github.com/docker/docker/reference
+	// normalizes both of them to the same value.
+	//
+	// Doing it this way to include the normalized-out `docker.io[/library]` does make
+	// a difference for github.com/projectatomic/docker consumers, with the
+	// “Add --add-registry and --block-registry options to docker daemon” patch.
+	// These consumers treat reference strings which include a hostname and reference
+	// strings without a hostname differently.
+	//
+	// Using the host name here is more explicit about the intent, and it has the same
+	// effect as (docker pull) in projectatomic/docker, which tags the result using
+	// a hostname-qualified reference.
+	// See https://github.com/containers/image/issues/72 for a more detailed
+	// analysis and explanation.
+	refString := fmt.Sprintf("%s:%s", ref.Name(), ref.Tag())
+	return &Destination{
+		writer:  dest,
+		tar:     tar.NewWriter(dest),
+		repoTag: refString,
+		blobs:   make(map[digest.Digest]types.BlobInfo),
+	}
+}
+
+// SupportedManifestMIMETypes tells which manifest mime types the destination supports
+// If an empty slice or nil it's returned, then any mime type can be tried to upload
+func (d *Destination) SupportedManifestMIMETypes() []string {
+	return []string{
+		manifest.DockerV2Schema2MediaType, // We rely on the types.Image.UpdatedImage schema conversion capabilities.
+	}
+}
+
+// SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+// Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+func (d *Destination) SupportsSignatures() error {
+	return errors.Errorf("Storing signatures for docker tar files is not supported")
+}
+
+// ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+// Always false here: layers in a docker save-style tarball are stored uncompressed.
+func (d *Destination) ShouldCompressLayers() bool {
+	return false
+}
+
+// AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+// uploaded to the image destination, true otherwise.
+func (d *Destination) AcceptsForeignLayerURLs() bool {
+	return false
+}
+
+// MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+func (d *Destination) MustMatchRuntimeOS() bool {
+	return false
+}
+
+// PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+// inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+// inputInfo.Size is the expected length of stream, if known.
+// WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+// to any other readers for download using the supplied digest.
+// If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+func (d *Destination) PutBlob(stream io.Reader, inputInfo types.BlobInfo) (types.BlobInfo, error) {
+	// A digest is mandatory here: the blob's file name inside the tar archive
+	// is derived from it (see the sendFile call below).
+	if inputInfo.Digest.String() == "" {
+		return types.BlobInfo{}, errors.Errorf("Can not stream a blob with unknown digest to docker tarfile")
+	}
+
+	// Deduplicate: if this blob was already written, just report it as stored.
+	ok, size, err := d.HasBlob(inputInfo)
+	if err != nil {
+		return types.BlobInfo{}, err
+	}
+	if ok {
+		return types.BlobInfo{Digest: inputInfo.Digest, Size: size}, nil
+	}
+
+	if inputInfo.Size == -1 { // Ouch, we need to stream the blob into a temporary file just to determine the size.
+		logrus.Debugf("docker tarfile: input with unknown size, streaming to disk first ...")
+		streamCopy, err := ioutil.TempFile(temporaryDirectoryForBigFiles, "docker-tarfile-blob")
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		defer os.Remove(streamCopy.Name())
+		defer streamCopy.Close()
+
+		size, err := io.Copy(streamCopy, stream)
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		_, err = streamCopy.Seek(0, os.SEEK_SET)
+		if err != nil {
+			return types.BlobInfo{}, err
+		}
+		inputInfo.Size = size // inputInfo is a struct, so we are only modifying our copy.
+		stream = streamCopy
+		logrus.Debugf("... streaming done")
+	}
+
+	// Compute the actual digest of the data while copying it into the tar stream.
+	digester := digest.Canonical.Digester()
+	tee := io.TeeReader(stream, digester.Hash())
+	if err := d.sendFile(inputInfo.Digest.String(), inputInfo.Size, tee); err != nil {
+		return types.BlobInfo{}, err
+	}
+	// NOTE(review): the in-archive file name uses the caller-supplied
+	// inputInfo.Digest, while the recorded/returned BlobInfo carries the locally
+	// computed digester.Digest(); if the caller's digest is wrong these disagree
+	// and nothing here rejects the blob — confirm this is intended.
+	d.blobs[inputInfo.Digest] = types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}
+	return types.BlobInfo{Digest: digester.Digest(), Size: inputInfo.Size}, nil
+}
+
+// HasBlob returns true iff the image destination already contains a blob with
+// the matching digest which can be reapplied using ReapplyBlob. Unlike
+// PutBlob, the digest can not be empty. If HasBlob returns true, the size of
+// the blob must also be returned. If the destination does not contain the
+// blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil); it
+// returns a non-nil error only on an unexpected failure.
+func (d *Destination) HasBlob(info types.BlobInfo) (bool, int64, error) {
+	if info.Digest == "" {
+		return false, -1, errors.Errorf("Can not check for a blob with unknown digest")
+	}
+	blob, ok := d.blobs[info.Digest]
+	if !ok {
+		return false, -1, nil
+	}
+	return true, blob.Size, nil
+}
+
+// ReapplyBlob informs the image destination that a blob for which HasBlob
+// previously returned true would have been passed to PutBlob if it had
+// returned false. Like HasBlob and unlike PutBlob, the digest can not be
+// empty. If the blob is a filesystem layer, this signifies that the changes
+// it describes need to be applied again when composing a filesystem tree.
+// A no-op here: the blob is already present in the tar stream.
+func (d *Destination) ReapplyBlob(info types.BlobInfo) (types.BlobInfo, error) {
+	return info, nil
+}
+
+// PutManifest writes manifest to the destination.
+// Only Docker schema 2 manifests are accepted; they are converted into a
+// single-entry manifest.json array as produced by (docker save).
+// FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+// If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+// but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
+func (d *Destination) PutManifest(m []byte) error {
+	// We do not bother with types.ManifestTypeRejectedError; our .SupportedManifestMIMETypes() above is already providing only one alternative,
+	// so the caller trying a different manifest kind would be pointless.
+	var man schema2Manifest
+	if err := json.Unmarshal(m, &man); err != nil {
+		return errors.Wrap(err, "Error parsing manifest")
+	}
+	if man.SchemaVersion != 2 || man.MediaType != manifest.DockerV2Schema2MediaType {
+		return errors.Errorf("Unsupported manifest type, need a Docker schema 2 manifest")
+	}
+
+	// Layer entries in manifest.json are the digest strings, matching the
+	// file names PutBlob used inside the archive.
+	layerPaths := []string{}
+	for _, l := range man.Layers {
+		layerPaths = append(layerPaths, l.Digest.String())
+	}
+
+	items := []ManifestItem{{
+		Config:       man.Config.Digest.String(),
+		RepoTags:     []string{d.repoTag},
+		Layers:       layerPaths,
+		Parent:       "",
+		LayerSources: nil,
+	}}
+	itemsBytes, err := json.Marshal(&items)
+	if err != nil {
+		return err
+	}
+
+	// FIXME? Do we also need to support the legacy format?
+	return d.sendFile(manifestFileName, int64(len(itemsBytes)), bytes.NewReader(itemsBytes))
+}
+
+// tarFI is a minimal os.FileInfo implementation used to synthesize tar
+// headers for in-memory "files" of a given name and size.
+type tarFI struct {
+	path string
+	size int64
+}
+
+// Name returns the file name within the archive.
+func (t *tarFI) Name() string {
+	return t.path
+}
+
+// Size returns the declared file size.
+func (t *tarFI) Size() int64 {
+	return t.size
+}
+
+// Mode marks all synthesized entries read-only.
+func (t *tarFI) Mode() os.FileMode {
+	return 0444
+}
+
+// ModTime is fixed at the Unix epoch for reproducible archives.
+func (t *tarFI) ModTime() time.Time {
+	return time.Unix(0, 0)
+}
+
+// IsDir is always false; only regular files are synthesized.
+func (t *tarFI) IsDir() bool {
+	return false
+}
+
+// Sys returns no underlying data source.
+func (t *tarFI) Sys() interface{} {
+	return nil
+}
+
+// sendFile writes a single file entry (header + exactly expectedSize bytes
+// read from stream) into the tar stream. It fails if stream yields a
+// different number of bytes than expectedSize.
+func (d *Destination) sendFile(path string, expectedSize int64, stream io.Reader) error {
+	hdr, err := tar.FileInfoHeader(&tarFI{path: path, size: expectedSize}, "")
+	if err != nil {
+		// BUG FIX: this previously did "return nil", silently swallowing the
+		// error and skipping the file while reporting success.
+		return err
+	}
+	logrus.Debugf("Sending as tar file %s", path)
+	if err := d.tar.WriteHeader(hdr); err != nil {
+		return err
+	}
+	size, err := io.Copy(d.tar, stream)
+	if err != nil {
+		return err
+	}
+	if size != expectedSize {
+		return errors.Errorf("Size mismatch when copying %s, expected %d, got %d", path, expectedSize, size)
+	}
+	return nil
+}
+
+// PutSignatures adds the given signatures to the docker tarfile (currently not
+// supported). MUST be called after PutManifest (signatures reference manifest
+// contents)
+func (d *Destination) PutSignatures(signatures [][]byte) error {
+	if len(signatures) == 0 {
+		// Nothing to store; only an empty signature list is acceptable.
+		return nil
+	}
+	return errors.Errorf("Storing signatures for docker tar files is not supported")
+}
+
+// Commit finishes writing data to the underlying io.Writer.
+// It is the caller's responsibility to close it, if necessary.
+// Closing the tar.Writer flushes the trailing archive padding.
+func (d *Destination) Commit() error {
+	return d.tar.Close()
+}
diff --git a/vendor/github.com/containers/image/docker/tarfile/doc.go b/vendor/github.com/containers/image/docker/tarfile/doc.go
new file mode 100644
index 0000000000..4ea5369c05
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/tarfile/doc.go
@@ -0,0 +1,3 @@
+// Package tarfile is an internal implementation detail of some transports.
+// Do not use outside of the github.com/containers/image repo!
+package tarfile
diff --git a/vendor/github.com/containers/image/docker/tarfile/src.go b/vendor/github.com/containers/image/docker/tarfile/src.go
new file mode 100644
index 0000000000..f77cb713c4
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/tarfile/src.go
@@ -0,0 +1,360 @@
+package tarfile
+
+import (
+ "archive/tar"
+ "bytes"
+ "context"
+ "encoding/json"
+ "io"
+ "io/ioutil"
+ "os"
+ "path"
+
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/pkg/compression"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// Source is a partial implementation of types.ImageSource for reading from tarPath.
+type Source struct {
+	tarPath string
+	// The following data is only available after ensureCachedDataIsPresent() succeeds
+	tarManifest       *ManifestItem // nil if not available yet.
+	configBytes       []byte
+	configDigest      digest.Digest
+	orderedDiffIDList []diffID
+	knownLayers       map[diffID]*layerInfo
+	// Other state
+	generatedManifest []byte // Private cache for GetManifest(), nil if not set yet.
+}
+
+// layerInfo records a layer's path inside the archive and its size in bytes
+// (-1 until discovered by scanning the tar).
+type layerInfo struct {
+	path string
+	size int64
+}
+
+// NewSource returns a tarfile.Source for the specified path.
+func NewSource(path string) *Source {
+	// TODO: We could add support for multiple images in a single archive, so
+	// that people could use docker-archive:opensuse.tar:opensuse:leap as
+	// the source of an image.
+	return &Source{
+		tarPath: path,
+	}
+}
+
+// tarReadCloser is a way to close the backing file of a tar.Reader when the user no longer needs the tar component.
+type tarReadCloser struct {
+	*tar.Reader
+	backingFile *os.File
+}
+
+// Close releases the backing file; the embedded tar.Reader needs no cleanup.
+func (t *tarReadCloser) Close() error {
+	return t.backingFile.Close()
+}
+
+// openTarComponent returns a ReadCloser for the specific file within the archive.
+// This is a linear scan; we assume that the tar file will have a fairly small amount of files (~layers),
+// and that filesystem caching will make the repeated seeking over the (uncompressed) tarPath cheap enough.
+// The caller should call .Close() on the returned stream.
+func (s *Source) openTarComponent(componentPath string) (io.ReadCloser, error) {
+	f, err := os.Open(s.tarPath)
+	if err != nil {
+		return nil, err
+	}
+	// Close the file on every error path; on success, ownership passes to
+	// the returned tarReadCloser.
+	succeeded := false
+	defer func() {
+		if !succeeded {
+			f.Close()
+		}
+	}()
+
+	tarReader, header, err := findTarComponent(f, componentPath)
+	if err != nil {
+		return nil, err
+	}
+	if header == nil {
+		return nil, os.ErrNotExist
+	}
+	if header.FileInfo().Mode()&os.ModeType == os.ModeSymlink { // FIXME: untested
+		// We follow only one symlink; so no loops are possible.
+		if _, err := f.Seek(0, os.SEEK_SET); err != nil {
+			return nil, err
+		}
+		// The new path could easily point "outside" the archive, but we only compare it to existing tar headers without extracting the archive,
+		// so we don't care.
+		tarReader, header, err = findTarComponent(f, path.Join(path.Dir(componentPath), header.Linkname))
+		if err != nil {
+			return nil, err
+		}
+		if header == nil {
+			return nil, os.ErrNotExist
+		}
+	}
+
+	if !header.FileInfo().Mode().IsRegular() {
+		return nil, errors.Errorf("Error reading tar archive component %s: not a regular file", header.Name)
+	}
+	succeeded = true
+	return &tarReadCloser{Reader: tarReader, backingFile: f}, nil
+}
+
+// findTarComponent returns a header and a reader matching path within inputFile,
+// or (nil, nil, nil) if not found.
+func findTarComponent(inputFile io.Reader, path string) (*tar.Reader, *tar.Header, error) {
+	t := tar.NewReader(inputFile)
+	for {
+		h, err := t.Next()
+		if err != nil {
+			if err == io.EOF {
+				// Exhausted the archive without a match.
+				return nil, nil, nil
+			}
+			return nil, nil, err
+		}
+		if h.Name == path {
+			return t, h, nil
+		}
+	}
+}
+
+// readTarComponent returns full contents of componentPath.
+func (s *Source) readTarComponent(path string) ([]byte, error) {
+	file, err := s.openTarComponent(path)
+	if err != nil {
+		return nil, errors.Wrapf(err, "Error loading tar component %s", path)
+	}
+	defer file.Close()
+	return ioutil.ReadAll(file)
+}
+
+// ensureCachedDataIsPresent loads data necessary for any of the public accessors.
+// It is idempotent: a non-nil tarManifest marks the cache as populated.
+func (s *Source) ensureCachedDataIsPresent() error {
+	if s.tarManifest != nil {
+		return nil
+	}
+
+	// Read and parse manifest.json
+	tarManifest, err := s.loadTarManifest()
+	if err != nil {
+		return err
+	}
+
+	// Check to make sure length is 1
+	if len(tarManifest) != 1 {
+		return errors.Errorf("Unexpected tar manifest.json: expected 1 item, got %d", len(tarManifest))
+	}
+
+	// Read and parse config.
+	configBytes, err := s.readTarComponent(tarManifest[0].Config)
+	if err != nil {
+		return err
+	}
+	var parsedConfig image // Most fields omitted, we only care about layer DiffIDs.
+	if err := json.Unmarshal(configBytes, &parsedConfig); err != nil {
+		return errors.Wrapf(err, "Error decoding tar config %s", tarManifest[0].Config)
+	}
+
+	knownLayers, err := s.prepareLayerData(&tarManifest[0], &parsedConfig)
+	if err != nil {
+		return err
+	}
+
+	// Success; commit. All fields are set together only after every step
+	// succeeded, so a failed call leaves the cache unpopulated.
+	s.tarManifest = &tarManifest[0]
+	s.configBytes = configBytes
+	s.configDigest = digest.FromBytes(configBytes)
+	s.orderedDiffIDList = parsedConfig.RootFS.DiffIDs
+	s.knownLayers = knownLayers
+	return nil
+}
+
+// loadTarManifest loads and decodes the manifest.json.
+// FIXME? Do we need to deal with the legacy format?
+func (s *Source) loadTarManifest() ([]ManifestItem, error) {
+	bytes, err := s.readTarComponent(manifestFileName)
+	if err != nil {
+		return nil, err
+	}
+	var items []ManifestItem
+	if err := json.Unmarshal(bytes, &items); err != nil {
+		return nil, errors.Wrap(err, "Error decoding tar manifest.json")
+	}
+	return items, nil
+}
+
+// LoadTarManifest loads and decodes the manifest.json
+// (exported wrapper around loadTarManifest).
+func (s *Source) LoadTarManifest() ([]ManifestItem, error) {
+	return s.loadTarManifest()
+}
+
+// prepareLayerData cross-checks the manifest's layer list against the
+// config's DiffIDs and determines each layer's size by scanning the tar.
+// Returns a map from DiffID to the layer's in-archive path and size.
+func (s *Source) prepareLayerData(tarManifest *ManifestItem, parsedConfig *image) (map[diffID]*layerInfo, error) {
+	// Collect layer data available in manifest and config.
+	if len(tarManifest.Layers) != len(parsedConfig.RootFS.DiffIDs) {
+		return nil, errors.Errorf("Inconsistent layer count: %d in manifest, %d in config", len(tarManifest.Layers), len(parsedConfig.RootFS.DiffIDs))
+	}
+	knownLayers := map[diffID]*layerInfo{}
+	unknownLayerSizes := map[string]*layerInfo{} // Points into knownLayers, a "to do list" of items with unknown sizes.
+	for i, diffID := range parsedConfig.RootFS.DiffIDs {
+		if _, ok := knownLayers[diffID]; ok {
+			// Apparently it really can happen that a single image contains the same layer diff more than once.
+			// In that case, the diffID validation ensures that both layers truly are the same, and it should not matter
+			// which of the tarManifest.Layers paths is used; (docker save) actually makes the duplicates symlinks to the original.
+			continue
+		}
+		layerPath := tarManifest.Layers[i]
+		if _, ok := unknownLayerSizes[layerPath]; ok {
+			return nil, errors.Errorf("Layer tarfile %s used for two different DiffID values", layerPath)
+		}
+		li := &layerInfo{ // A new element in each iteration
+			path: layerPath,
+			size: -1,
+		}
+		knownLayers[diffID] = li
+		unknownLayerSizes[layerPath] = li
+	}
+
+	// Scan the tar file to collect layer sizes.
+	file, err := os.Open(s.tarPath)
+	if err != nil {
+		return nil, err
+	}
+	defer file.Close()
+	t := tar.NewReader(file)
+	for {
+		h, err := t.Next()
+		if err == io.EOF {
+			break
+		}
+		if err != nil {
+			return nil, err
+		}
+		if li, ok := unknownLayerSizes[h.Name]; ok {
+			li.size = h.Size
+			delete(unknownLayerSizes, h.Name)
+		}
+	}
+	// Any entry left in the "to do list" was referenced but never found.
+	if len(unknownLayerSizes) != 0 {
+		return nil, errors.Errorf("Some layer tarfiles are missing in the tarball") // This could do with a better error reporting, if this ever happened in practice.
+	}
+
+	return knownLayers, nil
+}
+
+// GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+// It may use a remote (= slow) service.
+// The schema2 manifest is synthesized from the tarball's manifest.json/config
+// and cached in s.generatedManifest after the first call.
+func (s *Source) GetManifest() ([]byte, string, error) {
+	if s.generatedManifest == nil {
+		if err := s.ensureCachedDataIsPresent(); err != nil {
+			return nil, "", err
+		}
+		m := schema2Manifest{
+			SchemaVersion: 2,
+			MediaType:     manifest.DockerV2Schema2MediaType,
+			Config: distributionDescriptor{
+				MediaType: manifest.DockerV2Schema2ConfigMediaType,
+				Size:      int64(len(s.configBytes)),
+				Digest:    s.configDigest,
+			},
+			Layers: []distributionDescriptor{},
+		}
+		// Layers are listed in the config's DiffID order.
+		for _, diffID := range s.orderedDiffIDList {
+			li, ok := s.knownLayers[diffID]
+			if !ok {
+				return nil, "", errors.Errorf("Internal inconsistency: Information about layer %s missing", diffID)
+			}
+			m.Layers = append(m.Layers, distributionDescriptor{
+				Digest:    digest.Digest(diffID), // diffID is a digest of the uncompressed tarball
+				MediaType: manifest.DockerV2Schema2LayerMediaType,
+				Size:      li.size,
+			})
+		}
+		manifestBytes, err := json.Marshal(&m)
+		if err != nil {
+			return nil, "", err
+		}
+		s.generatedManifest = manifestBytes
+	}
+	return s.generatedManifest, manifest.DockerV2Schema2MediaType, nil
+}
+
+// GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest
+// out of a manifest list.
+func (s *Source) GetTargetManifest(digest digest.Digest) ([]byte, string, error) {
+	// How did we even get here? GetManifest() above has returned a manifest.DockerV2Schema2MediaType.
+	return nil, "", errors.Errorf(`Manifest lists are not supported by "docker-daemon:"`)
+}
+
+// readCloseWrapper pairs an io.Reader with an arbitrary cleanup callback,
+// so a decompressed view can close the underlying stream.
+type readCloseWrapper struct {
+	io.Reader
+	closeFunc func() error
+}
+
+// Close invokes the wrapped cleanup callback, if any.
+func (r readCloseWrapper) Close() error {
+	if r.closeFunc != nil {
+		return r.closeFunc()
+	}
+	return nil
+}
+
+// GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+// The requested digest is either the config digest or a layer DiffID.
+func (s *Source) GetBlob(info types.BlobInfo) (io.ReadCloser, int64, error) {
+	if err := s.ensureCachedDataIsPresent(); err != nil {
+		return nil, 0, err
+	}
+
+	if info.Digest == s.configDigest { // FIXME? Implement a more general algorithm matching instead of assuming sha256.
+		return ioutil.NopCloser(bytes.NewReader(s.configBytes)), int64(len(s.configBytes)), nil
+	}
+
+	if li, ok := s.knownLayers[diffID(info.Digest)]; ok { // diffID is a digest of the uncompressed tarball,
+		stream, err := s.openTarComponent(li.path)
+		if err != nil {
+			return nil, 0, err
+		}
+
+		// In order to handle the fact that digests != diffIDs (and thus that a
+		// caller which is trying to verify the blob will run into problems),
+		// we need to decompress blobs. This is a bit ugly, but it's a
+		// consequence of making everything addressable by their DiffID rather
+		// than by their digest...
+		//
+		// In particular, because the v2s2 manifest being generated uses
+		// DiffIDs, any caller of GetBlob is going to be asking for DiffIDs of
+		// layers not their _actual_ digest. The result is that copy/... will
+		// be verifying a "digest" which is not the actual layer's digest (but
+		// is instead the DiffID).
+
+		decompressFunc, reader, err := compression.DetectCompression(stream)
+		if err != nil {
+			return nil, 0, errors.Wrapf(err, "Detecting compression in blob %s", info.Digest)
+		}
+
+		if decompressFunc != nil {
+			reader, err = decompressFunc(reader)
+			if err != nil {
+				return nil, 0, errors.Wrapf(err, "Decompressing blob %s stream", info.Digest)
+			}
+		}
+
+		// Close the underlying tar component when the (possibly decompressed)
+		// view is closed.
+		newStream := readCloseWrapper{
+			Reader: reader,
+			closeFunc: stream.Close,
+		}
+
+		// NOTE(review): li.size is the stored (possibly compressed) size, while
+		// the returned stream is decompressed — the two may disagree; confirm
+		// callers treat the size as advisory.
+		return newStream, li.size, nil
+	}
+
+	return nil, 0, errors.Errorf("Unknown blob %s", info.Digest)
+}
+
+// GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+// Docker tarballs carry no signatures, so this always returns an empty list.
+func (s *Source) GetSignatures(ctx context.Context) ([][]byte, error) {
+	return [][]byte{}, nil
+}
diff --git a/vendor/github.com/containers/image/docker/tarfile/types.go b/vendor/github.com/containers/image/docker/tarfile/types.go
new file mode 100644
index 0000000000..f16cc8c62a
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/tarfile/types.go
@@ -0,0 +1,54 @@
+package tarfile
+
+import "github.com/opencontainers/go-digest"
+
+// Various data structures.
+
+// Based on github.com/docker/docker/image/tarexport/tarexport.go
+const (
+ manifestFileName = "manifest.json"
+ // legacyLayerFileName = "layer.tar"
+ // legacyConfigFileName = "json"
+ // legacyVersionFileName = "VERSION"
+ // legacyRepositoriesFileName = "repositories"
+)
+
+// ManifestItem is an element of the array stored in the top-level manifest.json file.
+type ManifestItem struct {
+ Config string
+ RepoTags []string
+ Layers []string
+ Parent imageID `json:",omitempty"`
+ LayerSources map[diffID]distributionDescriptor `json:",omitempty"`
+}
+
+type imageID string
+type diffID digest.Digest
+
+// Based on github.com/docker/distribution/blobs.go
+type distributionDescriptor struct {
+ MediaType string `json:"mediaType,omitempty"`
+ Size int64 `json:"size,omitempty"`
+ Digest digest.Digest `json:"digest,omitempty"`
+ URLs []string `json:"urls,omitempty"`
+}
+
+// Based on github.com/docker/distribution/manifest/schema2/manifest.go
+// FIXME: We are repeating this all over the place; make a public copy?
+type schema2Manifest struct {
+ SchemaVersion int `json:"schemaVersion"`
+ MediaType string `json:"mediaType,omitempty"`
+ Config distributionDescriptor `json:"config"`
+ Layers []distributionDescriptor `json:"layers"`
+}
+
+// Based on github.com/docker/docker/image/image.go
+// MOST CONTENT OMITTED AS UNNECESSARY
+type image struct {
+ RootFS *rootFS `json:"rootfs,omitempty"`
+}
+
+type rootFS struct {
+ Type string `json:"type"`
+ DiffIDs []diffID `json:"diff_ids,omitempty"`
+}
diff --git a/vendor/github.com/containers/image/docker/wwwauthenticate.go b/vendor/github.com/containers/image/docker/wwwauthenticate.go
new file mode 100644
index 0000000000..23664a74a5
--- /dev/null
+++ b/vendor/github.com/containers/image/docker/wwwauthenticate.go
@@ -0,0 +1,159 @@
+package docker
+
+// Based on github.com/docker/distribution/registry/client/auth/authchallenge.go, primarily stripping unnecessary dependencies.
+
+import (
+ "net/http"
+ "strings"
+)
+
+// challenge carries information from a WWW-Authenticate response header.
+// See RFC 7235.
+type challenge struct {
+ // Scheme is the auth-scheme according to RFC 7235
+ Scheme string
+
+ // Parameters are the auth-params according to RFC 7235
+ Parameters map[string]string
+}
+
+// Octet types from RFC 7230.
+type octetType byte
+
+var octetTypes [256]octetType
+
+const (
+ isToken octetType = 1 << iota
+ isSpace
+)
+
+func init() {
+ // OCTET      = <any 8-bit sequence of data>
+ // CHAR       = <any US-ASCII character (octets 0 - 127)>
+ // CTL        = <any US-ASCII control character (octets 0 - 31) and DEL (127)>
+ // CR         = <US-ASCII CR, carriage return (13)>
+ // LF         = <US-ASCII LF, linefeed (10)>
+ // SP         = <US-ASCII SP, space (32)>
+ // HT         = <US-ASCII HT, horizontal-tab (9)>
+ // <">        = <US-ASCII double-quote mark (34)>
+ // CRLF       = CR LF
+ // LWS        = [CRLF] 1*( SP | HT )
+ // TEXT       = <any OCTET except CTLs, but including LWS>
+ // separators = "(" | ")" | "<" | ">" | "@" | "," | ";" | ":" | "\" | <">
+ //              | "/" | "[" | "]" | "?" | "=" | "{" | "}" | SP | HT
+ // token      = 1*<any CHAR except CTLs or separators>
+ // qdtext     = <any TEXT except <">>
+
+ for c := 0; c < 256; c++ {
+ var t octetType
+ isCtl := c <= 31 || c == 127
+ isChar := 0 <= c && c <= 127
+ isSeparator := strings.IndexRune(" \t\"(),/:;<=>?@[]\\{}", rune(c)) >= 0
+ if strings.IndexRune(" \t\r\n", rune(c)) >= 0 {
+ t |= isSpace
+ }
+ if isChar && !isCtl && !isSeparator {
+ t |= isToken
+ }
+ octetTypes[c] = t
+ }
+}
+
+func parseAuthHeader(header http.Header) []challenge {
+ challenges := []challenge{}
+ for _, h := range header[http.CanonicalHeaderKey("WWW-Authenticate")] {
+ v, p := parseValueAndParams(h)
+ if v != "" {
+ challenges = append(challenges, challenge{Scheme: v, Parameters: p})
+ }
+ }
+ return challenges
+}
+
+// NOTE: This is not a fully compliant parser per RFC 7235:
+// Most notably it does not support more than one challenge within a single header
+// Some of the whitespace parsing also seems noncompliant.
+// But it is clearly better than what we used to have…
+func parseValueAndParams(header string) (value string, params map[string]string) {
+ params = make(map[string]string)
+ value, s := expectToken(header)
+ if value == "" {
+ return
+ }
+ value = strings.ToLower(value)
+ s = "," + skipSpace(s)
+ for strings.HasPrefix(s, ",") {
+ var pkey string
+ pkey, s = expectToken(skipSpace(s[1:]))
+ if pkey == "" {
+ return
+ }
+ if !strings.HasPrefix(s, "=") {
+ return
+ }
+ var pvalue string
+ pvalue, s = expectTokenOrQuoted(s[1:])
+ if pvalue == "" {
+ return
+ }
+ pkey = strings.ToLower(pkey)
+ params[pkey] = pvalue
+ s = skipSpace(s)
+ }
+ return
+}
+
+func skipSpace(s string) (rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isSpace == 0 {
+ break
+ }
+ }
+ return s[i:]
+}
+
+func expectToken(s string) (token, rest string) {
+ i := 0
+ for ; i < len(s); i++ {
+ if octetTypes[s[i]]&isToken == 0 {
+ break
+ }
+ }
+ return s[:i], s[i:]
+}
+
+func expectTokenOrQuoted(s string) (value string, rest string) {
+ if !strings.HasPrefix(s, "\"") {
+ return expectToken(s)
+ }
+ s = s[1:]
+ for i := 0; i < len(s); i++ {
+ switch s[i] {
+ case '"':
+ return s[:i], s[i+1:]
+ case '\\':
+ p := make([]byte, len(s)-1)
+ j := copy(p, s[:i])
+ escape := true
+ for i = i + 1; i < len(s); i++ {
+ b := s[i]
+ switch {
+ case escape:
+ escape = false
+ p[j] = b
+ j++
+ case b == '\\':
+ escape = true
+ case b == '"':
+ return string(p[:j]), s[i+1:]
+ default:
+ p[j] = b
+ j++
+ }
+ }
+ return "", ""
+ }
+ }
+ return "", ""
+}
diff --git a/vendor/github.com/containers/image/image/docker_list.go b/vendor/github.com/containers/image/image/docker_list.go
new file mode 100644
index 0000000000..c79adaccab
--- /dev/null
+++ b/vendor/github.com/containers/image/image/docker_list.go
@@ -0,0 +1,63 @@
+package image
+
+import (
+ "encoding/json"
+ "runtime"
+
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+type platformSpec struct {
+ Architecture string `json:"architecture"`
+ OS string `json:"os"`
+ OSVersion string `json:"os.version,omitempty"`
+ OSFeatures []string `json:"os.features,omitempty"`
+ Variant string `json:"variant,omitempty"`
+ Features []string `json:"features,omitempty"` // removed in OCI
+}
+
+// A manifestDescriptor references a platform-specific manifest.
+type manifestDescriptor struct {
+ descriptor
+ Platform platformSpec `json:"platform"`
+}
+
+type manifestList struct {
+ SchemaVersion int `json:"schemaVersion"`
+ MediaType string `json:"mediaType"`
+ Manifests []manifestDescriptor `json:"manifests"`
+}
+
+func manifestSchema2FromManifestList(src types.ImageSource, manblob []byte) (genericManifest, error) {
+ list := manifestList{}
+ if err := json.Unmarshal(manblob, &list); err != nil {
+ return nil, err
+ }
+ var targetManifestDigest digest.Digest
+ for _, d := range list.Manifests {
+ if d.Platform.Architecture == runtime.GOARCH && d.Platform.OS == runtime.GOOS {
+ targetManifestDigest = d.Digest
+ break
+ }
+ }
+ if targetManifestDigest == "" {
+ return nil, errors.New("no supported platform found in manifest list")
+ }
+ manblob, mt, err := src.GetTargetManifest(targetManifestDigest)
+ if err != nil {
+ return nil, err
+ }
+
+ matches, err := manifest.MatchesDigest(manblob, targetManifestDigest)
+ if err != nil {
+ return nil, errors.Wrap(err, "Error computing manifest digest")
+ }
+ if !matches {
+ return nil, errors.Errorf("Manifest image does not match selected manifest digest %s", targetManifestDigest)
+ }
+
+ return manifestInstanceFromBlob(src, manblob, mt)
+}
diff --git a/vendor/github.com/containers/image/image/docker_schema1.go b/vendor/github.com/containers/image/image/docker_schema1.go
new file mode 100644
index 0000000000..4152b3cdf7
--- /dev/null
+++ b/vendor/github.com/containers/image/image/docker_schema1.go
@@ -0,0 +1,375 @@
+package image
+
+import (
+ "encoding/json"
+ "regexp"
+ "strings"
+ "time"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+var (
+ validHex = regexp.MustCompile(`^([a-f0-9]{64})$`)
+)
+
+type fsLayersSchema1 struct {
+ BlobSum digest.Digest `json:"blobSum"`
+}
+
+type historySchema1 struct {
+ V1Compatibility string `json:"v1Compatibility"`
+}
+
+// historySchema1 is a string containing this. It is similar to v1Image but not the same, in particular note the ThrowAway field.
+type v1Compatibility struct {
+ ID string `json:"id"`
+ Parent string `json:"parent,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ Created time.Time `json:"created"`
+ ContainerConfig struct {
+ Cmd []string
+ } `json:"container_config,omitempty"`
+ Author string `json:"author,omitempty"`
+ ThrowAway bool `json:"throwaway,omitempty"`
+}
+
+type manifestSchema1 struct {
+ Name string `json:"name"`
+ Tag string `json:"tag"`
+ Architecture string `json:"architecture"`
+ FSLayers []fsLayersSchema1 `json:"fsLayers"`
+ History []historySchema1 `json:"history"`
+ SchemaVersion int `json:"schemaVersion"`
+}
+
+func manifestSchema1FromManifest(manifest []byte) (genericManifest, error) {
+ mschema1 := &manifestSchema1{}
+ if err := json.Unmarshal(manifest, mschema1); err != nil {
+ return nil, err
+ }
+ if mschema1.SchemaVersion != 1 {
+ return nil, errors.Errorf("unsupported schema version %d", mschema1.SchemaVersion)
+ }
+ if len(mschema1.FSLayers) != len(mschema1.History) {
+ return nil, errors.New("length of history not equal to number of layers")
+ }
+ if len(mschema1.FSLayers) == 0 {
+ return nil, errors.New("no FSLayers in manifest")
+ }
+
+ if err := fixManifestLayers(mschema1); err != nil {
+ return nil, err
+ }
+ return mschema1, nil
+}
+
+// manifestSchema1FromComponents builds a new manifestSchema1 from the supplied data.
+func manifestSchema1FromComponents(ref reference.Named, fsLayers []fsLayersSchema1, history []historySchema1, architecture string) genericManifest {
+ var name, tag string
+ if ref != nil { // Well, what to do if it _is_ nil? Most consumers actually don't use these fields nowadays, so we might as well try not supplying them.
+ name = reference.Path(ref)
+ if tagged, ok := ref.(reference.NamedTagged); ok {
+ tag = tagged.Tag()
+ }
+ }
+ return &manifestSchema1{
+ Name: name,
+ Tag: tag,
+ Architecture: architecture,
+ FSLayers: fsLayers,
+ History: history,
+ SchemaVersion: 1,
+ }
+}
+
+func (m *manifestSchema1) serialize() ([]byte, error) {
+ // docker/distribution requires a signature even if the incoming data uses the nominally unsigned DockerV2Schema1MediaType.
+ unsigned, err := json.Marshal(*m)
+ if err != nil {
+ return nil, err
+ }
+ return manifest.AddDummyV2S1Signature(unsigned)
+}
+
+func (m *manifestSchema1) manifestMIMEType() string {
+ return manifest.DockerV2Schema1SignedMediaType
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+func (m *manifestSchema1) ConfigInfo() types.BlobInfo {
+ return types.BlobInfo{}
+}
+
+// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+// The result is cached; it is OK to call this however often you need.
+func (m *manifestSchema1) ConfigBlob() ([]byte, error) {
+ return nil, nil
+}
+
+// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+// layers in the resulting configuration isn't guaranteed to be returned due to how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestSchema1) OCIConfig() (*imgspecv1.Image, error) {
+ v2s2, err := m.convertToManifestSchema2(nil, nil)
+ if err != nil {
+ return nil, err
+ }
+ return v2s2.OCIConfig()
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *manifestSchema1) LayerInfos() []types.BlobInfo {
+ layers := make([]types.BlobInfo, len(m.FSLayers))
+ for i, layer := range m.FSLayers { // NOTE: This includes empty layers (where m.History.V1Compatibility->ThrowAway)
+ layers[(len(m.FSLayers)-1)-i] = types.BlobInfo{Digest: layer.BlobSum, Size: -1}
+ }
+ return layers
+}
+
+// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+// It returns false if the manifest does not embed a Docker reference.
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+func (m *manifestSchema1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+ // This is a bit convoluted: We can’t just have a "get embedded docker reference" method
+ // and have the “does it conflict” logic in the generic copy code, because the manifest does not actually
+ // embed a full docker/distribution reference, but only the repo name and tag (without the host name).
+ // So we would have to provide a “return repo without host name, and tag” getter for the generic code,
+ // which would be very awkward. Instead, we do the matching here in schema1-specific code, and all the
+ // generic copy code needs to know about is reference.Named and that a manifest may need updating
+ // for some destinations.
+ name := reference.Path(ref)
+ var tag string
+ if tagged, isTagged := ref.(reference.NamedTagged); isTagged {
+ tag = tagged.Tag()
+ } else {
+ tag = ""
+ }
+ return m.Name != name || m.Tag != tag
+}
+
+func (m *manifestSchema1) imageInspectInfo() (*types.ImageInspectInfo, error) {
+ v1 := &v1Image{}
+ if err := json.Unmarshal([]byte(m.History[0].V1Compatibility), v1); err != nil {
+ return nil, err
+ }
+ return &types.ImageInspectInfo{
+ Tag: m.Tag,
+ DockerVersion: v1.DockerVersion,
+ Created: v1.Created,
+ Labels: v1.Config.Labels,
+ Architecture: v1.Architecture,
+ Os: v1.OS,
+ }, nil
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestSchema1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+ return options.ManifestMIMEType == manifest.DockerV2Schema2MediaType
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+func (m *manifestSchema1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
+ copy := *m
+ if options.LayerInfos != nil {
+ // Our LayerInfos includes empty layers (where m.History.V1Compatibility->ThrowAway), so expect them to be included here as well.
+ if len(copy.FSLayers) != len(options.LayerInfos) {
+ return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.FSLayers), len(options.LayerInfos))
+ }
+ for i, info := range options.LayerInfos {
+ // (docker push) sets up m.History.V1Compatibility->{Id,Parent} based on values of info.Digest,
+ // but (docker pull) ignores them in favor of computing DiffIDs from uncompressed data, except verifying the child->parent links and uniqueness.
+ // So, we don't bother recomputing the IDs in m.History.V1Compatibility.
+ copy.FSLayers[(len(options.LayerInfos)-1)-i].BlobSum = info.Digest
+ }
+ }
+ if options.EmbeddedDockerReference != nil {
+ copy.Name = reference.Path(options.EmbeddedDockerReference)
+ if tagged, isTagged := options.EmbeddedDockerReference.(reference.NamedTagged); isTagged {
+ copy.Tag = tagged.Tag()
+ } else {
+ copy.Tag = ""
+ }
+ }
+
+ switch options.ManifestMIMEType {
+ case "": // No conversion, OK
+ case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType:
+ // We have 2 MIME types for schema 1, which are basically equivalent (even the un-"Signed" MIME type will be rejected if there isn’t a signature); so,
+ // handle conversions between them by doing nothing.
+ case manifest.DockerV2Schema2MediaType:
+ return copy.convertToManifestSchema2(options.InformationOnly.LayerInfos, options.InformationOnly.LayerDiffIDs)
+ default:
+ return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema1SignedMediaType, options.ManifestMIMEType)
+ }
+
+ return memoryImageFromManifest(©), nil
+}
+
+// fixManifestLayers, after validating the supplied manifest
+// (to use correctly-formatted IDs, and to not have non-consecutive ID collisions in manifest.History),
+// modifies manifest to only have one entry for each layer ID in manifest.History (deleting the older duplicates,
+// both from manifest.History and manifest.FSLayers).
+// Note that even after this succeeds, manifest.FSLayers may contain duplicate entries
+// (for Dockerfile operations which change the configuration but not the filesystem).
+func fixManifestLayers(manifest *manifestSchema1) error {
+ type imageV1 struct {
+ ID string
+ Parent string
+ }
+ // Per the specification, we can assume that len(manifest.FSLayers) == len(manifest.History)
+ imgs := make([]*imageV1, len(manifest.FSLayers))
+ for i := range manifest.FSLayers {
+ img := &imageV1{}
+
+ if err := json.Unmarshal([]byte(manifest.History[i].V1Compatibility), img); err != nil {
+ return err
+ }
+
+ imgs[i] = img
+ if err := validateV1ID(img.ID); err != nil {
+ return err
+ }
+ }
+ if imgs[len(imgs)-1].Parent != "" {
+ return errors.New("Invalid parent ID in the base layer of the image")
+ }
+ // check general duplicates to error instead of a deadlock
+ idmap := make(map[string]struct{})
+ var lastID string
+ for _, img := range imgs {
+ // skip IDs that appear after each other, we handle those later
+ if _, exists := idmap[img.ID]; img.ID != lastID && exists {
+ return errors.Errorf("ID %+v appears multiple times in manifest", img.ID)
+ }
+ lastID = img.ID
+ idmap[lastID] = struct{}{}
+ }
+ // backwards loop so that we keep the remaining indexes after removing items
+ for i := len(imgs) - 2; i >= 0; i-- {
+ if imgs[i].ID == imgs[i+1].ID { // repeated ID. remove and continue
+ manifest.FSLayers = append(manifest.FSLayers[:i], manifest.FSLayers[i+1:]...)
+ manifest.History = append(manifest.History[:i], manifest.History[i+1:]...)
+ } else if imgs[i].Parent != imgs[i+1].ID {
+ return errors.Errorf("Invalid parent ID. Expected %v, got %v", imgs[i+1].ID, imgs[i].Parent)
+ }
+ }
+ return nil
+}
+
+func validateV1ID(id string) error {
+ if ok := validHex.MatchString(id); !ok {
+ return errors.Errorf("image ID %q is invalid", id)
+ }
+ return nil
+}
+
+// Based on github.com/docker/docker/distribution/pull_v2.go
+func (m *manifestSchema1) convertToManifestSchema2(uploadedLayerInfos []types.BlobInfo, layerDiffIDs []digest.Digest) (types.Image, error) {
+ if len(m.History) == 0 {
+ // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
+ return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema2MediaType)
+ }
+ if len(m.History) != len(m.FSLayers) {
+ return nil, errors.Errorf("Inconsistent schema 1 manifest: %d history entries, %d fsLayers entries", len(m.History), len(m.FSLayers))
+ }
+ if uploadedLayerInfos != nil && len(uploadedLayerInfos) != len(m.FSLayers) {
+ return nil, errors.Errorf("Internal error: uploaded %d blobs, but schema1 manifest has %d fsLayers", len(uploadedLayerInfos), len(m.FSLayers))
+ }
+ if layerDiffIDs != nil && len(layerDiffIDs) != len(m.FSLayers) {
+ return nil, errors.Errorf("Internal error: collected %d DiffID values, but schema1 manifest has %d fsLayers", len(layerDiffIDs), len(m.FSLayers))
+ }
+
+ rootFS := rootFS{
+ Type: "layers",
+ DiffIDs: []digest.Digest{},
+ BaseLayer: "",
+ }
+ var layers []descriptor
+ history := make([]imageHistory, len(m.History))
+ for v1Index := len(m.History) - 1; v1Index >= 0; v1Index-- {
+ v2Index := (len(m.History) - 1) - v1Index
+
+ var v1compat v1Compatibility
+ if err := json.Unmarshal([]byte(m.History[v1Index].V1Compatibility), &v1compat); err != nil {
+ return nil, errors.Wrapf(err, "Error decoding history entry %d", v1Index)
+ }
+ history[v2Index] = imageHistory{
+ Created: v1compat.Created,
+ Author: v1compat.Author,
+ CreatedBy: strings.Join(v1compat.ContainerConfig.Cmd, " "),
+ Comment: v1compat.Comment,
+ EmptyLayer: v1compat.ThrowAway,
+ }
+
+ if !v1compat.ThrowAway {
+ var size int64
+ if uploadedLayerInfos != nil {
+ size = uploadedLayerInfos[v2Index].Size
+ }
+ var d digest.Digest
+ if layerDiffIDs != nil {
+ d = layerDiffIDs[v2Index]
+ }
+ layers = append(layers, descriptor{
+ MediaType: "application/vnd.docker.image.rootfs.diff.tar.gzip",
+ Size: size,
+ Digest: m.FSLayers[v1Index].BlobSum,
+ })
+ rootFS.DiffIDs = append(rootFS.DiffIDs, d)
+ }
+ }
+ configJSON, err := configJSONFromV1Config([]byte(m.History[0].V1Compatibility), rootFS, history)
+ if err != nil {
+ return nil, err
+ }
+ configDescriptor := descriptor{
+ MediaType: "application/vnd.docker.container.image.v1+json",
+ Size: int64(len(configJSON)),
+ Digest: digest.FromBytes(configJSON),
+ }
+
+ m2 := manifestSchema2FromComponents(configDescriptor, nil, configJSON, layers)
+ return memoryImageFromManifest(m2), nil
+}
+
+func configJSONFromV1Config(v1ConfigJSON []byte, rootFS rootFS, history []imageHistory) ([]byte, error) {
+ // github.com/docker/docker/image/v1/imagev1.go:MakeConfigFromV1Config unmarshals and re-marshals the input if docker_version is < 1.8.3 to remove blank fields;
+ // we don't do that here. FIXME? Should we? AFAICT it would only affect the digest value of the schema2 manifest, and we don't particularly need that to be
+ // a consistently reproducible value.
+
+ // Preserve everything we don't specifically know about.
+ // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.)
+ rawContents := map[string]*json.RawMessage{}
+ if err := json.Unmarshal(v1ConfigJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?!
+ return nil, err
+ }
+
+ delete(rawContents, "id")
+ delete(rawContents, "parent")
+ delete(rawContents, "Size")
+ delete(rawContents, "parent_id")
+ delete(rawContents, "layer_id")
+ delete(rawContents, "throwaway")
+
+ updates := map[string]interface{}{"rootfs": rootFS, "history": history}
+ for field, value := range updates {
+ encoded, err := json.Marshal(value)
+ if err != nil {
+ return nil, err
+ }
+ rawContents[field] = (*json.RawMessage)(&encoded)
+ }
+ return json.Marshal(rawContents)
+}
diff --git a/vendor/github.com/containers/image/image/docker_schema2.go b/vendor/github.com/containers/image/image/docker_schema2.go
new file mode 100644
index 0000000000..9c242cf069
--- /dev/null
+++ b/vendor/github.com/containers/image/image/docker_schema2.go
@@ -0,0 +1,364 @@
+package image
+
+import (
+ "bytes"
+ "crypto/sha256"
+ "encoding/hex"
+ "encoding/json"
+ "io/ioutil"
+ "strings"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+// gzippedEmptyLayer is a gzip-compressed version of an empty tar file (1024 NULL bytes)
+// This comes from github.com/docker/distribution/manifest/schema1/config_builder.go; there is
+// a non-zero embedded timestamp; we could zero that, but that would just waste storage space
+// in registries, so let’s use the same values.
+var gzippedEmptyLayer = []byte{
+ 31, 139, 8, 0, 0, 9, 110, 136, 0, 255, 98, 24, 5, 163, 96, 20, 140, 88,
+ 0, 8, 0, 0, 255, 255, 46, 175, 181, 239, 0, 4, 0, 0,
+}
+
+// gzippedEmptyLayerDigest is a digest of gzippedEmptyLayer
+const gzippedEmptyLayerDigest = digest.Digest("sha256:a3ed95caeb02ffe68cdd9fd84406680ae93d633cb16422d00e8a7c22955b46d4")
+
+type descriptor struct {
+ MediaType string `json:"mediaType"`
+ Size int64 `json:"size"`
+ Digest digest.Digest `json:"digest"`
+ URLs []string `json:"urls,omitempty"`
+}
+
+type manifestSchema2 struct {
+ src types.ImageSource // May be nil if configBlob is not nil
+ configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
+ SchemaVersion int `json:"schemaVersion"`
+ MediaType string `json:"mediaType"`
+ ConfigDescriptor descriptor `json:"config"`
+ LayersDescriptors []descriptor `json:"layers"`
+}
+
+func manifestSchema2FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) {
+ v2s2 := manifestSchema2{src: src}
+ if err := json.Unmarshal(manifest, &v2s2); err != nil {
+ return nil, err
+ }
+ return &v2s2, nil
+}
+
+// manifestSchema2FromComponents builds a new manifestSchema2 from the supplied data:
+func manifestSchema2FromComponents(config descriptor, src types.ImageSource, configBlob []byte, layers []descriptor) genericManifest {
+ return &manifestSchema2{
+ src: src,
+ configBlob: configBlob,
+ SchemaVersion: 2,
+ MediaType: manifest.DockerV2Schema2MediaType,
+ ConfigDescriptor: config,
+ LayersDescriptors: layers,
+ }
+}
+
+func (m *manifestSchema2) serialize() ([]byte, error) {
+ return json.Marshal(*m)
+}
+
+func (m *manifestSchema2) manifestMIMEType() string {
+ return m.MediaType
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+func (m *manifestSchema2) ConfigInfo() types.BlobInfo {
+ return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size}
+}
+
+// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+// layers in the resulting configuration isn't guaranteed to be returned due to how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestSchema2) OCIConfig() (*imgspecv1.Image, error) {
+ configBlob, err := m.ConfigBlob()
+ if err != nil {
+ return nil, err
+ }
+ // docker v2s2 and OCI v1 are mostly compatible but v2s2 contains more fields
+ // than OCI v1. This unmarshal makes sure we drop docker v2s2
+ // fields that aren't needed in OCI v1.
+ configOCI := &imgspecv1.Image{}
+ if err := json.Unmarshal(configBlob, configOCI); err != nil {
+ return nil, err
+ }
+ return configOCI, nil
+}
+
+// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+// The result is cached; it is OK to call this however often you need.
+func (m *manifestSchema2) ConfigBlob() ([]byte, error) {
+ if m.configBlob == nil {
+ if m.src == nil {
+ return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestSchema2")
+ }
+ stream, _, err := m.src.GetBlob(types.BlobInfo{
+ Digest: m.ConfigDescriptor.Digest,
+ Size: m.ConfigDescriptor.Size,
+ URLs: m.ConfigDescriptor.URLs,
+ })
+ if err != nil {
+ return nil, err
+ }
+ defer stream.Close()
+ blob, err := ioutil.ReadAll(stream)
+ if err != nil {
+ return nil, err
+ }
+ computedDigest := digest.FromBytes(blob)
+ if computedDigest != m.ConfigDescriptor.Digest {
+ return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
+ }
+ m.configBlob = blob
+ }
+ return m.configBlob, nil
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *manifestSchema2) LayerInfos() []types.BlobInfo {
+ blobs := []types.BlobInfo{}
+ for _, layer := range m.LayersDescriptors {
+ blobs = append(blobs, types.BlobInfo{
+ Digest: layer.Digest,
+ Size: layer.Size,
+ URLs: layer.URLs,
+ })
+ }
+ return blobs
+}
+
+// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+// It returns false if the manifest does not embed a Docker reference.
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+func (m *manifestSchema2) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+ return false
+}
+
+func (m *manifestSchema2) imageInspectInfo() (*types.ImageInspectInfo, error) {
+ config, err := m.ConfigBlob()
+ if err != nil {
+ return nil, err
+ }
+ v1 := &v1Image{}
+ if err := json.Unmarshal(config, v1); err != nil {
+ return nil, err
+ }
+ return &types.ImageInspectInfo{
+ DockerVersion: v1.DockerVersion,
+ Created: v1.Created,
+ Labels: v1.Config.Labels,
+ Architecture: v1.Architecture,
+ Os: v1.OS,
+ }, nil
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestSchema2) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+ return false
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+func (m *manifestSchema2) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
+ copy := *m // NOTE: This is not a deep copy, it still shares slices etc.
+ if options.LayerInfos != nil {
+ if len(copy.LayersDescriptors) != len(options.LayerInfos) {
+ return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
+ }
+ copy.LayersDescriptors = make([]descriptor, len(options.LayerInfos))
+ for i, info := range options.LayerInfos {
+ copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType
+ copy.LayersDescriptors[i].Digest = info.Digest
+ copy.LayersDescriptors[i].Size = info.Size
+ copy.LayersDescriptors[i].URLs = info.URLs
+ }
+ }
+ // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1 to schema2, but we really don't care.
+
+ switch options.ManifestMIMEType {
+ case "": // No conversion, OK
+ case manifest.DockerV2Schema1SignedMediaType, manifest.DockerV2Schema1MediaType:
+ return copy.convertToManifestSchema1(options.InformationOnly.Destination)
+ case imgspecv1.MediaTypeImageManifest:
+ return copy.convertToManifestOCI1()
+ default:
+ return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", manifest.DockerV2Schema2MediaType, options.ManifestMIMEType)
+ }
+
+ return memoryImageFromManifest(©), nil
+}
+
+func (m *manifestSchema2) convertToManifestOCI1() (types.Image, error) {
+ configOCI, err := m.OCIConfig()
+ if err != nil {
+ return nil, err
+ }
+ configOCIBytes, err := json.Marshal(configOCI)
+ if err != nil {
+ return nil, err
+ }
+
+ config := descriptorOCI1{
+ descriptor: descriptor{
+ MediaType: imgspecv1.MediaTypeImageConfig,
+ Size: int64(len(configOCIBytes)),
+ Digest: digest.FromBytes(configOCIBytes),
+ },
+ }
+
+ layers := make([]descriptorOCI1, len(m.LayersDescriptors))
+ for idx := range layers {
+ layers[idx] = descriptorOCI1{descriptor: m.LayersDescriptors[idx]}
+ if m.LayersDescriptors[idx].MediaType == manifest.DockerV2Schema2ForeignLayerMediaType {
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayerNonDistributable
+ } else {
+ // we assume layers are gzip'ed because docker v2s2 only deals with
+ // gzip'ed layers. However, OCI has non-gzip'ed layers as well.
+ layers[idx].MediaType = imgspecv1.MediaTypeImageLayerGzip
+ }
+ }
+
+ m1 := manifestOCI1FromComponents(config, m.src, configOCIBytes, layers)
+ return memoryImageFromManifest(m1), nil
+}
+
+// Based on docker/distribution/manifest/schema1/config_builder.go
+func (m *manifestSchema2) convertToManifestSchema1(dest types.ImageDestination) (types.Image, error) {
+ configBytes, err := m.ConfigBlob()
+ if err != nil {
+ return nil, err
+ }
+ imageConfig := &image{}
+ if err := json.Unmarshal(configBytes, imageConfig); err != nil {
+ return nil, err
+ }
+
+ // Build fsLayers and History, discarding all configs. We will patch the top-level config in later.
+ fsLayers := make([]fsLayersSchema1, len(imageConfig.History))
+ history := make([]historySchema1, len(imageConfig.History))
+ nonemptyLayerIndex := 0
+ var parentV1ID string // Set in the loop
+ v1ID := ""
+ haveGzippedEmptyLayer := false
+ if len(imageConfig.History) == 0 {
+ // What would this even mean?! Anyhow, the rest of the code depends on fsLayers[0] and history[0] existing.
+ return nil, errors.Errorf("Cannot convert an image with 0 history entries to %s", manifest.DockerV2Schema1SignedMediaType)
+ }
+ for v2Index, historyEntry := range imageConfig.History {
+ parentV1ID = v1ID
+ v1Index := len(imageConfig.History) - 1 - v2Index
+
+ var blobDigest digest.Digest
+ if historyEntry.EmptyLayer {
+ if !haveGzippedEmptyLayer {
+ logrus.Debugf("Uploading empty layer during conversion to schema 1")
+ info, err := dest.PutBlob(bytes.NewReader(gzippedEmptyLayer), types.BlobInfo{Digest: gzippedEmptyLayerDigest, Size: int64(len(gzippedEmptyLayer))})
+ if err != nil {
+ return nil, errors.Wrap(err, "Error uploading empty layer")
+ }
+ if info.Digest != gzippedEmptyLayerDigest {
+ return nil, errors.Errorf("Internal error: Uploaded empty layer has digest %#v instead of %s", info.Digest, gzippedEmptyLayerDigest)
+ }
+ haveGzippedEmptyLayer = true
+ }
+ blobDigest = gzippedEmptyLayerDigest
+ } else {
+ if nonemptyLayerIndex >= len(m.LayersDescriptors) {
+ return nil, errors.Errorf("Invalid image configuration, needs more than the %d distributed layers", len(m.LayersDescriptors))
+ }
+ blobDigest = m.LayersDescriptors[nonemptyLayerIndex].Digest
+ nonemptyLayerIndex++
+ }
+
+ // AFAICT pull ignores these ID values, at least nowadays, so we could use anything unique, including a simple counter. Use what Docker uses for cargo-cult consistency.
+ v, err := v1IDFromBlobDigestAndComponents(blobDigest, parentV1ID)
+ if err != nil {
+ return nil, err
+ }
+ v1ID = v
+
+ fakeImage := v1Compatibility{
+ ID: v1ID,
+ Parent: parentV1ID,
+ Comment: historyEntry.Comment,
+ Created: historyEntry.Created,
+ Author: historyEntry.Author,
+ ThrowAway: historyEntry.EmptyLayer,
+ }
+ fakeImage.ContainerConfig.Cmd = []string{historyEntry.CreatedBy}
+ v1CompatibilityBytes, err := json.Marshal(&fakeImage)
+ if err != nil {
+ return nil, errors.Errorf("Internal error: Error creating v1compatibility for %#v", fakeImage)
+ }
+
+ fsLayers[v1Index] = fsLayersSchema1{BlobSum: blobDigest}
+ history[v1Index] = historySchema1{V1Compatibility: string(v1CompatibilityBytes)}
+ // Note that parentV1ID of the top layer is preserved when exiting this loop
+ }
+
+ // Now patch in real configuration for the top layer (v1Index == 0)
+ v1ID, err = v1IDFromBlobDigestAndComponents(fsLayers[0].BlobSum, parentV1ID, string(configBytes)) // See above WRT v1ID value generation and cargo-cult consistency.
+ if err != nil {
+ return nil, err
+ }
+ v1Config, err := v1ConfigFromConfigJSON(configBytes, v1ID, parentV1ID, imageConfig.History[len(imageConfig.History)-1].EmptyLayer)
+ if err != nil {
+ return nil, err
+ }
+ history[0].V1Compatibility = string(v1Config)
+
+ m1 := manifestSchema1FromComponents(dest.Reference().DockerReference(), fsLayers, history, imageConfig.Architecture)
+ return memoryImageFromManifest(m1), nil
+}
+
+func v1IDFromBlobDigestAndComponents(blobDigest digest.Digest, others ...string) (string, error) {
+ if err := blobDigest.Validate(); err != nil {
+ return "", err
+ }
+ parts := append([]string{blobDigest.Hex()}, others...)
+ v1IDHash := sha256.Sum256([]byte(strings.Join(parts, " ")))
+ return hex.EncodeToString(v1IDHash[:]), nil
+}
+
+func v1ConfigFromConfigJSON(configJSON []byte, v1ID, parentV1ID string, throwaway bool) ([]byte, error) {
+ // Preserve everything we don't specifically know about.
+ // (This must be a *json.RawMessage, even though *[]byte is fairly redundant, because only *RawMessage implements json.Marshaler.)
+ rawContents := map[string]*json.RawMessage{}
+ if err := json.Unmarshal(configJSON, &rawContents); err != nil { // We have already unmarshaled it before, using a more detailed schema?!
+ return nil, err
+ }
+ delete(rawContents, "rootfs")
+ delete(rawContents, "history")
+
+ updates := map[string]interface{}{"id": v1ID}
+ if parentV1ID != "" {
+ updates["parent"] = parentV1ID
+ }
+ if throwaway {
+ updates["throwaway"] = throwaway
+ }
+ for field, value := range updates {
+ encoded, err := json.Marshal(value)
+ if err != nil {
+ return nil, err
+ }
+ rawContents[field] = (*json.RawMessage)(&encoded)
+ }
+ return json.Marshal(rawContents)
+}
diff --git a/vendor/github.com/containers/image/image/manifest.go b/vendor/github.com/containers/image/image/manifest.go
new file mode 100644
index 0000000000..75c9e71164
--- /dev/null
+++ b/vendor/github.com/containers/image/image/manifest.go
@@ -0,0 +1,129 @@
+package image
+
+import (
+ "time"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/pkg/strslice"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+type config struct {
+ Cmd strslice.StrSlice
+ Labels map[string]string
+}
+
+type v1Image struct {
+ ID string `json:"id,omitempty"`
+ Parent string `json:"parent,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ Created time.Time `json:"created"`
+ ContainerConfig *config `json:"container_config,omitempty"`
+ DockerVersion string `json:"docker_version,omitempty"`
+ Author string `json:"author,omitempty"`
+ // Config is the configuration of the container received from the client
+ Config *config `json:"config,omitempty"`
+ // Architecture is the hardware that the image is build and runs on
+ Architecture string `json:"architecture,omitempty"`
+ // OS is the operating system used to build and run the image
+ OS string `json:"os,omitempty"`
+}
+
+type image struct {
+ v1Image
+ History []imageHistory `json:"history,omitempty"`
+ RootFS *rootFS `json:"rootfs,omitempty"`
+}
+
+type imageHistory struct {
+ Created time.Time `json:"created"`
+ Author string `json:"author,omitempty"`
+ CreatedBy string `json:"created_by,omitempty"`
+ Comment string `json:"comment,omitempty"`
+ EmptyLayer bool `json:"empty_layer,omitempty"`
+}
+
+type rootFS struct {
+ Type string `json:"type"`
+ DiffIDs []digest.Digest `json:"diff_ids,omitempty"`
+ BaseLayer string `json:"base_layer,omitempty"`
+}
+
+// genericManifest is an interface for parsing, modifying image manifests and related data.
+// Note that the public methods are intended to be a subset of types.Image
+// so that embedding a genericManifest into structs works.
+// will support v1 one day...
+type genericManifest interface {
+ serialize() ([]byte, error)
+ manifestMIMEType() string
+ // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+ // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+ ConfigInfo() types.BlobInfo
+ // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+ // The result is cached; it is OK to call this however often you need.
+ ConfigBlob() ([]byte, error)
+ // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+ // layers in the resulting configuration isn't guaranteed to be returned due to how
+ // old image manifests work (docker v2s1 especially).
+ OCIConfig() (*imgspecv1.Image, error)
+ // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+ // The Digest field is guaranteed to be provided; Size may be -1.
+ // WARNING: The list may contain duplicates, and they are semantically relevant.
+ LayerInfos() []types.BlobInfo
+ // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+ // It returns false if the manifest does not embed a Docker reference.
+ // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+ EmbeddedDockerReferenceConflicts(ref reference.Named) bool
+ imageInspectInfo() (*types.ImageInspectInfo, error) // To be called by inspectManifest
+ // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+ // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute
+ // (most importantly it forces us to download the full layers even if they are already present at the destination).
+ UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool
+ // UpdatedImage returns a types.Image modified according to options.
+ // This does not change the state of the original Image object.
+ UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error)
+}
+
+func manifestInstanceFromBlob(src types.ImageSource, manblob []byte, mt string) (genericManifest, error) {
+ switch mt {
+ // "application/json" is a valid v2s1 value per https://github.com/docker/distribution/blob/master/docs/spec/manifest-v2-1.md .
+ // This works for now, when nothing else seems to return "application/json"; if that were not true, the mapping/detection might
+ // need to happen within the ImageSource.
+ case manifest.DockerV2Schema1MediaType, manifest.DockerV2Schema1SignedMediaType, "application/json":
+ return manifestSchema1FromManifest(manblob)
+ case imgspecv1.MediaTypeImageManifest:
+ return manifestOCI1FromManifest(src, manblob)
+ case manifest.DockerV2Schema2MediaType:
+ return manifestSchema2FromManifest(src, manblob)
+ case manifest.DockerV2ListMediaType:
+ return manifestSchema2FromManifestList(src, manblob)
+ default:
+ // If it's not a recognized manifest media type, or we have failed determining the type, we'll try one last time
+ // to deserialize using v2s1 as per https://github.com/docker/distribution/blob/master/manifests.go#L108
+ // and https://github.com/docker/distribution/blob/master/manifest/schema1/manifest.go#L50
+ //
+ // Crane registries can also return "text/plain", or pretty much anything else depending on a file extension “recognized” in the tag.
+ // This makes no real sense, but it happens
+ // because requests for manifests are
+ // redirected to a content distribution
+ // network which is configured that way. See https://bugzilla.redhat.com/show_bug.cgi?id=1389442
+ return manifestSchema1FromManifest(manblob)
+ }
+}
+
+// inspectManifest is an implementation of types.Image.Inspect
+func inspectManifest(m genericManifest) (*types.ImageInspectInfo, error) {
+ info, err := m.imageInspectInfo()
+ if err != nil {
+ return nil, err
+ }
+ layers := m.LayerInfos()
+ info.Layers = make([]string, len(layers))
+ for i, layer := range layers {
+ info.Layers[i] = layer.Digest.String()
+ }
+ return info, nil
+}
diff --git a/vendor/github.com/containers/image/image/memory.go b/vendor/github.com/containers/image/image/memory.go
new file mode 100644
index 0000000000..62995f6188
--- /dev/null
+++ b/vendor/github.com/containers/image/image/memory.go
@@ -0,0 +1,73 @@
+package image
+
+import (
+ "context"
+
+ "github.com/pkg/errors"
+
+ "github.com/containers/image/types"
+)
+
+// memoryImage is a mostly-implementation of types.Image assembled from data
+// created in memory, used primarily as a return value of types.Image.UpdatedImage
+// as a way to carry various structured information in a type-safe and easy-to-use way.
+// Note that this _only_ carries the immediate metadata; it is _not_ a stand-alone
+// collection of all related information, e.g. there is no way to get layer blobs
+// from a memoryImage.
+type memoryImage struct {
+ genericManifest
+ serializedManifest []byte // A private cache for Manifest()
+}
+
+func memoryImageFromManifest(m genericManifest) types.Image {
+ return &memoryImage{
+ genericManifest: m,
+ serializedManifest: nil,
+ }
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (i *memoryImage) Reference() types.ImageReference {
+ // It would really be inappropriate to return the ImageReference of the image this was based on.
+ return nil
+}
+
+// Close removes resources associated with an initialized UnparsedImage, if any.
+func (i *memoryImage) Close() error {
+ return nil
+}
+
+// Size returns the size of the image as stored, if known, or -1 if not.
+func (i *memoryImage) Size() (int64, error) {
+ return -1, nil
+}
+
+// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+func (i *memoryImage) Manifest() ([]byte, string, error) {
+ if i.serializedManifest == nil {
+ m, err := i.genericManifest.serialize()
+ if err != nil {
+ return nil, "", err
+ }
+ i.serializedManifest = m
+ }
+ return i.serializedManifest, i.genericManifest.manifestMIMEType(), nil
+}
+
+// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+func (i *memoryImage) Signatures(ctx context.Context) ([][]byte, error) {
+ // Modifying an image invalidates signatures; a caller asking the updated image for signatures
+ // is probably confused.
+ return nil, errors.New("Internal error: Image.Signatures() is not supported for images modified in memory")
+}
+
+// Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+func (i *memoryImage) Inspect() (*types.ImageInspectInfo, error) {
+ return inspectManifest(i.genericManifest)
+}
+
+// IsMultiImage returns true if the image's manifest is a list of images, false otherwise.
+func (i *memoryImage) IsMultiImage() bool {
+ return false
+}
diff --git a/vendor/github.com/containers/image/image/oci.go b/vendor/github.com/containers/image/image/oci.go
new file mode 100644
index 0000000000..048387ec3e
--- /dev/null
+++ b/vendor/github.com/containers/image/image/oci.go
@@ -0,0 +1,196 @@
+package image
+
+import (
+ "encoding/json"
+ "io/ioutil"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+ "github.com/pkg/errors"
+)
+
+type descriptorOCI1 struct {
+ descriptor
+ Annotations map[string]string `json:"annotations,omitempty"`
+}
+
+type manifestOCI1 struct {
+ src types.ImageSource // May be nil if configBlob is not nil
+ configBlob []byte // If set, corresponds to contents of ConfigDescriptor.
+ SchemaVersion int `json:"schemaVersion"`
+ ConfigDescriptor descriptorOCI1 `json:"config"`
+ LayersDescriptors []descriptorOCI1 `json:"layers"`
+ Annotations map[string]string `json:"annotations,omitempty"`
+}
+
+func manifestOCI1FromManifest(src types.ImageSource, manifest []byte) (genericManifest, error) {
+ oci := manifestOCI1{src: src}
+ if err := json.Unmarshal(manifest, &oci); err != nil {
+ return nil, err
+ }
+ return &oci, nil
+}
+
+// manifestOCI1FromComponents builds a new manifestOCI1 from the supplied data:
+func manifestOCI1FromComponents(config descriptorOCI1, src types.ImageSource, configBlob []byte, layers []descriptorOCI1) genericManifest {
+ return &manifestOCI1{
+ src: src,
+ configBlob: configBlob,
+ SchemaVersion: 2,
+ ConfigDescriptor: config,
+ LayersDescriptors: layers,
+ }
+}
+
+func (m *manifestOCI1) serialize() ([]byte, error) {
+ return json.Marshal(*m)
+}
+
+func (m *manifestOCI1) manifestMIMEType() string {
+ return imgspecv1.MediaTypeImageManifest
+}
+
+// ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+// Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+func (m *manifestOCI1) ConfigInfo() types.BlobInfo {
+ return types.BlobInfo{Digest: m.ConfigDescriptor.Digest, Size: m.ConfigDescriptor.Size}
+}
+
+// ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+// The result is cached; it is OK to call this however often you need.
+func (m *manifestOCI1) ConfigBlob() ([]byte, error) {
+ if m.configBlob == nil {
+ if m.src == nil {
+ return nil, errors.Errorf("Internal error: neither src nor configBlob set in manifestOCI1")
+ }
+ stream, _, err := m.src.GetBlob(types.BlobInfo{
+ Digest: m.ConfigDescriptor.Digest,
+ Size: m.ConfigDescriptor.Size,
+ URLs: m.ConfigDescriptor.URLs,
+ })
+ if err != nil {
+ return nil, err
+ }
+ defer stream.Close()
+ blob, err := ioutil.ReadAll(stream)
+ if err != nil {
+ return nil, err
+ }
+ computedDigest := digest.FromBytes(blob)
+ if computedDigest != m.ConfigDescriptor.Digest {
+ return nil, errors.Errorf("Download config.json digest %s does not match expected %s", computedDigest, m.ConfigDescriptor.Digest)
+ }
+ m.configBlob = blob
+ }
+ return m.configBlob, nil
+}
+
+// OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+// layers in the resulting configuration isn't guaranteed to be returned due to how
+// old image manifests work (docker v2s1 especially).
+func (m *manifestOCI1) OCIConfig() (*imgspecv1.Image, error) {
+ cb, err := m.ConfigBlob()
+ if err != nil {
+ return nil, err
+ }
+ configOCI := &imgspecv1.Image{}
+ if err := json.Unmarshal(cb, configOCI); err != nil {
+ return nil, err
+ }
+ return configOCI, nil
+}
+
+// LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+// The Digest field is guaranteed to be provided; Size may be -1.
+// WARNING: The list may contain duplicates, and they are semantically relevant.
+func (m *manifestOCI1) LayerInfos() []types.BlobInfo {
+ blobs := []types.BlobInfo{}
+ for _, layer := range m.LayersDescriptors {
+ blobs = append(blobs, types.BlobInfo{Digest: layer.Digest, Size: layer.Size})
+ }
+ return blobs
+}
+
+// EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+// It returns false if the manifest does not embed a Docker reference.
+// (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+func (m *manifestOCI1) EmbeddedDockerReferenceConflicts(ref reference.Named) bool {
+ return false
+}
+
+func (m *manifestOCI1) imageInspectInfo() (*types.ImageInspectInfo, error) {
+ config, err := m.ConfigBlob()
+ if err != nil {
+ return nil, err
+ }
+ v1 := &v1Image{}
+ if err := json.Unmarshal(config, v1); err != nil {
+ return nil, err
+ }
+ return &types.ImageInspectInfo{
+ DockerVersion: v1.DockerVersion,
+ Created: v1.Created,
+ Labels: v1.Config.Labels,
+ Architecture: v1.Architecture,
+ Os: v1.OS,
+ }, nil
+}
+
+// UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+// This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute
+// (most importantly it forces us to download the full layers even if they are already present at the destination).
+func (m *manifestOCI1) UpdatedImageNeedsLayerDiffIDs(options types.ManifestUpdateOptions) bool {
+ return false
+}
+
+// UpdatedImage returns a types.Image modified according to options.
+// This does not change the state of the original Image object.
+func (m *manifestOCI1) UpdatedImage(options types.ManifestUpdateOptions) (types.Image, error) {
+ copy := *m // NOTE: This is not a deep copy, it still shares slices etc.
+ if options.LayerInfos != nil {
+ if len(copy.LayersDescriptors) != len(options.LayerInfos) {
+ return nil, errors.Errorf("Error preparing updated manifest: layer count changed from %d to %d", len(copy.LayersDescriptors), len(options.LayerInfos))
+ }
+ copy.LayersDescriptors = make([]descriptorOCI1, len(options.LayerInfos))
+ for i, info := range options.LayerInfos {
+ copy.LayersDescriptors[i].MediaType = m.LayersDescriptors[i].MediaType
+ copy.LayersDescriptors[i].Digest = info.Digest
+ copy.LayersDescriptors[i].Size = info.Size
+ }
+ }
+ // Ignore options.EmbeddedDockerReference: it may be set when converting from schema1, but we really don't care.
+
+ switch options.ManifestMIMEType {
+ case "": // No conversion, OK
+ case manifest.DockerV2Schema2MediaType:
+ return copy.convertToManifestSchema2()
+ default:
+ return nil, errors.Errorf("Conversion of image manifest from %s to %s is not implemented", imgspecv1.MediaTypeImageManifest, options.ManifestMIMEType)
+ }
+
+ return memoryImageFromManifest(&copy), nil
+}
+
+func (m *manifestOCI1) convertToManifestSchema2() (types.Image, error) {
+ // Create a copy of the descriptor.
+ config := m.ConfigDescriptor.descriptor
+
+ // The only difference between OCI and DockerSchema2 is the mediatypes. The
+ // media type of the manifest is handled by manifestSchema2FromComponents.
+ config.MediaType = manifest.DockerV2Schema2ConfigMediaType
+
+ layers := make([]descriptor, len(m.LayersDescriptors))
+ for idx := range layers {
+ layers[idx] = m.LayersDescriptors[idx].descriptor
+ layers[idx].MediaType = manifest.DockerV2Schema2LayerMediaType
+ }
+
+ // Rather than copying the ConfigBlob now, we just pass m.src to the
+ // translated manifest, since the only difference is the mediatype of
+ // descriptors there is no change to any blob stored in m.src.
+ m1 := manifestSchema2FromComponents(config, m.src, nil, layers)
+ return memoryImageFromManifest(m1), nil
+}
diff --git a/vendor/github.com/containers/image/image/sourced.go b/vendor/github.com/containers/image/image/sourced.go
new file mode 100644
index 0000000000..ef35b3c32a
--- /dev/null
+++ b/vendor/github.com/containers/image/image/sourced.go
@@ -0,0 +1,90 @@
+// Package image consolidates knowledge about various container image formats
+// (as opposed to image storage mechanisms, which are handled by types.ImageSource)
+// and exposes all of them using an unified interface.
+package image
+
+import (
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+)
+
+// FromSource returns a types.Image implementation for source.
+// The caller must call .Close() on the returned Image.
+//
+// FromSource “takes ownership” of the input ImageSource and will call src.Close()
+// when the image is closed. (This does not prevent callers from using both the
+// Image and ImageSource objects simultaneously, but it means that they only need to
+// keep a reference to the Image.)
+//
+// NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+// verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage instead of calling this function.
+func FromSource(src types.ImageSource) (types.Image, error) {
+ return FromUnparsedImage(UnparsedFromSource(src))
+}
+
+// sourcedImage is a general set of utilities for working with container images,
+// whatever is their underlying location (i.e. dockerImageSource-independent).
+// Note the existence of skopeo/docker.Image: some instances of a `types.Image`
+// may not be a `sourcedImage` directly. However, most users of `types.Image`
+// do not care, and those who care about `skopeo/docker.Image` know they do.
+type sourcedImage struct {
+ *UnparsedImage
+ manifestBlob []byte
+ manifestMIMEType string
+ // genericManifest contains data corresponding to manifestBlob.
+ // NOTE: The manifest may have been modified in the process; DO NOT reserialize and store genericManifest
+ // if you want to preserve the original manifest; use manifestBlob directly.
+ genericManifest
+}
+
+// FromUnparsedImage returns a types.Image implementation for unparsed.
+// The caller must call .Close() on the returned Image.
+//
+// FromUnparsedImage “takes ownership” of the input UnparsedImage and will call unparsed.Close()
+// when the image is closed. (This does not prevent callers from using both the
+// UnparsedImage and ImageSource objects simultaneously, but it means that they only need to
+// keep a reference to the Image.)
+func FromUnparsedImage(unparsed *UnparsedImage) (types.Image, error) {
+ // Note that the input parameter above is specifically *image.UnparsedImage, not types.UnparsedImage:
+ // we want to be able to use unparsed.src. We could make that an explicit interface, but, well,
+ // this is the only UnparsedImage implementation around, anyway.
+
+ // Also, we do not explicitly implement types.Image.Close; we let the implementation fall through to
+ // unparsed.Close.
+
+ // NOTE: It is essential for signature verification that all parsing done in this object happens on the same manifest which is returned by unparsed.Manifest().
+ manifestBlob, manifestMIMEType, err := unparsed.Manifest()
+ if err != nil {
+ return nil, err
+ }
+
+ parsedManifest, err := manifestInstanceFromBlob(unparsed.src, manifestBlob, manifestMIMEType)
+ if err != nil {
+ return nil, err
+ }
+
+ return &sourcedImage{
+ UnparsedImage: unparsed,
+ manifestBlob: manifestBlob,
+ manifestMIMEType: manifestMIMEType,
+ genericManifest: parsedManifest,
+ }, nil
+}
+
+// Size returns the size of the image as stored, if it's known, or -1 if it isn't.
+func (i *sourcedImage) Size() (int64, error) {
+ return -1, nil
+}
+
+// Manifest overrides the UnparsedImage.Manifest to always use the fields which we have already fetched.
+func (i *sourcedImage) Manifest() ([]byte, string, error) {
+ return i.manifestBlob, i.manifestMIMEType, nil
+}
+
+func (i *sourcedImage) Inspect() (*types.ImageInspectInfo, error) {
+ return inspectManifest(i.genericManifest)
+}
+
+func (i *sourcedImage) IsMultiImage() bool {
+ return i.manifestMIMEType == manifest.DockerV2ListMediaType
+}
diff --git a/vendor/github.com/containers/image/image/unparsed.go b/vendor/github.com/containers/image/image/unparsed.go
new file mode 100644
index 0000000000..483cfd04f1
--- /dev/null
+++ b/vendor/github.com/containers/image/image/unparsed.go
@@ -0,0 +1,85 @@
+package image
+
+import (
+ "context"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+ "github.com/pkg/errors"
+)
+
+// UnparsedImage implements types.UnparsedImage .
+type UnparsedImage struct {
+ src types.ImageSource
+ cachedManifest []byte // A private cache for Manifest(); nil if not yet known.
+ // A private cache for Manifest(), may be the empty string if guessing failed.
+ // Valid iff cachedManifest is not nil.
+ cachedManifestMIMEType string
+ cachedSignatures [][]byte // A private cache for Signatures(); nil if not yet known.
+}
+
+// UnparsedFromSource returns a types.UnparsedImage implementation for source.
+// The caller must call .Close() on the returned UnparsedImage.
+//
+// UnparsedFromSource “takes ownership” of the input ImageSource and will call src.Close()
+// when the image is closed. (This does not prevent callers from using both the
+// UnparsedImage and ImageSource objects simultaneously, but it means that they only need to
+// keep a reference to the UnparsedImage.)
+func UnparsedFromSource(src types.ImageSource) *UnparsedImage {
+ return &UnparsedImage{src: src}
+}
+
+// Reference returns the reference used to set up this source, _as specified by the user_
+// (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+func (i *UnparsedImage) Reference() types.ImageReference {
+ return i.src.Reference()
+}
+
+// Close removes resources associated with an initialized UnparsedImage, if any.
+func (i *UnparsedImage) Close() error {
+ return i.src.Close()
+}
+
+// Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) Manifest() ([]byte, string, error) {
+ if i.cachedManifest == nil {
+ m, mt, err := i.src.GetManifest()
+ if err != nil {
+ return nil, "", err
+ }
+
+ // ImageSource.GetManifest does not do digest verification, but we do;
+ // this immediately protects also any user of types.Image.
+ ref := i.Reference().DockerReference()
+ if ref != nil {
+ if canonical, ok := ref.(reference.Canonical); ok {
+ digest := digest.Digest(canonical.Digest())
+ matches, err := manifest.MatchesDigest(m, digest)
+ if err != nil {
+ return nil, "", errors.Wrap(err, "Error computing manifest digest")
+ }
+ if !matches {
+ return nil, "", errors.Errorf("Manifest does not match provided manifest digest %s", digest)
+ }
+ }
+ }
+
+ i.cachedManifest = m
+ i.cachedManifestMIMEType = mt
+ }
+ return i.cachedManifest, i.cachedManifestMIMEType, nil
+}
+
+// Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+func (i *UnparsedImage) Signatures(ctx context.Context) ([][]byte, error) {
+ if i.cachedSignatures == nil {
+ sigs, err := i.src.GetSignatures(ctx)
+ if err != nil {
+ return nil, err
+ }
+ i.cachedSignatures = sigs
+ }
+ return i.cachedSignatures, nil
+}
diff --git a/vendor/github.com/containers/image/manifest/manifest.go b/vendor/github.com/containers/image/manifest/manifest.go
new file mode 100644
index 0000000000..605bab1db7
--- /dev/null
+++ b/vendor/github.com/containers/image/manifest/manifest.go
@@ -0,0 +1,144 @@
+package manifest
+
+import (
+ "encoding/json"
+
+ "github.com/docker/libtrust"
+ "github.com/opencontainers/go-digest"
+ imgspecv1 "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// FIXME: Should we just use docker/distribution and docker/docker implementations directly?
+
+// FIXME(runcom, mitr): should we have a mediatype pkg??
+const (
+ // DockerV2Schema1MediaType MIME type represents Docker manifest schema 1
+ DockerV2Schema1MediaType = "application/vnd.docker.distribution.manifest.v1+json"
+ // DockerV2Schema1SignedMediaType MIME type represents Docker manifest schema 1 with a JWS signature
+ DockerV2Schema1SignedMediaType = "application/vnd.docker.distribution.manifest.v1+prettyjws"
+ // DockerV2Schema2MediaType MIME type represents Docker manifest schema 2
+ DockerV2Schema2MediaType = "application/vnd.docker.distribution.manifest.v2+json"
+ // DockerV2Schema2ConfigMediaType is the MIME type used for schema 2 config blobs.
+ DockerV2Schema2ConfigMediaType = "application/vnd.docker.container.image.v1+json"
+ // DockerV2Schema2LayerMediaType is the MIME type used for schema 2 layers.
+ DockerV2Schema2LayerMediaType = "application/vnd.docker.image.rootfs.diff.tar.gzip"
+ // DockerV2ListMediaType MIME type represents Docker manifest schema 2 list
+ DockerV2ListMediaType = "application/vnd.docker.distribution.manifest.list.v2+json"
+ // DockerV2Schema2ForeignLayerMediaType is the MIME type used for schema 2 foreign layers.
+ DockerV2Schema2ForeignLayerMediaType = "application/vnd.docker.image.rootfs.foreign.diff.tar.gzip"
+)
+
+// DefaultRequestedManifestMIMETypes is a list of MIME types a types.ImageSource
+// should request from the backend unless directed otherwise.
+var DefaultRequestedManifestMIMETypes = []string{
+ imgspecv1.MediaTypeImageManifest,
+ DockerV2Schema2MediaType,
+ DockerV2Schema1SignedMediaType,
+ DockerV2Schema1MediaType,
+ DockerV2ListMediaType,
+}
+
+// GuessMIMEType guesses MIME type of a manifest and returns it _if it is recognized_, or "" if unknown or unrecognized.
+// FIXME? We should, in general, prefer out-of-band MIME type instead of blindly parsing the manifest,
+// but we may not have such metadata available (e.g. when the manifest is a local file).
+func GuessMIMEType(manifest []byte) string {
+ // A subset of manifest fields; the rest is silently ignored by json.Unmarshal.
+ // Also docker/distribution/manifest.Versioned.
+ meta := struct {
+ MediaType string `json:"mediaType"`
+ SchemaVersion int `json:"schemaVersion"`
+ Signatures interface{} `json:"signatures"`
+ }{}
+ if err := json.Unmarshal(manifest, &meta); err != nil {
+ return ""
+ }
+
+ switch meta.MediaType {
+ case DockerV2Schema2MediaType, DockerV2ListMediaType: // A recognized type.
+ return meta.MediaType
+ }
+ // Note: this is the only way the function can return DockerV2Schema1MediaType, and recognizing that is essential for stripping the JWS signatures = computing the correct manifest digest.
+ switch meta.SchemaVersion {
+ case 1:
+ if meta.Signatures != nil {
+ return DockerV2Schema1SignedMediaType
+ }
+ return DockerV2Schema1MediaType
+ case 2:
+ // best effort to understand if this is an OCI image since mediaType
+ // isn't in the manifest for OCI anymore
+ // for docker v2s2 meta.MediaType should have been set. But given the data, this is our best guess.
+ ociMan := struct {
+ Config struct {
+ MediaType string `json:"mediaType"`
+ } `json:"config"`
+ Layers []imgspecv1.Descriptor `json:"layers"`
+ }{}
+ if err := json.Unmarshal(manifest, &ociMan); err != nil {
+ return ""
+ }
+ if ociMan.Config.MediaType == imgspecv1.MediaTypeImageConfig && len(ociMan.Layers) != 0 {
+ return imgspecv1.MediaTypeImageManifest
+ }
+ ociIndex := struct {
+ Manifests []imgspecv1.Descriptor `json:"manifests"`
+ }{}
+ if err := json.Unmarshal(manifest, &ociIndex); err != nil {
+ return ""
+ }
+ if len(ociIndex.Manifests) != 0 && ociIndex.Manifests[0].MediaType == imgspecv1.MediaTypeImageManifest {
+ return imgspecv1.MediaTypeImageIndex
+ }
+ return DockerV2Schema2MediaType
+ }
+ return ""
+}
+
+// Digest returns the digest of a docker manifest, with any necessary implied transformations like stripping v1s1 signatures.
+func Digest(manifest []byte) (digest.Digest, error) {
+ if GuessMIMEType(manifest) == DockerV2Schema1SignedMediaType {
+ sig, err := libtrust.ParsePrettySignature(manifest, "signatures")
+ if err != nil {
+ return "", err
+ }
+ manifest, err = sig.Payload()
+ if err != nil {
+ // Coverage: This should never happen, libtrust's Payload() can fail only if joseBase64UrlDecode() fails, on a string
+ // that libtrust itself has joseBase64UrlEncode()d
+ return "", err
+ }
+ }
+
+ return digest.FromBytes(manifest), nil
+}
+
+// MatchesDigest returns true iff the manifest matches expectedDigest.
+// An error may accompany a false result (e.g. if the manifest digest could not be computed).
+// Note that this is not doing ConstantTimeCompare; by the time we get here, the cryptographic signature must already have been verified,
+// or we are not using a cryptographic channel and the attacker can modify the digest along with the manifest blob.
+func MatchesDigest(manifest []byte, expectedDigest digest.Digest) (bool, error) {
+ // This should eventually support various digest types.
+ actualDigest, err := Digest(manifest)
+ if err != nil {
+ return false, err
+ }
+ return expectedDigest == actualDigest, nil
+}
+
+// AddDummyV2S1Signature adds a JWS signature with a temporary key (i.e. useless) to a v2s1 manifest.
+// This is useful to make the manifest acceptable to a Docker Registry (even though nothing needs or wants the JWS signature).
+func AddDummyV2S1Signature(manifest []byte) ([]byte, error) {
+ key, err := libtrust.GenerateECP256PrivateKey()
+ if err != nil {
+ return nil, err // Coverage: This can fail only if rand.Reader fails.
+ }
+
+ js, err := libtrust.NewJSONSignature(manifest)
+ if err != nil {
+ return nil, err
+ }
+ if err := js.Sign(key); err != nil { // Coverage: This can fail basically only if rand.Reader fails.
+ return nil, err
+ }
+ return js.PrettySignature("signatures")
+}
diff --git a/vendor/github.com/containers/image/pkg/compression/compression.go b/vendor/github.com/containers/image/pkg/compression/compression.go
new file mode 100644
index 0000000000..c114ded68e
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/compression/compression.go
@@ -0,0 +1,67 @@
+package compression
+
+import (
+ "bytes"
+ "compress/bzip2"
+ "compress/gzip"
+ "io"
+
+ "github.com/pkg/errors"
+
+ "github.com/Sirupsen/logrus"
+)
+
+// DecompressorFunc returns the decompressed stream, given a compressed stream.
+type DecompressorFunc func(io.Reader) (io.Reader, error)
+
+// GzipDecompressor is a DecompressorFunc for the gzip compression algorithm.
+func GzipDecompressor(r io.Reader) (io.Reader, error) {
+ return gzip.NewReader(r)
+}
+
+// Bzip2Decompressor is a DecompressorFunc for the bzip2 compression algorithm.
+func Bzip2Decompressor(r io.Reader) (io.Reader, error) {
+ return bzip2.NewReader(r), nil
+}
+
+// XzDecompressor is a DecompressorFunc for the xz compression algorithm; it is currently unsupported and always returns an error.
+func XzDecompressor(r io.Reader) (io.Reader, error) {
+ return nil, errors.New("Decompressing xz streams is not supported")
+}
+
+// compressionAlgos is an internal implementation detail of DetectCompression
+var compressionAlgos = map[string]struct {
+ prefix []byte
+ decompressor DecompressorFunc
+}{
+ "gzip": {[]byte{0x1F, 0x8B, 0x08}, GzipDecompressor}, // gzip (RFC 1952)
+ "bzip2": {[]byte{0x42, 0x5A, 0x68}, Bzip2Decompressor}, // bzip2 (decompress.c:BZ2_decompress)
+ "xz": {[]byte{0xFD, 0x37, 0x7A, 0x58, 0x5A, 0x00}, XzDecompressor}, // xz (/usr/share/doc/xz/xz-file-format.txt)
+}
+
+// DetectCompression returns a DecompressorFunc if the input is recognized as a compressed format, nil otherwise (the input is then assumed to be uncompressed).
+// Because it consumes the start of input, other consumers must use the returned io.Reader instead to also read from the beginning.
+func DetectCompression(input io.Reader) (DecompressorFunc, io.Reader, error) {
+ buffer := [8]byte{}
+
+ n, err := io.ReadAtLeast(input, buffer[:], len(buffer))
+ if err != nil && err != io.EOF && err != io.ErrUnexpectedEOF {
+ // This is a “real” error. We could just ignore it this time, process the data we have, and hope that the source will report the same error again.
+ // Instead, fail immediately with the original error cause instead of a possibly secondary/misleading error returned later.
+ return nil, nil, err
+ }
+
+ var decompressor DecompressorFunc
+ for name, algo := range compressionAlgos {
+ if bytes.HasPrefix(buffer[:n], algo.prefix) {
+ logrus.Debugf("Detected compression format %s", name)
+ decompressor = algo.decompressor
+ break
+ }
+ }
+ if decompressor == nil {
+ logrus.Debugf("No compression detected")
+ }
+
+ return decompressor, io.MultiReader(bytes.NewReader(buffer[:n]), input), nil
+}
diff --git a/vendor/github.com/containers/image/pkg/strslice/README.md b/vendor/github.com/containers/image/pkg/strslice/README.md
new file mode 100644
index 0000000000..ae6097e82e
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/strslice/README.md
@@ -0,0 +1 @@
+This package was replicated from [github.com/docker/docker v17.04.0-ce](https://github.com/docker/docker/tree/v17.04.0-ce/api/types/strslice).
diff --git a/vendor/github.com/containers/image/pkg/strslice/strslice.go b/vendor/github.com/containers/image/pkg/strslice/strslice.go
new file mode 100644
index 0000000000..bad493fb89
--- /dev/null
+++ b/vendor/github.com/containers/image/pkg/strslice/strslice.go
@@ -0,0 +1,30 @@
+package strslice
+
+import "encoding/json"
+
+// StrSlice represents a string or an array of strings.
+// The JSON decoder is overridden (see UnmarshalJSON) so that both encodings are accepted.
+type StrSlice []string
+
+// UnmarshalJSON decodes the byte slice whether it's a string or an array of
+// strings. This method is needed to implement json.Unmarshaler.
+func (e *StrSlice) UnmarshalJSON(b []byte) error {
+ if len(b) == 0 {
+ // With no input, we preserve the existing value by returning nil and
+ // leaving the target alone. This allows defining default values for
+ // the type.
+ return nil
+ }
+
+ p := make([]string, 0, 1)
+ if err := json.Unmarshal(b, &p); err != nil {
+ var s string
+ if err := json.Unmarshal(b, &s); err != nil {
+ return err
+ }
+ p = append(p, s)
+ }
+
+ *e = p
+ return nil
+}
diff --git a/vendor/github.com/containers/image/signature/docker.go b/vendor/github.com/containers/image/signature/docker.go
new file mode 100644
index 0000000000..16eb3f7993
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/docker.go
@@ -0,0 +1,65 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+package signature
+
+import (
+ "fmt"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/manifest"
+ "github.com/opencontainers/go-digest"
+)
+
+// SignDockerManifest returns a signature for manifest as the specified dockerReference,
+// using mech and keyIdentity; the signature covers the manifest digest and dockerReference.
+func SignDockerManifest(m []byte, dockerReference string, mech SigningMechanism, keyIdentity string) ([]byte, error) {
+ manifestDigest, err := manifest.Digest(m)
+ if err != nil {
+ return nil, err
+ }
+ sig := newUntrustedSignature(manifestDigest, dockerReference)
+ return sig.sign(mech, keyIdentity)
+}
+
+// VerifyDockerManifestSignature checks that unverifiedSignature uses expectedKeyIdentity to sign unverifiedManifest as expectedDockerReference,
+// using mech. Returns the verified Signature on success.
+func VerifyDockerManifestSignature(unverifiedSignature, unverifiedManifest []byte,
+ expectedDockerReference string, mech SigningMechanism, expectedKeyIdentity string) (*Signature, error) {
+ expectedRef, err := reference.ParseNormalizedNamed(expectedDockerReference)
+ if err != nil {
+ return nil, err
+ }
+ sig, err := verifyAndExtractSignature(mech, unverifiedSignature, signatureAcceptanceRules{
+ validateKeyIdentity: func(keyIdentity string) error {
+ if keyIdentity != expectedKeyIdentity {
+ return InvalidSignatureError{msg: fmt.Sprintf("Signature by %s does not match expected fingerprint %s", keyIdentity, expectedKeyIdentity)}
+ }
+ return nil
+ },
+ validateSignedDockerReference: func(signedDockerReference string) error {
+ signedRef, err := reference.ParseNormalizedNamed(signedDockerReference)
+ if err != nil {
+ return InvalidSignatureError{msg: fmt.Sprintf("Invalid docker reference %s in signature", signedDockerReference)}
+ }
+ if signedRef.String() != expectedRef.String() {
+ return InvalidSignatureError{msg: fmt.Sprintf("Docker reference %s does not match %s",
+ signedDockerReference, expectedDockerReference)}
+ }
+ return nil
+ },
+ validateSignedDockerManifestDigest: func(signedDockerManifestDigest digest.Digest) error {
+ matches, err := manifest.MatchesDigest(unverifiedManifest, signedDockerManifestDigest)
+ if err != nil {
+ return err
+ }
+ if !matches {
+ return InvalidSignatureError{msg: fmt.Sprintf("Signature for docker digest %q does not match", signedDockerManifestDigest)}
+ }
+ return nil
+ },
+ })
+ if err != nil {
+ return nil, err
+ }
+ return sig, nil
+}
diff --git a/vendor/github.com/containers/image/signature/json.go b/vendor/github.com/containers/image/signature/json.go
new file mode 100644
index 0000000000..9e592863da
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/json.go
@@ -0,0 +1,88 @@
+package signature
+
+import (
+ "bytes"
+ "encoding/json"
+ "fmt"
+ "io"
+)
+
+// jsonFormatError is returned when JSON does not match the expected format.
+type jsonFormatError string
+
+func (err jsonFormatError) Error() string {
+ return string(err)
+}
+
+// paranoidUnmarshalJSONObject unmarshals data as a JSON object, but failing on the slightest unexpected aspect
+// (including duplicated keys, unrecognized keys, and non-matching types). Uses fieldResolver to
+// determine the destination for a field value, which should return a pointer to the destination if valid, or nil if the key is rejected.
+//
+// The fieldResolver approach is useful for decoding the Policy.Transports map; using it for structs is a bit lazy,
+// we could use reflection to automate this. Later?
+func paranoidUnmarshalJSONObject(data []byte, fieldResolver func(string) interface{}) error {
+ seenKeys := map[string]struct{}{}
+
+ dec := json.NewDecoder(bytes.NewReader(data))
+ t, err := dec.Token()
+ if err != nil {
+ return jsonFormatError(err.Error())
+ }
+ if t != json.Delim('{') {
+ return jsonFormatError(fmt.Sprintf("JSON object expected, got \"%s\"", t))
+ }
+ for {
+ t, err := dec.Token()
+ if err != nil {
+ return jsonFormatError(err.Error())
+ }
+ if t == json.Delim('}') {
+ break
+ }
+
+ key, ok := t.(string)
+ if !ok {
+ // Coverage: This should never happen, dec.Token() rejects non-string-literals in this state.
+ return jsonFormatError(fmt.Sprintf("Key string literal expected, got \"%s\"", t))
+ }
+ if _, ok := seenKeys[key]; ok {
+ return jsonFormatError(fmt.Sprintf("Duplicate key \"%s\"", key))
+ }
+ seenKeys[key] = struct{}{}
+
+ valuePtr := fieldResolver(key)
+ if valuePtr == nil {
+ return jsonFormatError(fmt.Sprintf("Unknown key \"%s\"", key))
+ }
+ // This works like json.Unmarshal; in particular it allows the target to implement UnmarshalJSON for strict parsing of the field value.
+ if err := dec.Decode(valuePtr); err != nil {
+ return jsonFormatError(err.Error())
+ }
+ }
+ if _, err := dec.Token(); err != io.EOF {
+ return jsonFormatError("Unexpected data after JSON object")
+ }
+ return nil
+}
+
+// paranoidUnmarshalJSONObjectExactFields unmarshals data as a JSON object, but failing on the slightest unexpected aspect
+// (including duplicated keys, unrecognized keys, and non-matching types). Each of the fields in exactFields
+// must be present exactly once, and no other fields are accepted.
+func paranoidUnmarshalJSONObjectExactFields(data []byte, exactFields map[string]interface{}) error {
+ seenKeys := map[string]struct{}{}
+ if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ if valuePtr, ok := exactFields[key]; ok {
+ seenKeys[key] = struct{}{}
+ return valuePtr
+ }
+ return nil
+ }); err != nil {
+ return err
+ }
+ for key := range exactFields {
+ if _, ok := seenKeys[key]; !ok {
+ return jsonFormatError(fmt.Sprintf(`Key "%s" missing in a JSON object`, key))
+ }
+ }
+ return nil
+}
diff --git a/vendor/github.com/containers/image/signature/mechanism.go b/vendor/github.com/containers/image/signature/mechanism.go
new file mode 100644
index 0000000000..bdf26c531f
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/mechanism.go
@@ -0,0 +1,85 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+package signature
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "strings"
+
+ "golang.org/x/crypto/openpgp"
+)
+
+// SigningMechanism abstracts a way to sign binary blobs and verify their signatures.
+// Each mechanism should eventually be closed by calling Close().
+// FIXME: Eventually expand on keyIdentity (namespace them between mechanisms to
+// eliminate ambiguities, support CA signatures and perhaps other key properties)
+type SigningMechanism interface {
+ // Close removes resources associated with the mechanism, if any.
+ Close() error
+ // SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError.
+ SupportsSigning() error
+ // Sign creates a (non-detached) signature of input using keyIdentity.
+ // Fails with a SigningNotSupportedError if the mechanism does not support signing.
+ Sign(input []byte, keyIdentity string) ([]byte, error)
+ // Verify parses unverifiedSignature and returns the content and the signer's identity
+ Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error)
+ // UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+ // along with a short identifier of the key used for signing.
+ // WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+ // is NOT the same as a "key identity" used in other calls to this interface, and
+ // the values may have no recognizable relationship if the public key is not available.
+ UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error)
+}
+
+// SigningNotSupportedError is returned when trying to sign using a mechanism which does not support that.
+type SigningNotSupportedError string
+
+func (err SigningNotSupportedError) Error() string {
+ return string(err)
+}
+
+// NewGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism for the user’s default
+// GPG configuration ($GNUPGHOME / ~/.gnupg)
+// The caller must call .Close() on the returned SigningMechanism.
+func NewGPGSigningMechanism() (SigningMechanism, error) {
+ return newGPGSigningMechanismInDirectory("")
+}
+
+// NewEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
+// recognizes _only_ public keys from the supplied blob, and returns the identities
+// of these keys (their key fingerprints).
+// The caller must call .Close() on the returned SigningMechanism.
+func NewEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
+ return newEphemeralGPGSigningMechanism(blob)
+}
+
+// gpgUntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+// along with a short identifier of the key used for signing.
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
+// the values may have no recognizable relationship if the public key is not available.
+func gpgUntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+ // This uses the Golang-native OpenPGP implementation instead of gpgme because we are not doing any cryptography.
+ md, err := openpgp.ReadMessage(bytes.NewReader(untrustedSignature), openpgp.EntityList{}, nil, nil)
+ if err != nil {
+ return nil, "", err
+ }
+ if !md.IsSigned {
+ return nil, "", errors.New("The input is not a signature")
+ }
+ content, err := ioutil.ReadAll(md.UnverifiedBody)
+ if err != nil {
+ // Coverage: An error during reading the body can happen only if
+ // 1) the message is encrypted, which is not our case (and we don’t give ReadMessage the key
+ // to decrypt the contents anyway), or
+ // 2) the message is signed AND we give ReadMessage a corresponding public key, which we don’t.
+ return nil, "", err
+ }
+
+ // Uppercase the key ID for minimal consistency with the gpgme-returned fingerprints
+ // (but note that key ID is a suffix of the fingerprint only for V4 keys, not V3)!
+ return content, strings.ToUpper(fmt.Sprintf("%016X", md.SignedByKeyId)), nil
+}
diff --git a/vendor/github.com/containers/image/signature/mechanism_gpgme.go b/vendor/github.com/containers/image/signature/mechanism_gpgme.go
new file mode 100644
index 0000000000..4825ab27c6
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/mechanism_gpgme.go
@@ -0,0 +1,175 @@
+// +build !containers_image_openpgp
+
+package signature
+
+import (
+ "bytes"
+ "fmt"
+ "io/ioutil"
+ "os"
+
+ "github.com/mtrmac/gpgme"
+)
+
+// A GPG/OpenPGP signing mechanism, implemented using gpgme.
+type gpgmeSigningMechanism struct {
+ ctx *gpgme.Context
+ ephemeralDir string // If not "", a directory to be removed on Close()
+}
+
+// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir as the GPG home directory if not empty.
+// The caller must call .Close() on the returned SigningMechanism.
+func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
+ ctx, err := newGPGMEContext(optionalDir)
+ if err != nil {
+ return nil, err
+ }
+ return &gpgmeSigningMechanism{
+ ctx: ctx,
+ ephemeralDir: "",
+ }, nil
+}
+
+// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
+// recognizes _only_ public keys from the supplied blob, and returns the identities
+// of these keys; the keys are imported into a temporary GPG home directory removed on Close().
+// The caller must call .Close() on the returned SigningMechanism.
+func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
+ dir, err := ioutil.TempDir("", "containers-ephemeral-gpg-")
+ if err != nil {
+ return nil, nil, err
+ }
+ removeDir := true
+ defer func() {
+ if removeDir {
+ os.RemoveAll(dir)
+ }
+ }()
+ ctx, err := newGPGMEContext(dir)
+ if err != nil {
+ return nil, nil, err
+ }
+ mech := &gpgmeSigningMechanism{
+ ctx: ctx,
+ ephemeralDir: dir,
+ }
+ keyIdentities, err := mech.importKeysFromBytes(blob)
+ if err != nil {
+ return nil, nil, err
+ }
+
+ removeDir = false
+ return mech, keyIdentities, nil
+}
+
+// newGPGMEContext returns a new *gpgme.Context, using optionalDir as the OpenPGP engine home directory if not empty.
+func newGPGMEContext(optionalDir string) (*gpgme.Context, error) {
+ ctx, err := gpgme.New()
+ if err != nil {
+ return nil, err
+ }
+ if err = ctx.SetProtocol(gpgme.ProtocolOpenPGP); err != nil {
+ return nil, err
+ }
+ if optionalDir != "" {
+ err := ctx.SetEngineInfo(gpgme.ProtocolOpenPGP, "", optionalDir)
+ if err != nil {
+ return nil, err
+ }
+ }
+ ctx.SetArmor(false)
+ ctx.SetTextMode(false)
+ return ctx, nil
+}
+
+func (m *gpgmeSigningMechanism) Close() error {
+ if m.ephemeralDir != "" {
+ os.RemoveAll(m.ephemeralDir) // Ignore an error, if any
+ }
+ return nil
+}
+
+// importKeysFromBytes imports public keys from the supplied blob and returns their identities (fingerprints of the keys imported without error).
+// The blob is assumed to have an appropriate format (the caller is expected to know which one).
+// NOTE: This may modify long-term state (e.g. key storage in a directory underlying the mechanism);
+// but we do not make this public, it can only be used through newEphemeralGPGSigningMechanism.
+func (m *gpgmeSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) {
+ inputData, err := gpgme.NewDataBytes(blob)
+ if err != nil {
+ return nil, err
+ }
+ res, err := m.ctx.Import(inputData)
+ if err != nil {
+ return nil, err
+ }
+ keyIdentities := []string{}
+ for _, i := range res.Imports {
+ if i.Result == nil {
+ keyIdentities = append(keyIdentities, i.Fingerprint)
+ }
+ }
+ return keyIdentities, nil
+}
+
+// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError; the gpgme-based mechanism always supports signing.
+func (m *gpgmeSigningMechanism) SupportsSigning() error {
+ return nil
+}
+
+// Sign creates a (non-detached) signature of input using keyIdentity.
+// Fails with a SigningNotSupportedError if the mechanism does not support signing.
+func (m *gpgmeSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+ key, err := m.ctx.GetKey(keyIdentity, true)
+ if err != nil {
+ return nil, err
+ }
+ inputData, err := gpgme.NewDataBytes(input)
+ if err != nil {
+ return nil, err
+ }
+ var sigBuffer bytes.Buffer
+ sigData, err := gpgme.NewDataWriter(&sigBuffer)
+ if err != nil {
+ return nil, err
+ }
+ if err = m.ctx.Sign([]*gpgme.Key{key}, inputData, sigData, gpgme.SigModeNormal); err != nil {
+ return nil, err
+ }
+ return sigBuffer.Bytes(), nil
+}
+
+// Verify parses unverifiedSignature and returns the content and the signer's identity
+func (m gpgmeSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
+ signedBuffer := bytes.Buffer{}
+ signedData, err := gpgme.NewDataWriter(&signedBuffer)
+ if err != nil {
+ return nil, "", err
+ }
+ unverifiedSignatureData, err := gpgme.NewDataBytes(unverifiedSignature)
+ if err != nil {
+ return nil, "", err
+ }
+ _, sigs, err := m.ctx.Verify(unverifiedSignatureData, nil, signedData)
+ if err != nil {
+ return nil, "", err
+ }
+ if len(sigs) != 1 {
+ return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Unexpected GPG signature count %d", len(sigs))}
+ }
+ sig := sigs[0]
+ // This is sig.Summary == gpgme.SigSumValid except for key trust, which we handle ourselves
+ if sig.Status != nil || sig.Validity == gpgme.ValidityNever || sig.ValidityReason != nil || sig.WrongKeyUsage {
+ // FIXME: Better error reporting eventually
+ return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", sig)}
+ }
+ return signedBuffer.Bytes(), sig.Fingerprint, nil
+}
+
+// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+// along with a short identifier of the key used for signing.
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
+// the values may have no recognizable relationship if the public key is not available.
+func (m gpgmeSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+ return gpgUntrustedSignatureContents(untrustedSignature)
+}
diff --git a/vendor/github.com/containers/image/signature/mechanism_openpgp.go b/vendor/github.com/containers/image/signature/mechanism_openpgp.go
new file mode 100644
index 0000000000..eccd610c9d
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/mechanism_openpgp.go
@@ -0,0 +1,159 @@
+// +build containers_image_openpgp
+
+package signature
+
+import (
+ "bytes"
+ "errors"
+ "fmt"
+ "io/ioutil"
+ "os"
+ "path"
+ "strings"
+ "time"
+
+ "github.com/containers/storage/pkg/homedir"
+ "golang.org/x/crypto/openpgp"
+)
+
+// A GPG/OpenPGP signing mechanism, implemented using x/crypto/openpgp.
+type openpgpSigningMechanism struct {
+ keyring openpgp.EntityList
+}
+
+// newGPGSigningMechanismInDirectory returns a new GPG/OpenPGP signing mechanism, using optionalDir as the GPG home directory if not empty.
+// The caller must call .Close() on the returned SigningMechanism.
+func newGPGSigningMechanismInDirectory(optionalDir string) (SigningMechanism, error) {
+ m := &openpgpSigningMechanism{
+ keyring: openpgp.EntityList{},
+ }
+
+ gpgHome := optionalDir
+ if gpgHome == "" {
+ gpgHome = os.Getenv("GNUPGHOME")
+ if gpgHome == "" {
+ gpgHome = path.Join(homedir.Get(), ".gnupg")
+ }
+ }
+
+ pubring, err := ioutil.ReadFile(path.Join(gpgHome, "pubring.gpg"))
+ if err != nil {
+ if !os.IsNotExist(err) {
+ return nil, err
+ }
+ } else {
+ _, err := m.importKeysFromBytes(pubring)
+ if err != nil {
+ return nil, err
+ }
+ }
+ return m, nil
+}
+
+// newEphemeralGPGSigningMechanism returns a new GPG/OpenPGP signing mechanism which
+// recognizes _only_ public keys from the supplied blob, and returns the identities
+// of these keys (uppercase hex fingerprints).
+// The caller must call .Close() on the returned SigningMechanism.
+func newEphemeralGPGSigningMechanism(blob []byte) (SigningMechanism, []string, error) {
+ m := &openpgpSigningMechanism{
+ keyring: openpgp.EntityList{},
+ }
+ keyIdentities, err := m.importKeysFromBytes(blob)
+ if err != nil {
+ return nil, nil, err
+ }
+ return m, keyIdentities, nil
+}
+
+func (m *openpgpSigningMechanism) Close() error {
+ return nil
+}
+
+// importKeysFromBytes imports public keys from the supplied blob and returns their identities (uppercase hex fingerprints).
+// The blob is assumed to have an appropriate format (the caller is expected to know which one).
+func (m *openpgpSigningMechanism) importKeysFromBytes(blob []byte) ([]string, error) {
+ keyring, err := openpgp.ReadKeyRing(bytes.NewReader(blob))
+ if err != nil {
+ k, e2 := openpgp.ReadArmoredKeyRing(bytes.NewReader(blob))
+ if e2 != nil {
+ return nil, err // The original error -- FIXME: is this better?
+ }
+ keyring = k
+ }
+
+ keyIdentities := []string{}
+ for _, entity := range keyring {
+ if entity.PrimaryKey == nil {
+ // Coverage: This should never happen, openpgp.ReadEntity fails with a
+ // openpgp.errors.StructuralError instead of returning an entity with this
+ // field set to nil.
+ continue
+ }
+ // Uppercase the fingerprint to be compatible with gpgme
+ keyIdentities = append(keyIdentities, strings.ToUpper(fmt.Sprintf("%x", entity.PrimaryKey.Fingerprint)))
+ m.keyring = append(m.keyring, entity)
+ }
+ return keyIdentities, nil
+}
+
+// SupportsSigning returns nil if the mechanism supports signing, or a SigningNotSupportedError; this openpgp-based mechanism never supports signing.
+func (m *openpgpSigningMechanism) SupportsSigning() error {
+ return SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
+}
+
+// Sign creates a (non-detached) signature of input using keyIdentity.
+// Fails with a SigningNotSupportedError if the mechanism does not support signing.
+func (m *openpgpSigningMechanism) Sign(input []byte, keyIdentity string) ([]byte, error) {
+ return nil, SigningNotSupportedError("signing is not supported in github.com/containers/image built with the containers_image_openpgp build tag")
+}
+
+// Verify parses unverifiedSignature, verifies it against the mechanism's keyring, and returns the content and the signer's identity
+func (m *openpgpSigningMechanism) Verify(unverifiedSignature []byte) (contents []byte, keyIdentity string, err error) {
+ md, err := openpgp.ReadMessage(bytes.NewReader(unverifiedSignature), m.keyring, nil, nil)
+ if err != nil {
+ return nil, "", err
+ }
+ if !md.IsSigned {
+ return nil, "", errors.New("not signed")
+ }
+ content, err := ioutil.ReadAll(md.UnverifiedBody)
+ if err != nil {
+ // Coverage: md.UnverifiedBody.Read only fails if the body is encrypted
+ // (and possibly also signed, but it _must_ be encrypted) and the signing
+ // “modification detection code” detects a mismatch. But in that case,
+ // we would expect the signature verification to fail as well, and that is checked
+ // first. Besides, we are not supplying any decryption keys, so we really
+ // can never reach this “encrypted data MDC mismatch” path.
+ return nil, "", err
+ }
+ if md.SignatureError != nil {
+ return nil, "", fmt.Errorf("signature error: %v", md.SignatureError)
+ }
+ if md.SignedBy == nil {
+ return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Invalid GPG signature: %#v", md.Signature)}
+ }
+ if md.Signature != nil {
+ if md.Signature.SigLifetimeSecs != nil {
+ expiry := md.Signature.CreationTime.Add(time.Duration(*md.Signature.SigLifetimeSecs) * time.Second)
+ if time.Now().After(expiry) {
+ return nil, "", InvalidSignatureError{msg: fmt.Sprintf("Signature expired on %s", expiry)}
+ }
+ }
+ } else if md.SignatureV3 == nil {
+ // Coverage: If md.SignedBy != nil, the final md.UnverifiedBody.Read() either sets one of md.Signature or md.SignatureV3,
+ // or sets md.SignatureError.
+ return nil, "", InvalidSignatureError{msg: "Unexpected openpgp.MessageDetails: neither Signature nor SignatureV3 is set"}
+ }
+
+ // Uppercase the fingerprint to be compatible with gpgme
+ return content, strings.ToUpper(fmt.Sprintf("%x", md.SignedBy.PublicKey.Fingerprint)), nil
+}
+
+// UntrustedSignatureContents returns UNTRUSTED contents of the signature WITHOUT ANY VERIFICATION,
+// along with a short identifier of the key used for signing.
+// WARNING: The short key identifier (which corresponds to "Key ID" for OpenPGP keys)
+// is NOT the same as a "key identity" used in other calls to this interface, and
+// the values may have no recognizable relationship if the public key is not available.
+func (m openpgpSigningMechanism) UntrustedSignatureContents(untrustedSignature []byte) (untrustedContents []byte, shortKeyIdentifier string, err error) {
+ return gpgUntrustedSignatureContents(untrustedSignature)
+}
diff --git a/vendor/github.com/containers/image/signature/policy_config.go b/vendor/github.com/containers/image/signature/policy_config.go
new file mode 100644
index 0000000000..bc6c5e9a7d
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/policy_config.go
@@ -0,0 +1,684 @@
+ // policy_config.go handles creation of policy objects, either by parsing JSON
+// or by programs building them programmatically.
+
+// The New* constructors are intended to be a stable API. FIXME: after an independent review.
+
+// Do not invoke the internals of the JSON marshaling/unmarshaling directly.
+
+// We can't just blindly call json.Unmarshal because that would silently ignore
+// typos, and that would just not do for security policy.
+
+ // FIXME? This is by no means a user-friendly parser: No location information in error messages, no other context.
+// But at least it is not worse than blind json.Unmarshal()…
+
+package signature
+
+import (
+ "encoding/json"
+ "fmt"
+ "io/ioutil"
+ "path/filepath"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+)
+
+// systemDefaultPolicyPath is the policy path used for DefaultPolicy().
+// You can override this at build time with
+// -ldflags '-X github.com/containers/image/signature.systemDefaultPolicyPath=$your_path'
+var systemDefaultPolicyPath = builtinDefaultPolicyPath
+
+ // builtinDefaultPolicyPath is the policy path used for DefaultPolicy().
+// DO NOT change this, instead see systemDefaultPolicyPath above.
+const builtinDefaultPolicyPath = "/etc/containers/policy.json"
+
+// InvalidPolicyFormatError is returned when parsing an invalid policy configuration.
+type InvalidPolicyFormatError string
+
+func (err InvalidPolicyFormatError) Error() string {
+ return string(err)
+}
+
+// DefaultPolicy returns the default policy of the system.
+// Most applications should be using this method to get the policy configured
+// by the system administrator.
+// ctx should usually be nil, can be set to override the default.
+// NOTE: When this function returns an error, report it to the user and abort.
+// DO NOT hard-code fallback policies in your application.
+func DefaultPolicy(ctx *types.SystemContext) (*Policy, error) {
+ return NewPolicyFromFile(defaultPolicyPath(ctx))
+}
+
+// defaultPolicyPath returns a path to the default policy of the system.
+func defaultPolicyPath(ctx *types.SystemContext) string {
+ if ctx != nil {
+ if ctx.SignaturePolicyPath != "" {
+ return ctx.SignaturePolicyPath
+ }
+ if ctx.RootForImplicitAbsolutePaths != "" {
+ return filepath.Join(ctx.RootForImplicitAbsolutePaths, systemDefaultPolicyPath)
+ }
+ }
+ return systemDefaultPolicyPath
+}
+
+// NewPolicyFromFile returns a policy configured in the specified file.
+func NewPolicyFromFile(fileName string) (*Policy, error) {
+ contents, err := ioutil.ReadFile(fileName)
+ if err != nil {
+ return nil, err
+ }
+ return NewPolicyFromBytes(contents)
+}
+
+// NewPolicyFromBytes returns a policy parsed from the specified blob.
+// Use this function instead of calling json.Unmarshal directly.
+func NewPolicyFromBytes(data []byte) (*Policy, error) {
+ p := Policy{}
+ if err := json.Unmarshal(data, &p); err != nil {
+ return nil, InvalidPolicyFormatError(err.Error())
+ }
+ return &p, nil
+}
+
+// Compile-time check that Policy implements json.Unmarshaler.
+var _ json.Unmarshaler = (*Policy)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (p *Policy) UnmarshalJSON(data []byte) error {
+ *p = Policy{}
+ transports := policyTransportsMap{}
+ if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ switch key {
+ case "default":
+ return &p.Default
+ case "transports":
+ return &transports
+ default:
+ return nil
+ }
+ }); err != nil {
+ return err
+ }
+
+ if p.Default == nil {
+ return InvalidPolicyFormatError("Default policy is missing")
+ }
+ p.Transports = map[string]PolicyTransportScopes(transports)
+ return nil
+}
+
+// policyTransportsMap is a specialization of this map type for the strict JSON parsing semantics appropriate for the Policy.Transports member.
+type policyTransportsMap map[string]PolicyTransportScopes
+
+// Compile-time check that policyTransportsMap implements json.Unmarshaler.
+var _ json.Unmarshaler = (*policyTransportsMap)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *policyTransportsMap) UnmarshalJSON(data []byte) error {
+ // We can't unmarshal directly into map values because it is not possible to take an address of a map value.
+ // So, use a temporary map of pointers-to-slices and convert.
+ tmpMap := map[string]*PolicyTransportScopes{}
+ if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ // transport can be nil
+ transport := transports.Get(key)
+ // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+ if _, ok := tmpMap[key]; ok {
+ return nil
+ }
+ ptsWithTransport := policyTransportScopesWithTransport{
+ transport: transport,
+ dest: &PolicyTransportScopes{}, // This allocates a new instance on each call.
+ }
+ tmpMap[key] = ptsWithTransport.dest
+ return &ptsWithTransport
+ }); err != nil {
+ return err
+ }
+ for key, ptr := range tmpMap {
+ (*m)[key] = *ptr
+ }
+ return nil
+}
+
+ // Compile-time check that PolicyTransportScopes "implements" json.Unmarshaler.
+// we want to only use policyTransportScopesWithTransport
+var _ json.Unmarshaler = (*PolicyTransportScopes)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *PolicyTransportScopes) UnmarshalJSON(data []byte) error {
+ return errors.New("Do not try to unmarshal PolicyTransportScopes directly")
+}
+
+// policyTransportScopesWithTransport is a way to unmarshal a PolicyTransportScopes
+// while validating using a specific ImageTransport if not nil.
+type policyTransportScopesWithTransport struct {
+ transport types.ImageTransport
+ dest *PolicyTransportScopes
+}
+
+// Compile-time check that policyTransportScopesWithTransport implements json.Unmarshaler.
+var _ json.Unmarshaler = (*policyTransportScopesWithTransport)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *policyTransportScopesWithTransport) UnmarshalJSON(data []byte) error {
+ // We can't unmarshal directly into map values because it is not possible to take an address of a map value.
+ // So, use a temporary map of pointers-to-slices and convert.
+ tmpMap := map[string]*PolicyRequirements{}
+ if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ // paranoidUnmarshalJSONObject detects key duplication for us, check just to be safe.
+ if _, ok := tmpMap[key]; ok {
+ return nil
+ }
+ if key != "" && m.transport != nil {
+ if err := m.transport.ValidatePolicyConfigurationScope(key); err != nil {
+ return nil
+ }
+ }
+ ptr := &PolicyRequirements{} // This allocates a new instance on each call.
+ tmpMap[key] = ptr
+ return ptr
+ }); err != nil {
+ return err
+ }
+ for key, ptr := range tmpMap {
+ (*m.dest)[key] = *ptr
+ }
+ return nil
+}
+
+// Compile-time check that PolicyRequirements implements json.Unmarshaler.
+var _ json.Unmarshaler = (*PolicyRequirements)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (m *PolicyRequirements) UnmarshalJSON(data []byte) error {
+ reqJSONs := []json.RawMessage{}
+ if err := json.Unmarshal(data, &reqJSONs); err != nil {
+ return err
+ }
+ if len(reqJSONs) == 0 {
+ return InvalidPolicyFormatError("List of verification policy requirements must not be empty")
+ }
+ res := make([]PolicyRequirement, len(reqJSONs))
+ for i, reqJSON := range reqJSONs {
+ req, err := newPolicyRequirementFromJSON(reqJSON)
+ if err != nil {
+ return err
+ }
+ res[i] = req
+ }
+ *m = res
+ return nil
+}
+
+// newPolicyRequirementFromJSON parses JSON data into a PolicyRequirement implementation.
+func newPolicyRequirementFromJSON(data []byte) (PolicyRequirement, error) {
+ var typeField prCommon
+ if err := json.Unmarshal(data, &typeField); err != nil {
+ return nil, err
+ }
+ var res PolicyRequirement
+ switch typeField.Type {
+ case prTypeInsecureAcceptAnything:
+ res = &prInsecureAcceptAnything{}
+ case prTypeReject:
+ res = &prReject{}
+ case prTypeSignedBy:
+ res = &prSignedBy{}
+ case prTypeSignedBaseLayer:
+ res = &prSignedBaseLayer{}
+ default:
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy requirement type \"%s\"", typeField.Type))
+ }
+ if err := json.Unmarshal(data, &res); err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
+// newPRInsecureAcceptAnything is NewPRInsecureAcceptAnything, except it returns the private type.
+func newPRInsecureAcceptAnything() *prInsecureAcceptAnything {
+ return &prInsecureAcceptAnything{prCommon{Type: prTypeInsecureAcceptAnything}}
+}
+
+// NewPRInsecureAcceptAnything returns a new "insecureAcceptAnything" PolicyRequirement.
+func NewPRInsecureAcceptAnything() PolicyRequirement {
+ return newPRInsecureAcceptAnything()
+}
+
+// Compile-time check that prInsecureAcceptAnything implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prInsecureAcceptAnything)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (pr *prInsecureAcceptAnything) UnmarshalJSON(data []byte) error {
+ *pr = prInsecureAcceptAnything{}
+ var tmp prInsecureAcceptAnything
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prTypeInsecureAcceptAnything {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ *pr = *newPRInsecureAcceptAnything()
+ return nil
+}
+
+// newPRReject is NewPRReject, except it returns the private type.
+func newPRReject() *prReject {
+ return &prReject{prCommon{Type: prTypeReject}}
+}
+
+// NewPRReject returns a new "reject" PolicyRequirement.
+func NewPRReject() PolicyRequirement {
+ return newPRReject()
+}
+
+// Compile-time check that prReject implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prReject)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (pr *prReject) UnmarshalJSON(data []byte) error {
+ *pr = prReject{}
+ var tmp prReject
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prTypeReject {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ *pr = *newPRReject()
+ return nil
+}
+
+ // newPRSignedBy returns a new prSignedBy if parameters are valid.
+ func newPRSignedBy(keyType sbKeyType, keyPath string, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+ if !keyType.IsValid() {
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("invalid keyType \"%s\"", keyType))
+ }
+ if len(keyPath) > 0 && len(keyData) > 0 {
+ return nil, InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
+ }
+ if signedIdentity == nil {
+ return nil, InvalidPolicyFormatError("signedIdentity not specified")
+ }
+ return &prSignedBy{
+ prCommon: prCommon{Type: prTypeSignedBy},
+ KeyType: keyType,
+ KeyPath: keyPath,
+ KeyData: keyData,
+ SignedIdentity: signedIdentity,
+ }, nil
+ }
+
+// newPRSignedByKeyPath is NewPRSignedByKeyPath, except it returns the private type.
+func newPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+ return newPRSignedBy(keyType, keyPath, nil, signedIdentity)
+}
+
+// NewPRSignedByKeyPath returns a new "signedBy" PolicyRequirement using a KeyPath
+func NewPRSignedByKeyPath(keyType sbKeyType, keyPath string, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return newPRSignedByKeyPath(keyType, keyPath, signedIdentity)
+}
+
+// newPRSignedByKeyData is NewPRSignedByKeyData, except it returns the private type.
+func newPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (*prSignedBy, error) {
+ return newPRSignedBy(keyType, "", keyData, signedIdentity)
+}
+
+// NewPRSignedByKeyData returns a new "signedBy" PolicyRequirement using a KeyData
+func NewPRSignedByKeyData(keyType sbKeyType, keyData []byte, signedIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return newPRSignedByKeyData(keyType, keyData, signedIdentity)
+}
+
+// Compile-time check that prSignedBy implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSignedBy)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+ func (pr *prSignedBy) UnmarshalJSON(data []byte) error {
+ *pr = prSignedBy{}
+ var tmp prSignedBy
+ var gotKeyPath, gotKeyData = false, false
+ var signedIdentity json.RawMessage
+ if err := paranoidUnmarshalJSONObject(data, func(key string) interface{} {
+ switch key {
+ case "type":
+ return &tmp.Type
+ case "keyType":
+ return &tmp.KeyType
+ case "keyPath":
+ gotKeyPath = true
+ return &tmp.KeyPath
+ case "keyData":
+ gotKeyData = true
+ return &tmp.KeyData
+ case "signedIdentity":
+ return &signedIdentity
+ default:
+ return nil
+ }
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prTypeSignedBy {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ if signedIdentity == nil {
+ tmp.SignedIdentity = NewPRMMatchRepoDigestOrExact()
+ } else {
+ si, err := newPolicyReferenceMatchFromJSON(signedIdentity)
+ if err != nil {
+ return err
+ }
+ tmp.SignedIdentity = si
+ }
+
+ var res *prSignedBy
+ var err error
+ switch {
+ case gotKeyPath && gotKeyData:
+ return InvalidPolicyFormatError("keyPath and keyData cannot be used simultaneously")
+ case gotKeyPath && !gotKeyData:
+ res, err = newPRSignedByKeyPath(tmp.KeyType, tmp.KeyPath, tmp.SignedIdentity)
+ case !gotKeyPath && gotKeyData:
+ res, err = newPRSignedByKeyData(tmp.KeyType, tmp.KeyData, tmp.SignedIdentity)
+ case !gotKeyPath && !gotKeyData:
+ return InvalidPolicyFormatError("At least one of keyPath and keyData must be specified")
+ default: // Coverage: This should never happen
+ return errors.Errorf("Impossible keyPath/keyData presence combination!?")
+ }
+ if err != nil {
+ return err
+ }
+ *pr = *res
+
+ return nil
+ }
+
+// IsValid returns true iff kt is a recognized value
+func (kt sbKeyType) IsValid() bool {
+ switch kt {
+ case SBKeyTypeGPGKeys, SBKeyTypeSignedByGPGKeys,
+ SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
+ return true
+ default:
+ return false
+ }
+}
+
+// Compile-time check that sbKeyType implements json.Unmarshaler.
+var _ json.Unmarshaler = (*sbKeyType)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (kt *sbKeyType) UnmarshalJSON(data []byte) error {
+ *kt = sbKeyType("")
+ var s string
+ if err := json.Unmarshal(data, &s); err != nil {
+ return err
+ }
+ if !sbKeyType(s).IsValid() {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unrecognized keyType value \"%s\"", s))
+ }
+ *kt = sbKeyType(s)
+ return nil
+}
+
+// newPRSignedBaseLayer is NewPRSignedBaseLayer, except it returns the private type.
+func newPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (*prSignedBaseLayer, error) {
+ if baseLayerIdentity == nil {
+ return nil, InvalidPolicyFormatError("baseLayerIdentity not specified")
+ }
+ return &prSignedBaseLayer{
+ prCommon: prCommon{Type: prTypeSignedBaseLayer},
+ BaseLayerIdentity: baseLayerIdentity,
+ }, nil
+}
+
+// NewPRSignedBaseLayer returns a new "signedBaseLayer" PolicyRequirement.
+func NewPRSignedBaseLayer(baseLayerIdentity PolicyReferenceMatch) (PolicyRequirement, error) {
+ return newPRSignedBaseLayer(baseLayerIdentity)
+}
+
+// Compile-time check that prSignedBaseLayer implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prSignedBaseLayer)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (pr *prSignedBaseLayer) UnmarshalJSON(data []byte) error {
+ *pr = prSignedBaseLayer{}
+ var tmp prSignedBaseLayer
+ var baseLayerIdentity json.RawMessage
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ "baseLayerIdentity": &baseLayerIdentity,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prTypeSignedBaseLayer {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ bli, err := newPolicyReferenceMatchFromJSON(baseLayerIdentity)
+ if err != nil {
+ return err
+ }
+ res, err := newPRSignedBaseLayer(bli)
+ if err != nil {
+ // Coverage: This should never happen, newPolicyReferenceMatchFromJSON has ensured bli is valid.
+ return err
+ }
+ *pr = *res
+ return nil
+}
+
+// newPolicyReferenceMatchFromJSON parses JSON data into a PolicyReferenceMatch implementation.
+func newPolicyReferenceMatchFromJSON(data []byte) (PolicyReferenceMatch, error) {
+ var typeField prmCommon
+ if err := json.Unmarshal(data, &typeField); err != nil {
+ return nil, err
+ }
+ var res PolicyReferenceMatch
+ switch typeField.Type {
+ case prmTypeMatchExact:
+ res = &prmMatchExact{}
+ case prmTypeMatchRepoDigestOrExact:
+ res = &prmMatchRepoDigestOrExact{}
+ case prmTypeMatchRepository:
+ res = &prmMatchRepository{}
+ case prmTypeExactReference:
+ res = &prmExactReference{}
+ case prmTypeExactRepository:
+ res = &prmExactRepository{}
+ default:
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("Unknown policy reference match type \"%s\"", typeField.Type))
+ }
+ if err := json.Unmarshal(data, &res); err != nil {
+ return nil, err
+ }
+ return res, nil
+}
+
+ // newPRMMatchExact is NewPRMMatchExact, except it returns the private type.
+func newPRMMatchExact() *prmMatchExact {
+ return &prmMatchExact{prmCommon{Type: prmTypeMatchExact}}
+}
+
+// NewPRMMatchExact returns a new "matchExact" PolicyReferenceMatch.
+func NewPRMMatchExact() PolicyReferenceMatch {
+ return newPRMMatchExact()
+}
+
+// Compile-time check that prmMatchExact implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchExact)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmMatchExact) UnmarshalJSON(data []byte) error {
+ *prm = prmMatchExact{}
+ var tmp prmMatchExact
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeMatchExact {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ *prm = *newPRMMatchExact()
+ return nil
+}
+
+ // newPRMMatchRepoDigestOrExact is NewPRMMatchRepoDigestOrExact, except it returns the private type.
+func newPRMMatchRepoDigestOrExact() *prmMatchRepoDigestOrExact {
+ return &prmMatchRepoDigestOrExact{prmCommon{Type: prmTypeMatchRepoDigestOrExact}}
+}
+
+// NewPRMMatchRepoDigestOrExact returns a new "matchRepoDigestOrExact" PolicyReferenceMatch.
+func NewPRMMatchRepoDigestOrExact() PolicyReferenceMatch {
+ return newPRMMatchRepoDigestOrExact()
+}
+
+// Compile-time check that prmMatchRepoDigestOrExact implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchRepoDigestOrExact)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmMatchRepoDigestOrExact) UnmarshalJSON(data []byte) error {
+ *prm = prmMatchRepoDigestOrExact{}
+ var tmp prmMatchRepoDigestOrExact
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeMatchRepoDigestOrExact {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ *prm = *newPRMMatchRepoDigestOrExact()
+ return nil
+}
+
+ // newPRMMatchRepository is NewPRMMatchRepository, except it returns the private type.
+func newPRMMatchRepository() *prmMatchRepository {
+ return &prmMatchRepository{prmCommon{Type: prmTypeMatchRepository}}
+}
+
+// NewPRMMatchRepository returns a new "matchRepository" PolicyReferenceMatch.
+func NewPRMMatchRepository() PolicyReferenceMatch {
+ return newPRMMatchRepository()
+}
+
+// Compile-time check that prmMatchRepository implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmMatchRepository)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmMatchRepository) UnmarshalJSON(data []byte) error {
+ *prm = prmMatchRepository{}
+ var tmp prmMatchRepository
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeMatchRepository {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+ *prm = *newPRMMatchRepository()
+ return nil
+}
+
+ // newPRMExactReference is NewPRMExactReference, except it returns the private type.
+func newPRMExactReference(dockerReference string) (*prmExactReference, error) {
+ ref, err := reference.ParseNormalizedNamed(dockerReference)
+ if err != nil {
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerReference %s: %s", dockerReference, err.Error()))
+ }
+ if reference.IsNameOnly(ref) {
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("dockerReference %s contains neither a tag nor digest", dockerReference))
+ }
+ return &prmExactReference{
+ prmCommon: prmCommon{Type: prmTypeExactReference},
+ DockerReference: dockerReference,
+ }, nil
+}
+
+// NewPRMExactReference returns a new "exactReference" PolicyReferenceMatch.
+func NewPRMExactReference(dockerReference string) (PolicyReferenceMatch, error) {
+ return newPRMExactReference(dockerReference)
+}
+
+// Compile-time check that prmExactReference implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmExactReference)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmExactReference) UnmarshalJSON(data []byte) error {
+ *prm = prmExactReference{}
+ var tmp prmExactReference
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ "dockerReference": &tmp.DockerReference,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeExactReference {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+
+ res, err := newPRMExactReference(tmp.DockerReference)
+ if err != nil {
+ return err
+ }
+ *prm = *res
+ return nil
+}
+
+ // newPRMExactRepository is NewPRMExactRepository, except it returns the private type.
+func newPRMExactRepository(dockerRepository string) (*prmExactRepository, error) {
+ if _, err := reference.ParseNormalizedNamed(dockerRepository); err != nil {
+ return nil, InvalidPolicyFormatError(fmt.Sprintf("Invalid format of dockerRepository %s: %s", dockerRepository, err.Error()))
+ }
+ return &prmExactRepository{
+ prmCommon: prmCommon{Type: prmTypeExactRepository},
+ DockerRepository: dockerRepository,
+ }, nil
+}
+
+// NewPRMExactRepository returns a new "exactRepository" PolicyRepositoryMatch.
+func NewPRMExactRepository(dockerRepository string) (PolicyReferenceMatch, error) {
+ return newPRMExactRepository(dockerRepository)
+}
+
+// Compile-time check that prmExactRepository implements json.Unmarshaler.
+var _ json.Unmarshaler = (*prmExactRepository)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface.
+func (prm *prmExactRepository) UnmarshalJSON(data []byte) error {
+ *prm = prmExactRepository{}
+ var tmp prmExactRepository
+ if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+ "type": &tmp.Type,
+ "dockerRepository": &tmp.DockerRepository,
+ }); err != nil {
+ return err
+ }
+
+ if tmp.Type != prmTypeExactRepository {
+ return InvalidPolicyFormatError(fmt.Sprintf("Unexpected policy requirement type \"%s\"", tmp.Type))
+ }
+
+ res, err := newPRMExactRepository(tmp.DockerRepository)
+ if err != nil {
+ return err
+ }
+ *prm = *res
+ return nil
+}
diff --git a/vendor/github.com/containers/image/signature/policy_eval.go b/vendor/github.com/containers/image/signature/policy_eval.go
new file mode 100644
index 0000000000..408510cdb6
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/policy_eval.go
@@ -0,0 +1,289 @@
+// This defines the top-level policy evaluation API.
+ // To the extent possible, the interface of the functions provided
+// here is intended to be completely unambiguous, and stable for users
+// to rely on.
+
+package signature
+
+import (
+ "context"
+
+ "github.com/Sirupsen/logrus"
+ "github.com/containers/image/types"
+ "github.com/pkg/errors"
+)
+
+// PolicyRequirementError is an explanatory text for rejecting a signature or an image.
+type PolicyRequirementError string
+
+func (err PolicyRequirementError) Error() string {
+ return string(err)
+}
+
+// signatureAcceptanceResult is the principal value returned by isSignatureAuthorAccepted.
+type signatureAcceptanceResult string
+
+const (
+ sarAccepted signatureAcceptanceResult = "sarAccepted"
+ sarRejected signatureAcceptanceResult = "sarRejected"
+ sarUnknown signatureAcceptanceResult = "sarUnknown"
+)
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+type PolicyRequirement interface {
+ // FIXME: For speed, we should support creating per-context state (not stored in the PolicyRequirement), to cache
+ // costly initialization like creating temporary GPG home directories and reading files.
+ // Setup() (someState, error)
+ // Then, the operations below would be done on the someState object, not directly on a PolicyRequirement.
+
+ // isSignatureAuthorAccepted, given an image and a signature blob, returns:
+ // - sarAccepted if the signature has been verified against the appropriate public key
+ // (where "appropriate public key" may depend on the contents of the signature);
+ // in that case a parsed Signature should be returned.
+ // - sarRejected if the signature has not been verified;
+ // in that case error must be non-nil, and should be an PolicyRequirementError if evaluation
+ // succeeded but the result was rejection.
+ // - sarUnknown if this PolicyRequirement does not deal with signatures.
+ // NOTE: sarUnknown should not be returned if this PolicyRequirement should make a decision but something failed.
+ // Returning sarUnknown and a non-nil error value is invalid.
+ // WARNING: This makes the signature contents acceptable for further processing,
+ // but it does not necessarily mean that the contents of the signature are
+ // consistent with local policy.
+ // For example:
+ // - Do not use a true value to determine whether to run
+ // a container based on this image; use IsRunningImageAllowed instead.
+ // - Just because a signature is accepted does not automatically mean the contents of the
+ // signature are authorized to run code as root, or to affect system or cluster configuration.
+ isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error)
+
+ // isRunningImageAllowed returns true if the requirement allows running an image.
+ // If it returns false, err must be non-nil, and should be an PolicyRequirementError if evaluation
+ // succeeded but the result was rejection.
+ // WARNING: This validates signatures and the manifest, but does not download or validate the
+ // layers. Users must validate that the layers match their expected digests.
+ isRunningImageAllowed(image types.UnparsedImage) (bool, error)
+}
+
+// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
+// The type is public, but its implementation is private.
+type PolicyReferenceMatch interface {
+ // matchesDockerReference decides whether a specific image identity is accepted for an image
+ // (or, usually, for the image's Reference().DockerReference()). Note that
+ // image.Reference().DockerReference() may be nil.
+ matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool
+}
+
+// PolicyContext encapsulates a policy and possible cached state
+// for speeding up its evaluation.
+type PolicyContext struct {
+ Policy *Policy
+ state policyContextState // Internal consistency checking
+}
+
+// policyContextState is used internally to verify the users are not misusing a PolicyContext.
+type policyContextState string
+
+const (
+ pcInvalid policyContextState = ""
+ pcInitializing policyContextState = "Initializing"
+ pcReady policyContextState = "Ready"
+ pcInUse policyContextState = "InUse"
+ pcDestroying policyContextState = "Destroying"
+ pcDestroyed policyContextState = "Destroyed"
+)
+
+// changeContextState changes pc.state, or fails if the state is unexpected
+func (pc *PolicyContext) changeState(expected, new policyContextState) error {
+ if pc.state != expected {
+ return errors.Errorf(`"Invalid PolicyContext state, expected "%s", found "%s"`, expected, pc.state)
+ }
+ pc.state = new
+ return nil
+}
+
+// NewPolicyContext sets up and initializes a context for the specified policy.
+// The policy must not be modified while the context exists. FIXME: make a deep copy?
+// If this function succeeds, the caller should call PolicyContext.Destroy() when done.
+func NewPolicyContext(policy *Policy) (*PolicyContext, error) {
+ pc := &PolicyContext{Policy: policy, state: pcInitializing}
+ // FIXME: initialize
+ if err := pc.changeState(pcInitializing, pcReady); err != nil {
+ // Huh?! This should never fail, we didn't give the pointer to anybody.
+ // Just give up and leave unclean state around.
+ return nil, err
+ }
+ return pc, nil
+}
+
+// Destroy should be called when the user of the context is done with it.
+func (pc *PolicyContext) Destroy() error {
+ if err := pc.changeState(pcReady, pcDestroying); err != nil {
+ return err
+ }
+ // FIXME: destroy
+ return pc.changeState(pcDestroying, pcDestroyed)
+}
+
+// policyIdentityLogName returns a string description of the image identity for policy purposes.
+// ONLY use this for log messages, not for any decisions!
+func policyIdentityLogName(ref types.ImageReference) string {
+ return ref.Transport().Name() + ":" + ref.PolicyConfigurationIdentity()
+}
+
+// requirementsForImageRef selects the appropriate requirements for ref.
+func (pc *PolicyContext) requirementsForImageRef(ref types.ImageReference) PolicyRequirements {
+ // Do we have a PolicyTransportScopes for this transport?
+ transportName := ref.Transport().Name()
+ if transportScopes, ok := pc.Policy.Transports[transportName]; ok {
+ // Look for a full match.
+ identity := ref.PolicyConfigurationIdentity()
+ if req, ok := transportScopes[identity]; ok {
+ logrus.Debugf(` Using transport "%s" policy section %s`, transportName, identity)
+ return req
+ }
+
+ // Look for a match of the possible parent namespaces.
+ for _, name := range ref.PolicyConfigurationNamespaces() {
+ if req, ok := transportScopes[name]; ok {
+ logrus.Debugf(` Using transport "%s" specific policy section %s`, transportName, name)
+ return req
+ }
+ }
+
+ // Look for a default match for the transport.
+ if req, ok := transportScopes[""]; ok {
+ logrus.Debugf(` Using transport "%s" policy section ""`, transportName)
+ return req
+ }
+ }
+
+ logrus.Debugf(" Using default policy section")
+ return pc.Policy.Default
+}
+
+// GetSignaturesWithAcceptedAuthor returns those signatures from an image
+// for which the policy accepts the author (and which have been successfully
+// verified).
+// NOTE: This may legitimately return an empty list and no error, if the image
+// has no signatures or only invalid signatures.
+ // WARNING: This makes the signature contents acceptable for further processing,
+// but it does not necessarily mean that the contents of the signature are
+// consistent with local policy.
+// For example:
+ // - Do not use the existence of an accepted signature to determine whether to run
+// a container based on this image; use IsRunningImageAllowed instead.
+// - Just because a signature is accepted does not automatically mean the contents of the
+// signature are authorized to run code as root, or to affect system or cluster configuration.
+func (pc *PolicyContext) GetSignaturesWithAcceptedAuthor(image types.UnparsedImage) (sigs []*Signature, finalErr error) {
+ if err := pc.changeState(pcReady, pcInUse); err != nil {
+ return nil, err
+ }
+ defer func() {
+ if err := pc.changeState(pcInUse, pcReady); err != nil {
+ sigs = nil
+ finalErr = err
+ }
+ }()
+
+ logrus.Debugf("GetSignaturesWithAcceptedAuthor for image %s", policyIdentityLogName(image.Reference()))
+ reqs := pc.requirementsForImageRef(image.Reference())
+
+ // FIXME: rename Signatures to UnverifiedSignatures
+ // FIXME: pass context.Context
+ unverifiedSignatures, err := image.Signatures(context.TODO())
+ if err != nil {
+ return nil, err
+ }
+
+ res := make([]*Signature, 0, len(unverifiedSignatures))
+ for sigNumber, sig := range unverifiedSignatures {
+ var acceptedSig *Signature // non-nil if accepted
+ rejected := false
+ // FIXME? Say more about the contents of the signature, i.e. parse it even before verification?!
+ logrus.Debugf("Evaluating signature %d:", sigNumber)
+ interpretingReqs:
+ for reqNumber, req := range reqs {
+ // FIXME: Log the requirement itself? For now, we use just the number.
+ // FIXME: supply state
+ switch res, as, err := req.isSignatureAuthorAccepted(image, sig); res {
+ case sarAccepted:
+ if as == nil { // Coverage: this should never happen
+ logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but no parsed contents", reqNumber)
+ rejected = true
+ break interpretingReqs
+ }
+ logrus.Debugf(" Requirement %d: signature accepted", reqNumber)
+ if acceptedSig == nil {
+ acceptedSig = as
+ } else if *as != *acceptedSig { // Coverage: this should never happen
+ // Huh?! Two ways of verifying the same signature blob resulted in two different parses of its already accepted contents?
+ logrus.Debugf(" Requirement %d: internal inconsistency: sarAccepted but different parsed contents", reqNumber)
+ rejected = true
+ acceptedSig = nil
+ break interpretingReqs
+ }
+ case sarRejected:
+ logrus.Debugf(" Requirement %d: signature rejected: %s", reqNumber, err.Error())
+ rejected = true
+ break interpretingReqs
+ case sarUnknown:
+ if err != nil { // Coverage: this should never happen
+ logrus.Debugf(" Requirement %d: internal inconsistency: sarUnknown but an error message %s", reqNumber, err.Error())
+ rejected = true
+ break interpretingReqs
+ }
+ logrus.Debugf(" Requirement %d: signature state unknown, continuing", reqNumber)
+ default: // Coverage: this should never happen
+ logrus.Debugf(" Requirement %d: internal inconsistency: unknown result %#v", reqNumber, string(res))
+ rejected = true
+ break interpretingReqs
+ }
+ }
+ // This also handles the (invalid) case of empty reqs, by rejecting the signature.
+ if acceptedSig != nil && !rejected {
+ logrus.Debugf(" Overall: OK, signature accepted")
+ res = append(res, acceptedSig)
+ } else {
+ logrus.Debugf(" Overall: Signature not accepted")
+ }
+ }
+ return res, nil
+}
+
+// IsRunningImageAllowed returns true iff the policy allows running the image.
+// If it returns false, err must be non-nil, and should be a PolicyRequirementError if evaluation
+// succeeded but the result was rejection.
+// WARNING: This validates signatures and the manifest, but does not download or validate the
+// layers. Users must validate that the layers match their expected digests.
+func (pc *PolicyContext) IsRunningImageAllowed(image types.UnparsedImage) (res bool, finalErr error) {
+	// Mark the context as in use, rejecting concurrent/recursive use.
+	if err := pc.changeState(pcReady, pcInUse); err != nil {
+		return false, err
+	}
+	defer func() {
+		// Return the context to the ready state; if that fails, discard the result as well.
+		if err := pc.changeState(pcInUse, pcReady); err != nil {
+			res = false
+			finalErr = err
+		}
+	}()
+
+	logrus.Debugf("IsRunningImageAllowed for image %s", policyIdentityLogName(image.Reference()))
+	reqs := pc.requirementsForImageRef(image.Reference())
+
+	// An empty requirements list is an invalid policy, not "allow everything".
+	if len(reqs) == 0 {
+		return false, PolicyRequirementError("List of verification policy requirements must not be empty")
+	}
+
+	// Requirements are ANDed: every one of them must allow the image.
+	for reqNumber, req := range reqs {
+		// FIXME: supply state
+		allowed, err := req.isRunningImageAllowed(image)
+		if !allowed {
+			logrus.Debugf("Requirement %d: denied, done", reqNumber)
+			return false, err
+		}
+		logrus.Debugf(" Requirement %d: allowed", reqNumber)
+	}
+	// We have tested that len(reqs) != 0, so at least one req must have explicitly allowed this image.
+	logrus.Debugf("Overall: allowed")
+	return true, nil
+}
diff --git a/vendor/github.com/containers/image/signature/policy_eval_baselayer.go b/vendor/github.com/containers/image/signature/policy_eval_baselayer.go
new file mode 100644
index 0000000000..dec84c93c1
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/policy_eval_baselayer.go
@@ -0,0 +1,18 @@
+// Policy evaluation for prSignedBaseLayer.
+
+package signature
+
+import (
+ "github.com/Sirupsen/logrus"
+ "github.com/containers/image/types"
+)
+
+// isSignatureAuthorAccepted for prSignedBaseLayer never votes on individual
+// signatures; it always returns sarUnknown so other requirements (if any) decide.
+func (pr *prSignedBaseLayer) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+	return sarUnknown, nil, nil
+}
+
+// isRunningImageAllowed for prSignedBaseLayer is not implemented yet and
+// unconditionally rejects the image.
+func (pr *prSignedBaseLayer) isRunningImageAllowed(image types.UnparsedImage) (bool, error) {
+	// FIXME? Reject this at policy parsing time already?
+	logrus.Errorf("signedBaseLayer not implemented yet!")
+	return false, PolicyRequirementError("signedBaseLayer not implemented yet!")
+}
diff --git a/vendor/github.com/containers/image/signature/policy_eval_signedby.go b/vendor/github.com/containers/image/signature/policy_eval_signedby.go
new file mode 100644
index 0000000000..56665124c0
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/policy_eval_signedby.go
@@ -0,0 +1,131 @@
+// Policy evaluation for prSignedBy.
+
+package signature
+
+import (
+ "context"
+ "fmt"
+ "io/ioutil"
+ "strings"
+
+ "github.com/pkg/errors"
+
+ "github.com/containers/image/manifest"
+ "github.com/containers/image/types"
+ "github.com/opencontainers/go-digest"
+)
+
+// isSignatureAuthorAccepted verifies sig against the trusted keys configured
+// in pr (KeyPath or KeyData, interpreted per KeyType), and checks that the
+// signed identity and manifest digest match image.
+// It returns sarAccepted together with the parsed Signature contents on success.
+func (pr *prSignedBy) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+	switch pr.KeyType {
+	case SBKeyTypeGPGKeys:
+	case SBKeyTypeSignedByGPGKeys, SBKeyTypeX509Certificates, SBKeyTypeSignedByX509CAs:
+		// FIXME? Reject this at policy parsing time already?
+		// Bug fix: the error string previously started with a stray `"` inside the backtick literal.
+		return sarRejected, nil, errors.Errorf(`Unimplemented "keyType" value "%s"`, string(pr.KeyType))
+	default:
+		// This should never happen, newPRSignedBy ensures KeyType.IsValid()
+		return sarRejected, nil, errors.Errorf(`Unknown "keyType" value "%s"`, string(pr.KeyType))
+	}
+
+	// Exactly one of KeyPath and KeyData must be set (enforced at policy construction).
+	if pr.KeyPath != "" && pr.KeyData != nil {
+		return sarRejected, nil, errors.New(`Internal inconsistency: both "keyPath" and "keyData" specified`)
+	}
+	// FIXME: move this to per-context initialization
+	var data []byte
+	if pr.KeyData != nil {
+		data = pr.KeyData
+	} else {
+		d, err := ioutil.ReadFile(pr.KeyPath)
+		if err != nil {
+			return sarRejected, nil, err
+		}
+		data = d
+	}
+
+	// FIXME: move this to per-context initialization
+	mech, trustedIdentities, err := NewEphemeralGPGSigningMechanism(data)
+	if err != nil {
+		return sarRejected, nil, err
+	}
+	defer mech.Close()
+	if len(trustedIdentities) == 0 {
+		return sarRejected, nil, PolicyRequirementError("No public keys imported")
+	}
+
+	// Verify the cryptographic signature, then validate the signed contents
+	// (key identity, claimed identity, manifest digest) against the policy.
+	signature, err := verifyAndExtractSignature(mech, sig, signatureAcceptanceRules{
+		validateKeyIdentity: func(keyIdentity string) error {
+			for _, trustedIdentity := range trustedIdentities {
+				if keyIdentity == trustedIdentity {
+					return nil
+				}
+			}
+			// Coverage: We use a private GPG home directory and only import trusted keys, so this should
+			// not be reachable.
+			return PolicyRequirementError(fmt.Sprintf("Signature by key %s is not accepted", keyIdentity))
+		},
+		validateSignedDockerReference: func(ref string) error {
+			if !pr.SignedIdentity.matchesDockerReference(image, ref) {
+				return PolicyRequirementError(fmt.Sprintf("Signature for identity %s is not accepted", ref))
+			}
+			return nil
+		},
+		validateSignedDockerManifestDigest: func(digest digest.Digest) error {
+			m, _, err := image.Manifest()
+			if err != nil {
+				return err
+			}
+			digestMatches, err := manifest.MatchesDigest(m, digest)
+			if err != nil {
+				return err
+			}
+			if !digestMatches {
+				return PolicyRequirementError(fmt.Sprintf("Signature for digest %s does not match", digest))
+			}
+			return nil
+		},
+	})
+	if err != nil {
+		return sarRejected, nil, err
+	}
+
+	return sarAccepted, signature, nil
+}
+
+// isRunningImageAllowed requires at least one signature of the image to be
+// accepted by isSignatureAuthorAccepted; if none is, it collects the
+// per-signature rejection reasons into a single summary error.
+func (pr *prSignedBy) isRunningImageAllowed(image types.UnparsedImage) (bool, error) {
+	// FIXME: pass context.Context
+	sigs, err := image.Signatures(context.TODO())
+	if err != nil {
+		return false, err
+	}
+	var rejections []error
+	for _, s := range sigs {
+		var reason error
+		switch res, _, err := pr.isSignatureAuthorAccepted(image, s); res {
+		case sarAccepted:
+			// One accepted signature is enough.
+			return true, nil
+		case sarRejected:
+			reason = err
+		case sarUnknown:
+			// Huh?! This should not happen at all; treat it as any other invalid value.
+			fallthrough
+		default:
+			reason = errors.Errorf(`Internal error: Unexpected signature verification result "%s"`, string(res))
+		}
+		rejections = append(rejections, reason)
+	}
+	// No signature was accepted; summarize the rejections into one error.
+	var summary error
+	switch len(rejections) {
+	case 0:
+		summary = PolicyRequirementError("A signature was required, but no signature exists")
+	case 1:
+		summary = rejections[0]
+	default:
+		var msgs []string
+		for _, e := range rejections {
+			msgs = append(msgs, e.Error())
+		}
+		summary = PolicyRequirementError(fmt.Sprintf("None of the signatures were accepted, reasons: %s",
+			strings.Join(msgs, "; ")))
+	}
+	return false, summary
+}
diff --git a/vendor/github.com/containers/image/signature/policy_eval_simple.go b/vendor/github.com/containers/image/signature/policy_eval_simple.go
new file mode 100644
index 0000000000..19a71e6d99
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/policy_eval_simple.go
@@ -0,0 +1,28 @@
+// Policy evaluation for the various simple PolicyRequirement types.
+
+package signature
+
+import (
+ "fmt"
+
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+)
+
+// isSignatureAuthorAccepted for prInsecureAcceptAnything never votes on
+// individual signatures; it defers with sarUnknown.
+func (pr *prInsecureAcceptAnything) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+	// prInsecureAcceptAnything semantics: Every image is allowed to run,
+	// but this does not consider the signature as verified.
+	return sarUnknown, nil, nil
+}
+
+// isRunningImageAllowed for prInsecureAcceptAnything allows every image.
+func (pr *prInsecureAcceptAnything) isRunningImageAllowed(image types.UnparsedImage) (bool, error) {
+	return true, nil
+}
+
+// isSignatureAuthorAccepted for prReject rejects every signature.
+func (pr *prReject) isSignatureAuthorAccepted(image types.UnparsedImage, sig []byte) (signatureAcceptanceResult, *Signature, error) {
+	return sarRejected, nil, PolicyRequirementError(fmt.Sprintf("Any signatures for image %s are rejected by policy.", transports.ImageName(image.Reference())))
+}
+
+// isRunningImageAllowed for prReject rejects every image.
+func (pr *prReject) isRunningImageAllowed(image types.UnparsedImage) (bool, error) {
+	return false, PolicyRequirementError(fmt.Sprintf("Running image %s is rejected by policy.", transports.ImageName(image.Reference())))
+}
diff --git a/vendor/github.com/containers/image/signature/policy_reference_match.go b/vendor/github.com/containers/image/signature/policy_reference_match.go
new file mode 100644
index 0000000000..a8dad67701
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/policy_reference_match.go
@@ -0,0 +1,101 @@
+// PolicyReferenceMatch implementations.
+
+package signature
+
+import (
+ "fmt"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/containers/image/transports"
+ "github.com/containers/image/types"
+)
+
+// parseImageAndDockerReference converts an image and a reference string into two parsed entities, failing on any error and handling unidentified images.
+// The first return value is the image's own Docker reference, the second is the parsed form of s2 (the reference claimed by a signature).
+func parseImageAndDockerReference(image types.UnparsedImage, s2 string) (reference.Named, reference.Named, error) {
+	r1 := image.Reference().DockerReference()
+	if r1 == nil {
+		// Not every transport can provide a Docker reference identity for its images.
+		return nil, nil, PolicyRequirementError(fmt.Sprintf("Docker reference match attempted on image %s with no known Docker reference identity",
+			transports.ImageName(image.Reference())))
+	}
+	r2, err := reference.ParseNormalizedNamed(s2)
+	if err != nil {
+		return nil, nil, err
+	}
+	return r1, r2, nil
+}
+
+// matchesDockerReference for prmMatchExact requires the signed reference to
+// match the image's Docker reference exactly, including the tag or digest.
+func (prm *prmMatchExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
+	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
+		return false
+	}
+	return signature.String() == intended.String()
+}
+
+// matchesDockerReference for prmMatchRepoDigestOrExact requires an exact match
+// for tagged image references; for digest (canonical) references it only
+// requires the repository names to match.
+func (prm *prmMatchRepoDigestOrExact) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+
+	// Do not add default tags: image.Reference().DockerReference() should contain it already, and signatureDockerReference should be exact; so, verify that now.
+	if reference.IsNameOnly(signature) {
+		return false
+	}
+	switch intended.(type) {
+	case reference.NamedTagged: // Includes the case when intended has both a tag and a digest.
+		return signature.String() == intended.String()
+	case reference.Canonical:
+		// We don’t actually compare the manifest digest against the signature here; that happens in prSignedBy, and in UnparsedImage.Manifest.
+		// Because UnparsedImage.Manifest verifies the intended.Digest() against the manifest, and prSignedBy verifies the signature digest against the manifest,
+		// we know that signature digest matches intended.Digest() (but intended.Digest() and signature digest may use different algorithms)
+		return signature.Name() == intended.Name()
+	default: // !reference.IsNameOnly(intended)
+		return false
+	}
+}
+
+// matchesDockerReference for prmMatchRepository compares only the repository
+// names; tags and digests are ignored.
+func (prm *prmMatchRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseImageAndDockerReference(image, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	return signature.Name() == intended.Name()
+}
+
+// parseDockerReferences converts two reference strings into parsed entities, failing on any error.
+// Both strings are normalized the same way, via reference.ParseNormalizedNamed.
+func parseDockerReferences(s1, s2 string) (reference.Named, reference.Named, error) {
+	r1, err := reference.ParseNormalizedNamed(s1)
+	if err != nil {
+		return nil, nil, err
+	}
+	r2, err := reference.ParseNormalizedNamed(s2)
+	if err != nil {
+		return nil, nil, err
+	}
+	return r1, r2, nil
+}
+
+// matchesDockerReference for prmExactReference compares the signed reference
+// against the fixed prm.DockerReference, requiring an exact (tagged or digested) match.
+// Note that the image itself is not consulted.
+func (prm *prmExactReference) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseDockerReferences(prm.DockerReference, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	// prm.DockerReference and signatureDockerReference should be exact; so, verify that now.
+	if reference.IsNameOnly(intended) || reference.IsNameOnly(signature) {
+		return false
+	}
+	return signature.String() == intended.String()
+}
+
+// matchesDockerReference for prmExactRepository compares only the repository
+// name of the signed reference against the fixed prm.DockerRepository.
+func (prm *prmExactRepository) matchesDockerReference(image types.UnparsedImage, signatureDockerReference string) bool {
+	intended, signature, err := parseDockerReferences(prm.DockerRepository, signatureDockerReference)
+	if err != nil {
+		return false
+	}
+	return signature.Name() == intended.Name()
+}
diff --git a/vendor/github.com/containers/image/signature/policy_types.go b/vendor/github.com/containers/image/signature/policy_types.go
new file mode 100644
index 0000000000..4cd770f11c
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/policy_types.go
@@ -0,0 +1,152 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+// This defines types used to represent a signature verification policy in memory.
+// Do not use the private types directly; either parse a configuration file, or construct a Policy from PolicyRequirements
+// built using the constructor functions provided in policy_config.go.
+
+package signature
+
+// NOTE: Keep this in sync with docs/policy.json.md!
+
+// Policy defines requirements for considering a signature, or an image, valid.
+type Policy struct {
+	// Default applies to any image which does not have a matching policy in Transports.
+	// Note that this can happen even if a matching PolicyTransportScopes exists in Transports
+	// if the image matches none of the scopes.
+	Default PolicyRequirements `json:"default"`
+	// Transports maps a transport name (types.ImageTransport.Name) to scope-specific requirements.
+	Transports map[string]PolicyTransportScopes `json:"transports"`
+}
+
+// PolicyTransportScopes defines policies for images for a specific transport,
+// for various scopes, the map keys.
+// Scopes are defined by the transport (types.ImageReference.PolicyConfigurationIdentity etc.);
+// there is one scope precisely matching to a single image, and namespace scopes as prefixes
+// of the single-image scope. (e.g. hostname[/zero[/or[/more[/namespaces[/individualimage]]]]])
+// The empty scope, if it exists, is considered a parent namespace of all other scopes.
+// Most specific scope wins, duplication is prohibited (hard failure).
+type PolicyTransportScopes map[string]PolicyRequirements
+
+// PolicyRequirements is a set of requirements applying to a set of images; each of them must be satisfied (though perhaps each by a different signature).
+// Must not be empty, frequently will only contain a single element.
+type PolicyRequirements []PolicyRequirement
+
+// PolicyRequirement is a rule which must be satisfied by at least one of the signatures of an image.
+// The type is public, but its definition is private.
+
+// prCommon is the common type field in a JSON encoding of PolicyRequirement.
+type prCommon struct {
+	Type prTypeIdentifier `json:"type"`
+}
+
+// prTypeIdentifier is a string designating a kind of a PolicyRequirement.
+type prTypeIdentifier string
+
+// Known PolicyRequirement types; these are the values of the JSON "type" field.
+const (
+	prTypeInsecureAcceptAnything prTypeIdentifier = "insecureAcceptAnything"
+	prTypeReject                 prTypeIdentifier = "reject"
+	prTypeSignedBy               prTypeIdentifier = "signedBy"
+	prTypeSignedBaseLayer        prTypeIdentifier = "signedBaseLayer"
+)
+
+// prInsecureAcceptAnything is a PolicyRequirement with type = prTypeInsecureAcceptAnything:
+// every image is allowed to run.
+// Note that because PolicyRequirements are implicitly ANDed, this is necessary only if it is the only rule (to make the list non-empty and the policy explicit).
+// NOTE: This allows the image to run; it DOES NOT consider the signature verified (per IsSignatureAuthorAccepted).
+// FIXME? Better name?
+type prInsecureAcceptAnything struct {
+	prCommon
+}
+
+// prReject is a PolicyRequirement with type = prTypeReject: every image is rejected.
+type prReject struct {
+	prCommon
+}
+
+// prSignedBy is a PolicyRequirement with type = prTypeSignedBy: the image is signed by trusted keys for a specified identity
+type prSignedBy struct {
+	prCommon
+
+	// KeyType specifies what kind of key reference KeyPath/KeyData is.
+	// Acceptable values are “GPGKeys” | “signedByGPGKeys” | “X.509Certificates” | “signedByX.509CAs”
+	// FIXME: eventually also support GPGTOFU, X.509TOFU, with KeyPath only
+	KeyType sbKeyType `json:"keyType"`
+
+	// KeyPath is a pathname to a local file containing the trusted key(s). Exactly one of KeyPath and KeyData must be specified.
+	KeyPath string `json:"keyPath,omitempty"`
+	// KeyData contains the trusted key(s), base64-encoded. Exactly one of KeyPath and KeyData must be specified.
+	KeyData []byte `json:"keyData,omitempty"`
+
+	// SignedIdentity specifies what image identity the signature must be claiming about the image.
+	// Defaults to "match-exact" if not specified.
+	SignedIdentity PolicyReferenceMatch `json:"signedIdentity"`
+}
+
+// sbKeyType are the allowed values for prSignedBy.KeyType.
+// The string values are the exact "keyType" strings used in the JSON policy format.
+type sbKeyType string
+
+const (
+	// SBKeyTypeGPGKeys refers to keys contained in a GPG keyring
+	SBKeyTypeGPGKeys sbKeyType = "GPGKeys"
+	// SBKeyTypeSignedByGPGKeys refers to keys signed by keys in a GPG keyring
+	SBKeyTypeSignedByGPGKeys sbKeyType = "signedByGPGKeys"
+	// SBKeyTypeX509Certificates refers to keys in a set of X.509 certificates
+	// FIXME: PEM, DER?
+	SBKeyTypeX509Certificates sbKeyType = "X509Certificates"
+	// SBKeyTypeSignedByX509CAs refers to keys signed by one of the X.509 CAs
+	// FIXME: PEM, DER?
+	SBKeyTypeSignedByX509CAs sbKeyType = "signedByX509CAs"
+)
+
+// prSignedBaseLayer is a PolicyRequirement with type = prTypeSignedBaseLayer: the image has a specified, correctly signed, base image.
+type prSignedBaseLayer struct {
+	prCommon
+	// BaseLayerIdentity specifies the base image to look for. "match-exact" is rejected, "match-repository" is unlikely to be useful.
+	BaseLayerIdentity PolicyReferenceMatch `json:"baseLayerIdentity"`
+}
+
+// PolicyReferenceMatch specifies a set of image identities accepted in PolicyRequirement.
+// The type is public, but its implementation is private.
+
+// prmCommon is the common type field in a JSON encoding of PolicyReferenceMatch.
+type prmCommon struct {
+	Type prmTypeIdentifier `json:"type"`
+}
+
+// prmTypeIdentifier is a string designating a kind of a PolicyReferenceMatch.
+type prmTypeIdentifier string
+
+// Known PolicyReferenceMatch types; these are the values of the JSON "type" field.
+const (
+	prmTypeMatchExact             prmTypeIdentifier = "matchExact"
+	prmTypeMatchRepoDigestOrExact prmTypeIdentifier = "matchRepoDigestOrExact"
+	prmTypeMatchRepository        prmTypeIdentifier = "matchRepository"
+	prmTypeExactReference         prmTypeIdentifier = "exactReference"
+	prmTypeExactRepository        prmTypeIdentifier = "exactRepository"
+)
+
+// prmMatchExact is a PolicyReferenceMatch with type = prmTypeMatchExact: the two references must match exactly.
+type prmMatchExact struct {
+	prmCommon
+}
+
+// prmMatchRepoDigestOrExact is a PolicyReferenceMatch with type = prmTypeMatchRepoDigestOrExact: the two references must match exactly,
+// except that digest references are also accepted if the repository name matches (regardless of tag/digest) and the signature applies to the referenced digest
+type prmMatchRepoDigestOrExact struct {
+	prmCommon
+}
+
+// prmMatchRepository is a PolicyReferenceMatch with type = prmTypeMatchRepository: the two references must use the same repository, may differ in the tag.
+type prmMatchRepository struct {
+	prmCommon
+}
+
+// prmExactReference is a PolicyReferenceMatch with type = prmTypeExactReference: matches a specified reference exactly.
+type prmExactReference struct {
+	prmCommon
+	DockerReference string `json:"dockerReference"`
+}
+
+// prmExactRepository is a PolicyReferenceMatch with type = prmTypeExactRepository: matches a specified repository, with any tag.
+type prmExactRepository struct {
+	prmCommon
+	DockerRepository string `json:"dockerRepository"`
+}
diff --git a/vendor/github.com/containers/image/signature/signature.go b/vendor/github.com/containers/image/signature/signature.go
new file mode 100644
index 0000000000..f6219bec87
--- /dev/null
+++ b/vendor/github.com/containers/image/signature/signature.go
@@ -0,0 +1,284 @@
+// Note: Consider the API unstable until the code supports at least three different image formats or transports.
+
+// NOTE: Keep this in sync with docs/atomic-signature.md and docs/atomic-signature-embedded.json!
+
+package signature
+
+import (
+ "encoding/json"
+ "fmt"
+ "time"
+
+ "github.com/pkg/errors"
+
+ "github.com/containers/image/version"
+ "github.com/opencontainers/go-digest"
+)
+
+const (
+	// signatureType is the required value of the critical.type field in the signature JSON payload.
+	signatureType = "atomic container signature"
+)
+
+// InvalidSignatureError is returned when parsing an invalid signature.
+type InvalidSignatureError struct {
+	msg string
+}
+
+// Error implements the error interface.
+func (err InvalidSignatureError) Error() string {
+	return err.msg
+}
+
+// Signature is a parsed content of a signature.
+// The only way to get this structure from a blob should be as a return value from a successful call to verifyAndExtractSignature below.
+type Signature struct {
+	DockerManifestDigest digest.Digest
+	DockerReference      string // FIXME: more precise type?
+}
+
+// untrustedSignature is a parsed content of a signature.
+// Field names are prefixed Untrusted to emphasize that nothing here has been cryptographically verified.
+type untrustedSignature struct {
+	UntrustedDockerManifestDigest digest.Digest
+	UntrustedDockerReference      string // FIXME: more precise type?
+	UntrustedCreatorID            *string
+	// This is intentionally an int64; the native JSON float64 type would allow to represent _some_ sub-second precision,
+	// but not nearly enough (with current timestamp values, a single unit in the last place is on the order of hundreds of nanoseconds).
+	// So, this is explicitly an int64, and we reject fractional values. If we did need more precise timestamps eventually,
+	// we would add another field, UntrustedTimestampNS int64.
+	UntrustedTimestamp *int64
+}
+
+// UntrustedSignatureInformation is information available in an untrusted signature.
+// This may be useful when debugging signature verification failures,
+// or when managing a set of signatures on a single image.
+//
+// WARNING: Do not use the contents of this for ANY security decisions,
+// and be VERY CAREFUL about showing this information to humans in any way which suggest that these values “are probably” reliable.
+// There is NO REASON to expect the values to be correct, or not intentionally misleading
+// (including things like “✅ Verified by $authority”)
+type UntrustedSignatureInformation struct {
+	UntrustedDockerManifestDigest digest.Digest
+	UntrustedDockerReference      string // FIXME: more precise type?
+	UntrustedCreatorID            *string
+	UntrustedTimestamp            *time.Time
+	UntrustedShortKeyIdentifier   string
+}
+
+// newUntrustedSignature returns an untrustedSignature object with
+// the specified primary contents and appropriate metadata.
+func newUntrustedSignature(dockerManifestDigest digest.Digest, dockerReference string) untrustedSignature {
+ // Use intermediate variables for these values so that we can take their addresses.
+ // Golang guarantees that they will have a new address on every execution.
+ creatorID := "atomic " + version.Version
+ timestamp := time.Now().Unix()
+ return untrustedSignature{
+ UntrustedDockerManifestDigest: dockerManifestDigest,
+ UntrustedDockerReference: dockerReference,
+ UntrustedCreatorID: &creatorID,
+ UntrustedTimestamp: ×tamp,
+ }
+}
+
+// Compile-time check that untrustedSignature implements json.Marshaler
+var _ json.Marshaler = (*untrustedSignature)(nil)
+
+// MarshalJSON implements the json.Marshaler interface.
+// It refuses to encode a signature with empty critical fields, and emits the
+// optional fields only when they are present.
+func (s untrustedSignature) MarshalJSON() ([]byte, error) {
+	if s.UntrustedDockerManifestDigest == "" || s.UntrustedDockerReference == "" {
+		return nil, errors.New("Unexpected empty signature content")
+	}
+	critical := map[string]interface{}{
+		"type":     signatureType,
+		"image":    map[string]string{"docker-manifest-digest": s.UntrustedDockerManifestDigest.String()},
+		"identity": map[string]string{"docker-reference": s.UntrustedDockerReference},
+	}
+	optional := map[string]interface{}{}
+	if s.UntrustedCreatorID != nil {
+		optional["creator"] = *s.UntrustedCreatorID
+	}
+	if s.UntrustedTimestamp != nil {
+		optional["timestamp"] = *s.UntrustedTimestamp
+	}
+	signature := map[string]interface{}{
+		"critical": critical,
+		"optional": optional,
+	}
+	return json.Marshal(signature)
+}
+
+// Compile-time check that untrustedSignature implements json.Unmarshaler
+var _ json.Unmarshaler = (*untrustedSignature)(nil)
+
+// UnmarshalJSON implements the json.Unmarshaler interface
+func (s *untrustedSignature) UnmarshalJSON(data []byte) error {
+	err := s.strictUnmarshalJSON(data)
+	if err != nil {
+		// Translate the internal jsonFormatError into the public InvalidSignatureError.
+		if _, ok := err.(jsonFormatError); ok {
+			err = InvalidSignatureError{msg: err.Error()}
+		}
+	}
+	return err
+}
+
+// strictUnmarshalJSON is UnmarshalJSON, except that it may return the internal jsonFormatError error type.
+// Splitting it into a separate function allows us to do the jsonFormatError → InvalidSignatureError in a single place, the caller.
+func (s *untrustedSignature) strictUnmarshalJSON(data []byte) error {
+	var critical, optional json.RawMessage
+	if err := paranoidUnmarshalJSONObjectExactFields(data, map[string]interface{}{
+		"critical": &critical,
+		"optional": &optional,
+	}); err != nil {
+		return err
+	}
+
+	// The "optional" object may contain arbitrary extra fields; only
+	// "creator" and "timestamp" are interpreted, the rest are ignored.
+	var creatorID string
+	var timestamp float64
+	var gotCreatorID, gotTimestamp = false, false
+	if err := paranoidUnmarshalJSONObject(optional, func(key string) interface{} {
+		switch key {
+		case "creator":
+			gotCreatorID = true
+			return &creatorID
+		case "timestamp":
+			gotTimestamp = true
+			// Bug fix: this previously read "×tamp", an HTML-entity mangling
+			// ("&times;tamp") of the intended "&timestamp", which does not compile.
+			return &timestamp
+		default:
+			var ignore interface{}
+			return &ignore
+		}
+	}); err != nil {
+		return err
+	}
+	if gotCreatorID {
+		s.UntrustedCreatorID = &creatorID
+	}
+	if gotTimestamp {
+		// JSON numbers decode as float64; reject fractional timestamps
+		// (see the comment on untrustedSignature.UntrustedTimestamp).
+		intTimestamp := int64(timestamp)
+		if float64(intTimestamp) != timestamp {
+			// Bug fix: the error message previously duplicated "is not is not".
+			return InvalidSignatureError{msg: "Field optional.timestamp is not an integer"}
+		}
+		s.UntrustedTimestamp = &intTimestamp
+	}
+
+	var t string
+	var image, identity json.RawMessage
+	if err := paranoidUnmarshalJSONObjectExactFields(critical, map[string]interface{}{
+		"type":     &t,
+		"image":    &image,
+		"identity": &identity,
+	}); err != nil {
+		return err
+	}
+	if t != signatureType {
+		return InvalidSignatureError{msg: fmt.Sprintf("Unrecognized signature type %s", t)}
+	}
+
+	var digestString string
+	if err := paranoidUnmarshalJSONObjectExactFields(image, map[string]interface{}{
+		"docker-manifest-digest": &digestString,
+	}); err != nil {
+		return err
+	}
+	s.UntrustedDockerManifestDigest = digest.Digest(digestString)
+
+	if err := paranoidUnmarshalJSONObjectExactFields(identity, map[string]interface{}{
+		"docker-reference": &s.UntrustedDockerReference,
+	}); err != nil {
+		return err
+	}
+
+	return nil
+}
+
+// sign formats the signature and returns a blob signed using mech and keyIdentity
+// (If it seems surprising that this is a method on untrustedSignature, note that there
+// isn’t a good reason to think that a key used by the user is trusted by any component
+// of the system just because it is a private key — actually the presence of a private key
+// on the system increases the likelihood of a successful attack on that private key
+// on that particular system.)
+func (s untrustedSignature) sign(mech SigningMechanism, keyIdentity string) ([]byte, error) {
+	// Serialize via MarshalJSON (the local name json shadows the package here).
+	json, err := json.Marshal(s)
+	if err != nil {
+		return nil, err
+	}
+
+	return mech.Sign(json, keyIdentity)
+}
+
+// signatureAcceptanceRules specifies how to decide whether an untrusted signature is acceptable.
+// We centralize the actual parsing and data extraction in verifyAndExtractSignature; this supplies
+// the policy. We use an object instead of supplying func parameters to verifyAndExtractSignature
+// because the functions have the same or similar types, so there is a risk of exchanging the functions;
+// named members of this struct are more explicit.
+// Each validator returns nil to accept the corresponding value, or an error to reject it.
+type signatureAcceptanceRules struct {
+	validateKeyIdentity                func(string) error
+	validateSignedDockerReference      func(string) error
+	validateSignedDockerManifestDigest func(digest.Digest) error
+}
+
+// verifyAndExtractSignature verifies that unverifiedSignature has been signed, and that its principal components
+// match expected values, both as specified by rules, and returns it
+func verifyAndExtractSignature(mech SigningMechanism, unverifiedSignature []byte, rules signatureAcceptanceRules) (*Signature, error) {
+	// Cryptographic verification happens first; the payload is parsed only after
+	// the signature itself and the signing key identity have been accepted.
+	signed, keyIdentity, err := mech.Verify(unverifiedSignature)
+	if err != nil {
+		return nil, err
+	}
+	if err := rules.validateKeyIdentity(keyIdentity); err != nil {
+		return nil, err
+	}
+
+	var unmatchedSignature untrustedSignature
+	if err := json.Unmarshal(signed, &unmatchedSignature); err != nil {
+		return nil, InvalidSignatureError{msg: err.Error()}
+	}
+	if err := rules.validateSignedDockerManifestDigest(unmatchedSignature.UntrustedDockerManifestDigest); err != nil {
+		return nil, err
+	}
+	if err := rules.validateSignedDockerReference(unmatchedSignature.UntrustedDockerReference); err != nil {
+		return nil, err
+	}
+	// signatureAcceptanceRules have accepted this value.
+	return &Signature{
+		DockerManifestDigest: unmatchedSignature.UntrustedDockerManifestDigest,
+		DockerReference:      unmatchedSignature.UntrustedDockerReference,
+	}, nil
+}
+
+// GetUntrustedSignatureInformationWithoutVerifying extracts information available in an untrusted signature,
+// WITHOUT doing any cryptographic verification.
+// This may be useful when debugging signature verification failures,
+// or when managing a set of signatures on a single image.
+//
+// WARNING: Do not use the contents of this for ANY security decisions,
+// and be VERY CAREFUL about showing this information to humans in any way which suggest that these values “are probably” reliable.
+// There is NO REASON to expect the values to be correct, or not intentionally misleading
+// (including things like “✅ Verified by $authority”)
+func GetUntrustedSignatureInformationWithoutVerifying(untrustedSignatureBytes []byte) (*UntrustedSignatureInformation, error) {
+	// NOTE: This should eventually do format autodetection.
+	// An ephemeral mechanism with no imported keys: used only to unpack the
+	// signature envelope, not to verify anything.
+	mech, _, err := NewEphemeralGPGSigningMechanism([]byte{})
+	if err != nil {
+		return nil, err
+	}
+	defer mech.Close()
+
+	untrustedContents, shortKeyIdentifier, err := mech.UntrustedSignatureContents(untrustedSignatureBytes)
+	if err != nil {
+		return nil, err
+	}
+	var untrustedDecodedContents untrustedSignature
+	if err := json.Unmarshal(untrustedContents, &untrustedDecodedContents); err != nil {
+		return nil, InvalidSignatureError{msg: err.Error()}
+	}
+
+	var timestamp *time.Time // = nil
+	if untrustedDecodedContents.UntrustedTimestamp != nil {
+		ts := time.Unix(*untrustedDecodedContents.UntrustedTimestamp, 0)
+		timestamp = &ts
+	}
+	return &UntrustedSignatureInformation{
+		UntrustedDockerManifestDigest: untrustedDecodedContents.UntrustedDockerManifestDigest,
+		UntrustedDockerReference:      untrustedDecodedContents.UntrustedDockerReference,
+		UntrustedCreatorID:            untrustedDecodedContents.UntrustedCreatorID,
+		UntrustedTimestamp:            timestamp,
+		UntrustedShortKeyIdentifier:   shortKeyIdentifier,
+	}, nil
+}
diff --git a/vendor/github.com/containers/image/transports/stub.go b/vendor/github.com/containers/image/transports/stub.go
new file mode 100644
index 0000000000..087f69b6ea
--- /dev/null
+++ b/vendor/github.com/containers/image/transports/stub.go
@@ -0,0 +1,36 @@
+package transports
+
+import (
+ "fmt"
+
+ "github.com/containers/image/types"
+)
+
+// stubTransport is an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”.
+type stubTransport string
+
+// NewStubTransport returns an implementation of types.ImageTransport which has a name, but rejects any references with “the transport $name: is not supported in this build”.
+func NewStubTransport(name string) types.ImageTransport {
+ return stubTransport(name)
+}
+
+// Name returns the name of the transport, which must be unique among other transports.
+func (s stubTransport) Name() string {
+ return string(s)
+}
+
+// ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+func (s stubTransport) ParseReference(reference string) (types.ImageReference, error) {
+ return nil, fmt.Errorf(`The transport "%s:" is not supported in this build`, string(s))
+}
+
+// ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+// (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+// It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+// scope passed to this function will not be "", that value is always allowed.
+func (s stubTransport) ValidatePolicyConfigurationScope(scope string) error {
+ // Allowing any reference in here allows tools with some transports stubbed-out to still
+ // use signature verification policies which refer to these stubbed-out transports.
+ // See also the treatment of unknown transports in policyTransportScopesWithTransport.UnmarshalJSON .
+ return nil
+}
diff --git a/vendor/github.com/containers/image/transports/transports.go b/vendor/github.com/containers/image/transports/transports.go
new file mode 100644
index 0000000000..687d0a44e3
--- /dev/null
+++ b/vendor/github.com/containers/image/transports/transports.go
@@ -0,0 +1,90 @@
+package transports
+
+import (
+ "fmt"
+ "sort"
+ "sync"
+
+ "github.com/containers/image/types"
+)
+
+// knownTransports is a registry of known ImageTransport instances.
+type knownTransports struct {
+ transports map[string]types.ImageTransport
+ mu sync.Mutex
+}
+
+func (kt *knownTransports) Get(k string) types.ImageTransport {
+ kt.mu.Lock()
+ t := kt.transports[k]
+ kt.mu.Unlock()
+ return t
+}
+
+func (kt *knownTransports) Remove(k string) {
+ kt.mu.Lock()
+ delete(kt.transports, k)
+ kt.mu.Unlock()
+}
+
+func (kt *knownTransports) Add(t types.ImageTransport) {
+ kt.mu.Lock()
+ defer kt.mu.Unlock()
+ name := t.Name()
+ if t := kt.transports[name]; t != nil {
+ panic(fmt.Sprintf("Duplicate image transport name %s", name))
+ }
+ kt.transports[name] = t
+}
+
+var kt *knownTransports
+
+func init() {
+ kt = &knownTransports{
+ transports: make(map[string]types.ImageTransport),
+ }
+}
+
+// Get returns the transport specified by name or nil when unavailable.
+func Get(name string) types.ImageTransport {
+ return kt.Get(name)
+}
+
+// Delete deletes a transport from the registered transports.
+func Delete(name string) {
+ kt.Remove(name)
+}
+
+// Register registers a transport.
+func Register(t types.ImageTransport) {
+ kt.Add(t)
+}
+
+// ImageName converts a types.ImageReference into an URL-like image name, which MUST be such that
+// ParseImageName(ImageName(reference)) returns an equivalent reference.
+//
+// This is the generally recommended way to refer to images in the UI.
+//
+// NOTE: The returned string is not promised to be equal to the original input to ParseImageName;
+// e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+func ImageName(ref types.ImageReference) string {
+ return ref.Transport().Name() + ":" + ref.StringWithinTransport()
+}
+
+// ListNames returns a list of non deprecated transport names.
+// Deprecated transports can be used, but are not presented to users.
+func ListNames() []string {
+ kt.mu.Lock()
+ defer kt.mu.Unlock()
+ deprecated := map[string]bool{
+ "atomic": true,
+ }
+ var names []string
+ for _, transport := range kt.transports {
+ if !deprecated[transport.Name()] {
+ names = append(names, transport.Name())
+ }
+ }
+ sort.Strings(names)
+ return names
+}
diff --git a/vendor/github.com/containers/image/types/types.go b/vendor/github.com/containers/image/types/types.go
new file mode 100644
index 0000000000..6bcd392f13
--- /dev/null
+++ b/vendor/github.com/containers/image/types/types.go
@@ -0,0 +1,333 @@
+package types
+
+import (
+ "context"
+ "io"
+ "time"
+
+ "github.com/containers/image/docker/reference"
+ "github.com/opencontainers/go-digest"
+ "github.com/opencontainers/image-spec/specs-go/v1"
+)
+
+// ImageTransport is a top-level namespace for ways to store/load an image.
+// It should generally correspond to ImageSource/ImageDestination implementations.
+//
+// Note that ImageTransport is based on "ways the users refer to image storage", not necessarily on the underlying physical transport.
+// For example, all Docker References would be used within a single "docker" transport, regardless of whether the images are pulled over HTTP or HTTPS
+// (or, even, IPv4 or IPv6).
+//
+// OTOH all images using the same transport should (apart from versions of the image format), be interoperable.
+// For example, several different ImageTransport implementations may be based on local filesystem paths,
+// but using completely different formats for the contents of that path (a single tar file, a directory containing tarballs, a fully expanded container filesystem, ...)
+//
+// See also transports.KnownTransports.
+type ImageTransport interface {
+ // Name returns the name of the transport, which must be unique among other transports.
+ Name() string
+ // ParseReference converts a string, which should not start with the ImageTransport.Name prefix, into an ImageReference.
+ ParseReference(reference string) (ImageReference, error)
+ // ValidatePolicyConfigurationScope checks that scope is a valid name for a signature.PolicyTransportScopes keys
+ // (i.e. a valid PolicyConfigurationIdentity() or PolicyConfigurationNamespaces() return value).
+ // It is acceptable to allow an invalid value which will never be matched, it can "only" cause user confusion.
+ // scope passed to this function will not be "", that value is always allowed.
+ ValidatePolicyConfigurationScope(scope string) error
+}
+
+// ImageReference is an abstracted way to refer to an image location, namespaced within an ImageTransport.
+//
+// The object should preferably be immutable after creation, with any parsing/state-dependent resolving happening
+// within an ImageTransport.ParseReference() or equivalent API creating the reference object.
+// That's also why the various identification/formatting methods of this type do not support returning errors.
+//
+// WARNING: While this design freezes the content of the reference within this process, it can not freeze the outside
+// world: paths may be replaced by symlinks elsewhere, HTTP APIs may start returning different results, and so on.
+type ImageReference interface {
+ Transport() ImageTransport
+ // StringWithinTransport returns a string representation of the reference, which MUST be such that
+ // reference.Transport().ParseReference(reference.StringWithinTransport()) returns an equivalent reference.
+ // NOTE: The returned string is not promised to be equal to the original input to ParseReference;
+ // e.g. default attribute values omitted by the user may be filled in in the return value, or vice versa.
+ // WARNING: Do not use the return value in the UI to describe an image, it does not contain the Transport().Name() prefix;
+ // instead, see transports.ImageName().
+ StringWithinTransport() string
+
+ // DockerReference returns a Docker reference associated with this reference
+ // (fully explicit, i.e. !reference.IsNameOnly, but reflecting user intent,
+ // not e.g. after redirect or alias processing), or nil if unknown/not applicable.
+ DockerReference() reference.Named
+
+ // PolicyConfigurationIdentity returns a string representation of the reference, suitable for policy lookup.
+ // This MUST reflect user intent, not e.g. after processing of third-party redirects or aliases;
+ // The value SHOULD be fully explicit about its semantics, with no hidden defaults, AND canonical
+ // (i.e. various references with exactly the same semantics should return the same configuration identity)
+ // It is fine for the return value to be equal to StringWithinTransport(), and it is desirable but
+ // not required/guaranteed that it will be a valid input to Transport().ParseReference().
+ // Returns "" if configuration identities for these references are not supported.
+ PolicyConfigurationIdentity() string
+
+ // PolicyConfigurationNamespaces returns a list of other policy configuration namespaces to search
+ // for if explicit configuration for PolicyConfigurationIdentity() is not set. The list will be processed
+ // in order, terminating on first match, and an implicit "" is always checked at the end.
+ // It is STRONGLY recommended for the first element, if any, to be a prefix of PolicyConfigurationIdentity(),
+ // and each following element to be a prefix of the element preceding it.
+ PolicyConfigurationNamespaces() []string
+
+ // NewImage returns a types.Image for this reference, possibly specialized for this ImageTransport.
+ // The caller must call .Close() on the returned Image.
+ // NOTE: If any kind of signature verification should happen, build an UnparsedImage from the value returned by NewImageSource,
+ // verify that UnparsedImage, and convert it into a real Image via image.FromUnparsedImage.
+ NewImage(ctx *SystemContext) (Image, error)
+ // NewImageSource returns a types.ImageSource for this reference,
+ // asking the backend to use a manifest from requestedManifestMIMETypes if possible.
+ // nil requestedManifestMIMETypes means manifest.DefaultRequestedManifestMIMETypes.
+ // The caller must call .Close() on the returned ImageSource.
+ NewImageSource(ctx *SystemContext, requestedManifestMIMETypes []string) (ImageSource, error)
+ // NewImageDestination returns a types.ImageDestination for this reference.
+ // The caller must call .Close() on the returned ImageDestination.
+ NewImageDestination(ctx *SystemContext) (ImageDestination, error)
+
+ // DeleteImage deletes the named image from the registry, if supported.
+ DeleteImage(ctx *SystemContext) error
+}
+
+// BlobInfo collects known information about a blob (layer/config).
+// In some situations, some fields may be unknown, in others they may be mandatory; documenting an “unknown” value here does not override that.
+type BlobInfo struct {
+ Digest digest.Digest // "" if unknown.
+ Size int64 // -1 if unknown
+ URLs []string
+}
+
+// ImageSource is a service, possibly remote (= slow), to download components of a single image.
+// This is primarily useful for copying images around; for examining their properties, Image (below)
+// is usually more useful.
+// Each ImageSource should eventually be closed by calling Close().
+//
+// WARNING: Various methods which return an object identified by digest generally do not
+// validate that the returned data actually matches that digest; this is the caller’s responsibility.
+type ImageSource interface {
+ // Reference returns the reference used to set up this source, _as specified by the user_
+ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+ Reference() ImageReference
+ // Close removes resources associated with an initialized ImageSource, if any.
+ Close() error
+ // GetManifest returns the image's manifest along with its MIME type (which may be empty when it can't be determined but the manifest is available).
+ // It may use a remote (= slow) service.
+ GetManifest() ([]byte, string, error)
+ // GetTargetManifest returns an image's manifest given a digest. This is mainly used to retrieve a single image's manifest
+ // out of a manifest list.
+ GetTargetManifest(digest digest.Digest) ([]byte, string, error)
+ // GetBlob returns a stream for the specified blob, and the blob’s size (or -1 if unknown).
+ // The Digest field in BlobInfo is guaranteed to be provided; Size may be -1.
+ GetBlob(BlobInfo) (io.ReadCloser, int64, error)
+ // GetSignatures returns the image's signatures. It may use a remote (= slow) service.
+ GetSignatures(context.Context) ([][]byte, error)
+}
+
+// ImageDestination is a service, possibly remote (= slow), to store components of a single image.
+//
+// There is a specific required order for some of the calls:
+// PutBlob on the various blobs, if any, MUST be called before PutManifest (manifest references blobs, which may be created or compressed only at push time)
+// ReapplyBlob, if used, MUST only be called if HasBlob returned true for the same blob digest
+// PutSignatures, if called, MUST be called after PutManifest (signatures reference manifest contents)
+// Finally, Commit MUST be called if the caller wants the image, as formed by the components saved above, to persist.
+//
+// Each ImageDestination should eventually be closed by calling Close().
+type ImageDestination interface {
+ // Reference returns the reference used to set up this destination. Note that this should directly correspond to user's intent,
+ // e.g. it should use the public hostname instead of the result of resolving CNAMEs or following redirects.
+ Reference() ImageReference
+ // Close removes resources associated with an initialized ImageDestination, if any.
+ Close() error
+
+ // SupportedManifestMIMETypes tells which manifest mime types the destination supports
+	// If an empty slice or nil is returned, then any mime type can be tried to upload
+ SupportedManifestMIMETypes() []string
+ // SupportsSignatures returns an error (to be displayed to the user) if the destination certainly can't store signatures.
+ // Note: It is still possible for PutSignatures to fail if SupportsSignatures returns nil.
+ SupportsSignatures() error
+ // ShouldCompressLayers returns true iff it is desirable to compress layer blobs written to this destination.
+ ShouldCompressLayers() bool
+ // AcceptsForeignLayerURLs returns false iff foreign layers in manifest should be actually
+ // uploaded to the image destination, true otherwise.
+ AcceptsForeignLayerURLs() bool
+ // MustMatchRuntimeOS returns true iff the destination can store only images targeted for the current runtime OS. False otherwise.
+ MustMatchRuntimeOS() bool
+ // PutBlob writes contents of stream and returns data representing the result (with all data filled in).
+ // inputInfo.Digest can be optionally provided if known; it is not mandatory for the implementation to verify it.
+ // inputInfo.Size is the expected length of stream, if known.
+ // WARNING: The contents of stream are being verified on the fly. Until stream.Read() returns io.EOF, the contents of the data SHOULD NOT be available
+ // to any other readers for download using the supplied digest.
+ // If stream.Read() at any time, ESPECIALLY at end of input, returns an error, PutBlob MUST 1) fail, and 2) delete any data stored so far.
+ PutBlob(stream io.Reader, inputInfo BlobInfo) (BlobInfo, error)
+ // HasBlob returns true iff the image destination already contains a blob with the matching digest which can be reapplied using ReapplyBlob.
+ // Unlike PutBlob, the digest can not be empty. If HasBlob returns true, the size of the blob must also be returned.
+ // If the destination does not contain the blob, or it is unknown, HasBlob ordinarily returns (false, -1, nil);
+ // it returns a non-nil error only on an unexpected failure.
+ HasBlob(info BlobInfo) (bool, int64, error)
+ // ReapplyBlob informs the image destination that a blob for which HasBlob previously returned true would have been passed to PutBlob if it had returned false. Like HasBlob and unlike PutBlob, the digest can not be empty. If the blob is a filesystem layer, this signifies that the changes it describes need to be applied again when composing a filesystem tree.
+ ReapplyBlob(info BlobInfo) (BlobInfo, error)
+ // PutManifest writes manifest to the destination.
+ // FIXME? This should also receive a MIME type if known, to differentiate between schema versions.
+ // If the destination is in principle available, refuses this manifest type (e.g. it does not recognize the schema),
+ // but may accept a different manifest type, the returned error must be an ManifestTypeRejectedError.
+ PutManifest(manifest []byte) error
+ PutSignatures(signatures [][]byte) error
+ // Commit marks the process of storing the image as successful and asks for the image to be persisted.
+ // WARNING: This does not have any transactional semantics:
+ // - Uploaded data MAY be visible to others before Commit() is called
+ // - Uploaded data MAY be removed or MAY remain around if Close() is called without Commit() (i.e. rollback is allowed but not guaranteed)
+ Commit() error
+}
+
+// ManifestTypeRejectedError is returned by ImageDestination.PutManifest if the destination is in principle available,
+// refuses specifically this manifest type, but may accept a different manifest type.
+type ManifestTypeRejectedError struct { // We only use a struct to allow a type assertion, without limiting the contents of the error otherwise.
+ Err error
+}
+
+func (e ManifestTypeRejectedError) Error() string {
+ return e.Err.Error()
+}
+
+// UnparsedImage is an Image-to-be; until it is verified and accepted, it only carries its identity and caches manifest and signature blobs.
+// Thus, an UnparsedImage can be created from an ImageSource simply by fetching blobs without interpreting them,
+// allowing cryptographic signature verification to happen first, before even fetching the manifest, or parsing anything else.
+// This also makes the UnparsedImage→Image conversion an explicitly visible step.
+// Each UnparsedImage should eventually be closed by calling Close().
+type UnparsedImage interface {
+ // Reference returns the reference used to set up this source, _as specified by the user_
+ // (not as the image itself, or its underlying storage, claims). This can be used e.g. to determine which public keys are trusted for this image.
+ Reference() ImageReference
+ // Close removes resources associated with an initialized UnparsedImage, if any.
+ Close() error
+ // Manifest is like ImageSource.GetManifest, but the result is cached; it is OK to call this however often you need.
+ Manifest() ([]byte, string, error)
+ // Signatures is like ImageSource.GetSignatures, but the result is cached; it is OK to call this however often you need.
+ Signatures(ctx context.Context) ([][]byte, error)
+}
+
+// Image is the primary API for inspecting properties of images.
+// Each Image should eventually be closed by calling Close().
+type Image interface {
+ // Note that Reference may return nil in the return value of UpdatedImage!
+ UnparsedImage
+ // ConfigInfo returns a complete BlobInfo for the separate config object, or a BlobInfo{Digest:""} if there isn't a separate object.
+ // Note that the config object may not exist in the underlying storage in the return value of UpdatedImage! Use ConfigBlob() below.
+ ConfigInfo() BlobInfo
+ // ConfigBlob returns the blob described by ConfigInfo, iff ConfigInfo().Digest != ""; nil otherwise.
+ // The result is cached; it is OK to call this however often you need.
+ ConfigBlob() ([]byte, error)
+ // OCIConfig returns the image configuration as per OCI v1 image-spec. Information about
+	// layers in the resulting configuration isn't guaranteed to be returned due to how
+ // old image manifests work (docker v2s1 especially).
+ OCIConfig() (*v1.Image, error)
+ // LayerInfos returns a list of BlobInfos of layers referenced by this image, in order (the root layer first, and then successive layered layers).
+ // The Digest field is guaranteed to be provided; Size may be -1.
+ // WARNING: The list may contain duplicates, and they are semantically relevant.
+ LayerInfos() []BlobInfo
+ // EmbeddedDockerReferenceConflicts whether a Docker reference embedded in the manifest, if any, conflicts with destination ref.
+ // It returns false if the manifest does not embed a Docker reference.
+ // (This embedding unfortunately happens for Docker schema1, please do not add support for this in any new formats.)
+ EmbeddedDockerReferenceConflicts(ref reference.Named) bool
+ // Inspect returns various information for (skopeo inspect) parsed from the manifest and configuration.
+ Inspect() (*ImageInspectInfo, error)
+ // UpdatedImageNeedsLayerDiffIDs returns true iff UpdatedImage(options) needs InformationOnly.LayerDiffIDs.
+ // This is a horribly specific interface, but computing InformationOnly.LayerDiffIDs can be very expensive to compute
+ // (most importantly it forces us to download the full layers even if they are already present at the destination).
+ UpdatedImageNeedsLayerDiffIDs(options ManifestUpdateOptions) bool
+ // UpdatedImage returns a types.Image modified according to options.
+ // Everything in options.InformationOnly should be provided, other fields should be set only if a modification is desired.
+ // This does not change the state of the original Image object.
+ UpdatedImage(options ManifestUpdateOptions) (Image, error)
+ // IsMultiImage returns true if the image's manifest is a list of images, false otherwise.
+ IsMultiImage() bool
+ // Size returns an approximation of the amount of disk space which is consumed by the image in its current
+ // location. If the size is not known, -1 will be returned.
+ Size() (int64, error)
+}
+
+// ManifestUpdateOptions is a way to pass named optional arguments to Image.UpdatedManifest
+type ManifestUpdateOptions struct {
+ LayerInfos []BlobInfo // Complete BlobInfos (size+digest+urls) which should replace the originals, in order (the root layer first, and then successive layered layers)
+ EmbeddedDockerReference reference.Named
+ ManifestMIMEType string
+ // The values below are NOT requests to modify the image; they provide optional context which may or may not be used.
+ InformationOnly ManifestUpdateInformation
+}
+
+// ManifestUpdateInformation is a component of ManifestUpdateOptions, named here
+// only to make writing struct literals possible.
+type ManifestUpdateInformation struct {
+ Destination ImageDestination // and yes, UpdatedManifest may write to Destination (see the schema2 → schema1 conversion logic in image/docker_schema2.go)
+ LayerInfos []BlobInfo // Complete BlobInfos (size+digest) which have been uploaded, in order (the root layer first, and then successive layered layers)
+ LayerDiffIDs []digest.Digest // Digest values for the _uncompressed_ contents of the blobs which have been uploaded, in the same order.
+}
+
+// ImageInspectInfo is a set of metadata describing Docker images, primarily their manifest and configuration.
+// The Tag field is a legacy field which is here just for the Docker v2s1 manifest. It won't be supported
+// for other manifest types.
+type ImageInspectInfo struct {
+ Tag string
+ Created time.Time
+ DockerVersion string
+ Labels map[string]string
+ Architecture string
+ Os string
+ Layers []string
+}
+
+// DockerAuthConfig contains authorization information for connecting to a registry.
+type DockerAuthConfig struct {
+ Username string
+ Password string
+}
+
+// SystemContext allows parametrizing access to implicitly-accessed resources,
+// like configuration files in /etc and users' login state in their home directory.
+// Various components can share the same field only if their semantics is exactly
+// the same; if in doubt, add a new field.
+// It is always OK to pass nil instead of a SystemContext.
+type SystemContext struct {
+ // If not "", prefixed to any absolute paths used by default by the library (e.g. in /etc/).
+ // Not used for any of the more specific path overrides available in this struct.
+ // Not used for any paths specified by users in config files (even if the location of the config file _was_ affected by it).
+ // NOTE: If this is set, environment-variable overrides of paths are ignored (to keep the semantics simple: to create an /etc replacement, just set RootForImplicitAbsolutePaths .
+ // and there is no need to worry about the environment.)
+ // NOTE: This does NOT affect paths starting by $HOME.
+ RootForImplicitAbsolutePaths string
+
+ // === Global configuration overrides ===
+ // If not "", overrides the system's default path for signature.Policy configuration.
+ SignaturePolicyPath string
+ // If not "", overrides the system's default path for registries.d (Docker signature storage configuration)
+ RegistriesDirPath string
+
+ // === docker.Transport overrides ===
+ // If not "", a directory containing a CA certificate (ending with ".crt"),
+	// a client certificate (ending with ".cert") and a client certificate key
+ // (ending with ".key") used when talking to a Docker Registry.
+ DockerCertPath string
+ // If not "", overrides the system’s default path for a directory containing host[:port] subdirectories with the same structure as DockerCertPath above.
+ // Ignored if DockerCertPath is non-empty.
+ DockerPerHostCertDirPath string
+ DockerInsecureSkipTLSVerify bool // Allow contacting docker registries over HTTP, or HTTPS with failed TLS verification. Note that this does not affect other TLS connections.
+ // if nil, the library tries to parse ~/.docker/config.json to retrieve credentials
+ DockerAuthConfig *DockerAuthConfig
+	// if not "", a User-Agent header is added to each request when contacting a registry.
+ DockerRegistryUserAgent string
+ // if true, a V1 ping attempt isn't done to give users a better error. Default is false.
+ // Note that this field is used mainly to integrate containers/image into projectatomic/docker
+ // in order to not break any existing docker's integration tests.
+ DockerDisableV1Ping bool
+ // Directory to use for OSTree temporary files
+ OSTreeTmpDirPath string
+}
+
+// ProgressProperties is used to pass information from the copy code to a monitor which
+// can use the real-time information to produce output or react to changes.
+type ProgressProperties struct {
+ Artifact BlobInfo
+ Offset uint64
+}
diff --git a/vendor/github.com/containers/image/version/version.go b/vendor/github.com/containers/image/version/version.go
new file mode 100644
index 0000000000..6644bcff3b
--- /dev/null
+++ b/vendor/github.com/containers/image/version/version.go
@@ -0,0 +1,18 @@
+package version
+
+import "fmt"
+
+const (
+ // VersionMajor is for an API incompatible changes
+ VersionMajor = 0
+ // VersionMinor is for functionality in a backwards-compatible manner
+ VersionMinor = 1
+ // VersionPatch is for backwards-compatible bug fixes
+ VersionPatch = 0
+
+ // VersionDev indicates development branch. Releases will be empty string.
+ VersionDev = "-dev"
+)
+
+// Version is the specification version that the package types support.
+var Version = fmt.Sprintf("%d.%d.%d%s", VersionMajor, VersionMinor, VersionPatch, VersionDev)
diff --git a/vendor/github.com/containers/storage/AUTHORS b/vendor/github.com/containers/storage/AUTHORS
new file mode 100644
index 0000000000..11cd83d14e
--- /dev/null
+++ b/vendor/github.com/containers/storage/AUTHORS
@@ -0,0 +1,1522 @@
+# This file lists all individuals having contributed content to the repository.
+# For how it is generated, see `hack/generate-authors.sh`.
+
+Aanand Prasad
+Aaron Davidson
+Aaron Feng
+Aaron Huslage
+Aaron Lehmann
+Aaron Welch
+Abel Muiño
+Abhijeet Kasurde
+Abhinav Ajgaonkar
+Abhishek Chanda
+Abin Shahab
+Adam Miller
+Adam Singer
+Aditi Rajagopal
+Aditya
+Adria Casas
+Adrian Mouat
+Adrian Oprea
+Adrien Folie
+Adrien Gallouët
+Ahmed Kamal
+Ahmet Alp Balkan
+Aidan Feldman
+Aidan Hobson Sayers
+AJ Bowen
+Ajey Charantimath
+ajneu
+Akihiro Suda
+Al Tobey
+alambike
+Alan Scherger
+Alan Thompson
+Albert Callarisa
+Albert Zhang
+Aleksa Sarai
+Aleksandrs Fadins
+Alena Prokharchyk
+Alessandro Boch
+Alessio Biancalana
+Alex Chan
+Alex Crawford
+Alex Ellis
+Alex Gaynor
+Alex Samorukov
+Alex Warhawk
+Alexander Artemenko
+Alexander Boyd
+Alexander Larsson
+Alexander Morozov
+Alexander Shopov
+Alexandre Beslic
+Alexandre González
+Alexandru Sfirlogea
+Alexey Guskov
+Alexey Kotlyarov
+Alexey Shamrin
+Alexis THOMAS
+Ali Dehghani
+Allen Madsen
+Allen Sun
+almoehi
+Alvin Richards
+amangoel
+Amen Belayneh
+Amit Bakshi
+Amit Krishnan
+Amy Lindburg
+Anand Patil
+AnandkumarPatel
+Anatoly Borodin
+Anchal Agrawal
+Anders Janmyr
+Andre Dublin <81dublin@gmail.com>
+Andre Granovsky
+Andrea Luzzardi
+Andrea Turli
+Andreas Köhler
+Andreas Savvides
+Andreas Tiefenthaler
+Andrew C. Bodine
+Andrew Clay Shafer
+Andrew Duckworth
+Andrew France
+Andrew Gerrand
+Andrew Guenther
+Andrew Kuklewicz
+Andrew Macgregor
+Andrew Macpherson
+Andrew Martin
+Andrew Munsell
+Andrew Weiss
+Andrew Williams
+Andrews Medina
+Andrey Petrov
+Andrey Stolbovsky
+André Martins
+andy
+Andy Chambers
+andy diller
+Andy Goldstein
+Andy Kipp
+Andy Rothfusz
+Andy Smith
+Andy Wilson
+Anes Hasicic
+Anil Belur
+Ankush Agarwal
+Anonmily
+Anthon van der Neut
+Anthony Baire
+Anthony Bishopric
+Anthony Dahanne
+Anton Löfgren
+Anton Nikitin
+Anton Polonskiy
+Anton Tiurin
+Antonio Murdaca
+Antony Messerli
+Anuj Bahuguna
+Anusha Ragunathan
+apocas
+ArikaChen
+Arnaud Porterie
+Arthur Barr
+Arthur Gautier
+Artur Meyster
+Arun Gupta
+Asbjørn Enge
+averagehuman
+Avi Das
+Avi Miller
+ayoshitake
+Azat Khuyiyakhmetov
+Bardia Keyoumarsi
+Barnaby Gray
+Barry Allard
+Bartłomiej Piotrowski
+Bastiaan Bakker
+bdevloed
+Ben Firshman
+Ben Golub
+Ben Hall
+Ben Sargent
+Ben Severson
+Ben Toews
+Ben Wiklund
+Benjamin Atkin
+Benoit Chesneau
+Bernerd Schaefer
+Bert Goethals
+Bharath Thiruveedula
+Bhiraj Butala
+Bill W
+bin liu
+Blake Geno
+Boaz Shuster
+bobby abbott
+boucher
+Bouke Haarsma
+Boyd Hemphill
+boynux
+Bradley Cicenas
+Bradley Wright
+Brandon Liu
+Brandon Philips
+Brandon Rhodes
+Brendan Dixon
+Brent Salisbury
+Brett Higgins
+Brett Kochendorfer
+Brian (bex) Exelbierd
+Brian Bland
+Brian DeHamer
+Brian Dorsey
+Brian Flad
+Brian Goff
+Brian McCallister
+Brian Olsen
+Brian Shumate
+Brian Torres-Gil
+Brian Trump
+Brice Jaglin
+Briehan Lombaard
+Bruno Bigras
+Bruno Binet
+Bruno Gazzera
+Bruno Renié
+Bryan Bess
+Bryan Boreham
+Bryan Matsuo
+Bryan Murphy
+buddhamagnet
+Burke Libbey
+Byung Kang
+Caleb Spare
+Calen Pennington
+Cameron Boehmer
+Cameron Spear
+Campbell Allen
+Candid Dauth
+Carl Henrik Lunde
+Carl X. Su
+Carlos Alexandro Becker
+Carlos Sanchez
+Carol Fager-Higgins
+Cary
+Casey Bisson
+Cedric Davies
+Cezar Sa Espinola
+Chad Swenson
+Chance Zibolski
+Chander G
+Charles Chan
+Charles Hooper
+Charles Law
+Charles Lindsay
+Charles Merriam
+Charles Sarrazin
+Charlie Lewis
+Chase Bolt
+ChaYoung You
+Chen Chao
+Chen Hanxiao
+cheney90
+Chewey
+Chia-liang Kao
+chli
+Cholerae Hu
+Chris Alfonso
+Chris Armstrong
+Chris Dituri
+Chris Fordham
+Chris Khoo
+Chris McKinnel
+Chris Seto
+Chris Snow
+Chris St. Pierre
+Chris Stivers