diff --git a/CHANGELOG.md b/CHANGELOG.md index 2174f139b3..5d9d2d422d 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,5 +1,69 @@ # Minikube Release Notes +## Version 1.0.0 - 2019-03-27 + +* Update default Kubernetes version to v1.14.0 [#3967](https://github.com/kubernetes/minikube/pull/3967) + * NOTE: To avoid interaction issues, we also recommend updating kubectl to a recent release (v1.13+) +* Upgrade addon-manager to v9.0 for compatibility with Kubernetes v1.14 [#3984](https://github.com/kubernetes/minikube/pull/3984) +* Add --image-repository flag so that users can select an alternative repository mirror [#3714](https://github.com/kubernetes/minikube/pull/3714) +* Rename MINIKUBE_IN_COLOR to MINIKUBE_IN_STYLE [#3976](https://github.com/kubernetes/minikube/pull/3976) +* mount: Allow names to be passed in for gid/uid [#3989](https://github.com/kubernetes/minikube/pull/3989) +* mount: unmount on sigint/sigterm, add --options and --mode, improve UI [#3855](https://github.com/kubernetes/minikube/pull/3855) +* --extra-config now work for kubeadm as well [#3879](https://github.com/kubernetes/minikube/pull/3879) +* start: Set the default value of --cache to true [#3917](https://github.com/kubernetes/minikube/pull/3917) +* Remove the swap partition from minikube.iso [#3927](https://github.com/kubernetes/minikube/pull/3927) +* Add solution catalog to help users who run into known problems [#3931](https://github.com/kubernetes/minikube/pull/3931) +* Automatically propagate proxy environment variables to docker env [#3834](https://github.com/kubernetes/minikube/pull/3834) +* More reliable unmount w/ SIGINT, particularly on kvm2 [#3985](https://github.com/kubernetes/minikube/pull/3985) +* Remove arch suffixes in image names [#3942](https://github.com/kubernetes/minikube/pull/3942) +* Issue #3253, improve kubernetes-version error string [#3596](https://github.com/kubernetes/minikube/pull/3596) +* Update kubeadm bootstrap logic so it does not wait for addon-manager 
[#3958](https://github.com/kubernetes/minikube/pull/3958) +* Add explicit kvm2 flag for hidden KVM signature [#3947](https://github.com/kubernetes/minikube/pull/3947) +* Remove the rkt container runtime [#3944](https://github.com/kubernetes/minikube/pull/3944) +* Store the toolbox on the disk instead of rootfs [#3951](https://github.com/kubernetes/minikube/pull/3951) +* fix CHANGE_MINIKUBE_NONE_USER regression from recent changes [#3875](https://github.com/kubernetes/minikube/pull/3875) +* Do not wait for k8s-app pods when starting with CNI [#3896](https://github.com/kubernetes/minikube/pull/3896) +* Replace server name in updateKubeConfig if --apiserver-name exists #3878 [#3897](https://github.com/kubernetes/minikube/pull/3897) +* feature-gates via minikube config set [#3861](https://github.com/kubernetes/minikube/pull/3861) +* Upgrade crio to v1.13.1, skip install.tools target as it isn't necessary [#3919](https://github.com/kubernetes/minikube/pull/3919) +* Update Ingress-NGINX to 0.23 Release [#3877](https://github.com/kubernetes/minikube/pull/3877) +* Add addon-manager, dashboard, and storage-provisioner to minikube logs [#3982](https://github.com/kubernetes/minikube/pull/3982) +* logs: Add kube-proxy, dmesg, uptime, uname + newlines between log sources [#3872](https://github.com/kubernetes/minikube/pull/3872) +* Skip "pull" command if using Kubernetes 1.10, which does not support it. 
[#3832](https://github.com/kubernetes/minikube/pull/3832) +* Allow building minikube for any architecture [#3887](https://github.com/kubernetes/minikube/pull/3887) +* Windows installer using installation path for x64 applications [#3895](https://github.com/kubernetes/minikube/pull/3895) +* caching: Fix containerd, improve console messages, add integration tests [#3767](https://github.com/kubernetes/minikube/pull/3767) +* Fix `minikube addons open heapster` [#3826](https://github.com/kubernetes/minikube/pull/3826) + +We couldn't have gotten here without the folks who contributed to this release: + +- Anders F Björklund +- Andy Daniels +- Calin Don +- Cristian Măgherușan-Stanciu @magheru_san +- Dmitry Budaev +- Guang Ya Liu +- Igor Akkerman +- Joel Smith +- Marco Vito Moscaritolo +- Marcos Diez +- Martynas Pumputis +- RA489 +- Sharif Elgamal +- Steven Davidovitz +- Thomas Strömberg +- Zhongcheng Lao +- flyingcircle +- jay vyas +- morvencao +- u5surf + +We all stand on the shoulders of the giants who came before us. 
A special shout-out to all [813 people who have contributed to minikube](https://github.com/kubernetes/minikube/graphs/contributors), and especially our former maintainers who made minikube into what it is today: + +- Matt Rickard +- Dan Lorenc +- Aaron Prindle + ## Version 0.35.0 - 2019-03-06 * Update default Kubernetes version to v1.13.4 (latest stable) [#3807](https://github.com/kubernetes/minikube/pull/3807) diff --git a/Gopkg.lock b/Gopkg.lock index 5bc2516ee7..a95d84b4bf 100644 --- a/Gopkg.lock +++ b/Gopkg.lock @@ -183,7 +183,8 @@ version = "v0.2.0" [[projects]] - digest = "1:186f7de0e878b5ff1fca82271ce36a7abf9747be09d03b3f08a921584c2f26fc" + branch = "master" + digest = "1:dfed0914a28dd3a8561fbfdd5c7a1deb2b90dee8edea6f58c9285680fc37b5c2" name = "github.com/google/go-containerregistry" packages = [ "pkg/authn", @@ -192,12 +193,13 @@ "pkg/v1/partial", "pkg/v1/remote", "pkg/v1/remote/transport", + "pkg/v1/stream", "pkg/v1/tarball", "pkg/v1/types", "pkg/v1/v1util", ] pruneopts = "NUT" - revision = "3165313d6d3f973ec0b0ed3ec5a63b520e065d40" + revision = "019cdfc6adf96a4905a1b93a7aeaea1e50c0b6cf" [[projects]] digest = "1:63ede27834b468648817fb80cfb95d40abfc61341f89cb7a0d6779b6aa955425" @@ -505,6 +507,14 @@ pruneopts = "NUT" revision = "4d0e916071f68db74f8a73926335f809396d6b42" +[[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "NUT" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + [[projects]] branch = "master" digest = "1:1b6f62a965e4b2e004184bf2d38ef2915af240befa4d44e5f0e83925bcf89727" @@ -1008,6 +1018,7 @@ "github.com/pkg/browser", "github.com/pkg/errors", "github.com/pkg/profile", + "github.com/pmezard/go-difflib/difflib", "github.com/r2d4/external-storage/lib/controller", "github.com/sirupsen/logrus", "github.com/spf13/cobra", diff --git a/Makefile b/Makefile index 7f53075b48..1c19af654a 100755 --- a/Makefile 
+++ b/Makefile @@ -13,8 +13,8 @@ # limitations under the License. # Bump these on release - and please check ISO_VERSION for correctness. -VERSION_MAJOR ?= 0 -VERSION_MINOR ?= 35 +VERSION_MAJOR ?= 1 +VERSION_MINOR ?= 0 VERSION_BUILD ?= 0 # Default to .0 for higher cache hit rates, as build increments typically don't require new ISO versions ISO_VERSION ?= v$(VERSION_MAJOR).$(VERSION_MINOR).0 diff --git a/README.md b/README.md index 9ccc11bb7b..51e6862b94 100644 --- a/README.md +++ b/README.md @@ -15,17 +15,15 @@ minikube implements a local Kubernetes cluster on macOS, Linux, and Windows. - +![screenshot](/images/start.jpg) -Our [goal](https://github.com/kubernetes/minikube/blob/master/docs/contributors/principles.md) is to enable fast local development and to support all Kubernetes features that fit. We hope you enjoy it! +Our [project goals](https://github.com/kubernetes/minikube/blob/master/docs/contributors/principles.md) are to enable fast local development and to support all Kubernetes features that fit. We hope you enjoy it! ## News +* 2019-03-27 - v1.0.0 released! [[download](https://github.com/kubernetes/minikube/releases/tag/v1.0.0)] [[release notes](https://github.com/kubernetes/minikube/blob/master/CHANGELOG.md#version-1000---2019-03-27)] * 2019-03-06 - v0.35.0 released! [[download](https://github.com/kubernetes/minikube/releases/tag/v0.35.0)] [[release notes](https://github.com/kubernetes/minikube/blob/master/CHANGELOG.md#version-0350---2019-03-06)] * 2019-02-16 - v0.34.1 released! [[download](https://github.com/kubernetes/minikube/releases/tag/v0.34.1)] [[release notes](https://github.com/kubernetes/minikube/blob/master/CHANGELOG.md#version-0341---2019-02-16)] -* 2019-02-15 - v0.34.0 released! 
[[download](https://github.com/kubernetes/minikube/releases/tag/v0.34.0)] [[release notes](https://github.com/kubernetes/minikube/blob/master/CHANGELOG.md#version-0340---2019-02-15)] -* 2019-01-18 - v0.33.1 released to address [CVE-2019-5736](https://www.openwall.com/lists/oss-security/2019/02/11/2) [[download](https://github.com/kubernetes/minikube/releases/tag/v0.33.1)] [[release notes](https://github.com/kubernetes/minikube/blob/master/CHANGELOG.md#version-0331---2019-01-18)] -* 2019-01-17 - v0.33.0 released! [[download](https://github.com/kubernetes/minikube/releases/tag/v0.33.0)] [[release notes](https://github.com/kubernetes/minikube/blob/master/CHANGELOG.md#version-0330---2019-01-17)] ## Features @@ -56,6 +54,8 @@ As well as developer-friendly features: ## Community +![Help Wanted!](/images/help_wanted.jpg) + minikube is a Kubernetes [#sig-cluster-lifecycle](https://github.com/kubernetes/community/tree/master/sig-cluster-lifecycle) project. * [**#minikube on Kubernetes Slack**](https://kubernetes.slack.com) - Live chat with minikube developers! 
diff --git a/cmd/minikube/cmd/cache.go b/cmd/minikube/cmd/cache.go index b6fdb246ca..7b403f4b90 100644 --- a/cmd/minikube/cmd/cache.go +++ b/cmd/minikube/cmd/cache.go @@ -66,20 +66,43 @@ var deleteCacheCmd = &cobra.Command{ }, } -// LoadCachedImagesInConfigFile loads the images currently in the config file (minikube start) -func LoadCachedImagesInConfigFile() error { +func imagesInConfigFile() ([]string, error) { configFile, err := config.ReadConfig() if err != nil { - return err + return nil, err } if values, ok := configFile[constants.Cache]; ok { var images []string for key := range values.(map[string]interface{}) { images = append(images, key) } - return machine.CacheAndLoadImages(images) + return images, nil } - return nil + return []string{}, nil +} + +// CacheImagesInConfigFile caches the images currently in the config file (minikube start) +func CacheImagesInConfigFile() error { + images, err := imagesInConfigFile() + if err != nil { + return err + } + if len(images) == 0 { + return nil + } + return machine.CacheImages(images, constants.ImageCacheDir) +} + +// LoadCachedImagesInConfigFile loads the images currently in the config file (minikube start) +func LoadCachedImagesInConfigFile() error { + images, err := imagesInConfigFile() + if err != nil { + return err + } + if len(images) == 0 { + return nil + } + return machine.CacheAndLoadImages(images) } func init() { diff --git a/cmd/minikube/cmd/completion.go b/cmd/minikube/cmd/completion.go index 7de31f9086..eb697b8834 100644 --- a/cmd/minikube/cmd/completion.go +++ b/cmd/minikube/cmd/completion.go @@ -105,8 +105,10 @@ func GenerateBashCompletion(w io.Writer, cmd *cobra.Command) error { // GenerateZshCompletion generates the completion for the zsh shell func GenerateZshCompletion(out io.Writer, cmd *cobra.Command) error { - zshInitialization := `#compdef minikube + zshAutoloadTag := `#compdef minikube +` + zshInitialization := ` __minikube_bash_source() { alias shopt=':' alias _expand=_bash_expand @@ -239,7 
+241,12 @@ __minikube_convert_bash_to_zsh() { <<'BASH_COMPLETION_EOF' ` - _, err := out.Write([]byte(boilerPlate)) + _, err := out.Write([]byte(zshAutoloadTag)) + if err != nil { + return err + } + + _, err = out.Write([]byte(boilerPlate)) if err != nil { return err } diff --git a/cmd/minikube/cmd/mount.go b/cmd/minikube/cmd/mount.go index 4260178135..36c7071837 100644 --- a/cmd/minikube/cmd/mount.go +++ b/cmd/minikube/cmd/mount.go @@ -45,8 +45,8 @@ var mountIP string var mountVersion string var mountType string var isKill bool -var uid int -var gid int +var uid string +var gid string var mSize int var options []string var mode uint @@ -98,6 +98,7 @@ var mountCmd = &cobra.Command{ } defer api.Close() host, err := api.Load(config.GetMachineName()) + if err != nil { exit.WithError("Error loading api", err) } @@ -144,8 +145,8 @@ var mountCmd = &cobra.Command{ console.OutStyle("mounting", "Mounting host path %s into VM as %s ...", hostPath, vmPath) console.OutStyle("mount-options", "Mount options:") console.OutStyle("option", "Type: %s", cfg.Type) - console.OutStyle("option", "UID: %d", cfg.UID) - console.OutStyle("option", "GID: %d", cfg.GID) + console.OutStyle("option", "UID: %s", cfg.UID) + console.OutStyle("option", "GID: %s", cfg.GID) console.OutStyle("option", "Version: %s", cfg.Version) console.OutStyle("option", "MSize: %d", cfg.MSize) console.OutStyle("option", "Mode: %o (%s)", cfg.Mode, cfg.Mode) @@ -163,22 +164,32 @@ var mountCmd = &cobra.Command{ go func() { console.OutStyle("fileserver", "Userspace file server: ") ufs.StartServer(net.JoinHostPort(ip.String(), strconv.Itoa(port)), debugVal, hostPath) + console.OutStyle("stopped", "Userspace file server is shutdown") wg.Done() }() } + // Use CommandRunner, as the native docker ssh service dies when Ctrl-C is received. + runner, err := machine.CommandRunner(host) + if err != nil { + exit.WithError("Failed to get command runner", err) + } + // Unmount if Ctrl-C or kill request is received. 
c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt, syscall.SIGTERM) go func() { for sig := range c { console.OutStyle("unmount", "Unmounting %s ...", vmPath) - cluster.Unmount(host, vmPath) + err := cluster.Unmount(runner, vmPath) + if err != nil { + console.ErrStyle("failure", "Failed unmount: %v", err) + } exit.WithCode(exit.Interrupted, "Exiting due to %s signal", sig) } }() - err = cluster.Mount(host, ip.String(), vmPath, cfg) + err = cluster.Mount(runner, ip.String(), vmPath, cfg) if err != nil { exit.WithError("mount failed", err) } @@ -194,8 +205,8 @@ func init() { mountCmd.Flags().StringVar(&mountType, "type", nineP, "Specify the mount filesystem type (supported types: 9p)") mountCmd.Flags().StringVar(&mountVersion, "9p-version", constants.DefaultMountVersion, "Specify the 9p version that the mount should use") mountCmd.Flags().BoolVar(&isKill, "kill", false, "Kill the mount process spawned by minikube start") - mountCmd.Flags().IntVar(&uid, "uid", 1001, "Default user id used for the mount") - mountCmd.Flags().IntVar(&gid, "gid", 1001, "Default group id used for the mount") + mountCmd.Flags().StringVar(&uid, "uid", "docker", "Default user id used for the mount") + mountCmd.Flags().StringVar(&gid, "gid", "docker", "Default group id used for the mount") mountCmd.Flags().UintVar(&mode, "mode", 0755, "File permissions used for the mount") mountCmd.Flags().StringSliceVar(&options, "options", []string{}, "Additional mount options, such as cache=fscache") mountCmd.Flags().IntVar(&mSize, "msize", constants.DefaultMsize, "The number of bytes to use for 9p packet payload") diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index f01fe44827..f7b7221c39 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -87,6 +87,7 @@ const ( hidden = "hidden" embedCerts = "embed-certs" noVTXCheck = "no-vtx-check" + downloadOnly = "download-only" ) var ( @@ -135,7 +136,8 @@ func init() { startCmd.Flags().String(networkPlugin, "", "The 
name of the network plugin") startCmd.Flags().Bool(enableDefaultCNI, false, "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \"--network-plugin=cni\"") startCmd.Flags().String(featureGates, "", "A set of key=value pairs that describe feature gates for alpha/experimental features.") - startCmd.Flags().Bool(cacheImages, true, "If true, cache docker images for the current bootstrapper and load them into the machine.") + startCmd.Flags().Bool(downloadOnly, false, "If true, only download and cache files for later use - don't install or start anything.") + startCmd.Flags().Bool(cacheImages, true, "If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --vm-driver=none.") startCmd.Flags().Var(&extraOptions, "extra-config", `A set of key=value pairs that describe configuration that may be passed to different components. The key should be '.' separated, and the first part before the dot is the component to apply the configuration to. @@ -174,6 +176,11 @@ func runStart(cmd *cobra.Command, args []string) { exit.WithError("Failed to generate config", err) } + if viper.GetString(vmDriver) == constants.DriverNone { + // Optimization: images will be persistently loaded into the host's container runtime, so no need to duplicate work. 
+ viper.Set(cacheImages, false) + } + var cacheGroup errgroup.Group beginCacheImages(&cacheGroup, k8sVersion) @@ -187,6 +194,22 @@ func runStart(cmd *cobra.Command, args []string) { if err != nil { exit.WithError("Failed to get machine client", err) } + + if viper.GetBool(downloadOnly) { + if err := cluster.CacheISO(config.MachineConfig); err != nil { + exit.WithError("Failed to cache ISO", err) + } + if err := doCacheBinaries(k8sVersion); err != nil { + exit.WithError("Failed to cache binaries", err) + } + waitCacheImages(&cacheGroup) + if err := CacheImagesInConfigFile(); err != nil { + exit.WithError("Failed to cache images", err) + } + console.OutStyle("check", "Download complete!") + return + } + host, preexisting := startHost(m, config.MachineConfig) ip := validateNetwork(host) @@ -201,13 +224,18 @@ func runStart(cmd *cobra.Command, args []string) { } cr := configureRuntimes(host, runner) - bs := prepareHostEnvironment(m, config.KubernetesConfig) + + // prepareHostEnvironment uses the downloaded images, so we need to wait for background task completion. 
waitCacheImages(&cacheGroup) + bs := prepareHostEnvironment(m, config.KubernetesConfig) + + // The kube config update must come before bootstrapping, otherwise health checks may use a stale IP kubeconfig := updateKubeConfig(host, &config) bootstrapCluster(bs, cr, runner, config.KubernetesConfig, preexisting) - validateCluster(bs, cr, runner, ip) + + apiserverPort := config.KubernetesConfig.NodePort + validateCluster(bs, cr, runner, ip, apiserverPort) configureMounts() if err = LoadCachedImagesInConfigFile(); err != nil { console.Failure("Unable to load cached images from config file.") @@ -245,6 +273,11 @@ func validateConfig() { } } +// doCacheBinaries caches Kubernetes binaries in the foreground +func doCacheBinaries(k8sVersion string) error { + return machine.CacheBinariesForBootstrapper(k8sVersion, viper.GetString(cmdcfg.Bootstrapper)) +} + // beginCacheImages caches Docker images in the background func beginCacheImages(g *errgroup.Group, k8sVersion string) { if !viper.GetBool(cacheImages) { @@ -256,6 +289,17 @@ func beginCacheImages(g *errgroup.Group, k8sVersion string) { }) } +// waitCacheImages blocks until the image cache jobs complete +func waitCacheImages(g *errgroup.Group) { + if !viper.GetBool(cacheImages) { + return + } + console.OutStyle("waiting", "Waiting for image downloads to complete ...") + if err := g.Wait(); err != nil { + glog.Errorln("Error caching images: ", err) + } +} + // generateConfig generates cfg.Config based on flags and supplied arguments func generateConfig(cmd *cobra.Command, k8sVersion string) (cfg.Config, error) { r, err := cruntime.New(cruntime.Config{Type: viper.GetString(containerRuntime)}) @@ -511,17 +555,6 @@ func configureRuntimes(h *host.Host, runner bootstrapper.CommandRunner) cruntime return cr } -// waitCacheImages blocks until the image cache jobs complete -func waitCacheImages(g *errgroup.Group) { - if !viper.GetBool(cacheImages) { - return - } - console.OutStyle("waiting", "Waiting for image downloads to
complete ...") - if err := g.Wait(); err != nil { - glog.Errorln("Error caching images: ", err) - } -} - // bootstrapCluster starts Kubernetes using the chosen bootstrapper func bootstrapCluster(bs bootstrapper.Bootstrapper, r cruntime.Manager, runner bootstrapper.CommandRunner, kc cfg.KubernetesConfig, preexisting bool) { console.OutStyle("pulling", "Pulling images required by Kubernetes %s ...", kc.KubernetesVersion) @@ -546,7 +579,7 @@ func bootstrapCluster(bs bootstrapper.Bootstrapper, r cruntime.Manager, runner b } // validateCluster validates that the cluster is well-configured and healthy -func validateCluster(bs bootstrapper.Bootstrapper, r cruntime.Manager, runner bootstrapper.CommandRunner, ip string) { +func validateCluster(bs bootstrapper.Bootstrapper, r cruntime.Manager, runner bootstrapper.CommandRunner, ip string, apiserverPort int) { console.OutStyle("verifying-noline", "Verifying component health ...") k8sStat := func() (err error) { st, err := bs.GetKubeletStatus() @@ -561,7 +594,7 @@ func validateCluster(bs bootstrapper.Bootstrapper, r cruntime.Manager, runner bo exit.WithLogEntries("kubelet checks failed", err, logs.FindProblems(r, bs, runner)) } aStat := func() (err error) { - st, err := bs.GetAPIServerStatus(net.ParseIP(ip)) + st, err := bs.GetAPIServerStatus(net.ParseIP(ip), apiserverPort) console.Out(".") if err != nil || st != state.Running.String() { return &pkgutil.RetriableError{Err: fmt.Errorf("apiserver status=%s err=%v", st, err)} diff --git a/cmd/minikube/cmd/status.go b/cmd/minikube/cmd/status.go index b39810318a..668ee9815a 100644 --- a/cmd/minikube/cmd/status.go +++ b/cmd/minikube/cmd/status.go @@ -92,7 +92,13 @@ var statusCmd = &cobra.Command{ glog.Errorln("Error host driver ip status:", err) } - apiserverSt, err = clusterBootstrapper.GetAPIServerStatus(ip) + apiserverPort, err := pkgutil.GetPortFromKubeConfig(util.GetKubeConfigPath(), config.GetMachineName()) + if err != nil { + // Fallback to presuming default apiserver port + 
apiserverPort = pkgutil.APIServerPort + } + + apiserverSt, err = clusterBootstrapper.GetAPIServerStatus(ip, apiserverPort) if err != nil { glog.Errorln("Error apiserver status:", err) } else if apiserverSt != state.Running.String() { diff --git a/deploy/addons/addon-manager.yaml b/deploy/addons/addon-manager.yaml index 56f6bb036d..534afc870f 100644 --- a/deploy/addons/addon-manager.yaml +++ b/deploy/addons/addon-manager.yaml @@ -19,13 +19,13 @@ metadata: namespace: kube-system labels: component: kube-addon-manager - version: v8.6 + version: v9.0 kubernetes.io/minikube-addons: addon-manager spec: hostNetwork: true containers: - name: kube-addon-manager - image: {{default "k8s.gcr.io" .ImageRepository}}/kube-addon-manager:v8.6 + image: {{default "k8s.gcr.io" .ImageRepository}}/kube-addon-manager:v9.0 env: - name: KUBECONFIG value: /var/lib/minikube/kubeconfig diff --git a/deploy/addons/registry/registry-rc.yaml b/deploy/addons/registry/registry-rc.yaml index aa0a59ee7a..bee3a42ff0 100644 --- a/deploy/addons/registry/registry-rc.yaml +++ b/deploy/addons/registry/registry-rc.yaml @@ -23,3 +23,6 @@ spec: ports: - containerPort: 5000 protocol: TCP + env: + - name: REGISTRY_STORAGE_DELETE_ENABLED + value: true diff --git a/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig b/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig index e9394abf9f..c770ca5a9f 100644 --- a/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig +++ b/deploy/iso/minikube-iso/board/coreos/minikube/linux_defconfig @@ -20,7 +20,6 @@ CONFIG_RT_GROUP_SCHED=y CONFIG_CGROUP_PIDS=y CONFIG_CGROUP_FREEZER=y CONFIG_CGROUP_HUGETLB=y -CONFIG_CGROUP_NET_PRIO=y CONFIG_CPUSETS=y CONFIG_CGROUP_DEVICE=y CONFIG_CGROUP_CPUACCT=y @@ -114,6 +113,7 @@ CONFIG_NETFILTER=y CONFIG_NETFILTER_NETLINK_ACCT=y CONFIG_NETFILTER_NETLINK_QUEUE=y CONFIG_NF_CONNTRACK=m +CONFIG_NF_CONNTRACK_ZONES=y CONFIG_NF_CONNTRACK_EVENTS=y CONFIG_NF_CONNTRACK_TIMEOUT=y CONFIG_NF_CONNTRACK_TIMESTAMP=y @@ -123,8 +123,6 
@@ CONFIG_NF_CONNTRACK_SANE=m CONFIG_NF_CONNTRACK_SIP=m CONFIG_NF_CONNTRACK_TFTP=m CONFIG_NF_CT_NETLINK=m -CONFIG_NF_SOCKET_IPV4=m -CONFIG_NF_SOCKET_IPV6=m CONFIG_NETFILTER_XT_SET=m CONFIG_NETFILTER_XT_TARGET_CHECKSUM=m CONFIG_NETFILTER_XT_TARGET_CLASSIFY=m @@ -229,6 +227,7 @@ CONFIG_IP_VS_SED=m CONFIG_IP_VS_NQ=m CONFIG_IP_VS_NFCT=y CONFIG_NF_CONNTRACK_IPV4=m +CONFIG_NF_SOCKET_IPV4=m CONFIG_NF_LOG_ARP=m CONFIG_IP_NF_IPTABLES=y CONFIG_IP_NF_FILTER=y @@ -240,6 +239,7 @@ CONFIG_IP_NF_TARGET_REDIRECT=m CONFIG_IP_NF_MANGLE=y CONFIG_IP_NF_RAW=m CONFIG_NF_CONNTRACK_IPV6=m +CONFIG_NF_SOCKET_IPV6=m CONFIG_IP6_NF_IPTABLES=y CONFIG_IP6_NF_MATCH_IPV6HEADER=y CONFIG_IP6_NF_FILTER=y @@ -277,6 +277,7 @@ CONFIG_NET_ACT_BPF=m CONFIG_OPENVSWITCH=m CONFIG_VSOCKETS=m CONFIG_VIRTIO_VSOCKETS=m +CONFIG_CGROUP_NET_PRIO=y CONFIG_BPF_JIT=y CONFIG_HAMRADIO=y CONFIG_CFG80211=y @@ -428,7 +429,6 @@ CONFIG_RTC_CLASS=y # CONFIG_RTC_HCTOSYS is not set CONFIG_DMADEVICES=y CONFIG_VIRT_DRIVERS=y -CONFIG_VBOXGUEST=m CONFIG_VIRTIO_PCI=y CONFIG_HYPERV=m CONFIG_HYPERV_UTILS=m diff --git a/deploy/iso/minikube-iso/package/Config.in b/deploy/iso/minikube-iso/package/Config.in index 2205935bac..d33f9e3356 100644 --- a/deploy/iso/minikube-iso/package/Config.in +++ b/deploy/iso/minikube-iso/package/Config.in @@ -7,7 +7,7 @@ menu "System tools" source "$BR2_EXTERNAL_MINIKUBE_PATH/package/docker-bin/Config.in" source "$BR2_EXTERNAL_MINIKUBE_PATH/package/cni-bin/Config.in" source "$BR2_EXTERNAL_MINIKUBE_PATH/package/cni-plugins-bin/Config.in" - source "$BR2_EXTERNAL_MINIKUBE_PATH/package/hv-kvp-daemon/Config.in" + source "$BR2_EXTERNAL_MINIKUBE_PATH/package/hyperv-daemons/Config.in" source "$BR2_EXTERNAL_MINIKUBE_PATH/package/gluster/Config.in" source "$BR2_EXTERNAL_MINIKUBE_PATH/package/vbox-guest/Config.in" source "$BR2_EXTERNAL_MINIKUBE_PATH/package/containerd-bin/Config.in" diff --git a/deploy/iso/minikube-iso/package/docker-bin/docker-bin.hash b/deploy/iso/minikube-iso/package/docker-bin/docker-bin.hash 
index 995d0c5eb1..09da6e4b90 100644 --- a/deploy/iso/minikube-iso/package/docker-bin/docker-bin.hash +++ b/deploy/iso/minikube-iso/package/docker-bin/docker-bin.hash @@ -7,3 +7,4 @@ sha256 692e1c72937f6214b1038def84463018d8e320c8eaf8530546c84c2f8f9c767d docker- sha256 1270dce1bd7e1838d62ae21d2505d87f16efc1d9074645571daaefdfd0c14054 docker-17.12.1-ce.tgz sha256 83be159cf0657df9e1a1a4a127d181725a982714a983b2bdcc0621244df93687 docker-18.06.1-ce.tgz sha256 a979d9a952fae474886c7588da692ee00684cb2421d2c633c7ed415948cf0b10 docker-18.06.2-ce.tgz +sha256 346f9394393ee8db5f8bd1e229ee9d90e5b36931bdd754308b2ae68884dd6822 docker-18.06.3-ce.tgz diff --git a/deploy/iso/minikube-iso/package/docker-bin/docker-bin.mk b/deploy/iso/minikube-iso/package/docker-bin/docker-bin.mk index 0e1fec3935..e1f34f8c73 100644 --- a/deploy/iso/minikube-iso/package/docker-bin/docker-bin.mk +++ b/deploy/iso/minikube-iso/package/docker-bin/docker-bin.mk @@ -4,7 +4,7 @@ # ################################################################################ -DOCKER_BIN_VERSION = 18.06.2-ce +DOCKER_BIN_VERSION = 18.06.3-ce DOCKER_BIN_SITE = https://download.docker.com/linux/static/stable/x86_64 DOCKER_BIN_SOURCE = docker-$(DOCKER_BIN_VERSION).tgz diff --git a/deploy/iso/minikube-iso/package/hv-kvp-daemon/Config.in b/deploy/iso/minikube-iso/package/hv-kvp-daemon/Config.in deleted file mode 100644 index b3c164b3b8..0000000000 --- a/deploy/iso/minikube-iso/package/hv-kvp-daemon/Config.in +++ /dev/null @@ -1,4 +0,0 @@ -config BR2_PACKAGE_HV_KVP_DAEMON - bool "hv-kvp-daemon" - default y - depends on BR2_x86_64 diff --git a/deploy/iso/minikube-iso/package/hv-kvp-daemon/hv_kvp_daemon.mk b/deploy/iso/minikube-iso/package/hv-kvp-daemon/hv_kvp_daemon.mk deleted file mode 100644 index 632bf36fc3..0000000000 --- a/deploy/iso/minikube-iso/package/hv-kvp-daemon/hv_kvp_daemon.mk +++ /dev/null @@ -1,30 +0,0 @@ -################################################################################ -# -# hv-kvp-daemon -# 
-################################################################################ - -HV_KVP_DAEMON_VERSION = 4.4.27 -HV_KVP_DAEMON_SITE = https://www.kernel.org/pub/linux/kernel/v${HV_KVP_DAEMON_VERSION%%.*}.x -HV_KVP_DAEMON_SOURCE = linux-$(HV_KVP_DAEMON_VERSION).tar.xz - -define HV_KVP_DAEMON_BUILD_CMDS - $(MAKE) $(TARGET_CONFIGURE_OPTS) -C $(@D)/tools/hv/ -endef - -define HV_KVP_DAEMON_INSTALL_TARGET_CMDS - $(INSTALL) -D -m 0755 \ - $(@D)/tools/hv/hv_kvp_daemon \ - $(TARGET_DIR)/usr/sbin/hv_kvp_daemon -endef - -define HV_KVP_DAEMON_INSTALL_INIT_SYSTEMD - $(INSTALL) -D -m 644 \ - $(BR2_EXTERNAL_MINIKUBE_PATH)/package/hv-kvp-daemon/hv_kvp_daemon.service \ - $(TARGET_DIR)/usr/lib/systemd/system/hv_kvp_daemon.service - - ln -fs /usr/lib/systemd/system/hv_kvp_daemon.service \ - $(TARGET_DIR)/etc/systemd/system/multi-user.target.wants/hv_kvp_daemon.service -endef - -$(eval $(generic-package)) diff --git a/deploy/iso/minikube-iso/package/hv-kvp-daemon/hv_kvp_daemon.service b/deploy/iso/minikube-iso/package/hv-kvp-daemon/hv_kvp_daemon.service deleted file mode 100644 index 8dc1336cfd..0000000000 --- a/deploy/iso/minikube-iso/package/hv-kvp-daemon/hv_kvp_daemon.service +++ /dev/null @@ -1,15 +0,0 @@ -[Unit] -Description=Hyper-V Key Value Pair Daemon -Documentation=https://technet.microsoft.com/en-us/library/dn798287(v=ws.11).aspx -ConditionVirtualization=microsoft - - -[Service] -Type=simple -Restart=always -RestartSec=3 - -ExecStart=/usr/sbin/hv_kvp_daemon -n - -[Install] -WantedBy=multi-user.target diff --git a/deploy/iso/minikube-iso/package/hyperv-daemons/70-hv_fcopy.rules b/deploy/iso/minikube-iso/package/hyperv-daemons/70-hv_fcopy.rules new file mode 100644 index 0000000000..a186817c38 --- /dev/null +++ b/deploy/iso/minikube-iso/package/hyperv-daemons/70-hv_fcopy.rules @@ -0,0 +1 @@ +SUBSYSTEM=="misc", KERNEL=="vmbus/hv_fcopy", TAG+="systemd", ENV{SYSTEMD_WANTS}+="hv_fcopy_daemon.service" diff --git a/deploy/iso/minikube-iso/package/hyperv-daemons/70-hv_kvp.rules 
b/deploy/iso/minikube-iso/package/hyperv-daemons/70-hv_kvp.rules new file mode 100644 index 0000000000..7193fdda45 --- /dev/null +++ b/deploy/iso/minikube-iso/package/hyperv-daemons/70-hv_kvp.rules @@ -0,0 +1 @@ +SUBSYSTEM=="misc", KERNEL=="vmbus/hv_kvp", TAG+="systemd", ENV{SYSTEMD_WANTS}+="hv_kvp_daemon.service" diff --git a/deploy/iso/minikube-iso/package/hyperv-daemons/70-hv_vss.rules b/deploy/iso/minikube-iso/package/hyperv-daemons/70-hv_vss.rules new file mode 100644 index 0000000000..1274dec1eb --- /dev/null +++ b/deploy/iso/minikube-iso/package/hyperv-daemons/70-hv_vss.rules @@ -0,0 +1 @@ +SUBSYSTEM=="misc", KERNEL=="vmbus/hv_vss", TAG+="systemd", ENV{SYSTEMD_WANTS}+="hv_vss_daemon.service" diff --git a/deploy/iso/minikube-iso/package/hyperv-daemons/Config.in b/deploy/iso/minikube-iso/package/hyperv-daemons/Config.in new file mode 100644 index 0000000000..834b31bdcb --- /dev/null +++ b/deploy/iso/minikube-iso/package/hyperv-daemons/Config.in @@ -0,0 +1,4 @@ +config BR2_PACKAGE_HYPERV_DAEMONS + bool "hyperv-daemons" + default y + depends on BR2_x86_64 diff --git a/deploy/iso/minikube-iso/package/hyperv-daemons/hv_fcopy_daemon.service b/deploy/iso/minikube-iso/package/hyperv-daemons/hv_fcopy_daemon.service new file mode 100644 index 0000000000..88fed6f5ad --- /dev/null +++ b/deploy/iso/minikube-iso/package/hyperv-daemons/hv_fcopy_daemon.service @@ -0,0 +1,10 @@ +[Unit] +Description=Hyper-V FCOPY Daemon +Documentation=https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/integration-services#hyper-v-guest-service-interface +BindsTo=sys-devices-virtual-misc-vmbus\x21hv_fcopy.device + +[Service] +ExecStart=/usr/sbin/hv_fcopy_daemon -n + +[Install] +WantedBy=multi-user.target diff --git a/deploy/iso/minikube-iso/package/hyperv-daemons/hv_kvp_daemon.service b/deploy/iso/minikube-iso/package/hyperv-daemons/hv_kvp_daemon.service new file mode 100644 index 0000000000..6cd7e9d6c1 --- /dev/null +++ 
b/deploy/iso/minikube-iso/package/hyperv-daemons/hv_kvp_daemon.service @@ -0,0 +1,10 @@ +[Unit] +Description=Hyper-V Key Value Pair Daemon +Documentation=https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/integration-services#hyper-v-data-exchange-service-kvp +BindsTo=sys-devices-virtual-misc-vmbus\x21hv_kvp.device + +[Service] +ExecStart=/usr/sbin/hv_kvp_daemon -n + +[Install] +WantedBy=multi-user.target diff --git a/deploy/iso/minikube-iso/package/hyperv-daemons/hv_vss_daemon.service b/deploy/iso/minikube-iso/package/hyperv-daemons/hv_vss_daemon.service new file mode 100644 index 0000000000..d12aa1dded --- /dev/null +++ b/deploy/iso/minikube-iso/package/hyperv-daemons/hv_vss_daemon.service @@ -0,0 +1,10 @@ +[Unit] +Description=Hyper-V VSS Daemon +Documentation=https://docs.microsoft.com/en-us/virtualization/hyper-v-on-windows/reference/integration-services#hyper-v-volume-shadow-copy-requestor +BindsTo=sys-devices-virtual-misc-vmbus\x21hv_vss.device + +[Service] +ExecStart=/usr/sbin/hv_vss_daemon -n + +[Install] +WantedBy=multi-user.target diff --git a/deploy/iso/minikube-iso/package/hyperv-daemons/hyperv-daemons.mk b/deploy/iso/minikube-iso/package/hyperv-daemons/hyperv-daemons.mk new file mode 100644 index 0000000000..d0fadf5b1c --- /dev/null +++ b/deploy/iso/minikube-iso/package/hyperv-daemons/hyperv-daemons.mk @@ -0,0 +1,67 @@ +################################################################################ +# +# hyperv-daemons +# +################################################################################ + +HYPERV_DAEMONS_VERSION = 4.15.1 +HYPERV_DAEMONS_SITE = https://www.kernel.org/pub/linux/kernel/v${HYPERV_DAEMONS_VERSION%%.*}.x +HYPERV_DAEMONS_SOURCE = linux-$(HYPERV_DAEMONS_VERSION).tar.xz + +define HYPERV_DAEMONS_BUILD_CMDS + $(MAKE) $(TARGET_CONFIGURE_OPTS) -C $(@D)/tools/hv/ +endef + +define HYPERV_DAEMONS_INSTALL_TARGET_CMDS + $(INSTALL) -D -m 0755 \ + $(@D)/tools/hv/hv_fcopy_daemon \ + 
$(TARGET_DIR)/usr/sbin/hv_fcopy_daemon + + $(INSTALL) -D -m 0755 \ + $(@D)/tools/hv/hv_kvp_daemon \ + $(TARGET_DIR)/usr/sbin/hv_kvp_daemon + $(INSTALL) -D -m 0755 \ + $(@D)/tools/hv/hv_get_dhcp_info.sh \ + $(TARGET_DIR)/usr/libexec/hypervkvpd/hv_get_dhcp_info + $(INSTALL) -D -m 0755 \ + $(@D)/tools/hv/hv_get_dns_info.sh \ + $(TARGET_DIR)/usr/libexec/hypervkvpd/hv_get_dns_info + $(INSTALL) -D -m 0755 \ + $(@D)/tools/hv/hv_set_ifconfig.sh \ + $(TARGET_DIR)/usr/libexec/hypervkvpd/hv_set_ifconfig + + $(INSTALL) -D -m 0755 \ + $(@D)/tools/hv/hv_vss_daemon \ + $(TARGET_DIR)/usr/sbin/hv_vss_daemon +endef + +define HYPERV_DAEMONS_INSTALL_INIT_SYSTEMD + $(INSTALL) -D -m 644 \ + $(BR2_EXTERNAL_MINIKUBE_PATH)/package/hyperv-daemons/70-hv_fcopy.rules \ + $(TARGET_DIR)/etc/udev/rules.d/70-hv_fcopy.rules + $(INSTALL) -D -m 644 \ + $(BR2_EXTERNAL_MINIKUBE_PATH)/package/hyperv-daemons/70-hv_kvp.rules \ + $(TARGET_DIR)/etc/udev/rules.d/70-hv_kvp.rules + $(INSTALL) -D -m 644 \ + $(BR2_EXTERNAL_MINIKUBE_PATH)/package/hyperv-daemons/70-hv_vss.rules \ + $(TARGET_DIR)/etc/udev/rules.d/70-hv_vss.rules + + $(INSTALL) -D -m 644 \ + $(BR2_EXTERNAL_MINIKUBE_PATH)/package/hyperv-daemons/hv_fcopy_daemon.service \ + $(TARGET_DIR)/usr/lib/systemd/system/hv_fcopy_daemon.service + $(INSTALL) -D -m 644 \ + $(BR2_EXTERNAL_MINIKUBE_PATH)/package/hyperv-daemons/hv_kvp_daemon.service \ + $(TARGET_DIR)/usr/lib/systemd/system/hv_kvp_daemon.service + $(INSTALL) -D -m 644 \ + $(BR2_EXTERNAL_MINIKUBE_PATH)/package/hyperv-daemons/hv_vss_daemon.service \ + $(TARGET_DIR)/usr/lib/systemd/system/hv_vss_daemon.service + + ln -fs /usr/lib/systemd/system/hv_fcopy_daemon.service \ + $(TARGET_DIR)/etc/systemd/system/multi-user.target.wants/hv_fcopy_daemon.service + ln -fs /usr/lib/systemd/system/hv_kvp_daemon.service \ + $(TARGET_DIR)/etc/systemd/system/multi-user.target.wants/hv_kvp_daemon.service + ln -fs /usr/lib/systemd/system/hv_vss_daemon.service \ + 
$(TARGET_DIR)/etc/systemd/system/multi-user.target.wants/hv_vss_daemon.service +endef + +$(eval $(generic-package)) diff --git a/deploy/minikube/releases.json b/deploy/minikube/releases.json index c3bcb33d50..bce02f2e6c 100644 --- a/deploy/minikube/releases.json +++ b/deploy/minikube/releases.json @@ -1,4 +1,12 @@ [ + { + "name": "v1.0.0", + "checksums": { + "darwin": "865bd3a13c1ad3b7732b2bea35b26fef150f2b3cbfc257c5d1835527d1b331e9", + "linux": "a315869f81aae782ecc6ff2a6de4d0ab3a17ca1840d1d8e6eea050a8dd05907f", + "windows": "a9e629911498ce774681504abe1797c1957e29d100d40c80c26ac54e22716a85" + } + }, { "name": "v0.35.0", "checksums": { diff --git a/docs/README.md b/docs/README.md index d76126f0fa..ad3a47b749 100644 --- a/docs/README.md +++ b/docs/README.md @@ -41,3 +41,5 @@ * **Accessing etcd from inside the cluster** ([accessing_etcd.md](accessing_etcd.md)) * **Networking** ([networking.md](networking.md)): FAQ about networking between the host and minikube VM + +* **Offline** ([offline.md](offline.md)): Details about using minikube offline diff --git a/docs/contributors/releasing_minikube.md b/docs/contributors/releasing_minikube.md index f35b8643ab..29822c494b 100644 --- a/docs/contributors/releasing_minikube.md +++ b/docs/contributors/releasing_minikube.md @@ -78,6 +78,10 @@ This step uses the git tag to publish new binaries to GCS and create a github re * For `ISO_SHA256`, run: `gsutil cat gs://minikube/iso/minikube-v.iso.sha256` * Click *Build* +## Check the release logs + +Once the release completes, click "Console Output" to look for anything unusual. This is typically where you will see the brew automation fail, for instance. + ## Check releases.json This file is used for auto-update notifications, but is not active until releases.json is copied to GCS. 
@@ -93,6 +97,8 @@ These are downstream packages that are being maintained by others and how to upg | Arch Linux AUR | | "Flag as package out-of-date" | Brew Cask | | The release job creates a new PR in [Homebrew/homebrew-cask](https://github.com/Homebrew/homebrew-cask) with an updated version and SHA256, double check that it's created. +WARNING: The Brew cask automation is error-prone. Please ensure that a PR was created. + ## Verification Verify release checksums by running`make check-release` diff --git a/docs/contributors/roadmap.md b/docs/contributors/roadmap.md index 3adb7ed9ab..58c729337e 100644 --- a/docs/contributors/roadmap.md +++ b/docs/contributors/roadmap.md @@ -6,45 +6,45 @@ Please send a PR to suggest any improvements to it. ## (#1) User-friendly and accessible -- Creation of a user-centric minikube website for installation & documentation -- Localized output to 5+ written languages -- Make minikube usable in environments with challenging connectivity requirements -- Support lightweight deployment methods for environments where VM's are impractical -- Add offline support +- [ ] Creation of a user-centric minikube website for installation & documentation +- [ ] Localized output to 5+ written languages +- [ ] Make minikube usable in environments with challenging connectivity requirements +- [ ] Support lightweight deployment methods for environments where VM's are impractical +- [x] Add offline support ## (#2) Inclusive and community-driven -- Increase community involvement in planning and decision making -- Make the continuous integration and release infrastructure publicly available -- Double the number of active maintainers +- [x] Increase community involvement in planning and decision making +- [ ] Make the continuous integration and release infrastructure publicly available +- [x] Double the number of active maintainers ## (#3) Cross-platform -- Simplified installation process across all supported platforms -- Users should never need to separately 
install supporting binaries +- [ ] Simplified installation process across all supported platforms +- [ ] Users should never need to separately install supporting binaries ## (#4) Support all Kubernetes features -- Add multi-node support +- [ ] Add multi-node support ## (#5) High-fidelity -- Reduce guest VM overhead by 50% -- Disable swap in the guest VM +- [ ] Reduce guest VM overhead by 50% +- [x] Disable swap in the guest VM ## (#6) Compatible with all supported Kubernetes releases -- Continuous Integration testing across all supported Kubernetes releases -- Automatic PR generation for updating the default Kubernetes release minikube uses +- [x] Continuous Integration testing across all supported Kubernetes releases +- [ ] Automatic PR generation for updating the default Kubernetes release minikube uses ## (#7) Support for all Kubernetes-friendly container runtimes -- Run all integration tests across all supported container runtimes -- Support for Kata Containers (help wanted!) +- [x] Run all integration tests across all supported container runtimes +- [ ] Support for Kata Containers (help wanted!) 
## (#8) Stable and easy to debug -- Pre-flight error checks for common connectivity and configuration errors -- Improve the `minikube status` command so that it can diagnose common issues -- Mark all features not covered by continuous integration as `experimental` -- Stabilize and improve profiles support (AKA multi-cluster) +- [ ] Pre-flight error checks for common connectivity and configuration errors +- [ ] Improve the `minikube status` command so that it can diagnose common issues +- [ ] Mark all features not covered by continuous integration as `experimental` +- [x] Stabilize and improve profiles support (AKA multi-cluster) diff --git a/docs/drivers.md b/docs/drivers.md index 3b30775c4b..1ac2ba1c74 100644 --- a/docs/drivers.md +++ b/docs/drivers.md @@ -95,30 +95,19 @@ minikube start ## Hyperkit driver -The Hyperkit driver will eventually replace the existing xhyve driver. -It is built from the minikube source tree, and uses [moby/hyperkit](http://github.com/moby/hyperkit) as a Go library. - -To install the hyperkit driver via brew: +Install the [hyperkit](http://github.com/moby/hyperkit) VM manager using [brew](https://brew.sh): ```shell -brew install docker-machine-driver-hyperkit - -# docker-machine-driver-hyperkit need root owner and uid -sudo chown root:wheel /usr/local/opt/docker-machine-driver-hyperkit/bin/docker-machine-driver-hyperkit -sudo chmod u+s /usr/local/opt/docker-machine-driver-hyperkit/bin/docker-machine-driver-hyperkit +brew install hyperkit ``` -To install the hyperkit driver manually: +Then install the most recent version of minikube's fork of the hyperkit driver: ```shell curl -LO https://storage.googleapis.com/minikube/releases/latest/docker-machine-driver-hyperkit \ && sudo install -o root -g wheel -m 4755 docker-machine-driver-hyperkit /usr/local/bin/ ``` -The hyperkit driver currently requires running as root to use the vmnet framework to setup networking. 
- -If you encountered errors like `Could not find hyperkit executable`, you might need to install [Docker for Mac](https://store.docker.com/editions/community/docker-ce-desktop-mac) - If you are using [dnsmasq](http://www.thekelleys.org.uk/dnsmasq/doc.html) in your setup and cluster creation fails (stuck at kube-dns initialization) you might need to add `listen-address=192.168.64.1` to `dnsmasq.conf`. *Note: If `dnsmasq.conf` contains `listen-address=127.0.0.1` kubernetes discovers dns at 127.0.0.1:53 and tries to use it using bridge ip address, but dnsmasq replies only to requests from 127.0.0.1* @@ -129,18 +118,12 @@ To use the driver: minikube start --vm-driver hyperkit ``` -or, to use hyperkit as a default driver: +or, to use hyperkit as a default driver for minikube: ```shell minikube config set vm-driver hyperkit ``` -and run minikube as usual: - -```shell -minikube start -``` - ## HyperV driver Hyper-v users may need to create a new external network switch as described [here](https://docs.docker.com/machine/drivers/hyper-v/). This step may prevent a problem in which `minikube start` hangs indefinitely, unable to ssh into the minikube virtual machine. In this add, add the `--hyperv-virtual-switch=switch-name` argument to the `minikube start` command. diff --git a/docs/env_vars.md b/docs/env_vars.md index f531d4615e..01d8a2176b 100644 --- a/docs/env_vars.md +++ b/docs/env_vars.md @@ -13,7 +13,7 @@ Some features can only be accessed by environment variables, here is a list of t * **MINIKUBE_HOME** - (string) sets the path for the .minikube directory that minikube uses for state/configuration -* **MINIKUBE_IN_COLOR** - (bool) manually sets whether or not emoji and colors should appear in minikube. Set to false or 0 to disable this feature, true or 1 to force it to be turned on. +* **MINIKUBE_IN_STYLE** - (bool) manually sets whether or not emoji and colors should appear in minikube. 
Set to false or 0 to disable this feature, true or 1 to force it to be turned on. * **MINIKUBE_WANTUPDATENOTIFICATION** - (bool) sets whether the user wants an update notification for new minikube versions @@ -34,7 +34,7 @@ To make the exported variables permanent: ### Example: Disabling emoji ```shell -export MINIKUBE_IN_COLOR=false +export MINIKUBE_IN_STYLE=false minikube start ``` diff --git a/docs/offline.md b/docs/offline.md new file mode 100644 index 0000000000..cf0db6245f --- /dev/null +++ b/docs/offline.md @@ -0,0 +1,42 @@ +# Offline support in minikube + +As of v1.0, `minikube start` is offline compatible out of the box. Here are some implementation details to help systems integrators: + +## Requirements + +* On the initial run for a given Kubernetes release, `minikube start` must have access to the internet, or a configured `--image-repository` to pull from. + +## Cache location + +* `~/.minikube/cache` - Top-level folder +* `~/.minikube/cache/iso` - VM ISO image. Typically updated once per major minikube release. +* `~/.minikube/cache/images` - Docker images used by Kubernetes. +* `~/.minikube/` - Kubernetes binaries, such as `kubeadm` and `kubelet` + +## Sharing the minikube cache + +For offline use on other hosts, one can copy the contents of `~/.minikube/cache`. 
As of the v1.0 release, this directory +contains 685MB of data: + +``` +cache/iso/minikube-v1.0.0.iso +cache/images/gcr.io/k8s-minikube/storage-provisioner_v1.8.1 +cache/images/k8s.gcr.io/k8s-dns-sidecar-amd64_1.14.13 +cache/images/k8s.gcr.io/k8s-dns-dnsmasq-nanny-amd64_1.14.13 +cache/images/k8s.gcr.io/kubernetes-dashboard-amd64_v1.10.1 +cache/images/k8s.gcr.io/kube-scheduler_v1.14.0 +cache/images/k8s.gcr.io/coredns_1.3.1 +cache/images/k8s.gcr.io/kube-controller-manager_v1.14.0 +cache/images/k8s.gcr.io/kube-apiserver_v1.14.0 +cache/images/k8s.gcr.io/pause_3.1 +cache/images/k8s.gcr.io/etcd_3.3.10 +cache/images/k8s.gcr.io/kube-addon-manager_v9.0 +cache/images/k8s.gcr.io/k8s-dns-kube-dns-amd64_1.14.13 +cache/images/k8s.gcr.io/kube-proxy_v1.14.0 +cache/v1.14.0/kubeadm +cache/v1.14.0/kubelet +``` + +If any of these files exist, minikube will copy them into the VM directly rather than pulling them from the internet. + + diff --git a/docs/vmdriver-none.md b/docs/vmdriver-none.md index 888020ab6d..68265a1bc0 100644 --- a/docs/vmdriver-none.md +++ b/docs/vmdriver-none.md @@ -92,7 +92,7 @@ Some environment variables may be useful for using the `none` driver: * **MINIKUBE_HOME**: Saves all files to this directory instead of $HOME * **MINIKUBE_WANTUPDATENOTIFICATION**: Toggles the notification that your version of minikube is obsolete * **MINIKUBE_WANTREPORTERRORPROMPT**: Toggles the error reporting prompt -* **MINIKUBE_IN_COLOR**: Toggles color output and emoji usage +* **MINIKUBE_IN_STYLE**: Toggles color output and emoji usage ## Known Issues @@ -101,4 +101,12 @@ Some environment variables may be useful for using the `none` driver: * minikube with the `none` driver has a confusing permissions model, as some commands need to be run as root ("start"), and others by a regular user ("dashboard") * CoreDNS detects resolver loop, goes into CrashloopBackoff - [#3511](https://github.com/kubernetes/minikube/issues/3511) * Some versions of Linux have a version of docker that is 
newer then what Kubernetes expects. To overwrite this, run minikube with the following parameters: `sudo -E minikube start --vm-driver=none --kubernetes-version v1.11.8 --extra-config kubeadm.ignore-preflight-errors=SystemVerification` +* On Ubuntu 18.04 (and probably others), because of how `systemd-resolve` is configured by default, one needs to bypass the default `resolv.conf` file and use a different one instead. + - In this case, you should use this file: `/run/systemd/resolve/resolv.conf` + - `sudo -E minikube --vm-driver=none start --extra-config=kubelet.resolv-conf=/run/systemd/resolve/resolv.conf` + - Apparently, though, if `resolv.conf` is too big (about 10 lines!!!), one gets the following error: `Waiting for pods: apiserver proxy! Error restarting cluster: wait: waiting for k8s-app=kube-proxy: timed out waiting for the condition` + - This error happens in Kubernetes 1.11.x, 1.12.x and 1.13.x, but *not* in 1.14.x + - If that's your case, try this: + - `grep -E "^nameserver" /run/systemd/resolve/resolv.conf |head -n 3 > /tmp/resolv.conf && sudo -E minikube --vm-driver=none start --extra-config=kubelet.resolv-conf=/tmp/resolv.conf` + * [Full list of open 'none' driver issues](https://github.com/kubernetes/minikube/labels/co%2Fnone-driver) diff --git a/hack/conformance_tests.sh b/hack/conformance_tests.sh new file mode 100755 index 0000000000..eb1564e58e --- /dev/null +++ b/hack/conformance_tests.sh @@ -0,0 +1,71 @@ +#!/bin/sh + +# Copyright 2019 The Kubernetes Authors All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# This script executes the Kubernetes conformance tests in accordance with: +# https://github.com/cncf/k8s-conformance/blob/master/instructions.md +# +# Usage: +# conformance_tests.sh <path to minikube> <optional: extra minikube start flags> +# +# Example: +# conformance_tests.sh ./out/minikube --vm-driver=hyperkit +set -ex -o pipefail + +readonly PROFILE_NAME="k8sconformance" +readonly MINIKUBE=${1:-./out/minikube} +shift || true +readonly START_ARGS=$* + +# Requires a fully running Kubernetes cluster. +"${MINIKUBE}" delete -p "${PROFILE_NAME}" || true +"${MINIKUBE}" start -p "${PROFILE_NAME}" $START_ARGS +"${MINIKUBE}" status -p "${PROFILE_NAME}" +kubectl get pods --all-namespaces + +go get -u -v github.com/heptio/sonobuoy +sonobuoy run --wait +outdir="$(mktemp -d)" +sonobuoy retrieve "${outdir}" + +cwd=$(pwd) + +cd "${outdir}" +mkdir ./results; tar xzf *.tar.gz -C ./results + +version=$(${MINIKUBE} version | cut -d" " -f3) + +mkdir minikube-${version} +cd minikube-${version} + +cat <<EOF >PRODUCT.yaml +vendor: minikube +name: minikube +version: ${version} +website_url: https://github.com/kubernetes/minikube +repo_url: https://github.com/kubernetes/minikube +documentation_url: https://github.com/kubernetes/minikube/blob/master/docs/README.md +product_logo_url: https://raw.githubusercontent.com/kubernetes/minikube/master/images/logo/logo.svg +type: installer +description: minikube runs a local Kubernetes cluster on macOS, Linux, and Windows. +EOF + +cat <<EOF >README.md +./hack/conformance_tests.sh $MINIKUBE $START_ARGS +EOF + +cp ../results/plugins/e2e/results/* . +cd .. 
+cp -r minikube-${version} ${cwd} diff --git a/hack/jenkins/release_github_page.sh b/hack/jenkins/release_github_page.sh index 30117b7c1c..0d8c4be0f8 100755 --- a/hack/jenkins/release_github_page.sh +++ b/hack/jenkins/release_github_page.sh @@ -63,10 +63,10 @@ curl -Lo minikube https://storage.googleapis.com/minikube/releases/${TAGNAME}/mi Feel free to leave off \`\`\`sudo cp minikube /usr/local/bin/ && rm minikube\`\`\` if you would like to add minikube to your path manually. ### Debian Package (.deb) [Experimental] -Download the \`minikube_${DEB_VERSION}.deb\` file, and install it using \`sudo dpkg -i minikube_$(DEB_VERSION).deb\` +Download the \`minikube_${DEB_VERSION}.deb\` file, and install it using \`sudo dpkg -i minikube_${DEB_VERSION}.deb\` ### RPM Package (.rpm) [Experimental] -Download the \`minikube-${RPM_VERSION}.rpm\` file, and install it using \`sudo rpm -i minikube-$(RPM_VERSION).rpm\` +Download the \`minikube-${RPM_VERSION}.rpm\` file, and install it using \`sudo rpm -i minikube-${RPM_VERSION}.rpm\` ### Windows [Experimental] Download the \`minikube-windows-amd64.exe\` file, rename it to \`minikube.exe\` and add it to your path. 
diff --git a/images/help_wanted.jpg b/images/help_wanted.jpg new file mode 100644 index 0000000000..4805bb0c38 Binary files /dev/null and b/images/help_wanted.jpg differ diff --git a/images/start.jpg b/images/start.jpg index 90b21ae5fa..2747c6169d 100644 Binary files a/images/start.jpg and b/images/start.jpg differ diff --git a/pkg/minikube/bootstrapper/bootstrapper.go b/pkg/minikube/bootstrapper/bootstrapper.go index e7d9ed4bbb..e69d790582 100644 --- a/pkg/minikube/bootstrapper/bootstrapper.go +++ b/pkg/minikube/bootstrapper/bootstrapper.go @@ -43,7 +43,7 @@ type Bootstrapper interface { LogCommands(LogOptions) map[string]string SetupCerts(cfg config.KubernetesConfig) error GetKubeletStatus() (string, error) - GetAPIServerStatus(net.IP) (string, error) + GetAPIServerStatus(net.IP, int) (string, error) } const ( @@ -51,6 +51,16 @@ const ( BootstrapperTypeKubeadm = "kubeadm" ) +// GetCachedBinaryList returns the list of binaries +func GetCachedBinaryList(bootstrapper string) []string { + switch bootstrapper { + case BootstrapperTypeKubeadm: + return constants.GetKubeadmCachedBinaries() + default: + return []string{} + } +} + // GetCachedImageList returns the list of images for a version func GetCachedImageList(imageRepository string, version string, bootstrapper string) []string { switch bootstrapper { diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go index 5ad6e781ae..b3f95a4e07 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm.go @@ -18,13 +18,10 @@ package kubeadm import ( "bytes" - "crypto" "crypto/tls" "fmt" "net" "net/http" - "os" - "path" "strings" "time" @@ -32,7 +29,6 @@ import ( "github.com/docker/machine/libmachine" "github.com/docker/machine/libmachine/state" "github.com/golang/glog" - "github.com/jimmidyson/go-download" "github.com/pkg/errors" "golang.org/x/sync/errgroup" "k8s.io/apimachinery/pkg/labels" @@ -122,8 +118,8 @@ func (k 
*Bootstrapper) GetKubeletStatus() (string, error) { } // GetAPIServerStatus returns the api-server status -func (k *Bootstrapper) GetAPIServerStatus(ip net.IP) (string, error) { - url := fmt.Sprintf("https://%s:%d/healthz", ip, util.APIServerPort) +func (k *Bootstrapper) GetAPIServerStatus(ip net.IP, apiserverPort int) (string, error) { + url := fmt.Sprintf("https://%s:%d/healthz", ip, apiserverPort) // To avoid: x509: certificate signed by unknown authority tr := &http.Transport{ TLSClientConfig: &tls.Config{InsecureSkipVerify: true}, diff --git a/pkg/minikube/bootstrapper/kubeadm/kubeadm_test.go b/pkg/minikube/bootstrapper/kubeadm/kubeadm_test.go index 7f780f9e8a..a43fe782c5 100644 --- a/pkg/minikube/bootstrapper/kubeadm/kubeadm_test.go +++ b/pkg/minikube/bootstrapper/kubeadm/kubeadm_test.go @@ -17,458 +17,209 @@ limitations under the License. package kubeadm import ( - "strings" + "fmt" + "io/ioutil" "testing" - "github.com/google/go-cmp/cmp" + "github.com/pmezard/go-difflib/difflib" "k8s.io/minikube/pkg/minikube/config" + "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/pkg/minikube/cruntime" "k8s.io/minikube/pkg/util" ) +const ( + newMajor = "v1.14.0" + recentMajor = "v1.13.0" + oldMajor = "v1.12.0" + obsoleteMajor = "v1.10.0" +) + func TestGenerateKubeletConfig(t *testing.T) { tests := []struct { description string cfg config.KubernetesConfig - expectedCfg string + expected string shouldErr bool }{ { description: "docker runtime", cfg: config.KubernetesConfig{ NodeIP: "192.168.1.100", - KubernetesVersion: "v1.1.0", + KubernetesVersion: recentMajor, NodeName: "minikube", ContainerRuntime: "docker", }, - expectedCfg: ` + expected: ` [Unit] Wants=docker.socket [Service] ExecStart= -ExecStart=/usr/bin/kubelet --allow-privileged=true --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cadvisor-port=0 --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 
--cluster-domain=cluster.local --container-runtime=docker --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --pod-manifest-path=/etc/kubernetes/manifests --require-kubeconfig=true +ExecStart=/usr/bin/kubelet --allow-privileged=true --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --pod-manifest-path=/etc/kubernetes/manifests [Install] `, }, { - description: "cri runtime", + description: "newest cri runtime", cfg: config.KubernetesConfig{ NodeIP: "192.168.1.100", - KubernetesVersion: "v1.1.0", + KubernetesVersion: constants.NewestKubernetesVersion, NodeName: "minikube", ContainerRuntime: "cri-o", }, - expectedCfg: ` + expected: ` [Unit] Wants=crio.service [Service] ExecStart= -ExecStart=/usr/bin/kubelet --allow-privileged=true --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cadvisor-port=0 --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --pod-manifest-path=/etc/kubernetes/manifests --require-kubeconfig=true --runtime-request-timeout=15m +ExecStart=/usr/bin/kubelet --allow-privileged=true --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube 
--image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m [Install] `, }, { - description: "docker runtime with custom image repository", + description: "docker with custom image repository", cfg: config.KubernetesConfig{ NodeIP: "192.168.1.100", - KubernetesVersion: "v1.1.0", + KubernetesVersion: constants.DefaultKubernetesVersion, NodeName: "minikube", ContainerRuntime: "docker", ImageRepository: "docker-proxy-image.io/google_containers", }, - expectedCfg: ` + expected: ` [Unit] Wants=docker.socket [Service] ExecStart= -ExecStart=/usr/bin/kubelet --allow-privileged=true --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cadvisor-port=0 --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --container-runtime=docker --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --pod-infra-container-image=docker-proxy-image.io/google_containers//pause-amd64:3.0 --pod-manifest-path=/etc/kubernetes/manifests --require-kubeconfig=true +ExecStart=/usr/bin/kubelet --allow-privileged=true --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-dns=10.96.0.10 --cluster-domain=cluster.local --container-runtime=docker --fail-swap-on=false --hostname-override=minikube --kubeconfig=/etc/kubernetes/kubelet.conf --pod-infra-container-image=docker-proxy-image.io/google_containers/pause:3.1 --pod-manifest-path=/etc/kubernetes/manifests [Install] `, }, } - for _, test := range tests { - t.Run(test.description, func(t *testing.T) { - runtime, err := cruntime.New(cruntime.Config{Type: test.cfg.ContainerRuntime}) + for _, tc := range tests { + t.Run(tc.description, func(t *testing.T) { + runtime, err := cruntime.New(cruntime.Config{Type: 
tc.cfg.ContainerRuntime}) if err != nil { t.Fatalf("runtime: %v", err) } - actualCfg, err := NewKubeletConfig(test.cfg, runtime) - if err != nil && !test.shouldErr { + got, err := NewKubeletConfig(tc.cfg, runtime) + if err != nil && !tc.shouldErr { t.Errorf("got unexpected error generating config: %v", err) return } - if err == nil && test.shouldErr { - t.Errorf("expected error but got none, config: %s", actualCfg) + if err == nil && tc.shouldErr { + t.Errorf("expected error but got none, config: %s", got) return } - if diff := cmp.Diff(test.expectedCfg, actualCfg); diff != "" { - t.Errorf("actual config does not match expected. (-want +got)\n%s", diff) + + diff, err := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(tc.expected), + B: difflib.SplitLines(got), + FromFile: "Expected", + ToFile: "Got", + Context: 1, + }) + if err != nil { + t.Fatalf("diff error: %v", err) + } + if diff != "" { + t.Errorf("unexpected diff:\n%s", diff) } }) } } func TestGenerateConfig(t *testing.T) { - tests := []struct { - description string - cfg config.KubernetesConfig - expectedCfg string - shouldErr bool - }{ - { - description: "no extra args", - cfg: config.KubernetesConfig{ - NodeIP: "192.168.1.100", - KubernetesVersion: "v1.10.0", - NodeName: "minikube", - }, - expectedCfg: `apiVersion: kubeadm.k8s.io/v1alpha1 -kind: MasterConfiguration -noTaintMaster: true -api: - advertiseAddress: 192.168.1.100 - bindPort: 8443 - controlPlaneEndpoint: localhost -kubernetesVersion: v1.10.0 -certificatesDir: /var/lib/minikube/certs/ -networking: - serviceSubnet: 10.96.0.0/12 -etcd: - dataDir: /data/minikube -nodeName: minikube -apiServerExtraArgs: - admission-control: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" -`, + extraOpts := util.ExtraOptionSlice{ + util.ExtraOption{ + Component: Apiserver, + Key: "fail-no-swap", + Value: 
"true", }, - { - description: "extra args all components", - cfg: config.KubernetesConfig{ - NodeIP: "192.168.1.101", - KubernetesVersion: "v1.10.0-alpha.0", - NodeName: "extra-args-minikube", - ExtraOptions: util.ExtraOptionSlice{ - util.ExtraOption{ - Component: Apiserver, - Key: "fail-no-swap", - Value: "true", - }, - util.ExtraOption{ - Component: ControllerManager, - Key: "kube-api-burst", - Value: "32", - }, - util.ExtraOption{ - Component: Scheduler, - Key: "scheduler-name", - Value: "mini-scheduler", - }, - }, - }, - expectedCfg: `apiVersion: kubeadm.k8s.io/v1alpha1 -kind: MasterConfiguration -noTaintMaster: true -api: - advertiseAddress: 192.168.1.101 - bindPort: 8443 - controlPlaneEndpoint: localhost -kubernetesVersion: v1.10.0-alpha.0 -certificatesDir: /var/lib/minikube/certs/ -networking: - serviceSubnet: 10.96.0.0/12 -etcd: - dataDir: /data/minikube -nodeName: extra-args-minikube -apiServerExtraArgs: - admission-control: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" - fail-no-swap: "true" -controllerManagerExtraArgs: - kube-api-burst: "32" -schedulerExtraArgs: - scheduler-name: "mini-scheduler" -`, + util.ExtraOption{ + Component: ControllerManager, + Key: "kube-api-burst", + Value: "32", }, - { - description: "extra args, v1.14.0", - cfg: config.KubernetesConfig{ - NodeIP: "192.168.1.101", - KubernetesVersion: "v1.14.0-beta1", - NodeName: "extra-args-minikube-114", - ExtraOptions: util.ExtraOptionSlice{ - util.ExtraOption{ - Component: Apiserver, - Key: "fail-no-swap", - Value: "true", - }, - util.ExtraOption{ - Component: ControllerManager, - Key: "kube-api-burst", - Value: "32", - }, - }, - }, - expectedCfg: `apiVersion: kubeadm.k8s.io/v1beta1 -kind: InitConfiguration -localAPIEndpoint: - advertiseAddress: 192.168.1.101 - bindPort: 8443 -bootstrapTokens: -- groups: - - 
system:bootstrappers:kubeadm:default-node-token - ttl: 24h0m0s - usages: - - signing - - authentication -nodeRegistration: - criSocket: /var/run/dockershim.sock - name: extra-args-minikube-114 - taints: [] ---- -apiVersion: kubeadm.k8s.io/v1beta1 -kind: ClusterConfiguration -apiServer: - extraArgs: - enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"fail-no-swap: "true" -controllerManager: - extraArgs: - kube-api-burst: "32" -certificatesDir: /var/lib/minikube/certs/ -clusterName: kubernetes -controlPlaneEndpoint: localhost:8443 -dns: - type: CoreDNS -etcd: - local: - dataDir: /data/minikube -kubernetesVersion: v1.14.0-beta1 -networking: - dnsDomain: cluster.local - podSubnet: "" - serviceSubnet: 10.96.0.0/12 ---- -apiVersion: kubelet.config.k8s.io/v1beta1 -kind: KubeletConfiguration -imageGCHighThresholdPercent: 100 -evictionHard: - nodefs.available: "0%" - nodefs.inodesFree: "0%" - imagefs.available: "0%" -`, - }, { - description: "two extra args for one component", - cfg: config.KubernetesConfig{ - NodeIP: "192.168.1.101", - KubernetesVersion: "v1.10.0-alpha.0", - NodeName: "extra-args-minikube", - ExtraOptions: util.ExtraOptionSlice{ - util.ExtraOption{ - Component: Apiserver, - Key: "fail-no-swap", - Value: "true", - }, - util.ExtraOption{ - Component: Apiserver, - Key: "kube-api-burst", - Value: "32", - }, - }, - }, - expectedCfg: `apiVersion: kubeadm.k8s.io/v1alpha1 -kind: MasterConfiguration -noTaintMaster: true -api: - advertiseAddress: 192.168.1.101 - bindPort: 8443 - controlPlaneEndpoint: localhost -kubernetesVersion: v1.10.0-alpha.0 -certificatesDir: /var/lib/minikube/certs/ -networking: - serviceSubnet: 10.96.0.0/12 -etcd: - dataDir: /data/minikube -nodeName: extra-args-minikube -apiServerExtraArgs: - admission-control: 
"Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" - fail-no-swap: "true" - kube-api-burst: "32" -`, - }, - { - description: "enable feature gates", - cfg: config.KubernetesConfig{ - NodeIP: "192.168.1.101", - KubernetesVersion: "v1.10.0-alpha.0", - NodeName: "extra-args-minikube", - FeatureGates: "HugePages=true,OtherFeature=false", - }, - expectedCfg: `apiVersion: kubeadm.k8s.io/v1alpha1 -kind: MasterConfiguration -noTaintMaster: true -api: - advertiseAddress: 192.168.1.101 - bindPort: 8443 - controlPlaneEndpoint: localhost -kubernetesVersion: v1.10.0-alpha.0 -certificatesDir: /var/lib/minikube/certs/ -networking: - serviceSubnet: 10.96.0.0/12 -etcd: - dataDir: /data/minikube -nodeName: extra-args-minikube -apiServerExtraArgs: - admission-control: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" - feature-gates: "HugePages=true,OtherFeature=false" -controllerManagerExtraArgs: - feature-gates: "HugePages=true,OtherFeature=false" -kubeadmExtraArgs: - feature-gates: "HugePages=true,OtherFeature=false" -schedulerExtraArgs: - feature-gates: "HugePages=true,OtherFeature=false" -`, - }, - { - description: "enable feature gates and extra config", - cfg: config.KubernetesConfig{ - NodeIP: "192.168.1.101", - KubernetesVersion: "v1.10.0-alpha.0", - NodeName: "extra-args-minikube", - FeatureGates: "HugePages=true,OtherFeature=false", - ExtraOptions: util.ExtraOptionSlice{ - util.ExtraOption{ - Component: Apiserver, - Key: "fail-no-swap", - Value: "true", - }, - }, - }, - expectedCfg: `apiVersion: kubeadm.k8s.io/v1alpha1 -kind: MasterConfiguration -noTaintMaster: true -api: - advertiseAddress: 192.168.1.101 - bindPort: 8443 - controlPlaneEndpoint: localhost -kubernetesVersion: v1.10.0-alpha.0 
-certificatesDir: /var/lib/minikube/certs/ -networking: - serviceSubnet: 10.96.0.0/12 -etcd: - dataDir: /data/minikube -nodeName: extra-args-minikube -apiServerExtraArgs: - admission-control: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" - fail-no-swap: "true" - feature-gates: "HugePages=true,OtherFeature=false" -controllerManagerExtraArgs: - feature-gates: "HugePages=true,OtherFeature=false" -kubeadmExtraArgs: - feature-gates: "HugePages=true,OtherFeature=false" -schedulerExtraArgs: - feature-gates: "HugePages=true,OtherFeature=false" -`, - }, - { - // Unknown components should fail silently - description: "unknown component", - cfg: config.KubernetesConfig{ - NodeIP: "192.168.1.101", - KubernetesVersion: "v1.8.0-alpha.0", - NodeName: "extra-args-minikube", - ExtraOptions: util.ExtraOptionSlice{ - util.ExtraOption{ - Component: "not-a-real-component", - Key: "killswitch", - Value: "true", - }, - }, - }, - shouldErr: true, - }, - { - description: "custom api server port", - cfg: config.KubernetesConfig{ - NodeIP: "192.168.1.100", - NodePort: 18443, - KubernetesVersion: "v1.10.0", - NodeName: "minikube", - }, - expectedCfg: `apiVersion: kubeadm.k8s.io/v1alpha1 -kind: MasterConfiguration -noTaintMaster: true -api: - advertiseAddress: 192.168.1.100 - bindPort: 18443 - controlPlaneEndpoint: localhost -kubernetesVersion: v1.10.0 -certificatesDir: /var/lib/minikube/certs/ -networking: - serviceSubnet: 10.96.0.0/12 -etcd: - dataDir: /data/minikube -nodeName: minikube -apiServerExtraArgs: - admission-control: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" -`, - }, - { - description: "custom image repository", - cfg: config.KubernetesConfig{ - NodeIP: "192.168.1.100", - KubernetesVersion: 
"v1.10.0", - NodeName: "minikube", - ImageRepository: "docker-proxy-image.io/google_containers", - }, - expectedCfg: `apiVersion: kubeadm.k8s.io/v1alpha1 -kind: MasterConfiguration -noTaintMaster: true -api: - advertiseAddress: 192.168.1.100 - bindPort: 8443 - controlPlaneEndpoint: localhost -kubernetesVersion: v1.10.0 -certificatesDir: /var/lib/minikube/certs/ -networking: - serviceSubnet: 10.96.0.0/12 -etcd: - dataDir: /data/minikube -nodeName: minikube -imageRepository: docker-proxy-image.io/google_containers -apiServerExtraArgs: - admission-control: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" -`, + util.ExtraOption{ + Component: Scheduler, + Key: "scheduler-name", + Value: "mini-scheduler", }, } - for _, test := range tests { - runtime, err := cruntime.New(cruntime.Config{Type: "docker"}) - if err != nil { - t.Fatalf("runtime: %v", err) + // Test version policy: Last 4 major releases (slightly looser than our general policy) + versions := map[string]string{ + "default": constants.DefaultKubernetesVersion, + "new": newMajor, + "recent": recentMajor, + "old": oldMajor, + "obsolete": obsoleteMajor, + } + + tests := []struct { + name string + runtime string + shouldErr bool + cfg config.KubernetesConfig + }{ + {"default", "docker", false, config.KubernetesConfig{}}, + {"containerd", "containerd", false, config.KubernetesConfig{}}, + {"crio", "crio", false, config.KubernetesConfig{}}, + {"options", "docker", false, config.KubernetesConfig{ExtraOptions: extraOpts}}, + {"crio-options-gates", "crio", false, config.KubernetesConfig{ExtraOptions: extraOpts, FeatureGates: "a=b"}}, + {"unknown-omponent", "docker", true, config.KubernetesConfig{ExtraOptions: util.ExtraOptionSlice{util.ExtraOption{Component: "not-a-real-component", Key: "killswitch", Value: "true"}}}}, + {"containerd-api-port", "containerd", false, 
config.KubernetesConfig{NodePort: 12345}}, + {"image-repository", "docker", false, config.KubernetesConfig{ImageRepository: "test/repo"}}, + } + for vname, version := range versions { + for _, tc := range tests { + runtime, err := cruntime.New(cruntime.Config{Type: tc.runtime}) + if err != nil { + t.Fatalf("runtime: %v", err) + } + tname := tc.name + "__" + vname + t.Run(tname, func(t *testing.T) { + cfg := tc.cfg + cfg.NodeIP = "1.1.1.1" + cfg.NodeName = "mk" + cfg.KubernetesVersion = version + + got, err := generateConfig(cfg, runtime) + if err != nil && !tc.shouldErr { + t.Fatalf("got unexpected error generating config: %v", err) + } + if err == nil && tc.shouldErr { + t.Fatalf("expected error but got none, config: %s", got) + } + if tc.shouldErr { + return + } + expected, err := ioutil.ReadFile(fmt.Sprintf("testdata/%s.yaml", tname)) + if err != nil { + t.Fatalf("unable to read testdata: %v", err) + } + diff, err := difflib.GetUnifiedDiffString(difflib.UnifiedDiff{ + A: difflib.SplitLines(string(expected)), + B: difflib.SplitLines(got), + FromFile: "Expected", + ToFile: "Got", + Context: 1, + }) + if err != nil { + t.Fatalf("diff error: %v", err) + } + if diff != "" { + t.Errorf("unexpected diff:\n%s\n===== [RAW OUTPUT] =====\n%s", diff, got) + } + }) } - - t.Run(test.description, func(t *testing.T) { - gotCfg, err := generateConfig(test.cfg, runtime) - if err != nil && !test.shouldErr { - t.Errorf("got unexpected error generating config: %v", err) - return - } - if err == nil && test.shouldErr { - t.Errorf("expected error but got none, config: %s", gotCfg) - return - } - - // cmp.Diff doesn't present diffs of multi-line text well - gotSplit := strings.Split(gotCfg, "\n") - wantSplit := strings.Split(test.expectedCfg, "\n") - if diff := cmp.Diff(gotSplit, wantSplit); diff != "" { - t.Errorf("unexpected diff: (-want +got)\n%s\ngot: %s\n", diff, gotCfg) - } - }) } } diff --git a/pkg/minikube/bootstrapper/kubeadm/templates.go 
b/pkg/minikube/bootstrapper/kubeadm/templates.go index 7df240f120..e5a63efc44 100644 --- a/pkg/minikube/bootstrapper/kubeadm/templates.go +++ b/pkg/minikube/bootstrapper/kubeadm/templates.go @@ -56,12 +56,12 @@ apiEndpoint: advertiseAddress: {{.AdvertiseAddress}} bindPort: {{.APIServerPort}} bootstrapTokens: -- groups: - - system:bootstrappers:kubeadm:default-node-token - ttl: 24h0m0s - usages: - - signing - - authentication + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication nodeRegistration: criSocket: {{if .CRISocket}}{{.CRISocket}}{{else}}/var/run/dockershim.sock{{end}} name: {{.NodeName}} @@ -72,9 +72,10 @@ kind: ClusterConfiguration {{if .ImageRepository}}imageRepository: {{.ImageRepository}} {{end}}{{range .ExtraArgs}}{{.Component}}ExtraArgs:{{range $i, $val := printMapInOrder .Options ": " }} {{$val}}{{end}} -{{end}}{{if .FeatureArgs}}featureGates: {{range $i, $val := .FeatureArgs}} +{{end -}} +{{if .FeatureArgs}}featureGates: {{range $i, $val := .FeatureArgs}} {{$i}}: {{$val}}{{end}} -{{end}} +{{end -}} certificatesDir: {{.CertDir}} clusterName: kubernetes controlPlaneEndpoint: localhost:{{.APIServerPort}} @@ -104,12 +105,12 @@ localAPIEndpoint: advertiseAddress: {{.AdvertiseAddress}} bindPort: {{.APIServerPort}} bootstrapTokens: -- groups: - - system:bootstrappers:kubeadm:default-node-token - ttl: 24h0m0s - usages: - - signing - - authentication + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication nodeRegistration: criSocket: {{if .CRISocket}}{{.CRISocket}}{{else}}/var/run/dockershim.sock{{end}} name: {{.NodeName}} @@ -117,13 +118,16 @@ nodeRegistration: --- apiVersion: kubeadm.k8s.io/v1beta1 kind: ClusterConfiguration -{{if .ImageRepository}}imageRepository: {{.ImageRepository}} +{{ if .ImageRepository}}imageRepository: {{.ImageRepository}} {{end}}{{range .ExtraArgs}}{{.Component}}: extraArgs: - {{range $i, $val := 
printMapInOrder .Options ": " }}{{$val}}{{end}} -{{end}}{{if .FeatureArgs}}featureGates: {{range $i, $val := .FeatureArgs}} - {{$i}}: {{$val}}{{end}} +{{- range $i, $val := printMapInOrder .Options ": " }} + {{$val}} +{{- end}} {{end -}} +{{if .FeatureArgs}}featureGates: +{{range $i, $val := .FeatureArgs}}{{$i}}: {{$val}} +{{end -}}{{end -}} certificatesDir: {{.CertDir}} clusterName: kubernetes controlPlaneEndpoint: localhost:{{.APIServerPort}} diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/containerd-api-port__default.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/containerd-api-port__default.yaml new file mode 100644 index 0000000000..cc96f136ac --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/containerd-api-port__default.yaml @@ -0,0 +1,43 @@ +apiVersion: kubeadm.k8s.io/v1beta1 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 12345 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /run/containerd/containerd.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta1 +kind: ClusterConfiguration +apiServer: + extraArgs: + enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:12345 +dns: + type: CoreDNS +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.14.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git 
a/pkg/minikube/bootstrapper/kubeadm/testdata/containerd-api-port__new.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/containerd-api-port__new.yaml new file mode 100644 index 0000000000..cc96f136ac --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/containerd-api-port__new.yaml @@ -0,0 +1,43 @@ +apiVersion: kubeadm.k8s.io/v1beta1 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 12345 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /run/containerd/containerd.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta1 +kind: ClusterConfiguration +apiServer: + extraArgs: + enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:12345 +dns: + type: CoreDNS +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.14.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/containerd-api-port__obsolete.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/containerd-api-port__obsolete.yaml new file mode 100644 index 0000000000..40c91637f3 --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/containerd-api-port__obsolete.yaml @@ -0,0 +1,17 @@ +apiVersion: kubeadm.k8s.io/v1alpha1 +kind: MasterConfiguration +noTaintMaster: true +api: + advertiseAddress: 1.1.1.1 + bindPort: 12345 + controlPlaneEndpoint: localhost +kubernetesVersion: v1.10.0 +certificatesDir: 
/var/lib/minikube/certs/ +networking: + serviceSubnet: 10.96.0.0/12 +etcd: + dataDir: /data/minikube +nodeName: mk +criSocket: /run/containerd/containerd.sock +apiServerExtraArgs: + admission-control: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/containerd-api-port__old.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/containerd-api-port__old.yaml new file mode 100644 index 0000000000..7d84e36369 --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/containerd-api-port__old.yaml @@ -0,0 +1,39 @@ +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: InitConfiguration +apiEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 12345 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /run/containerd/containerd.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: ClusterConfiguration +apiServerExtraArgs: + enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:12345 +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.12.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/containerd-api-port__recent.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/containerd-api-port__recent.yaml new file mode 100644 index 
0000000000..e776755631 --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/containerd-api-port__recent.yaml @@ -0,0 +1,39 @@ +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: InitConfiguration +apiEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 12345 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /run/containerd/containerd.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: ClusterConfiguration +apiServerExtraArgs: + enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:12345 +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.13.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/containerd__default.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/containerd__default.yaml new file mode 100644 index 0000000000..147a6ccbec --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/containerd__default.yaml @@ -0,0 +1,43 @@ +apiVersion: kubeadm.k8s.io/v1beta1 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /run/containerd/containerd.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta1 +kind: ClusterConfiguration +apiServer: + extraArgs: + 
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +dns: + type: CoreDNS +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.14.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/containerd__new.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/containerd__new.yaml new file mode 100644 index 0000000000..147a6ccbec --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/containerd__new.yaml @@ -0,0 +1,43 @@ +apiVersion: kubeadm.k8s.io/v1beta1 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /run/containerd/containerd.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta1 +kind: ClusterConfiguration +apiServer: + extraArgs: + enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +dns: + type: CoreDNS +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.14.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration 
+imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/containerd__obsolete.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/containerd__obsolete.yaml new file mode 100644 index 0000000000..b2d73f912b --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/containerd__obsolete.yaml @@ -0,0 +1,17 @@ +apiVersion: kubeadm.k8s.io/v1alpha1 +kind: MasterConfiguration +noTaintMaster: true +api: + advertiseAddress: 1.1.1.1 + bindPort: 8443 + controlPlaneEndpoint: localhost +kubernetesVersion: v1.10.0 +certificatesDir: /var/lib/minikube/certs/ +networking: + serviceSubnet: 10.96.0.0/12 +etcd: + dataDir: /data/minikube +nodeName: mk +criSocket: /run/containerd/containerd.sock +apiServerExtraArgs: + admission-control: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/containerd__old.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/containerd__old.yaml new file mode 100644 index 0000000000..4e4b1e4830 --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/containerd__old.yaml @@ -0,0 +1,39 @@ +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: InitConfiguration +apiEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /run/containerd/containerd.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: ClusterConfiguration +apiServerExtraArgs: + enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" 
+certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.12.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/containerd__recent.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/containerd__recent.yaml new file mode 100644 index 0000000000..a852561fbe --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/containerd__recent.yaml @@ -0,0 +1,39 @@ +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: InitConfiguration +apiEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /run/containerd/containerd.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: ClusterConfiguration +apiServerExtraArgs: + enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.13.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/crio-options-gates__default.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/crio-options-gates__default.yaml new file mode 
100644 index 0000000000..b593f313d3 --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/crio-options-gates__default.yaml @@ -0,0 +1,56 @@ +apiVersion: kubeadm.k8s.io/v1beta1 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/crio/crio.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta1 +kind: ClusterConfiguration +apiServer: + extraArgs: + enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" + fail-no-swap: "true" + feature-gates: "a=b" +controllerManager: + extraArgs: + feature-gates: "a=b" + kube-api-burst: "32" +kubeadm: + extraArgs: + feature-gates: "a=b" +scheduler: + extraArgs: + feature-gates: "a=b" + scheduler-name: "mini-scheduler" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +dns: + type: CoreDNS +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.14.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/crio-options-gates__new.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/crio-options-gates__new.yaml new file mode 100644 index 0000000000..b593f313d3 --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/crio-options-gates__new.yaml @@ -0,0 +1,56 @@ +apiVersion: kubeadm.k8s.io/v1beta1 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - 
groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/crio/crio.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta1 +kind: ClusterConfiguration +apiServer: + extraArgs: + enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" + fail-no-swap: "true" + feature-gates: "a=b" +controllerManager: + extraArgs: + feature-gates: "a=b" + kube-api-burst: "32" +kubeadm: + extraArgs: + feature-gates: "a=b" +scheduler: + extraArgs: + feature-gates: "a=b" + scheduler-name: "mini-scheduler" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +dns: + type: CoreDNS +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.14.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/crio-options-gates__obsolete.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/crio-options-gates__obsolete.yaml new file mode 100644 index 0000000000..0635574f4c --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/crio-options-gates__obsolete.yaml @@ -0,0 +1,27 @@ +apiVersion: kubeadm.k8s.io/v1alpha1 +kind: MasterConfiguration +noTaintMaster: true +api: + advertiseAddress: 1.1.1.1 + bindPort: 8443 + controlPlaneEndpoint: localhost +kubernetesVersion: v1.10.0 +certificatesDir: /var/lib/minikube/certs/ +networking: + serviceSubnet: 10.96.0.0/12 +etcd: + dataDir: /data/minikube +nodeName: mk +criSocket: /var/run/crio/crio.sock +apiServerExtraArgs: + admission-control: 
"Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" + fail-no-swap: "true" + feature-gates: "a=b" +controllerManagerExtraArgs: + feature-gates: "a=b" + kube-api-burst: "32" +kubeadmExtraArgs: + feature-gates: "a=b" +schedulerExtraArgs: + feature-gates: "a=b" + scheduler-name: "mini-scheduler" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/crio-options-gates__old.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/crio-options-gates__old.yaml new file mode 100644 index 0000000000..cf204e307a --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/crio-options-gates__old.yaml @@ -0,0 +1,49 @@ +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: InitConfiguration +apiEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/crio/crio.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: ClusterConfiguration +apiServerExtraArgs: + enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" + fail-no-swap: "true" + feature-gates: "a=b" +controllerManagerExtraArgs: + feature-gates: "a=b" + kube-api-burst: "32" +kubeadmExtraArgs: + feature-gates: "a=b" +schedulerExtraArgs: + feature-gates: "a=b" + scheduler-name: "mini-scheduler" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.12.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +evictionHard: + 
nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/crio-options-gates__recent.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/crio-options-gates__recent.yaml new file mode 100644 index 0000000000..49e8c06a9f --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/crio-options-gates__recent.yaml @@ -0,0 +1,49 @@ +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: InitConfiguration +apiEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/crio/crio.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: ClusterConfiguration +apiServerExtraArgs: + enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" + fail-no-swap: "true" + feature-gates: "a=b" +controllerManagerExtraArgs: + feature-gates: "a=b" + kube-api-burst: "32" +kubeadmExtraArgs: + feature-gates: "a=b" +schedulerExtraArgs: + feature-gates: "a=b" + scheduler-name: "mini-scheduler" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.13.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/crio__default.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/crio__default.yaml new file mode 100644 index 0000000000..66dac1c8af --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/crio__default.yaml @@ -0,0 
+1,43 @@ +apiVersion: kubeadm.k8s.io/v1beta1 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/crio/crio.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta1 +kind: ClusterConfiguration +apiServer: + extraArgs: + enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +dns: + type: CoreDNS +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.14.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/crio__new.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/crio__new.yaml new file mode 100644 index 0000000000..66dac1c8af --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/crio__new.yaml @@ -0,0 +1,43 @@ +apiVersion: kubeadm.k8s.io/v1beta1 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/crio/crio.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta1 +kind: ClusterConfiguration +apiServer: + extraArgs: + enable-admission-plugins: 
"NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +dns: + type: CoreDNS +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.14.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/crio__obsolete.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/crio__obsolete.yaml new file mode 100644 index 0000000000..f68df3416f --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/crio__obsolete.yaml @@ -0,0 +1,17 @@ +apiVersion: kubeadm.k8s.io/v1alpha1 +kind: MasterConfiguration +noTaintMaster: true +api: + advertiseAddress: 1.1.1.1 + bindPort: 8443 + controlPlaneEndpoint: localhost +kubernetesVersion: v1.10.0 +certificatesDir: /var/lib/minikube/certs/ +networking: + serviceSubnet: 10.96.0.0/12 +etcd: + dataDir: /data/minikube +nodeName: mk +criSocket: /var/run/crio/crio.sock +apiServerExtraArgs: + admission-control: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/crio__old.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/crio__old.yaml new file mode 100644 index 0000000000..033a7aefe2 --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/crio__old.yaml @@ -0,0 +1,39 @@ +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: InitConfiguration +apiEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - 
system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/crio/crio.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: ClusterConfiguration +apiServerExtraArgs: + enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.12.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/crio__recent.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/crio__recent.yaml new file mode 100644 index 0000000000..57b9f1b122 --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/crio__recent.yaml @@ -0,0 +1,39 @@ +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: InitConfiguration +apiEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/crio/crio.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: ClusterConfiguration +apiServerExtraArgs: + enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +etcd: + local: + dataDir: /data/minikube 
+kubernetesVersion: v1.13.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/default__default.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/default__default.yaml new file mode 100644 index 0000000000..d88c0efebf --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/default__default.yaml @@ -0,0 +1,43 @@ +apiVersion: kubeadm.k8s.io/v1beta1 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/dockershim.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta1 +kind: ClusterConfiguration +apiServer: + extraArgs: + enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +dns: + type: CoreDNS +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.14.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/default__new.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/default__new.yaml new file mode 100644 index 0000000000..d88c0efebf --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/default__new.yaml @@ -0,0 +1,43 @@ 
+apiVersion: kubeadm.k8s.io/v1beta1 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/dockershim.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta1 +kind: ClusterConfiguration +apiServer: + extraArgs: + enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +dns: + type: CoreDNS +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.14.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/default__obsolete.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/default__obsolete.yaml new file mode 100644 index 0000000000..9f0ea00701 --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/default__obsolete.yaml @@ -0,0 +1,16 @@ +apiVersion: kubeadm.k8s.io/v1alpha1 +kind: MasterConfiguration +noTaintMaster: true +api: + advertiseAddress: 1.1.1.1 + bindPort: 8443 + controlPlaneEndpoint: localhost +kubernetesVersion: v1.10.0 +certificatesDir: /var/lib/minikube/certs/ +networking: + serviceSubnet: 10.96.0.0/12 +etcd: + dataDir: /data/minikube +nodeName: mk +apiServerExtraArgs: + admission-control: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" diff --git 
a/pkg/minikube/bootstrapper/kubeadm/testdata/default__old.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/default__old.yaml new file mode 100644 index 0000000000..d9ba1066a5 --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/default__old.yaml @@ -0,0 +1,39 @@ +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: InitConfiguration +apiEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/dockershim.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: ClusterConfiguration +apiServerExtraArgs: + enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.12.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/default__recent.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/default__recent.yaml new file mode 100644 index 0000000000..db611e2263 --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/default__recent.yaml @@ -0,0 +1,39 @@ +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: InitConfiguration +apiEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/dockershim.sock + name: mk + taints: [] +--- 
+apiVersion: kubeadm.k8s.io/v1alpha3 +kind: ClusterConfiguration +apiServerExtraArgs: + enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.13.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/image-repository__default.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/image-repository__default.yaml new file mode 100644 index 0000000000..bf53791b71 --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/image-repository__default.yaml @@ -0,0 +1,44 @@ +apiVersion: kubeadm.k8s.io/v1beta1 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/dockershim.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta1 +kind: ClusterConfiguration +imageRepository: test/repo +apiServer: + extraArgs: + enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +dns: + type: CoreDNS +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.14.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 
10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/image-repository__new.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/image-repository__new.yaml new file mode 100644 index 0000000000..bf53791b71 --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/image-repository__new.yaml @@ -0,0 +1,44 @@ +apiVersion: kubeadm.k8s.io/v1beta1 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/dockershim.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta1 +kind: ClusterConfiguration +imageRepository: test/repo +apiServer: + extraArgs: + enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +dns: + type: CoreDNS +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.14.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/image-repository__obsolete.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/image-repository__obsolete.yaml new file mode 100644 index 0000000000..cebbc924ed --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/image-repository__obsolete.yaml @@ -0,0 
+1,17 @@ +apiVersion: kubeadm.k8s.io/v1alpha1 +kind: MasterConfiguration +noTaintMaster: true +api: + advertiseAddress: 1.1.1.1 + bindPort: 8443 + controlPlaneEndpoint: localhost +kubernetesVersion: v1.10.0 +certificatesDir: /var/lib/minikube/certs/ +networking: + serviceSubnet: 10.96.0.0/12 +etcd: + dataDir: /data/minikube +nodeName: mk +imageRepository: test/repo +apiServerExtraArgs: + admission-control: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/image-repository__old.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/image-repository__old.yaml new file mode 100644 index 0000000000..bf00349785 --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/image-repository__old.yaml @@ -0,0 +1,40 @@ +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: InitConfiguration +apiEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/dockershim.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: ClusterConfiguration +imageRepository: test/repo +apiServerExtraArgs: + enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.12.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + 
imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/image-repository__recent.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/image-repository__recent.yaml new file mode 100644 index 0000000000..037dd6add2 --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/image-repository__recent.yaml @@ -0,0 +1,40 @@ +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: InitConfiguration +apiEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/dockershim.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: ClusterConfiguration +imageRepository: test/repo +apiServerExtraArgs: + enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.13.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/options__default.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/options__default.yaml new file mode 100644 index 0000000000..a85a2447b2 --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/options__default.yaml @@ -0,0 +1,50 @@ +apiVersion: kubeadm.k8s.io/v1beta1 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - 
authentication +nodeRegistration: + criSocket: /var/run/dockershim.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta1 +kind: ClusterConfiguration +apiServer: + extraArgs: + enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" + fail-no-swap: "true" +controllerManager: + extraArgs: + kube-api-burst: "32" +scheduler: + extraArgs: + scheduler-name: "mini-scheduler" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +dns: + type: CoreDNS +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.14.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/options__new.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/options__new.yaml new file mode 100644 index 0000000000..a85a2447b2 --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/options__new.yaml @@ -0,0 +1,50 @@ +apiVersion: kubeadm.k8s.io/v1beta1 +kind: InitConfiguration +localAPIEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/dockershim.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1beta1 +kind: ClusterConfiguration +apiServer: + extraArgs: + enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" + fail-no-swap: "true" +controllerManager: + extraArgs: 
+ kube-api-burst: "32" +scheduler: + extraArgs: + scheduler-name: "mini-scheduler" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +dns: + type: CoreDNS +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.14.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +imageGCHighThresholdPercent: 100 +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/options__obsolete.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/options__obsolete.yaml new file mode 100644 index 0000000000..97fcc2e56e --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/options__obsolete.yaml @@ -0,0 +1,21 @@ +apiVersion: kubeadm.k8s.io/v1alpha1 +kind: MasterConfiguration +noTaintMaster: true +api: + advertiseAddress: 1.1.1.1 + bindPort: 8443 + controlPlaneEndpoint: localhost +kubernetesVersion: v1.10.0 +certificatesDir: /var/lib/minikube/certs/ +networking: + serviceSubnet: 10.96.0.0/12 +etcd: + dataDir: /data/minikube +nodeName: mk +apiServerExtraArgs: + admission-control: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" + fail-no-swap: "true" +controllerManagerExtraArgs: + kube-api-burst: "32" +schedulerExtraArgs: + scheduler-name: "mini-scheduler" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/options__old.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/options__old.yaml new file mode 100644 index 0000000000..3e9052efa8 --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/options__old.yaml @@ -0,0 +1,44 @@ +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: InitConfiguration +apiEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + 
- system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/dockershim.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: ClusterConfiguration +apiServerExtraArgs: + enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" + fail-no-swap: "true" +controllerManagerExtraArgs: + kube-api-burst: "32" +schedulerExtraArgs: + scheduler-name: "mini-scheduler" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.12.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/bootstrapper/kubeadm/testdata/options__recent.yaml b/pkg/minikube/bootstrapper/kubeadm/testdata/options__recent.yaml new file mode 100644 index 0000000000..f1df66954d --- /dev/null +++ b/pkg/minikube/bootstrapper/kubeadm/testdata/options__recent.yaml @@ -0,0 +1,44 @@ +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: InitConfiguration +apiEndpoint: + advertiseAddress: 1.1.1.1 + bindPort: 8443 +bootstrapTokens: + - groups: + - system:bootstrappers:kubeadm:default-node-token + ttl: 24h0m0s + usages: + - signing + - authentication +nodeRegistration: + criSocket: /var/run/dockershim.sock + name: mk + taints: [] +--- +apiVersion: kubeadm.k8s.io/v1alpha3 +kind: ClusterConfiguration +apiServerExtraArgs: + enable-admission-plugins: "Initializers,NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota" + 
fail-no-swap: "true" +controllerManagerExtraArgs: + kube-api-burst: "32" +schedulerExtraArgs: + scheduler-name: "mini-scheduler" +certificatesDir: /var/lib/minikube/certs/ +clusterName: kubernetes +controlPlaneEndpoint: localhost:8443 +etcd: + local: + dataDir: /data/minikube +kubernetesVersion: v1.13.0 +networking: + dnsDomain: cluster.local + podSubnet: "" + serviceSubnet: 10.96.0.0/12 +--- +apiVersion: kubelet.config.k8s.io/v1beta1 +kind: KubeletConfiguration +evictionHard: + nodefs.available: "0%" + nodefs.inodesFree: "0%" + imagefs.available: "0%" diff --git a/pkg/minikube/cluster/cluster.go b/pkg/minikube/cluster/cluster.go index f5d4dd7db4..96bc3740e8 100644 --- a/pkg/minikube/cluster/cluster.go +++ b/pkg/minikube/cluster/cluster.go @@ -58,6 +58,16 @@ func init() { ssh.SetDefaultClient(ssh.Native) } +// CacheISO downloads and caches ISO. +func CacheISO(config cfg.MachineConfig) error { + if config.VMDriver != "none" { + if err := config.Downloader.CacheMinikubeISOFromURL(config.MinikubeISO); err != nil { + return err + } + } + return nil +} + // StartHost starts a host VM. 
func StartHost(api libmachine.API, config cfg.MachineConfig) (*host.Host, error) { exists, err := api.Exists(cfg.GetMachineName()) @@ -291,10 +301,9 @@ func createHost(api libmachine.API, config cfg.MachineConfig) (*host.Host, error exit.WithError("error getting driver", err) } - if config.VMDriver != "none" { - if err := config.Downloader.CacheMinikubeISOFromURL(config.MinikubeISO); err != nil { - return nil, errors.Wrap(err, "unable to cache ISO") - } + err = CacheISO(config) + if err != nil { + return nil, errors.Wrap(err, "unable to cache ISO") } driver := def.ConfigCreator(config) diff --git a/pkg/minikube/cluster/mount.go b/pkg/minikube/cluster/mount.go index 41181330b8..1610b214bc 100644 --- a/pkg/minikube/cluster/mount.go +++ b/pkg/minikube/cluster/mount.go @@ -23,6 +23,7 @@ import ( "strconv" "strings" + "github.com/golang/glog" "github.com/pkg/errors" ) @@ -31,9 +32,9 @@ type MountConfig struct { // Type is the filesystem type (Typically 9p) Type string // UID is the User ID which this path will be mounted as - UID int + UID string // GID is the Group ID which this path will be mounted as - GID int + GID string // Version is the 9P protocol version. 
Valid options: 9p2000, 9p200.u, 9p2000.L Version string // MSize is the number of bytes to use for 9p packet payload @@ -46,30 +47,59 @@ type MountConfig struct { Options map[string]string } -// hostRunner is the subset of host.Host used for mounting -type hostRunner interface { - RunSSHCommand(cmd string) (string, error) +// mountRunner is the subset of CommandRunner used for mounting +type mountRunner interface { + CombinedOutput(string) (string, error) } // Mount runs the mount command from the 9p client on the VM to the 9p server on the host -func Mount(h hostRunner, source string, target string, c *MountConfig) error { - if err := Unmount(h, target); err != nil { +func Mount(r mountRunner, source string, target string, c *MountConfig) error { + if err := Unmount(r, target); err != nil { return errors.Wrap(err, "umount") } cmd := fmt.Sprintf("sudo mkdir -m %o -p %s && %s", c.Mode, target, mntCmd(source, target, c)) - out, err := h.RunSSHCommand(cmd) + glog.Infof("Will run: %s", cmd) + out, err := r.CombinedOutput(cmd) + glog.Infof("mount err=%s, out=%s", err, out) if err != nil { return errors.Wrap(err, out) } return nil } +// returns either a raw UID number, or the subshell to resolve it. +func resolveUID(id string) string { + _, err := strconv.ParseInt(id, 10, 64) + if err == nil { + return id + } + // Preserve behavior where unset ID == 0 + if id == "" { + return "0" + } + return fmt.Sprintf(`$(id -u %s)`, id) +} + +// returns either a raw GID number, or the subshell to resolve it. +func resolveGID(id string) string { + _, err := strconv.ParseInt(id, 10, 64) + if err == nil { + return id + } + // Preserve behavior where unset ID == 0 + if id == "" { + return "0" + } + // Because `getent` isn't part of our ISO + return fmt.Sprintf(`$(grep ^%s: /etc/group | cut -d: -f3)`, id) +} + // mntCmd returns a mount command based on a config. 
func mntCmd(source string, target string, c *MountConfig) string { options := map[string]string{ - "dfltgid": strconv.Itoa(c.GID), - "dfltuid": strconv.Itoa(c.UID), + "dfltgid": resolveGID(c.GID), + "dfltuid": resolveUID(c.UID), } if c.Port != 0 { options["port"] = strconv.Itoa(c.Port) @@ -100,9 +130,31 @@ func mntCmd(source string, target string, c *MountConfig) string { return fmt.Sprintf("sudo mount -t %s -o %s %s %s", c.Type, strings.Join(opts, ","), source, target) } +// umountCmd returns a command for unmounting +func umountCmd(target string, force bool) string { + flag := "" + if force { + flag = "-f " + } + // grep because findmnt will also display the parent! + return fmt.Sprintf("findmnt -T %s | grep %s && sudo umount %s%s || true", target, target, flag, target) +} + // Unmount unmounts a path -func Unmount(h hostRunner, target string) error { - out, err := h.RunSSHCommand(fmt.Sprintf("findmnt -T %s && sudo umount %s || true", target, target)) +func Unmount(r mountRunner, target string) error { + cmd := umountCmd(target, false) + glog.Infof("Will run: %s", cmd) + out, err := r.CombinedOutput(cmd) + if err == nil { + return nil + } + glog.Warningf("initial unmount error: %v, out: %s", err, out) + + // Try again, using force if needed. 
+ cmd = umountCmd(target, true) + glog.Infof("Will run: %s", cmd) + out, err = r.CombinedOutput(cmd) + glog.Infof("unmount force err=%v, out=%s", err, out) if err != nil { return errors.Wrap(err, out) } diff --git a/pkg/minikube/cluster/mount_test.go b/pkg/minikube/cluster/mount_test.go index 98f1f6ed29..749f631eda 100644 --- a/pkg/minikube/cluster/mount_test.go +++ b/pkg/minikube/cluster/mount_test.go @@ -23,19 +23,19 @@ import ( "github.com/google/go-cmp/cmp" ) -type mockMountHost struct { +type mockMountRunner struct { cmds []string T *testing.T } -func newMockMountHost(t *testing.T) *mockMountHost { - return &mockMountHost{ +func newMockMountRunner(t *testing.T) *mockMountRunner { + return &mockMountRunner{ T: t, cmds: []string{}, } } -func (m *mockMountHost) RunSSHCommand(cmd string) (string, error) { +func (m *mockMountRunner) CombinedOutput(cmd string) (string, error) { m.cmds = append(m.cmds, cmd) return "", nil } @@ -54,20 +54,30 @@ func TestMount(t *testing.T) { target: "target", cfg: &MountConfig{Type: "9p", Mode: os.FileMode(0700)}, want: []string{ - "findmnt -T target && sudo umount target || true", + "findmnt -T target | grep target && sudo umount target || true", "sudo mkdir -m 700 -p target && sudo mount -t 9p -o dfltgid=0,dfltuid=0 src target", }, }, + { + name: "named uid", + source: "src", + target: "target", + cfg: &MountConfig{Type: "9p", Mode: os.FileMode(0700), UID: "docker", GID: "docker"}, + want: []string{ + "findmnt -T target | grep target && sudo umount target || true", + "sudo mkdir -m 700 -p target && sudo mount -t 9p -o dfltgid=$(grep ^docker: /etc/group | cut -d: -f3),dfltuid=$(id -u docker) src target", + }, + }, { name: "everything", source: "10.0.0.1", target: "/target", - cfg: &MountConfig{Type: "9p", Mode: os.FileMode(0777), UID: 82, GID: 72, Version: "9p2000.u", Options: map[string]string{ + cfg: &MountConfig{Type: "9p", Mode: os.FileMode(0777), UID: "82", GID: "72", Version: "9p2000.u", Options: map[string]string{ "noextend": 
"", "cache": "fscache", }}, want: []string{ - "findmnt -T /target && sudo umount /target || true", + "findmnt -T /target | grep /target && sudo umount /target || true", "sudo mkdir -m 777 -p /target && sudo mount -t 9p -o cache=fscache,dfltgid=72,dfltuid=82,noextend,version=9p2000.u 10.0.0.1 /target", }, }, @@ -79,19 +89,19 @@ func TestMount(t *testing.T) { "version": "9p2000.L", }}, want: []string{ - "findmnt -T tgt && sudo umount tgt || true", + "findmnt -T tgt | grep tgt && sudo umount tgt || true", "sudo mkdir -m 700 -p tgt && sudo mount -t 9p -o dfltgid=0,dfltuid=0,version=9p2000.L src tgt", }, }, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { - h := newMockMountHost(t) - err := Mount(h, tc.source, tc.target, tc.cfg) + r := newMockMountRunner(t) + err := Mount(r, tc.source, tc.target, tc.cfg) if err != nil { t.Fatalf("Mount(%s, %s, %+v): %v", tc.source, tc.target, tc.cfg, err) } - if diff := cmp.Diff(h.cmds, tc.want); diff != "" { + if diff := cmp.Diff(r.cmds, tc.want); diff != "" { t.Errorf("command diff (-want +got): %s", diff) } }) @@ -99,14 +109,14 @@ func TestMount(t *testing.T) { } func TestUnmount(t *testing.T) { - h := newMockMountHost(t) - err := Unmount(h, "/mnt") + r := newMockMountRunner(t) + err := Unmount(r, "/mnt") if err != nil { t.Fatalf("Unmount(/mnt): %v", err) } - want := []string{"findmnt -T /mnt && sudo umount /mnt || true"} - if diff := cmp.Diff(h.cmds, want); diff != "" { + want := []string{"findmnt -T /mnt | grep /mnt && sudo umount /mnt || true"} + if diff := cmp.Diff(r.cmds, want); diff != "" { t.Errorf("command diff (-want +got): %s", diff) } } diff --git a/pkg/minikube/console/console.go b/pkg/minikube/console/console.go index 6ce18431c1..c26b44c5c6 100644 --- a/pkg/minikube/console/console.go +++ b/pkg/minikube/console/console.go @@ -40,7 +40,7 @@ import ( // console.SetErrFile(os.Stderr) // console.Fatal("Oh no, everything failed.") -// NOTE: If you do not want colorized output, set MINIKUBE_IN_COLOR=false in 
your environment. +// NOTE: If you do not want colorized output, set MINIKUBE_IN_STYLE=false in your environment. var ( // outFile is where Out* functions send output to. Set using SetOutFile() @@ -54,7 +54,7 @@ var ( // useColor is whether or not color output should be used, updated by Set*Writer. useColor = false // OverrideEnv is the environment variable used to override color/emoji usage - OverrideEnv = "MINIKUBE_IN_COLOR" + OverrideEnv = "MINIKUBE_IN_STYLE" ) // fdWriter is the subset of file.File that implements io.Writer and Fd() @@ -70,7 +70,7 @@ func HasStyle(style string) bool { // OutStyle writes a stylized and formatted message to stdout func OutStyle(style, format string, a ...interface{}) error { - OutStyle, err := applyStyle(style, useColor, fmt.Sprintf(format, a...)) + outStyled, err := applyStyle(style, useColor, format, a...) if err != nil { glog.Errorf("applyStyle(%s): %v", style, err) if oerr := OutLn(format, a...); oerr != nil { @@ -78,7 +78,12 @@ func OutStyle(style, format string, a ...interface{}) error { } return err } - return Out(OutStyle) + + // escape any outstanding '%' signs so that they don't get interpreted + // as a formatting directive down the line + outStyled = strings.Replace(outStyled, "%", "%%", -1) + + return Out(outStyled) } // Out writes a basic formatted string to stdout @@ -101,7 +106,7 @@ func OutLn(format string, a ...interface{}) error { // ErrStyle writes a stylized and formatted error message to stderr func ErrStyle(style, format string, a ...interface{}) error { - format, err := applyStyle(style, useColor, fmt.Sprintf(format, a...)) + format, err := applyStyle(style, useColor, format, a...) 
if err != nil { glog.Errorf("applyStyle(%s): %v", style, err) if oerr := ErrLn(format, a...); oerr != nil { @@ -109,6 +114,11 @@ func ErrStyle(style, format string, a ...interface{}) error { } return err } + + // escape any outstanding '%' signs so that they don't get interpreted + // as a formatting directive down the line + format = strings.Replace(format, "%", "%%", -1) + return Err(format) } @@ -192,8 +202,8 @@ func SetErrFile(w fdWriter) { func wantsColor(fd uintptr) bool { // First process the environment: we allow users to force colors on or off. // - // MINIKUBE_IN_COLOR=[1, T, true, TRUE] - // MINIKUBE_IN_COLOR=[0, f, false, FALSE] + // MINIKUBE_IN_STYLE=[1, T, true, TRUE] + // MINIKUBE_IN_STYLE=[0, f, false, FALSE] // // If unset, we try to automatically determine suitability from the environment. val := os.Getenv(OverrideEnv) diff --git a/pkg/minikube/console/console_test.go b/pkg/minikube/console/console_test.go index 7e0af8e1da..a12f6ae78d 100644 --- a/pkg/minikube/console/console_test.go +++ b/pkg/minikube/console/console_test.go @@ -51,22 +51,37 @@ func TestOutStyle(t *testing.T) { style string envValue string message string + params []interface{} want string }{ - {"happy", "true", "This is happy.", "😄 This is happy.\n"}, - {"Docker", "true", "This is Docker.", "🐳 This is Docker.\n"}, - {"option", "true", "This is option.", " ▪ This is option.\n"}, + {"happy", "true", "This is happy.", nil, "😄 This is happy.\n"}, + {"Docker", "true", "This is Docker.", nil, "🐳 This is Docker.\n"}, + {"option", "true", "This is option.", nil, " ▪ This is option.\n"}, + { + "option", + "true", + "Message with params: %s %s", + []interface{}{"encode '%' signs", "%s%%%d"}, + " ▪ Message with params: encode '%' signs %s%%%d\n", + }, - {"happy", "false", "This is happy.", "o This is happy.\n"}, - {"Docker", "false", "This is Docker.", "- This is Docker.\n"}, - {"option", "false", "This is option.", " - This is option.\n"}, + {"happy", "false", "This is happy.", nil, "o 
This is happy.\n"}, + {"Docker", "false", "This is Docker.", nil, "- This is Docker.\n"}, + {"option", "false", "This is option.", nil, " - This is option.\n"}, + { + "option", + "false", + "Message with params: %s %s", + []interface{}{"encode '%' signs", "%s%%%d"}, + " - Message with params: encode '%' signs %s%%%d\n", + }, } for _, tc := range tests { t.Run(tc.style+"-"+tc.envValue, func(t *testing.T) { os.Setenv(OverrideEnv, tc.envValue) f := newFakeFile() SetOutFile(f) - if err := OutStyle(tc.style, tc.message); err != nil { + if err := OutStyle(tc.style, tc.message, tc.params...); err != nil { t.Errorf("unexpected error: %q", err) } got := f.String() @@ -94,6 +109,7 @@ func TestOut(t *testing.T) { {format: "xyz123", want: "xyz123"}, {format: "Installing Kubernetes version %s ...", lang: language.Arabic, arg: "v1.13", want: "... v1.13 تثبيت Kubernetes الإصدار"}, {format: "Installing Kubernetes version %s ...", lang: language.AmericanEnglish, arg: "v1.13", want: "Installing Kubernetes version v1.13 ..."}, + {format: "Parameter encoding: %s", arg: "%s%%%d", want: "Parameter encoding: %s%%%d"}, } for _, tc := range tests { t.Run(tc.format, func(t *testing.T) { @@ -116,13 +132,13 @@ func TestErr(t *testing.T) { os.Setenv(OverrideEnv, "0") f := newFakeFile() SetErrFile(f) - if err := Err("xyz123\n"); err != nil { + if err := Err("xyz123 %s\n", "%s%%%d"); err != nil { t.Errorf("unexpected error: %q", err) } OutLn("unrelated message") got := f.String() - want := "xyz123\n" + want := "xyz123 %s%%%d\n" if got != want { t.Errorf("Err() = %q, want %q", got, want) @@ -133,11 +149,11 @@ func TestErrStyle(t *testing.T) { os.Setenv(OverrideEnv, "1") f := newFakeFile() SetErrFile(f) - if err := ErrStyle("fatal", "It broke"); err != nil { + if err := ErrStyle("fatal", "error: %s", "%s%%%d"); err != nil { t.Errorf("unexpected error: %q", err) } got := f.String() - want := "💣 It broke\n" + want := "💣 error: %s%%%d\n" if got != want { t.Errorf("ErrStyle() = %q, want %q", got, 
want) } diff --git a/pkg/minikube/console/style.go b/pkg/minikube/console/style.go index ef51ff9e2a..adddc4bc65 100644 --- a/pkg/minikube/console/style.go +++ b/pkg/minikube/console/style.go @@ -66,9 +66,10 @@ var styles = map[string]style{ "log-entry": {Prefix: " "}, // Indent "crushed": {Prefix: "💔 "}, "url": {Prefix: "👉 "}, - "documentation": {Prefix: "🗎 "}, - "issues": {Prefix: "📚 "}, + "documentation": {Prefix: "📘 "}, + "issues": {Prefix: "⁉️ "}, "issue": {Prefix: " ▪ "}, // Indented bullet + "check": {Prefix: "✔️ "}, // Specialized purpose styles "iso-download": {Prefix: "💿 ", LowPrefix: "@ "}, diff --git a/pkg/minikube/constants/constants.go b/pkg/minikube/constants/constants.go index 2072ffa97f..a26c957be0 100644 --- a/pkg/minikube/constants/constants.go +++ b/pkg/minikube/constants/constants.go @@ -154,7 +154,13 @@ var DefaultISOURL = fmt.Sprintf("https://storage.googleapis.com/%s/minikube-%s.i var DefaultISOSHAURL = DefaultISOURL + SHASuffix // DefaultKubernetesVersion is the default kubernetes version -var DefaultKubernetesVersion = "v1.13.4" +var DefaultKubernetesVersion = "v1.14.0" + +// NewestKubernetesVersion is the newest Kubernetes version to test against +var NewestKubernetesVersion = "v1.14.0" + +// OldestKubernetesVersion is the oldest Kubernetes version to test against +var OldestKubernetesVersion = "v1.10.13" // ConfigFilePath is the path of the config directory var ConfigFilePath = MakeMiniPath("config") @@ -224,6 +230,11 @@ const DriverNone = "none" // FileScheme is the file scheme const FileScheme = "file" +// GetKubeadmCachedBinaries gets the binaries to cache for kubeadm +func GetKubeadmCachedBinaries() []string { + return []string{"kubelet", "kubeadm"} +} + // GetKubeadmCachedImages gets the images to cache for kubeadm for a version func GetKubeadmCachedImages(imageRepository string, kubernetesVersionStr string) (string, []string) { minikubeRepository := imageRepository @@ -238,13 +249,6 @@ func GetKubeadmCachedImages(imageRepository 
string, kubernetesVersionStr string) minikubeRepository += "/" } - var images = []string{ - imageRepository + "kube-proxy-amd64:" + kubernetesVersionStr, - imageRepository + "kube-scheduler-amd64:" + kubernetesVersionStr, - imageRepository + "kube-controller-manager-amd64:" + kubernetesVersionStr, - imageRepository + "kube-apiserver-amd64:" + kubernetesVersionStr, - } - v1_14plus := semver.MustParseRange(">=1.14.0") v1_13 := semver.MustParseRange(">=1.13.0 <1.14.0") v1_12 := semver.MustParseRange(">=1.12.0 <1.13.0") @@ -252,18 +256,35 @@ func GetKubeadmCachedImages(imageRepository string, kubernetesVersionStr string) v1_10 := semver.MustParseRange(">=1.10.0 <1.11.0") v1_9 := semver.MustParseRange(">=1.9.0 <1.10.0") v1_8 := semver.MustParseRange(">=1.8.0 <1.9.0") + v1_12plus := semver.MustParseRange(">=1.12.0") kubernetesVersion, err := semver.Make(strings.TrimPrefix(kubernetesVersionStr, minikubeVersion.VersionPrefix)) if err != nil { glog.Errorln("Error parsing version semver: ", err) } + var images []string + if v1_12plus(kubernetesVersion) { + images = append(images, []string{ + imageRepository + "kube-proxy:" + kubernetesVersionStr, + imageRepository + "kube-scheduler:" + kubernetesVersionStr, + imageRepository + "kube-controller-manager:" + kubernetesVersionStr, + imageRepository + "kube-apiserver:" + kubernetesVersionStr, + }...) + } else { + images = append(images, []string{ + imageRepository + "kube-proxy-amd64:" + kubernetesVersionStr, + imageRepository + "kube-scheduler-amd64:" + kubernetesVersionStr, + imageRepository + "kube-controller-manager-amd64:" + kubernetesVersionStr, + imageRepository + "kube-apiserver-amd64:" + kubernetesVersionStr, + }...) 
+ } + var podInfraContainerImage string if v1_14plus(kubernetesVersion) { - podInfraContainerImage = imageRepository + "pause-amd64:3.1" + podInfraContainerImage = imageRepository + "pause:3.1" images = append(images, []string{ podInfraContainerImage, - imageRepository + "pause:3.1", imageRepository + "k8s-dns-kube-dns-amd64:1.14.13", imageRepository + "k8s-dns-dnsmasq-nanny-amd64:1.14.13", imageRepository + "k8s-dns-sidecar-amd64:1.14.13", @@ -272,34 +293,31 @@ func GetKubeadmCachedImages(imageRepository string, kubernetesVersionStr string) }...) } else if v1_13(kubernetesVersion) { - podInfraContainerImage = imageRepository + "pause-amd64:3.1" + podInfraContainerImage = imageRepository + "pause:3.1" images = append(images, []string{ podInfraContainerImage, - imageRepository + "pause:3.1", imageRepository + "k8s-dns-kube-dns-amd64:1.14.8", imageRepository + "k8s-dns-dnsmasq-nanny-amd64:1.14.8", imageRepository + "k8s-dns-sidecar-amd64:1.14.8", - imageRepository + "etcd-amd64:3.2.24", + imageRepository + "etcd:3.2.24", imageRepository + "coredns:1.2.6", }...) } else if v1_12(kubernetesVersion) { - podInfraContainerImage = imageRepository + "pause-amd64:3.1" + podInfraContainerImage = imageRepository + "pause:3.1" images = append(images, []string{ podInfraContainerImage, - imageRepository + "pause:3.1", imageRepository + "k8s-dns-kube-dns-amd64:1.14.8", imageRepository + "k8s-dns-dnsmasq-nanny-amd64:1.14.8", imageRepository + "k8s-dns-sidecar-amd64:1.14.8", - imageRepository + "etcd-amd64:3.2.24", + imageRepository + "etcd:3.2.24", imageRepository + "coredns:1.2.2", }...) 
} else if v1_11(kubernetesVersion) { - podInfraContainerImage = imageRepository + "pause-amd64:3.1" + podInfraContainerImage = imageRepository + "pause:3.1" images = append(images, []string{ podInfraContainerImage, - imageRepository + "pause:3.1", imageRepository + "k8s-dns-kube-dns-amd64:1.14.8", imageRepository + "k8s-dns-dnsmasq-nanny-amd64:1.14.8", imageRepository + "k8s-dns-sidecar-amd64:1.14.8", @@ -308,7 +326,7 @@ func GetKubeadmCachedImages(imageRepository string, kubernetesVersionStr string) }...) } else if v1_10(kubernetesVersion) { - podInfraContainerImage = imageRepository + "pause-amd64:3.1" + podInfraContainerImage = imageRepository + "pause:3.1" images = append(images, []string{ podInfraContainerImage, imageRepository + "k8s-dns-kube-dns-amd64:1.14.8", @@ -318,7 +336,7 @@ func GetKubeadmCachedImages(imageRepository string, kubernetesVersionStr string) }...) } else if v1_9(kubernetesVersion) { - podInfraContainerImage = imageRepository + "pause-amd64:3.0" + podInfraContainerImage = imageRepository + "pause:3.0" images = append(images, []string{ podInfraContainerImage, imageRepository + "k8s-dns-kube-dns-amd64:1.14.7", @@ -328,7 +346,7 @@ func GetKubeadmCachedImages(imageRepository string, kubernetesVersionStr string) }...) } else if v1_8(kubernetesVersion) { - podInfraContainerImage = imageRepository + "pause-amd64:3.0" + podInfraContainerImage = imageRepository + "pause:3.0" images = append(images, []string{ podInfraContainerImage, imageRepository + "k8s-dns-kube-dns-amd64:1.14.5", @@ -338,12 +356,12 @@ func GetKubeadmCachedImages(imageRepository string, kubernetesVersionStr string) }...) 
} else { - podInfraContainerImage = imageRepository + "/pause-amd64:3.0" + podInfraContainerImage = imageRepository + "pause:3.0" } images = append(images, []string{ imageRepository + "kubernetes-dashboard-amd64:v1.10.1", - imageRepository + "kube-addon-manager:v8.6", + imageRepository + "kube-addon-manager:v9.0", minikubeRepository + "storage-provisioner:v1.8.1", }...) diff --git a/pkg/minikube/cruntime/cruntime_test.go b/pkg/minikube/cruntime/cruntime_test.go index b7b5816b27..194c4a5b1c 100644 --- a/pkg/minikube/cruntime/cruntime_test.go +++ b/pkg/minikube/cruntime/cruntime_test.go @@ -445,9 +445,6 @@ func TestContainerFunctions(t *testing.T) { "fgh1": prefix + "coredns", "xyz2": prefix + "storage", } - if tc.runtime == "docker" { - runner.containers["zzz"] = "unrelated" - } cr, err := New(Config{Type: tc.runtime, Runner: runner}) if err != nil { t.Fatalf("New(%s): %v", tc.runtime, err) diff --git a/pkg/minikube/cruntime/docker.go b/pkg/minikube/cruntime/docker.go index e8a0a9b39b..504ca255fc 100644 --- a/pkg/minikube/cruntime/docker.go +++ b/pkg/minikube/cruntime/docker.go @@ -24,9 +24,6 @@ import ( "github.com/golang/glog" ) -// KubernetesContainerPrefix is the prefix of each kubernetes container -const KubernetesContainerPrefix = "k8s_" - // Docker contains Docker runtime state type Docker struct { Socket string @@ -99,7 +96,6 @@ func (r *Docker) KubeletOptions() map[string]string { // ListContainers returns a list of containers func (r *Docker) ListContainers(filter string) ([]string, error) { - filter = KubernetesContainerPrefix + filter content, err := r.Runner.CombinedOutput(fmt.Sprintf(`docker ps -a --filter="name=%s" --format="{{.ID}}"`, filter)) if err != nil { return nil, err diff --git a/pkg/minikube/exit/exit.go b/pkg/minikube/exit/exit.go index c20139f934..b0908fbf5b 100644 --- a/pkg/minikube/exit/exit.go +++ b/pkg/minikube/exit/exit.go @@ -73,7 +73,7 @@ func WithProblem(msg string, p *problem.Problem) { console.Fatal(msg) p.Display() 
console.Err("\n") - console.ErrStyle("sad", "If the advice does not help, please let us know: ") + console.ErrStyle("sad", "If the above advice does not help, please let us know: ") console.ErrStyle("url", "https://github.com/kubernetes/minikube/issues/new") os.Exit(Config) } diff --git a/pkg/minikube/logs/logs.go b/pkg/minikube/logs/logs.go index 83e3f5c55f..3d4391551a 100644 --- a/pkg/minikube/logs/logs.go +++ b/pkg/minikube/logs/logs.go @@ -35,12 +35,18 @@ import ( // rootCauseRe is a regular expression that matches known failure root causes var rootCauseRe = regexp.MustCompile(`^error: |eviction manager: pods.* evicted|unknown flag: --|forbidden.*no providers available|eviction manager:.*evicted`) +// ignoreCauseRe is a regular expression that matches spurious errors to not surface +var ignoreCauseRe = regexp.MustCompile("error: no objects passed to apply") + // importantPods are a list of pods to retrieve logs for, in addition to the bootstrapper logs. var importantPods = []string{ "kube-apiserver", "coredns", "kube-scheduler", "kube-proxy", + "kube-addon-manager", + "kubernetes-dashboard", + "storage-provisioner", } // lookbackwardsCount is how far back to look in a log for problems. 
This should be large enough to @@ -59,7 +65,7 @@ func Follow(r cruntime.Manager, bs bootstrapper.Bootstrapper, runner bootstrappe // IsProblem returns whether this line matches a known problem func IsProblem(line string) bool { - return rootCauseRe.MatchString(line) + return rootCauseRe.MatchString(line) && !ignoreCauseRe.MatchString(line) } // FindProblems finds possible root causes among the logs diff --git a/pkg/minikube/logs/logs_test.go b/pkg/minikube/logs/logs_test.go index 487fba654f..85576a9a16 100644 --- a/pkg/minikube/logs/logs_test.go +++ b/pkg/minikube/logs/logs_test.go @@ -33,6 +33,7 @@ func TestIsProblem(t *testing.T) { {"apiserver-auth-mode #2852", true, `{"log":"Error: unknown flag: --Authorization.Mode\n","stream":"stderr","time":"2018-06-17T22:16:35.134161966Z"}`}, {"apiserver-admission #3524", true, "error: unknown flag: --GenericServerRunOptions.AdmissionControl"}, {"no-providers-available #3818", true, ` kubelet.go:1662] Failed creating a mirror pod for "kube-apiserver-minikube_kube-system(c7d572aebd3d33b17fa78ae6395b6d0a)": pods "kube-apiserver-minikube" is forbidden: no providers available to validate pod request`}, + {"no-objects-passed-to-apply #4010", false, "error: no objects passed to apply"}, } for _, tc := range tests { t.Run(tc.name, func(t *testing.T) { diff --git a/pkg/minikube/machine/cache_binaries.go b/pkg/minikube/machine/cache_binaries.go new file mode 100644 index 0000000000..b21c76d85a --- /dev/null +++ b/pkg/minikube/machine/cache_binaries.go @@ -0,0 +1,96 @@ +/* +Copyright 2016 The Kubernetes Authors All rights reserved. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. 
+You may obtain a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +See the License for the specific language governing permissions and +limitations under the License. +*/ + +package machine + +import ( + "crypto" + "os" + "path" + + "github.com/golang/glog" + "github.com/jimmidyson/go-download" + "github.com/pkg/errors" + "golang.org/x/sync/errgroup" + "k8s.io/minikube/pkg/minikube/assets" + "k8s.io/minikube/pkg/minikube/bootstrapper" + "k8s.io/minikube/pkg/minikube/console" + "k8s.io/minikube/pkg/minikube/constants" +) + +// CacheBinariesForBootstrapper will cache binaries for a bootstrapper +func CacheBinariesForBootstrapper(version string, clusterBootstrapper string) error { + binaries := bootstrapper.GetCachedBinaryList(clusterBootstrapper) + + var g errgroup.Group + for _, bin := range binaries { + bin := bin + g.Go(func() error { + if _, err := CacheBinary(bin, version); err != nil { + return errors.Wrapf(err, "caching image %s", bin) + } + return nil + }) + } + return g.Wait() +} + +// CacheBinary will cache a binary on the host +func CacheBinary(binary, version string) (string, error) { + targetDir := constants.MakeMiniPath("cache", version) + targetFilepath := path.Join(targetDir, binary) + + url := constants.GetKubernetesReleaseURL(binary, version) + + _, err := os.Stat(targetFilepath) + // If it exists, do no verification and continue + if err == nil { + glog.Infof("Not caching binary, using %s", url) + return targetFilepath, nil + } + if !os.IsNotExist(err) { + return "", errors.Wrapf(err, "stat %s version %s at %s", binary, version, targetDir) + } + + if err = os.MkdirAll(targetDir, 0777); err != nil { + return "", errors.Wrapf(err, "mkdir %s", targetDir) + } + + options := download.FileOptions{ + Mkdirs: 
download.MkdirAll, + } + + options.Checksum = constants.GetKubernetesReleaseURLSHA1(binary, version) + options.ChecksumHash = crypto.SHA1 + + console.OutStyle("file-download", "Downloading %s %s", binary, version) + if err := download.ToFile(url, targetFilepath, options); err != nil { + return "", errors.Wrapf(err, "Error downloading %s %s", binary, version) + } + return targetFilepath, nil +} + +// CopyBinary copies previously cached binaries into the path +func CopyBinary(cr bootstrapper.CommandRunner, binary, path string) error { + f, err := assets.NewFileAsset(path, "/usr/bin", binary, "0641") + if err != nil { + return errors.Wrap(err, "new file asset") + } + if err := cr.Copy(f); err != nil { + return errors.Wrapf(err, "copy") + } + return nil +} diff --git a/pkg/minikube/machine/cache_images.go b/pkg/minikube/machine/cache_images.go index 3a0761e080..4e2a349cbb 100644 --- a/pkg/minikube/machine/cache_images.go +++ b/pkg/minikube/machine/cache_images.go @@ -97,7 +97,8 @@ func LoadImages(cmd bootstrapper.CommandRunner, images []string, cacheDir string g.Go(func() error { src := filepath.Join(cacheDir, image) src = sanitizeCacheDir(src) - if err := LoadFromCacheBlocking(cmd, cc.KubernetesConfig, src); err != nil { + if err := loadImageFromCache(cmd, cc.KubernetesConfig, src); err != nil { + glog.Warningf("Failed to load %s: %v", src, err) return errors.Wrapf(err, "loading image %s", src) } return nil @@ -198,14 +199,12 @@ func getWindowsVolumeNameCmd(d string) (string, error) { return vname, nil } -// LoadFromCacheBlocking loads images from cache, blocking until loaded -func LoadFromCacheBlocking(cr bootstrapper.CommandRunner, k8s config.KubernetesConfig, src string) error { - glog.Infoln("Loading image from cache at ", src) +// loadImageFromCache loads a single image from the cache +func loadImageFromCache(cr bootstrapper.CommandRunner, k8s config.KubernetesConfig, src string) error { + glog.Infof("Loading image from cache: %s", src) filename := 
filepath.Base(src) - for { - if _, err := os.Stat(src); err == nil { - break - } + if _, err := os.Stat(src); err != nil { + return err } dst := path.Join(tempLoadDir, filename) f, err := assets.NewFileAsset(src, tempLoadDir, filename, "0777") @@ -303,12 +302,12 @@ func CacheImage(image, dst string) error { return errors.Wrapf(err, "making cache image directory: %s", dst) } - tag, err := name.NewTag(image, name.WeakValidation) + ref, err := name.ParseReference(image, name.WeakValidation) if err != nil { return errors.Wrap(err, "creating docker image name") } - img, err := remote.Image(tag, remote.WithAuthFromKeychain(authn.DefaultKeychain)) + img, err := remote.Image(ref, remote.WithAuthFromKeychain(authn.DefaultKeychain)) if err != nil { return errors.Wrap(err, "fetching remote image") } @@ -318,7 +317,7 @@ func CacheImage(image, dst string) error { if err != nil { return err } - err = tarball.Write(tag, img, nil, f) + err = tarball.Write(ref, img, f) if err != nil { return err } diff --git a/pkg/minikube/problem/err_map.go b/pkg/minikube/problem/err_map.go index da9abeae0c..d20293f09d 100644 --- a/pkg/minikube/problem/err_map.go +++ b/pkg/minikube/problem/err_map.go @@ -43,7 +43,7 @@ var vmProblems = map[string]match{ }, "KVM2_NO_IP": { Regexp: re(`Error starting stopped host: Machine didn't return an IP after 120 seconds`), - Advice: "The KVM driver is unable to ressurect this old VM. Please run `minikube delete` to delete it and try again.", + Advice: "The KVM driver is unable to resurrect this old VM. 
Please run `minikube delete` to delete it and try again.", Issues: []int{3901, 3566, 3434}, }, "VM_DOES_NOT_EXIST": { diff --git a/pkg/minikube/problem/problem.go b/pkg/minikube/problem/problem.go index a9a6b40ff0..e5184341f8 100644 --- a/pkg/minikube/problem/problem.go +++ b/pkg/minikube/problem/problem.go @@ -23,7 +23,7 @@ import ( "k8s.io/minikube/pkg/minikube/console" ) -const issueBase = "https://github.com/kubernetes/minikube/issue" +const issueBase = "https://github.com/kubernetes/minikube/issues" // Problem represents a known problem in minikube. type Problem struct { diff --git a/pkg/minikube/service/service.go b/pkg/minikube/service/service.go index 0db95eca03..b68b527b61 100644 --- a/pkg/minikube/service/service.go +++ b/pkg/minikube/service/service.go @@ -170,29 +170,36 @@ func printURLsForService(c corev1.CoreV1Interface, ip, service, namespace string if err != nil { return nil, errors.Wrapf(err, "service '%s' could not be found running", service) } - var nodePorts []int32 - if len(svc.Spec.Ports) > 0 { - for _, port := range svc.Spec.Ports { - if port.NodePort > 0 { - nodePorts = append(nodePorts, port.NodePort) + + e := c.Endpoints(namespace) + endpoints, err := e.Get(service, metav1.GetOptions{}) + m := make(map[int32]string) + if endpoints != nil && len(endpoints.Subsets) > 0 { + for _, ept := range endpoints.Subsets { + for _, p := range ept.Ports { + m[int32(p.Port)] = p.Name } } } - urls := []string{} - for _, port := range nodePorts { - var doc bytes.Buffer - err = t.Execute(&doc, struct { - IP string - Port int32 - }{ - ip, - port, - }) - if err != nil { - return nil, err - } - urls = append(urls, doc.String()) + urls := []string{} + for _, port := range svc.Spec.Ports { + if port.NodePort > 0 { + var doc bytes.Buffer + err = t.Execute(&doc, struct { + IP string + Port int32 + Name string + }{ + ip, + port.NodePort, + m[port.TargetPort.IntVal], + }) + if err != nil { + return nil, err + } + urls = append(urls, doc.String()) + } } return urls, 
nil } diff --git a/pkg/minikube/service/service_test.go b/pkg/minikube/service/service_test.go index 0a6ed09b4d..3656fdd1cf 100644 --- a/pkg/minikube/service/service_test.go +++ b/pkg/minikube/service/service_test.go @@ -27,6 +27,7 @@ import ( "github.com/pkg/errors" "k8s.io/api/core/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" + "k8s.io/apimachinery/pkg/util/intstr" "k8s.io/client-go/kubernetes" corev1 "k8s.io/client-go/kubernetes/typed/core/v1" "k8s.io/client-go/kubernetes/typed/core/v1/fake" @@ -36,12 +37,14 @@ import ( ) type MockClientGetter struct { - servicesMap map[string]corev1.ServiceInterface + servicesMap map[string]corev1.ServiceInterface + endpointsMap map[string]corev1.EndpointsInterface } func (m *MockClientGetter) GetCoreClient() (corev1.CoreV1Interface, error) { return &MockCoreClient{ - servicesMap: m.servicesMap, + servicesMap: m.servicesMap, + endpointsMap: m.endpointsMap, }, nil } @@ -51,7 +54,8 @@ func (m *MockClientGetter) GetClientset(timeout time.Duration) (*kubernetes.Clie type MockCoreClient struct { fake.FakeCoreV1 - servicesMap map[string]corev1.ServiceInterface + servicesMap map[string]corev1.ServiceInterface + endpointsMap map[string]corev1.EndpointsInterface } var serviceNamespaces = map[string]corev1.ServiceInterface{ @@ -68,8 +72,18 @@ var defaultNamespaceServiceInterface = &MockServiceInterface{ }, Spec: v1.ServiceSpec{ Ports: []v1.ServicePort{ - {NodePort: int32(1111)}, - {NodePort: int32(2222)}, + { + NodePort: int32(1111), + TargetPort: intstr.IntOrString{ + IntVal: int32(11111), + }, + }, + { + NodePort: int32(2222), + TargetPort: intstr.IntOrString{ + IntVal: int32(22222), + }, + }, }, }, }, @@ -86,8 +100,14 @@ var defaultNamespaceServiceInterface = &MockServiceInterface{ }, } +var endpointNamespaces = map[string]corev1.EndpointsInterface{ + "default": defaultNamespaceEndpointInterface, +} + +var defaultNamespaceEndpointInterface = &MockEndpointsInterface{} + func (m *MockCoreClient) Endpoints(namespace string) 
corev1.EndpointsInterface { - return &MockEndpointsInterface{} + return m.endpointsMap[namespace] } func (m *MockCoreClient) Services(namespace string) corev1.ServiceInterface { @@ -124,6 +144,22 @@ var endpointMap = map[string]*v1.Endpoints{ }, }, }, + "mock-dashboard": { + Subsets: []v1.EndpointSubset{ + { + Ports: []v1.EndpointPort{ + { + Name: "port1", + Port: int32(11111), + }, + { + Name: "port2", + Port: int32(22222), + }, + }, + }, + }, + }, } func (e MockEndpointsInterface) Get(name string, _ metav1.GetOptions) (*v1.Endpoints, error) { @@ -195,7 +231,8 @@ func TestGetServiceListFromServicesByLabel(t *testing.T) { func TestPrintURLsForService(t *testing.T) { defaultTemplate := template.Must(template.New("svc-template").Parse("http://{{.IP}}:{{.Port}}")) client := &MockCoreClient{ - servicesMap: serviceNamespaces, + servicesMap: serviceNamespaces, + endpointsMap: endpointNamespaces, } var tests = []struct { description string @@ -219,6 +256,13 @@ func TestPrintURLsForService(t *testing.T) { tmpl: template.Must(template.New("svc-arbitrary-template").Parse("{{.IP}}:{{.Port}}")), expectedOutput: []string{"127.0.0.1:1111", "127.0.0.1:2222"}, }, + { + description: "should get the name of all target ports with arbitrary format", + serviceName: "mock-dashboard", + namespace: "default", + tmpl: template.Must(template.New("svc-arbitrary-template").Parse("{{.Name}}={{.IP}}:{{.Port}}")), + expectedOutput: []string{"port1=127.0.0.1:1111", "port2=127.0.0.1:2222"}, + }, { description: "empty slice for no node ports", serviceName: "mock-dashboard-no-ports", @@ -361,7 +405,8 @@ func TestGetServiceURLs(t *testing.T) { t.Parallel() K8s = &MockClientGetter{ - servicesMap: serviceNamespaces, + servicesMap: serviceNamespaces, + endpointsMap: endpointNamespaces, } urls, err := GetServiceURLs(test.api, test.namespace, defaultTemplate) if err != nil && !test.err { @@ -428,7 +473,8 @@ func TestGetServiceURLsForService(t *testing.T) { t.Run(test.description, func(t *testing.T) { 
t.Parallel() K8s = &MockClientGetter{ - servicesMap: serviceNamespaces, + servicesMap: serviceNamespaces, + endpointsMap: endpointNamespaces, } urls, err := GetServiceURLsForService(test.api, test.namespace, test.service, defaultTemplate) if err != nil && !test.err { diff --git a/pkg/provision/buildroot.go b/pkg/provision/buildroot.go index 30e36d3ffe..e2afdd73ac 100644 --- a/pkg/provision/buildroot.go +++ b/pkg/provision/buildroot.go @@ -21,6 +21,7 @@ import ( "fmt" "path" "path/filepath" + "strings" "text/template" "time" @@ -47,6 +48,9 @@ type BuildrootProvisioner struct { provision.SystemdProvisioner } +// for escaping systemd template specifiers (e.g. '%i'), which are not supported by minikube +var systemdSpecifierEscaper = strings.NewReplacer("%", "%%") + func init() { provision.Register("Buildroot", &provision.RegisteredProvisioner{ New: NewBuildrootProvisioner, @@ -64,6 +68,17 @@ func (p *BuildrootProvisioner) String() string { return "buildroot" } +// escapeSystemdDirectives escapes special characters in the input variables used to create the +// systemd unit file, which would otherwise be interpreted as systemd directives. An example +// are template specifiers (e.g. '%i') which are predefined variables that get evaluated dynamically +// (see systemd man pages for more info). This is not supported by minikube, thus needs to be escaped. 
+func escapeSystemdDirectives(engineConfigContext *provision.EngineConfigContext) { + // escape '%' in Environment option so that it does not evaluate into a template specifier + engineConfigContext.EngineOptions.Env = util.ReplaceChars(engineConfigContext.EngineOptions.Env, systemdSpecifierEscaper) + // input might contain whitespaces, wrap it in quotes + engineConfigContext.EngineOptions.Env = util.ConcatStrings(engineConfigContext.EngineOptions.Env, "\"", "\"") +} + // GenerateDockerOptions generates the *provision.DockerOptions for this provisioner func (p *BuildrootProvisioner) GenerateDockerOptions(dockerPort int) (*provision.DockerOptions, error) { var engineCfg bytes.Buffer @@ -127,6 +142,8 @@ WantedBy=multi-user.target EngineOptions: p.EngineOptions, } + escapeSystemdDirectives(&engineConfigContext) + if err := t.Execute(&engineCfg, engineConfigContext); err != nil { return nil, err } diff --git a/pkg/util/kubeconfig.go b/pkg/util/kubeconfig.go index 3545576b65..863b6c2e20 100644 --- a/pkg/util/kubeconfig.go +++ b/pkg/util/kubeconfig.go @@ -251,7 +251,7 @@ func UpdateKubeconfigIP(ip net.IP, filename string, machineName string) (bool, e if kip.Equal(ip) { return false, nil } - kport, err := getPortFromKubeConfig(filename, machineName) + kport, err := GetPortFromKubeConfig(filename, machineName) if err != nil { return false, err } @@ -291,8 +291,8 @@ func getIPFromKubeConfig(filename, machineName string) (net.IP, error) { return ip, nil } -// getPortFromKubeConfig returns the Port number stored for minikube in the kubeconfig specified -func getPortFromKubeConfig(filename, machineName string) (int, error) { +// GetPortFromKubeConfig returns the Port number stored for minikube in the kubeconfig specified +func GetPortFromKubeConfig(filename, machineName string) (int, error) { con, err := ReadConfigOrNew(filename) if err != nil { return 0, errors.Wrap(err, "Error getting kubeconfig status") diff --git a/pkg/util/utils.go b/pkg/util/utils.go index 
7637e2a588..cf81251219 100644 --- a/pkg/util/utils.go +++ b/pkg/util/utils.go @@ -250,3 +250,26 @@ func TeePrefix(prefix string, r io.Reader, w io.Writer, logger func(format strin } return nil } + +// ReplaceChars returns a copy of the src slice with each string modified by the replacer +func ReplaceChars(src []string, replacer *strings.Replacer) []string { + ret := make([]string, len(src)) + for i, s := range src { + ret[i] = replacer.Replace(s) + } + return ret +} + +// ConcatStrings concatenates each string in the src slice with prefix and postfix and returns a new slice +func ConcatStrings(src []string, prefix string, postfix string) []string { + var buf bytes.Buffer + ret := make([]string, len(src)) + for i, s := range src { + buf.WriteString(prefix) + buf.WriteString(s) + buf.WriteString(postfix) + ret[i] = buf.String() + buf.Reset() + } + return ret +} diff --git a/pkg/util/utils_test.go b/pkg/util/utils_test.go index f7d1c7c372..864a540095 100644 --- a/pkg/util/utils_test.go +++ b/pkg/util/utils_test.go @@ -197,3 +197,43 @@ func TestTeePrefix(t *testing.T) { t.Errorf("log=%q, want: %q", gotLog, wantLog) } } + +func TestReplaceChars(t *testing.T) { + testData := []struct { + src []string + replacer *strings.Replacer + expectedRes []string + }{ + {[]string{"abc%def", "%Y%"}, strings.NewReplacer("%", "X"), []string{"abcXdef", "XYX"}}, + } + + for _, tt := range testData { + res := ReplaceChars(tt.src, tt.replacer) + for i, val := range res { + if val != tt.expectedRes[i] { + t.Fatalf("Expected '%s' but got '%s'", tt.expectedRes, res) + } + } + } +} + +func TestConcatStrings(t *testing.T) { + testData := []struct { + src []string + prefix string + postfix string + expectedRes []string + }{ + {[]string{"abc", ""}, "xx", "yy", []string{"xxabcyy", "xxyy"}}, + {[]string{"abc", ""}, "", "", []string{"abc", ""}}, + } + + for _, tt := range testData { + res := ConcatStrings(tt.src, tt.prefix, tt.postfix) + for i, val := range res { + if val != tt.expectedRes[i] { + 
t.Fatalf("Expected '%s' but got '%s'", tt.expectedRes, res) + } + } + } +} diff --git a/test/integration/mount_test.go b/test/integration/mount_test.go index 44ab68acbd..b36909ecd0 100644 --- a/test/integration/mount_test.go +++ b/test/integration/mount_test.go @@ -71,12 +71,14 @@ func testMounting(t *testing.T) { // Create the pods we need outside the main test loop. setupTest := func() error { + t.Logf("Deploying pod from: %s", podPath) if _, err := kubectlRunner.RunCommand([]string{"create", "-f", podPath}); err != nil { return err } return nil } defer func() { + t.Logf("Deleting pod from: %s", podPath) if out, err := kubectlRunner.RunCommand([]string{"delete", "-f", podPath}); err != nil { t.Logf("delete -f %s failed: %v\noutput: %s\n", podPath, err, out) } @@ -89,6 +91,7 @@ func testMounting(t *testing.T) { if err := waitForPods(map[string]string{"integration-test": "busybox-mount"}); err != nil { t.Fatalf("Error waiting for busybox mount pod to be up: %v", err) } + t.Logf("Pods appear to be running") mountTest := func() error { if err := verifyFiles(minikubeRunner, kubectlRunner, tempDir, podName, expected); err != nil { diff --git a/test/integration/start_stop_delete_test.go b/test/integration/start_stop_delete_test.go index d6d2ac0d0e..9cf29a3a37 100644 --- a/test/integration/start_stop_delete_test.go +++ b/test/integration/start_stop_delete_test.go @@ -19,12 +19,14 @@ limitations under the License. 
package integration import ( + "fmt" "net" "strings" "testing" "time" "github.com/docker/machine/libmachine/state" + "k8s.io/minikube/pkg/minikube/constants" "k8s.io/minikube/test/integration/util" ) @@ -33,10 +35,27 @@ func TestStartStop(t *testing.T) { name string args []string }{ - {"docker+cache", []string{"--container-runtime=docker", "--cache-images"}}, - {"docker+cache+ignore_verifications", []string{"--container-runtime=docker", "--cache-images", "--extra-config", "kubeadm.ignore-preflight-errors=SystemVerification"}}, - {"containerd+cache", []string{"--container-runtime=containerd", "--docker-opt containerd=/var/run/containerd/containerd.sock", "--cache-images"}}, - {"crio+cache", []string{"--container-runtime=crio", "--cache-images"}}, + {"nocache_oldest", []string{ + "--cache-images=false", + fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion), + }}, + {"feature_gates_newest_cni", []string{ + "--feature-gates", + "ServerSideApply=true", + "--network-plugin=cni", + "--extra-config=kubelet.network-plugin=cni", + fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), + }}, + {"containerd_and_non_default_apiserver_port", []string{ + "--container-runtime=containerd", + "--docker-opt containerd=/var/run/containerd/containerd.sock", + "--apiserver-port=8444", + }}, + {"crio_ignore_preflights", []string{ + "--container-runtime=crio", + "--extra-config", + "kubeadm.ignore-preflight-errors=SystemVerification", + }}, } for _, test := range tests { diff --git a/vendor/github.com/google/go-containerregistry/pkg/authn/helper.go b/vendor/github.com/google/go-containerregistry/pkg/authn/helper.go index 4a8ec24042..c8ba4e6e24 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/authn/helper.go +++ b/vendor/github.com/google/go-containerregistry/pkg/authn/helper.go @@ -72,7 +72,7 @@ func (h *helper) Authorization() (string, error) { var out bytes.Buffer cmd.Stdout = &out - err := h.r.Run(cmd) + cmdErr := h.r.Run(cmd) // 
If we see this specific message, it means the domain wasn't found // and we should fall back on anonymous auth. @@ -81,16 +81,22 @@ func (h *helper) Authorization() (string, error) { return Anonymous.Authorization() } - if err != nil { - return "", err - } - // Any other output should be parsed as JSON and the Username / Secret // fields used for Basic authentication. ho := helperOutput{} if err := json.Unmarshal([]byte(output), &ho); err != nil { + if cmdErr != nil { + // If we failed to parse output, it won't contain Secret, so returning it + // in an error should be fine. + return "", fmt.Errorf("invoking %s: %v; output: %s", helperName, cmdErr, output) + } return "", err } + + if cmdErr != nil { + return "", fmt.Errorf("invoking %s: %v", helperName, cmdErr) + } + b := Basic{Username: ho.Username, Password: ho.Secret} return b.Authorization() } diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go index ea6287a847..dc573ef1d8 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/name/digest.go +++ b/vendor/github.com/google/go-containerregistry/pkg/name/digest.go @@ -73,14 +73,14 @@ func NewDigest(name string, strict Strictness) (Digest, error) { base := parts[0] digest := parts[1] - // We don't require a digest, but if we get one check it's valid, - // even when not being strict. - // If we are being strict, we want to validate the digest regardless in case - // it's empty. - if digest != "" || strict == StrictValidation { - if err := checkDigest(digest); err != nil { - return Digest{}, err - } + // Always check that the digest is valid. 
+ if err := checkDigest(digest); err != nil { + return Digest{}, err + } + + tag, err := NewTag(base, strict) + if err == nil { + base = tag.Repository.Name() } repo, err := NewRepository(base, strict) diff --git a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go index c2bf5758a6..ab74193080 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/name/registry.go +++ b/vendor/github.com/google/go-containerregistry/pkg/name/registry.go @@ -15,12 +15,14 @@ package name import ( + "net" "net/url" "regexp" "strings" ) const ( + // DefaultRegistry is Docker Hub, assumed when a hostname is omitted. DefaultRegistry = "index.docker.io" defaultRegistryAlias = "docker.io" ) @@ -63,11 +65,29 @@ func (r Registry) Scope(string) string { return "registry:catalog:*" } +func (r Registry) isRFC1918() bool { + ipStr := strings.Split(r.Name(), ":")[0] + ip := net.ParseIP(ipStr) + if ip == nil { + return false + } + for _, cidr := range []string{"10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16"} { + _, block, _ := net.ParseCIDR(cidr) + if block.Contains(ip) { + return true + } + } + return false +} + // Scheme returns https scheme for all the endpoints except localhost or when explicitly defined. func (r Registry) Scheme() string { if r.insecure { return "http" } + if r.isRFC1918() { + return "http" + } if strings.HasPrefix(r.Name(), "localhost:") { return "http" } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go index d1d809d911..3d8d6d30db 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/config.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/config.go @@ -21,27 +21,28 @@ import ( ) // ConfigFile is the configuration file that holds the metadata describing -// how to launch a container. 
The names of the fields are chosen to reflect -// the JSON payload of the ConfigFile as defined here: https://git.io/vrAEY +// how to launch a container. See: +// https://github.com/opencontainers/image-spec/blob/master/config.md type ConfigFile struct { Architecture string `json:"architecture"` - Container string `json:"container"` - Created Time `json:"created"` - DockerVersion string `json:"docker_version"` - History []History `json:"history"` + Author string `json:"author,omitempty"` + Container string `json:"container,omitempty"` + Created Time `json:"created,omitempty"` + DockerVersion string `json:"docker_version,omitempty"` + History []History `json:"history,omitempty"` OS string `json:"os"` RootFS RootFS `json:"rootfs"` Config Config `json:"config"` - ContainerConfig Config `json:"container_config"` - OSVersion string `json:"osversion"` + ContainerConfig Config `json:"container_config,omitempty"` + OSVersion string `json:"osversion,omitempty"` } // History is one entry of a list recording how this container image was built. type History struct { - Author string `json:"author"` - Created Time `json:"created"` - CreatedBy string `json:"created_by"` - Comment string `json:"comment"` + Author string `json:"author,omitempty"` + Created Time `json:"created,omitempty"` + CreatedBy string `json:"created_by,omitempty"` + Comment string `json:"comment,omitempty"` EmptyLayer bool `json:"empty_layer,omitempty"` } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go b/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go index c9b203173e..7273ec5ab8 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/doc.go @@ -12,8 +12,8 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-// Package v1 defines structured types for OCI v1 images +//go:generate deepcopy-gen -O zz_deepcopy_generated --go-header-file $BOILER_PLATE_FILE -i . // +k8s:deepcopy-gen=package -//go:generate deepcopy-gen -O zz_deepcopy_generated --go-header-file $BOILER_PLATE_FILE -i . +// Package v1 defines structured types for OCI v1 images package v1 diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go b/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go index f0db0d51cf..40933030d3 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/hash.go @@ -49,7 +49,7 @@ func NewHash(s string) (Hash, error) { } // MarshalJSON implements json.Marshaler -func (h *Hash) MarshalJSON() ([]byte, error) { +func (h Hash) MarshalJSON() ([]byte, error) { return json.Marshal(h.String()) } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/image.go index 05568aae0c..17b9839a6e 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/image.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/image.go @@ -24,9 +24,6 @@ type Image interface { // The order of the list is oldest/base layer first, and most-recent/top layer last. Layers() ([]Layer, error) - // BlobSet returns an unordered collection of all the blobs in the image. - BlobSet() (map[Hash]struct{}, error) - // MediaType of this image's manifest. 
MediaType() (types.MediaType, error) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/index.go index 25ba29ed70..604e6de360 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/index.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/index.go @@ -18,6 +18,7 @@ import ( "github.com/google/go-containerregistry/pkg/v1/types" ) +// ImageIndex defines the interface for interacting with an OCI image index. type ImageIndex interface { // MediaType of this image's manifest. MediaType() (types.MediaType, error) @@ -28,6 +29,12 @@ type ImageIndex interface { // IndexManifest returns this image index's manifest object. IndexManifest() (*IndexManifest, error) - // RawIndexManifest returns the serialized bytes of IndexManifest(). - RawIndexManifest() ([]byte, error) + // RawManifest returns the serialized bytes of IndexManifest(). + RawManifest() ([]byte, error) + + // Image returns a v1.Image that this ImageIndex references. + Image(Hash) (Image, error) + + // ImageIndex returns a v1.ImageIndex that this ImageIndex references. + ImageIndex(Hash) (ImageIndex, error) } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go b/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go index 932ae056a1..36c341df8b 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/manifest.go @@ -23,7 +23,7 @@ import ( // Manifest represents the OCI image manifest in a structured way. 
type Manifest struct { - SchemaVersion int64 `json:"schemaVersion"` + SchemaVersion int64 `json:"schemaVersion,omitempty"` MediaType types.MediaType `json:"mediaType"` Config Descriptor `json:"config"` Layers []Descriptor `json:"layers"` diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go index e6e4f4d42f..497d1af0df 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/compressed.go @@ -17,7 +17,7 @@ package partial import ( "io" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/v1util" ) @@ -91,11 +91,6 @@ type compressedImageExtender struct { // Assert that our extender type completes the v1.Image interface var _ v1.Image = (*compressedImageExtender)(nil) -// BlobSet implements v1.Image -func (i *compressedImageExtender) BlobSet() (map[v1.Hash]struct{}, error) { - return BlobSet(i) -} - // Digest implements v1.Image func (i *compressedImageExtender) Digest() (v1.Hash, error) { return Digest(i) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go index 7afa187b87..9f75723ec5 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/uncompressed.go @@ -19,7 +19,7 @@ import ( "io" "sync" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/types" "github.com/google/go-containerregistry/pkg/v1/v1util" ) @@ -37,8 +37,12 @@ type UncompressedLayer interface { // uncompressedLayerExtender implements v1.Image using the uncompressed base properties. 
type uncompressedLayerExtender struct { UncompressedLayer - // TODO(mattmoor): Memoize size/hash so that the methods aren't twice as + // Memoize size/hash so that the methods aren't twice as // expensive as doing this manually. + hash v1.Hash + size int64 + hashSizeError error + once sync.Once } // Compressed implements v1.Layer @@ -52,29 +56,31 @@ func (ule *uncompressedLayerExtender) Compressed() (io.ReadCloser, error) { // Digest implements v1.Layer func (ule *uncompressedLayerExtender) Digest() (v1.Hash, error) { - r, err := ule.Compressed() - if err != nil { - return v1.Hash{}, err - } - defer r.Close() - h, _, err := v1.SHA256(r) - return h, err + ule.calcSizeHash() + return ule.hash, ule.hashSizeError } // Size implements v1.Layer func (ule *uncompressedLayerExtender) Size() (int64, error) { - r, err := ule.Compressed() - if err != nil { - return -1, err - } - defer r.Close() - _, i, err := v1.SHA256(r) - return i, err + ule.calcSizeHash() + return ule.size, ule.hashSizeError +} + +func (ule *uncompressedLayerExtender) calcSizeHash() { + ule.once.Do(func() { + var r io.ReadCloser + r, ule.hashSizeError = ule.Compressed() + if ule.hashSizeError != nil { + return + } + defer r.Close() + ule.hash, ule.size, ule.hashSizeError = v1.SHA256(r) + }) } // UncompressedToLayer fills in the missing methods from an UncompressedLayer so that it implements v1.Layer func UncompressedToLayer(ul UncompressedLayer) (v1.Layer, error) { - return &uncompressedLayerExtender{ul}, nil + return &uncompressedLayerExtender{UncompressedLayer: ul}, nil } // UncompressedImageCore represents the bare minimum interface a natively @@ -106,11 +112,6 @@ type uncompressedImageExtender struct { // Assert that our extender type completes the v1.Image interface var _ v1.Image = (*uncompressedImageExtender)(nil) -// BlobSet implements v1.Image -func (i *uncompressedImageExtender) BlobSet() (map[v1.Hash]struct{}, error) { - return BlobSet(i) -} - // Digest implements v1.Image func (i 
*uncompressedImageExtender) Digest() (v1.Hash, error) { return Digest(i) @@ -214,13 +215,6 @@ func (i *uncompressedImageExtender) LayerByDiffID(diffID v1.Hash) (v1.Layer, err // LayerByDigest implements v1.Image func (i *uncompressedImageExtender) LayerByDigest(h v1.Hash) (v1.Layer, error) { - // Support returning the ConfigFile when asked for its hash. - if cfgName, err := i.ConfigName(); err != nil { - return nil, err - } else if cfgName == h { - return ConfigLayer(i) - } - diffID, err := BlobToDiffID(i, h) if err != nil { return nil, err diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go index bc6fd8e9f5..f724ec8ab3 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/partial/with.go @@ -19,8 +19,9 @@ import ( "encoding/json" "fmt" "io" + "io/ioutil" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/v1util" ) @@ -49,8 +50,6 @@ func ConfigName(i WithRawConfigFile) (v1.Hash, error) { return h, err } -// configLayer implements v1.Layer from the raw config bytes. -// This is so that clients (e.g. remote) can access the config as a blob. 
type configLayer struct { hash v1.Hash content []byte @@ -68,12 +67,12 @@ func (cl *configLayer) DiffID() (v1.Hash, error) { // Uncompressed implements v1.Layer func (cl *configLayer) Uncompressed() (io.ReadCloser, error) { - return v1util.NopReadCloser(bytes.NewBuffer(cl.content)), nil + return ioutil.NopCloser(bytes.NewBuffer(cl.content)), nil } // Compressed implements v1.Layer func (cl *configLayer) Compressed() (io.ReadCloser, error) { - return v1util.NopReadCloser(bytes.NewBuffer(cl.content)), nil + return ioutil.NopCloser(bytes.NewBuffer(cl.content)), nil } // Size implements v1.Layer @@ -83,6 +82,8 @@ func (cl *configLayer) Size() (int64, error) { var _ v1.Layer = (*configLayer)(nil) +// ConfigLayer implements v1.Layer from the raw config bytes. +// This is so that clients (e.g. remote) can access the config as a blob. func ConfigLayer(i WithRawConfigFile) (v1.Layer, error) { h, err := ConfigName(i) if err != nil { @@ -190,20 +191,6 @@ func FSLayers(i WithManifest) ([]v1.Hash, error) { return fsl, nil } -// BlobSet is a helper for implementing v1.Image -func BlobSet(i WithManifest) (map[v1.Hash]struct{}, error) { - m, err := i.Manifest() - if err != nil { - return nil, err - } - bs := make(map[v1.Hash]struct{}) - for _, l := range m.Layers { - bs[l.Digest] = struct{}{} - } - bs[m.Config.Digest] = struct{}{} - return bs, nil -} - // BlobSize is a helper for implementing v1.Image func BlobSize(i WithManifest, h v1.Hash) (int64, error) { m, err := i.Manifest() diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go index 5108a05dea..2032e276ea 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/delete.go @@ -25,15 +25,8 @@ import ( "github.com/google/go-containerregistry/pkg/v1/remote/transport" ) -// DeleteOptions are used to expose optional information to guide or 
-// control the image deletion. -type DeleteOptions struct { - // TODO(mattmoor): Fail on not found? - // TODO(mattmoor): Delete tag and manifest? -} - // Delete removes the specified image reference from the remote registry. -func Delete(ref name.Reference, auth authn.Authenticator, t http.RoundTripper, do DeleteOptions) error { +func Delete(ref name.Reference, auth authn.Authenticator, t http.RoundTripper) error { scopes := []string{ref.Scope(transport.DeleteScope)} tr, err := transport.New(ref.Context().Registry, auth, t, scopes) if err != nil { diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go index e2995a00f5..1be0ad2ea4 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/image.go @@ -21,27 +21,35 @@ import ( "io/ioutil" "net/http" "net/url" + "strings" "sync" "github.com/google/go-containerregistry/pkg/authn" "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/partial" "github.com/google/go-containerregistry/pkg/v1/remote/transport" "github.com/google/go-containerregistry/pkg/v1/types" "github.com/google/go-containerregistry/pkg/v1/v1util" ) +var defaultPlatform = v1.Platform{ + Architecture: "amd64", + OS: "linux", +} + // remoteImage accesses an image from a remote registry type remoteImage struct { - ref name.Reference - client *http.Client + fetcher manifestLock sync.Mutex // Protects manifest manifest []byte configLock sync.Mutex // Protects config config []byte + mediaType types.MediaType + platform v1.Platform } +// ImageOption is a functional option for Image. 
type ImageOption func(*imageOpener) error var _ partial.CompressedImageCore = (*remoteImage)(nil) @@ -51,6 +59,7 @@ type imageOpener struct { transport http.RoundTripper ref name.Reference client *http.Client + platform v1.Platform } func (i *imageOpener) Open() (v1.Image, error) { @@ -59,8 +68,11 @@ func (i *imageOpener) Open() (v1.Image, error) { return nil, err } ri := &remoteImage{ - ref: i.ref, - client: &http.Client{Transport: tr}, + fetcher: fetcher{ + Ref: i.ref, + Client: &http.Client{Transport: tr}, + }, + platform: i.platform, } imgCore, err := partial.CompressedToImage(ri) if err != nil { @@ -81,6 +93,7 @@ func Image(ref name.Reference, options ...ImageOption) (v1.Image, error) { auth: authn.Anonymous, transport: http.DefaultTransport, ref: ref, + platform: defaultPlatform, } for _, option := range options { @@ -91,16 +104,83 @@ func Image(ref name.Reference, options ...ImageOption) (v1.Image, error) { return img.Open() } -func (r *remoteImage) url(resource, identifier string) url.URL { +// fetcher implements methods for reading from a remote image. +type fetcher struct { + Ref name.Reference + Client *http.Client +} + +// url returns a url.Url for the specified path in the context of this remote image reference. 
+func (f *fetcher) url(resource, identifier string) url.URL { return url.URL{ - Scheme: r.ref.Context().Registry.Scheme(), - Host: r.ref.Context().RegistryStr(), - Path: fmt.Sprintf("/v2/%s/%s/%s", r.ref.Context().RepositoryStr(), resource, identifier), + Scheme: f.Ref.Context().Registry.Scheme(), + Host: f.Ref.Context().RegistryStr(), + Path: fmt.Sprintf("/v2/%s/%s/%s", f.Ref.Context().RepositoryStr(), resource, identifier), } } +func (f *fetcher) fetchManifest(acceptable []types.MediaType) ([]byte, *v1.Descriptor, error) { + u := f.url("manifests", f.Ref.Identifier()) + req, err := http.NewRequest(http.MethodGet, u.String(), nil) + if err != nil { + return nil, nil, err + } + accept := []string{} + for _, mt := range acceptable { + accept = append(accept, string(mt)) + } + req.Header.Set("Accept", strings.Join(accept, ",")) + + resp, err := f.Client.Do(req) + if err != nil { + return nil, nil, err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK); err != nil { + return nil, nil, err + } + + manifest, err := ioutil.ReadAll(resp.Body) + if err != nil { + return nil, nil, err + } + + digest, size, err := v1.SHA256(bytes.NewReader(manifest)) + if err != nil { + return nil, nil, err + } + + // Validate the digest matches what we asked for, if pulling by digest. + if dgst, ok := f.Ref.(name.Digest); ok { + if digest.String() != dgst.DigestStr() { + return nil, nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), f.Ref) + } + } else { + // Do nothing for tags; I give up. + // + // We'd like to validate that the "Docker-Content-Digest" header matches what is returned by the registry, + // but so many registries implement this incorrectly that it's not worth checking. + // + // For reference: + // https://github.com/docker/distribution/issues/2395 + // https://github.com/GoogleContainerTools/kaniko/issues/298 + } + + // Return all this info since we have to calculate it anyway. 
+ desc := v1.Descriptor{ + Digest: digest, + Size: size, + MediaType: types.MediaType(resp.Header.Get("Content-Type")), + } + + return manifest, &desc, nil +} + func (r *remoteImage) MediaType() (types.MediaType, error) { - // TODO(jonjohnsonjr): Determine this based on response. + if string(r.mediaType) != "" { + return r.mediaType, nil + } return types.DockerManifestSchema2, nil } @@ -112,48 +192,27 @@ func (r *remoteImage) RawManifest() ([]byte, error) { return r.manifest, nil } - u := r.url("manifests", r.ref.Identifier()) - req, err := http.NewRequest(http.MethodGet, u.String(), nil) - if err != nil { - return nil, err + acceptable := []types.MediaType{ + types.DockerManifestSchema2, + types.OCIManifestSchema1, + // We'll resolve these to an image based on the platform. + types.DockerManifestList, + types.OCIImageIndex, } - // TODO(jonjohnsonjr): Accept OCI manifest, manifest list, and image index. - req.Header.Set("Accept", string(types.DockerManifestSchema2)) - resp, err := r.client.Do(req) - if err != nil { - return nil, err - } - defer resp.Body.Close() - - if err := CheckError(resp, http.StatusOK); err != nil { - return nil, err - } - - manifest, err := ioutil.ReadAll(resp.Body) + manifest, desc, err := r.fetchManifest(acceptable) if err != nil { return nil, err } - digest, _, err := v1.SHA256(bytes.NewReader(manifest)) - if err != nil { - return nil, err - } - - // Validate the digest matches what we asked for, if pulling by digest. 
- if dgst, ok := r.ref.(name.Digest); ok { - if digest.String() != dgst.DigestStr() { - return nil, fmt.Errorf("manifest digest: %q does not match requested digest: %q for %q", digest, dgst.DigestStr(), r.ref) - } - } else if checksum := resp.Header.Get("Docker-Content-Digest"); checksum != "" && checksum != digest.String() { - err := fmt.Errorf("manifest digest: %q does not match Docker-Content-Digest: %q for %q", digest, checksum, r.ref) - if r.ref.Context().RegistryStr() == name.DefaultRegistry { - // TODO(docker/distribution#2395): Remove this check. - } else { - // When pulling by tag, we can only validate that the digest matches what the registry told us it should be. + // We want an image but the registry has an index, resolve it to an image. + for desc.MediaType == types.DockerManifestList || desc.MediaType == types.OCIImageIndex { + manifest, desc, err = r.matchImage(manifest) + if err != nil { return nil, err } } + r.mediaType = desc.MediaType r.manifest = manifest return r.manifest, nil } @@ -201,12 +260,12 @@ func (rl *remoteLayer) Digest() (v1.Hash, error) { // Compressed implements partial.CompressedLayer func (rl *remoteLayer) Compressed() (io.ReadCloser, error) { u := rl.ri.url("blobs", rl.digest.String()) - resp, err := rl.ri.client.Get(u.String()) + resp, err := rl.ri.Client.Get(u.String()) if err != nil { return nil, err } - if err := CheckError(resp, http.StatusOK); err != nil { + if err := transport.CheckError(resp, http.StatusOK); err != nil { resp.Body.Close() return nil, err } @@ -243,3 +302,36 @@ func (r *remoteImage) LayerByDigest(h v1.Hash) (partial.CompressedLayer, error) digest: h, }, nil } + +// This naively matches the first manifest with matching Architecture and OS. 
+// +// We should probably use this instead: +// github.com/containerd/containerd/platforms +// +// But first we'd need to migrate to: +// github.com/opencontainers/image-spec/specs-go/v1 +func (r *remoteImage) matchImage(rawIndex []byte) ([]byte, *v1.Descriptor, error) { + index, err := v1.ParseIndexManifest(bytes.NewReader(rawIndex)) + if err != nil { + return nil, nil, err + } + for _, childDesc := range index.Manifests { + // If platform is missing from child descriptor, assume it's amd64/linux. + p := defaultPlatform + if childDesc.Platform != nil { + p = *childDesc.Platform + } + if r.platform.Architecture == p.Architecture && r.platform.OS == p.OS { + childRef, err := name.ParseReference(fmt.Sprintf("%s@%s", r.Ref.Context(), childDesc.Digest), name.StrictValidation) + if err != nil { + return nil, nil, err + } + r.fetcher = fetcher{ + Client: r.Client, + Ref: childRef, + } + return r.fetchManifest([]types.MediaType{childDesc.MediaType}) + } + } + return nil, nil, fmt.Errorf("no matching image for %s/%s, index: %s", r.platform.Architecture, r.platform.OS, string(rawIndex)) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go new file mode 100644 index 0000000000..03afc481ad --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/index.go @@ -0,0 +1,139 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+// See the License for the specific language governing permissions and +// limitations under the License. + +package remote + +import ( + "bytes" + "fmt" + "net/http" + "sync" + + "github.com/google/go-containerregistry/pkg/authn" + "github.com/google/go-containerregistry/pkg/name" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" + "github.com/google/go-containerregistry/pkg/v1/remote/transport" + "github.com/google/go-containerregistry/pkg/v1/types" +) + +// remoteIndex accesses an index from a remote registry +type remoteIndex struct { + fetcher + manifestLock sync.Mutex // Protects manifest + manifest []byte + mediaType types.MediaType +} + +// Index provides access to a remote index reference, applying functional options +// to the underlying imageOpener before resolving the reference into a v1.ImageIndex. +func Index(ref name.Reference, options ...ImageOption) (v1.ImageIndex, error) { + i := &imageOpener{ + auth: authn.Anonymous, + transport: http.DefaultTransport, + ref: ref, + } + + for _, option := range options { + if err := option(i); err != nil { + return nil, err + } + } + tr, err := transport.New(i.ref.Context().Registry, i.auth, i.transport, []string{i.ref.Scope(transport.PullScope)}) + if err != nil { + return nil, err + } + return &remoteIndex{ + fetcher: fetcher{ + Ref: i.ref, + Client: &http.Client{Transport: tr}, + }, + }, nil +} + +func (r *remoteIndex) MediaType() (types.MediaType, error) { + if string(r.mediaType) != "" { + return r.mediaType, nil + } + return types.DockerManifestList, nil +} + +func (r *remoteIndex) Digest() (v1.Hash, error) { + return partial.Digest(r) +} + +func (r *remoteIndex) RawManifest() ([]byte, error) { + r.manifestLock.Lock() + defer r.manifestLock.Unlock() + if r.manifest != nil { + return r.manifest, nil + } + + acceptable := []types.MediaType{ + types.DockerManifestList, + types.OCIImageIndex, + } + manifest, desc, err := r.fetchManifest(acceptable) + 
if err != nil { + return nil, err + } + + r.mediaType = desc.MediaType + r.manifest = manifest + return r.manifest, nil +} + +func (r *remoteIndex) IndexManifest() (*v1.IndexManifest, error) { + b, err := r.RawManifest() + if err != nil { + return nil, err + } + return v1.ParseIndexManifest(bytes.NewReader(b)) +} + +func (r *remoteIndex) Image(h v1.Hash) (v1.Image, error) { + imgRef, err := name.ParseReference(fmt.Sprintf("%s@%s", r.Ref.Context(), h), name.StrictValidation) + if err != nil { + return nil, err + } + ri := &remoteImage{ + fetcher: fetcher{ + Ref: imgRef, + Client: r.Client, + }, + } + imgCore, err := partial.CompressedToImage(ri) + if err != nil { + return imgCore, err + } + // Wrap the v1.Layers returned by this v1.Image in a hint for downstream + // remote.Write calls to facilitate cross-repo "mounting". + return &mountableImage{ + Image: imgCore, + Reference: r.Ref, + }, nil +} + +func (r *remoteIndex) ImageIndex(h v1.Hash) (v1.ImageIndex, error) { + idxRef, err := name.ParseReference(fmt.Sprintf("%s@%s", r.Ref.Context(), h), name.StrictValidation) + if err != nil { + return nil, err + } + return &remoteIndex{ + fetcher: fetcher{ + Ref: idxRef, + Client: r.Client, + }, + }, nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go index 17c00b5e76..1a36d0a4ba 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/list.go @@ -25,12 +25,12 @@ import ( "github.com/google/go-containerregistry/pkg/v1/remote/transport" ) -type Tags struct { +type tags struct { Name string `json:"name"` Tags []string `json:"tags"` } -// TODO(jonjohnsonjr): return []name.Tag? +// List calls /tags/list for the given repository. 
func List(repo name.Repository, auth authn.Authenticator, t http.RoundTripper) ([]string, error) { scopes := []string{repo.Scope(transport.PullScope)} tr, err := transport.New(repo.Registry, auth, t, scopes) @@ -51,14 +51,14 @@ func List(repo name.Repository, auth authn.Authenticator, t http.RoundTripper) ( } defer resp.Body.Close() - if err := CheckError(resp, http.StatusOK); err != nil { + if err := transport.CheckError(resp, http.StatusOK); err != nil { return nil, err } - tags := Tags{} - if err := json.NewDecoder(resp.Body).Decode(&tags); err != nil { + parsed := tags{} + if err := json.NewDecoder(resp.Body).Decode(&parsed); err != nil { return nil, err } - return tags.Tags, nil + return parsed.Tags, nil } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go index 13b79064da..3afda2a341 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/mount.go @@ -16,7 +16,7 @@ package remote import ( "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" ) // MountableLayer wraps a v1.Layer in a shim that enables the layer to be diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go index a6e9584ee3..335e3fe5be 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/options.go @@ -19,6 +19,7 @@ import ( "net/http" "github.com/google/go-containerregistry/pkg/authn" + v1 "github.com/google/go-containerregistry/pkg/v1" ) // WithTransport is a functional option for overriding the default transport @@ -54,3 +55,10 @@ func WithAuthFromKeychain(keys authn.Keychain) ImageOption { return nil } } + 
+func WithPlatform(p v1.Platform) ImageOption { + return func(i *imageOpener) error { + i.platform = p + return nil + } +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go index 752038cb1f..e77f47f699 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/basic.go @@ -39,7 +39,8 @@ func (bt *basicTransport) RoundTrip(in *http.Request) (*http.Response, error) { // abstraction, so to avoid forwarding Authorization headers to places // we are redirected, only set it when the authorization header matches // the host with which we are interacting. - if in.Host == bt.target { + // In case of redirect http.Client can use an empty Host, check URL too. + if in.Host == bt.target || in.URL.Host == bt.target { in.Header.Set("Authorization", hdr) } in.Header.Set("User-Agent", transportName) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go index 7dd49ae6f8..f72ab276d6 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/bearer.go @@ -15,9 +15,8 @@ package transport import ( - "fmt" - "encoding/json" + "fmt" "io/ioutil" "net/http" "net/url" @@ -40,28 +39,48 @@ type bearerTransport struct { // See https://docs.docker.com/registry/spec/auth/token/ service string scopes []string + // Scheme we should use, determined by ping response. 
+ scheme string } var _ http.RoundTripper = (*bearerTransport)(nil) // RoundTrip implements http.RoundTripper func (bt *bearerTransport) RoundTrip(in *http.Request) (*http.Response, error) { - hdr, err := bt.bearer.Authorization() + sendRequest := func() (*http.Response, error) { + hdr, err := bt.bearer.Authorization() + if err != nil { + return nil, err + } + + // http.Client handles redirects at a layer above the http.RoundTripper + // abstraction, so to avoid forwarding Authorization headers to places + // we are redirected, only set it when the authorization header matches + // the registry with which we are interacting. + // In case of redirect http.Client can use an empty Host, check URL too. + if in.Host == bt.registry.RegistryStr() || in.URL.Host == bt.registry.RegistryStr() { + in.Header.Set("Authorization", hdr) + } + in.Header.Set("User-Agent", transportName) + + in.URL.Scheme = bt.scheme + return bt.inner.RoundTrip(in) + } + + res, err := sendRequest() if err != nil { return nil, err } - // http.Client handles redirects at a layer above the http.RoundTripper - // abstraction, so to avoid forwarding Authorization headers to places - // we are redirected, only set it when the authorization header matches - // the registry with which we are interacting. - if in.Host == bt.registry.RegistryStr() { - in.Header.Set("Authorization", hdr) + // Perform a token refresh() and retry the request in case the token has expired + if res.StatusCode == http.StatusUnauthorized { + if err = bt.refresh(); err != nil { + return nil, err + } + return sendRequest() } - in.Header.Set("User-Agent", transportName) - // TODO(mattmoor): On 401s perform a single refresh() and retry. 
- return bt.inner.RoundTrip(in) + return res, err } func (bt *bearerTransport) refresh() error { @@ -87,6 +106,10 @@ func (bt *bearerTransport) refresh() error { } defer resp.Body.Close() + if err := CheckError(resp, http.StatusOK); err != nil { + return err + } + content, err := ioutil.ReadAll(resp.Body) if err != nil { return err diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/error.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go similarity index 90% rename from vendor/github.com/google/go-containerregistry/pkg/v1/remote/error.go rename to vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go index 076274821e..44885effa5 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/error.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/error.go @@ -12,7 +12,7 @@ // See the License for the specific language governing permissions and // limitations under the License. -package remote +package transport import ( "encoding/json" @@ -35,7 +35,7 @@ var _ error = (*Error)(nil) func (e *Error) Error() string { switch len(e.Errors) { case 0: - return "" + return "" case 1: return e.Errors[0].String() default: @@ -55,9 +55,13 @@ type Diagnostic struct { Detail interface{} `json:"detail,omitempty"` } -// String stringifies the Diagnostic +// String stringifies the Diagnostic in the form: $Code: $Message[; $Detail] func (d Diagnostic) String() string { - return fmt.Sprintf("%s: %q", d.Code, d.Message) + msg := fmt.Sprintf("%s: %s", d.Code, d.Message) + if d.Detail != nil { + msg = fmt.Sprintf("%s; %v", msg, d.Detail) + } + return msg } // ErrorCode is an enumeration of supported error codes. @@ -83,6 +87,7 @@ const ( UnsupportedErrorCode ErrorCode = "UNSUPPORTED" ) +// CheckError returns a structured error if the response status is not in codes. 
func CheckError(resp *http.Response, codes ...int) error { for _, code := range codes { if resp.StatusCode == code { diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go index 89133e3263..cc0d2cfeaa 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/ping.go @@ -36,6 +36,9 @@ type pingResp struct { // Following the challenge there are often key/value pairs // e.g. Bearer service="gcr.io",realm="https://auth.gcr.io/v36/tokenz" parameters map[string]string + + // The registry's scheme to use. Communicates whether we fell back to http. + scheme string } func (c challenge) Canonical() challenge { @@ -63,31 +66,50 @@ func parseChallenge(suffix string) map[string]string { func ping(reg name.Registry, t http.RoundTripper) (*pingResp, error) { client := http.Client{Transport: t} - url := fmt.Sprintf("%s://%s/v2/", reg.Scheme(), reg.Name()) - resp, err := client.Get(url) - if err != nil { - return nil, err + // This first attempts to use "https" for every request, falling back to http + // if the registry matches our localhost heuristic or if it is intentionally + // set to insecure via name.NewInsecureRegistry. + schemes := []string{"https"} + if reg.Scheme() == "http" { + schemes = append(schemes, "http") } - defer resp.Body.Close() - switch resp.StatusCode { - case http.StatusOK: - // If we get a 200, then no authentication is needed. - return &pingResp{challenge: anonymous}, nil - case http.StatusUnauthorized: - wac := resp.Header.Get(http.CanonicalHeaderKey("WWW-Authenticate")) - if parts := strings.SplitN(wac, " ", 2); len(parts) == 2 { - // If there are two parts, then parse the challenge parameters. 
- return &pingResp{ - challenge: challenge(parts[0]).Canonical(), - parameters: parseChallenge(parts[1]), - }, nil + var connErr error + for _, scheme := range schemes { + url := fmt.Sprintf("%s://%s/v2/", scheme, reg.Name()) + resp, err := client.Get(url) + if err != nil { + connErr = err + // Potentially retry with http. + continue + } + defer resp.Body.Close() + + switch resp.StatusCode { + case http.StatusOK: + // If we get a 200, then no authentication is needed. + return &pingResp{ + challenge: anonymous, + scheme: scheme, + }, nil + case http.StatusUnauthorized: + wac := resp.Header.Get(http.CanonicalHeaderKey("WWW-Authenticate")) + if parts := strings.SplitN(wac, " ", 2); len(parts) == 2 { + // If there are two parts, then parse the challenge parameters. + return &pingResp{ + challenge: challenge(parts[0]).Canonical(), + parameters: parseChallenge(parts[1]), + scheme: scheme, + }, nil + } + // Otherwise, just return the challenge without parameters. + return &pingResp{ + challenge: challenge(wac).Canonical(), + scheme: scheme, + }, nil + default: + return nil, fmt.Errorf("unrecognized HTTP status: %v", resp.Status) } - // Otherwise, just return the challenge without parameters. 
- return &pingResp{ - challenge: challenge(wac).Canonical(), - }, nil - default: - return nil, fmt.Errorf("unrecognized HTTP status: %v", resp.Status) } + return nil, connErr } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go index 6140ab2ce3..18c8e66c75 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/transport/transport.go @@ -73,6 +73,7 @@ func New(reg name.Registry, auth authn.Authenticator, t http.RoundTripper, scope registry: reg, service: service, scopes: scopes, + scheme: pr.scheme, } if err := bt.refresh(); err != nil { return nil, err diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go index af61e361be..66f148155d 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/remote/write.go @@ -18,26 +18,29 @@ import ( "bytes" "errors" "fmt" + "io" "log" "net/http" "net/url" "github.com/google/go-containerregistry/pkg/authn" "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" + "github.com/google/go-containerregistry/pkg/v1/partial" "github.com/google/go-containerregistry/pkg/v1/remote/transport" + "github.com/google/go-containerregistry/pkg/v1/stream" + "github.com/google/go-containerregistry/pkg/v1/types" + "golang.org/x/sync/errgroup" ) -// WriteOptions are used to expose optional information to guide or -// control the image write. -type WriteOptions struct { - // TODO(mattmoor): Expose "threads" to limit parallelism? 
+type manifest interface { + RawManifest() ([]byte, error) + MediaType() (types.MediaType, error) + Digest() (v1.Hash, error) } // Write pushes the provided img to the specified image reference. -func Write(ref name.Reference, img v1.Image, auth authn.Authenticator, t http.RoundTripper, - wo WriteOptions) error { - +func Write(ref name.Reference, img v1.Image, auth authn.Authenticator, t http.RoundTripper) error { ls, err := img.Layers() if err != nil { return err @@ -49,50 +52,74 @@ func Write(ref name.Reference, img v1.Image, auth authn.Authenticator, t http.Ro return err } w := writer{ - ref: ref, - client: &http.Client{Transport: tr}, - img: img, - options: wo, + ref: ref, + client: &http.Client{Transport: tr}, } - bs, err := img.BlobSet() - if err != nil { - return err - } - - // Spin up go routines to publish each of the members of BlobSet(), - // and use an error channel to collect their results. - errCh := make(chan error) - defer close(errCh) - for h := range bs { - go func(h v1.Hash) { - errCh <- w.uploadOne(h) - }(h) - } - - // Now wait for all of the blob uploads to complete. - var errors []error - for _ = range bs { - if err := <-errCh; err != nil { - errors = append(errors, err) + // Upload individual layers in goroutines and collect any errors. + // If we can dedupe by the layer digest, try to do so. If the layer is + // a stream.Layer, we can't dedupe and might re-upload. + var g errgroup.Group + uploaded := map[v1.Hash]bool{} + for _, l := range ls { + l := l + if _, ok := l.(*stream.Layer); !ok { + h, err := l.Digest() + if err != nil { + return err + } + // If we can determine the layer's digest ahead of + // time, use it to dedupe uploads. + if uploaded[h] { + continue // Already uploading. + } + uploaded[h] = true } + + g.Go(func() error { + return w.uploadOne(l) + }) } - if len(errors) > 0 { - // Return the first error we encountered. 
- return errors[0] + + if l, err := partial.ConfigLayer(img); err == stream.ErrNotComputed { + // We can't read the ConfigLayer, because of streaming layers, since the + // config hasn't been calculated yet. + if err := g.Wait(); err != nil { + return err + } + + // Now that all the layers are uploaded, upload the config file blob. + l, err := partial.ConfigLayer(img) + if err != nil { + return err + } + if err := w.uploadOne(l); err != nil { + return err + } + } else if err != nil { + // This is an actual error, not a streaming error, just return it. + return err + } else { + // We *can* read the ConfigLayer, so upload it concurrently with the layers. + g.Go(func() error { + return w.uploadOne(l) + }) + + // Wait for the layers + config. + if err := g.Wait(); err != nil { + return err + } } // With all of the constituent elements uploaded, upload the manifest // to commit the image. - return w.commitImage() + return w.commitImage(img) } // writer writes the elements of an image to a remote image reference. type writer struct { - ref name.Reference - client *http.Client - img v1.Image - options WriteOptions + ref name.Reference + client *http.Client } // url returns a url.Url for the specified path in the context of this remote image reference. @@ -120,11 +147,11 @@ func (w *writer) nextLocation(resp *http.Response) (string, error) { return resp.Request.URL.ResolveReference(u).String(), nil } -// checkExisting checks if a blob exists already in the repository by making a +// checkExistingBlob checks if a blob exists already in the repository by making a // HEAD request to the blob store API. GCR performs an existence check on the // initiation if "mount" is specified, even if no "from" sources are specified. // However, this is not broadly applicable to all registries, e.g. ECR. 
-func (w *writer) checkExisting(h v1.Hash) (bool, error) { +func (w *writer) checkExistingBlob(h v1.Hash) (bool, error) { u := w.url(fmt.Sprintf("/v2/%s/blobs/%s", w.ref.Context().RepositoryStr(), h.String())) resp, err := w.client.Head(u.String()) @@ -133,7 +160,31 @@ func (w *writer) checkExisting(h v1.Hash) (bool, error) { } defer resp.Body.Close() - if err := CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { + if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { + return false, err + } + + return resp.StatusCode == http.StatusOK, nil +} + +// checkExistingManifest checks if a manifest exists already in the repository +// by making a HEAD request to the manifest API. +func (w *writer) checkExistingManifest(h v1.Hash, mt types.MediaType) (bool, error) { + u := w.url(fmt.Sprintf("/v2/%s/manifests/%s", w.ref.Context().RepositoryStr(), h.String())) + + req, err := http.NewRequest(http.MethodHead, u.String(), nil) + if err != nil { + return false, err + } + req.Header.Set("Accept", string(mt)) + + resp, err := w.client.Do(req) + if err != nil { + return false, err + } + defer resp.Body.Close() + + if err := transport.CheckError(resp, http.StatusOK, http.StatusNotFound); err != nil { return false, err } @@ -146,20 +197,13 @@ func (w *writer) checkExisting(h v1.Hash) (bool, error) { // On success, the layer was either mounted (nothing more to do) or a blob // upload was initiated and the body of that blob should be sent to the returned // location. 
-func (w *writer) initiateUpload(h v1.Hash) (location string, mounted bool, err error) { +func (w *writer) initiateUpload(from, mount string) (location string, mounted bool, err error) { u := w.url(fmt.Sprintf("/v2/%s/blobs/uploads/", w.ref.Context().RepositoryStr())) - uv := url.Values{ - "mount": []string{h.String()}, - } - l, err := w.img.LayerByDigest(h) - if err != nil { - return "", false, err - } - - if ml, ok := l.(*MountableLayer); ok { - if w.ref.Context().RegistryStr() == ml.Reference.Context().RegistryStr() { - uv["from"] = []string{ml.Reference.Context().RepositoryStr()} - } + uv := url.Values{} + if mount != "" && from != "" { + // Quay will fail if we specify a "mount" without a "from". + uv["mount"] = []string{mount} + uv["from"] = []string{from} } u.RawQuery = uv.Encode() @@ -170,7 +214,7 @@ func (w *writer) initiateUpload(h v1.Hash) (location string, mounted bool, err e } defer resp.Body.Close() - if err := CheckError(resp, http.StatusCreated, http.StatusAccepted); err != nil { + if err := transport.CheckError(resp, http.StatusCreated, http.StatusAccepted); err != nil { return "", false, err } @@ -191,15 +235,7 @@ func (w *writer) initiateUpload(h v1.Hash) (location string, mounted bool, err e // streamBlob streams the contents of the blob to the specified location. // On failure, this will return an error. On success, this will return the location // header indicating how to commit the streamed blob. 
-func (w *writer) streamBlob(h v1.Hash, streamLocation string) (commitLocation string, err error) { - l, err := w.img.LayerByDigest(h) - if err != nil { - return "", err - } - blob, err := l.Compressed() - if err != nil { - return "", err - } +func (w *writer) streamBlob(blob io.ReadCloser, streamLocation string) (commitLocation string, err error) { defer blob.Close() req, err := http.NewRequest(http.MethodPatch, streamLocation, blob) @@ -213,7 +249,7 @@ func (w *writer) streamBlob(h v1.Hash, streamLocation string) (commitLocation st } defer resp.Body.Close() - if err := CheckError(resp, http.StatusNoContent, http.StatusAccepted, http.StatusCreated); err != nil { + if err := transport.CheckError(resp, http.StatusNoContent, http.StatusAccepted, http.StatusCreated); err != nil { return "", err } @@ -222,14 +258,15 @@ func (w *writer) streamBlob(h v1.Hash, streamLocation string) (commitLocation st return w.nextLocation(resp) } -// commitBlob commits this blob by sending a PUT to the location returned from streaming the blob. -func (w *writer) commitBlob(h v1.Hash, location string) (err error) { +// commitBlob commits this blob by sending a PUT to the location returned from +// streaming the blob. +func (w *writer) commitBlob(location, digest string) error { u, err := url.Parse(location) if err != nil { return err } v := u.Query() - v.Set("digest", h.String()) + v.Set("digest", digest) u.RawQuery = v.Encode() req, err := http.NewRequest(http.MethodPut, u.String(), nil) @@ -243,47 +280,82 @@ func (w *writer) commitBlob(h v1.Hash, location string) (err error) { } defer resp.Body.Close() - return CheckError(resp, http.StatusCreated) + return transport.CheckError(resp, http.StatusCreated) } // uploadOne performs a complete upload of a single layer. 
-func (w *writer) uploadOne(h v1.Hash) error { - existing, err := w.checkExisting(h) - if err != nil { - return err +func (w *writer) uploadOne(l v1.Layer) error { + var from, mount, digest string + if _, ok := l.(*stream.Layer); !ok { + // Layer isn't streamable, we should take advantage of that to + // skip uploading if possible. + // By sending ?digest= in the request, we'll also check that + // our computed digest matches the one computed by the + // registry. + h, err := l.Digest() + if err != nil { + return err + } + digest = h.String() + + existing, err := w.checkExistingBlob(h) + if err != nil { + return err + } + if existing { + log.Printf("existing blob: %v", h) + return nil + } + + mount = h.String() } - if existing { - log.Printf("existing blob: %v", h) - return nil + if ml, ok := l.(*MountableLayer); ok { + if w.ref.Context().RegistryStr() == ml.Reference.Context().RegistryStr() { + from = ml.Reference.Context().RepositoryStr() + } } - location, mounted, err := w.initiateUpload(h) + location, mounted, err := w.initiateUpload(from, mount) if err != nil { return err } else if mounted { - log.Printf("mounted blob: %v", h) + h, err := l.Digest() + if err != nil { + return err + } + log.Printf("mounted blob: %s", h.String()) return nil } - location, err = w.streamBlob(h, location) + blob, err := l.Compressed() + if err != nil { + return err + } + location, err = w.streamBlob(blob, location) if err != nil { return err } - if err := w.commitBlob(h, location); err != nil { + h, err := l.Digest() + if err != nil { return err } - log.Printf("pushed blob %v", h) + digest = h.String() + + if err := w.commitBlob(location, digest); err != nil { + return err + } + log.Printf("pushed blob: %s", digest) return nil } // commitImage does a PUT of the image's manifest. 
-func (w *writer) commitImage() error { - raw, err := w.img.RawManifest() +func (w *writer) commitImage(man manifest) error { + raw, err := man.RawManifest() if err != nil { return err } - mt, err := w.img.MediaType() + mt, err := man.MediaType() if err != nil { return err } @@ -303,11 +375,11 @@ func (w *writer) commitImage() error { } defer resp.Body.Close() - if err := CheckError(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted); err != nil { + if err := transport.CheckError(resp, http.StatusOK, http.StatusCreated, http.StatusAccepted); err != nil { return err } - digest, err := w.img.Digest() + digest, err := man.Digest() if err != nil { return err } @@ -334,11 +406,68 @@ func scopesForUploadingImage(ref name.Reference, layers []v1.Layer) []string { // Push scope should be the first element because a few registries just look at the first scope to determine access. scopes = append(scopes, ref.Scope(transport.PushScope)) - for scope, _ := range scopeSet { + for scope := range scopeSet { scopes = append(scopes, scope) } return scopes } -// TODO(mattmoor): WriteIndex +// WriteIndex pushes the provided ImageIndex to the specified image reference. +// WriteIndex will attempt to push all of the referenced manifests before +// attempting to push the ImageIndex, to retain referential integrity. 
+func WriteIndex(ref name.Reference, ii v1.ImageIndex, auth authn.Authenticator, t http.RoundTripper) error { + index, err := ii.IndexManifest() + if err != nil { + return err + } + + scopes := []string{ref.Scope(transport.PushScope)} + tr, err := transport.New(ref.Context().Registry, auth, t, scopes) + if err != nil { + return err + } + w := writer{ + ref: ref, + client: &http.Client{Transport: tr}, + } + + for _, desc := range index.Manifests { + ref, err := name.ParseReference(fmt.Sprintf("%s@%s", ref.Context(), desc.Digest), name.StrictValidation) + if err != nil { + return err + } + exists, err := w.checkExistingManifest(desc.Digest, desc.MediaType) + if err != nil { + return err + } + if exists { + log.Printf("existing manifest: %v", desc.Digest) + continue + } + + switch desc.MediaType { + case types.OCIImageIndex, types.DockerManifestList: + ii, err := ii.ImageIndex(desc.Digest) + if err != nil { + return err + } + + if err := WriteIndex(ref, ii, auth, t); err != nil { + return err + } + case types.OCIManifestSchema1, types.DockerManifestSchema2: + img, err := ii.Image(desc.Digest) + if err != nil { + return err + } + if err := Write(ref, img, auth, t); err != nil { + return err + } + } + } + + // With all of the constituent elements uploaded, upload the manifest + // to commit the image. + return w.commitImage(ii) +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go new file mode 100644 index 0000000000..f8895a2262 --- /dev/null +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/stream/layer.go @@ -0,0 +1,194 @@ +// Copyright 2018 Google LLC All Rights Reserved. +// +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// +// http://www.apache.org/licenses/LICENSE-2.0 +// +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package stream + +import ( + "compress/gzip" + "crypto/sha256" + "encoding/hex" + "errors" + "hash" + "io" + "sync" + + v1 "github.com/google/go-containerregistry/pkg/v1" +) + +var ( + // ErrNotComputed is returned when the requested value is not yet + // computed because the stream has not been consumed yet. + ErrNotComputed = errors.New("value not computed until stream is consumed") + + // ErrConsumed is returned by Compressed when the underlying stream has + // already been consumed and closed. + ErrConsumed = errors.New("stream was already consumed") +) + +// Layer is a streaming implementation of v1.Layer. +type Layer struct { + blob io.ReadCloser + consumed bool + + mu sync.Mutex + digest, diffID *v1.Hash + size int64 +} + +var _ v1.Layer = (*Layer)(nil) + +// NewLayer creates a Layer from an io.ReadCloser. +func NewLayer(rc io.ReadCloser) *Layer { return &Layer{blob: rc} } + +// Digest implements v1.Layer. +func (l *Layer) Digest() (v1.Hash, error) { + l.mu.Lock() + defer l.mu.Unlock() + if l.digest == nil { + return v1.Hash{}, ErrNotComputed + } + return *l.digest, nil +} + +// DiffID implements v1.Layer. +func (l *Layer) DiffID() (v1.Hash, error) { + l.mu.Lock() + defer l.mu.Unlock() + if l.diffID == nil { + return v1.Hash{}, ErrNotComputed + } + return *l.diffID, nil +} + +// Size implements v1.Layer. +func (l *Layer) Size() (int64, error) { + l.mu.Lock() + defer l.mu.Unlock() + if l.size == 0 { + return 0, ErrNotComputed + } + return l.size, nil +} + +// Uncompressed implements v1.Layer. 
+func (l *Layer) Uncompressed() (io.ReadCloser, error) { + return nil, errors.New("NYI: stream.Layer.Uncompressed is not implemented") +} + +// Compressed implements v1.Layer. +func (l *Layer) Compressed() (io.ReadCloser, error) { + if l.consumed { + return nil, ErrConsumed + } + return newCompressedReader(l) +} + +type compressedReader struct { + closer io.Closer // original blob's Closer. + + h, zh hash.Hash // collects digests of compressed and uncompressed stream. + pr io.Reader + count *countWriter + + l *Layer // stream.Layer to update upon Close. +} + +func newCompressedReader(l *Layer) (*compressedReader, error) { + h := sha256.New() + zh := sha256.New() + count := &countWriter{} + + // gzip.Writer writes to the output stream via pipe, a hasher to + // capture compressed digest, and a countWriter to capture compressed + // size. + pr, pw := io.Pipe() + zw, err := gzip.NewWriterLevel(io.MultiWriter(pw, zh, count), gzip.BestSpeed) + if err != nil { + return nil, err + } + + cr := &compressedReader{ + closer: newMultiCloser(zw, l.blob), + pr: pr, + h: h, + zh: zh, + count: count, + l: l, + } + go func() { + if _, err := io.Copy(io.MultiWriter(h, zw), l.blob); err != nil { + pw.CloseWithError(err) + return + } + // Now close the compressed reader, to flush the gzip stream + // and calculate digest/diffID/size. This will cause pr to + // return EOF which will cause readers of the Compressed stream + // to finish reading. + pw.CloseWithError(cr.Close()) + }() + + return cr, nil +} + +func (cr *compressedReader) Read(b []byte) (int, error) { return cr.pr.Read(b) } + +func (cr *compressedReader) Close() error { + cr.l.mu.Lock() + defer cr.l.mu.Unlock() + + // Close the inner ReadCloser. 
+ if err := cr.closer.Close(); err != nil { + return err + } + + diffID, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.h.Sum(nil))) + if err != nil { + return err + } + cr.l.diffID = &diffID + + digest, err := v1.NewHash("sha256:" + hex.EncodeToString(cr.zh.Sum(nil))) + if err != nil { + return err + } + cr.l.digest = &digest + + cr.l.size = cr.count.n + cr.l.consumed = true + return nil +} + +// countWriter counts bytes written to it. +type countWriter struct{ n int64 } + +func (c *countWriter) Write(p []byte) (int, error) { + c.n += int64(len(p)) + return len(p), nil +} + +// multiCloser is a Closer that collects multiple Closers and Closes them in order. +type multiCloser []io.Closer + +var _ io.Closer = (multiCloser)(nil) + +func newMultiCloser(c ...io.Closer) multiCloser { return multiCloser(c) } + +func (m multiCloser) Close() error { + for _, c := range m { + if err := c.Close(); err != nil { + return err + } + } + return nil +} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go index 2a62327ce6..ced18735c8 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/image.go @@ -26,7 +26,7 @@ import ( "sync" "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/partial" "github.com/google/go-containerregistry/pkg/v1/types" "github.com/google/go-containerregistry/pkg/v1/v1util" @@ -54,6 +54,7 @@ type compressedImage struct { var _ partial.UncompressedImageCore = (*uncompressedImage)(nil) var _ partial.CompressedImageCore = (*compressedImage)(nil) +// Opener is a thunk for opening a tar file. 
type Opener func() (io.ReadCloser, error) func pathOpener(path string) Opener { @@ -62,6 +63,7 @@ func pathOpener(path string) Opener { } } +// ImageFromPath returns a v1.Image from a tarball located on path. func ImageFromPath(path string, tag *name.Tag) (v1.Image, error) { return Image(pathOpener(path), tag) } diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go index 6d43ff7d49..00256e8f2e 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/layer.go @@ -20,7 +20,7 @@ import ( "io/ioutil" "os" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" "github.com/google/go-containerregistry/pkg/v1/v1util" ) diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go index a7f72cd988..2ee81f0b80 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/tarball/write.go @@ -23,94 +23,134 @@ import ( "os" "github.com/google/go-containerregistry/pkg/name" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" ) -// WriteOptions are used to expose optional information to guide or -// control the image write. -type WriteOptions struct { - // TODO(mattmoor): Whether to store things compressed? -} - // WriteToFile writes in the compressed format to a tarball, on disk. // This is just syntactic sugar wrapping tarball.Write with a new file. 
-func WriteToFile(p string, tag name.Tag, img v1.Image, wo *WriteOptions) error { +func WriteToFile(p string, ref name.Reference, img v1.Image) error { w, err := os.Create(p) if err != nil { return err } defer w.Close() - return Write(tag, img, wo, w) + return Write(ref, img, w) } -// Write the contents of the image to the provided reader, in the compressed format. +// MultiWriteToFile writes in the compressed format to a tarball, on disk. +// This is just syntactic sugar wrapping tarball.MultiWrite with a new file. +func MultiWriteToFile(p string, tagToImage map[name.Tag]v1.Image) error { + var refToImage map[name.Reference]v1.Image = make(map[name.Reference]v1.Image, len(tagToImage)) + for i, d := range tagToImage { + refToImage[i] = d + } + return MultiRefWriteToFile(p, refToImage) +} + +// MultiRefWriteToFile writes in the compressed format to a tarball, on disk. +// This is just syntactic sugar wrapping tarball.MultiRefWrite with a new file. +func MultiRefWriteToFile(p string, refToImage map[name.Reference]v1.Image) error { + w, err := os.Create(p) + if err != nil { + return err + } + defer w.Close() + + return MultiRefWrite(refToImage, w) +} + +// Write is a wrapper to write a single image and tag to a tarball. +func Write(ref name.Reference, img v1.Image, w io.Writer) error { + return MultiRefWrite(map[name.Reference]v1.Image{ref: img}, w) +} + +// MultiWrite writes the contents of each image to the provided reader, in the compressed format. // The contents are written in the following format: // One manifest.json file at the top level containing information about several images. // One file for each layer, named after the layer's SHA. // One file for the config blob, named after its SHA. 
-func Write(tag name.Tag, img v1.Image, wo *WriteOptions, w io.Writer) error { +func MultiWrite(tagToImage map[name.Tag]v1.Image, w io.Writer) error { + var refToImage map[name.Reference]v1.Image = make(map[name.Reference]v1.Image, len(tagToImage)) + for i, d := range tagToImage { + refToImage[i] = d + } + return MultiRefWrite(refToImage, w) +} + +// MultiRefWrite writes the contents of each image to the provided reader, in the compressed format. +// The contents are written in the following format: +// One manifest.json file at the top level containing information about several images. +// One file for each layer, named after the layer's SHA. +// One file for the config blob, named after its SHA. +func MultiRefWrite(refToImage map[name.Reference]v1.Image, w io.Writer) error { tf := tar.NewWriter(w) defer tf.Close() - // Write the config. - cfgName, err := img.ConfigName() - if err != nil { - return err - } - cfgBlob, err := img.RawConfigFile() - if err != nil { - return err - } - if err := writeTarEntry(tf, cfgName.String(), bytes.NewReader(cfgBlob), int64(len(cfgBlob))); err != nil { - return err - } + imageToTags := dedupRefToImage(refToImage) + var td tarDescriptor - // Write the layers. - layers, err := img.Layers() - if err != nil { - return err - } - layerFiles := make([]string, len(layers)) - for i, l := range layers { - d, err := l.Digest() + for img, tags := range imageToTags { + // Write the config. + cfgName, err := img.ConfigName() if err != nil { return err } - - // Munge the file name to appease ancient technology. - // - // tar assumes anything with a colon is a remote tape drive: - // https://www.gnu.org/software/tar/manual/html_section/tar_45.html - // Drop the algorithm prefix, e.g. 
"sha256:" - hex := d.Hex - - // gunzip expects certain file extensions: - // https://www.gnu.org/software/gzip/manual/html_node/Overview.html - layerFiles[i] = fmt.Sprintf("%s.tar.gz", hex) - - r, err := l.Compressed() + cfgBlob, err := img.RawConfigFile() if err != nil { return err } - blobSize, err := l.Size() + if err := writeTarEntry(tf, cfgName.String(), bytes.NewReader(cfgBlob), int64(len(cfgBlob))); err != nil { + return err + } + + // Write the layers. + layers, err := img.Layers() if err != nil { return err } + layerFiles := make([]string, len(layers)) + for i, l := range layers { + d, err := l.Digest() + if err != nil { + return err + } - if err := writeTarEntry(tf, layerFiles[i], r, blobSize); err != nil { - return err + // Munge the file name to appease ancient technology. + // + // tar assumes anything with a colon is a remote tape drive: + // https://www.gnu.org/software/tar/manual/html_section/tar_45.html + // Drop the algorithm prefix, e.g. "sha256:" + hex := d.Hex + + // gunzip expects certain file extensions: + // https://www.gnu.org/software/gzip/manual/html_node/Overview.html + layerFiles[i] = fmt.Sprintf("%s.tar.gz", hex) + + r, err := l.Compressed() + if err != nil { + return err + } + blobSize, err := l.Size() + if err != nil { + return err + } + + if err := writeTarEntry(tf, layerFiles[i], r, blobSize); err != nil { + return err + } } - } - // Generate the tar descriptor and write it. - td := tarDescriptor{ - singleImageTarDescriptor{ + // Generate the tar descriptor and write it. 
+ sitd := singleImageTarDescriptor{ Config: cfgName.String(), - RepoTags: []string{tag.String()}, + RepoTags: tags, Layers: layerFiles, - }, + } + + td = append(td, sitd) } + tdBytes, err := json.Marshal(td) if err != nil { return err @@ -118,6 +158,26 @@ func Write(tag name.Tag, img v1.Image, wo *WriteOptions, w io.Writer) error { return writeTarEntry(tf, "manifest.json", bytes.NewReader(tdBytes), int64(len(tdBytes))) } +func dedupRefToImage(refToImage map[name.Reference]v1.Image) map[v1.Image][]string { + imageToTags := make(map[v1.Image][]string) + + for ref, img := range refToImage { + if tag, ok := ref.(name.Tag); ok { + if tags, ok := imageToTags[img]; ok && tags != nil { + imageToTags[img] = append(tags, tag.String()) + } else { + imageToTags[img] = []string{tag.String()} + } + } else { + if _, ok := imageToTags[img]; !ok { + imageToTags[img] = nil + } + } + } + + return imageToTags +} + // write a file to the provided writer with a corresponding tar header func writeTarEntry(tf *tar.Writer, path string, r io.Reader, size int64) error { hdr := &tar.Header{ diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/nop.go b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/nop.go deleted file mode 100644 index 8ff288d978..0000000000 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/nop.go +++ /dev/null @@ -1,40 +0,0 @@ -// Copyright 2018 Google LLC All Rights Reserved. -// -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// -// http://www.apache.org/licenses/LICENSE-2.0 -// -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
-// See the License for the specific language governing permissions and -// limitations under the License. - -package v1util - -import ( - "io" -) - -func nop() error { - return nil -} - -// NopWriteCloser wraps the io.Writer as an io.WriteCloser with a Close() method that does nothing. -func NopWriteCloser(w io.Writer) io.WriteCloser { - return &writeAndCloser{ - Writer: w, - CloseFunc: nop, - } -} - -// NopReadCloser wraps the io.Reader as an io.ReadCloser with a Close() method that does nothing. -// This is technically redundant with ioutil.NopCloser, but provided for symmetry and clarity. -func NopReadCloser(r io.Reader) io.ReadCloser { - return &readAndCloser{ - Reader: r, - CloseFunc: nop, - } -} diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/verify.go b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/verify.go index 7ebb9dde9f..c9699770ce 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/verify.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/verify.go @@ -20,7 +20,7 @@ import ( "hash" "io" - "github.com/google/go-containerregistry/pkg/v1" + v1 "github.com/google/go-containerregistry/pkg/v1" ) type verifyReader struct { diff --git a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/zip.go b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/zip.go index f12d0ed887..2b0f24f6a2 100644 --- a/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/zip.go +++ b/vendor/github.com/google/go-containerregistry/pkg/v1/v1util/zip.go @@ -70,56 +70,14 @@ func GunzipReadCloser(r io.ReadCloser) (io.ReadCloser, error) { }, nil } -// GzipWriteCloser returns an io.WriteCloser to which uncompressed data may be -// written, and the compressed data is then written to the provided -// io.WriteCloser. 
-func GzipWriteCloser(w io.WriteCloser) io.WriteCloser { - gw := gzip.NewWriter(w) - return &writeAndCloser{ - Writer: gw, - CloseFunc: func() error { - if err := gw.Close(); err != nil { - return err - } - return w.Close() - }, - } -} - -// gunzipWriteCloser implements io.WriteCloser -// It is used to implement GunzipWriteClose. -type gunzipWriteCloser struct { - *bytes.Buffer - writer io.WriteCloser -} - -// Close implements io.WriteCloser -func (gwc *gunzipWriteCloser) Close() error { - // TODO(mattmoor): How to avoid buffering this whole thing into memory? - gr, err := gzip.NewReader(gwc.Buffer) - if err != nil { - return err - } - if _, err := io.Copy(gwc.writer, gr); err != nil { - return err - } - return gwc.writer.Close() -} - -// GunzipWriteCloser returns an io.WriteCloser to which compressed data may be -// written, and the uncompressed data is then written to the provided -// io.WriteCloser. -func GunzipWriteCloser(w io.WriteCloser) (io.WriteCloser, error) { - return &gunzipWriteCloser{ - Buffer: bytes.NewBuffer(nil), - writer: w, - }, nil -} - // IsGzipped detects whether the input stream is compressed. func IsGzipped(r io.Reader) (bool, error) { magicHeader := make([]byte, 2) - if _, err := r.Read(magicHeader); err != nil { + n, err := r.Read(magicHeader) + if n == 0 && err == io.EOF { + return false, nil + } + if err != nil { return false, err } return bytes.Equal(magicHeader, gzipMagicHeader), nil diff --git a/vendor/github.com/pmezard/go-difflib/LICENSE b/vendor/github.com/pmezard/go-difflib/LICENSE new file mode 100644 index 0000000000..c67dad612a --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/LICENSE @@ -0,0 +1,27 @@ +Copyright (c) 2013, Patrick Mezard +All rights reserved. 
+ +Redistribution and use in source and binary forms, with or without +modification, are permitted provided that the following conditions are +met: + + Redistributions of source code must retain the above copyright +notice, this list of conditions and the following disclaimer. + Redistributions in binary form must reproduce the above copyright +notice, this list of conditions and the following disclaimer in the +documentation and/or other materials provided with the distribution. + The names of its contributors may not be used to endorse or promote +products derived from this software without specific prior written +permission. + +THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS +IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED +TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A +PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT +HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, +SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED +TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR +PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF +LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING +NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS +SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. diff --git a/vendor/github.com/pmezard/go-difflib/difflib/difflib.go b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go new file mode 100644 index 0000000000..003e99fadb --- /dev/null +++ b/vendor/github.com/pmezard/go-difflib/difflib/difflib.go @@ -0,0 +1,772 @@ +// Package difflib is a partial port of Python difflib module. +// +// It provides tools to compare sequences of strings and generate textual diffs. 
+// +// The following class and functions have been ported: +// +// - SequenceMatcher +// +// - unified_diff +// +// - context_diff +// +// Getting unified diffs was the main goal of the port. Keep in mind this code +// is mostly suitable to output text differences in a human friendly way, there +// are no guarantees generated diffs are consumable by patch(1). +package difflib + +import ( + "bufio" + "bytes" + "fmt" + "io" + "strings" +) + +func min(a, b int) int { + if a < b { + return a + } + return b +} + +func max(a, b int) int { + if a > b { + return a + } + return b +} + +func calculateRatio(matches, length int) float64 { + if length > 0 { + return 2.0 * float64(matches) / float64(length) + } + return 1.0 +} + +type Match struct { + A int + B int + Size int +} + +type OpCode struct { + Tag byte + I1 int + I2 int + J1 int + J2 int +} + +// SequenceMatcher compares sequence of strings. The basic +// algorithm predates, and is a little fancier than, an algorithm +// published in the late 1980's by Ratcliff and Obershelp under the +// hyperbolic name "gestalt pattern matching". The basic idea is to find +// the longest contiguous matching subsequence that contains no "junk" +// elements (R-O doesn't address junk). The same idea is then applied +// recursively to the pieces of the sequences to the left and to the right +// of the matching subsequence. This does not yield minimal edit +// sequences, but does tend to yield matches that "look right" to people. +// +// SequenceMatcher tries to compute a "human-friendly diff" between two +// sequences. Unlike e.g. UNIX(tm) diff, the fundamental notion is the +// longest *contiguous* & junk-free matching subsequence. That's what +// catches peoples' eyes. The Windows(tm) windiff has another interesting +// notion, pairing up elements that appear uniquely in each sequence. +// That, and the method here, appear to yield more intuitive difference +// reports than does diff. 
This method appears to be the least vulnerable +// to synching up on blocks of "junk lines", though (like blank lines in +// ordinary text files, or maybe "

" lines in HTML files). That may be +// because this is the only method of the 3 that has a *concept* of +// "junk" . +// +// Timing: Basic R-O is cubic time worst case and quadratic time expected +// case. SequenceMatcher is quadratic time for the worst case and has +// expected-case behavior dependent in a complicated way on how many +// elements the sequences have in common; best case time is linear. +type SequenceMatcher struct { + a []string + b []string + b2j map[string][]int + IsJunk func(string) bool + autoJunk bool + bJunk map[string]struct{} + matchingBlocks []Match + fullBCount map[string]int + bPopular map[string]struct{} + opCodes []OpCode +} + +func NewMatcher(a, b []string) *SequenceMatcher { + m := SequenceMatcher{autoJunk: true} + m.SetSeqs(a, b) + return &m +} + +func NewMatcherWithJunk(a, b []string, autoJunk bool, + isJunk func(string) bool) *SequenceMatcher { + + m := SequenceMatcher{IsJunk: isJunk, autoJunk: autoJunk} + m.SetSeqs(a, b) + return &m +} + +// Set two sequences to be compared. +func (m *SequenceMatcher) SetSeqs(a, b []string) { + m.SetSeq1(a) + m.SetSeq2(b) +} + +// Set the first sequence to be compared. The second sequence to be compared is +// not changed. +// +// SequenceMatcher computes and caches detailed information about the second +// sequence, so if you want to compare one sequence S against many sequences, +// use .SetSeq2(s) once and call .SetSeq1(x) repeatedly for each of the other +// sequences. +// +// See also SetSeqs() and SetSeq2(). +func (m *SequenceMatcher) SetSeq1(a []string) { + if &a == &m.a { + return + } + m.a = a + m.matchingBlocks = nil + m.opCodes = nil +} + +// Set the second sequence to be compared. The first sequence to be compared is +// not changed. 
+func (m *SequenceMatcher) SetSeq2(b []string) { + if &b == &m.b { + return + } + m.b = b + m.matchingBlocks = nil + m.opCodes = nil + m.fullBCount = nil + m.chainB() +} + +func (m *SequenceMatcher) chainB() { + // Populate line -> index mapping + b2j := map[string][]int{} + for i, s := range m.b { + indices := b2j[s] + indices = append(indices, i) + b2j[s] = indices + } + + // Purge junk elements + m.bJunk = map[string]struct{}{} + if m.IsJunk != nil { + junk := m.bJunk + for s, _ := range b2j { + if m.IsJunk(s) { + junk[s] = struct{}{} + } + } + for s, _ := range junk { + delete(b2j, s) + } + } + + // Purge remaining popular elements + popular := map[string]struct{}{} + n := len(m.b) + if m.autoJunk && n >= 200 { + ntest := n/100 + 1 + for s, indices := range b2j { + if len(indices) > ntest { + popular[s] = struct{}{} + } + } + for s, _ := range popular { + delete(b2j, s) + } + } + m.bPopular = popular + m.b2j = b2j +} + +func (m *SequenceMatcher) isBJunk(s string) bool { + _, ok := m.bJunk[s] + return ok +} + +// Find longest matching block in a[alo:ahi] and b[blo:bhi]. +// +// If IsJunk is not defined: +// +// Return (i,j,k) such that a[i:i+k] is equal to b[j:j+k], where +// alo <= i <= i+k <= ahi +// blo <= j <= j+k <= bhi +// and for all (i',j',k') meeting those conditions, +// k >= k' +// i <= i' +// and if i == i', j <= j' +// +// In other words, of all maximal matching blocks, return one that +// starts earliest in a, and of all those maximal matching blocks that +// start earliest in a, return the one that starts earliest in b. +// +// If IsJunk is defined, first the longest matching block is +// determined as above, but with the additional restriction that no +// junk element appears in the block. Then that block is extended as +// far as possible by matching (only) junk elements on both sides. So +// the resulting block never matches on junk except as identical junk +// happens to be adjacent to an "interesting" match. 
+// +// If no blocks match, return (alo, blo, 0). +func (m *SequenceMatcher) findLongestMatch(alo, ahi, blo, bhi int) Match { + // CAUTION: stripping common prefix or suffix would be incorrect. + // E.g., + // ab + // acab + // Longest matching block is "ab", but if common prefix is + // stripped, it's "a" (tied with "b"). UNIX(tm) diff does so + // strip, so ends up claiming that ab is changed to acab by + // inserting "ca" in the middle. That's minimal but unintuitive: + // "it's obvious" that someone inserted "ac" at the front. + // Windiff ends up at the same place as diff, but by pairing up + // the unique 'b's and then matching the first two 'a's. + besti, bestj, bestsize := alo, blo, 0 + + // find longest junk-free match + // during an iteration of the loop, j2len[j] = length of longest + // junk-free match ending with a[i-1] and b[j] + j2len := map[int]int{} + for i := alo; i != ahi; i++ { + // look at all instances of a[i] in b; note that because + // b2j has no junk keys, the loop is skipped if a[i] is junk + newj2len := map[int]int{} + for _, j := range m.b2j[m.a[i]] { + // a[i] matches b[j] + if j < blo { + continue + } + if j >= bhi { + break + } + k := j2len[j-1] + 1 + newj2len[j] = k + if k > bestsize { + besti, bestj, bestsize = i-k+1, j-k+1, k + } + } + j2len = newj2len + } + + // Extend the best by non-junk elements on each end. In particular, + // "popular" non-junk elements aren't in b2j, which greatly speeds + // the inner loop above, but also means "the best" match so far + // doesn't contain any junk *or* popular non-junk elements. 
+ for besti > alo && bestj > blo && !m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + !m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + // Now that we have a wholly interesting match (albeit possibly + // empty!), we may as well suck up the matching junk on each + // side of it too. Can't think of a good reason not to, and it + // saves post-processing the (possibly considerable) expense of + // figuring out what to do with it. In the case of an empty + // interesting match, this is clearly the right thing to do, + // because no other kind of match is possible in the regions. + for besti > alo && bestj > blo && m.isBJunk(m.b[bestj-1]) && + m.a[besti-1] == m.b[bestj-1] { + besti, bestj, bestsize = besti-1, bestj-1, bestsize+1 + } + for besti+bestsize < ahi && bestj+bestsize < bhi && + m.isBJunk(m.b[bestj+bestsize]) && + m.a[besti+bestsize] == m.b[bestj+bestsize] { + bestsize += 1 + } + + return Match{A: besti, B: bestj, Size: bestsize} +} + +// Return list of triples describing matching subsequences. +// +// Each triple is of the form (i, j, n), and means that +// a[i:i+n] == b[j:j+n]. The triples are monotonically increasing in +// i and in j. It's also guaranteed that if (i, j, n) and (i', j', n') are +// adjacent triples in the list, and the second is not the last triple in the +// list, then i+n != i' or j+n != j'. IOW, adjacent triples never describe +// adjacent equal blocks. +// +// The last triple is a dummy, (len(a), len(b), 0), and is the only +// triple with n==0. 
+func (m *SequenceMatcher) GetMatchingBlocks() []Match { + if m.matchingBlocks != nil { + return m.matchingBlocks + } + + var matchBlocks func(alo, ahi, blo, bhi int, matched []Match) []Match + matchBlocks = func(alo, ahi, blo, bhi int, matched []Match) []Match { + match := m.findLongestMatch(alo, ahi, blo, bhi) + i, j, k := match.A, match.B, match.Size + if match.Size > 0 { + if alo < i && blo < j { + matched = matchBlocks(alo, i, blo, j, matched) + } + matched = append(matched, match) + if i+k < ahi && j+k < bhi { + matched = matchBlocks(i+k, ahi, j+k, bhi, matched) + } + } + return matched + } + matched := matchBlocks(0, len(m.a), 0, len(m.b), nil) + + // It's possible that we have adjacent equal blocks in the + // matching_blocks list now. + nonAdjacent := []Match{} + i1, j1, k1 := 0, 0, 0 + for _, b := range matched { + // Is this block adjacent to i1, j1, k1? + i2, j2, k2 := b.A, b.B, b.Size + if i1+k1 == i2 && j1+k1 == j2 { + // Yes, so collapse them -- this just increases the length of + // the first block by the length of the second, and the first + // block so lengthened remains the block to compare against. + k1 += k2 + } else { + // Not adjacent. Remember the first block (k1==0 means it's + // the dummy we started with), and make the second block the + // new block to compare against. + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + i1, j1, k1 = i2, j2, k2 + } + } + if k1 > 0 { + nonAdjacent = append(nonAdjacent, Match{i1, j1, k1}) + } + + nonAdjacent = append(nonAdjacent, Match{len(m.a), len(m.b), 0}) + m.matchingBlocks = nonAdjacent + return m.matchingBlocks +} + +// Return list of 5-tuples describing how to turn a into b. +// +// Each tuple is of the form (tag, i1, i2, j1, j2). The first tuple +// has i1 == j1 == 0, and remaining tuples have i1 == the i2 from the +// tuple preceding it, and likewise for j1 == the previous j2. 
+// +// The tags are characters, with these meanings: +// +// 'r' (replace): a[i1:i2] should be replaced by b[j1:j2] +// +// 'd' (delete): a[i1:i2] should be deleted, j1==j2 in this case. +// +// 'i' (insert): b[j1:j2] should be inserted at a[i1:i1], i1==i2 in this case. +// +// 'e' (equal): a[i1:i2] == b[j1:j2] +func (m *SequenceMatcher) GetOpCodes() []OpCode { + if m.opCodes != nil { + return m.opCodes + } + i, j := 0, 0 + matching := m.GetMatchingBlocks() + opCodes := make([]OpCode, 0, len(matching)) + for _, m := range matching { + // invariant: we've pumped out correct diffs to change + // a[:i] into b[:j], and the next matching block is + // a[ai:ai+size] == b[bj:bj+size]. So we need to pump + // out a diff to change a[i:ai] into b[j:bj], pump out + // the matching block, and move (i,j) beyond the match + ai, bj, size := m.A, m.B, m.Size + tag := byte(0) + if i < ai && j < bj { + tag = 'r' + } else if i < ai { + tag = 'd' + } else if j < bj { + tag = 'i' + } + if tag > 0 { + opCodes = append(opCodes, OpCode{tag, i, ai, j, bj}) + } + i, j = ai+size, bj+size + // the list of matching blocks is terminated by a + // sentinel with size 0 + if size > 0 { + opCodes = append(opCodes, OpCode{'e', ai, i, bj, j}) + } + } + m.opCodes = opCodes + return m.opCodes +} + +// Isolate change clusters by eliminating ranges with no changes. +// +// Return a generator of groups with up to n lines of context. +// Each group is in the same format as returned by GetOpCodes(). +func (m *SequenceMatcher) GetGroupedOpCodes(n int) [][]OpCode { + if n < 0 { + n = 3 + } + codes := m.GetOpCodes() + if len(codes) == 0 { + codes = []OpCode{OpCode{'e', 0, 1, 0, 1}} + } + // Fixup leading and trailing groups if they show no changes. 
+ if codes[0].Tag == 'e' {
+ c := codes[0]
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ codes[0] = OpCode{c.Tag, max(i1, i2-n), i2, max(j1, j2-n), j2}
+ }
+ if codes[len(codes)-1].Tag == 'e' {
+ c := codes[len(codes)-1]
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ codes[len(codes)-1] = OpCode{c.Tag, i1, min(i2, i1+n), j1, min(j2, j1+n)}
+ }
+ nn := n + n
+ groups := [][]OpCode{}
+ group := []OpCode{}
+ for _, c := range codes {
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ // End the current group and start a new one whenever
+ // there is a large range with no changes.
+ if c.Tag == 'e' && i2-i1 > nn {
+ group = append(group, OpCode{c.Tag, i1, min(i2, i1+n),
+ j1, min(j2, j1+n)})
+ groups = append(groups, group)
+ group = []OpCode{}
+ i1, j1 = max(i1, i2-n), max(j1, j2-n)
+ }
+ group = append(group, OpCode{c.Tag, i1, i2, j1, j2})
+ }
+ if len(group) > 0 && !(len(group) == 1 && group[0].Tag == 'e') {
+ groups = append(groups, group)
+ }
+ return groups
+}
+
+// Return a measure of the sequences' similarity (float in [0,1]).
+//
+// Where T is the total number of elements in both sequences, and
+// M is the number of matches, this is 2.0*M / T.
+// Note that this is 1 if the sequences are identical, and 0 if
+// they have nothing in common.
+//
+// .Ratio() is expensive to compute if you haven't already computed
+// .GetMatchingBlocks() or .GetOpCodes(), in which case you may
+// want to try .QuickRatio() or .RealQuickRatio() first to get an
+// upper bound.
+func (m *SequenceMatcher) Ratio() float64 {
+ matches := 0
+ for _, m := range m.GetMatchingBlocks() {
+ matches += m.Size
+ }
+ return calculateRatio(matches, len(m.a)+len(m.b))
+}
+
+// Return an upper bound on ratio() relatively quickly.
+//
+// This isn't defined beyond that it is an upper bound on .Ratio(), and
+// is faster to compute. 
+func (m *SequenceMatcher) QuickRatio() float64 { + // viewing a and b as multisets, set matches to the cardinality + // of their intersection; this counts the number of matches + // without regard to order, so is clearly an upper bound + if m.fullBCount == nil { + m.fullBCount = map[string]int{} + for _, s := range m.b { + m.fullBCount[s] = m.fullBCount[s] + 1 + } + } + + // avail[x] is the number of times x appears in 'b' less the + // number of times we've seen it in 'a' so far ... kinda + avail := map[string]int{} + matches := 0 + for _, s := range m.a { + n, ok := avail[s] + if !ok { + n = m.fullBCount[s] + } + avail[s] = n - 1 + if n > 0 { + matches += 1 + } + } + return calculateRatio(matches, len(m.a)+len(m.b)) +} + +// Return an upper bound on ratio() very quickly. +// +// This isn't defined beyond that it is an upper bound on .Ratio(), and +// is faster to compute than either .Ratio() or .QuickRatio(). +func (m *SequenceMatcher) RealQuickRatio() float64 { + la, lb := len(m.a), len(m.b) + return calculateRatio(min(la, lb), la+lb) +} + +// Convert range to the "ed" format +func formatRangeUnified(start, stop int) string { + // Per the diff spec at http://www.unix.org/single_unix_specification/ + beginning := start + 1 // lines start numbering with one + length := stop - start + if length == 1 { + return fmt.Sprintf("%d", beginning) + } + if length == 0 { + beginning -= 1 // empty ranges begin at line just before the range + } + return fmt.Sprintf("%d,%d", beginning, length) +} + +// Unified diff parameters +type UnifiedDiff struct { + A []string // First sequence lines + FromFile string // First file name + FromDate string // First file time + B []string // Second sequence lines + ToFile string // Second file name + ToDate string // Second file time + Eol string // Headers end of line, defaults to LF + Context int // Number of context lines +} + +// Compare two sequences of lines; generate the delta as a unified diff. 
+// +// Unified diffs are a compact way of showing line changes and a few +// lines of context. The number of context lines is set by 'n' which +// defaults to three. +// +// By default, the diff control lines (those with ---, +++, or @@) are +// created with a trailing newline. This is helpful so that inputs +// created from file.readlines() result in diffs that are suitable for +// file.writelines() since both the inputs and outputs have trailing +// newlines. +// +// For inputs that do not have trailing newlines, set the lineterm +// argument to "" so that the output will be uniformly newline free. +// +// The unidiff format normally has a header for filenames and modification +// times. Any or all of these may be specified using strings for +// 'fromfile', 'tofile', 'fromfiledate', and 'tofiledate'. +// The modification times are normally expressed in the ISO 8601 format. +func WriteUnifiedDiff(writer io.Writer, diff UnifiedDiff) error { + buf := bufio.NewWriter(writer) + defer buf.Flush() + wf := func(format string, args ...interface{}) error { + _, err := buf.WriteString(fmt.Sprintf(format, args...)) + return err + } + ws := func(s string) error { + _, err := buf.WriteString(s) + return err + } + + if len(diff.Eol) == 0 { + diff.Eol = "\n" + } + + started := false + m := NewMatcher(diff.A, diff.B) + for _, g := range m.GetGroupedOpCodes(diff.Context) { + if !started { + started = true + fromDate := "" + if len(diff.FromDate) > 0 { + fromDate = "\t" + diff.FromDate + } + toDate := "" + if len(diff.ToDate) > 0 { + toDate = "\t" + diff.ToDate + } + if diff.FromFile != "" || diff.ToFile != "" { + err := wf("--- %s%s%s", diff.FromFile, fromDate, diff.Eol) + if err != nil { + return err + } + err = wf("+++ %s%s%s", diff.ToFile, toDate, diff.Eol) + if err != nil { + return err + } + } + } + first, last := g[0], g[len(g)-1] + range1 := formatRangeUnified(first.I1, last.I2) + range2 := formatRangeUnified(first.J1, last.J2) + if err := wf("@@ -%s +%s @@%s", range1, 
range2, diff.Eol); err != nil {
+ return err
+ }
+ for _, c := range g {
+ i1, i2, j1, j2 := c.I1, c.I2, c.J1, c.J2
+ if c.Tag == 'e' {
+ for _, line := range diff.A[i1:i2] {
+ if err := ws(" " + line); err != nil {
+ return err
+ }
+ }
+ continue
+ }
+ if c.Tag == 'r' || c.Tag == 'd' {
+ for _, line := range diff.A[i1:i2] {
+ if err := ws("-" + line); err != nil {
+ return err
+ }
+ }
+ }
+ if c.Tag == 'r' || c.Tag == 'i' {
+ for _, line := range diff.B[j1:j2] {
+ if err := ws("+" + line); err != nil {
+ return err
+ }
+ }
+ }
+ }
+ }
+ return nil
+}
+
+// Like WriteUnifiedDiff but returns the diff as a string.
+func GetUnifiedDiffString(diff UnifiedDiff) (string, error) {
+ w := &bytes.Buffer{}
+ err := WriteUnifiedDiff(w, diff)
+ return string(w.Bytes()), err
+}
+
+// Convert range to the "ed" format.
+func formatRangeContext(start, stop int) string {
+ // Per the diff spec at http://www.unix.org/single_unix_specification/
+ beginning := start + 1 // lines start numbering with one
+ length := stop - start
+ if length == 0 {
+ beginning -= 1 // empty ranges begin at line just before the range
+ }
+ if length <= 1 {
+ return fmt.Sprintf("%d", beginning)
+ }
+ return fmt.Sprintf("%d,%d", beginning, beginning+length-1)
+}
+
+type ContextDiff UnifiedDiff
+
+// Compare two sequences of lines; generate the delta as a context diff.
+//
+// Context diffs are a compact way of showing line changes and a few
+// lines of context. The number of context lines is set by diff.Context
+// which defaults to three.
+//
+// By default, the diff control lines (those with *** or ---) are
+// created with a trailing newline.
+//
+// For inputs that do not have trailing newlines, set the diff.Eol
+// argument to "" so that the output will be uniformly newline free.
+//
+// The context diff format normally has a header for filenames and
+// modification times. Any or all of these may be specified using
+// strings for diff.FromFile, diff.ToFile, diff.FromDate, diff.ToDate. 
+// The modification times are normally expressed in the ISO 8601 format.
+// If not specified, the strings default to blanks.
+func WriteContextDiff(writer io.Writer, diff ContextDiff) error {
+ buf := bufio.NewWriter(writer)
+ defer buf.Flush()
+ var diffErr error
+ wf := func(format string, args ...interface{}) {
+ _, err := buf.WriteString(fmt.Sprintf(format, args...))
+ if diffErr == nil && err != nil {
+ diffErr = err
+ }
+ }
+ ws := func(s string) {
+ _, err := buf.WriteString(s)
+ if diffErr == nil && err != nil {
+ diffErr = err
+ }
+ }
+
+ if len(diff.Eol) == 0 {
+ diff.Eol = "\n"
+ }
+
+ prefix := map[byte]string{
+ 'i': "+ ",
+ 'd': "- ",
+ 'r': "! ",
+ 'e': "  ",
+ }
+
+ started := false
+ m := NewMatcher(diff.A, diff.B)
+ for _, g := range m.GetGroupedOpCodes(diff.Context) {
+ if !started {
+ started = true
+ fromDate := ""
+ if len(diff.FromDate) > 0 {
+ fromDate = "\t" + diff.FromDate
+ }
+ toDate := ""
+ if len(diff.ToDate) > 0 {
+ toDate = "\t" + diff.ToDate
+ }
+ if diff.FromFile != "" || diff.ToFile != "" {
+ wf("*** %s%s%s", diff.FromFile, fromDate, diff.Eol)
+ wf("--- %s%s%s", diff.ToFile, toDate, diff.Eol)
+ }
+ }
+
+ first, last := g[0], g[len(g)-1]
+ ws("***************" + diff.Eol)
+
+ range1 := formatRangeContext(first.I1, last.I2)
+ wf("*** %s ****%s", range1, diff.Eol)
+ for _, c := range g {
+ if c.Tag == 'r' || c.Tag == 'd' {
+ for _, cc := range g {
+ if cc.Tag == 'i' {
+ continue
+ }
+ for _, line := range diff.A[cc.I1:cc.I2] {
+ ws(prefix[cc.Tag] + line)
+ }
+ }
+ break
+ }
+ }
+
+ range2 := formatRangeContext(first.J1, last.J2)
+ wf("--- %s ----%s", range2, diff.Eol)
+ for _, c := range g {
+ if c.Tag == 'r' || c.Tag == 'i' {
+ for _, cc := range g {
+ if cc.Tag == 'd' {
+ continue
+ }
+ for _, line := range diff.B[cc.J1:cc.J2] {
+ ws(prefix[cc.Tag] + line)
+ }
+ }
+ break
+ }
+ }
+ }
+ return diffErr
+}
+
+// Like WriteContextDiff but returns the diff as a string. 
+func GetContextDiffString(diff ContextDiff) (string, error) { + w := &bytes.Buffer{} + err := WriteContextDiff(w, diff) + return string(w.Bytes()), err +} + +// Split a string on "\n" while preserving them. The output can be used +// as input for UnifiedDiff and ContextDiff structures. +func SplitLines(s string) []string { + lines := strings.SplitAfter(s, "\n") + lines[len(lines)-1] += "\n" + return lines +}