Merge branch 'master' of github.com:kubernetes/minikube into m2

pull/6787/head
Sharif Elgamal 2020-03-13 15:24:07 -07:00
commit 36089037d9
63 changed files with 1105 additions and 451 deletions

View File

@ -1,5 +1,31 @@
# Release Notes
## Version 1.8.2 - 2020-03-13
Bug Fixes:
* Fix dockerd internal port changing on restart [#7021](https://github.com/kubernetes/minikube/pull/7021)
* none: Skip driver preload and image caching [#7015](https://github.com/kubernetes/minikube/pull/7015)
* preload: fix bug for windows file separators [#6968](https://github.com/kubernetes/minikube/pull/6968)
Documentation:
* Add doc for running ebpf based tools in minikube [#6914](https://github.com/kubernetes/minikube/pull/6914)
New Features:
* Update NewestKubernetesVersion to 1.18.0-beta.2 [#6988](https://github.com/kubernetes/minikube/pull/6988)
* allow setting api-server port for docker/podman drivers [#6991](https://github.com/kubernetes/minikube/pull/6991)
Huge thank you for this release towards our contributors:
- Anders F Björklund
- Ian Molee
- Kenta Iso
- Medya Ghazizadeh
- Priya Wadhwa
- Sharif Elgamal
- Thomas Strömberg
## Version 1.8.1 - 2020-03-06
Minor bug fix:

View File

@ -15,7 +15,7 @@
# Bump these on release - and please check ISO_VERSION for correctness.
VERSION_MAJOR ?= 1
VERSION_MINOR ?= 8
VERSION_BUILD ?= 1
VERSION_BUILD ?= 2
RAW_VERSION=$(VERSION_MAJOR).$(VERSION_MINOR).${VERSION_BUILD}
VERSION ?= v$(RAW_VERSION)

View File

@ -88,6 +88,24 @@ func init() {
RootCmd.AddCommand(deleteCmd)
}
// deleteContainersAndVolumes removes every container and volume that minikube
// created (identified by the created-by label), logging — but not failing on —
// any errors, since missing resources are expected here.
func deleteContainersAndVolumes() {
	delLabel := fmt.Sprintf("%s=%s", oci.CreatedByLabelKey, "true")

	if errs := oci.DeleteContainersByLabel(oci.Docker, delLabel); len(errs) > 0 {
		// it will error if there is no container to delete
		glog.Infof("error delete containers by label %q (might be okay): %+v", delLabel, errs)
	}
	if errs := oci.DeleteAllVolumesByLabel(oci.Docker, delLabel); len(errs) > 0 {
		// it will not error if there is nothing to delete
		glog.Warningf("error delete volumes by label %q (might be okay): %+v", delLabel, errs)
	}
	if errs := oci.PruneAllVolumesByLabel(oci.Docker, delLabel); len(errs) > 0 {
		// it will not error if there is nothing to delete
		glog.Warningf("error pruning volumes by label %q (might be okay): %+v", delLabel, errs)
	}
}
// runDelete handles the executes the flow of "minikube delete"
func runDelete(cmd *cobra.Command, args []string) {
if len(args) > 0 {
@ -110,23 +128,9 @@ func runDelete(cmd *cobra.Command, args []string) {
}
if deleteAll {
delLabel := fmt.Sprintf("%s=%s", oci.CreatedByLabelKey, "true")
errs := oci.DeleteContainersByLabel(oci.Docker, delLabel)
if len(errs) > 0 { // it will error if there is no container to delete
glog.Infof("error delete containers by label %q (might be okay): %+v", delLabel, err)
}
deleteContainersAndVolumes()
errs = oci.DeleteAllVolumesByLabel(oci.Docker, delLabel)
if len(errs) > 0 { // it will not error if there is nothing to delete
glog.Warningf("error delete volumes by label %q (might be okay): %+v", delLabel, errs)
}
errs = oci.PruneAllVolumesByLabel(oci.Docker, delLabel)
if len(errs) > 0 { // it will not error if there is nothing to delete
glog.Warningf("error pruning volumes by label %q (might be okay): %+v", delLabel, errs)
}
errs = DeleteProfiles(profilesToDelete)
errs := DeleteProfiles(profilesToDelete)
if len(errs) > 0 {
HandleDeletionErrors(errs)
} else {
@ -185,13 +189,11 @@ func DeleteProfiles(profiles []*config.Profile) []error {
return errs
}
func deleteProfile(profile *config.Profile) error {
viper.Set(config.ProfileName, profile.Name)
delLabel := fmt.Sprintf("%s=%s", oci.ProfileLabelKey, profile.Name)
func deleteProfileContainersAndVolumes(name string) {
delLabel := fmt.Sprintf("%s=%s", oci.ProfileLabelKey, name)
errs := oci.DeleteContainersByLabel(oci.Docker, delLabel)
if errs != nil { // it will error if there is no container to delete
glog.Infof("error deleting containers for %s (might be okay):\n%v", profile.Name, errs)
glog.Infof("error deleting containers for %s (might be okay):\n%v", name, errs)
}
errs = oci.DeleteAllVolumesByLabel(oci.Docker, delLabel)
if errs != nil { // it will not error if there is nothing to delete
@ -202,6 +204,13 @@ func deleteProfile(profile *config.Profile) error {
if len(errs) > 0 { // it will not error if there is nothing to delete
glog.Warningf("error pruning volume (might be okay):\n%v", errs)
}
}
func deleteProfile(profile *config.Profile) error {
viper.Set(config.ProfileName, profile.Name)
deleteProfileContainersAndVolumes(profile.Name)
api, err := machine.NewAPIClient()
if err != nil {
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("error getting client %v", err))
@ -230,31 +239,13 @@ func deleteProfile(profile *config.Profile) error {
out.T(out.FailureType, "Failed to kill mount process: {{.error}}", out.V{"error": err})
}
if cc != nil {
for _, n := range cc.Nodes {
machineName := driver.MachineName(*cc, n)
if err = machine.DeleteHost(api, machineName); err != nil {
switch errors.Cause(err).(type) {
case mcnerror.ErrHostDoesNotExist:
glog.Infof("Host %s does not exist. Proceeding ahead with cleanup.", machineName)
default:
out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err})
out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": profile.Name})
}
}
}
}
deleteHosts(api, cc)
// In case DeleteHost didn't complete the job.
deleteProfileDirectory(profile.Name)
if err := config.DeleteProfile(profile.Name); err != nil {
if config.IsNotExist(err) {
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("\"%s\" profile does not exist", profile.Name))
return DeletionError{Err: delErr, Errtype: MissingProfile}
}
delErr := profileDeletionErr(profile.Name, fmt.Sprintf("failed to remove profile %v", err))
return DeletionError{Err: delErr, Errtype: Fatal}
if err := deleteConfig(profile.Name); err != nil {
return err
}
if err := deleteContext(profile.Name); err != nil {
@ -264,6 +255,35 @@ func deleteProfile(profile *config.Profile) error {
return nil
}
// deleteHosts deletes the machine backing every node of the given cluster
// config. A nil config is a no-op. A host that no longer exists is logged and
// skipped; any other deletion failure is surfaced to the user but does not
// stop the remaining nodes from being processed.
func deleteHosts(api libmachine.API, cc *config.ClusterConfig) {
	if cc == nil {
		return
	}
	for _, n := range cc.Nodes {
		machineName := driver.MachineName(*cc, n)
		err := machine.DeleteHost(api, machineName)
		if err == nil {
			continue
		}
		if _, ok := errors.Cause(err).(mcnerror.ErrHostDoesNotExist); ok {
			glog.Infof("Host %s does not exist. Proceeding ahead with cleanup.", machineName)
		} else {
			out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err})
			out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": machineName})
		}
	}
}
// deleteConfig removes the stored profile configuration for profileName.
// A missing profile is reported as a MissingProfile DeletionError; any other
// removal failure is reported as Fatal.
func deleteConfig(profileName string) error {
	err := config.DeleteProfile(profileName)
	if err == nil {
		return nil
	}
	if config.IsNotExist(err) {
		delErr := profileDeletionErr(profileName, fmt.Sprintf("\"%s\" profile does not exist", profileName))
		return DeletionError{Err: delErr, Errtype: MissingProfile}
	}
	delErr := profileDeletionErr(profileName, fmt.Sprintf("failed to remove profile %v", err))
	return DeletionError{Err: delErr, Errtype: Fatal}
}
func deleteContext(machineName string) error {
if err := kubeconfig.DeleteContext(machineName); err != nil {
return DeletionError{Err: fmt.Errorf("update config: %v", err), Errtype: Fatal}

View File

@ -117,7 +117,7 @@ const (
minUsableMem = 1024 // Kubernetes will not start with less than 1GB
minRecommendedMem = 2000 // Warn at no lower than existing configurations
minimumCPUS = 2
minimumDiskSize = "2000mb"
minimumDiskSize = 2000
autoUpdate = "auto-update-drivers"
hostOnlyNicType = "host-only-nic-type"
natNicType = "nat-nic-type"
@ -457,8 +457,11 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName st
glog.Infof("kubectl: %s, cluster: %s (minor skew: %d)", client, cluster, minorSkew)
if client.Major != cluster.Major || minorSkew > 1 {
out.WarningT("{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster",
out.Ln("")
out.T(out.Warning, "{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.",
out.V{"path": path, "client_version": client, "cluster_version": cluster})
out.T(out.Tip, "You can also use 'minikube kubectl -- get pods' to invoke a matching version",
out.V{"path": path, "client_version": client})
}
return nil
}
@ -575,7 +578,7 @@ func validateDriver(ds registry.DriverState, existing *config.ClusterConfig) {
exit.WithCodeT(exit.Config, "Exiting.")
}
func selectImageRepository(mirrorCountry string) (bool, string, error) {
func selectImageRepository(mirrorCountry string, v semver.Version) (bool, string, error) {
var tryCountries []string
var fallback string
glog.Infof("selecting image repository for country %s ...", mirrorCountry)
@ -603,7 +606,7 @@ func selectImageRepository(mirrorCountry string) (bool, string, error) {
}
checkRepository := func(repo string) error {
pauseImage := images.Pause(repo)
pauseImage := images.Pause(v, repo)
ref, err := name.ParseReference(pauseImage, name.WeakValidation)
if err != nil {
return err
@ -668,43 +671,62 @@ func validateUser(drvName string) {
}
}
// defaultMemorySize calculates the default memory footprint in MB
func defaultMemorySize(drvName string) int {
fallback := 2200
maximum := 6000
// memoryLimits returns the amount of memory allocated to the system and, for
// container-based (KIC) drivers, to the container runtime, both in MB.
// containerLimit is 0 when the driver is not KIC. On failure to query either
// source, (-1, -1, err) is returned.
//
// NOTE(review): the original hunk interleaved deleted lines from the old
// defaultMemorySize implementation (undefined `fallback`, `available`,
// `maximum`); this is the reconstructed, compilable new version.
func memoryLimits(drvName string) (int, int, error) {
	v, err := mem.VirtualMemory()
	if err != nil {
		return -1, -1, err
	}
	sysLimit := int(v.Total / 1024 / 1024)
	containerLimit := 0

	// For KIC, do not allocate more memory than the container has available (+ some slack)
	if driver.IsKIC(drvName) {
		s, err := oci.DaemonInfo(drvName)
		if err != nil {
			return -1, -1, err
		}
		containerLimit = int(s.TotalMemory / 1024 / 1024)
	}
	return sysLimit, containerLimit, nil
}
// suggestMemoryAllocation calculates the default memory footprint in MB,
// given the system memory limit and (for KIC drivers) the container memory
// limit, both in MB. A limit of 0 means "unknown".
//
// The suggestion is 25% of system RAM rounded down to the nearest 100MB
// (Hyper-V requires an even number), clamped to a 2200MB floor and a 6000MB
// ceiling — the ceiling is lowered to containerLimit minus a little slack for
// non-minikube components when a container limit is known. If either limit is
// below the floor, that limit itself is returned so we never over-allocate.
//
// NOTE(review): the original hunk interleaved deleted lines from the old
// defaultMemorySize implementation (`suggested := int(available / 4)` etc.);
// this is the reconstructed, compilable new version, verified against the
// TestSuggestMemoryAllocation table.
func suggestMemoryAllocation(sysLimit int, containerLimit int) int {
	fallback := 2200
	maximum := 6000

	// If the whole system has less than the floor, use everything it has.
	if sysLimit > 0 && fallback > sysLimit {
		return sysLimit
	}

	// If there are container limits, add tiny bit of slack for non-minikube components
	if containerLimit > 0 {
		if fallback > containerLimit {
			return containerLimit
		}
		maximum = containerLimit - 48
	}

	// Suggest 25% of RAM, rounded to nearest 100MB. Hyper-V requires an even number!
	suggested := int(float32(sysLimit)/400.0) * 100

	if suggested > maximum {
		return maximum
	}
	if suggested < fallback {
		return fallback
	}
	return suggested
}
// validateMemorySize validates the memory size matches the minimum recommended
func validateMemorySize() {
req := pkgutil.CalculateSizeInMB(viper.GetString(memory))
req, err := pkgutil.CalculateSizeInMB(viper.GetString(memory))
if err != nil {
exit.WithCodeT(exit.Config, "Unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err})
}
if req < minUsableMem && !viper.GetBool(force) {
exit.WithCodeT(exit.Config, "Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB",
out.V{"requested": req, "mininum": minUsableMem})
@ -737,9 +759,13 @@ func validateCPUCount(local bool) {
// validateFlags validates the supplied flags against known bad combinations
func validateFlags(cmd *cobra.Command, drvName string) {
if cmd.Flags().Changed(humanReadableDiskSize) {
diskSizeMB := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
if diskSizeMB < pkgutil.CalculateSizeInMB(minimumDiskSize) && !viper.GetBool(force) {
exit.WithCodeT(exit.Config, "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}", out.V{"requested_size": diskSizeMB, "minimum_size": pkgutil.CalculateSizeInMB(minimumDiskSize)})
diskSizeMB, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
if err != nil {
exit.WithCodeT(exit.Config, "Validation unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err})
}
if diskSizeMB < minimumDiskSize && !viper.GetBool(force) {
exit.WithCodeT(exit.Config, "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}", out.V{"requested_size": diskSizeMB, "minimum_size": minimumDiskSize})
}
}
@ -822,7 +848,7 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
repository := viper.GetString(imageRepository)
mirrorCountry := strings.ToLower(viper.GetString(imageMirrorCountry))
if strings.ToLower(repository) == "auto" || mirrorCountry != "" {
found, autoSelectedRepository, err := selectImageRepository(mirrorCountry)
found, autoSelectedRepository, err := selectImageRepository(mirrorCountry, semver.MustParse(k8sVersion))
if err != nil {
exit.WithError("Failed to check main repository and mirrors for images for images", err)
}
@ -847,9 +873,20 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
kubeNodeName = "m01"
}
mem := defaultMemorySize(drvName)
if viper.GetString(memory) != "" {
mem = pkgutil.CalculateSizeInMB(viper.GetString(memory))
sysLimit, containerLimit, err := memoryLimits(drvName)
if err != nil {
glog.Warningf("Unable to query memory limits: %v", err)
}
mem := suggestMemoryAllocation(sysLimit, containerLimit)
if cmd.Flags().Changed(memory) {
mem, err = pkgutil.CalculateSizeInMB(viper.GetString(memory))
if err != nil {
exit.WithCodeT(exit.Config, "Generate unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err})
}
} else {
glog.Infof("Using suggested %dMB memory alloc based on sys=%dMB, container=%dMB", mem, sysLimit, containerLimit)
}
// Create the initial node, which will necessarily be a control plane
@ -861,6 +898,11 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
Worker: true,
}
diskSize, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
if err != nil {
exit.WithCodeT(exit.Config, "Generate unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err})
}
cfg := config.ClusterConfig{
Name: viper.GetString(config.ProfileName),
KeepContext: viper.GetBool(keepContext),
@ -868,7 +910,7 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
MinikubeISO: viper.GetString(isoURL),
Memory: mem,
CPUs: viper.GetInt(cpus),
DiskSize: pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize)),
DiskSize: diskSize,
Driver: drvName,
HyperkitVpnKitSock: viper.GetString(vpnkitSock),
HyperkitVSockPorts: viper.GetStringSlice(vsockPorts),
@ -984,10 +1026,6 @@ func getKubernetesVersion(old *config.ClusterConfig) string {
}
nv := version.VersionPrefix + nvs.String()
if old == nil || old.KubernetesConfig.KubernetesVersion == "" {
return nv
}
oldestVersion, err := semver.Make(strings.TrimPrefix(constants.OldestKubernetesVersion, version.VersionPrefix))
if err != nil {
exit.WithCodeT(exit.Data, "Unable to parse oldest Kubernetes version from constants: {{.error}}", out.V{"error": err})
@ -1006,6 +1044,10 @@ func getKubernetesVersion(old *config.ClusterConfig) string {
}
}
if old == nil || old.KubernetesConfig.KubernetesVersion == "" {
return nv
}
ovs, err := semver.Make(strings.TrimPrefix(old.KubernetesConfig.KubernetesVersion, version.VersionPrefix))
if err != nil {
glog.Errorf("Error parsing old version %q: %v", old.KubernetesConfig.KubernetesVersion, err)

View File

@ -71,8 +71,9 @@ func TestGetKuberneterVersion(t *testing.T) {
}
func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) {
viper.SetDefault(memory, defaultMemorySize)
// Set default disk size value in lieu of flag init
viper.SetDefault(humanReadableDiskSize, defaultDiskSize)
originalEnv := os.Getenv("HTTP_PROXY")
defer func() {
err := os.Setenv("HTTP_PROXY", originalEnv)
@ -124,3 +125,34 @@ func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) {
})
}
}
// TestSuggestMemoryAllocation verifies the suggested memory default across a
// range of system and container memory limits (0 means the limit is unknown).
func TestSuggestMemoryAllocation(t *testing.T) {
	var tests = []struct {
		description    string
		sysLimit       int
		containerLimit int
		want           int
	}{
		{"128GB sys", 128000, 0, 6000},
		{"64GB sys", 64000, 0, 6000},
		{"16GB sys", 16384, 0, 4000},
		{"odd sys", 14567, 0, 3600},
		{"4GB sys", 4096, 0, 2200},
		{"2GB sys", 2048, 0, 2048},
		{"Unable to poll sys", 0, 0, 2200},
		{"128GB sys, 16GB container", 128000, 16384, 16336},
		{"64GB sys, 16GB container", 64000, 16384, 16000},
		{"16GB sys, 4GB container", 16384, 4096, 4000},
		{"4GB sys, 3.5GB container", 16384, 3500, 3452},
		{"2GB sys, 2GB container", 16384, 2048, 2048},
		{"2GB sys, unable to poll container", 16384, 0, 4000},
	}
	for _, test := range tests {
		t.Run(test.description, func(t *testing.T) {
			got := suggestMemoryAllocation(test.sysLimit, test.containerLimit)
			if got != test.want {
				// Name the function actually under test (the old message said
				// defaultMemorySize, which no longer exists).
				t.Errorf("suggestMemoryAllocation(sys=%d, container=%d) = %d, want: %d", test.sysLimit, test.containerLimit, got, test.want)
			}
		})
	}
}

View File

@ -1,3 +1,4 @@
sha256 3857f109574750403b233b5fdf73f1852d8decc33dac8f73bd49f2003b69ad22 16.tar.gz
sha256 0dcb451f32033154c56710c216e67f245923fe2b011321271f6670e5a2285ce6 17.tar.gz
sha256 7a32543643116ad105da4ddb2f8030de7dcad1cdb3feb1a214ae5e7b65a6a198 18.tar.gz
sha256 0e316138ef6abc34363b05d0caf6df2e389a93b832e8d971e3ae64b48ba96133 19.tar.gz

View File

@ -1,4 +1,4 @@
VARLINK_VERSION = 18
VARLINK_VERSION = 19
VARLINK_SITE = https://github.com/varlink/libvarlink/archive
VARLINK_SOURCE = $(VARLINK_VERSION).tar.gz
VARLINK_LICENSE = Apache-2.0
@ -6,10 +6,4 @@ VARLINK_LICENSE_FILES = LICENSE
VARLINK_NEEDS_HOST_PYTHON = python3
define VARLINK_ENV_PYTHON3
sed -e 's|/usr/bin/python3|/usr/bin/env python3|' -i $(@D)/varlink-wrapper.py
endef
VARLINK_POST_EXTRACT_HOOKS += VARLINK_ENV_PYTHON3
$(eval $(meson-package))

View File

@ -1,4 +1,12 @@
[
{
"name": "v1.8.2",
"checksums": {
"darwin": "cbd1ff4dd239180b417bcd496fe0a31dbe8f212586765c040fdd20991ca13d50",
"linux": "0b21b50a8064aaea816cc7495cbbe324ab126284b0dbbb15c9f4df5ac72c22fb",
"windows": "076ccf11e8238647101e26d327adb0880fdac63cbd6e12bd0bb1420f09a85b9c"
}
},
{
"name": "v1.8.1",
"checksums": {

View File

@ -0,0 +1,47 @@
# Multi-node
* First proposed: 2020-01-31
* Authors: Sharif Elgamal (@sharifelgamal)
## Reviewer Priorities
Please review this proposal with the following priorities:
* Does this fit with minikube's [principles](https://minikube.sigs.k8s.io/docs/concepts/principles/)?
* Are there other approaches to consider?
* Could the implementation be made simpler?
* Are there usability, reliability, or technical debt concerns?
## Summary
Until now minikube has always been a local single node Kubernetes cluster. Having multiple nodes in minikube clusters has been [the most requested feature](https://github.com/kubernetes/minikube/issues/94) in the history of the minikube repository.
## Goals
* Enabling clusters with any number of control plane and worker nodes.
* The ability to add and remove nodes from any cluster.
* The ability to customize config per node.
## Non-Goals
* Reproducing production environments
## Design Details
Since minikube was designed with only a single node cluster in mind, we need to make some fairly significant refactors, the biggest of which is the introduction of the Node object. Each cluster config will be able to have an arbitrary number of Node objects, each of which will have attributes that can define it, similar to what [tstromberg proposed](https://github.com/kubernetes/minikube/pull/5874) but with better backwards compatibility with current config.
Each node will correspond to one VM (or container) and will connect back to the primary control plane via `kubeadm join`.
Also added will be the `node` sub command, e.g. `minikube node start` and `minikube node delete`. This will allow users to control their cluster however they please. Eventually, we will want to support passing in a `yaml` file into `minikube start` that defines all the nodes and their configs in one go.
Users will be able to start multinode clusters in two ways:
1. `minikube start --nodes=2`
1. * `minikube start`
* `minikube node add --name=node2`
* `minikube node start --name=node2`
A note about `docker env`, the initial implementation won't properly support `docker env` in any consistent way, use at your own risk. The plan is to propagate any changes made to all the nodes in the cluster, with the caveat that anything that interrupts the command will cause a potentially corrupt cluster.
## Alternatives Considered
_TBD_

3
go.mod
View File

@ -52,6 +52,7 @@ require (
github.com/olekukonko/tablewriter v0.0.0-20170122224234-a0225b3f23b5
github.com/onsi/ginkgo v1.10.3 // indirect
github.com/onsi/gomega v1.7.1 // indirect
github.com/opencontainers/go-digest v1.0.0-rc1
github.com/otiai10/copy v1.0.2
github.com/pborman/uuid v1.2.0
github.com/phayes/freeport v0.0.0-20180830031419-95f893ade6f2
@ -93,7 +94,7 @@ require (
replace (
git.apache.org/thrift.git => github.com/apache/thrift v0.0.0-20180902110319-2566ecd5d999
github.com/docker/docker => github.com/docker/docker v1.4.2-0.20190924003213-a8608b5b67c7
github.com/docker/machine => github.com/machine-drivers/machine v0.7.1-0.20191109154235-b39d5b50de51
github.com/docker/machine => github.com/medyagh/machine v0.16.4
github.com/hashicorp/go-getter => github.com/afbjorklund/go-getter v1.4.1-0.20190910175809-eb9f6c26742c
github.com/samalba/dockerclient => github.com/sayboras/dockerclient v0.0.0-20191231050035-015626177a97
k8s.io/api => k8s.io/api v0.17.3

4
go.sum
View File

@ -479,8 +479,6 @@ github.com/lucas-clemente/quic-go v0.10.2/go.mod h1:hvaRS9IHjFLMq76puFJeWNfmn+H7
github.com/lucas-clemente/quic-go-certificates v0.0.0-20160823095156-d2f86524cced/go.mod h1:NCcRLrOTZbzhZvixZLlERbJtDtYsmMw8Jc4vS8Z0g58=
github.com/machine-drivers/docker-machine-driver-vmware v0.1.1 h1:+E1IKKk+6kaQrCPg6edJZ/zISZijuZTPnzy6RE4C/Ho=
github.com/machine-drivers/docker-machine-driver-vmware v0.1.1/go.mod h1:ej014C83EmSnxJeJ8PtVb8OLJ91PJKO1Q8Y7sM5CK0o=
github.com/machine-drivers/machine v0.7.1-0.20191109154235-b39d5b50de51 h1:ra4e+hU8Ca02yNyF8WM89aOShgXEPWRqerGpsmfqgTA=
github.com/machine-drivers/machine v0.7.1-0.20191109154235-b39d5b50de51/go.mod h1:79Uwa2hGd5S39LDJt58s8JZcIhGEK6pkq9bsuTbFWbk=
github.com/magiconair/properties v1.7.6/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
github.com/magiconair/properties v1.8.0 h1:LLgXmsheXeRoUOBOjtwPQCWIYqM/LU1ayDtDePerRcY=
github.com/magiconair/properties v1.8.0/go.mod h1:PppfXfuXeibc/6YijjN8zIbojt8czPbwD3XqdrwzmxQ=
@ -512,6 +510,8 @@ github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0j
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2 h1:g+4J5sZg6osfvEfkRZxJ1em0VT95/UOZgi/l7zi1/oE=
github.com/maxbrunsfeld/counterfeiter/v6 v6.2.2/go.mod h1:eD9eIE7cdwcMi9rYluz88Jz2VyhSmden33/aXg4oVIY=
github.com/medyagh/machine v0.16.4 h1:oEsH3C1TYzs5axakAI/K1yc5O3r6de0+mCGumX4aHwM=
github.com/medyagh/machine v0.16.4/go.mod h1:/HegrAvHvD0AGQYQaLfrmUqxQTQF3Ks9qkj34p/ZH40=
github.com/mesos/mesos-go v0.0.9/go.mod h1:kPYCMQ9gsOXVAle1OsoY4I1+9kPu8GHkf88aV59fDr4=
github.com/mholt/certmagic v0.6.2-0.20190624175158-6a42ef9fe8c2/go.mod h1:g4cOPxcjV0oFq3qwpjSA30LReKD8AoIfwAY9VvG35NY=
github.com/miekg/dns v1.1.3/go.mod h1:W1PPwlIAgtquWBMBEV9nkV9Cazfe8ScdGz/Lj7v3Nrg=

View File

@ -353,7 +353,7 @@ touch "${HTML_OUT}"
gopogh_status=$(gopogh -in "${JSON_OUT}" -out "${HTML_OUT}" -name "${JOB_NAME}" -pr "${MINIKUBE_LOCATION}" -repo github.com/kubernetes/minikube/ -details "${COMMIT}") || true
fail_num=$(echo $gopogh_status | jq '.NumberOfFail')
test_num=$(echo $gopogh_status | jq '.NumberOfTests')
pessimistic_status="$completed with ${fail_num} / ${test_num} failures in ${elapsed}"
pessimistic_status="${fail_num} / ${test_num} failures"
description="completed with ${status} in ${elapsed} minute(s)."
if [ "$status" = "failure" ]; then
description="completed with ${pessimistic_status} in ${elapsed} minute(s)."

View File

@ -47,7 +47,7 @@ sudo rm -rf /etc/kubernetes/*
sudo rm -rf /var/lib/minikube/*
# Stop any leftover kubelet
systemctl is-active --quiet kubelet \
sudo systemctl is-active --quiet kubelet \
&& echo "stopping kubelet" \
&& sudo systemctl stop kubelet

View File

@ -17,6 +17,7 @@ limitations under the License.
package main
import (
"bytes"
"flag"
"fmt"
"os"
@ -92,7 +93,7 @@ func executePreloadImages() error {
defer os.Remove(baseDir)
if err := os.MkdirAll(baseDir, 0755); err != nil {
return err
return errors.Wrap(err, "mkdir")
}
if err := driver.Create(); err != nil {
return errors.Wrap(err, "creating kic driver")
@ -123,7 +124,7 @@ func executePreloadImages() error {
}
// Create image tarball
if err := createImageTarball(); err != nil {
return err
return errors.Wrap(err, "create tarball")
}
return copyTarballToHost()
}
@ -139,7 +140,7 @@ func createImageTarball() error {
cmd := exec.Command("docker", args...)
cmd.Stdout = os.Stdout
if err := cmd.Run(); err != nil {
return errors.Wrap(err, "creating image tarball")
return errors.Wrapf(err, "tarball cmd: %s", cmd.Args)
}
return nil
}
@ -149,7 +150,7 @@ func copyTarballToHost() error {
cmd := exec.Command("docker", "cp", fmt.Sprintf("%s:/%s", profile, tarballFilename), dest)
cmd.Stdout = os.Stdout
if err := cmd.Run(); err != nil {
return errors.Wrap(err, "copying tarball to host")
return errors.Wrapf(err, "cp cmd: %s", cmd.Args)
}
return nil
}
@ -162,9 +163,11 @@ func deleteMinikube() error {
func verifyDockerStorage() error {
cmd := exec.Command("docker", "info", "-f", "{{.Info.Driver}}")
var stderr bytes.Buffer
cmd.Stderr = &stderr
output, err := cmd.Output()
if err != nil {
return err
return fmt.Errorf("%v: %v:\n%s", cmd.Args, err, stderr.String())
}
driver := strings.Trim(string(output), " \n")
if driver != dockerStorageDriver {

View File

@ -26,7 +26,6 @@ import (
"github.com/golang/glog"
"github.com/pkg/errors"
"github.com/spf13/viper"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
@ -35,7 +34,6 @@ import (
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/storageclass"
pkgutil "k8s.io/minikube/pkg/util"
)
// defaultStorageClassProvisioner is the name of the default storage class provisioner
@ -49,35 +47,34 @@ func Set(name, value, profile string) error {
return errors.Errorf("%s is not a valid addon", name)
}
// Run any additional validations for this property
if err := run(name, value, profile, a.validations); err != nil {
return errors.Wrap(err, "running validations")
}
// Set the value
c, err := config.Load(profile)
cc, err := config.Load(profile)
if err != nil {
return errors.Wrap(err, "loading profile")
}
if err := a.set(c, name, value); err != nil {
// Run any additional validations for this property
if err := run(cc, name, value, a.validations); err != nil {
return errors.Wrap(err, "running validations")
}
if err := a.set(cc, name, value); err != nil {
return errors.Wrap(err, "setting new value of addon")
}
// Run any callbacks for this property
if err := run(name, value, profile, a.callbacks); err != nil {
if err := run(cc, name, value, a.callbacks); err != nil {
return errors.Wrap(err, "running callbacks")
}
glog.Infof("Writing out %q config to set %s=%v...", profile, name, value)
return config.Write(profile, c)
return config.Write(profile, cc)
}
// Runs all the validation or callback functions and collects errors
func run(name, value, profile string, fns []setFn) error {
func run(cc *config.ClusterConfig, name string, value string, fns []setFn) error {
var errors []error
for _, fn := range fns {
err := fn(name, value, profile)
err := fn(cc, name, value)
if err != nil {
errors = append(errors, err)
}
@ -89,21 +86,21 @@ func run(name, value, profile string, fns []setFn) error {
}
// SetBool sets a bool value
func SetBool(m *config.ClusterConfig, name string, val string) error {
func SetBool(cc *config.ClusterConfig, name string, val string) error {
b, err := strconv.ParseBool(val)
if err != nil {
return err
}
if m.Addons == nil {
m.Addons = map[string]bool{}
if cc.Addons == nil {
cc.Addons = map[string]bool{}
}
m.Addons[name] = b
cc.Addons[name] = b
return nil
}
// enableOrDisableAddon updates addon status executing any commands necessary
func enableOrDisableAddon(name, val, profile string) error {
glog.Infof("Setting addon %s=%s in %q", name, val, profile)
func enableOrDisableAddon(cc *config.ClusterConfig, name string, val string) error {
glog.Infof("Setting addon %s=%s in %q", name, val, cc.Name)
enable, err := strconv.ParseBool(val)
if err != nil {
return errors.Wrapf(err, "parsing bool: %s", name)
@ -111,7 +108,7 @@ func enableOrDisableAddon(name, val, profile string) error {
addon := assets.Addons[name]
// check addon status before enabling/disabling it
alreadySet, err := isAddonAlreadySet(addon, enable, profile)
alreadySet, err := isAddonAlreadySet(addon, enable, cc.Name)
if err != nil {
out.ErrT(out.Conflict, "{{.error}}", out.V{"error": err})
return err
@ -124,13 +121,14 @@ func enableOrDisableAddon(name, val, profile string) error {
}
}
if name == "istio" && enable {
if strings.HasPrefix(name, "istio") && enable {
minMem := 8192
minCpus := 4
memorySizeMB := pkgutil.CalculateSizeInMB(viper.GetString("memory"))
cpuCount := viper.GetInt("cpus")
if memorySizeMB < minMem || cpuCount < minCpus {
out.WarningT("Enable istio needs {{.minMem}} MB of memory and {{.minCpus}} CPUs.", out.V{"minMem": minMem, "minCpus": minCpus})
minCPUs := 4
if cc.Memory < minMem {
out.WarningT("Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB", out.V{"minMem": minMem, "memory": cc.Memory})
}
if cc.CPUs < minCPUs {
out.WarningT("Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs", out.V{"minCPUs": minCPUs, "cpus": cc.CPUs})
}
}
@ -141,14 +139,15 @@ func enableOrDisableAddon(name, val, profile string) error {
}
defer api.Close()
cfg, err := config.Load(profile)
if err != nil && !config.IsNotExist(err) {
exit.WithCodeT(exit.Data, "Unable to load config: {{.error}}", out.V{"error": err})
cp, err := config.PrimaryControlPlane(cc)
if err != nil {
exit.WithError("Error getting primary control plane", err)
}
host, err := machine.CheckIfHostExistsAndLoad(api, profile)
if err != nil || !machine.IsHostRunning(api, profile) {
glog.Warningf("%q is not running, writing %s=%v to disk and skipping enablement (err=%v)", profile, addon.Name(), enable, err)
mName := driver.MachineName(*cc, cp)
host, err := machine.CheckIfHostExistsAndLoad(api, mName)
if err != nil || !machine.IsHostRunning(api, mName) {
glog.Warningf("%q is not running, writing %s=%v to disk and skipping enablement (err=%v)", mName, addon.Name(), enable, err)
return nil
}
@ -157,8 +156,8 @@ func enableOrDisableAddon(name, val, profile string) error {
return errors.Wrap(err, "command runner")
}
data := assets.GenerateTemplateData(cfg.KubernetesConfig)
return enableOrDisableAddonInternal(addon, cmd, data, enable, profile)
data := assets.GenerateTemplateData(cc.KubernetesConfig)
return enableOrDisableAddonInternal(cc, addon, cmd, data, enable)
}
func isAddonAlreadySet(addon *assets.Addon, enable bool, profile string) (bool, error) {
@ -176,7 +175,7 @@ func isAddonAlreadySet(addon *assets.Addon, enable bool, profile string) (bool,
return false, nil
}
func enableOrDisableAddonInternal(addon *assets.Addon, cmd command.Runner, data interface{}, enable bool, profile string) error {
func enableOrDisableAddonInternal(cc *config.ClusterConfig, addon *assets.Addon, cmd command.Runner, data interface{}, enable bool) error {
deployFiles := []string{}
for _, addon := range addon.Assets {
@ -211,10 +210,7 @@ func enableOrDisableAddonInternal(addon *assets.Addon, cmd command.Runner, data
}
}
command, err := kubectlCommand(profile, deployFiles, enable)
if err != nil {
return err
}
command := kubectlCommand(cc, deployFiles, enable)
glog.Infof("Running: %v", command)
rr, err := cmd.RunCmd(command)
if err != nil {
@ -225,8 +221,8 @@ func enableOrDisableAddonInternal(addon *assets.Addon, cmd command.Runner, data
}
// enableOrDisableStorageClasses enables or disables storage classes
func enableOrDisableStorageClasses(name, val, profile string) error {
glog.Infof("enableOrDisableStorageClasses %s=%v on %q", name, val, profile)
func enableOrDisableStorageClasses(cc *config.ClusterConfig, name string, val string) error {
glog.Infof("enableOrDisableStorageClasses %s=%v on %q", name, val, cc.Name)
enable, err := strconv.ParseBool(val)
if err != nil {
return errors.Wrap(err, "Error parsing boolean")
@ -247,18 +243,13 @@ func enableOrDisableStorageClasses(name, val, profile string) error {
}
defer api.Close()
cc, err := config.Load(profile)
if err != nil {
return errors.Wrap(err, "getting cluster")
}
cp, err := config.PrimaryControlPlane(cc)
if err != nil {
return errors.Wrap(err, "getting control plane")
}
if !machine.IsHostRunning(api, driver.MachineName(*cc, cp)) {
glog.Warningf("%q is not running, writing %s=%v to disk and skipping enablement", profile, name, val)
return enableOrDisableAddon(name, val, profile)
glog.Warningf("%q is not running, writing %s=%v to disk and skipping enablement", driver.MachineName(*cc, cp), name, val)
return enableOrDisableAddon(cc, name, val)
}
if enable {
@ -275,7 +266,7 @@ func enableOrDisableStorageClasses(name, val, profile string) error {
}
}
return enableOrDisableAddon(name, val, profile)
return enableOrDisableAddon(cc, name, val)
}
// Start enables the default addons for a profile, plus any additional

View File

@ -44,7 +44,15 @@ func createTestProfile(t *testing.T) string {
if err := os.MkdirAll(config.ProfileFolderPath(name), 0777); err != nil {
t.Fatalf("error creating temporary directory")
}
if err := config.DefaultLoader.WriteConfigToFile(name, &config.ClusterConfig{}); err != nil {
cc := &config.ClusterConfig{
Name: name,
CPUs: 2,
Memory: 2500,
KubernetesConfig: config.KubernetesConfig{},
}
if err := config.DefaultLoader.WriteConfigToFile(name, cc); err != nil {
t.Fatalf("error creating temporary profile config: %v", err)
}
return name

View File

@ -18,7 +18,7 @@ package addons
import "k8s.io/minikube/pkg/minikube/config"
type setFn func(string, string, string) error
type setFn func(*config.ClusterConfig, string, string) error
// Addon represents an addon
type Addon struct {
@ -54,7 +54,7 @@ var Addons = []*Addon{
{
name: "gvisor",
set: SetBool,
validations: []setFn{IsContainerdRuntime},
validations: []setFn{IsRuntimeContainerd},
callbacks: []setFn{enableOrDisableAddon},
},
{

View File

@ -26,16 +26,12 @@ import (
"k8s.io/minikube/pkg/minikube/vmpath"
)
var (
// For testing
k8sVersion = kubernetesVersion
)
func kubectlCommand(profile string, files []string, enable bool) (*exec.Cmd, error) {
v, err := k8sVersion(profile)
if err != nil {
return nil, err
func kubectlCommand(cc *config.ClusterConfig, files []string, enable bool) *exec.Cmd {
v := constants.DefaultKubernetesVersion
if cc != nil {
v = cc.KubernetesConfig.KubernetesVersion
}
kubectlBinary := kubectlBinaryPath(v)
kubectlAction := "apply"
@ -48,20 +44,7 @@ func kubectlCommand(profile string, files []string, enable bool) (*exec.Cmd, err
args = append(args, []string{"-f", f}...)
}
cmd := exec.Command("sudo", args...)
return cmd, nil
}
func kubernetesVersion(profile string) (string, error) {
cc, err := config.Load(profile)
if err != nil && !config.IsNotExist(err) {
return "", err
}
version := constants.DefaultKubernetesVersion
if cc != nil {
version = cc.KubernetesConfig.KubernetesVersion
}
return version, nil
return exec.Command("sudo", args...)
}
func kubectlBinaryPath(version string) string {

View File

@ -19,6 +19,8 @@ package addons
import (
"strings"
"testing"
"k8s.io/minikube/pkg/minikube/config"
)
func TestKubectlCommand(t *testing.T) {
@ -41,18 +43,15 @@ func TestKubectlCommand(t *testing.T) {
},
}
cc := &config.ClusterConfig{
KubernetesConfig: config.KubernetesConfig{
KubernetesVersion: "v1.17.0",
},
}
for _, test := range tests {
t.Run(test.description, func(t *testing.T) {
originalK8sVersion := k8sVersion
defer func() { k8sVersion = originalK8sVersion }()
k8sVersion = func(_ string) (string, error) {
return "v1.17.0", nil
}
command, err := kubectlCommand("", test.files, test.enable)
if err != nil {
t.Fatalf("error getting kubectl command: %v", err)
}
command := kubectlCommand(cc, test.files, test.enable)
actual := strings.Join(command.Args, " ")
if actual != test.expected {

View File

@ -33,13 +33,9 @@ and then start minikube again with the following flags:
minikube start --container-runtime=containerd --docker-opt containerd=/var/run/containerd/containerd.sock`
// IsContainerdRuntime is a validator which returns an error if the current runtime is not containerd
func IsContainerdRuntime(_, _, profile string) error {
config, err := config.Load(profile)
if err != nil {
return fmt.Errorf("config.Load: %v", err)
}
r, err := cruntime.New(cruntime.Config{Type: config.KubernetesConfig.ContainerRuntime})
// IsRuntimeContainerd is a validator which returns an error if the current runtime is not containerd
func IsRuntimeContainerd(cc *config.ClusterConfig, _, _ string) error {
r, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime})
if err != nil {
return err
}

View File

@ -125,8 +125,9 @@ func (d *Driver) GetSSHHostname() (string, error) {
return d.IPAddress, nil
}
// GetURL returns a Docker compatible host URL for connecting to this host
// GetURL returns a Docker URL inside this host
// e.g. tcp://1.2.3.4:2376
// more info https://github.com/docker/machine/blob/b170508bf44c3405e079e26d5fdffe35a64c6972/libmachine/provision/utils.go#L159_L175
func (d *Driver) GetURL() (string, error) {
ip, err := d.GetIP()
if err != nil {

View File

@ -81,7 +81,7 @@ func (d *Driver) Create() error {
// control plane specific options
params.PortMappings = append(params.PortMappings, oci.PortMapping{
ListenAddress: oci.DefaultBindIPV4,
ContainerPort: constants.APIServerPort,
ContainerPort: int32(params.APIServerPort),
},
oci.PortMapping{
ListenAddress: oci.DefaultBindIPV4,
@ -200,13 +200,15 @@ func (d *Driver) GetSSHKeyPath() string {
return d.SSHKeyPath
}
// GetURL returns ip of the container running kic control-panel
// GetURL returns a Docker URL inside this host
// e.g. tcp://1.2.3.4:2376
// more info https://github.com/docker/machine/blob/b170508bf44c3405e079e26d5fdffe35a64c6972/libmachine/provision/utils.go#L159_L175
func (d *Driver) GetURL() (string, error) {
p, err := oci.HostPortBinding(d.NodeConfig.OCIBinary, d.MachineName, d.NodeConfig.APIServerPort)
url := fmt.Sprintf("https://%s", net.JoinHostPort("127.0.0.1", fmt.Sprint(p)))
ip, err := d.GetIP()
if err != nil {
return url, errors.Wrap(err, "api host port binding")
return "", err
}
url := fmt.Sprintf("tcp://%s", net.JoinHostPort(ip, "2376"))
return url, nil
}

View File

@ -103,6 +103,7 @@ func allVolumesByLabel(ociBin string, label string) ([]string, error) {
// to the volume named volumeName
func ExtractTarballToVolume(tarballPath, volumeName, imageName string) error {
cmd := exec.Command(Docker, "run", "--rm", "--entrypoint", "/usr/bin/tar", "-v", fmt.Sprintf("%s:/preloaded.tar:ro", tarballPath), "-v", fmt.Sprintf("%s:/extractDir", volumeName), imageName, "-I", "lz4", "-xvf", "/preloaded.tar", "-C", "/extractDir")
glog.Infof("executing: %s", cmd.Args)
if out, err := cmd.CombinedOutput(); err != nil {
return errors.Wrapf(err, "output %s", string(out))
}
@ -114,6 +115,7 @@ func ExtractTarballToVolume(tarballPath, volumeName, imageName string) error {
// TODO: this should be fixed as a part of https://github.com/kubernetes/minikube/issues/6530
func createDockerVolume(profile string, nodeName string) error {
cmd := exec.Command(Docker, "volume", "create", nodeName, "--label", fmt.Sprintf("%s=%s", ProfileLabelKey, profile), "--label", fmt.Sprintf("%s=%s", CreatedByLabelKey, "true"))
glog.Infof("executing: %s", cmd.Args)
if out, err := cmd.CombinedOutput(); err != nil {
return errors.Wrapf(err, "output %s", string(out))
}

View File

@ -119,7 +119,9 @@ func (d *Driver) PreCommandCheck() error {
return nil
}
// GetURL returns a Docker compatible host URL for connecting to this host
// GetURL returns a Docker URL inside this host
// e.g. tcp://1.2.3.4:2376
// more info https://github.com/docker/machine/blob/b170508bf44c3405e079e26d5fdffe35a64c6972/libmachine/provision/utils.go#L159_L175
func (d *Driver) GetURL() (string, error) {
if err := d.PreCommandCheck(); err != nil {
return "", errors.Wrap(err, "getting URL, precheck failed")

View File

@ -115,8 +115,9 @@ func (d *Driver) GetSSHPort() (int, error) {
return 0, fmt.Errorf("driver does not support ssh commands")
}
// GetURL returns a Docker compatible host URL for connecting to this host
// GetURL returns a Docker URL inside this host
// e.g. tcp://1.2.3.4:2376
// more info https://github.com/docker/machine/blob/b170508bf44c3405e079e26d5fdffe35a64c6972/libmachine/provision/utils.go#L159_L175
func (d *Driver) GetURL() (string, error) {
ip, err := d.GetIP()
if err != nil {

View File

@ -30,6 +30,7 @@ import (
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/vmpath"
"k8s.io/minikube/pkg/util"
)
// Container runtimes
@ -38,7 +39,7 @@ const remoteContainerRuntime = "remote"
// GenerateKubeadmYAML generates the kubeadm.yaml file
func GenerateKubeadmYAML(cc config.ClusterConfig, r cruntime.Manager, n config.Node) ([]byte, error) {
k8s := cc.KubernetesConfig
version, err := ParseKubernetesVersion(k8s.KubernetesVersion)
version, err := util.ParseKubernetesVersion(k8s.KubernetesVersion)
if err != nil {
return nil, errors.Wrap(err, "parsing kubernetes version")
}

View File

@ -26,13 +26,12 @@ import (
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/util"
)
// NewKubeletConfig generates a new systemd unit containing a configured kubelet
// based on the options present in the KubernetesConfig.
func NewKubeletConfig(mc config.ClusterConfig, nc config.Node, r cruntime.Manager) ([]byte, error) {
func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manager) (map[string]string, error) {
k8s := mc.KubernetesConfig
version, err := ParseKubernetesVersion(k8s.KubernetesVersion)
version, err := util.ParseKubernetesVersion(k8s.KubernetesVersion)
if err != nil {
return nil, errors.Wrap(err, "parsing kubernetes version")
}
@ -64,7 +63,7 @@ func NewKubeletConfig(mc config.ClusterConfig, nc config.Node, r cruntime.Manage
extraOpts["hostname-override"] = nc.Name
}
pauseImage := images.Pause(k8s.ImageRepository)
pauseImage := images.Pause(version, k8s.ImageRepository)
if _, ok := extraOpts["pod-infra-container-image"]; !ok && k8s.ImageRepository != "" && pauseImage != "" && k8s.ContainerRuntime != remoteContainerRuntime {
extraOpts["pod-infra-container-image"] = pauseImage
}
@ -79,7 +78,18 @@ func NewKubeletConfig(mc config.ClusterConfig, nc config.Node, r cruntime.Manage
extraOpts["feature-gates"] = kubeletFeatureArgs
}
return extraOpts, nil
}
// NewKubeletConfig generates a new systemd unit containing a configured kubelet
// based on the options present in the KubernetesConfig.
func NewKubeletConfig(mc config.ClusterConfig, nc config.Node, r cruntime.Manager) ([]byte, error) {
b := bytes.Buffer{}
extraOpts, err := extraKubeletOpts(mc, nc, r)
if err != nil {
return nil, err
}
k8s := mc.KubernetesConfig
opts := struct {
ExtraOptions string
ContainerRuntime string

View File

@ -79,7 +79,7 @@ Wants=crio.service
[Service]
ExecStart=
ExecStart=/var/lib/minikube/binaries/v1.17.3/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
ExecStart=/var/lib/minikube/binaries/v1.18.0-beta.2/kubelet --authorization-mode=Webhook --bootstrap-kubeconfig=/etc/kubernetes/bootstrap-kubelet.conf --cgroup-driver=cgroupfs --client-ca-file=/var/lib/minikube/certs/ca.crt --cluster-domain=cluster.local --config=/var/lib/kubelet/config.yaml --container-runtime=remote --container-runtime-endpoint=/var/run/crio/crio.sock --fail-swap-on=false --hostname-override=minikube --image-service-endpoint=/var/run/crio/crio.sock --kubeconfig=/etc/kubernetes/kubelet.conf --node-ip=192.168.1.100 --pod-manifest-path=/etc/kubernetes/manifests --runtime-request-timeout=15m
[Install]
`,

View File

@ -21,23 +21,11 @@ import (
"strings"
"github.com/blang/semver"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/vmpath"
"k8s.io/minikube/pkg/util"
)
// ParseKubernetesVersion parses the kubernetes version
func ParseKubernetesVersion(version string) (semver.Version, error) {
// Strip leading 'v' prefix from version for semver parsing
v, err := semver.Make(version[1:])
if err != nil {
return semver.Version{}, errors.Wrap(err, "invalid version, must begin with 'v'")
}
return v, nil
}
// versionIsBetween checks if a version is between (or including) two given versions
func versionIsBetween(version, gte, lte semver.Version) bool {
if gte.NE(semver.Version{}) && !version.GTE(gte) {

View File

@ -91,13 +91,3 @@ func TestVersionIsBetween(t *testing.T) {
})
}
}
func TestParseKubernetesVersion(t *testing.T) {
version, err := ParseKubernetesVersion("v1.8.0-alpha.5")
if err != nil {
t.Fatalf("Error parsing version: %v", err)
}
if version.NE(semver.MustParse("1.8.0-alpha.5")) {
t.Errorf("Expected: %s, Actual:%s", "1.8.0-alpha.5", version)
}
}

View File

@ -25,11 +25,15 @@ import (
"github.com/blang/semver"
)
// Pause returns the image name to pull for the pause image
func Pause(mirror string) string {
// Pause returns the image name to pull for a given Kubernetes version
func Pause(v semver.Version, mirror string) string {
// Should match `PauseVersion` in:
// https://github.com/kubernetes/kubernetes/blob/master/cmd/kubeadm/app/constants/constants.go
return path.Join(kubernetesRepo(mirror), "pause"+archTag(false)+"3.1")
pv := "3.2"
if semver.MustParseRange("<1.18.0-alpha.0")(v) {
pv = "3.1"
}
return path.Join(kubernetesRepo(mirror), "pause"+archTag(false)+pv)
}
// essentials returns images needed too bootstrap a kubenretes
@ -41,7 +45,7 @@ func essentials(mirror string, v semver.Version) []string {
componentImage("kube-apiserver", v, mirror),
coreDNS(v, mirror),
etcd(v, mirror),
Pause(mirror),
Pause(v, mirror),
}
return imgs
}
@ -61,8 +65,10 @@ func componentImage(name string, v semver.Version, mirror string) string {
func coreDNS(v semver.Version, mirror string) string {
// Should match `CoreDNSVersion` in
// https://github.com/kubernetes/kubernetes/blob/master/cmd/kubeadm/app/constants/constants.go
cv := "1.6.5"
cv := "1.6.7"
switch v.Minor {
case 17:
cv = "1.6.5"
case 16:
cv = "1.6.2"
case 15, 14:

View File

@ -41,6 +41,7 @@ import (
"k8s.io/minikube/pkg/drivers/kic"
"k8s.io/minikube/pkg/drivers/kic/oci"
"k8s.io/minikube/pkg/kapi"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/bootstrapper"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"
@ -53,6 +54,7 @@ import (
"k8s.io/minikube/pkg/minikube/machine"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/vmpath"
"k8s.io/minikube/pkg/util"
"k8s.io/minikube/pkg/version"
)
@ -163,7 +165,7 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error {
glog.Infof("StartCluster complete in %s", time.Since(start))
}()
version, err := bsutil.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion)
version, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion)
if err != nil {
return errors.Wrap(err, "parsing kubernetes version")
}
@ -191,7 +193,7 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error {
// Allow older kubeadm versions to function with newer Docker releases.
// For kic on linux example error: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.2.17-1rodete3-amd64"
if version.LT(semver.MustParse("1.13.0")) || driver.IsKIC(cfg.Driver) {
glog.Infof("Older Kubernetes release detected (%s), disabling SystemVerification check.", version)
glog.Info("ignoring SystemVerification for kubeadm because of either driver or kubernetes version")
ignore = append(ignore, "SystemVerification")
}
@ -291,7 +293,10 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
return errors.Wrap(err, "get k8s client")
}
return kverify.SystemPods(c, start, timeout)
if err := kverify.SystemPods(c, start, timeout); err != nil {
return errors.Wrap(err, "waiting for system pods")
}
return nil
}
// restartCluster restarts the Kubernetes cluster configured by kubeadm
@ -303,7 +308,7 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error {
glog.Infof("restartCluster took %s", time.Since(start))
}()
version, err := bsutil.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion)
version, err := util.ParseKubernetesVersion(cfg.KubernetesConfig.KubernetesVersion)
if err != nil {
return errors.Wrap(err, "parsing kubernetes version")
}
@ -414,7 +419,7 @@ func (k *Bootstrapper) GenerateToken(cc config.ClusterConfig) (string, error) {
// DeleteCluster removes the components that were started earlier
func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error {
version, err := bsutil.ParseKubernetesVersion(k8s.KubernetesVersion)
version, err := util.ParseKubernetesVersion(k8s.KubernetesVersion)
if err != nil {
return errors.Wrap(err, "parsing kubernetes version")
}
@ -483,10 +488,9 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru
glog.Infof("kubelet %s config:\n%+v", kubeletCfg, cfg.KubernetesConfig)
stopCmd := exec.Command("/bin/bash", "-c", "pgrep kubelet && sudo systemctl stop kubelet")
// stop kubelet to avoid "Text File Busy" error
if rr, err := k.c.RunCmd(stopCmd); err != nil {
glog.Warningf("unable to stop kubelet: %s command: %q output: %q", err, rr.Command(), rr.Output())
if err := stopKubelet(k.c); err != nil {
glog.Warningf("unable to stop kubelet: %s", err)
}
if err := bsutil.TransferBinaries(cfg.KubernetesConfig, k.c); err != nil {
@ -498,24 +502,46 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru
cniFile = []byte(defaultCNIConfig)
}
files := bsutil.ConfigFileAssets(cfg.KubernetesConfig, kubeadmCfg, kubeletCfg, kubeletService, cniFile)
if err := copyFiles(k.c, files); err != nil {
return err
}
if err := startKubelet(k.c); err != nil {
return err
}
return nil
}
func stopKubelet(runner command.Runner) error {
stopCmd := exec.Command("/bin/bash", "-c", "pgrep kubelet && sudo systemctl stop kubelet")
if rr, err := runner.RunCmd(stopCmd); err != nil {
return errors.Wrapf(err, "command: %q output: %q", rr.Command(), rr.Output())
}
return nil
}
func copyFiles(runner command.Runner, files []assets.CopyableFile) error {
// Combine mkdir request into a single call to reduce load
dirs := []string{}
for _, f := range files {
dirs = append(dirs, f.GetTargetDir())
}
args := append([]string{"mkdir", "-p"}, dirs...)
if _, err := k.c.RunCmd(exec.Command("sudo", args...)); err != nil {
if _, err := runner.RunCmd(exec.Command("sudo", args...)); err != nil {
return errors.Wrap(err, "mkdir")
}
for _, f := range files {
if err := k.c.Copy(f); err != nil {
if err := runner.Copy(f); err != nil {
return errors.Wrapf(err, "copy")
}
}
return nil
}
if _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", "sudo systemctl daemon-reload && sudo systemctl start kubelet")); err != nil {
func startKubelet(runner command.Runner) error {
startCmd := exec.Command("/bin/bash", "-c", "sudo systemctl daemon-reload && sudo systemctl start kubelet")
if _, err := runner.RunCmd(startCmd); err != nil {
return errors.Wrap(err, "starting kubelet")
}
return nil

View File

@ -43,11 +43,13 @@ const (
// BeginCacheKubernetesImages caches images required for kubernetes version in the background
func BeginCacheKubernetesImages(g *errgroup.Group, imageRepository string, k8sVersion string, cRuntime string) {
if download.PreloadExists(k8sVersion, cRuntime) {
g.Go(func() error {
glog.Info("Caching tarball of preloaded images")
return download.Preload(k8sVersion, cRuntime)
})
return
glog.Info("Caching tarball of preloaded images")
err := download.Preload(k8sVersion, cRuntime)
if err == nil {
glog.Infof("Finished downloading the preloaded tar for %s on %s", k8sVersion, cRuntime)
return // don't cache individual images if preload is successful.
}
glog.Warningf("Error downloading preloaded artifacts will continue without preload: %v", err)
}
if !viper.GetBool("cache-images") {

View File

@ -34,7 +34,7 @@ func TestListProfiles(t *testing.T) {
vmDriver string
}{
{0, "p1", "hyperkit"},
{1, "p2", "virtualbox"},
{1, "p2_newformat", "virtualbox"},
}
// test cases for invalid profiles
@ -109,7 +109,7 @@ func TestProfileExists(t *testing.T) {
expected bool
}{
{"p1", true},
{"p2", true},
{"p2_newformat", true},
{"p3_empty", true},
{"p4_invalid_file", true},
{"p5_partial_config", true},
@ -218,3 +218,47 @@ func TestDeleteProfile(t *testing.T) {
}
}
func TestGetPrimaryControlPlane(t *testing.T) {
miniDir, err := filepath.Abs("./testdata/.minikube2")
if err != nil {
t.Errorf("error getting dir path for ./testdata/.minikube : %v", err)
}
var tests = []struct {
description string
profile string
expectedIP string
expectedPort int
expectedName string
}{
{"old style", "p1", "192.168.64.75", 8443, "minikube"},
{"new style", "p2_newformat", "192.168.99.136", 8443, "m01"},
}
for _, tc := range tests {
cc, err := DefaultLoader.LoadConfigFromFile(tc.profile, miniDir)
if err != nil {
t.Fatalf("Failed to load config for %s", tc.description)
}
n, err := PrimaryControlPlane(cc)
if err != nil {
t.Fatalf("Unexpexted error getting primary control plane: %v", err)
}
if n.Name != tc.expectedName {
t.Errorf("Unexpected name. expected: %s, got: %s", tc.expectedName, n.Name)
}
if n.IP != tc.expectedIP {
t.Errorf("Unexpected name. expected: %s, got: %s", tc.expectedIP, n.IP)
}
if n.Port != tc.expectedPort {
t.Errorf("Unexpected name. expected: %d, got: %d", tc.expectedPort, n.Port)
}
}
}

View File

@ -29,9 +29,6 @@
},
"KubernetesConfig": {
"KubernetesVersion": "v1.15.0",
"NodeIP": "192.168.99.136",
"NodePort": 8443,
"NodeName": "minikube",
"APIServerName": "minikubeCA",
"APIServerNames": null,
"APIServerIPs": null,
@ -45,5 +42,15 @@
"ExtraOptions": null,
"ShouldLoadCachedImages": true,
"EnableDefaultCNI": false
}
},
"Nodes": [
{
"Name": "m01",
"IP": "192.168.99.136",
"Port": 8443,
"KubernetesVersion": "v1.15.0",
"ControlPlane": true,
"Worker": true
}
]
}

View File

@ -1,5 +1,5 @@
{
"Name": "p2",
"Name": "p2_newformat",
"KeepContext": false,
"MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso",
"Memory": 2000,
@ -28,9 +28,6 @@
"HostDNSResolver": true,
"KubernetesConfig": {
"KubernetesVersion": "v1.15.0",
"NodeIP": "192.168.99.136",
"NodePort": 8443,
"NodeName": "minikube",
"APIServerName": "minikubeCA",
"APIServerNames": null,
"APIServerIPs": null,
@ -44,5 +41,15 @@
"ExtraOptions": null,
"ShouldLoadCachedImages": true,
"EnableDefaultCNI": false
}
},
"Nodes": [
{
"Name": "m01",
"IP": "192.168.99.136",
"Port": 8443,
"KubernetesVersion": "v1.15.0",
"ControlPlane": true,
"Worker": true
}
]
}

View File

@ -28,7 +28,7 @@ const (
// DefaultKubernetesVersion is the default kubernetes version
DefaultKubernetesVersion = "v1.17.3"
// NewestKubernetesVersion is the newest Kubernetes version to test against
NewestKubernetesVersion = "v1.17.3"
NewestKubernetesVersion = "v1.18.0-beta.2"
// OldestKubernetesVersion is the oldest Kubernetes version to test against
OldestKubernetesVersion = "v1.11.10"
// DefaultClusterName is the default nane for the k8s cluster

View File

@ -25,9 +25,11 @@ import (
"strings"
"text/template"
"github.com/blang/semver"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/out"
)
@ -112,7 +114,7 @@ type Containerd struct {
Socket string
Runner CommandRunner
ImageRepository string
KubernetesVersion string
KubernetesVersion semver.Version
}
// Name is a human readable name for containerd
@ -155,7 +157,7 @@ func (r *Containerd) DefaultCNI() bool {
// Active returns if containerd is active on the host
func (r *Containerd) Active() bool {
c := exec.Command("systemctl", "is-active", "--quiet", "service", "containerd")
c := exec.Command("sudo", "systemctl", "is-active", "--quiet", "service", "containerd")
_, err := r.Runner.RunCmd(c)
return err == nil
}
@ -170,13 +172,13 @@ func (r *Containerd) Available() error {
}
// generateContainerdConfig sets up /etc/containerd/config.toml
func generateContainerdConfig(cr CommandRunner, imageRepository string) error {
func generateContainerdConfig(cr CommandRunner, imageRepository string, kv semver.Version) error {
cPath := containerdConfigFile
t, err := template.New("containerd.config.toml").Parse(containerdConfigTemplate)
if err != nil {
return err
}
pauseImage := images.Pause(imageRepository)
pauseImage := images.Pause(kv, imageRepository)
opts := struct{ PodInfraContainerImage string }{PodInfraContainerImage: pauseImage}
var b bytes.Buffer
if err := t.Execute(&b, opts); err != nil {
@ -199,7 +201,7 @@ func (r *Containerd) Enable(disOthers bool) error {
if err := populateCRIConfig(r.Runner, r.SocketPath()); err != nil {
return err
}
if err := generateContainerdConfig(r.Runner, r.ImageRepository); err != nil {
if err := generateContainerdConfig(r.Runner, r.ImageRepository, r.KubernetesVersion); err != nil {
return err
}
if err := enableIPForwarding(r.Runner); err != nil {
@ -310,6 +312,6 @@ func (r *Containerd) SystemLogCmd(len int) string {
}
// Preload preloads the container runtime with k8s images
func (r *Containerd) Preload(k8sVersion string) error {
func (r *Containerd) Preload(cfg config.KubernetesConfig) error {
return fmt.Errorf("not yet implemented for %s", r.Name())
}

View File

@ -21,9 +21,11 @@ import (
"os/exec"
"strings"
"github.com/blang/semver"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/out"
)
@ -37,13 +39,13 @@ type CRIO struct {
Socket string
Runner CommandRunner
ImageRepository string
KubernetesVersion string
KubernetesVersion semver.Version
}
// generateCRIOConfig sets up /etc/crio/crio.conf
func generateCRIOConfig(cr CommandRunner, imageRepository string) error {
func generateCRIOConfig(cr CommandRunner, imageRepository string, kv semver.Version) error {
cPath := crioConfigFile
pauseImage := images.Pause(imageRepository)
pauseImage := images.Pause(kv, imageRepository)
c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo sed -e 's|^pause_image = .*$|pause_image = \"%s\"|' -i %s", pauseImage, cPath))
if _, err := cr.RunCmd(c); err != nil {
@ -101,7 +103,7 @@ func (r *CRIO) Available() error {
// Active returns if CRIO is active on the host
func (r *CRIO) Active() bool {
c := exec.Command("systemctl", "is-active", "--quiet", "service", "crio")
c := exec.Command("sudo", "systemctl", "is-active", "--quiet", "service", "crio")
_, err := r.Runner.RunCmd(c)
return err == nil
}
@ -116,7 +118,7 @@ func (r *CRIO) Enable(disOthers bool) error {
if err := populateCRIConfig(r.Runner, r.SocketPath()); err != nil {
return err
}
if err := generateCRIOConfig(r.Runner, r.ImageRepository); err != nil {
if err := generateCRIOConfig(r.Runner, r.ImageRepository, r.KubernetesVersion); err != nil {
return err
}
if err := enableIPForwarding(r.Runner); err != nil {
@ -227,6 +229,6 @@ func (r *CRIO) SystemLogCmd(len int) string {
}
// Preload preloads the container runtime with k8s images
func (r *CRIO) Preload(k8sVersion string) error {
func (r *CRIO) Preload(cfg config.KubernetesConfig) error {
return fmt.Errorf("not yet implemented for %s", r.Name())
}

View File

@ -21,10 +21,12 @@ import (
"fmt"
"os/exec"
"github.com/blang/semver"
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/out"
)
@ -100,7 +102,7 @@ type Manager interface {
// SystemLogCmd returns the command to return the system logs
SystemLogCmd(int) string
// Preload preloads the container runtime with k8s images
Preload(string) error
Preload(config.KubernetesConfig) error
}
// Config is runtime configuration
@ -114,7 +116,7 @@ type Config struct {
// ImageRepository image repository to download image from
ImageRepository string
// KubernetesVersion Kubernetes version
KubernetesVersion string
KubernetesVersion semver.Version
}
// ListOptions are the options to use for listing containers

View File

@ -224,43 +224,60 @@ func (f *FakeRunner) Remove(assets.CopyableFile) error {
return nil
}
func (f *FakeRunner) dockerPs(args []string) (string, error) {
// ps -a --filter="name=apiserver" --format="{{.ID}}"
if args[1] == "-a" && strings.HasPrefix(args[2], "--filter") {
filter := strings.Split(args[2], `r=`)[1]
fname := strings.Split(filter, "=")[1]
ids := []string{}
f.t.Logf("fake docker: Looking for containers matching %q", fname)
for id, cname := range f.containers {
if strings.Contains(cname, fname) {
ids = append(ids, id)
}
}
f.t.Logf("fake docker: Found containers: %v", ids)
return strings.Join(ids, "\n"), nil
}
return "", nil
}
func (f *FakeRunner) dockerStop(args []string) (string, error) {
ids := strings.Split(args[1], " ")
for _, id := range ids {
f.t.Logf("fake docker: Stopping id %q", id)
if f.containers[id] == "" {
return "", fmt.Errorf("no such container")
}
delete(f.containers, id)
}
return "", nil
}
func (f *FakeRunner) dockerRm(args []string) (string, error) {
// Skip "-f" argument
for _, id := range args[2:] {
f.t.Logf("fake docker: Removing id %q", id)
if f.containers[id] == "" {
return "", fmt.Errorf("no such container")
}
delete(f.containers, id)
}
return "", nil
}
// docker is a fake implementation of docker
func (f *FakeRunner) docker(args []string, _ bool) (string, error) {
switch cmd := args[0]; cmd {
case "ps":
// ps -a --filter="name=apiserver" --format="{{.ID}}"
if args[1] == "-a" && strings.HasPrefix(args[2], "--filter") {
filter := strings.Split(args[2], `r=`)[1]
fname := strings.Split(filter, "=")[1]
ids := []string{}
f.t.Logf("fake docker: Looking for containers matching %q", fname)
for id, cname := range f.containers {
if strings.Contains(cname, fname) {
ids = append(ids, id)
}
}
f.t.Logf("fake docker: Found containers: %v", ids)
return strings.Join(ids, "\n"), nil
}
case "stop":
ids := strings.Split(args[1], " ")
for _, id := range ids {
f.t.Logf("fake docker: Stopping id %q", id)
if f.containers[id] == "" {
return "", fmt.Errorf("no such container")
}
delete(f.containers, id)
}
case "rm":
// Skip "-f" argument
for _, id := range args[2:] {
f.t.Logf("fake docker: Removing id %q", id)
if f.containers[id] == "" {
return "", fmt.Errorf("no such container")
}
delete(f.containers, id)
return f.dockerPs(args)
case "stop":
return f.dockerStop(args)
case "rm":
return f.dockerRm(args)
}
case "version":
if args[1] == "--format" && args[2] == "{{.Server.Version}}" {

View File

@ -26,6 +26,10 @@ import (
"github.com/golang/glog"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/bootstrapper/images"
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/docker"
"k8s.io/minikube/pkg/minikube/download"
"k8s.io/minikube/pkg/minikube/out"
)
@ -91,7 +95,7 @@ func (r *Docker) Available() error {
// Active returns if docker is active on the host
func (r *Docker) Active() bool {
c := exec.Command("systemctl", "is-active", "--quiet", "service", "docker")
c := exec.Command("sudo", "systemctl", "is-active", "--quiet", "service", "docker")
_, err := r.Runner.RunCmd(c)
return err == nil
}
@ -283,7 +287,24 @@ func (r *Docker) SystemLogCmd(len int) string {
// 1. Copy over the preloaded tarball into the VM
// 2. Extract the preloaded tarball to the correct directory
// 3. Remove the tarball within the VM
func (r *Docker) Preload(k8sVersion string) error {
func (r *Docker) Preload(cfg config.KubernetesConfig) error {
k8sVersion := cfg.KubernetesVersion
// If images already exist, return
images, err := images.Kubeadm(cfg.ImageRepository, k8sVersion)
if err != nil {
return errors.Wrap(err, "getting images")
}
if DockerImagesPreloaded(r.Runner, images) {
glog.Info("Images already preloaded, skipping extraction")
return nil
}
refStore := docker.NewStorage(r.Runner)
if err := refStore.Save(); err != nil {
glog.Infof("error saving reference store: %v", err)
}
tarballPath := download.TarballPath(k8sVersion)
targetDir := "/"
targetName := "preloaded.tar.lz4"
@ -314,5 +335,37 @@ func (r *Docker) Preload(k8sVersion string) error {
if err := r.Runner.Remove(fa); err != nil {
glog.Infof("error removing tarball: %v", err)
}
// save new reference store again
if err := refStore.Save(); err != nil {
glog.Infof("error saving reference store: %v", err)
}
// update reference store
if err := refStore.Update(); err != nil {
glog.Infof("error updating reference store: %v", err)
}
return r.Restart()
}
// DockerImagesPreloaded returns true if all images have been preloaded
func DockerImagesPreloaded(runner command.Runner, images []string) bool {
rr, err := runner.RunCmd(exec.Command("docker", "images", "--format", "{{.Repository}}:{{.Tag}}"))
if err != nil {
return false
}
preloadedImages := map[string]struct{}{}
for _, i := range strings.Split(rr.Stdout.String(), "\n") {
preloadedImages[i] = struct{}{}
}
glog.Infof("Got preloaded images: %s", rr.Output())
// Make sure images == imgs
for _, i := range images {
if _, ok := preloadedImages[i]; !ok {
glog.Infof("%s wasn't preloaded", i)
return false
}
}
return true
}

View File

@ -0,0 +1,104 @@
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docker
import (
"encoding/json"
"os/exec"
"path"
"github.com/golang/glog"
"github.com/opencontainers/go-digest"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/command"
)
const (
referenceStorePath = "/var/lib/docker/image/overlay2/repositories.json"
)
// Storage keeps track of reference stores
// Storage keeps track of reference stores
type Storage struct {
	refStores []ReferenceStore // snapshots of repositories.json, in the order Save was called
	runner    command.Runner   // runs commands / copies files inside the minikube guest
}
// ReferenceStore stores references to images in repositories.json
// used by the docker daemon to name images
// taken from "github.com/docker/docker/reference/store.go"
type ReferenceStore struct {
Repositories map[string]repository
}
type repository map[string]digest.Digest
// NewStorage returns a new storage type
// NewStorage returns a new Storage that reads and writes the reference
// store through the given runner.
func NewStorage(runner command.Runner) *Storage {
	s := &Storage{runner: runner}
	return s
}
// Save saves the current reference store in memory
func (s *Storage) Save() error {
// get the contents of repositories.json in minikube
// if this command fails, assume the file doesn't exist
rr, err := s.runner.RunCmd(exec.Command("sudo", "cat", referenceStorePath))
if err != nil {
glog.Infof("repositories.json doesn't exist: %v", err)
return nil
}
contents := rr.Stdout.Bytes()
var rs ReferenceStore
if err := json.Unmarshal(contents, &rs); err != nil {
return err
}
s.refStores = append(s.refStores, rs)
return nil
}
// Update merges all reference stores and updates repositories.json
func (s *Storage) Update() error {
// in case we didn't overwrite respoitories.json, do nothing
if len(s.refStores) == 1 {
return nil
}
// merge reference stores
merged := s.mergeReferenceStores()
// write to file in minikube
contents, err := json.Marshal(merged)
if err != nil {
return err
}
asset := assets.NewMemoryAsset(contents, path.Dir(referenceStorePath), path.Base(referenceStorePath), "0644")
return s.runner.Copy(asset)
}
// mergeReferenceStores flattens every captured reference store into one,
// with later snapshots overwriting earlier entries for the same image.
func (s *Storage) mergeReferenceStores() ReferenceStore {
	repos := map[string]repository{}
	for _, store := range s.refStores {
		for name, repo := range store.Repositories {
			repos[name] = repo
		}
	}
	return ReferenceStore{Repositories: repos}
}

View File

@ -0,0 +1,75 @@
/*
Copyright 2020 The Kubernetes Authors All rights reserved.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package docker
import (
"testing"
"github.com/google/go-cmp/cmp"
)
// TestMergeReferenceStores verifies that merging two reference stores
// keeps entries unique to either store and lets the later store win for
// images present in both. Composite literals use the gofmt -s simplified
// form (element type elided), consistent with the rest of the codebase.
func TestMergeReferenceStores(t *testing.T) {
	initial := ReferenceStore{
		Repositories: map[string]repository{
			"image1": {
				"r1": "d1",
				"r2": "d2",
			},
			"image2": {
				"r1": "d1",
				"r2": "d2",
			},
		},
	}

	afterPreload := ReferenceStore{
		Repositories: map[string]repository{
			"image1": {
				"r1": "updated",
				"r2": "updated",
			},
			"image3": {
				"r3": "d3",
			},
		},
	}

	expected := ReferenceStore{
		Repositories: map[string]repository{
			"image1": {
				"r1": "updated",
				"r2": "updated",
			},
			"image2": {
				"r1": "d1",
				"r2": "d2",
			},
			"image3": {
				"r3": "d3",
			},
		},
	}

	s := &Storage{
		refStores: []ReferenceStore{initial, afterPreload},
	}
	actual := s.mergeReferenceStores()
	if diff := cmp.Diff(actual, expected); diff != "" {
		t.Errorf("Actual: %v, Expected: %v, Diff: %s", actual, expected, diff)
	}
}

View File

@ -62,9 +62,11 @@ func Binary(binary, version, osName, archName string) (string, error) {
return "", errors.Wrapf(err, "mkdir %s", targetDir)
}
tmpDst := targetFilepath + ".download"
client := &getter.Client{
Src: url,
Dst: targetFilepath,
Dst: tmpDst,
Mode: getter.ClientModeFile,
Options: []getter.ClientOption{getter.WithProgress(DefaultProgressBar)},
}
@ -75,9 +77,9 @@ func Binary(binary, version, osName, archName string) (string, error) {
}
if osName == runtime.GOOS && archName == runtime.GOARCH {
if err = os.Chmod(targetFilepath, 0755); err != nil {
if err = os.Chmod(tmpDst, 0755); err != nil {
return "", errors.Wrapf(err, "chmod +x %s", targetFilepath)
}
}
return targetFilepath, nil
return targetFilepath, os.Rename(tmpDst, targetFilepath)
}

View File

@ -35,11 +35,13 @@ func driverWithChecksumURL(name string, v semver.Version) string {
// Driver downloads an arbitrary driver
func Driver(name string, destination string, v semver.Version) error {
out.T(out.FileDownload, "Downloading driver {{.driver}}:", out.V{"driver": name})
os.Remove(destination)
tmpDst := destination + ".download"
url := driverWithChecksumURL(name, v)
client := &getter.Client{
Src: url,
Dst: destination,
Dst: tmpDst,
Mode: getter.ClientModeFile,
Options: []getter.ClientOption{getter.WithProgress(DefaultProgressBar)},
}
@ -49,5 +51,9 @@ func Driver(name string, destination string, v semver.Version) error {
return errors.Wrapf(err, "download failed: %s", url)
}
// Give downloaded drivers a baseline decent file permission
return os.Chmod(destination, 0755)
err := os.Chmod(tmpDst, 0755)
if err != nil {
return err
}
return os.Rename(tmpDst, destination)
}

View File

@ -134,7 +134,6 @@ func downloadISO(isoURL string, skipChecksum bool) error {
urlWithChecksum = isoURL
}
// Predictable temp destination so that resume can function
tmpDst := dst + ".download"
opts := []getter.ClientOption{getter.WithProgress(DefaultProgressBar)}

View File

@ -81,10 +81,8 @@ func PreloadExists(k8sVersion, containerRuntime string) bool {
// Omit remote check if tarball exists locally
targetPath := TarballPath(k8sVersion)
if _, err := os.Stat(targetPath); err == nil {
if err := verifyChecksum(k8sVersion); err == nil {
glog.Infof("Found %s in cache, no need to check remotely", targetPath)
return true
}
glog.Infof("Found local preload: %s", targetPath)
return true
}
url := remoteTarballURL(k8sVersion)
@ -100,7 +98,7 @@ func PreloadExists(k8sVersion, containerRuntime string) bool {
return false
}
glog.Infof("Goody! %s exists!", url)
glog.Infof("Found remote preload: %s", url)
return true
}
@ -112,10 +110,8 @@ func Preload(k8sVersion, containerRuntime string) error {
targetPath := TarballPath(k8sVersion)
if _, err := os.Stat(targetPath); err == nil {
if err := verifyChecksum(k8sVersion); err == nil {
glog.Infof("Found %s in cache, skipping downloading", targetPath)
return nil
}
glog.Infof("Found %s in cache, skipping download", targetPath)
return nil
}
// Make sure we support this k8s version
@ -126,9 +122,11 @@ func Preload(k8sVersion, containerRuntime string) error {
out.T(out.FileDownload, "Downloading preloaded images tarball for k8s {{.version}} ...", out.V{"version": k8sVersion})
url := remoteTarballURL(k8sVersion)
tmpDst := targetPath + ".download"
client := &getter.Client{
Src: url,
Dst: targetPath,
Dst: tmpDst,
Mode: getter.ClientModeFile,
Options: []getter.ClientOption{getter.WithProgress(DefaultProgressBar)},
}
@ -137,18 +135,19 @@ func Preload(k8sVersion, containerRuntime string) error {
if err := client.Get(); err != nil {
return errors.Wrapf(err, "download failed: %s", url)
}
// Give downloaded drivers a baseline decent file permission
if err := os.Chmod(targetPath, 0755); err != nil {
return err
}
// Save checksum file locally
if err := saveChecksumFile(k8sVersion); err != nil {
return errors.Wrap(err, "saving checksum file")
}
return verifyChecksum(k8sVersion)
if err := verifyChecksum(k8sVersion, tmpDst); err != nil {
return errors.Wrap(err, "verify")
}
return os.Rename(tmpDst, targetPath)
}
func saveChecksumFile(k8sVersion string) error {
glog.Infof("saving checksum for %s ...", tarballName(k8sVersion))
ctx := context.Background()
client, err := storage.NewClient(ctx, option.WithoutAuthentication())
if err != nil {
@ -164,9 +163,10 @@ func saveChecksumFile(k8sVersion string) error {
// verifyChecksum returns true if the checksum of the local binary matches
// the checksum of the remote binary
func verifyChecksum(k8sVersion string) error {
func verifyChecksum(k8sVersion string, path string) error {
glog.Infof("verifying checksumm of %s ...", path)
// get md5 checksum of tarball path
contents, err := ioutil.ReadFile(TarballPath(k8sVersion))
contents, err := ioutil.ReadFile(path)
if err != nil {
return errors.Wrap(err, "reading tarball")
}
@ -179,7 +179,7 @@ func verifyChecksum(k8sVersion string) error {
// create a slice of checksum, which is [16]byte
if string(remoteChecksum) != string(checksum[:]) {
return fmt.Errorf("checksum of %s does not match remote checksum (%s != %s)", TarballPath(k8sVersion), string(remoteChecksum), string(checksum[:]))
return fmt.Errorf("checksum of %s does not match remote checksum (%s != %s)", path, string(remoteChecksum), string(checksum[:]))
}
return nil
}

View File

@ -24,6 +24,7 @@ import (
"github.com/golang/glog"
"github.com/google/go-containerregistry/pkg/name"
v1 "github.com/google/go-containerregistry/pkg/v1"
"github.com/google/go-containerregistry/pkg/v1/tarball"
"github.com/juju/mutex"
"github.com/pkg/errors"
@ -120,6 +121,20 @@ func saveToTarFile(iname, rawDest string) error {
return errors.Wrapf(err, "nil image for %s", iname)
}
tag, err := name.NewTag(iname, name.WeakValidation)
if err != nil {
return errors.Wrap(err, "newtag")
}
err = writeImage(img, dst, tag)
if err != nil {
return err
}
glog.Infof("%s exists", dst)
return nil
}
func writeImage(img v1.Image, dst string, tag name.Tag) error {
glog.Infoln("opening: ", dst)
f, err := ioutil.TempFile(filepath.Dir(dst), filepath.Base(dst)+".*.tmp")
if err != nil {
@ -135,10 +150,7 @@ func saveToTarFile(iname, rawDest string) error {
}
}
}()
tag, err := name.NewTag(iname, name.WeakValidation)
if err != nil {
return errors.Wrap(err, "newtag")
}
err = tarball.Write(tag, img, f)
if err != nil {
return errors.Wrap(err, "write")
@ -151,6 +163,5 @@ func saveToTarFile(iname, rawDest string) error {
if err != nil {
return errors.Wrap(err, "rename")
}
glog.Infof("%s exists", dst)
return nil
}

View File

@ -77,7 +77,7 @@ func Restart(cr command.Runner) error {
// Check checks on the status of the kubelet
func Check(cr command.Runner) error {
glog.Infof("checking for running kubelet ...")
c := exec.Command("systemctl", "is-active", "--quiet", "service", "kubelet")
c := exec.Command("sudo", "systemctl", "is-active", "--quiet", "service", "kubelet")
if _, err := cr.RunCmd(c); err != nil {
return errors.Wrap(err, "check kubelet")
}

View File

@ -19,10 +19,8 @@ package machine
import (
"fmt"
"os"
"os/exec"
"path"
"path/filepath"
"strings"
"sync"
"time"
@ -66,7 +64,7 @@ func CacheImagesForBootstrapper(imageRepository string, version string, clusterB
// LoadImages loads previously cached images into the container runtime
func LoadImages(cc *config.ClusterConfig, runner command.Runner, images []string, cacheDir string) error {
// Skip loading images if images already exist
if imagesPreloaded(runner, images) {
if cruntime.DockerImagesPreloaded(runner, images) {
glog.Infof("Images are preloaded, skipping loading")
return nil
}
@ -79,6 +77,7 @@ func LoadImages(cc *config.ClusterConfig, runner command.Runner, images []string
}()
var g errgroup.Group
cr, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime, Runner: runner})
if err != nil {
return errors.Wrap(err, "runtime")
@ -108,28 +107,6 @@ func LoadImages(cc *config.ClusterConfig, runner command.Runner, images []string
return nil
}
func imagesPreloaded(runner command.Runner, images []string) bool {
rr, err := runner.RunCmd(exec.Command("docker", "images", "--format", "{{.Repository}}:{{.Tag}}"))
if err != nil {
return false
}
preloadedImages := map[string]struct{}{}
for _, i := range strings.Split(rr.Stdout.String(), "\n") {
preloadedImages[i] = struct{}{}
}
glog.Infof("Got preloaded images: %s", rr.Output())
// Make sure images == imgs
for _, i := range images {
if _, ok := preloadedImages[i]; !ok {
glog.Infof("%s wasn't preloaded", i)
return false
}
}
return true
}
// needsTransfer returns an error if an image needs to be retransfered
func needsTransfer(imgClient *client.Client, imgName string, cr cruntime.Manager) error {
imgDgst := "" // for instance sha256:7c92a2c6bbcb6b6beff92d0a940779769c2477b807c202954c537e2e0deb9bed

View File

@ -59,7 +59,7 @@ var defaultClusterConfig = config.ClusterConfig{
Name: viper.GetString("profile"),
Driver: driver.Mock,
DockerEnv: []string{"MOCK_MAKE_IT_PROVISION=true"},
Nodes: []config.Node{config.Node{Name: "minikube"}},
Nodes: []config.Node{{Name: "minikube"}},
}
func TestCreateHost(t *testing.T) {

View File

@ -72,6 +72,45 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.
// check if need to re-run docker-env
maybeWarnAboutEvalEnv(cc.Driver, cc.Name)
h, err = recreateIfNeeded(api, cc, n, h)
if err != nil {
return h, err
}
e := engineOptions(cc)
if len(e.Env) > 0 {
h.HostOptions.EngineOptions.Env = e.Env
glog.Infof("Detecting provisioner ...")
provisioner, err := provision.DetectProvisioner(h.Driver)
if err != nil {
return h, errors.Wrap(err, "detecting provisioner")
}
if err := provisioner.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions); err != nil {
return h, errors.Wrap(err, "provision")
}
}
if driver.IsMock(h.DriverName) {
return h, nil
}
if err := postStartSetup(h, cc); err != nil {
return h, errors.Wrap(err, "post-start")
}
if driver.BareMetal(h.Driver.DriverName()) {
glog.Infof("%s is local, skipping auth/time setup (requires ssh)", h.Driver.DriverName())
return h, nil
}
glog.Infof("Configuring auth for driver %s ...", h.Driver.DriverName())
if err := h.ConfigureAuth(); err != nil {
return h, &retry.RetriableError{Err: errors.Wrap(err, "Error configuring auth on host")}
}
return h, ensureSyncedGuestClock(h, cc.Driver)
}
func recreateIfNeeded(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) (*host.Host, error) {
s, err := h.Driver.GetState()
if err != nil || s == state.Stopped || s == state.None {
// If virtual machine does not exist due to user interrupt cancel(i.e. Ctrl + C), recreate virtual machine
@ -117,37 +156,7 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.
}
}
e := engineOptions(cc)
if len(e.Env) > 0 {
h.HostOptions.EngineOptions.Env = e.Env
glog.Infof("Detecting provisioner ...")
provisioner, err := provision.DetectProvisioner(h.Driver)
if err != nil {
return h, errors.Wrap(err, "detecting provisioner")
}
if err := provisioner.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions); err != nil {
return h, errors.Wrap(err, "provision")
}
}
if driver.IsMock(h.DriverName) {
return h, nil
}
if err := postStartSetup(h, cc); err != nil {
return h, errors.Wrap(err, "post-start")
}
if driver.BareMetal(h.Driver.DriverName()) {
glog.Infof("%s is local, skipping auth/time setup (requires ssh)", h.Driver.DriverName())
return h, nil
}
glog.Infof("Configuring auth for driver %s ...", h.Driver.DriverName())
if err := h.ConfigureAuth(); err != nil {
return h, &retry.RetriableError{Err: errors.Wrap(err, "Error configuring auth on host")}
}
return h, ensureSyncedGuestClock(h, cc.Driver)
return h, nil
}
// maybeWarnAboutEvalEnv wil warn user if they need to re-eval their docker-env, podman-env
@ -160,9 +169,9 @@ func maybeWarnAboutEvalEnv(drver string, name string) {
if p == "" {
return
}
out.T(out.Notice, "Noticed that you are using minikube docker-env:")
out.T(out.Warning, `After minikube restart the dockerd ports might have changed. To ensure docker-env works properly.
Please re-eval the docker-env command:
out.T(out.Notice, "Noticed you have an activated docker-env on {{.driver_name}} driver in this terminal:", out.V{"driver_name": drver})
// TODO: refactor docker-env package to generate only eval command per shell. https://github.com/kubernetes/minikube/issues/6887
out.T(out.Warning, `Please re-eval your docker-env, To ensure your environment variables have updated ports:
'minikube -p {{.profile_name}} docker-env'
@ -221,6 +230,41 @@ func adjustGuestClock(h hostRunner, t time.Time) error {
return err
}
// machineExistsState reports whether the machine exists based purely on
// its driver state: state.None means the machine is gone.
func machineExistsState(s state.State, err error) (bool, error) {
	if s != state.None {
		return true, err
	}
	return false, ErrorMachineNotExist
}
// machineExistsError reports whether the machine exists by comparing err
// against the driver-specific "machine does not exist" sentinel drverr.
// The state argument is unused; it is kept (as a blank parameter) only
// for signature parity with the other machineExists* helpers.
func machineExistsError(_ state.State, err error, drverr error) (bool, error) {
	if err == drverr {
		// the error matches the driver's "not exist" sentinel
		return false, ErrorMachineNotExist
	}
	return true, err
}
// machineExistsMessage reports whether the machine exists, treating
// state.None — or an error whose text equals msg — as "does not exist".
func machineExistsMessage(s state.State, err error, msg string) (bool, error) {
	gone := s == state.None || (err != nil && err.Error() == msg)
	if gone {
		return false, ErrorMachineNotExist
	}
	return true, err
}
// machineExistsDocker reports whether a docker-driver machine exists.
// Both state.Error and state.None indicate the underlying kic container
// is gone (e.g. the user interrupted `minikube start`).
func machineExistsDocker(s state.State, err error) (bool, error) {
	switch s {
	case state.Error:
		// kic image was not present on the host when start was cancelled
		return false, ErrorMachineNotExist
	case state.None:
		// kic image was present on the host when start was cancelled
		return false, ErrorMachineNotExist
	}
	return true, err
}
// machineExists checks if virtual machine does not exist
// if the virtual machine exists, return true
func machineExists(d string, s state.State, err error) (bool, error) {
@ -229,54 +273,23 @@ func machineExists(d string, s state.State, err error) (bool, error) {
}
switch d {
case driver.HyperKit:
if s == state.None || (err != nil && err.Error() == "connection is shut down") {
return false, ErrorMachineNotExist
}
return true, err
return machineExistsMessage(s, err, "connection is shut down")
case driver.HyperV:
if s == state.None {
return false, ErrorMachineNotExist
}
return true, err
return machineExistsState(s, err)
case driver.KVM2:
if s == state.None {
return false, ErrorMachineNotExist
}
return true, err
return machineExistsState(s, err)
case driver.None:
if s == state.None {
return false, ErrorMachineNotExist
}
return true, err
return machineExistsState(s, err)
case driver.Parallels:
if err != nil && err.Error() == "machine does not exist" {
return false, ErrorMachineNotExist
}
return true, err
return machineExistsMessage(s, err, "connection is shut down")
case driver.VirtualBox:
if err == virtualbox.ErrMachineNotExist {
return false, ErrorMachineNotExist
}
return true, err
return machineExistsError(s, err, virtualbox.ErrMachineNotExist)
case driver.VMware:
if s == state.None {
return false, ErrorMachineNotExist
}
return true, err
return machineExistsState(s, err)
case driver.VMwareFusion:
if s == state.None {
return false, ErrorMachineNotExist
}
return true, err
return machineExistsState(s, err)
case driver.Docker:
if s == state.Error {
// if the kic image is not present on the host machine, when user cancel `minikube start`, state.Error will be return
return false, ErrorMachineNotExist
} else if s == state.None {
// if the kic image is present on the host machine, when user cancel `minikube start`, state.None will be return
return false, ErrorMachineNotExist
}
return true, err
return machineExistsDocker(s, err)
case driver.Mock:
if s == state.Error {
return false, ErrorMachineNotExist

View File

@ -23,6 +23,7 @@ import (
"path/filepath"
"strconv"
"github.com/blang/semver"
"github.com/golang/glog"
"github.com/spf13/viper"
cmdcfg "k8s.io/minikube/cmd/minikube/cmd/config"
@ -38,9 +39,13 @@ import (
)
// configureRuntimes does what needs to happen to get a runtime going.
func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig) cruntime.Manager {
config := cruntime.Config{Type: viper.GetString(containerRuntime), Runner: runner, ImageRepository: k8s.ImageRepository, KubernetesVersion: k8s.KubernetesVersion}
cr, err := cruntime.New(config)
func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config.KubernetesConfig, kv semver.Version) cruntime.Manager {
co := cruntime.Config{
Type: viper.GetString(containerRuntime),
Runner: runner, ImageRepository: k8s.ImageRepository,
KubernetesVersion: kv,
}
cr, err := cruntime.New(co)
if err != nil {
exit.WithError("Failed runtime", err)
}
@ -49,8 +54,10 @@ func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config
if driver.BareMetal(drvName) {
disableOthers = false
}
if !driver.IsKIC(drvName) {
if err := cr.Preload(k8s.KubernetesVersion); err != nil {
// Preload is overly invasive for bare metal, and caching is not meaningful. KIC handled elsewhere.
if driver.IsVM(drvName) {
if err := cr.Preload(k8s); err != nil {
switch err.(type) {
case *cruntime.ErrISOFeature:
out.T(out.Tip, "Existing disk is missing new features ({{.error}}). To upgrade, run 'minikube delete'", out.V{"error": err})

View File

@ -25,6 +25,7 @@ import (
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/util"
)
// Start spins up a guest and starts the kubernetes node.
@ -57,8 +58,13 @@ func Start(cc config.ClusterConfig, n config.Node, existingAddons map[string]boo
// wait for preloaded tarball to finish downloading before configuring runtimes
cluster.WaitCacheRequiredImages(&cacheGroup)
sv, err := util.ParseKubernetesVersion(cc.KubernetesConfig.KubernetesVersion)
if err != nil {
exit.WithError("Failed to parse kubernetes version", err)
}
// configure the runtime (docker, containerd, crio)
cr := configureRuntimes(runner, driverName, cc.KubernetesConfig)
cr := configureRuntimes(runner, driverName, cc.KubernetesConfig, sv)
showVersionInfo(k8sVersion, cr)
configureMounts()

View File

@ -44,16 +44,19 @@ type LoadBalancerEmulator struct {
patchConverter patchConverter
}
// PatchServices will update all load balancer services in the cluster,
// returning the names of the services that were patched.
func (l *LoadBalancerEmulator) PatchServices() ([]string, error) {
	return l.applyOnLBServices(l.updateService)
}
// PatchServiceIP will patch the given service, assigning it the given ip.
func (l *LoadBalancerEmulator) PatchServiceIP(restClient rest.Interface, svc core.Service, ip string) error {
	// TODO: do not ignore result (the patched service name is discarded)
	_, err := l.updateServiceIP(restClient, svc, ip)
	return err
}
// Cleanup will clean up all load balancer services in the cluster,
// returning the names of the services that were cleaned up.
func (l *LoadBalancerEmulator) Cleanup() ([]string, error) {
	return l.applyOnLBServices(l.cleanupService)
}
@ -143,6 +146,7 @@ func (l *LoadBalancerEmulator) cleanupService(restClient rest.Interface, svc cor
}
// NewLoadBalancerEmulator creates a new LoadBalancerEmulator
func NewLoadBalancerEmulator(corev1Client typed_core.CoreV1Interface) LoadBalancerEmulator {
return LoadBalancerEmulator{
coreV1Client: corev1Client,

View File

@ -100,7 +100,7 @@ Environment=DOCKER_RAMDISK=yes
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:{{.DockerPort}} -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert {{.AuthOptions.CaCertRemotePath}} --tlscert {{.AuthOptions.ServerCertRemotePath}} --tlskey {{.AuthOptions.ServerKeyRemotePath}} {{ range .EngineOptions.Labels }}--label {{.}} {{ end }}{{ range .EngineOptions.InsecureRegistry }}--insecure-registry {{.}} {{ end }}{{ range .EngineOptions.RegistryMirror }}--registry-mirror {{.}} {{ end }}{{ range .EngineOptions.ArbitraryFlags }}--{{.}} {{ end }}
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert {{.AuthOptions.CaCertRemotePath}} --tlscert {{.AuthOptions.ServerCertRemotePath}} --tlskey {{.AuthOptions.ServerKeyRemotePath}} {{ range .EngineOptions.Labels }}--label {{.}} {{ end }}{{ range .EngineOptions.InsecureRegistry }}--insecure-registry {{.}} {{ end }}{{ range .EngineOptions.RegistryMirror }}--registry-mirror {{.}} {{ end }}{{ range .EngineOptions.ArbitraryFlags }}--{{.}} {{ end }}
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
@ -151,6 +151,11 @@ WantedBy=multi-user.target
return nil, err
}
// To make sure if there is a already-installed docker on the ISO to pick up the new systemd file
if err := p.Service("", serviceaction.DaemonReload); err != nil {
return nil, err
}
if err := p.Service("docker", serviceaction.Enable); err != nil {
return nil, err
}

View File

@ -104,7 +104,7 @@ Environment=DOCKER_RAMDISK=yes
# NOTE: default-ulimit=nofile is set to an arbitrary number for consistency with other
# container runtimes. If left unlimited, it may result in OOM issues with MySQL.
ExecStart=
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:{{.DockerPort}} -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert {{.AuthOptions.CaCertRemotePath}} --tlscert {{.AuthOptions.ServerCertRemotePath}} --tlskey {{.AuthOptions.ServerKeyRemotePath}} {{ range .EngineOptions.Labels }}--label {{.}} {{ end }}{{ range .EngineOptions.InsecureRegistry }}--insecure-registry {{.}} {{ end }}{{ range .EngineOptions.RegistryMirror }}--registry-mirror {{.}} {{ end }}{{ range .EngineOptions.ArbitraryFlags }}--{{.}} {{ end }}
ExecStart=/usr/bin/dockerd -H tcp://0.0.0.0:2376 -H unix:///var/run/docker.sock --default-ulimit=nofile=1048576:1048576 --tlsverify --tlscacert {{.AuthOptions.CaCertRemotePath}} --tlscert {{.AuthOptions.ServerCertRemotePath}} --tlskey {{.AuthOptions.ServerKeyRemotePath}} {{ range .EngineOptions.Labels }}--label {{.}} {{ end }}{{ range .EngineOptions.InsecureRegistry }}--insecure-registry {{.}} {{ end }}{{ range .EngineOptions.RegistryMirror }}--registry-mirror {{.}} {{ end }}{{ range .EngineOptions.ArbitraryFlags }}--{{.}} {{ end }}
ExecReload=/bin/kill -s HUP $MAINPID
# Having non-zero Limit*s causes performance problems due to accounting overhead
@ -155,6 +155,11 @@ WantedBy=multi-user.target
return nil, err
}
// because in kic base image we pre-install docker it already has a service file. we need to daemon-reload for the new systemd file
if err := p.Service("", serviceaction.DaemonReload); err != nil {
return nil, err
}
if err := p.Service("docker", serviceaction.Enable); err != nil {
return nil, err
}

View File

@ -23,10 +23,9 @@ import (
"path/filepath"
"strconv"
units "github.com/docker/go-units"
"github.com/blang/semver"
"github.com/docker/go-units"
"github.com/pkg/errors"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/out"
)
const (
@ -34,17 +33,17 @@ const (
)
// CalculateSizeInMB returns the number of MB in the human readable string
func CalculateSizeInMB(humanReadableSize string) int {
func CalculateSizeInMB(humanReadableSize string) (int, error) {
_, err := strconv.ParseInt(humanReadableSize, 10, 64)
if err == nil {
humanReadableSize += "mb"
}
size, err := units.FromHumanSize(humanReadableSize)
if err != nil {
exit.WithCodeT(exit.Config, "Invalid size passed in argument: {{.error}}", out.V{"error": err})
return 0, fmt.Errorf("FromHumanSize: %v", err)
}
return int(size / units.MB)
return int(size / units.MB), nil
}
// GetBinaryDownloadURL returns a suitable URL for the platform
@ -89,3 +88,8 @@ func MaybeChownDirRecursiveToMinikubeUser(dir string) error {
}
return nil
}
// ParseKubernetesVersion parses a kubernetes version string such as
// "v1.8.0-alpha.5" into a semver.Version. A leading "v" prefix, if
// present, is stripped before parsing. Unlike the unconditional
// version[1:], this does not panic on an empty string and does not
// corrupt versions given without the "v" prefix.
func ParseKubernetesVersion(version string) (semver.Version, error) {
	if len(version) > 0 && version[0] == 'v' {
		version = version[1:]
	}
	return semver.Make(version)
}

View File

@ -18,6 +18,8 @@ package util
import (
"testing"
"github.com/blang/semver"
)
func TestGetBinaryDownloadURL(t *testing.T) {
@ -52,9 +54,22 @@ func TestCalculateSizeInMB(t *testing.T) {
}
for _, tt := range testData {
number := CalculateSizeInMB(tt.size)
number, err := CalculateSizeInMB(tt.size)
if err != nil {
t.Fatalf("unexpected err: %v", err)
}
if number != tt.expectedNumber {
t.Fatalf("Expected '%d'' but got '%d'", tt.expectedNumber, number)
}
}
}
// TestParseKubernetesVersion checks that a "v"-prefixed version string
// parses to the expected semver value.
func TestParseKubernetesVersion(t *testing.T) {
	got, err := ParseKubernetesVersion("v1.8.0-alpha.5")
	if err != nil {
		t.Fatalf("Error parsing version: %v", err)
	}
	want := semver.MustParse("1.8.0-alpha.5")
	if got.NE(want) {
		t.Errorf("Expected: %s, Actual:%s", "1.8.0-alpha.5", got)
	}
}

View File

@ -41,7 +41,6 @@ Then minikube is for you.
* **What is it good for?** Developing local Kubernetes applications
* **What is it not good for?** Production Kubernetes deployments
* **What is it *not yet* good for?** Environments which do not allow VMs
## Where should I go next?

View File

@ -0,0 +1,61 @@
---
title: "Running eBPF Tools in Minikube"
linkTitle: "Running eBPF Tools in Minikube"
weight: 1
date: 2019-08-15
description: >
Running eBPF Tools in Minikube
---
## Overview
eBPF tools are performance tools used for observing the Linux kernel.
These tools can be used to monitor your Kubernetes application in minikube.
This tutorial will cover how to set up your minikube cluster so that you can run eBPF tools from a Docker container within minikube.
## Prerequisites
- Latest minikube binary
## Tutorial
First, start minikube:
```shell
$ minikube start
```
You will need to download and extract necessary kernel headers within minikube:
```shell
$ minikube ssh -- curl -Lo /tmp/kernel-headers-linux-4.19.94.tar.lz4 https://storage.googleapis.com/minikube-kernel-headers/kernel-headers-linux-4.19.94.tar.lz4
$ minikube ssh -- sudo mkdir -p /lib/modules/4.19.94/build
$ minikube ssh -- sudo tar -I lz4 -C /lib/modules/4.19.94/build -xvf /tmp/kernel-headers-linux-4.19.94.tar.lz4
$ minikube ssh -- rm /tmp/kernel-headers-linux-4.19.94.tar.lz4
```
You can now run [BCC tools](https://github.com/iovisor/bcc) as a Docker container in minikube:
```shell
$ minikube ssh -- docker run -it --rm --privileged -v /lib/modules:/lib/modules:ro -v /usr/src:/usr/src:ro -v /etc/localtime:/etc/localtime:ro --workdir /usr/share/bcc/tools zlim/bcc ./execsnoop
Unable to find image 'zlim/bcc:latest' locally
latest: Pulling from zlim/bcc
6cf436f81810: Pull complete
987088a85b96: Pull complete
b4624b3efe06: Pull complete
d42beb8ded59: Pull complete
90970d1ebfd9: Pull complete
29c3815350eb: Pull complete
e21dfbd8fcfc: Pull complete
Digest: sha256:914bea8970535cd6b0d5dee13f99569c5f0d597942c8333c0aa92443473aff27
Status: Downloaded newer image for zlim/bcc:latest
PCOMM PID PPID RET ARGS
runc 5059 2011 0 /usr/bin/runc --version
docker-init 5065 2011 0 /usr/bin/docker-init --version
nice 5066 4012 0 /usr/bin/nice -n 19 du -x -s -B 1 /var/lib/kubelet/pods/1cf22976-f3e0-498b-bc04-8c7068e6e545/volumes/kubernetes.io~secret/storage-provisioner-token-cvk4x
```

View File

@ -166,6 +166,51 @@ func TestStartStop(t *testing.T) {
})
}
// TestStartStopWithPreload verifies that an image pulled into the cluster's
// docker daemon survives a restart onto a newer Kubernetes version that ships
// a preloaded image tarball.
func TestStartStopWithPreload(t *testing.T) {
	if NoneDriver() {
		t.Skipf("skipping %s - incompatible with none driver", t.Name())
	}

	profile := UniqueProfileName("test-preload")
	ctx, cancel := context.WithTimeout(context.Background(), Minutes(40))
	defer CleanupWithLogs(t, profile, cancel)

	// buildStartArgs assembles a `minikube start` invocation pinned to the
	// given Kubernetes version, including driver-specific flags.
	buildStartArgs := func(k8sVersion string) []string {
		args := []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=3", "--wait=true"}
		args = append(args, StartArgs()...)
		return append(args, fmt.Sprintf("--kubernetes-version=%s", k8sVersion))
	}

	// Bring the cluster up on v1.17.0 first.
	rr, err := Run(t, exec.CommandContext(ctx, Target(), buildStartArgs("v1.17.0")...))
	if err != nil {
		t.Fatalf("%s failed: %v", rr.Args, err)
	}

	// Pull the busybox image into the VM's docker daemon.
	image := "busybox"
	rr, err = Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "--", "docker", "pull", image))
	if err != nil {
		t.Fatalf("%s failed: %v", rr.Args, err)
	}

	// Restart minikube with v1.17.3, which has a preloaded tarball.
	rr, err = Run(t, exec.CommandContext(ctx, Target(), buildStartArgs("v1.17.3")...))
	if err != nil {
		t.Fatalf("%s failed: %v", rr.Args, err)
	}

	// The pulled image must still be present after the restart.
	rr, err = Run(t, exec.CommandContext(ctx, Target(), "ssh", "-p", profile, "--", "docker", "images"))
	if err != nil {
		t.Fatalf("%s failed: %v", rr.Args, err)
	}
	if !strings.Contains(rr.Output(), image) {
		t.Fatalf("Expected to find %s in output of `docker images`, instead got %s", image, rr.Output())
	}
}
// testPodScheduling asserts that this configuration can schedule new pods
func testPodScheduling(ctx context.Context, t *testing.T, profile string) {
t.Helper()