Merge branch 'master' into no-overwrite-tar
commit e45b230622

@@ -88,6 +88,24 @@ func init() {
 	RootCmd.AddCommand(deleteCmd)
 }
 
+func deleteContainersAndVolumes() {
+	delLabel := fmt.Sprintf("%s=%s", oci.CreatedByLabelKey, "true")
+	errs := oci.DeleteContainersByLabel(oci.Docker, delLabel)
+	if len(errs) > 0 { // it will error if there is no container to delete
+		glog.Infof("error delete containers by label %q (might be okay): %+v", delLabel, errs)
+	}
+
+	errs = oci.DeleteAllVolumesByLabel(oci.Docker, delLabel)
+	if len(errs) > 0 { // it will not error if there is nothing to delete
+		glog.Warningf("error delete volumes by label %q (might be okay): %+v", delLabel, errs)
+	}
+
+	errs = oci.PruneAllVolumesByLabel(oci.Docker, delLabel)
+	if len(errs) > 0 { // it will not error if there is nothing to delete
+		glog.Warningf("error pruning volumes by label %q (might be okay): %+v", delLabel, errs)
+	}
+}
+
 // runDelete handles the executes the flow of "minikube delete"
 func runDelete(cmd *cobra.Command, args []string) {
 	if len(args) > 0 {

@@ -110,23 +128,9 @@ func runDelete(cmd *cobra.Command, args []string) {
 	}
 
 	if deleteAll {
-		delLabel := fmt.Sprintf("%s=%s", oci.CreatedByLabelKey, "true")
-		errs := oci.DeleteContainersByLabel(oci.Docker, delLabel)
-		if len(errs) > 0 { // it will error if there is no container to delete
-			glog.Infof("error delete containers by label %q (might be okay): %+v", delLabel, err)
-		}
+		deleteContainersAndVolumes()
 
-		errs = oci.DeleteAllVolumesByLabel(oci.Docker, delLabel)
-		if len(errs) > 0 { // it will not error if there is nothing to delete
-			glog.Warningf("error delete volumes by label %q (might be okay): %+v", delLabel, errs)
-		}
-
-		errs = oci.PruneAllVolumesByLabel(oci.Docker, delLabel)
-		if len(errs) > 0 { // it will not error if there is nothing to delete
-			glog.Warningf("error pruning volumes by label %q (might be okay): %+v", delLabel, errs)
-		}
-
-		errs = DeleteProfiles(profilesToDelete)
+		errs := DeleteProfiles(profilesToDelete)
 		if len(errs) > 0 {
 			HandleDeletionErrors(errs)
 		} else {

@@ -185,13 +189,11 @@ func DeleteProfiles(profiles []*config.Profile) []error {
 	return errs
 }
 
-func deleteProfile(profile *config.Profile) error {
-	viper.Set(config.ProfileName, profile.Name)
-
-	delLabel := fmt.Sprintf("%s=%s", oci.ProfileLabelKey, profile.Name)
+func deleteProfileContainersAndVolumes(name string) {
+	delLabel := fmt.Sprintf("%s=%s", oci.ProfileLabelKey, name)
 	errs := oci.DeleteContainersByLabel(oci.Docker, delLabel)
 	if errs != nil { // it will error if there is no container to delete
-		glog.Infof("error deleting containers for %s (might be okay):\n%v", profile.Name, errs)
+		glog.Infof("error deleting containers for %s (might be okay):\n%v", name, errs)
 	}
 	errs = oci.DeleteAllVolumesByLabel(oci.Docker, delLabel)
 	if errs != nil { // it will not error if there is nothing to delete

@@ -202,6 +204,13 @@ func deleteProfile(profile *config.Profile) error {
 	if len(errs) > 0 { // it will not error if there is nothing to delete
 		glog.Warningf("error pruning volume (might be okay):\n%v", errs)
 	}
+}
+
+func deleteProfile(profile *config.Profile) error {
+	viper.Set(config.ProfileName, profile.Name)
+
+	deleteProfileContainersAndVolumes(profile.Name)
+
 	api, err := machine.NewAPIClient()
 	if err != nil {
 		delErr := profileDeletionErr(profile.Name, fmt.Sprintf("error getting client %v", err))

@@ -230,31 +239,13 @@ func deleteProfile(profile *config.Profile) error {
 		out.T(out.FailureType, "Failed to kill mount process: {{.error}}", out.V{"error": err})
 	}
 
-	if cc != nil {
-		for _, n := range cc.Nodes {
-			machineName := driver.MachineName(*cc, n)
-			if err = machine.DeleteHost(api, machineName); err != nil {
-				switch errors.Cause(err).(type) {
-				case mcnerror.ErrHostDoesNotExist:
-					glog.Infof("Host %s does not exist. Proceeding ahead with cleanup.", machineName)
-				default:
-					out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err})
-					out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": profile.Name})
-				}
-			}
-		}
-	}
+	deleteHosts(api, cc)
 
 	// In case DeleteHost didn't complete the job.
 	deleteProfileDirectory(profile.Name)
 
-	if err := config.DeleteProfile(profile.Name); err != nil {
-		if config.IsNotExist(err) {
-			delErr := profileDeletionErr(profile.Name, fmt.Sprintf("\"%s\" profile does not exist", profile.Name))
-			return DeletionError{Err: delErr, Errtype: MissingProfile}
-		}
-		delErr := profileDeletionErr(profile.Name, fmt.Sprintf("failed to remove profile %v", err))
-		return DeletionError{Err: delErr, Errtype: Fatal}
+	if err := deleteConfig(profile.Name); err != nil {
+		return err
 	}
 
+	if err := deleteContext(profile.Name); err != nil {

@@ -264,6 +255,35 @@ func deleteProfile(profile *config.Profile) error {
 	return nil
 }
 
+func deleteHosts(api libmachine.API, cc *config.ClusterConfig) {
+	if cc != nil {
+		for _, n := range cc.Nodes {
+			machineName := driver.MachineName(*cc, n)
+			if err := machine.DeleteHost(api, machineName); err != nil {
+				switch errors.Cause(err).(type) {
+				case mcnerror.ErrHostDoesNotExist:
+					glog.Infof("Host %s does not exist. Proceeding ahead with cleanup.", machineName)
+				default:
+					out.T(out.FailureType, "Failed to delete cluster: {{.error}}", out.V{"error": err})
+					out.T(out.Notice, `You may need to manually remove the "{{.name}}" VM from your hypervisor`, out.V{"name": machineName})
+				}
+			}
+		}
+	}
+}
+
+func deleteConfig(profileName string) error {
+	if err := config.DeleteProfile(profileName); err != nil {
+		if config.IsNotExist(err) {
+			delErr := profileDeletionErr(profileName, fmt.Sprintf("\"%s\" profile does not exist", profileName))
+			return DeletionError{Err: delErr, Errtype: MissingProfile}
+		}
+		delErr := profileDeletionErr(profileName, fmt.Sprintf("failed to remove profile %v", err))
+		return DeletionError{Err: delErr, Errtype: Fatal}
+	}
+	return nil
+}
+
+func deleteContext(machineName string) error {
+	if err := kubeconfig.DeleteContext(machineName); err != nil {
+		return DeletionError{Err: fmt.Errorf("update config: %v", err), Errtype: Fatal}
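The hunks above pull minikube's Docker cleanup into `deleteContainersAndVolumes` and `deleteProfileContainersAndVolumes`, both built on label-filtered deletion that collects errors and keeps going instead of aborting on the first failure. A minimal standalone sketch of that pattern, using only the `docker` CLI; the label key and the exact flags here are illustrative, not minikube's `oci` helpers:

```go
// Error-collecting, label-based container cleanup: one stuck container
// does not prevent the rest from being removed.
package main

import (
	"fmt"
	"os/exec"
	"strings"
)

// deleteContainersByLabel removes every container carrying the label,
// accumulating per-container errors instead of stopping at the first one.
func deleteContainersByLabel(label string) []error {
	var errs []error
	out, err := exec.Command("docker", "ps", "-aq", "--filter", "label="+label).Output()
	if err != nil {
		return []error{fmt.Errorf("listing containers: %v", err)}
	}
	for _, id := range strings.Fields(string(out)) {
		if err := exec.Command("docker", "rm", "-f", "-v", id).Run(); err != nil {
			errs = append(errs, fmt.Errorf("delete %s: %v", id, err))
		}
	}
	return errs
}

func main() {
	// Label value mirrors the diff's CreatedByLabelKey usage; the key name
	// here is an assumption for illustration only.
	if errs := deleteContainersByLabel("created_by.minikube.sigs.k8s.io=true"); len(errs) > 0 {
		// As in the diff's "might be okay" logging: report, but do not fail.
		fmt.Printf("cleanup warnings (might be okay): %v\n", errs)
	}
}
```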
@@ -116,7 +116,7 @@ const (
 	minUsableMem      = 1024 // Kubernetes will not start with less than 1GB
 	minRecommendedMem = 2000 // Warn at no lower than existing configurations
 	minimumCPUS       = 2
-	minimumDiskSize   = "2000mb"
+	minimumDiskSize   = 2000
 	autoUpdate        = "auto-update-drivers"
 	hostOnlyNicType   = "host-only-nic-type"
 	natNicType        = "nat-nic-type"

@@ -337,14 +337,7 @@ func runStart(cmd *cobra.Command, args []string) {
 		ssh.SetDefaultClient(ssh.External)
 	}
 
-	var existingAddons map[string]bool
-	if viper.GetBool(installAddons) {
-		existingAddons = map[string]bool{}
-		if existing != nil && existing.Addons != nil {
-			existingAddons = existing.Addons
-		}
-	}
-	kubeconfig, err := node.Start(mc, n, true, existingAddons)
+	kubeconfig, err := startNode(existing, mc, n)
 	if err != nil {
 		exit.WithError("Starting node", err)
 	}

@@ -389,6 +382,17 @@ func displayEnviron(env []string) {
 	}
 }
 
+func startNode(existing *config.ClusterConfig, mc config.ClusterConfig, n config.Node) (*kubeconfig.Settings, error) {
+	var existingAddons map[string]bool
+	if viper.GetBool(installAddons) {
+		existingAddons = map[string]bool{}
+		if existing != nil && existing.Addons != nil {
+			existingAddons = existing.Addons
+		}
+	}
+	return node.Start(mc, n, true, existingAddons)
+}
+
 func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName string) error {
 	if kcs.KeepContext {
 		out.T(out.Kubectl, "To connect to this cluster, use: kubectl --context={{.name}}", out.V{"name": kcs.ClusterName})

@@ -427,8 +431,11 @@ func showKubectlInfo(kcs *kubeconfig.Settings, k8sVersion string, machineName string) error {
 	glog.Infof("kubectl: %s, cluster: %s (minor skew: %d)", client, cluster, minorSkew)
 
 	if client.Major != cluster.Major || minorSkew > 1 {
-		out.WarningT("{{.path}} is version {{.client_version}}, and is incompatible with Kubernetes {{.cluster_version}}. You will need to update {{.path}} or use 'minikube kubectl' to connect with this cluster",
+		out.Ln("")
+		out.T(out.Warning, "{{.path}} is v{{.client_version}}, which may be incompatible with Kubernetes v{{.cluster_version}}.",
 			out.V{"path": path, "client_version": client, "cluster_version": cluster})
+		out.T(out.Tip, "You can also use 'minikube kubectl -- get pods' to invoke a matching version",
+			out.V{"path": path, "client_version": client})
 	}
 	return nil
 }

@@ -638,43 +645,62 @@ func validateUser(drvName string) {
 	}
 }
 
-// defaultMemorySize calculates the default memory footprint in MB
-func defaultMemorySize(drvName string) int {
-	fallback := 2200
-	maximum := 6000
-
+// memoryLimits returns the amount of memory allocated to the system and hypervisor
+func memoryLimits(drvName string) (int, int, error) {
 	v, err := mem.VirtualMemory()
 	if err != nil {
-		return fallback
+		return -1, -1, err
 	}
-	available := v.Total / 1024 / 1024
+	sysLimit := int(v.Total / 1024 / 1024)
+	containerLimit := 0
 
 	// For KIC, do not allocate more memory than the container has available (+ some slack)
 	if driver.IsKIC(drvName) {
 		s, err := oci.DaemonInfo(drvName)
 		if err != nil {
-			return fallback
+			return -1, -1, err
 		}
-		maximum = int(s.TotalMemory/1024/1024) - 128
+		containerLimit = int(s.TotalMemory / 1024 / 1024)
 	}
+	return sysLimit, containerLimit, nil
+}
+
+// suggestMemoryAllocation calculates the default memory footprint in MB
+func suggestMemoryAllocation(sysLimit int, containerLimit int) int {
+	fallback := 2200
+	maximum := 6000
+
+	if sysLimit > 0 && fallback > sysLimit {
+		return sysLimit
+	}
 
-	suggested := int(available / 4)
+	// If there are container limits, add tiny bit of slack for non-minikube components
+	if containerLimit > 0 {
+		if fallback > containerLimit {
+			return containerLimit
+		}
+		maximum = containerLimit - 48
+	}
 
+	// Suggest 25% of RAM, rounded to nearest 100MB. Hyper-V requires an even number!
+	suggested := int(float32(sysLimit)/400.0) * 100
+
 	if suggested > maximum {
-		suggested = maximum
+		return maximum
 	}
 
 	if suggested < fallback {
-		suggested = fallback
+		return fallback
 	}
 
-	glog.Infof("Selecting memory default of %dMB, given %dMB available and %dMB maximum", suggested, available, maximum)
 	return suggested
 }
 
 // validateMemorySize validates the memory size matches the minimum recommended
 func validateMemorySize() {
-	req := pkgutil.CalculateSizeInMB(viper.GetString(memory))
+	req, err := pkgutil.CalculateSizeInMB(viper.GetString(memory))
+	if err != nil {
+		exit.WithCodeT(exit.Config, "Unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err})
+	}
 	if req < minUsableMem && !viper.GetBool(force) {
 		exit.WithCodeT(exit.Config, "Requested memory allocation {{.requested}}MB is less than the usable minimum of {{.minimum}}MB",
 			out.V{"requested": req, "mininum": minUsableMem})

@@ -707,9 +733,13 @@ func validateCPUCount(local bool) {
 // validateFlags validates the supplied flags against known bad combinations
 func validateFlags(cmd *cobra.Command, drvName string) {
 	if cmd.Flags().Changed(humanReadableDiskSize) {
-		diskSizeMB := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
-		if diskSizeMB < pkgutil.CalculateSizeInMB(minimumDiskSize) && !viper.GetBool(force) {
-			exit.WithCodeT(exit.Config, "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}", out.V{"requested_size": diskSizeMB, "minimum_size": pkgutil.CalculateSizeInMB(minimumDiskSize)})
+		diskSizeMB, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
+		if err != nil {
+			exit.WithCodeT(exit.Config, "Validation unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err})
+		}
+
+		if diskSizeMB < minimumDiskSize && !viper.GetBool(force) {
+			exit.WithCodeT(exit.Config, "Requested disk size {{.requested_size}} is less than minimum of {{.minimum_size}}", out.V{"requested_size": diskSizeMB, "minimum_size": minimumDiskSize})
 		}
 	}
 

@@ -817,9 +847,20 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
 		kubeNodeName = "m01"
 	}
 
-	mem := defaultMemorySize(drvName)
-	if viper.GetString(memory) != "" {
-		mem = pkgutil.CalculateSizeInMB(viper.GetString(memory))
+	sysLimit, containerLimit, err := memoryLimits(drvName)
+	if err != nil {
+		glog.Warningf("Unable to query memory limits: %v", err)
+	}
+
+	mem := suggestMemoryAllocation(sysLimit, containerLimit)
+	if cmd.Flags().Changed(memory) {
+		mem, err = pkgutil.CalculateSizeInMB(viper.GetString(memory))
+		if err != nil {
+			exit.WithCodeT(exit.Config, "Generate unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err})
+		}
+
+	} else {
+		glog.Infof("Using suggested %dMB memory alloc based on sys=%dMB, container=%dMB", mem, sysLimit, containerLimit)
 	}
 
 	// Create the initial node, which will necessarily be a control plane

@@ -831,6 +872,11 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
 		Worker: true,
 	}
 
+	diskSize, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
+	if err != nil {
+		exit.WithCodeT(exit.Config, "Generate unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err})
+	}
+
 	cfg := config.ClusterConfig{
 		Name:        viper.GetString(config.ProfileName),
 		KeepContext: viper.GetBool(keepContext),

@@ -838,7 +884,7 @@ func generateCfgFromFlags(cmd *cobra.Command, k8sVersion string, drvName string)
 		MinikubeISO:        viper.GetString(isoURL),
 		Memory:             mem,
 		CPUs:               viper.GetInt(cpus),
-		DiskSize:           pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize)),
+		DiskSize:           diskSize,
 		Driver:             drvName,
 		HyperkitVpnKitSock: viper.GetString(vpnkitSock),
 		HyperkitVSockPorts: viper.GetStringSlice(vsockPorts),
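The refactor above splits memory detection (`memoryLimits`) from policy (`suggestMemoryAllocation`): suggest 25% of system RAM, truncated down to a 100MB boundary via `int(float32(sysLimit)/400.0) * 100`, then clamp between the 2200MB fallback and a 6000MB ceiling (or the container limit minus 48MB of slack under KIC drivers). A self-contained sketch of the same arithmetic, with a few worked values matching the test table added later in this diff:

```go
// Standalone copy of the suggestion heuristic from the diff above.
package main

import "fmt"

func suggest(sysLimit, containerLimit int) int {
	fallback, maximum := 2200, 6000
	if sysLimit > 0 && fallback > sysLimit {
		return sysLimit // tiny hosts get everything they have
	}
	if containerLimit > 0 {
		if fallback > containerLimit {
			return containerLimit
		}
		maximum = containerLimit - 48 // slack for non-minikube components
	}
	suggested := int(float32(sysLimit)/400.0) * 100 // 25% of RAM on a 100MB boundary
	if suggested > maximum {
		return maximum
	}
	if suggested < fallback {
		return fallback
	}
	return suggested
}

func main() {
	// 16384MB host: 16384/400 = 40.96 -> 40 -> 4000MB, inside [2200, 6000].
	fmt.Println(suggest(16384, 0)) // 4000
	// 128GB host: 25% would be 32000MB, so the 6000MB ceiling applies.
	fmt.Println(suggest(128000, 0)) // 6000
	// 2GB host is below the 2200MB fallback, so all of it is suggested.
	fmt.Println(suggest(2048, 0)) // 2048
}
```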
@@ -71,8 +71,9 @@ func TestGetKuberneterVersion(t *testing.T) {
 }
 
 func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) {
-	viper.SetDefault(memory, defaultMemorySize)
+	// Set default disk size value in lieu of flag init
+	viper.SetDefault(humanReadableDiskSize, defaultDiskSize)
 
 	originalEnv := os.Getenv("HTTP_PROXY")
 	defer func() {
 		err := os.Setenv("HTTP_PROXY", originalEnv)

@@ -124,3 +125,34 @@ func TestGenerateCfgFromFlagsHTTPProxyHandling(t *testing.T) {
 		})
 	}
 }
+
+func TestSuggestMemoryAllocation(t *testing.T) {
+	var tests = []struct {
+		description    string
+		sysLimit       int
+		containerLimit int
+		want           int
+	}{
+		{"128GB sys", 128000, 0, 6000},
+		{"64GB sys", 64000, 0, 6000},
+		{"16GB sys", 16384, 0, 4000},
+		{"odd sys", 14567, 0, 3600},
+		{"4GB sys", 4096, 0, 2200},
+		{"2GB sys", 2048, 0, 2048},
+		{"Unable to poll sys", 0, 0, 2200},
+		{"128GB sys, 16GB container", 128000, 16384, 16336},
+		{"64GB sys, 16GB container", 64000, 16384, 16000},
+		{"16GB sys, 4GB container", 16384, 4096, 4000},
+		{"4GB sys, 3.5GB container", 16384, 3500, 3452},
+		{"2GB sys, 2GB container", 16384, 2048, 2048},
+		{"2GB sys, unable to poll container", 16384, 0, 4000},
+	}
+	for _, test := range tests {
+		t.Run(test.description, func(t *testing.T) {
+			got := suggestMemoryAllocation(test.sysLimit, test.containerLimit)
+			if got != test.want {
+				t.Errorf("defaultMemorySize(sys=%d, container=%d) = %d, want: %d", test.sysLimit, test.containerLimit, got, test.want)
+			}
+		})
+	}
+}
@@ -353,7 +353,7 @@ touch "${HTML_OUT}"
 gopogh_status=$(gopogh -in "${JSON_OUT}" -out "${HTML_OUT}" -name "${JOB_NAME}" -pr "${MINIKUBE_LOCATION}" -repo github.com/kubernetes/minikube/ -details "${COMMIT}") || true
 fail_num=$(echo $gopogh_status | jq '.NumberOfFail')
 test_num=$(echo $gopogh_status | jq '.NumberOfTests')
-pessimistic_status="$completed with ${fail_num} / ${test_num} failures in ${elapsed}"
+pessimistic_status="${fail_num} / ${test_num} failures"
 description="completed with ${status} in ${elapsed} minute(s)."
 if [ "$status" = "failure" ]; then
 	description="completed with ${pessimistic_status} in ${elapsed} minute(s)."
@@ -17,6 +17,7 @@ limitations under the License.
 package main
 
 import (
+	"bytes"
 	"flag"
 	"fmt"
 	"os"

@@ -92,7 +93,7 @@ func executePreloadImages() error {
 	defer os.Remove(baseDir)
 
 	if err := os.MkdirAll(baseDir, 0755); err != nil {
-		return err
+		return errors.Wrap(err, "mkdir")
 	}
 	if err := driver.Create(); err != nil {
 		return errors.Wrap(err, "creating kic driver")

@@ -123,7 +124,7 @@ func executePreloadImages() error {
 	}
 	// Create image tarball
 	if err := createImageTarball(); err != nil {
-		return err
+		return errors.Wrap(err, "create tarball")
 	}
 	return copyTarballToHost()
 }

@@ -139,7 +140,7 @@ func createImageTarball() error {
 	cmd := exec.Command("docker", args...)
 	cmd.Stdout = os.Stdout
 	if err := cmd.Run(); err != nil {
-		return errors.Wrap(err, "creating image tarball")
+		return errors.Wrapf(err, "tarball cmd: %s", cmd.Args)
 	}
 	return nil
 }

@@ -149,7 +150,7 @@ func copyTarballToHost() error {
 	cmd := exec.Command("docker", "cp", fmt.Sprintf("%s:/%s", profile, tarballFilename), dest)
 	cmd.Stdout = os.Stdout
 	if err := cmd.Run(); err != nil {
-		return errors.Wrap(err, "copying tarball to host")
+		return errors.Wrapf(err, "cp cmd: %s", cmd.Args)
 	}
 	return nil
 }

@@ -162,9 +163,11 @@ func deleteMinikube() error {
 
 func verifyDockerStorage() error {
 	cmd := exec.Command("docker", "info", "-f", "{{.Info.Driver}}")
+	var stderr bytes.Buffer
+	cmd.Stderr = &stderr
 	output, err := cmd.Output()
 	if err != nil {
-		return err
+		return fmt.Errorf("%v: %v:\n%s", cmd.Args, err, stderr.String())
 	}
 	driver := strings.Trim(string(output), " \n")
 	if driver != dockerStorageDriver {
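The `verifyDockerStorage` change above wires a `bytes.Buffer` into `cmd.Stderr` so that a failing `docker info` reports what the daemon actually printed rather than a bare exit status (`exec.Cmd.Output` only requires `Stdout` to be unset, so attaching `Stderr` first is safe). A runnable sketch of the same capture pattern; the `{{.Driver}}` template used here is the common form for `docker info -f` and may differ from the tool's own template:

```go
// Capture stderr separately so command failures carry the daemon's message.
package main

import (
	"bytes"
	"fmt"
	"os/exec"
	"strings"
)

func dockerStorageDriver() (string, error) {
	cmd := exec.Command("docker", "info", "-f", "{{.Driver}}")
	var stderr bytes.Buffer
	cmd.Stderr = &stderr // captured independently of stdout
	output, err := cmd.Output()
	if err != nil {
		// Include args and stderr, mirroring the fmt.Errorf in the diff.
		return "", fmt.Errorf("%v: %v:\n%s", cmd.Args, err, stderr.String())
	}
	return strings.Trim(string(output), " \n"), nil
}

func main() {
	driver, err := dockerStorageDriver()
	if err != nil {
		fmt.Println("error:", err)
		return
	}
	fmt.Println("storage driver:", driver)
}
```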
@@ -26,7 +26,6 @@ import (
 
 	"github.com/golang/glog"
 	"github.com/pkg/errors"
-	"github.com/spf13/viper"
 	"k8s.io/minikube/pkg/minikube/assets"
 	"k8s.io/minikube/pkg/minikube/command"
 	"k8s.io/minikube/pkg/minikube/config"

@@ -35,7 +34,6 @@ import (
 	"k8s.io/minikube/pkg/minikube/machine"
 	"k8s.io/minikube/pkg/minikube/out"
 	"k8s.io/minikube/pkg/minikube/storageclass"
-	pkgutil "k8s.io/minikube/pkg/util"
 )
 
 // defaultStorageClassProvisioner is the name of the default storage class provisioner

@@ -49,35 +47,34 @@ func Set(name, value, profile string) error {
 		return errors.Errorf("%s is not a valid addon", name)
 	}
 
-	// Run any additional validations for this property
-	if err := run(name, value, profile, a.validations); err != nil {
-		return errors.Wrap(err, "running validations")
-	}
-
-	// Set the value
-	c, err := config.Load(profile)
+	cc, err := config.Load(profile)
 	if err != nil {
 		return errors.Wrap(err, "loading profile")
 	}
 
-	if err := a.set(c, name, value); err != nil {
+	// Run any additional validations for this property
+	if err := run(cc, name, value, a.validations); err != nil {
+		return errors.Wrap(err, "running validations")
+	}
+
+	if err := a.set(cc, name, value); err != nil {
 		return errors.Wrap(err, "setting new value of addon")
 	}
 
 	// Run any callbacks for this property
-	if err := run(name, value, profile, a.callbacks); err != nil {
+	if err := run(cc, name, value, a.callbacks); err != nil {
 		return errors.Wrap(err, "running callbacks")
 	}
 
 	glog.Infof("Writing out %q config to set %s=%v...", profile, name, value)
-	return config.Write(profile, c)
+	return config.Write(profile, cc)
 }
 
 // Runs all the validation or callback functions and collects errors
-func run(name, value, profile string, fns []setFn) error {
+func run(cc *config.ClusterConfig, name string, value string, fns []setFn) error {
 	var errors []error
 	for _, fn := range fns {
-		err := fn(name, value, profile)
+		err := fn(cc, name, value)
 		if err != nil {
 			errors = append(errors, err)
 		}

@@ -89,21 +86,21 @@ func run(name, value, profile string, fns []setFn) error {
 }
 
 // SetBool sets a bool value
-func SetBool(m *config.ClusterConfig, name string, val string) error {
+func SetBool(cc *config.ClusterConfig, name string, val string) error {
 	b, err := strconv.ParseBool(val)
 	if err != nil {
 		return err
 	}
-	if m.Addons == nil {
-		m.Addons = map[string]bool{}
+	if cc.Addons == nil {
+		cc.Addons = map[string]bool{}
 	}
-	m.Addons[name] = b
+	cc.Addons[name] = b
 	return nil
 }
 
 // enableOrDisableAddon updates addon status executing any commands necessary
-func enableOrDisableAddon(name, val, profile string) error {
-	glog.Infof("Setting addon %s=%s in %q", name, val, profile)
+func enableOrDisableAddon(cc *config.ClusterConfig, name string, val string) error {
+	glog.Infof("Setting addon %s=%s in %q", name, val, cc.Name)
 	enable, err := strconv.ParseBool(val)
 	if err != nil {
 		return errors.Wrapf(err, "parsing bool: %s", name)

@@ -111,7 +108,7 @@ func enableOrDisableAddon(name, val, profile string) error {
 	addon := assets.Addons[name]
 
 	// check addon status before enabling/disabling it
-	alreadySet, err := isAddonAlreadySet(addon, enable, profile)
+	alreadySet, err := isAddonAlreadySet(addon, enable, cc.Name)
 	if err != nil {
 		out.ErrT(out.Conflict, "{{.error}}", out.V{"error": err})
 		return err

@@ -124,13 +121,14 @@ func enableOrDisableAddon(name, val, profile string) error {
 		}
 	}
 
-	if name == "istio" && enable {
+	if strings.HasPrefix(name, "istio") && enable {
 		minMem := 8192
-		minCpus := 4
-		memorySizeMB := pkgutil.CalculateSizeInMB(viper.GetString("memory"))
-		cpuCount := viper.GetInt("cpus")
-		if memorySizeMB < minMem || cpuCount < minCpus {
-			out.WarningT("Enable istio needs {{.minMem}} MB of memory and {{.minCpus}} CPUs.", out.V{"minMem": minMem, "minCpus": minCpus})
+		minCPUs := 4
+		if cc.Memory < minMem {
+			out.WarningT("Istio needs {{.minMem}}MB of memory -- your configuration only allocates {{.memory}}MB", out.V{"minMem": minMem, "memory": cc.Memory})
+		}
+		if cc.CPUs < minCPUs {
+			out.WarningT("Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs", out.V{"minCPUs": minCPUs, "cpus": cc.CPUs})
 		}
 	}
 

@@ -141,14 +139,15 @@ func enableOrDisableAddon(name, val, profile string) error {
 	}
 	defer api.Close()
 
-	cfg, err := config.Load(profile)
-	if err != nil && !config.IsNotExist(err) {
-		exit.WithCodeT(exit.Data, "Unable to load config: {{.error}}", out.V{"error": err})
+	cp, err := config.PrimaryControlPlane(cc)
+	if err != nil {
+		exit.WithError("Error getting primary control plane", err)
 	}
 
-	host, err := machine.CheckIfHostExistsAndLoad(api, profile)
-	if err != nil || !machine.IsHostRunning(api, profile) {
-		glog.Warningf("%q is not running, writing %s=%v to disk and skipping enablement (err=%v)", profile, addon.Name(), enable, err)
+	mName := driver.MachineName(*cc, cp)
+	host, err := machine.CheckIfHostExistsAndLoad(api, mName)
+	if err != nil || !machine.IsHostRunning(api, mName) {
+		glog.Warningf("%q is not running, writing %s=%v to disk and skipping enablement (err=%v)", mName, addon.Name(), enable, err)
 		return nil
 	}
 

@@ -157,8 +156,8 @@ func enableOrDisableAddon(name, val, profile string) error {
 		return errors.Wrap(err, "command runner")
 	}
 
-	data := assets.GenerateTemplateData(cfg.KubernetesConfig)
-	return enableOrDisableAddonInternal(addon, cmd, data, enable, profile)
+	data := assets.GenerateTemplateData(cc.KubernetesConfig)
+	return enableOrDisableAddonInternal(cc, addon, cmd, data, enable)
 }
 
 func isAddonAlreadySet(addon *assets.Addon, enable bool, profile string) (bool, error) {

@@ -176,7 +175,7 @@ func isAddonAlreadySet(addon *assets.Addon, enable bool, profile string) (bool, error) {
 	return false, nil
 }
 
-func enableOrDisableAddonInternal(addon *assets.Addon, cmd command.Runner, data interface{}, enable bool, profile string) error {
+func enableOrDisableAddonInternal(cc *config.ClusterConfig, addon *assets.Addon, cmd command.Runner, data interface{}, enable bool) error {
 	deployFiles := []string{}
 
 	for _, addon := range addon.Assets {

@@ -211,10 +210,7 @@ func enableOrDisableAddonInternal(addon *assets.Addon, cmd command.Runner, data interface{}, enable bool, profile string) error {
 		}
 	}
 
-	command, err := kubectlCommand(profile, deployFiles, enable)
-	if err != nil {
-		return err
-	}
+	command := kubectlCommand(cc, deployFiles, enable)
 	glog.Infof("Running: %v", command)
 	rr, err := cmd.RunCmd(command)
 	if err != nil {

@@ -225,8 +221,8 @@ func enableOrDisableAddonInternal(addon *assets.Addon, cmd command.Runner, data interface{}, enable bool, profile string) error {
 }
 
 // enableOrDisableStorageClasses enables or disables storage classes
-func enableOrDisableStorageClasses(name, val, profile string) error {
-	glog.Infof("enableOrDisableStorageClasses %s=%v on %q", name, val, profile)
+func enableOrDisableStorageClasses(cc *config.ClusterConfig, name string, val string) error {
+	glog.Infof("enableOrDisableStorageClasses %s=%v on %q", name, val, cc.Name)
 	enable, err := strconv.ParseBool(val)
 	if err != nil {
 		return errors.Wrap(err, "Error parsing boolean")

@@ -247,18 +243,13 @@ func enableOrDisableStorageClasses(name, val, profile string) error {
 	}
 	defer api.Close()
 
-	cc, err := config.Load(profile)
-	if err != nil {
-		return errors.Wrap(err, "getting cluster")
-	}
-
 	cp, err := config.PrimaryControlPlane(cc)
 	if err != nil {
 		return errors.Wrap(err, "getting control plane")
 	}
 	if !machine.IsHostRunning(api, driver.MachineName(*cc, cp)) {
-		glog.Warningf("%q is not running, writing %s=%v to disk and skipping enablement", profile, name, val)
-		return enableOrDisableAddon(name, val, profile)
+		glog.Warningf("%q is not running, writing %s=%v to disk and skipping enablement", driver.MachineName(*cc, cp), name, val)
+		return enableOrDisableAddon(cc, name, val)
 	}
 
 	if enable {

@@ -275,7 +266,7 @@ func enableOrDisableStorageClasses(name, val, profile string) error {
 		}
 	}
 
-	return enableOrDisableAddon(name, val, profile)
+	return enableOrDisableAddon(cc, name, val)
 }
 
 // Start enables the default addons for a profile, plus any additional
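The addons refactor above changes `setFn` from `(name, value, profile string)` to `(*config.ClusterConfig, string, string)`, so the cluster config is loaded once in `Set` and threaded through validations and callbacks instead of each function re-loading it from disk by profile name. A minimal sketch of that shape, with simplified stand-in types rather than minikube's real ones:

```go
// Thread a loaded config through a pipeline of validation/callback funcs,
// collecting errors rather than short-circuiting.
package main

import "fmt"

type ClusterConfig struct {
	Name   string
	Addons map[string]bool
}

// setFn matches the new signature: config first, then name and value.
type setFn func(cc *ClusterConfig, name, value string) error

// run applies every function, accumulating errors like the diff's run().
func run(cc *ClusterConfig, name, value string, fns []setFn) error {
	var errs []error
	for _, fn := range fns {
		if err := fn(cc, name, value); err != nil {
			errs = append(errs, err)
		}
	}
	if len(errs) > 0 {
		return fmt.Errorf("%v", errs)
	}
	return nil
}

func setBool(cc *ClusterConfig, name, value string) error {
	if cc.Addons == nil {
		cc.Addons = map[string]bool{}
	}
	cc.Addons[name] = value == "true"
	return nil
}

func main() {
	cc := &ClusterConfig{Name: "minikube"}
	if err := run(cc, "gvisor", "true", []setFn{setBool}); err != nil {
		fmt.Println("error:", err)
	}
	fmt.Println(cc.Addons) // map[gvisor:true]
}
```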
@@ -44,7 +44,15 @@ func createTestProfile(t *testing.T) string {
 	if err := os.MkdirAll(config.ProfileFolderPath(name), 0777); err != nil {
 		t.Fatalf("error creating temporary directory")
 	}
-	if err := config.DefaultLoader.WriteConfigToFile(name, &config.ClusterConfig{}); err != nil {
+
+	cc := &config.ClusterConfig{
+		Name:             name,
+		CPUs:             2,
+		Memory:           2500,
+		KubernetesConfig: config.KubernetesConfig{},
+	}
+
+	if err := config.DefaultLoader.WriteConfigToFile(name, cc); err != nil {
 		t.Fatalf("error creating temporary profile config: %v", err)
 	}
 	return name
@@ -18,7 +18,7 @@ package addons
 
 import "k8s.io/minikube/pkg/minikube/config"
 
-type setFn func(string, string, string) error
+type setFn func(*config.ClusterConfig, string, string) error
 
 // Addon represents an addon
 type Addon struct {

@@ -54,7 +54,7 @@ var Addons = []*Addon{
 	{
 		name:        "gvisor",
 		set:         SetBool,
-		validations: []setFn{IsContainerdRuntime},
+		validations: []setFn{IsRuntimeContainerd},
 		callbacks:   []setFn{enableOrDisableAddon},
 	},
 	{
@@ -26,16 +26,12 @@ import (
 	"k8s.io/minikube/pkg/minikube/vmpath"
 )
 
-var (
-	// For testing
-	k8sVersion = kubernetesVersion
-)
-
-func kubectlCommand(profile string, files []string, enable bool) (*exec.Cmd, error) {
-	v, err := k8sVersion(profile)
-	if err != nil {
-		return nil, err
+func kubectlCommand(cc *config.ClusterConfig, files []string, enable bool) *exec.Cmd {
+	v := constants.DefaultKubernetesVersion
+	if cc != nil {
+		v = cc.KubernetesConfig.KubernetesVersion
 	}
 
 	kubectlBinary := kubectlBinaryPath(v)
 
 	kubectlAction := "apply"

@@ -48,20 +44,7 @@ func kubectlCommand(profile string, files []string, enable bool) (*exec.Cmd, error) {
 		args = append(args, []string{"-f", f}...)
 	}
 
-	cmd := exec.Command("sudo", args...)
-	return cmd, nil
-}
-
-func kubernetesVersion(profile string) (string, error) {
-	cc, err := config.Load(profile)
-	if err != nil && !config.IsNotExist(err) {
-		return "", err
-	}
-	version := constants.DefaultKubernetesVersion
-	if cc != nil {
-		version = cc.KubernetesConfig.KubernetesVersion
-	}
-	return version, nil
+	return exec.Command("sudo", args...)
 }
 
 func kubectlBinaryPath(version string) string {
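`kubectlCommand` above now derives the Kubernetes version straight from the passed-in config and returns the `*exec.Cmd` with no error path, since nothing in the construction can fail. A hedged sketch of the kind of invocation it builds; the binary and kubeconfig paths here are illustrative assumptions, not the exact constants from minikube's `vmpath` package:

```go
// Build a version-matched "sudo KUBECONFIG=... kubectl apply/delete -f ..."
// command without touching disk.
package main

import (
	"fmt"
	"os/exec"
)

func kubectlCommand(version string, files []string, enable bool) *exec.Cmd {
	action := "apply"
	if !enable {
		action = "delete"
	}
	// Hypothetical in-VM paths, for illustration only.
	binary := fmt.Sprintf("/var/lib/minikube/binaries/%s/kubectl", version)
	args := []string{"KUBECONFIG=/var/lib/minikube/kubeconfig", binary, action}
	for _, f := range files {
		args = append(args, "-f", f)
	}
	// sudo accepts VAR=value assignments before the command name.
	return exec.Command("sudo", args...)
}

func main() {
	cmd := kubectlCommand("v1.17.0", []string{"/etc/kubernetes/addons/storageclass.yaml"}, true)
	fmt.Println(cmd.Args)
}
```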
@@ -19,6 +19,8 @@ package addons
 import (
 	"strings"
 	"testing"
+
+	"k8s.io/minikube/pkg/minikube/config"
 )
 
 func TestKubectlCommand(t *testing.T) {

@@ -41,18 +43,15 @@ func TestKubectlCommand(t *testing.T) {
 		},
 	}
 
+	cc := &config.ClusterConfig{
+		KubernetesConfig: config.KubernetesConfig{
+			KubernetesVersion: "v1.17.0",
+		},
+	}
+
 	for _, test := range tests {
 		t.Run(test.description, func(t *testing.T) {
-			originalK8sVersion := k8sVersion
-			defer func() { k8sVersion = originalK8sVersion }()
-			k8sVersion = func(_ string) (string, error) {
-				return "v1.17.0", nil
-			}
-
-			command, err := kubectlCommand("", test.files, test.enable)
-			if err != nil {
-				t.Fatalf("error getting kubectl command: %v", err)
-			}
+			command := kubectlCommand(cc, test.files, test.enable)
 			actual := strings.Join(command.Args, " ")
 
 			if actual != test.expected {
@@ -33,13 +33,9 @@ and then start minikube again with the following flags:
 
 minikube start --container-runtime=containerd --docker-opt containerd=/var/run/containerd/containerd.sock`
 
-// IsContainerdRuntime is a validator which returns an error if the current runtime is not containerd
-func IsContainerdRuntime(_, _, profile string) error {
-	config, err := config.Load(profile)
-	if err != nil {
-		return fmt.Errorf("config.Load: %v", err)
-	}
-	r, err := cruntime.New(cruntime.Config{Type: config.KubernetesConfig.ContainerRuntime})
+// IsRuntimeContainerd is a validator which returns an error if the current runtime is not containerd
+func IsRuntimeContainerd(cc *config.ClusterConfig, _, _ string) error {
+	r, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime})
 	if err != nil {
 		return err
 	}
@@ -81,7 +81,7 @@ func (d *Driver) Create() error {
 	// control plane specific options
 	params.PortMappings = append(params.PortMappings, oci.PortMapping{
 		ListenAddress: oci.DefaultBindIPV4,
-		ContainerPort: constants.APIServerPort,
+		ContainerPort: int32(params.APIServerPort),
 	},
 		oci.PortMapping{
 			ListenAddress: oci.DefaultBindIPV4,
@@ -103,6 +103,7 @@ func allVolumesByLabel(ociBin string, label string) ([]string, error) {
 // to the volume named volumeName
 func ExtractTarballToVolume(tarballPath, volumeName, imageName string) error {
 	cmd := exec.Command(Docker, "run", "--rm", "--entrypoint", "/usr/bin/tar", "-v", fmt.Sprintf("%s:/preloaded.tar:ro", tarballPath), "-v", fmt.Sprintf("%s:/extractDir", volumeName), imageName, "-I", "lz4", "-xvf", "/preloaded.tar", "-C", "/extractDir")
+	glog.Infof("executing: %s", cmd.Args)
 	if out, err := cmd.CombinedOutput(); err != nil {
 		return errors.Wrapf(err, "output %s", string(out))
 	}

@@ -114,6 +115,7 @@ func ExtractTarballToVolume(tarballPath, volumeName, imageName string) error {
 // TODO: this should be fixed as a part of https://github.com/kubernetes/minikube/issues/6530
 func createDockerVolume(profile string, nodeName string) error {
 	cmd := exec.Command(Docker, "volume", "create", nodeName, "--label", fmt.Sprintf("%s=%s", ProfileLabelKey, profile), "--label", fmt.Sprintf("%s=%s", CreatedByLabelKey, "true"))
+	glog.Infof("executing: %s", cmd.Args)
 	if out, err := cmd.CombinedOutput(); err != nil {
 		return errors.Wrapf(err, "output %s", string(out))
 	}
@@ -28,9 +28,7 @@ import (
 	"k8s.io/minikube/pkg/minikube/cruntime"
 )
 
-// NewKubeletConfig generates a new systemd unit containing a configured kubelet
-// based on the options present in the KubernetesConfig.
-func NewKubeletConfig(mc config.ClusterConfig, nc config.Node, r cruntime.Manager) ([]byte, error) {
+func extraKubeletOpts(mc config.ClusterConfig, nc config.Node, r cruntime.Manager) (map[string]string, error) {
 	k8s := mc.KubernetesConfig
 	version, err := ParseKubernetesVersion(k8s.KubernetesVersion)
 	if err != nil {

@@ -79,7 +77,18 @@ func NewKubeletConfig(mc config.ClusterConfig, nc config.Node, r cruntime.Manager) ([]byte, error) {
 		extraOpts["feature-gates"] = kubeletFeatureArgs
 	}
 
+	return extraOpts, nil
+}
+
+// NewKubeletConfig generates a new systemd unit containing a configured kubelet
+// based on the options present in the KubernetesConfig.
+func NewKubeletConfig(mc config.ClusterConfig, nc config.Node, r cruntime.Manager) ([]byte, error) {
 	b := bytes.Buffer{}
+	extraOpts, err := extraKubeletOpts(mc, nc, r)
+	if err != nil {
+		return nil, err
+	}
+	k8s := mc.KubernetesConfig
 	opts := struct {
 		ExtraOptions     string
 		ContainerRuntime string
@@ -41,6 +41,7 @@ import (
 	"k8s.io/minikube/pkg/drivers/kic"
 	"k8s.io/minikube/pkg/drivers/kic/oci"
 	"k8s.io/minikube/pkg/kapi"
+	"k8s.io/minikube/pkg/minikube/assets"
 	"k8s.io/minikube/pkg/minikube/bootstrapper"
 	"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil"
 	"k8s.io/minikube/pkg/minikube/bootstrapper/bsutil/kverify"

@@ -191,7 +192,7 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error {
 	// Allow older kubeadm versions to function with newer Docker releases.
 	// For kic on linux example error: "modprobe: FATAL: Module configs not found in directory /lib/modules/5.2.17-1rodete3-amd64"
 	if version.LT(semver.MustParse("1.13.0")) || driver.IsKIC(cfg.Driver) {
-		glog.Infof("Older Kubernetes release detected (%s), disabling SystemVerification check.", version)
+		glog.Info("ignoring SystemVerification for kubeadm because of either driver or kubernetes version")
 		ignore = append(ignore, "SystemVerification")
 	}
 

@@ -280,7 +281,10 @@ func (k *Bootstrapper) WaitForCluster(cfg config.ClusterConfig, timeout time.Duration) error {
 		return errors.Wrap(err, "get k8s client")
 	}
 
-	return kverify.SystemPods(c, start, timeout)
+	if err := kverify.SystemPods(c, start, timeout); err != nil {
+		return errors.Wrap(err, "waiting for system pods")
+	}
+	return nil
 }
 
 // restartCluster restarts the Kubernetes cluster configured by kubeadm

@@ -421,10 +425,9 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error {
 
 	glog.Infof("kubelet %s config:\n%+v", kubeletCfg, cfg.KubernetesConfig)
 
-	stopCmd := exec.Command("/bin/bash", "-c", "pgrep kubelet && sudo systemctl stop kubelet")
 	// stop kubelet to avoid "Text File Busy" error
-	if rr, err := k.c.RunCmd(stopCmd); err != nil {
-		glog.Warningf("unable to stop kubelet: %s command: %q output: %q", err, rr.Command(), rr.Output())
+	if err := stopKubelet(k.c); err != nil {
+		glog.Warningf("unable to stop kubelet: %s", err)
 	}
 
 	if err := bsutil.TransferBinaries(cfg.KubernetesConfig, k.c); err != nil {

@@ -436,24 +439,46 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error {
 		cniFile = []byte(defaultCNIConfig)
 	}
 	files := bsutil.ConfigFileAssets(cfg.KubernetesConfig, kubeadmCfg, kubeletCfg, kubeletService, cniFile)
+	if err := copyFiles(k.c, files); err != nil {
+		return err
+	}
+
+	if err := startKubelet(k.c); err != nil {
+		return err
+	}
+	return nil
+}
+
+func stopKubelet(runner command.Runner) error {
+	stopCmd := exec.Command("/bin/bash", "-c", "pgrep kubelet && sudo systemctl stop kubelet")
+	if rr, err := runner.RunCmd(stopCmd); err != nil {
+		return errors.Wrapf(err, "command: %q output: %q", rr.Command(), rr.Output())
+	}
+	return nil
+}
+
+func copyFiles(runner command.Runner, files []assets.CopyableFile) error {
 	// Combine mkdir request into a single call to reduce load
 	dirs := []string{}
 	for _, f := range files {
 		dirs = append(dirs, f.GetTargetDir())
 	}
 	args := append([]string{"mkdir", "-p"}, dirs...)
-	if _, err := k.c.RunCmd(exec.Command("sudo", args...)); err != nil {
+	if _, err := runner.RunCmd(exec.Command("sudo", args...)); err != nil {
 		return errors.Wrap(err, "mkdir")
 	}
 
 	for _, f := range files {
-		if err := k.c.Copy(f); err != nil {
+		if err := runner.Copy(f); err != nil {
 			return errors.Wrapf(err, "copy")
 		}
 	}
+	return nil
+}
 
-	if _, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", "sudo systemctl daemon-reload && sudo systemctl start kubelet")); err != nil {
+func startKubelet(runner command.Runner) error {
+	startCmd := exec.Command("/bin/bash", "-c", "sudo systemctl daemon-reload && sudo systemctl start kubelet")
+	if _, err := runner.RunCmd(startCmd); err != nil {
 		return errors.Wrap(err, "starting kubelet")
 	}
 	return nil
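`stopKubelet`, `startKubelet`, and `copyFiles` above take a `command.Runner` argument instead of reaching through the bootstrapper's field, which makes each helper independently testable against a fake runner. A small sketch of that dependency-injection shape, with a minimal runner interface assumed in place of minikube's:

```go
// Helpers that depend on a narrow runner interface rather than a concrete
// host connection; a test can substitute a fake that records commands.
package main

import (
	"fmt"
	"os/exec"
)

// runner is the minimal surface the helpers need (an assumption here;
// minikube's command.Runner is richer).
type runner interface {
	RunCmd(cmd *exec.Cmd) error
}

type hostRunner struct{}

func (hostRunner) RunCmd(cmd *exec.Cmd) error { return cmd.Run() }

// stopKubelet stops kubelet before its binary is replaced, avoiding the
// "Text File Busy" error noted in the diff. pgrep makes the pipeline fail
// when kubelet is not running, which callers treat as a warning only.
func stopKubelet(r runner) error {
	cmd := exec.Command("/bin/bash", "-c", "pgrep kubelet && sudo systemctl stop kubelet")
	if err := r.RunCmd(cmd); err != nil {
		return fmt.Errorf("stop kubelet: %v", err)
	}
	return nil
}

func startKubelet(r runner) error {
	cmd := exec.Command("/bin/bash", "-c", "sudo systemctl daemon-reload && sudo systemctl start kubelet")
	if err := r.RunCmd(cmd); err != nil {
		return fmt.Errorf("start kubelet: %v", err)
	}
	return nil
}

func main() {
	r := hostRunner{}
	if err := stopKubelet(r); err != nil {
		fmt.Println("warning (kubelet may simply not be running):", err)
	}
	if err := startKubelet(r); err != nil {
		fmt.Println("error:", err)
	}
}
```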
@@ -34,7 +34,7 @@ func TestListProfiles(t *testing.T) {
 		vmDriver string
 	}{
 		{0, "p1", "hyperkit"},
-		{1, "p2", "virtualbox"},
+		{1, "p2_newformat", "virtualbox"},
 	}
 
 	// test cases for invalid profiles

@@ -109,7 +109,7 @@ func TestProfileExists(t *testing.T) {
 		expected bool
 	}{
 		{"p1", true},
-		{"p2", true},
+		{"p2_newformat", true},
 		{"p3_empty", true},
 		{"p4_invalid_file", true},
 		{"p5_partial_config", true},

@@ -218,3 +218,47 @@ func TestDeleteProfile(t *testing.T) {
 	}
 
 }
+
+func TestGetPrimaryControlPlane(t *testing.T) {
+	miniDir, err := filepath.Abs("./testdata/.minikube2")
+	if err != nil {
+		t.Errorf("error getting dir path for ./testdata/.minikube : %v", err)
+	}
+
+	var tests = []struct {
+		description  string
+		profile      string
+		expectedIP   string
+		expectedPort int
+		expectedName string
+	}{
+		{"old style", "p1", "192.168.64.75", 8443, "minikube"},
+		{"new style", "p2_newformat", "192.168.99.136", 8443, "m01"},
+	}
+
+	for _, tc := range tests {
+		cc, err := DefaultLoader.LoadConfigFromFile(tc.profile, miniDir)
+		if err != nil {
+			t.Fatalf("Failed to load config for %s", tc.description)
+		}
+
+		n, err := PrimaryControlPlane(cc)
+		if err != nil {
+			t.Fatalf("Unexpexted error getting primary control plane: %v", err)
+		}
+
+		if n.Name != tc.expectedName {
+			t.Errorf("Unexpected name. expected: %s, got: %s", tc.expectedName, n.Name)
+		}
+
+		if n.IP != tc.expectedIP {
+			t.Errorf("Unexpected name. expected: %s, got: %s", tc.expectedIP, n.IP)
+		}
+
+		if n.Port != tc.expectedPort {
+			t.Errorf("Unexpected name. expected: %d, got: %d", tc.expectedPort, n.Port)
+		}
+
+	}
+
+}
@@ -29,9 +29,6 @@
 	},
 	"KubernetesConfig": {
 		"KubernetesVersion": "v1.15.0",
-		"NodeIP": "192.168.99.136",
-		"NodePort": 8443,
-		"NodeName": "minikube",
 		"APIServerName": "minikubeCA",
 		"APIServerNames": null,
 		"APIServerIPs": null,

@@ -45,5 +42,15 @@
 		"ExtraOptions": null,
 		"ShouldLoadCachedImages": true,
 		"EnableDefaultCNI": false
-	}
+	},
+	"Nodes": [
+		{
+			"Name": "m01",
+			"IP": "192.168.99.136",
+			"Port": 8443,
+			"KubernetesVersion": "v1.15.0",
+			"ControlPlane": true,
+			"Worker": true
+		}
+	]
 }
@@ -1,5 +1,5 @@
 {
-	"Name": "p2",
+	"Name": "p2_newformat",
 	"KeepContext": false,
 	"MinikubeISO": "https://storage.googleapis.com/minikube/iso/minikube-v1.2.0.iso",
 	"Memory": 2000,

@@ -28,9 +28,6 @@
 	"HostDNSResolver": true,
 	"KubernetesConfig": {
 		"KubernetesVersion": "v1.15.0",
-		"NodeIP": "192.168.99.136",
-		"NodePort": 8443,
-		"NodeName": "minikube",
 		"APIServerName": "minikubeCA",
 		"APIServerNames": null,
 		"APIServerIPs": null,

@@ -44,5 +41,15 @@
 		"ExtraOptions": null,
 		"ShouldLoadCachedImages": true,
 		"EnableDefaultCNI": false
-	}
+	},
+	"Nodes": [
+		{
+			"Name": "m01",
+			"IP": "192.168.99.136",
+			"Port": 8443,
+			"KubernetesVersion": "v1.15.0",
+			"ControlPlane": true,
+			"Worker": true
+		}
+	]
 }
@@ -224,43 +224,60 @@ func (f *FakeRunner) Remove(assets.CopyableFile) error {
 	return nil
 }
 
+func (f *FakeRunner) dockerPs(args []string) (string, error) {
+	// ps -a --filter="name=apiserver" --format="{{.ID}}"
+	if args[1] == "-a" && strings.HasPrefix(args[2], "--filter") {
+		filter := strings.Split(args[2], `r=`)[1]
+		fname := strings.Split(filter, "=")[1]
+		ids := []string{}
+		f.t.Logf("fake docker: Looking for containers matching %q", fname)
+		for id, cname := range f.containers {
+			if strings.Contains(cname, fname) {
+				ids = append(ids, id)
+			}
+		}
+		f.t.Logf("fake docker: Found containers: %v", ids)
+		return strings.Join(ids, "\n"), nil
+	}
+	return "", nil
+}
+
+func (f *FakeRunner) dockerStop(args []string) (string, error) {
+	ids := strings.Split(args[1], " ")
+	for _, id := range ids {
+		f.t.Logf("fake docker: Stopping id %q", id)
+		if f.containers[id] == "" {
+			return "", fmt.Errorf("no such container")
+		}
+		delete(f.containers, id)
+	}
+	return "", nil
+}
+
+func (f *FakeRunner) dockerRm(args []string) (string, error) {
+	// Skip "-f" argument
+	for _, id := range args[2:] {
+		f.t.Logf("fake docker: Removing id %q", id)
+		if f.containers[id] == "" {
+			return "", fmt.Errorf("no such container")
+		}
+		delete(f.containers, id)
+	}
+	return "", nil
+}
+
 // docker is a fake implementation of docker
 func (f *FakeRunner) docker(args []string, _ bool) (string, error) {
 	switch cmd := args[0]; cmd {
 	case "ps":
-		// ps -a --filter="name=apiserver" --format="{{.ID}}"
-		if args[1] == "-a" && strings.HasPrefix(args[2], "--filter") {
-			filter := strings.Split(args[2], `r=`)[1]
-			fname := strings.Split(filter, "=")[1]
-			ids := []string{}
-			f.t.Logf("fake docker: Looking for containers matching %q", fname)
-			for id, cname := range f.containers {
-				if strings.Contains(cname, fname) {
-					ids = append(ids, id)
-				}
-			}
-			f.t.Logf("fake docker: Found containers: %v", ids)
-			return strings.Join(ids, "\n"), nil
-		}
-	case "stop":
-		ids := strings.Split(args[1], " ")
-		for _, id := range ids {
-			f.t.Logf("fake docker: Stopping id %q", id)
-			if f.containers[id] == "" {
-				return "", fmt.Errorf("no such container")
-			}
-			delete(f.containers, id)
-		}
-	case "rm":
-		// Skip "-f" argument
-		for _, id := range args[2:] {
-			f.t.Logf("fake docker: Removing id %q", id)
-			if f.containers[id] == "" {
-				return "", fmt.Errorf("no such container")
-			}
-			delete(f.containers, id)
-		}
+		return f.dockerPs(args)
+
+	case "stop":
+		return f.dockerStop(args)
+
+	case "rm":
+		return f.dockerRm(args)
+
 	case "version":
 		if args[1] == "--format" && args[2] == "{{.Server.Version}}" {
@@ -62,9 +62,11 @@ func Binary(binary, version, osName, archName string) (string, error) {
 		return "", errors.Wrapf(err, "mkdir %s", targetDir)
 	}
 
+	tmpDst := targetFilepath + ".download"
+
 	client := &getter.Client{
 		Src:     url,
-		Dst:     targetFilepath,
+		Dst:     tmpDst,
 		Mode:    getter.ClientModeFile,
 		Options: []getter.ClientOption{getter.WithProgress(DefaultProgressBar)},
 	}

@@ -75,9 +77,9 @@ func Binary(binary, version, osName, archName string) (string, error) {
 	}
 
 	if osName == runtime.GOOS && archName == runtime.GOARCH {
-		if err = os.Chmod(targetFilepath, 0755); err != nil {
+		if err = os.Chmod(tmpDst, 0755); err != nil {
 			return "", errors.Wrapf(err, "chmod +x %s", targetFilepath)
 		}
 	}
-	return targetFilepath, nil
+	return targetFilepath, os.Rename(tmpDst, targetFilepath)
 }
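This is the branch's central change, repeated in `Binary`, `Driver`, and `Preload` below: download to a predictable `<dst>.download` temp path, adjust permissions there, and only `os.Rename` onto the real destination once the file is complete. A failed or partial download therefore never clobbers a good cached file, and the stable temp name lets go-getter resume an interrupted transfer. A minimal sketch of the pattern, with `net/http` standing in for go-getter:

```go
// Download-to-temp-then-rename: the destination is either the old file
// or the fully written new one, never a truncated download.
package main

import (
	"fmt"
	"io"
	"net/http"
	"os"
)

func download(url, dst string) error {
	tmpDst := dst + ".download" // predictable temp destination

	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()

	f, err := os.Create(tmpDst)
	if err != nil {
		return err
	}
	if _, err := io.Copy(f, resp.Body); err != nil {
		f.Close()
		return err // tmpDst may be partial; dst is untouched
	}
	if err := f.Close(); err != nil {
		return err
	}
	if err := os.Chmod(tmpDst, 0755); err != nil {
		return err
	}
	// Atomic on the same filesystem: readers see old or new, never half.
	return os.Rename(tmpDst, dst)
}

func main() {
	// Illustrative URL and path only.
	if err := download("https://example.com/kubectl", "/tmp/kubectl"); err != nil {
		fmt.Println("download failed:", err)
	}
}
```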
@@ -35,11 +35,13 @@ func driverWithChecksumURL(name string, v semver.Version) string {
 // Driver downloads an arbitrary driver
 func Driver(name string, destination string, v semver.Version) error {
 	out.T(out.FileDownload, "Downloading driver {{.driver}}:", out.V{"driver": name})
-	os.Remove(destination)
+
+	tmpDst := destination + ".download"
 
 	url := driverWithChecksumURL(name, v)
 	client := &getter.Client{
 		Src:     url,
-		Dst:     destination,
+		Dst:     tmpDst,
 		Mode:    getter.ClientModeFile,
 		Options: []getter.ClientOption{getter.WithProgress(DefaultProgressBar)},
 	}

@@ -49,5 +51,9 @@ func Driver(name string, destination string, v semver.Version) error {
 		return errors.Wrapf(err, "download failed: %s", url)
 	}
 	// Give downloaded drivers a baseline decent file permission
-	return os.Chmod(destination, 0755)
+	err := os.Chmod(tmpDst, 0755)
+	if err != nil {
+		return err
+	}
+	return os.Rename(tmpDst, destination)
 }
@@ -134,7 +134,6 @@ func downloadISO(isoURL string, skipChecksum bool) error {
 		urlWithChecksum = isoURL
 	}
 
-	// Predictable temp destination so that resume can function
 	tmpDst := dst + ".download"
 
 	opts := []getter.ClientOption{getter.WithProgress(DefaultProgressBar)}
@@ -81,10 +81,8 @@ func PreloadExists(k8sVersion, containerRuntime string) bool {
 	// Omit remote check if tarball exists locally
 	targetPath := TarballPath(k8sVersion)
 	if _, err := os.Stat(targetPath); err == nil {
-		if err := verifyChecksum(k8sVersion); err == nil {
-			glog.Infof("Found %s in cache, no need to check remotely", targetPath)
-			return true
-		}
+		glog.Infof("Found local preload: %s", targetPath)
+		return true
 	}
 
 	url := remoteTarballURL(k8sVersion)

@@ -100,7 +98,7 @@ func PreloadExists(k8sVersion, containerRuntime string) bool {
 		return false
 	}
 
-	glog.Infof("Goody! %s exists!", url)
+	glog.Infof("Found remote preload: %s", url)
 	return true
 }
 

@@ -112,10 +110,8 @@ func Preload(k8sVersion, containerRuntime string) error {
 	targetPath := TarballPath(k8sVersion)
 
 	if _, err := os.Stat(targetPath); err == nil {
-		if err := verifyChecksum(k8sVersion); err == nil {
-			glog.Infof("Found %s in cache, skipping downloading", targetPath)
-			return nil
-		}
+		glog.Infof("Found %s in cache, skipping download", targetPath)
+		return nil
 	}
 
 	// Make sure we support this k8s version

@@ -126,9 +122,11 @@ func Preload(k8sVersion, containerRuntime string) error {
 	out.T(out.FileDownload, "Downloading preloaded images tarball for k8s {{.version}} ...", out.V{"version": k8sVersion})
 	url := remoteTarballURL(k8sVersion)
 
+	tmpDst := targetPath + ".download"
 	client := &getter.Client{
 		Src:     url,
-		Dst:     targetPath,
+		Dst:     tmpDst,
 		Mode:    getter.ClientModeFile,
 		Options: []getter.ClientOption{getter.WithProgress(DefaultProgressBar)},
 	}

@@ -137,18 +135,19 @@ func Preload(k8sVersion, containerRuntime string) error {
 	if err := client.Get(); err != nil {
 		return errors.Wrapf(err, "download failed: %s", url)
 	}
-	// Give downloaded drivers a baseline decent file permission
-	if err := os.Chmod(targetPath, 0755); err != nil {
-		return err
-	}
-	// Save checksum file locally
+
 	if err := saveChecksumFile(k8sVersion); err != nil {
 		return errors.Wrap(err, "saving checksum file")
 	}
-	return verifyChecksum(k8sVersion)
+
+	if err := verifyChecksum(k8sVersion, tmpDst); err != nil {
+		return errors.Wrap(err, "verify")
+	}
+	return os.Rename(tmpDst, targetPath)
 }
 
 func saveChecksumFile(k8sVersion string) error {
+	glog.Infof("saving checksum for %s ...", tarballName(k8sVersion))
 	ctx := context.Background()
 	client, err := storage.NewClient(ctx, option.WithoutAuthentication())
 	if err != nil {

@@ -164,9 +163,10 @@ func saveChecksumFile(k8sVersion string) error {
 
 // verifyChecksum returns true if the checksum of the local binary matches
 // the checksum of the remote binary
-func verifyChecksum(k8sVersion string) error {
+func verifyChecksum(k8sVersion string, path string) error {
+	glog.Infof("verifying checksumm of %s ...", path)
 	// get md5 checksum of tarball path
-	contents, err := ioutil.ReadFile(TarballPath(k8sVersion))
+	contents, err := ioutil.ReadFile(path)
 	if err != nil {
 		return errors.Wrap(err, "reading tarball")
 	}

@@ -179,7 +179,7 @@ func verifyChecksum(k8sVersion string) error {
 
 	// create a slice of checksum, which is [16]byte
 	if string(remoteChecksum) != string(checksum[:]) {
-		return fmt.Errorf("checksum of %s does not match remote checksum (%s != %s)", TarballPath(k8sVersion), string(remoteChecksum), string(checksum[:]))
+		return fmt.Errorf("checksum of %s does not match remote checksum (%s != %s)", path, string(remoteChecksum), string(checksum[:]))
 	}
 	return nil
 }
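`verifyChecksum` now takes an explicit path so the MD5 check runs against the temp download before the rename installs it; a corrupt tarball therefore never lands at the target path. A self-contained sketch of that verify-then-rename ordering (the remote checksum, which minikube reads from the GCS object's MD5 attribute, is passed in directly here):

```go
// Verify a download's MD5 against a known checksum, then atomically
// install it; on mismatch the bad file stays at the temp path.
package main

import (
	"bytes"
	"crypto/md5"
	"fmt"
	"io/ioutil"
	"os"
)

func verifyChecksum(path string, remoteChecksum []byte) error {
	contents, err := ioutil.ReadFile(path)
	if err != nil {
		return fmt.Errorf("reading tarball: %v", err)
	}
	checksum := md5.Sum(contents) // [16]byte, as in the code above
	if !bytes.Equal(remoteChecksum, checksum[:]) {
		return fmt.Errorf("checksum of %s does not match remote checksum", path)
	}
	return nil
}

func install(tmpDst, targetPath string, remoteChecksum []byte) error {
	if err := verifyChecksum(tmpDst, remoteChecksum); err != nil {
		return err // targetPath is untouched
	}
	return os.Rename(tmpDst, targetPath)
}

func main() {
	tmp, _ := ioutil.TempFile("", "preload-*.download")
	tmp.WriteString("preloaded-images")
	tmp.Close()
	sum := md5.Sum([]byte("preloaded-images"))
	fmt.Println(install(tmp.Name(), tmp.Name()+".tar.lz4", sum[:])) // <nil>
}
```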
@@ -24,6 +24,7 @@ import (
 
 	"github.com/golang/glog"
 	"github.com/google/go-containerregistry/pkg/name"
+	v1 "github.com/google/go-containerregistry/pkg/v1"
 	"github.com/google/go-containerregistry/pkg/v1/tarball"
 	"github.com/juju/mutex"
 	"github.com/pkg/errors"

@@ -120,6 +121,20 @@ func saveToTarFile(iname, rawDest string) error {
 		return errors.Wrapf(err, "nil image for %s", iname)
 	}
 
+	tag, err := name.NewTag(iname, name.WeakValidation)
+	if err != nil {
+		return errors.Wrap(err, "newtag")
+	}
+	err = writeImage(img, dst, tag)
+	if err != nil {
+		return err
+	}
+
+	glog.Infof("%s exists", dst)
+	return nil
+}
+
+func writeImage(img v1.Image, dst string, tag name.Tag) error {
 	glog.Infoln("opening: ", dst)
 	f, err := ioutil.TempFile(filepath.Dir(dst), filepath.Base(dst)+".*.tmp")
 	if err != nil {

@@ -135,10 +150,7 @@ func saveToTarFile(iname, rawDest string) error {
 			}
 		}
 	}()
-	tag, err := name.NewTag(iname, name.WeakValidation)
-	if err != nil {
-		return errors.Wrap(err, "newtag")
-	}
 
 	err = tarball.Write(tag, img, f)
 	if err != nil {
 		return errors.Wrap(err, "write")

@@ -151,6 +163,5 @@ func saveToTarFile(iname, rawDest string) error {
 	if err != nil {
 		return errors.Wrap(err, "rename")
 	}
-	glog.Infof("%s exists", dst)
 	return nil
 }
@@ -59,7 +59,7 @@ var defaultClusterConfig = config.ClusterConfig{
 	Name:      viper.GetString("profile"),
 	Driver:    driver.Mock,
 	DockerEnv: []string{"MOCK_MAKE_IT_PROVISION=true"},
-	Nodes:     []config.Node{config.Node{Name: "minikube"}},
+	Nodes:     []config.Node{{Name: "minikube"}},
 }
 
 func TestCreateHost(t *testing.T) {
@ -73,6 +73,45 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.
    // check if need to re-run docker-env
    maybeWarnAboutEvalEnv(cc.Driver, cc.Name)

    h, err = recreateIfNeeded(api, cc, n, h)
    if err != nil {
        return h, err
    }

    e := engineOptions(cc)
    if len(e.Env) > 0 {
        h.HostOptions.EngineOptions.Env = e.Env
        glog.Infof("Detecting provisioner ...")
        provisioner, err := provision.DetectProvisioner(h.Driver)
        if err != nil {
            return h, errors.Wrap(err, "detecting provisioner")
        }
        if err := provisioner.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions); err != nil {
            return h, errors.Wrap(err, "provision")
        }
    }

    if driver.IsMock(h.DriverName) {
        return h, nil
    }

    if err := postStartSetup(h, cc); err != nil {
        return h, errors.Wrap(err, "post-start")
    }

    if driver.BareMetal(h.Driver.DriverName()) {
        glog.Infof("%s is local, skipping auth/time setup (requires ssh)", h.Driver.DriverName())
        return h, nil
    }

    glog.Infof("Configuring auth for driver %s ...", h.Driver.DriverName())
    if err := h.ConfigureAuth(); err != nil {
        return h, &retry.RetriableError{Err: errors.Wrap(err, "Error configuring auth on host")}
    }
    return h, ensureSyncedGuestClock(h, cc.Driver)
}

func recreateIfNeeded(api libmachine.API, cc config.ClusterConfig, n config.Node, h *host.Host) (*host.Host, error) {
    s, err := h.Driver.GetState()
    if err != nil || s == state.Stopped || s == state.None {
        // If the virtual machine does not exist due to a user interrupt (i.e. Ctrl+C), recreate it

@ -118,37 +157,7 @@ func fixHost(api libmachine.API, cc config.ClusterConfig, n config.Node) (*host.
        }
    }

    e := engineOptions(cc)
    if len(e.Env) > 0 {
        h.HostOptions.EngineOptions.Env = e.Env
        glog.Infof("Detecting provisioner ...")
        provisioner, err := provision.DetectProvisioner(h.Driver)
        if err != nil {
            return h, errors.Wrap(err, "detecting provisioner")
        }
        if err := provisioner.Provision(*h.HostOptions.SwarmOptions, *h.HostOptions.AuthOptions, *h.HostOptions.EngineOptions); err != nil {
            return h, errors.Wrap(err, "provision")
        }
    }

    if driver.IsMock(h.DriverName) {
        return h, nil
    }

    if err := postStartSetup(h, cc); err != nil {
        return h, errors.Wrap(err, "post-start")
    }

    if driver.BareMetal(h.Driver.DriverName()) {
        glog.Infof("%s is local, skipping auth/time setup (requires ssh)", h.Driver.DriverName())
        return h, nil
    }

    glog.Infof("Configuring auth for driver %s ...", h.Driver.DriverName())
    if err := h.ConfigureAuth(); err != nil {
        return h, &retry.RetriableError{Err: errors.Wrap(err, "Error configuring auth on host")}
    }
    return h, ensureSyncedGuestClock(h, cc.Driver)
    return h, nil
}

// maybeWarnAboutEvalEnv will warn the user if they need to re-eval their docker-env, podman-env
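fixHost wraps ConfigureAuth failures in retry.RetriableError so callers can retry what is likely a transient SSH problem while failing fast on everything else. A self-contained sketch of that pattern; the RetriableError type and retry loop below are stand-ins, not minikube's pkg/util/retry code:

package main

import (
    "errors"
    "fmt"
    "time"
)

// RetriableError marks an error as safe to retry.
type RetriableError struct{ Err error }

func (r *RetriableError) Error() string { return "Temporary error: " + r.Err.Error() }

// retry re-runs f until it succeeds, returns a non-retriable error,
// or attempts are exhausted.
func retry(attempts int, delay time.Duration, f func() error) error {
    var err error
    for i := 0; i < attempts; i++ {
        err = f()
        if err == nil {
            return nil
        }
        var r *RetriableError
        if !errors.As(err, &r) {
            return err // permanent failure: stop immediately
        }
        time.Sleep(delay)
    }
    return err
}

func main() {
    n := 0
    err := retry(5, 10*time.Millisecond, func() error {
        n++
        if n < 3 {
            return &RetriableError{Err: fmt.Errorf("attempt %d failed", n)}
        }
        return nil
    })
    fmt.Println(err, "after", n, "attempts") // <nil> after 3 attempts
}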
@ -222,6 +231,41 @@ func adjustGuestClock(h hostRunner, t time.Time) error {
    return err
}

func machineExistsState(s state.State, err error) (bool, error) {
    if s == state.None {
        return false, ErrorMachineNotExist
    }
    return true, err
}

func machineExistsError(s state.State, err error, drverr error) (bool, error) {
    _ = s // not used
    if err == drverr {
        // if the error matches the driver error
        return false, ErrorMachineNotExist
    }
    return true, err
}

func machineExistsMessage(s state.State, err error, msg string) (bool, error) {
    if s == state.None || (err != nil && err.Error() == msg) {
        // if the error contains the message
        return false, ErrorMachineNotExist
    }
    return true, err
}

func machineExistsDocker(s state.State, err error) (bool, error) {
    if s == state.Error {
        // if the kic image is not present on the host machine, state.Error will be returned when the user cancels `minikube start`
        return false, ErrorMachineNotExist
    } else if s == state.None {
        // if the kic image is present on the host machine, state.None will be returned when the user cancels `minikube start`
        return false, ErrorMachineNotExist
    }
    return true, err
}

// machineExists checks whether the virtual machine exists;
// it returns true if the machine exists
func machineExists(d string, s state.State, err error) (bool, error) {
@ -230,54 +274,23 @@ func machineExists(d string, s state.State, err error) (bool, error) {
    }
    switch d {
    case driver.HyperKit:
        if s == state.None || (err != nil && err.Error() == "connection is shut down") {
            return false, ErrorMachineNotExist
        }
        return true, err
        return machineExistsMessage(s, err, "connection is shut down")
    case driver.HyperV:
        if s == state.None {
            return false, ErrorMachineNotExist
        }
        return true, err
        return machineExistsState(s, err)
    case driver.KVM2:
        if s == state.None {
            return false, ErrorMachineNotExist
        }
        return true, err
        return machineExistsState(s, err)
    case driver.None:
        if s == state.None {
            return false, ErrorMachineNotExist
        }
        return true, err
        return machineExistsState(s, err)
    case driver.Parallels:
        if err != nil && err.Error() == "machine does not exist" {
            return false, ErrorMachineNotExist
        }
        return true, err
        return machineExistsMessage(s, err, "machine does not exist")
    case driver.VirtualBox:
        if err == virtualbox.ErrMachineNotExist {
            return false, ErrorMachineNotExist
        }
        return true, err
        return machineExistsError(s, err, virtualbox.ErrMachineNotExist)
    case driver.VMware:
        if s == state.None {
            return false, ErrorMachineNotExist
        }
        return true, err
        return machineExistsState(s, err)
    case driver.VMwareFusion:
        if s == state.None {
            return false, ErrorMachineNotExist
        }
        return true, err
        return machineExistsState(s, err)
    case driver.Docker:
        if s == state.Error {
            // if the kic image is not present on the host machine, state.Error will be returned when the user cancels `minikube start`
            return false, ErrorMachineNotExist
        } else if s == state.None {
            // if the kic image is present on the host machine, state.None will be returned when the user cancels `minikube start`
            return false, ErrorMachineNotExist
        }
        return true, err
        return machineExistsDocker(s, err)
    case driver.Mock:
        if s == state.Error {
            return false, ErrorMachineNotExist
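Collapsing the per-driver branches into machineExistsState, machineExistsMessage, machineExistsError, and machineExistsDocker leaves each policy small enough to unit test directly. A sketch of a table-driven test for one of them (the test file and cases are hypothetical):

package machine

import (
    "errors"
    "testing"

    "github.com/docker/machine/libmachine/state"
)

func TestMachineExistsState(t *testing.T) {
    fakeErr := errors.New("driver error")
    tests := []struct {
        s       state.State
        err     error
        exists  bool
        wantErr error
    }{
        {state.None, nil, false, ErrorMachineNotExist}, // no machine
        {state.Running, nil, true, nil},                // healthy machine
        {state.Stopped, fakeErr, true, fakeErr},        // exists, but errored
    }
    for _, tc := range tests {
        exists, err := machineExistsState(tc.s, tc.err)
        if exists != tc.exists || err != tc.wantErr {
            t.Errorf("machineExistsState(%v, %v) = (%v, %v), want (%v, %v)",
                tc.s, tc.err, exists, err, tc.exists, tc.wantErr)
        }
    }
}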
@ -38,11 +38,13 @@ import (
// beginCacheKubernetesImages caches images required for the kubernetes version in the background
func beginCacheKubernetesImages(g *errgroup.Group, imageRepository string, k8sVersion, cRuntime string) {
    if download.PreloadExists(k8sVersion, cRuntime) {
        g.Go(func() error {
            glog.Info("Caching tarball of preloaded images")
            return download.Preload(k8sVersion, cRuntime)
        })
        return
        glog.Info("Caching tarball of preloaded images")
        err := download.Preload(k8sVersion, cRuntime)
        if err == nil {
            glog.Infof("Finished downloading the preloaded tar for %s on %s", k8sVersion, cRuntime)
            return // don't cache individual images if preload is successful
        }
        glog.Warningf("Error downloading preloaded artifacts; will continue without preload: %v", err)
    }

    if !viper.GetBool("cache-images") {
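Downloading the preload synchronously, instead of inside the errgroup, lets beginCacheKubernetesImages skip caching individual images as soon as the preload succeeds. A minimal illustration of the difference (downloadPreload stands in for download.Preload):

package main

import (
    "fmt"

    "golang.org/x/sync/errgroup"
)

func downloadPreload() error { return nil } // stand-in for download.Preload

func main() {
    var g errgroup.Group

    // Asynchronous (old behavior): the caller cannot see the result until
    // g.Wait(), so it must schedule individual image caching regardless.
    g.Go(downloadPreload)

    // Synchronous (new behavior): a successful preload is known right away,
    // letting the caller skip caching individual images entirely.
    if err := downloadPreload(); err == nil {
        fmt.Println("preload succeeded; skipping individual image cache")
        return
    }

    if err := g.Wait(); err != nil {
        fmt.Println("background work failed:", err)
    }
}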
@ -66,7 +66,9 @@ func configureRuntimes(runner cruntime.CommandRunner, drvName string, k8s config
    if driver.BareMetal(drvName) {
        disableOthers = false
    }
    if !driver.IsKIC(drvName) {

    // Preload is overly invasive for bare metal, and caching is not meaningful. KIC is handled elsewhere.
    if driver.IsVM(drvName) {
        if err := cr.Preload(k8s); err != nil {
            switch err.(type) {
            case *cruntime.ErrISOFeature:
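The preload call distinguishes failure modes with a type switch, so a missing ISO feature can degrade to a warning rather than aborting configureRuntimes. A generic sketch of typed-error handling in that style (ErrISOFeature's shape here is an assumption, not cruntime's actual definition):

package main

import "fmt"

// ErrISOFeature mimics a typed error carrying the name of a missing feature.
type ErrISOFeature struct{ Feature string }

func (e *ErrISOFeature) Error() string { return "missing ISO feature: " + e.Feature }

func preload() error { return &ErrISOFeature{Feature: "9p"} }

func main() {
    if err := preload(); err != nil {
        switch e := err.(type) {
        case *ErrISOFeature:
            // soft failure: log and continue without preload
            fmt.Println("continuing without preload, missing:", e.Feature)
        default:
            fmt.Println("fatal:", err)
        }
    }
}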
@ -44,10 +44,13 @@ func Start(mc config.ClusterConfig, n config.Node, primary bool, existingAddons
    }

    var cacheGroup errgroup.Group
    beginCacheKubernetesImages(&cacheGroup, mc.KubernetesConfig.ImageRepository, k8sVersion, mc.KubernetesConfig.ContainerRuntime)
    // Adding a second layer of cache does not make sense for the none driver
    if !driver.BareMetal(driverName) {
        beginCacheKubernetesImages(&cacheGroup, mc.KubernetesConfig.ImageRepository, k8sVersion, mc.KubernetesConfig.ContainerRuntime)
    }

    // Abstraction leakage alert: startHost requires the config to be saved, to satisfy pkg/provision/buildroot.
    // Hence, saveConfig must be called before startHost, and again afterwards when we know the IP.
    // Hence, saveProfile must be called before startHost, and again afterwards when we know the IP.
    if err := config.SaveProfile(viper.GetString(config.ProfileName), &mc); err != nil {
        exit.WithError("Failed to save config", err)
    }
@ -44,16 +44,19 @@ type LoadBalancerEmulator struct {
    patchConverter patchConverter
}

// PatchServices will update all load balancer services
func (l *LoadBalancerEmulator) PatchServices() ([]string, error) {
    return l.applyOnLBServices(l.updateService)
}

// PatchServiceIP will patch the given service and ip
func (l *LoadBalancerEmulator) PatchServiceIP(restClient rest.Interface, svc core.Service, ip string) error {
    // TODO: do not ignore result
    _, err := l.updateServiceIP(restClient, svc, ip)
    return err
}

// Cleanup will clean up all load balancer services
func (l *LoadBalancerEmulator) Cleanup() ([]string, error) {
    return l.applyOnLBServices(l.cleanupService)
}

@ -143,6 +146,7 @@ func (l *LoadBalancerEmulator) cleanupService(restClient rest.Interface, svc cor

}

// NewLoadBalancerEmulator creates a new LoadBalancerEmulator
func NewLoadBalancerEmulator(corev1Client typed_core.CoreV1Interface) LoadBalancerEmulator {
    return LoadBalancerEmulator{
        coreV1Client: corev1Client,
@ -25,8 +25,6 @@ import (

    units "github.com/docker/go-units"
    "github.com/pkg/errors"
    "k8s.io/minikube/pkg/minikube/exit"
    "k8s.io/minikube/pkg/minikube/out"
)

const (

@ -34,17 +32,17 @@ const (
)

// CalculateSizeInMB returns the number of MB in the human readable string
func CalculateSizeInMB(humanReadableSize string) int {
func CalculateSizeInMB(humanReadableSize string) (int, error) {
    _, err := strconv.ParseInt(humanReadableSize, 10, 64)
    if err == nil {
        humanReadableSize += "mb"
    }
    size, err := units.FromHumanSize(humanReadableSize)
    if err != nil {
        exit.WithCodeT(exit.Config, "Invalid size passed in argument: {{.error}}", out.V{"error": err})
        return 0, fmt.Errorf("FromHumanSize: %v", err)
    }

    return int(size / units.MB)
    return int(size / units.MB), nil
}

// GetBinaryDownloadURL returns a suitable URL for the platform
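Returning an error instead of calling exit.WithCodeT inside CalculateSizeInMB moves the exit decision to call sites and makes the helper testable. A sketch of the new shape with a hypothetical caller (go-units interprets sizes with SI units, so "20gb" yields 20000 MB):

package main

import (
    "fmt"
    "os"
    "strconv"

    units "github.com/docker/go-units"
)

// CalculateSizeInMB returns the number of MB in the human-readable string;
// bare integers are interpreted as MB.
func CalculateSizeInMB(humanReadableSize string) (int, error) {
    if _, err := strconv.ParseInt(humanReadableSize, 10, 64); err == nil {
        humanReadableSize += "mb"
    }
    size, err := units.FromHumanSize(humanReadableSize)
    if err != nil {
        return 0, fmt.Errorf("FromHumanSize: %v", err)
    }
    return int(size / units.MB), nil
}

func main() {
    mb, err := CalculateSizeInMB("20gb")
    if err != nil {
        // the exit decision now lives at the call site
        fmt.Fprintln(os.Stderr, "Invalid size passed in argument:", err)
        os.Exit(1)
    }
    fmt.Println(mb, "MB") // 20000 MB
}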
@ -52,7 +52,10 @@ func TestCalculateSizeInMB(t *testing.T) {
    }

    for _, tt := range testData {
        number := CalculateSizeInMB(tt.size)
        number, err := CalculateSizeInMB(tt.size)
        if err != nil {
            t.Fatalf("unexpected err: %v", err)
        }
        if number != tt.expectedNumber {
            t.Fatalf("Expected '%d' but got '%d'", tt.expectedNumber, number)
        }
@ -41,7 +41,6 @@ Then minikube is for you.

* **What is it good for?** Developing local Kubernetes applications
* **What is it not good for?** Production Kubernetes deployments
* **What is it *not yet* good for?** Environments which do not allow VMs

## Where should I go next?