Merge branch 'master' into ilyaz/do_not_run_amd64_on_m1

pull/11024/head
Ilya Zuyev 2021-04-08 11:44:52 -07:00
commit 8d8f95305b
42 changed files with 1076 additions and 226 deletions

View File

@ -93,7 +93,7 @@ var settings = []Setting{
{
name: "memory",
set: SetString,
validations: []setFn{IsValidDiskSize},
validations: []setFn{IsValidMemory},
callbacks: []setFn{RequiresRestartMsg},
},
{

View File

@ -108,11 +108,12 @@ var addonsConfigureCmd = &cobra.Command{
}
cname := ClusterFlagValue()
namespace := "kube-system"
// Create ECR Secret
err := service.CreateSecret(
cname,
"kube-system",
namespace,
"registry-creds-ecr",
map[string]string{
"AWS_ACCESS_KEY_ID": awsAccessID,
@ -134,7 +135,7 @@ var addonsConfigureCmd = &cobra.Command{
// Create GCR Secret
err = service.CreateSecret(
cname,
"kube-system",
namespace,
"registry-creds-gcr",
map[string]string{
"application_default_credentials.json": gcrApplicationDefaultCredentials,
@ -153,7 +154,7 @@ var addonsConfigureCmd = &cobra.Command{
// Create Docker Secret
err = service.CreateSecret(
cname,
"kube-system",
namespace,
"registry-creds-dpr",
map[string]string{
"DOCKER_PRIVATE_REGISTRY_SERVER": dockerServer,
@ -173,7 +174,7 @@ var addonsConfigureCmd = &cobra.Command{
// Create Azure Container Registry Secret
err = service.CreateSecret(
cname,
"kube-system",
namespace,
"registry-creds-acr",
map[string]string{
"ACR_URL": acrURL,

View File

@ -39,7 +39,7 @@ func TestSetNotAllowed(t *testing.T) {
t.Fatalf("Set did not return error for unallowed value: %+v", err)
}
err = Set("memory", "10a")
if err == nil || err.Error() != "run validations for \"memory\" with value of \"10a\": [invalid disk size: invalid size: '10a']" {
if err == nil || err.Error() != "run validations for \"memory\" with value of \"10a\": [invalid memory size: invalid size: '10a']" {
t.Fatalf("Set did not return error for unallowed value: %+v", err)
}
}

View File

@ -53,6 +53,15 @@ func IsValidDiskSize(name string, disksize string) error {
return nil
}
// IsValidMemory checks if a string is a valid memory size (a human-readable
// size such as "2048mb" or "4g", as understood by units.FromHumanSize).
// The name parameter identifies the setting being validated and is unused here.
func IsValidMemory(name string, memsize string) error {
	if _, err := units.FromHumanSize(memsize); err != nil {
		return fmt.Errorf("invalid memory size: %v", err)
	}
	return nil
}
// IsValidURL checks if a location is a valid URL
func IsValidURL(name string, location string) error {
_, err := url.Parse(location)

View File

@ -145,9 +145,28 @@ $ minikube image unload image busybox
},
}
// listImageCmd implements "minikube image list" (alias "ls"): it loads the
// active profile and prints the images available in the cluster via
// machine.ListImages.
var listImageCmd = &cobra.Command{
Use: "list",
Short: "List images",
Example: `
$ minikube image list
`,
Aliases: []string{"ls"},
Run: func(cmd *cobra.Command, args []string) {
// Resolve the cluster profile selected via --profile (or the default).
profile, err := config.LoadProfile(viper.GetString(config.ProfileName))
if err != nil {
exit.Error(reason.Usage, "loading profile", err)
}
// Delegate the actual listing to the machine layer; exit with a
// guest-image-list reason code on failure.
if err := machine.ListImages(profile); err != nil {
exit.Error(reason.GuestImageList, "Failed to list images", err)
}
},
}
// init wires the image subcommands (load, rm, list) onto the parent image
// command and registers the flags of the load subcommand.
func init() {
imageCmd.AddCommand(loadImageCmd)
imageCmd.AddCommand(removeImageCmd)
// --daemon and --remote select where "image load" pulls the image from.
loadImageCmd.Flags().BoolVar(&imgDaemon, "daemon", false, "Cache image from docker daemon")
loadImageCmd.Flags().BoolVar(&imgRemote, "remote", false, "Cache image from remote registry")
imageCmd.AddCommand(listImageCmd)
}

View File

@ -251,139 +251,8 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
cc = updateExistingConfigFromFlags(cmd, existing)
} else {
klog.Info("no existing cluster config was found, will generate one from the flags ")
sysLimit, containerLimit, err := memoryLimits(drvName)
if err != nil {
klog.Warningf("Unable to query memory limits: %+v", err)
}
cc = generateNewConfigFromFlags(cmd, k8sVersion, drvName)
mem := suggestMemoryAllocation(sysLimit, containerLimit, viper.GetInt(nodes))
if cmd.Flags().Changed(memory) {
var err error
mem, err = pkgutil.CalculateSizeInMB(viper.GetString(memory))
if err != nil {
exit.Message(reason.Usage, "Generate unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err})
}
if driver.IsKIC(drvName) && mem > containerLimit {
exit.Message(reason.Usage, "{{.driver_name}} has only {{.container_limit}}MB memory but you specified {{.specified_memory}}MB", out.V{"container_limit": containerLimit, "specified_memory": mem, "driver_name": driver.FullName(drvName)})
}
} else {
validateRequestedMemorySize(mem, drvName)
klog.Infof("Using suggested %dMB memory alloc based on sys=%dMB, container=%dMB", mem, sysLimit, containerLimit)
}
diskSize, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
if err != nil {
exit.Message(reason.Usage, "Generate unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err})
}
repository := viper.GetString(imageRepository)
mirrorCountry := strings.ToLower(viper.GetString(imageMirrorCountry))
if strings.ToLower(repository) == "auto" || (mirrorCountry != "" && repository == "") {
found, autoSelectedRepository, err := selectImageRepository(mirrorCountry, semver.MustParse(strings.TrimPrefix(k8sVersion, version.VersionPrefix)))
if err != nil {
exit.Error(reason.InetRepo, "Failed to check main repository and mirrors for images", err)
}
if !found {
if autoSelectedRepository == "" {
exit.Message(reason.InetReposUnavailable, "None of the known repositories are accessible. Consider specifying an alternative image repository with --image-repository flag")
} else {
out.WarningT("None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.", out.V{"image_repository_name": autoSelectedRepository})
}
}
repository = autoSelectedRepository
}
if cmd.Flags().Changed(imageRepository) || cmd.Flags().Changed(imageMirrorCountry) {
out.Styled(style.Success, "Using image repository {{.name}}", out.V{"name": repository})
}
// Backwards compatibility with --enable-default-cni
chosenCNI := viper.GetString(cniFlag)
if viper.GetBool(enableDefaultCNI) && !cmd.Flags().Changed(cniFlag) {
klog.Errorf("Found deprecated --enable-default-cni flag, setting --cni=bridge")
chosenCNI = "bridge"
}
// networkPlugin cni deprecation warning
chosenNetworkPlugin := viper.GetString(networkPlugin)
if chosenNetworkPlugin == "cni" {
out.WarningT("With --network-plugin=cni, you will need to provide your own CNI. See --cni flag as a user-friendly alternative")
}
if !(driver.IsKIC(drvName) || driver.IsKVM(drvName)) && viper.GetString(network) != "" {
out.WarningT("--network flag is only valid with the docker/podman and KVM drivers, it will be ignored")
}
checkNumaCount(k8sVersion)
cc = config.ClusterConfig{
Name: ClusterFlagValue(),
KeepContext: viper.GetBool(keepContext),
EmbedCerts: viper.GetBool(embedCerts),
MinikubeISO: viper.GetString(isoURL),
KicBaseImage: viper.GetString(kicBaseImage),
Network: viper.GetString(network),
Memory: mem,
CPUs: viper.GetInt(cpus),
DiskSize: diskSize,
Driver: drvName,
ListenAddress: viper.GetString(listenAddress),
HyperkitVpnKitSock: viper.GetString(vpnkitSock),
HyperkitVSockPorts: viper.GetStringSlice(vsockPorts),
NFSShare: viper.GetStringSlice(nfsShare),
NFSSharesRoot: viper.GetString(nfsSharesRoot),
DockerEnv: config.DockerEnv,
DockerOpt: config.DockerOpt,
InsecureRegistry: insecureRegistry,
RegistryMirror: registryMirror,
HostOnlyCIDR: viper.GetString(hostOnlyCIDR),
HypervVirtualSwitch: viper.GetString(hypervVirtualSwitch),
HypervUseExternalSwitch: viper.GetBool(hypervUseExternalSwitch),
HypervExternalAdapter: viper.GetString(hypervExternalAdapter),
KVMNetwork: viper.GetString(kvmNetwork),
KVMQemuURI: viper.GetString(kvmQemuURI),
KVMGPU: viper.GetBool(kvmGPU),
KVMHidden: viper.GetBool(kvmHidden),
KVMNUMACount: viper.GetInt(kvmNUMACount),
DisableDriverMounts: viper.GetBool(disableDriverMounts),
UUID: viper.GetString(uuid),
NoVTXCheck: viper.GetBool(noVTXCheck),
DNSProxy: viper.GetBool(dnsProxy),
HostDNSResolver: viper.GetBool(hostDNSResolver),
HostOnlyNicType: viper.GetString(hostOnlyNicType),
NatNicType: viper.GetString(natNicType),
StartHostTimeout: viper.GetDuration(waitTimeout),
ExposedPorts: viper.GetStringSlice(ports),
SSHIPAddress: viper.GetString(sshIPAddress),
SSHUser: viper.GetString(sshSSHUser),
SSHKey: viper.GetString(sshSSHKey),
SSHPort: viper.GetInt(sshSSHPort),
KubernetesConfig: config.KubernetesConfig{
KubernetesVersion: k8sVersion,
ClusterName: ClusterFlagValue(),
Namespace: viper.GetString(startNamespace),
APIServerName: viper.GetString(apiServerName),
APIServerNames: apiServerNames,
APIServerIPs: apiServerIPs,
DNSDomain: viper.GetString(dnsDomain),
FeatureGates: viper.GetString(featureGates),
ContainerRuntime: viper.GetString(containerRuntime),
CRISocket: viper.GetString(criSocket),
NetworkPlugin: chosenNetworkPlugin,
ServiceCIDR: viper.GetString(serviceCIDR),
ImageRepository: repository,
ExtraOptions: config.ExtraOptions,
ShouldLoadCachedImages: viper.GetBool(cacheImages),
CNI: chosenCNI,
NodePort: viper.GetInt(apiServerPort),
},
MultiNodeRequested: viper.GetInt(nodes) > 1,
}
cc.VerifyComponents = interpretWaitFlag(*cmd)
if viper.GetBool(createMount) && driver.IsKIC(drvName) {
cc.ContainerVolumeMounts = []string{viper.GetString(mountString)}
}
cnm, err := cni.New(cc)
if err != nil {
return cc, config.Node{}, errors.Wrap(err, "cni")
@ -418,6 +287,147 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
return createNode(cc, kubeNodeName, existing)
}
// generateNewConfigFromFlags generate a config.ClusterConfig based on flags
func generateNewConfigFromFlags(cmd *cobra.Command, k8sVersion string, drvName string) config.ClusterConfig {
var cc config.ClusterConfig
// Query system/container memory limits for the chosen driver. A failure
// here is non-fatal: we warn and continue with whatever values came back.
sysLimit, containerLimit, err := memoryLimits(drvName)
if err != nil {
klog.Warningf("Unable to query memory limits: %+v", err)
}
// Start from a suggested memory allocation; an explicit --memory flag or a
// persisted config value overrides the suggestion below.
mem := suggestMemoryAllocation(sysLimit, containerLimit, viper.GetInt(nodes))
if cmd.Flags().Changed(memory) || viper.IsSet(memory) {
var err error
mem, err = pkgutil.CalculateSizeInMB(viper.GetString(memory))
if err != nil {
exit.Message(reason.Usage, "Generate unable to parse memory '{{.memory}}': {{.error}}", out.V{"memory": viper.GetString(memory), "error": err})
}
// KIC drivers (docker/podman) cannot exceed the container runtime's limit.
if driver.IsKIC(drvName) && mem > containerLimit {
exit.Message(reason.Usage, "{{.driver_name}} has only {{.container_limit}}MB memory but you specified {{.specified_memory}}MB", out.V{"container_limit": containerLimit, "specified_memory": mem, "driver_name": driver.FullName(drvName)})
}
} else {
validateRequestedMemorySize(mem, drvName)
klog.Infof("Using suggested %dMB memory alloc based on sys=%dMB, container=%dMB", mem, sysLimit, containerLimit)
}
// Parse the requested disk size; unparseable input is a usage error.
diskSize, err := pkgutil.CalculateSizeInMB(viper.GetString(humanReadableDiskSize))
if err != nil {
exit.Message(reason.Usage, "Generate unable to parse disk size '{{.diskSize}}': {{.error}}", out.V{"diskSize": viper.GetString(humanReadableDiskSize), "error": err})
}
// Resolve the image repository: "auto" (or a mirror country with no explicit
// repository) triggers probing of known repositories/mirrors.
repository := viper.GetString(imageRepository)
mirrorCountry := strings.ToLower(viper.GetString(imageMirrorCountry))
if strings.ToLower(repository) == "auto" || (mirrorCountry != "" && repository == "") {
found, autoSelectedRepository, err := selectImageRepository(mirrorCountry, semver.MustParse(strings.TrimPrefix(k8sVersion, version.VersionPrefix)))
if err != nil {
exit.Error(reason.InetRepo, "Failed to check main repository and mirrors for images", err)
}
if !found {
if autoSelectedRepository == "" {
exit.Message(reason.InetReposUnavailable, "None of the known repositories are accessible. Consider specifying an alternative image repository with --image-repository flag")
} else {
out.WarningT("None of the known repositories in your location are accessible. Using {{.image_repository_name}} as fallback.", out.V{"image_repository_name": autoSelectedRepository})
}
}
repository = autoSelectedRepository
}
if cmd.Flags().Changed(imageRepository) || cmd.Flags().Changed(imageMirrorCountry) {
out.Styled(style.Success, "Using image repository {{.name}}", out.V{"name": repository})
}
// Backwards compatibility with --enable-default-cni
chosenCNI := viper.GetString(cniFlag)
if viper.GetBool(enableDefaultCNI) && !cmd.Flags().Changed(cniFlag) {
klog.Errorf("Found deprecated --enable-default-cni flag, setting --cni=bridge")
chosenCNI = "bridge"
}
// networkPlugin cni deprecation warning
chosenNetworkPlugin := viper.GetString(networkPlugin)
if chosenNetworkPlugin == "cni" {
out.WarningT("With --network-plugin=cni, you will need to provide your own CNI. See --cni flag as a user-friendly alternative")
}
// --network is only honored by docker/podman (KIC) and KVM drivers.
if !(driver.IsKIC(drvName) || driver.IsKVM(drvName)) && viper.GetString(network) != "" {
out.WarningT("--network flag is only valid with the docker/podman and KVM drivers, it will be ignored")
}
checkNumaCount(k8sVersion)
// Assemble the cluster config from the validated values and remaining flags.
cc = config.ClusterConfig{
Name: ClusterFlagValue(),
KeepContext: viper.GetBool(keepContext),
EmbedCerts: viper.GetBool(embedCerts),
MinikubeISO: viper.GetString(isoURL),
KicBaseImage: viper.GetString(kicBaseImage),
Network: viper.GetString(network),
Memory: mem,
CPUs: viper.GetInt(cpus),
DiskSize: diskSize,
Driver: drvName,
ListenAddress: viper.GetString(listenAddress),
HyperkitVpnKitSock: viper.GetString(vpnkitSock),
HyperkitVSockPorts: viper.GetStringSlice(vsockPorts),
NFSShare: viper.GetStringSlice(nfsShare),
NFSSharesRoot: viper.GetString(nfsSharesRoot),
DockerEnv: config.DockerEnv,
DockerOpt: config.DockerOpt,
InsecureRegistry: insecureRegistry,
RegistryMirror: registryMirror,
HostOnlyCIDR: viper.GetString(hostOnlyCIDR),
HypervVirtualSwitch: viper.GetString(hypervVirtualSwitch),
HypervUseExternalSwitch: viper.GetBool(hypervUseExternalSwitch),
HypervExternalAdapter: viper.GetString(hypervExternalAdapter),
KVMNetwork: viper.GetString(kvmNetwork),
KVMQemuURI: viper.GetString(kvmQemuURI),
KVMGPU: viper.GetBool(kvmGPU),
KVMHidden: viper.GetBool(kvmHidden),
KVMNUMACount: viper.GetInt(kvmNUMACount),
DisableDriverMounts: viper.GetBool(disableDriverMounts),
UUID: viper.GetString(uuid),
NoVTXCheck: viper.GetBool(noVTXCheck),
DNSProxy: viper.GetBool(dnsProxy),
HostDNSResolver: viper.GetBool(hostDNSResolver),
HostOnlyNicType: viper.GetString(hostOnlyNicType),
NatNicType: viper.GetString(natNicType),
StartHostTimeout: viper.GetDuration(waitTimeout),
ExposedPorts: viper.GetStringSlice(ports),
SSHIPAddress: viper.GetString(sshIPAddress),
SSHUser: viper.GetString(sshSSHUser),
SSHKey: viper.GetString(sshSSHKey),
SSHPort: viper.GetInt(sshSSHPort),
KubernetesConfig: config.KubernetesConfig{
KubernetesVersion: k8sVersion,
ClusterName: ClusterFlagValue(),
Namespace: viper.GetString(startNamespace),
APIServerName: viper.GetString(apiServerName),
APIServerNames: apiServerNames,
APIServerIPs: apiServerIPs,
DNSDomain: viper.GetString(dnsDomain),
FeatureGates: viper.GetString(featureGates),
ContainerRuntime: viper.GetString(containerRuntime),
CRISocket: viper.GetString(criSocket),
NetworkPlugin: chosenNetworkPlugin,
ServiceCIDR: viper.GetString(serviceCIDR),
ImageRepository: repository,
ExtraOptions: config.ExtraOptions,
ShouldLoadCachedImages: viper.GetBool(cacheImages),
CNI: chosenCNI,
NodePort: viper.GetInt(apiServerPort),
},
MultiNodeRequested: viper.GetInt(nodes) > 1,
}
cc.VerifyComponents = interpretWaitFlag(*cmd)
// Container-based drivers mount the --mount-string path as a volume.
if viper.GetBool(createMount) && driver.IsKIC(drvName) {
cc.ContainerVolumeMounts = []string{viper.GetString(mountString)}
}
return cc
}
// setCNIConfDir sets kubelet's '--cni-conf-dir' flag to custom CNI Config Directory path (same used also by CNI Deployment) to avoid conflicting CNI configs.
// ref: https://github.com/kubernetes/minikube/issues/10984
// Note: currently, this change affects only Kindnet CNI (and all multinodes using it), but it can be easily expanded to other/all CNIs if needed.

2
go.mod
View File

@ -68,7 +68,7 @@ require (
github.com/pmezard/go-difflib v1.0.0
github.com/russross/blackfriday v1.5.3-0.20200218234912-41c5fccfd6f6 // indirect
github.com/samalba/dockerclient v0.0.0-20160414174713-91d7393ff859 // indirect
github.com/shirou/gopsutil/v3 v3.21.2
github.com/shirou/gopsutil/v3 v3.21.3
github.com/spf13/cobra v1.1.3
github.com/spf13/pflag v1.0.5
github.com/spf13/viper v1.7.1

4
go.sum
View File

@ -892,8 +892,8 @@ github.com/sclevine/spec v1.2.0/go.mod h1:W4J29eT/Kzv7/b9IWLB055Z+qvVC9vt0Arko24
github.com/sean-/seed v0.0.0-20170313163322-e2103e2c3529/go.mod h1:DxrIzT+xaE7yg65j358z/aeFdxmN0P9QXhEzd20vsDc=
github.com/seccomp/libseccomp-golang v0.9.1/go.mod h1:GbW5+tmTXfcxTToHLXlScSlAvWlF4P2Ca7zGrPiEpWo=
github.com/sergi/go-diff v1.0.0/go.mod h1:0CfEIISq7TuYL3j771MWULgwwjU+GofnZX9QAmXWZgo=
github.com/shirou/gopsutil/v3 v3.21.2 h1:fIOk3hyqV1oGKogfGNjUZa0lUbtlkx3+ZT0IoJth2uM=
github.com/shirou/gopsutil/v3 v3.21.2/go.mod h1:ghfMypLDrFSWN2c9cDYFLHyynQ+QUht0cv/18ZqVczw=
github.com/shirou/gopsutil/v3 v3.21.3 h1:wgcdAHZS2H6qy4JFewVTtqfiYxFzCeEJod/mLztdPG8=
github.com/shirou/gopsutil/v3 v3.21.3/go.mod h1:ghfMypLDrFSWN2c9cDYFLHyynQ+QUht0cv/18ZqVczw=
github.com/shurcooL/sanitized_anchor_name v1.0.0 h1:PdmoCO6wvbs+7yrJyMORt4/BmY5IYyJwS/kOiWx8mHo=
github.com/shurcooL/sanitized_anchor_name v1.0.0/go.mod h1:1NzhyTcUVG4SuEtjjoZeVRXNmyL/1OwPU0+IJeTBvfc=
github.com/sirupsen/logrus v1.0.4-0.20170822132746-89742aefa4b2/go.mod h1:pMByvHTf9Beacp5x1UXfOR9xyW/9antXMhjMPG0dEzc=

View File

@ -41,8 +41,8 @@ fi
# installing golang so we could do go get for gopogh
sudo ./installers/check_install_golang.sh "1.16" "/usr/local" || true
# install docker and kubectl if not present
sudo ./installers/check_install_docker.sh
# install docker and kubectl if not present, currently skipping since it fails
#sudo ./installers/check_install_docker.sh
# let's just clean all docker artifacts up
docker system prune --force --volumes || true

View File

@ -441,7 +441,7 @@ func (d *Driver) Stop() error {
// even though we can't stop the containers inside, we still want to stop the minikube container itself
klog.Errorf("unable to get container runtime: %v", err)
} else {
containers, err := runtime.ListContainers(cruntime.ListOptions{Namespaces: constants.DefaultNamespaces})
containers, err := runtime.ListContainers(cruntime.ListContainersOptions{Namespaces: constants.DefaultNamespaces})
if err != nil {
klog.Infof("unable list containers : %v", err)
}

View File

@ -186,7 +186,7 @@ const (
MountPropagationBidirectional MountPropagation = 2
)
// MountPropagationValueToName is a map of valid MountPropogation values to
// MountPropagationValueToName is a map of valid MountPropagation values to
// their string names
var MountPropagationValueToName = map[MountPropagation]string{
MountPropagationNone: "None",
@ -194,7 +194,7 @@ var MountPropagationValueToName = map[MountPropagation]string{
MountPropagationBidirectional: "Bidirectional",
}
// MountPropagationNameToValue is a map of valid MountPropogation names to
// MountPropagationNameToValue is a map of valid MountPropagation names to
// their values
var MountPropagationNameToValue = map[string]MountPropagation{
"None": MountPropagationNone,

View File

@ -152,7 +152,7 @@ func (d *Driver) Kill() error {
}
// First try to gracefully stop containers
containers, err := d.runtime.ListContainers(cruntime.ListOptions{})
containers, err := d.runtime.ListContainers(cruntime.ListContainersOptions{})
if err != nil {
return errors.Wrap(err, "containers")
}
@ -164,7 +164,7 @@ func (d *Driver) Kill() error {
return errors.Wrap(err, "stop")
}
containers, err = d.runtime.ListContainers(cruntime.ListOptions{})
containers, err = d.runtime.ListContainers(cruntime.ListContainersOptions{})
if err != nil {
return errors.Wrap(err, "containers")
}
@ -220,7 +220,7 @@ func (d *Driver) Stop() error {
klog.Warningf("couldn't force stop kubelet. will continue with stop anyways: %v", err)
}
}
containers, err := d.runtime.ListContainers(cruntime.ListOptions{})
containers, err := d.runtime.ListContainers(cruntime.ListContainersOptions{})
if err != nil {
return errors.Wrap(err, "containers")
}

View File

@ -191,7 +191,7 @@ func (d *Driver) Stop() error {
klog.Warningf("couldn't force stop kubelet. will continue with stop anyways: %v", err)
}
}
containers, err := d.runtime.ListContainers(cruntime.ListOptions{})
containers, err := d.runtime.ListContainers(cruntime.ListContainersOptions{})
if err != nil {
return errors.Wrap(err, "containers")
}
@ -219,7 +219,7 @@ func (d *Driver) Kill() error {
}
// First try to gracefully stop containers
containers, err := d.runtime.ListContainers(cruntime.ListOptions{})
containers, err := d.runtime.ListContainers(cruntime.ListContainersOptions{})
if err != nil {
return errors.Wrap(err, "containers")
}
@ -231,7 +231,7 @@ func (d *Driver) Kill() error {
return errors.Wrap(err, "stop")
}
containers, err = d.runtime.ListContainers(cruntime.ListOptions{})
containers, err = d.runtime.ListContainers(cruntime.ListContainersOptions{})
if err != nil {
return errors.Wrap(err, "containers")
}

View File

@ -0,0 +1,68 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 1.1.1.1
bindPort: 12345
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /run/containerd/containerd.sock
name: "mk"
kubeletExtraArgs:
node-ip: 1.1.1.1
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:12345
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.21.0-rc.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249

View File

@ -0,0 +1,68 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 1.1.1.1
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /run/containerd/containerd.sock
name: "mk"
kubeletExtraArgs:
node-ip: 1.1.1.1
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.21.0-rc.0
networking:
dnsDomain: cluster.local
podSubnet: "192.168.32.0/20"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "192.168.32.0/20"
metricsBindAddress: 0.0.0.0:10249

View File

@ -0,0 +1,68 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 1.1.1.1
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /run/containerd/containerd.sock
name: "mk"
kubeletExtraArgs:
node-ip: 1.1.1.1
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.21.0-rc.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249

View File

@ -0,0 +1,75 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 1.1.1.1
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /var/run/crio/crio.sock
name: "mk"
kubeletExtraArgs:
node-ip: 1.1.1.1
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
fail-no-swap: "true"
feature-gates: "a=b"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
feature-gates: "a=b"
kube-api-burst: "32"
leader-elect: "false"
scheduler:
extraArgs:
feature-gates: "a=b"
leader-elect: "false"
scheduler-name: "mini-scheduler"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.21.0-rc.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
mode: "iptables"

View File

@ -0,0 +1,68 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 1.1.1.1
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /var/run/crio/crio.sock
name: "mk"
kubeletExtraArgs:
node-ip: 1.1.1.1
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.21.0-rc.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249

View File

@ -0,0 +1,68 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 1.1.1.1
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /var/run/dockershim.sock
name: "mk"
kubeletExtraArgs:
node-ip: 1.1.1.1
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.21.0-rc.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249

View File

@ -0,0 +1,68 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 1.1.1.1
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /var/run/dockershim.sock
name: "mk"
kubeletExtraArgs:
node-ip: 1.1.1.1
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.21.0-rc.0
networking:
dnsDomain: minikube.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
clusterDomain: "minikube.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249

View File

@ -0,0 +1,69 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 1.1.1.1
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /var/run/dockershim.sock
name: "mk"
kubeletExtraArgs:
node-ip: 1.1.1.1
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
imageRepository: test/repo
apiServer:
certSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.21.0-rc.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249

View File

@ -0,0 +1,72 @@
apiVersion: kubeadm.k8s.io/v1beta2
kind: InitConfiguration
localAPIEndpoint:
advertiseAddress: 1.1.1.1
bindPort: 8443
bootstrapTokens:
- groups:
- system:bootstrappers:kubeadm:default-node-token
ttl: 24h0m0s
usages:
- signing
- authentication
nodeRegistration:
criSocket: /var/run/dockershim.sock
name: "mk"
kubeletExtraArgs:
node-ip: 1.1.1.1
taints: []
---
apiVersion: kubeadm.k8s.io/v1beta2
kind: ClusterConfiguration
apiServer:
certSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
extraArgs:
enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
fail-no-swap: "true"
controllerManager:
extraArgs:
allocate-node-cidrs: "true"
kube-api-burst: "32"
leader-elect: "false"
scheduler:
extraArgs:
leader-elect: "false"
scheduler-name: "mini-scheduler"
certificatesDir: /var/lib/minikube/certs
clusterName: mk
controlPlaneEndpoint: control-plane.minikube.internal:8443
dns:
type: CoreDNS
etcd:
local:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.21.0-rc.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"
serviceSubnet: 10.96.0.0/12
---
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
authentication:
x509:
clientCAFile: /var/lib/minikube/certs/ca.crt
cgroupDriver: systemd
clusterDomain: "cluster.local"
# disable disk resource management by default
imageGCHighThresholdPercent: 100
evictionHard:
nodefs.available: "0%"
nodefs.inodesFree: "0%"
imagefs.available: "0%"
failSwapOn: false
staticPodPath: /etc/kubernetes/manifests
---
apiVersion: kubeproxy.config.k8s.io/v1alpha1
kind: KubeProxyConfiguration
clusterCIDR: "10.244.0.0/16"
metricsBindAddress: 0.0.0.0:10249
mode: "iptables"

View File

@ -367,7 +367,7 @@ func (k *Bootstrapper) unpause(cfg config.ClusterConfig) error {
return err
}
ids, err := cr.ListContainers(cruntime.ListOptions{State: cruntime.Paused, Namespaces: []string{"kube-system"}})
ids, err := cr.ListContainers(cruntime.ListContainersOptions{State: cruntime.Paused, Namespaces: []string{"kube-system"}})
if err != nil {
return errors.Wrap(err, "list paused")
}
@ -819,7 +819,7 @@ func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error {
klog.Warningf("stop kubelet: %v", err)
}
containers, err := cr.ListContainers(cruntime.ListOptions{Namespaces: []string{"kube-system"}})
containers, err := cr.ListContainers(cruntime.ListContainersOptions{Namespaces: []string{"kube-system"}})
if err != nil {
klog.Warningf("unable to list kube-system containers: %v", err)
}
@ -1023,7 +1023,7 @@ func (k *Bootstrapper) stopKubeSystem(cfg config.ClusterConfig) error {
return errors.Wrap(err, "new cruntime")
}
ids, err := cr.ListContainers(cruntime.ListOptions{Namespaces: []string{"kube-system"}})
ids, err := cr.ListContainers(cruntime.ListContainersOptions{Namespaces: []string{"kube-system"}})
if err != nil {
return errors.Wrap(err, "list")
}

View File

@ -55,7 +55,7 @@ func pause(cr cruntime.Manager, r command.Runner, namespaces []string) ([]string
return ids, errors.Wrap(err, "kubelet stop")
}
ids, err := cr.ListContainers(cruntime.ListOptions{State: cruntime.Running, Namespaces: namespaces})
ids, err := cr.ListContainers(cruntime.ListContainersOptions{State: cruntime.Running, Namespaces: namespaces})
if err != nil {
return ids, errors.Wrap(err, "list running")
}
@ -84,7 +84,7 @@ func Unpause(cr cruntime.Manager, r command.Runner, namespaces []string) ([]stri
// unpause unpauses a Kubernetes cluster
func unpause(cr cruntime.Manager, r command.Runner, namespaces []string) ([]string, error) {
ids, err := cr.ListContainers(cruntime.ListOptions{State: cruntime.Paused, Namespaces: namespaces})
ids, err := cr.ListContainers(cruntime.ListContainersOptions{State: cruntime.Paused, Namespaces: namespaces})
if err != nil {
return ids, errors.Wrap(err, "list paused")
}
@ -105,7 +105,7 @@ func unpause(cr cruntime.Manager, r command.Runner, namespaces []string) ([]stri
}
func CheckIfPaused(cr cruntime.Manager, namespaces []string) (bool, error) {
ids, err := cr.ListContainers(cruntime.ListOptions{State: cruntime.Paused, Namespaces: namespaces})
ids, err := cr.ListContainers(cruntime.ListContainersOptions{State: cruntime.Paused, Namespaces: namespaces})
if err != nil {
return true, errors.Wrap(err, "list paused")
}

View File

@ -29,10 +29,10 @@ import (
const (
// DefaultKubernetesVersion is the default Kubernetes version
// dont update till #10545 is solved
DefaultKubernetesVersion = "v1.20.2"
DefaultKubernetesVersion = "v1.20.5"
// NewestKubernetesVersion is the newest Kubernetes version to test against
// NOTE: You may need to update coreDNS & etcd versions in pkg/minikube/bootstrapper/images/images.go
NewestKubernetesVersion = "v1.20.5-rc.0"
NewestKubernetesVersion = "v1.21.0-rc.0"
// OldestKubernetesVersion is the oldest Kubernetes version to test against
OldestKubernetesVersion = "v1.14.0"
// DefaultClusterName is the default name for the k8s cluster

View File

@ -239,6 +239,24 @@ func (r *Containerd) ImageExists(name string, sha string) bool {
return true
}
// ListImages lists the tagged images stored in containerd's k8s.io
// namespace via `ctr images list --quiet`.
//
// Blank lines and digest-only entries (those containing "sha256:") are
// filtered out so only human-readable repo:tag names are returned.
func (r *Containerd) ListImages(ListImagesOptions) ([]string, error) {
	c := exec.Command("sudo", "ctr", "-n=k8s.io", "images", "list", "--quiet")
	rr, err := r.Runner.RunCmd(c)
	if err != nil {
		// Wrap (not Wrapf): the message is constant and must not be
		// interpreted as a format string.
		return nil, errors.Wrap(err, "ctr images list")
	}
	all := strings.Split(rr.Stdout.String(), "\n")
	imgs := []string{}
	for _, img := range all {
		// Skip empty lines and bare digest references.
		if img == "" || strings.Contains(img, "sha256:") {
			continue
		}
		imgs = append(imgs, img)
	}
	return imgs, nil
}
// LoadImage loads an image into this runtime
func (r *Containerd) LoadImage(path string) error {
klog.Infof("Loading image: %s", path)
@ -288,7 +306,7 @@ func (r *Containerd) KubeletOptions() map[string]string {
}
// ListContainers returns a list of managed by this container runtime
func (r *Containerd) ListContainers(o ListOptions) ([]string, error) {
func (r *Containerd) ListContainers(o ListContainersOptions) ([]string, error) {
return listCRIContainers(r.Runner, containerdNamespaceRoot, o)
}

View File

@ -37,7 +37,7 @@ type container struct {
}
// crictlList returns the output of 'crictl ps' in an efficient manner
func crictlList(cr CommandRunner, root string, o ListOptions) (*command.RunResult, error) {
func crictlList(cr CommandRunner, root string, o ListContainersOptions) (*command.RunResult, error) {
klog.Infof("listing CRI containers in root %s: %+v", root, o)
// Use -a because otherwise paused containers are missed
@ -63,7 +63,7 @@ func crictlList(cr CommandRunner, root string, o ListOptions) (*command.RunResul
}
// listCRIContainers returns a list of containers
func listCRIContainers(cr CommandRunner, root string, o ListOptions) ([]string, error) {
func listCRIContainers(cr CommandRunner, root string, o ListContainersOptions) ([]string, error) {
rr, err := crictlList(cr, root, o)
if err != nil {
return nil, errors.Wrap(err, "crictl list")

View File

@ -167,6 +167,16 @@ func (r *CRIO) ImageExists(name string, sha string) bool {
return true
}
// ListImages returns the repo:tag image names managed by this container
// runtime, as reported by `podman images`.
//
// When podman reports no images the result is an empty slice, not a
// one-element slice containing "" (strings.Split on "" yields [""]).
func (r *CRIO) ListImages(ListImagesOptions) ([]string, error) {
	c := exec.Command("sudo", "podman", "images", "--format", "{{.Repository}}:{{.Tag}}")
	rr, err := r.Runner.RunCmd(c)
	if err != nil {
		// Wrap (not Wrapf): no format arguments are supplied.
		return nil, errors.Wrap(err, "podman images")
	}
	imgs := []string{}
	for _, img := range strings.Split(rr.Stdout.String(), "\n") {
		if strings.TrimSpace(img) == "" {
			continue
		}
		imgs = append(imgs, img)
	}
	return imgs, nil
}
// LoadImage loads an image into this runtime
func (r *CRIO) LoadImage(path string) error {
klog.Infof("Loading image: %s", path)
@ -213,7 +223,7 @@ func (r *CRIO) KubeletOptions() map[string]string {
}
// ListContainers returns a list of managed by this container runtime
func (r *CRIO) ListContainers(o ListOptions) ([]string, error) {
func (r *CRIO) ListContainers(o ListContainersOptions) ([]string, error) {
return listCRIContainers(r.Runner, "", o)
}

View File

@ -98,12 +98,14 @@ type Manager interface {
// ImageExists takes image name and image sha checks if an it exists
ImageExists(string, string) bool
// ListImages returns a list of images managed by this container runtime
ListImages(ListImagesOptions) ([]string, error)
// RemoveImage remove image based on name
RemoveImage(string) error
// ListContainers returns a list of managed by this container runtime
ListContainers(ListOptions) ([]string, error)
// ListContainers returns a list of containers managed by this container runtime
ListContainers(ListContainersOptions) ([]string, error)
// KillContainers removes containers based on ID
KillContainers([]string) error
// StopContainers stops containers based on ID
@ -138,8 +140,8 @@ type Config struct {
InsecureRegistry []string
}
// ListOptions are the options to use for listing containers
type ListOptions struct {
// ListContainersOptions are the options to use for listing containers
type ListContainersOptions struct {
// State is the container state to filter by (All, Running, Paused)
State ContainerState
// Name is a name filter
@ -148,6 +150,10 @@ type ListOptions struct {
Namespaces []string
}
// ListImageOptions are the options to use for listing images
type ListImagesOptions struct {
}
// ErrContainerRuntimeNotRunning is thrown when container runtime is not running
var ErrContainerRuntimeNotRunning = errors.New("container runtime is not running")

View File

@ -699,7 +699,7 @@ func TestContainerFunctions(t *testing.T) {
}
// Get the list of apiservers
got, err := cr.ListContainers(ListOptions{Name: "apiserver"})
got, err := cr.ListContainers(ListContainersOptions{Name: "apiserver"})
if err != nil {
t.Fatalf("ListContainers: %v", err)
}
@ -712,7 +712,7 @@ func TestContainerFunctions(t *testing.T) {
if err := cr.StopContainers(got); err != nil {
t.Fatalf("stop failed: %v", err)
}
got, err = cr.ListContainers(ListOptions{Name: "apiserver"})
got, err = cr.ListContainers(ListContainersOptions{Name: "apiserver"})
if err != nil {
t.Fatalf("ListContainers: %v", err)
}
@ -722,7 +722,7 @@ func TestContainerFunctions(t *testing.T) {
}
// Get the list of everything else.
got, err = cr.ListContainers(ListOptions{})
got, err = cr.ListContainers(ListContainersOptions{})
if err != nil {
t.Fatalf("ListContainers: %v", err)
}
@ -735,7 +735,7 @@ func TestContainerFunctions(t *testing.T) {
if err := cr.KillContainers(got); err != nil {
t.Errorf("KillContainers: %v", err)
}
got, err = cr.ListContainers(ListOptions{})
got, err = cr.ListContainers(ListContainersOptions{})
if err != nil {
t.Fatalf("ListContainers: %v", err)
}

View File

@ -162,6 +162,25 @@ func (r *Docker) ImageExists(name string, sha string) bool {
return true
}
// ListImages returns the images known to the docker daemon, with names
// normalized to their fully-qualified form (implicit "docker.io" registry
// and "library" user restored) so they are comparable across runtimes.
func (r *Docker) ListImages(ListImagesOptions) ([]string, error) {
	c := exec.Command("docker", "images", "--format", "{{.Repository}}:{{.Tag}}")
	rr, err := r.Runner.RunCmd(c)
	if err != nil {
		// Wrap (not Wrapf): the message is constant, no format args.
		return nil, errors.Wrap(err, "docker images")
	}
	imgs := []string{}
	for _, img := range strings.Split(rr.Stdout.String(), "\n") {
		if img == "" {
			continue
		}
		// docker omits the implicit registry/user; restore them.
		imgs = append(imgs, addDockerIO(img))
	}
	return imgs, nil
}
// LoadImage loads an image into this runtime
func (r *Docker) LoadImage(path string) error {
klog.Infof("Loading image: %s", path)
@ -212,7 +231,7 @@ func (r *Docker) KubeletOptions() map[string]string {
}
// ListContainers returns a list of containers
func (r *Docker) ListContainers(o ListOptions) ([]string, error) {
func (r *Docker) ListContainers(o ListContainersOptions) ([]string, error) {
if r.UseCRI {
return listCRIContainers(r.Runner, "", o)
}
@ -446,6 +465,29 @@ func dockerImagesPreloaded(runner command.Runner, images []string) bool {
return true
}
// addDockerIO normalizes an image name to a fully-qualified reference.
// A leading path segment containing a dot is treated as an explicit
// registry host and the name is returned unchanged; otherwise the
// implicit "docker.io" registry (and "library" namespace for official
// images) is prepended.
func addDockerIO(name string) string {
	parts := strings.SplitN(name, "/", 2)

	// Explicit registry host (e.g. "gcr.io/...") — nothing to add.
	if len(parts) == 2 && strings.Contains(parts[0], ".") {
		return name
	}

	// Docker Hub "user/image" form: only the registry is implicit.
	if len(parts) == 2 {
		return "docker.io/" + parts[0] + "/" + parts[1]
	}

	// Bare official image: both registry and namespace are implicit.
	return "docker.io/library/" + name
}
// Remove docker.io prefix since it won't be included in images names
// when we call 'docker images'
func trimDockerIO(name string) string {

View File

@ -248,7 +248,7 @@ func OutputOffline(lines int) {
func logCommands(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, length int, follow bool) map[string]string {
cmds := bs.LogCommands(cfg, bootstrapper.LogOptions{Lines: length, Follow: follow})
for _, pod := range importantPods {
ids, err := r.ListContainers(cruntime.ListOptions{Name: pod})
ids, err := r.ListContainers(cruntime.ListContainersOptions{Name: pod})
if err != nil {
klog.Errorf("Failed to list containers for %q: %v", pod, err)
continue

View File

@ -21,6 +21,7 @@ import (
"os"
"path"
"path/filepath"
"sort"
"strings"
"sync"
"time"
@ -372,3 +373,54 @@ func RemoveImages(images []string, profile *config.Profile) error {
klog.Infof("failed removing from: %s", strings.Join(failed, " "))
return nil
}
// ListImages prints the container images available on every running node
// of the given profile, one image per line, sorted in reverse lexical
// order. Nodes whose status cannot be determined, whose machine cannot be
// loaded, or whose images cannot be listed are skipped with a warning.
func ListImages(profile *config.Profile) error {
	api, err := NewAPIClient()
	if err != nil {
		return errors.Wrap(err, "error creating api client")
	}
	defer api.Close()

	pName := profile.Name

	c, err := config.Load(pName)
	if err != nil {
		klog.Errorf("Failed to load profile %q: %v", pName, err)
		return errors.Wrapf(err, "error loading config for profile :%v", pName)
	}

	for _, n := range c.Nodes {
		m := config.MachineName(*c, n)

		status, err := Status(api, m)
		if err != nil {
			klog.Warningf("error getting status for %s: %v", m, err)
			continue
		}

		// Images can only be listed on nodes whose VM is up.
		if status != state.Running.String() {
			continue
		}

		h, err := api.Load(m)
		if err != nil {
			klog.Warningf("Failed to load machine %q: %v", m, err)
			continue
		}

		runner, err := CommandRunner(h)
		if err != nil {
			return err
		}

		cr, err := cruntime.New(cruntime.Config{Type: c.KubernetesConfig.ContainerRuntime, Runner: runner})
		if err != nil {
			return errors.Wrap(err, "error creating container runtime")
		}

		list, err := cr.ListImages(cruntime.ListImagesOptions{})
		if err != nil {
			klog.Warningf("Failed to list images for profile %s %v", pName, err.Error())
			continue
		}

		sort.Sort(sort.Reverse(sort.StringSlice(list)))
		// Use Println, not Printf: image names may contain '%' and must
		// not be interpreted as a format string (go vet printf check).
		fmt.Println(strings.Join(list, "\n"))
	}

	return nil
}

View File

@ -64,10 +64,8 @@ func (rm *resultManager) averageTime(binary *Binary) float64 {
return average(times)
}
func (rm *resultManager) summarizeResults(binaries []*Binary, driver string) {
func (rm *resultManager) summarizeResults(binaries []*Binary) {
// print total and average times
fmt.Printf("**%s Driver**\n", driver)
for _, b := range binaries {
fmt.Printf("Times for %s: ", b.Name())
for _, tt := range rm.totalTimes(b) {

View File

@ -23,18 +23,22 @@ import (
"log"
"os"
"os/exec"
"runtime"
"github.com/pkg/errors"
)
const (
// runs is the number of times each binary will be timed for 'minikube start'
runs = 3
runs = 5
)
// CompareMinikubeStart compares the time to run `minikube start` between two minikube binaries
func CompareMinikubeStart(ctx context.Context, out io.Writer, binaries []*Binary) error {
drivers := []string{"kvm2", "docker"}
if runtime.GOOS == "darwin" {
drivers = []string{"hyperkit", "docker"}
}
for _, d := range drivers {
fmt.Printf("**%s Driver**\n", d)
if err := downloadArtifacts(ctx, binaries, d); err != nil {
@ -46,7 +50,7 @@ func CompareMinikubeStart(ctx context.Context, out io.Writer, binaries []*Binary
fmt.Printf("error collecting results for %s driver: %v\n", d, err)
continue
}
rm.summarizeResults(binaries, d)
rm.summarizeResults(binaries)
fmt.Println()
}
return nil
@ -62,11 +66,17 @@ func collectResults(ctx context.Context, binaries []*Binary, driver string) (*re
return nil, errors.Wrapf(err, "timing run %d with %s", run, binary.Name())
}
rm.addResult(binary, r)
r, err = timeEnableIngress(ctx, binary)
if err != nil {
return nil, errors.Wrapf(err, "timing run %d with %s", run, binary.Name())
if runtime.GOOS != "darwin" {
r, err = timeEnableIngress(ctx, binary)
if err != nil {
return nil, errors.Wrapf(err, "timing run %d with %s", run, binary.Name())
}
rm.addResult(binary, r)
}
deleteCmd := exec.CommandContext(ctx, binary.path, "delete")
if err := deleteCmd.Run(); err != nil {
log.Printf("error deleting minikube: %v", err)
}
rm.addResult(binary, r)
}
}
return rm, nil
@ -108,16 +118,9 @@ func timeMinikubeStart(ctx context.Context, binary *Binary, driver string) (*res
// timeEnableIngress returns the time it takes to execute `minikube addons enable ingress`
// It deletes the VM after `minikube addons enable ingress`.
func timeEnableIngress(ctx context.Context, binary *Binary) (*result, error) {
enableCmd := exec.CommandContext(ctx, binary.path, "addons enable ingress")
enableCmd := exec.CommandContext(ctx, binary.path, "addons", "enable", "ingress")
enableCmd.Stderr = os.Stderr
deleteCmd := exec.CommandContext(ctx, binary.path, "delete")
defer func() {
if err := deleteCmd.Run(); err != nil {
log.Printf("error deleting minikube: %v", err)
}
}()
log.Printf("Running: %v...", enableCmd.Args)
r, err := timeCommandLogs(enableCmd)
if err != nil {

View File

@ -247,6 +247,7 @@ var (
GuestCert = Kind{ID: "GUEST_CERT", ExitCode: ExGuestError}
GuestCpConfig = Kind{ID: "GUEST_CP_CONFIG", ExitCode: ExGuestConfig}
GuestDeletion = Kind{ID: "GUEST_DELETION", ExitCode: ExGuestError}
GuestImageList = Kind{ID: "GUEST_IMAGE_LIST", ExitCode: ExGuestError}
GuestImageLoad = Kind{ID: "GUEST_IMAGE_LOAD", ExitCode: ExGuestError}
GuestImageRemove = Kind{ID: "GUEST_IMAGE_REMOVE", ExitCode: ExGuestError}
GuestLoadHost = Kind{ID: "GUEST_LOAD_HOST", ExitCode: ExGuestError}

View File

@ -235,13 +235,13 @@ func FreeSubnet(startSubnet string, step, tries int) (*Parameters, error) {
// uses sync.Map to manage reservations thread-safe
func reserveSubnet(subnet string, period time.Duration) bool {
// put 'zero' reservation{} Map value for subnet Map key
// to block other processes from concurently changing this subnet
// to block other processes from concurrently changing this subnet
zero := reservation{}
r, loaded := reservedSubnets.LoadOrStore(subnet, zero)
// check if there was previously issued reservation
if loaded {
// back off if previous reservation was already set to 'zero'
// as then other process is already managing this subnet concurently
// as then other process is already managing this subnet concurrently
if r == zero {
klog.Infof("backing off reserving subnet %s (other process is managing it!): %+v", subnet, &reservedSubnets)
return false

View File

@ -70,6 +70,52 @@ minikube image help [command] [flags]
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
## minikube image list
List images
### Synopsis
List images
```shell
minikube image list [flags]
```
### Aliases
[ls]
### Examples
```
$ minikube image list
```
### Options inherited from parent commands
```
--add_dir_header If true, adds the file directory to the header of the log messages
--alsologtostderr log to standard error as well as files
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
-h, --help
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--log_file string If non-empty, use this log file
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
--logtostderr log to standard error instead of files
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
--skip_headers If true, avoid header prefixes in the log messages
--skip_log_headers If true, avoid headers when opening log files
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
--user string Specifies the user executing the operation. Useful for auditing operations executed by 3rd party tools. Defaults to the operating system username.
-v, --v Level number for the log level verbosity
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
## minikube image load
Load a image into minikube

View File

@ -66,7 +66,7 @@ minikube start [flags]
--interactive Allow user prompts for more information (default true)
--iso-url strings Locations to fetch the minikube ISO from. (default [https://storage.googleapis.com/minikube/iso/minikube-v1.19.0.iso,https://github.com/kubernetes/minikube/releases/download/v1.19.0/minikube-v1.19.0.iso,https://kubernetes.oss-cn-hangzhou.aliyuncs.com/minikube/iso/minikube-v1.19.0.iso])
--keep-context This will keep the existing kubectl context and will create a minikube context.
--kubernetes-version string The Kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.20.2, 'latest' for v1.20.5-rc.0). Defaults to 'stable'.
--kubernetes-version string The Kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.20.5, 'latest' for v1.21.0-rc.0). Defaults to 'stable'.
--kvm-gpu Enable experimental NVIDIA GPU support in minikube
--kvm-hidden Hide the hypervisor signature from the guest in minikube (kvm2 driver only)
--kvm-network string The KVM default network name. (kvm2 driver only) (default "default")

View File

@ -12,6 +12,11 @@ description: >
* Two minikube repos checked out locally:
* Your personal fork
* Upstream
## Update the Kubernetes version
* Run `make update-kubernetes-version` from your local upstream repo copy
* If any files are updated, create and merge a PR before moving forward
## Build a new ISO

View File

@ -39,12 +39,16 @@ var stderrAllow = []string{
`cache_images.go:.*error getting status`,
// don't care if we can't push images to other profiles which are deleted.
`cache_images.go:.*Failed to load profile`,
// ! 'docker' driver reported a issue that could affect the performance."
// "! 'docker' driver reported a issue that could affect the performance."
`docker.*issue.*performance`,
// "* Suggestion: enable overlayfs kernel module on your Linux"
`Suggestion.*overlayfs`,
// "! docker is currently using the btrfs storage driver, consider switching to overlay2 for better performance"
`docker.*btrfs storage driver`,
// Jenkins VMs (Debian 9) cgroups don't allow setting memory
`Your cgroup does not allow setting memory.`,
// progress bar output
` > .*`,
}
// stderrAllowRe combines rootCauses into a single regex
@ -61,8 +65,15 @@ func TestErrorSpam(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), Minutes(25))
defer CleanupWithLogs(t, profile, cancel)
logDir := filepath.Join(os.TempDir(), profile)
if err := os.MkdirAll(logDir, 0755); err != nil {
t.Fatalf("Unable to make logDir %s: %v", logDir, err)
}
defer os.RemoveAll(logDir)
// This should likely use multi-node once it's ready
args := append([]string{"start", "-p", profile, "-n=1", "--memory=2250", "--wait=false"}, StartArgs()...)
// use `--log_dir` flag to run isolated and avoid race condition - i.e., failing to clean up (locked) log files created by other concurrently-run tests, or counting them in results
args := append([]string{"start", "-p", profile, "-n=1", "--memory=2250", "--wait=false", fmt.Sprintf("--log_dir=%s", logDir)}, StartArgs()...)
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
@ -116,8 +127,8 @@ func TestErrorSpam(t *testing.T) {
}{
{
command: "start",
args: []string{"--dry-run", "--log_dir", os.TempDir()},
runCount: 120, // calling this 120 times should create 2 files with 1 greater than 1M
args: []string{"--dry-run"},
runCount: 175, // calling this 175 times should create 2 files with 1 greater than 1M
expectedLogFiles: 2,
},
{
@ -141,38 +152,46 @@ func TestErrorSpam(t *testing.T) {
for _, test := range logTests {
t.Run(test.command, func(t *testing.T) {
args := []string{test.command, "-p", profile}
args := []string{test.command, "-p", profile, "--log_dir", logDir}
args = append(args, test.args...)
// before starting the test, ensure no other logs from the current command are written
logFiles, err := getLogFiles(test.command)
logFiles, err := filepath.Glob(filepath.Join(logDir, fmt.Sprintf("minikube_%s*", test.command)))
if err != nil {
t.Errorf("failed to find tmp log files: command %s : %v", test.command, err)
t.Errorf("failed to get old log files for command %s : %v", test.command, err)
}
cleanupLogFiles(t, logFiles)
// run command runCount times
for i := 0; i < test.runCount; i++ {
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Fatalf("%q failed: %v", rr.Command(), err)
t.Errorf("%q failed: %v", rr.Command(), err)
}
}
// get log files generated above
logFiles, err = getLogFiles(test.command)
logFiles, err = filepath.Glob(filepath.Join(logDir, fmt.Sprintf("minikube_%s*", test.command)))
if err != nil {
t.Errorf("failed to find tmp log files: command %s : %v", test.command, err)
t.Errorf("failed to get new log files for command %s : %v", test.command, err)
}
// cleanup generated logfiles
defer cleanupLogFiles(t, logFiles)
// if not the expected number of files, throw err
if len(logFiles) != test.expectedLogFiles {
t.Errorf("failed to find expected number of log files: cmd %s: expected: %d got %d", test.command, test.expectedLogFiles, len(logFiles))
}
// if more than 1 logfile is expected, only one file should be less than 1M
if test.expectedLogFiles > 1 {
foundSmall := false
maxSize := 1024 * 1024 // 1M
var maxSize int64 = 1024 * 1024 // 1M
for _, logFile := range logFiles {
isSmall := int(logFile.Size()) < maxSize
finfo, err := os.Stat(logFile)
if err != nil {
t.Logf("logfile %q for command %q not found:", logFile, test.command)
continue
}
isSmall := finfo.Size() < maxSize
if isSmall && !foundSmall {
foundSmall = true
} else if isSmall && foundSmall {
@ -185,25 +204,12 @@ func TestErrorSpam(t *testing.T) {
}
}
// getLogFiles returns logfiles corresponding to cmd
func getLogFiles(cmdName string) ([]os.FileInfo, error) {
var logFiles []os.FileInfo
err := filepath.Walk(os.TempDir(), func(path string, info os.FileInfo, err error) error {
if strings.Contains(info.Name(), fmt.Sprintf("minikube_%s", cmdName)) {
logFiles = append(logFiles, info)
}
return nil
})
return logFiles, err
}
// cleanupLogFiles removes logfiles generated during testing
func cleanupLogFiles(t *testing.T, logFiles []os.FileInfo) {
func cleanupLogFiles(t *testing.T, logFiles []string) {
t.Logf("Cleaning up %d logfile(s) ...", len(logFiles))
for _, logFile := range logFiles {
logFilePath := filepath.Join(os.TempDir(), logFile.Name())
t.Logf("Cleaning up logfile %s ...", logFilePath)
if err := os.Remove(logFilePath); err != nil {
t.Errorf("failed to cleanup log file: %s : %v", logFilePath, err)
if err := os.Remove(logFile); err != nil {
t.Logf("failed to cleanup log file: %s : %v", logFile, err)
}
}
}

View File

@ -19,7 +19,7 @@ spec:
# flaky nslookup in busybox versions newer than 1.28:
# https://github.com/docker-library/busybox/issues/48
# note: gcr.io/kubernetes-e2e-test-images/dnsutils:1.3
# has similar issues (ie, resolves but returns exit 1)
# has similar issues (ie, resolves but returns exit code 1)
image: busybox:1.28
command:
- sleep