Merge branch 'master' into DELETE_ALL_PROFILES

pull/4780/head
Marek Schwarz 2019-07-17 08:52:37 +02:00
commit 019ee1f018
25 changed files with 528 additions and 185 deletions

View File

@ -41,6 +41,7 @@ ISO_BUCKET ?= minikube/iso
MINIKUBE_VERSION ?= $(ISO_VERSION)
MINIKUBE_BUCKET ?= minikube/releases
MINIKUBE_UPLOAD_LOCATION := gs://${MINIKUBE_BUCKET}
MINIKUBE_RELEASES_URL=https://github.com/kubernetes/minikube/releases/download
KERNEL_VERSION ?= 4.16.14
@ -307,6 +308,20 @@ out/minikube-$(RPM_VERSION).rpm: out/minikube-linux-amd64
out/minikube-$(RPM_VERSION)/minikube.spec
rm -rf out/minikube-$(RPM_VERSION)
.PHONY: apt
apt: out/Release
out/Release: out/minikube_$(DEB_VERSION).deb
( cd out && apt-ftparchive packages . ) | gzip -c > out/Packages.gz
( cd out && apt-ftparchive release . ) > out/Release
.PHONY: yum
yum: out/repodata/repomd.xml
out/repodata/repomd.xml: out/minikube-$(RPM_VERSION).rpm
createrepo --simple-md-filenames --no-database \
-u "$(MINIKUBE_RELEASES_URL)/$(VERSION)/" out
.SECONDEXPANSION:
TAR_TARGETS_linux := out/minikube-linux-amd64 out/docker-machine-driver-kvm2
TAR_TARGETS_darwin := out/minikube-darwin-amd64
@ -407,6 +422,23 @@ out/docker-machine-driver-kvm2:
k8s.io/minikube/cmd/drivers/kvm
chmod +X $@
out/docker-machine-driver-kvm2_$(DEB_VERSION).deb: out/docker-machine-driver-kvm2
cp -r installers/linux/deb/kvm2_deb_template out/docker-machine-driver-kvm2_$(DEB_VERSION)
chmod 0755 out/docker-machine-driver-kvm2_$(DEB_VERSION)/DEBIAN
sed -E -i 's/--VERSION--/'$(DEB_VERSION)'/g' out/docker-machine-driver-kvm2_$(DEB_VERSION)/DEBIAN/control
mkdir -p out/docker-machine-driver-kvm2_$(DEB_VERSION)/usr/bin
cp out/docker-machine-driver-kvm2 out/docker-machine-driver-kvm2_$(DEB_VERSION)/usr/bin/docker-machine-driver-kvm2
fakeroot dpkg-deb --build out/docker-machine-driver-kvm2_$(DEB_VERSION)
rm -rf out/docker-machine-driver-kvm2_$(DEB_VERSION)
out/docker-machine-driver-kvm2-$(RPM_VERSION).rpm: out/docker-machine-driver-kvm2
cp -r installers/linux/rpm/kvm2_rpm_template out/docker-machine-driver-kvm2-$(RPM_VERSION)
sed -E -i 's/--VERSION--/'$(RPM_VERSION)'/g' out/docker-machine-driver-kvm2-$(RPM_VERSION)/docker-machine-driver-kvm2.spec
sed -E -i 's|--OUT--|'$(PWD)/out'|g' out/docker-machine-driver-kvm2-$(RPM_VERSION)/docker-machine-driver-kvm2.spec
rpmbuild -bb -D "_rpmdir $(PWD)/out" -D "_rpmfilename docker-machine-driver-kvm2-$(RPM_VERSION).rpm" \
out/docker-machine-driver-kvm2-$(RPM_VERSION)/docker-machine-driver-kvm2.spec
rm -rf out/docker-machine-driver-kvm2-$(RPM_VERSION)
kvm-image: $(KVM_BUILD_IMAGE) # convenient alias to build the docker container
$(KVM_BUILD_IMAGE): installers/linux/kvm/Dockerfile
docker build --build-arg "GO_VERSION=$(GO_VERSION)" -t $@ -f $< $(dir $<)

View File

@ -24,6 +24,9 @@ Usage: from the root minikube directory, go run cmd/extract/extract.go
package main
import (
"os"
"strings"
"k8s.io/minikube/pkg/minikube/extract"
)
@ -31,7 +34,19 @@ func main() {
paths := []string{"cmd", "pkg"}
functions := []string{"translate.T"}
outDir := "translations"
err := extract.TranslatableStrings(paths, functions, outDir)
cwd, err := os.Getwd()
if err != nil {
panic("Getting current working directory failed")
}
if strings.Contains(cwd, "cmd") {
panic("run extract.go from the minikube root directory")
}
if _, err = os.Stat(extract.ErrMapFile); os.IsNotExist(err) {
panic("err_map.go doesn't exist")
}
err = extract.TranslatableStrings(paths, functions, outDir)
if err != nil {
panic(err)

View File

@ -93,8 +93,8 @@ func CacheImagesInConfigFile() error {
return machine.CacheImages(images, constants.ImageCacheDir)
}
// LoadCachedImagesInConfigFile loads the images currently in the config file (minikube start)
func LoadCachedImagesInConfigFile() error {
// loadCachedImagesInConfigFile loads the images currently in the config file (minikube start)
func loadCachedImagesInConfigFile() error {
images, err := imagesInConfigFile()
if err != nil {
return err

View File

@ -124,7 +124,7 @@ func deleteAllProfiles(profiles []string) {
func uninstallKubernetes(api libmachine.API, kc pkg_config.KubernetesConfig, bsName string) {
console.OutStyle(console.Resetting, "Uninstalling Kubernetes %s using %s ...", kc.KubernetesVersion, bsName)
clusterBootstrapper, err := GetClusterBootstrapper(api, bsName)
clusterBootstrapper, err := getClusterBootstrapper(api, bsName)
if err != nil {
console.ErrLn("Unable to get bootstrapper: %v", err)
} else if err = clusterBootstrapper.DeleteCluster(kc); err != nil {

View File

@ -66,7 +66,7 @@ var logsCmd = &cobra.Command{
if err != nil {
exit.WithError("command runner", err)
}
bs, err := GetClusterBootstrapper(api, viper.GetString(cmdcfg.Bootstrapper))
bs, err := getClusterBootstrapper(api, viper.GetString(cmdcfg.Bootstrapper))
if err != nil {
exit.WithError("Error getting cluster bootstrapper", err)
}

View File

@ -170,8 +170,8 @@ func setupViper() {
setFlagsUsingViper()
}
// GetClusterBootstrapper returns a new bootstrapper for the cluster
func GetClusterBootstrapper(api libmachine.API, bootstrapperName string) (bootstrapper.Bootstrapper, error) {
// getClusterBootstrapper returns a new bootstrapper for the cluster
func getClusterBootstrapper(api libmachine.API, bootstrapperName string) (bootstrapper.Bootstrapper, error) {
var b bootstrapper.Bootstrapper
var err error
switch bootstrapperName {

View File

@ -115,59 +115,91 @@ var (
)
func init() {
startCmd.Flags().Bool(keepContext, constants.DefaultKeepContext, "This will keep the existing kubectl context and will create a minikube context.")
startCmd.Flags().Bool(createMount, false, "This will start the mount daemon and automatically mount files into minikube")
startCmd.Flags().String(mountString, constants.DefaultMountDir+":"+constants.DefaultMountEndpoint, "The argument to pass the minikube mount command on start")
startCmd.Flags().Bool(disableDriverMounts, false, "Disables the filesystem mounts provided by the hypervisors (vboxfs, xhyve-9p)")
startCmd.Flags().String(isoURL, constants.DefaultISOURL, "Location of the minikube iso")
startCmd.Flags().String(vmDriver, constants.DefaultVMDriver, fmt.Sprintf("VM driver is one of: %v", constants.SupportedVMDrivers))
startCmd.Flags().String(memory, constants.DefaultMemorySize, "Amount of RAM allocated to the minikube VM (format: <number>[<unit>], where unit = b, k, m or g)")
initMinikubeFlags()
initKubernetesFlags()
initDriverFlags()
initNetworkingFlags()
if err := viper.BindPFlags(startCmd.Flags()); err != nil {
exit.WithError("unable to bind flags", err)
}
RootCmd.AddCommand(startCmd)
}
// initMinikubeFlags includes commandline flags for minikube.
func initMinikubeFlags() {
startCmd.Flags().Int(cpus, constants.DefaultCPUS, "Number of CPUs allocated to the minikube VM")
startCmd.Flags().String(memory, constants.DefaultMemorySize, "Amount of RAM allocated to the minikube VM (format: <number>[<unit>], where unit = b, k, m or g)")
startCmd.Flags().String(humanReadableDiskSize, constants.DefaultDiskSize, "Disk size allocated to the minikube VM (format: <number>[<unit>], where unit = b, k, m or g)")
startCmd.Flags().String(hostOnlyCIDR, "192.168.99.1/24", "The CIDR to be used for the minikube VM (only supported with Virtualbox driver)")
startCmd.Flags().String(hypervVirtualSwitch, "", "The hyperv virtual switch name. Defaults to first found. (only supported with HyperV driver)")
startCmd.Flags().String(kvmNetwork, "default", "The KVM network name. (only supported with KVM driver)")
startCmd.Flags().String(kvmQemuURI, "qemu:///system", "The KVM QEMU connection URI. (works only with kvm2 driver on linux)")
startCmd.Flags().Bool(kvmGPU, false, "Enable experimental NVIDIA GPU support in minikube")
startCmd.Flags().Bool(kvmHidden, false, "Hide the hypervisor signature from the guest in minikube")
startCmd.Flags().String(xhyveDiskDriver, "ahci-hd", "The disk driver to use [ahci-hd|virtio-blk] (only supported with xhyve driver)")
startCmd.Flags().StringSlice(nfsShare, []string{}, "Local folders to share with Guest via NFS mounts (Only supported on with hyperkit now)")
startCmd.Flags().String(nfsSharesRoot, "/nfsshares", "Where to root the NFS Shares (defaults to /nfsshares, only supported with hyperkit now)")
startCmd.Flags().StringArrayVar(&dockerEnv, "docker-env", nil, "Environment variables to pass to the Docker daemon. (format: key=value)")
startCmd.Flags().StringArrayVar(&dockerOpt, "docker-opt", nil, "Specify arbitrary flags to pass to the Docker daemon. (format: key=value)")
startCmd.Flags().Int(apiServerPort, pkgutil.APIServerPort, "The apiserver listening port")
startCmd.Flags().String(apiServerName, constants.APIServerName, "The apiserver name which is used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine")
startCmd.Flags().StringArrayVar(&apiServerNames, "apiserver-names", nil, "A set of apiserver names which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine")
startCmd.Flags().IPSliceVar(&apiServerIPs, "apiserver-ips", nil, "A set of apiserver IP Addresses which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine")
startCmd.Flags().String(dnsDomain, constants.ClusterDNSDomain, "The cluster dns domain name used in the kubernetes cluster")
startCmd.Flags().String(serviceCIDR, pkgutil.DefaultServiceCIDR, "The CIDR to be used for service cluster IPs.")
startCmd.Flags().StringSliceVar(&insecureRegistry, "insecure-registry", nil, "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.")
startCmd.Flags().StringSliceVar(&registryMirror, "registry-mirror", nil, "Registry mirrors to pass to the Docker daemon")
startCmd.Flags().String(imageRepository, "", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \"auto\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers")
startCmd.Flags().String(imageMirrorCountry, "", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn")
startCmd.Flags().String(containerRuntime, "docker", "The container runtime to be used (docker, crio, containerd)")
startCmd.Flags().String(criSocket, "", "The cri socket path to be used")
startCmd.Flags().String(kubernetesVersion, constants.DefaultKubernetesVersion, "The kubernetes version that the minikube VM will use (ex: v1.2.3)")
startCmd.Flags().String(networkPlugin, "", "The name of the network plugin")
startCmd.Flags().Bool(enableDefaultCNI, false, "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \"--network-plugin=cni\"")
startCmd.Flags().String(featureGates, "", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
startCmd.Flags().Bool(downloadOnly, false, "If true, only download and cache files for later use - don't install or start anything.")
startCmd.Flags().Bool(cacheImages, true, "If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --vm-driver=none.")
startCmd.Flags().String(isoURL, constants.DefaultISOURL, "Location of the minikube iso")
startCmd.Flags().Bool(keepContext, constants.DefaultKeepContext, "This will keep the existing kubectl context and will create a minikube context.")
startCmd.Flags().String(containerRuntime, "docker", "The container runtime to be used (docker, crio, containerd)")
startCmd.Flags().Bool(createMount, false, "This will start the mount daemon and automatically mount files into minikube")
startCmd.Flags().String(mountString, constants.DefaultMountDir+":"+constants.DefaultMountEndpoint, "The argument to pass the minikube mount command on start")
startCmd.Flags().String(criSocket, "", "The cri socket path to be used")
startCmd.Flags().String(networkPlugin, "", "The name of the network plugin")
startCmd.Flags().Bool(enableDefaultCNI, false, "Enable the default CNI plugin (/etc/cni/net.d/k8s.conf). Used in conjunction with \"--network-plugin=cni\"")
}
// initKubernetesFlags inits the commandline flags for kubernetes related options
func initKubernetesFlags() {
startCmd.Flags().String(kubernetesVersion, constants.DefaultKubernetesVersion, "The kubernetes version that the minikube VM will use (ex: v1.2.3)")
startCmd.Flags().Var(&extraOptions, "extra-config",
`A set of key=value pairs that describe configuration that may be passed to different components.
The key should be '.' separated, and the first part before the dot is the component to apply the configuration to.
Valid components are: kubelet, kubeadm, apiserver, controller-manager, etcd, proxy, scheduler
Valid kubeadm parameters: `+fmt.Sprintf("%s, %s", strings.Join(kubeadm.KubeadmExtraArgsWhitelist[kubeadm.KubeadmCmdParam], ", "), strings.Join(kubeadm.KubeadmExtraArgsWhitelist[kubeadm.KubeadmConfigParam], ",")))
startCmd.Flags().String(uuid, "", "Provide VM UUID to restore MAC address (only supported with Hyperkit driver).")
startCmd.Flags().String(vpnkitSock, "", "Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock.")
startCmd.Flags().StringSlice(vsockPorts, []string{}, "List of guest VSock ports that should be exposed as sockets on the host (Only supported on with hyperkit now).")
startCmd.Flags().Bool(noVTXCheck, false, "Disable checking for the availability of hardware virtualization before the vm is started (virtualbox)")
startCmd.Flags().String(featureGates, "", "A set of key=value pairs that describe feature gates for alpha/experimental features.")
startCmd.Flags().String(dnsDomain, constants.ClusterDNSDomain, "The cluster dns domain name used in the kubernetes cluster")
startCmd.Flags().Int(apiServerPort, pkgutil.APIServerPort, "The apiserver listening port")
startCmd.Flags().String(apiServerName, constants.APIServerName, "The apiserver name which is used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine")
startCmd.Flags().StringArrayVar(&apiServerNames, "apiserver-names", nil, "A set of apiserver names which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine")
startCmd.Flags().IPSliceVar(&apiServerIPs, "apiserver-ips", nil, "A set of apiserver IP Addresses which are used in the generated certificate for kubernetes. This can be used if you want to make the apiserver available from outside the machine")
}
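initKubernetesFlags registers `--extra-config` through `Flags().Var`, which accepts any `pflag.Value`. A minimal sketch of such a value type under that API, using a hypothetical `componentFlag` in place of the real `extraOptions` type:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/spf13/pflag"
)

// componentFlag is a hypothetical pflag.Value that accumulates repeated
// component.key=value settings, loosely modelled on extraOptions.
type componentFlag struct {
	values []string
}

func (c *componentFlag) String() string { return strings.Join(c.values, ",") }

func (c *componentFlag) Set(v string) error {
	if !strings.Contains(v, ".") || !strings.Contains(v, "=") {
		return fmt.Errorf("expected component.key=value, got %q", v)
	}
	c.values = append(c.values, v)
	return nil
}

func (c *componentFlag) Type() string { return "componentFlag" }

func main() {
	var extra componentFlag
	fs := pflag.NewFlagSet("demo", pflag.ExitOnError)
	fs.Var(&extra, "extra-config", "component.key=value pairs passed to cluster components")

	// Repeated occurrences of the flag call Set once per value.
	if err := fs.Parse([]string{"--extra-config", "apiserver.v=4", "--extra-config", "kubelet.max-pods=100"}); err != nil {
		panic(err)
	}
	fmt.Println(extra.String())
}
```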
// initDriverFlags inits the commandline flags for vm drivers
func initDriverFlags() {
startCmd.Flags().String(vmDriver, constants.DefaultVMDriver, fmt.Sprintf("VM driver is one of: %v", constants.SupportedVMDrivers))
// kvm
startCmd.Flags().String(kvmNetwork, "default", "The KVM network name. (only supported with KVM driver)")
startCmd.Flags().String(kvmQemuURI, "qemu:///system", "The KVM QEMU connection URI. (works only with kvm2 driver on linux)")
startCmd.Flags().Bool(kvmGPU, false, "Enable experimental NVIDIA GPU support in minikube")
startCmd.Flags().Bool(kvmHidden, false, "Hide the hypervisor signature from the guest in minikube")
// virtualbox
startCmd.Flags().String(hostOnlyCIDR, "192.168.99.1/24", "The CIDR to be used for the minikube VM (only supported with Virtualbox driver)")
startCmd.Flags().Bool(dnsProxy, false, "Enable proxy for NAT DNS requests (virtualbox)")
startCmd.Flags().Bool(hostDNSResolver, true, "Enable host resolver for NAT DNS requests (virtualbox)")
if err := viper.BindPFlags(startCmd.Flags()); err != nil {
exit.WithError("unable to bind flags", err)
startCmd.Flags().Bool(noVTXCheck, false, "Disable checking for the availability of hardware virtualization before the vm is started (virtualbox)")
// hyperkit
startCmd.Flags().StringSlice(vsockPorts, []string{}, "List of guest VSock ports that should be exposed as sockets on the host (Only supported on with hyperkit now).")
startCmd.Flags().String(uuid, "", "Provide VM UUID to restore MAC address (only supported with Hyperkit driver).")
startCmd.Flags().String(vpnkitSock, "", "Location of the VPNKit socket used for networking. If empty, disables Hyperkit VPNKitSock, if 'auto' uses Docker for Mac VPNKit connection, otherwise uses the specified VSock.")
startCmd.Flags().StringSlice(nfsShare, []string{}, "Local folders to share with Guest via NFS mounts (Only supported on with hyperkit now)")
startCmd.Flags().String(nfsSharesRoot, "/nfsshares", "Where to root the NFS Shares (defaults to /nfsshares, only supported with hyperkit now)")
// hyperv
startCmd.Flags().String(hypervVirtualSwitch, "", "The hyperv virtual switch name. Defaults to first found. (only supported with HyperV driver)")
// xhyve
startCmd.Flags().String(xhyveDiskDriver, "ahci-hd", "The disk driver to use [ahci-hd|virtio-blk] (only supported with xhyve driver)")
startCmd.Flags().Bool(disableDriverMounts, false, "Disables the filesystem mounts provided by the hypervisors (vboxfs, xhyve-9p)")
}
RootCmd.AddCommand(startCmd)
// initNetworkingFlags inits the commandline flags for connectivity related flags for start
func initNetworkingFlags() {
startCmd.Flags().StringSliceVar(&insecureRegistry, "insecure-registry", nil, "Insecure Docker registries to pass to the Docker daemon. The default service CIDR range will automatically be added.")
startCmd.Flags().StringSliceVar(&registryMirror, "registry-mirror", nil, "Registry mirrors to pass to the Docker daemon")
startCmd.Flags().String(imageRepository, "", "Alternative image repository to pull docker images from. This can be used when you have limited access to gcr.io. Set it to \"auto\" to let minikube decide one for you. For Chinese mainland users, you may use local gcr.io mirrors such as registry.cn-hangzhou.aliyuncs.com/google_containers")
startCmd.Flags().String(imageMirrorCountry, "", "Country code of the image mirror to be used. Leave empty to use the global one. For Chinese mainland users, set it to cn")
startCmd.Flags().String(serviceCIDR, pkgutil.DefaultServiceCIDR, "The CIDR to be used for service cluster IPs.")
startCmd.Flags().StringArrayVar(&dockerEnv, "docker-env", nil, "Environment variables to pass to the Docker daemon. (format: key=value)")
startCmd.Flags().StringArrayVar(&dockerOpt, "docker-opt", nil, "Specify arbitrary flags to pass to the Docker daemon. (format: key=value)")
}
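The init split above registers flags in grouped helpers and then binds them all at once with `viper.BindPFlags` inside `init()`. A minimal sketch of that registration-then-bind pattern, assuming a hypothetical `demoCmd` and flag names:

```go
package main

import (
	"fmt"

	"github.com/spf13/cobra"
	"github.com/spf13/viper"
)

var demoCmd = &cobra.Command{
	Use: "demo",
	Run: func(cmd *cobra.Command, args []string) {
		// Values are read back through viper, so config files and
		// environment variables can override the flag defaults.
		fmt.Println("cpus:", viper.GetInt("cpus"))
		fmt.Println("memory:", viper.GetString("memory"))
	},
}

// initResourceFlags groups related flag definitions, mirroring the
// initMinikubeFlags/initDriverFlags split in start.go.
func initResourceFlags() {
	demoCmd.Flags().Int("cpus", 2, "Number of CPUs allocated to the VM")
	demoCmd.Flags().String("memory", "2000mb", "Amount of RAM allocated to the VM")
}

func main() {
	initResourceFlags()
	// Bind all registered flags once, after every init* helper has run.
	if err := viper.BindPFlags(demoCmd.Flags()); err != nil {
		panic(err)
	}
	if err := demoCmd.Execute(); err != nil {
		panic(err)
	}
}
```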
// startCmd represents the start command
@ -183,14 +215,9 @@ assumes you have already installed one of the VM drivers: virtualbox/parallels/v
func runStart(cmd *cobra.Command, args []string) {
console.OutT(console.Happy, "minikube {{.version}} on {{.os}} ({{.arch}})", console.Arg{"version": version.GetVersion(), "os": runtime.GOOS, "arch": runtime.GOARCH})
validateConfig()
validateUser()
oldConfig, err := cfg.Load()
if err != nil && !os.IsNotExist(err) {
exit.WithCode(exit.Data, "Unable to load config: %v", err)
}
k8sVersion, isUpgrade := validateKubernetesVersions(oldConfig)
k8sVersion, isUpgrade := getKubernetesVersion()
config, err := generateConfig(cmd, k8sVersion)
if err != nil {
exit.WithError("Failed to generate config", err)
@ -200,7 +227,7 @@ func runStart(cmd *cobra.Command, args []string) {
downloadISO(config)
// With "none", images are persistently stored in Docker, so internal caching isn't necessary.
skipCache(config)
skipCache(&config)
// Now that the ISO is downloaded, pull images in the background while the VM boots.
var cacheGroup errgroup.Group
@ -208,70 +235,32 @@ func runStart(cmd *cobra.Command, args []string) {
// Abstraction leakage alert: startHost requires the config to be saved, to satisfy pkg/provision/buildroot.
// Hence, saveConfig must be called before startHost, and again afterwards when we know the IP.
if err := saveConfig(config); err != nil {
if err := saveConfig(&config); err != nil {
exit.WithError("Failed to save config", err)
}
validateDriverVersion(viper.GetString(vmDriver))
m, err := machine.NewAPIClient()
if err != nil {
exit.WithError("Failed to get machine client", err)
}
defer m.Close()
// If --download-only, complete the remaining downloads and exit.
if viper.GetBool(downloadOnly) {
if err := doCacheBinaries(k8sVersion); err != nil {
exit.WithError("Failed to cache binaries", err)
}
waitCacheImages(&cacheGroup)
if err := CacheImagesInConfigFile(); err != nil {
exit.WithError("Failed to cache images", err)
}
console.OutStyle(console.Check, "Download complete!")
return
}
host, preexisting := startHost(m, config.MachineConfig)
ip := validateNetwork(host)
// Bypass proxy for minikube's vm ip
err = proxy.ExcludeIP(ip)
if err != nil {
console.ErrT(console.FailureType, "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", console.Arg{"ip": ip})
}
// Save IP to configuration file for subsequent use
config.KubernetesConfig.NodeIP = ip
if err := saveConfig(config); err != nil {
exit.WithError("Failed to save config", err)
}
runner, err := machine.CommandRunner(host)
if err != nil {
exit.WithError("Failed to get command runner", err)
}
cr := configureRuntimes(runner)
// exits here in case of --download-only option.
handleDownloadOnly(&cacheGroup, k8sVersion)
mRunner, preExists, machineAPI, host := startMachine(&config)
defer machineAPI.Close()
// configure the runtime (docker, containerd, crio)
cr := configureRuntimes(mRunner)
showVersionInfo(k8sVersion, cr)
// prepareHostEnvironment uses the downloaded images, so we need to wait for background task completion.
waitCacheImages(&cacheGroup)
bs := prepareHostEnvironment(m, config.KubernetesConfig)
// set up kubeadm and certs and return the bootstrapper
bs := setupKubeAdm(machineAPI, config.KubernetesConfig)
// The kubeconfig update must come before bootstrapping, otherwise health checks may use a stale IP
kubeconfig := updateKubeConfig(host, &config)
bootstrapCluster(bs, cr, runner, config.KubernetesConfig, preexisting, isUpgrade)
// pull images or restart cluster
bootstrapCluster(bs, cr, mRunner, config.KubernetesConfig, preExists, isUpgrade)
configureMounts()
if err = LoadCachedImagesInConfigFile(); err != nil {
if err = loadCachedImagesInConfigFile(); err != nil {
console.Failure("Unable to load cached images from config file.")
}
if config.MachineConfig.VMDriver == constants.DriverNone {
console.OutStyle(console.StartingNone, "Configuring local host environment ...")
prepareNone()
}
// special ops for the none driver, like changing the minikube directory.
prepareNone(viper.GetString(vmDriver))
if err := bs.WaitCluster(config.KubernetesConfig); err != nil {
exit.WithError("Wait failed", err)
@ -280,6 +269,57 @@ func runStart(cmd *cobra.Command, args []string) {
}
func handleDownloadOnly(cacheGroup *errgroup.Group, k8sVersion string) {
// If --download-only, complete the remaining downloads and exit.
if !viper.GetBool(downloadOnly) {
return
}
if err := doCacheBinaries(k8sVersion); err != nil {
exit.WithError("Failed to cache binaries", err)
}
waitCacheImages(cacheGroup)
if err := CacheImagesInConfigFile(); err != nil {
exit.WithError("Failed to cache images", err)
}
console.OutStyle(console.Check, "Download complete!")
os.Exit(0)
}
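`handleDownloadOnly` waits on the same `errgroup.Group` that `runStart` uses to pull images in the background while the VM boots. A minimal sketch of that background-caching pattern, with a hypothetical `cacheImage` helper standing in for the real image-caching code:

```go
package main

import (
	"fmt"
	"time"

	"golang.org/x/sync/errgroup"
)

// cacheImage is a hypothetical stand-in for pulling one image into the
// local cache; the real code goes through the container runtime.
func cacheImage(name string) error {
	time.Sleep(50 * time.Millisecond)
	fmt.Println("cached", name)
	return nil
}

func main() {
	var cacheGroup errgroup.Group
	for _, img := range []string{"k8s.gcr.io/pause:3.1", "k8s.gcr.io/coredns:1.3.1"} {
		img := img // capture the loop variable for the closure
		cacheGroup.Go(func() error { return cacheImage(img) })
	}

	// ... VM boot and provisioning would happen here, concurrently ...

	// Equivalent to waitCacheImages: block until every download finishes
	// and surface the first error, if any.
	if err := cacheGroup.Wait(); err != nil {
		fmt.Println("image caching failed:", err)
	}
}
```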
func startMachine(config *cfg.Config) (runner command.Runner, preExists bool, machineAPI libmachine.API, host *host.Host) {
m, err := machine.NewAPIClient()
if err != nil {
exit.WithError("Failed to get machine client", err)
}
host, preExists = startHost(m, config.MachineConfig)
ip := validateNetwork(host)
// Bypass proxy for minikube's vm host ip
err = proxy.ExcludeIP(ip)
if err != nil {
console.ErrT(console.FailureType, "Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.", console.Arg{"ip": ip})
}
// Save IP to configuration file for subsequent use
config.KubernetesConfig.NodeIP = ip
if err := saveConfig(config); err != nil {
exit.WithError("Failed to save config", err)
}
runner, err = machine.CommandRunner(host)
if err != nil {
exit.WithError("Failed to get command runner", err)
}
return runner, preExists, m, host
}
func getKubernetesVersion() (k8sVersion string, isUpgrade bool) {
oldConfig, err := cfg.Load()
if err != nil && !os.IsNotExist(err) {
exit.WithCode(exit.Data, "Unable to load config: %v", err)
}
return validateKubernetesVersions(oldConfig)
}
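`getKubernetesVersion` tolerates a missing profile config (`os.IsNotExist`) so a fresh `minikube start` is not treated as an error. A small sketch of that load-or-default pattern, using a hypothetical path and a trimmed-down config struct:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"os"
)

// profileConfig is a hypothetical, trimmed-down stand-in for cfg.Config.
type profileConfig struct {
	KubernetesVersion string `json:"KubernetesVersion"`
}

// loadProfile returns (nil, nil) when no config exists yet, so callers
// can distinguish "fresh install" from a real read or parse failure.
func loadProfile(path string) (*profileConfig, error) {
	data, err := ioutil.ReadFile(path)
	if os.IsNotExist(err) {
		return nil, nil
	}
	if err != nil {
		return nil, err
	}
	var c profileConfig
	if err := json.Unmarshal(data, &c); err != nil {
		return nil, err
	}
	return &c, nil
}

func main() {
	c, err := loadProfile("/tmp/minikube-profile-demo/config.json")
	if err != nil {
		panic(err)
	}
	if c == nil {
		fmt.Println("no existing profile; this is a fresh start")
		return
	}
	fmt.Println("existing cluster version:", c.KubernetesVersion)
}
```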
func downloadISO(config cfg.Config) {
if viper.GetString(vmDriver) != constants.DriverNone {
if err := cluster.CacheISO(config.MachineConfig); err != nil {
@ -288,7 +328,7 @@ func downloadISO(config cfg.Config) {
}
}
func skipCache(config cfg.Config) {
func skipCache(config *cfg.Config) {
if viper.GetString(vmDriver) == constants.DriverNone {
viper.Set(cacheImages, false)
config.KubernetesConfig.ShouldLoadCachedImages = false
@ -583,7 +623,11 @@ func autoSetOptions(vmDriver string) error {
}
// prepareNone prepares the user and host for the joy of the "none" driver
func prepareNone() {
func prepareNone(vmDriver string) {
if vmDriver != constants.DriverNone {
return
}
console.OutStyle(console.StartingNone, "Configuring local host environment ...")
if viper.GetBool(cfg.WantNoneDriverWarning) {
console.OutLn("")
console.Warning("The 'none' driver provides limited isolation and may reduce system security and reliability.")
@ -696,9 +740,9 @@ func validateKubernetesVersions(old *cfg.Config) (string, bool) {
return nv, isUpgrade
}
// prepareHostEnvironment adds any requested files into the VM before Kubernetes is started
func prepareHostEnvironment(api libmachine.API, kc cfg.KubernetesConfig) bootstrapper.Bootstrapper {
bs, err := GetClusterBootstrapper(api, viper.GetString(cmdcfg.Bootstrapper))
// setupKubeAdm adds any requested files into the VM before Kubernetes is started
func setupKubeAdm(mAPI libmachine.API, kc cfg.KubernetesConfig) bootstrapper.Bootstrapper {
bs, err := getClusterBootstrapper(mAPI, viper.GetString(cmdcfg.Bootstrapper))
if err != nil {
exit.WithError("Failed to get bootstrapper", err)
}
@ -816,7 +860,7 @@ func configureMounts() {
}
// saveConfig saves profile cluster configuration in $MINIKUBE_HOME/profiles/<profilename>/config.json
func saveConfig(clusterConfig cfg.Config) error {
func saveConfig(clusterConfig *cfg.Config) error {
data, err := json.MarshalIndent(clusterConfig, "", " ")
if err != nil {
return err
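`saveConfig` now takes a `*cfg.Config` instead of a value; `encoding/json` marshals a non-nil pointer exactly like the value it points to, so the on-disk `config.json` format is unchanged. A short sketch under that assumption, with a hypothetical config struct:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// demoConfig is a hypothetical stand-in for cfg.Config.
type demoConfig struct {
	Name   string `json:"Name"`
	NodeIP string `json:"NodeIP"`
}

func main() {
	c := demoConfig{Name: "minikube", NodeIP: "192.168.99.100"}

	byValue, _ := json.MarshalIndent(c, "", "    ")
	byPointer, _ := json.MarshalIndent(&c, "", "    ")

	// A non-nil pointer and its value produce identical JSON, so changing
	// the saveConfig signature does not change what lands in config.json.
	fmt.Println(string(byValue) == string(byPointer)) // true
}
```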

View File

@ -75,7 +75,7 @@ var statusCmd = &cobra.Command{
apiserverSt := state.None.String()
if hostSt == state.Running.String() {
clusterBootstrapper, err := GetClusterBootstrapper(api, viper.GetString(cmdcfg.Bootstrapper))
clusterBootstrapper, err := getClusterBootstrapper(api, viper.GetString(cmdcfg.Bootstrapper))
if err != nil {
exit.WithError("Error getting bootstrapper", err)
}

View File

@ -1,2 +1,3 @@
sha256 ccf83574556793ceb01717dc91c66b70f183c60c2bbec70283939aae8fdef768 crictl-v1.11.1-linux-amd64.tar.gz
sha256 9bdbea7a2b382494aff2ff014da328a042c5aba9096a7772e57fdf487e5a1d51 crictl-v1.13.0-linux-amd64.tar.gz
sha256 c3b71be1f363e16078b51334967348aab4f72f46ef64a61fe7754e029779d45a crictl-v1.15.0-linux-amd64.tar.gz

View File

@ -4,7 +4,7 @@
#
################################################################################
CRICTL_BIN_VERSION = v1.13.0
CRICTL_BIN_VERSION = v1.15.0
CRICTL_BIN_SITE = https://github.com/kubernetes-sigs/cri-tools/releases/download/$(CRICTL_BIN_VERSION)
CRICTL_BIN_SOURCE = crictl-$(CRICTL_BIN_VERSION)-linux-amd64.tar.gz
CRICTL_BIN_STRIP_COMPONENTS = 0

View File

@ -0,0 +1,9 @@
#if !defined(CONFIG_H)
#define CONFIG_H
#define BUF_SIZE 8192
#define STDIO_BUF_SIZE 8192
#define DEFAULT_SOCKET_PATH "/var/run/crio"
#endif // CONFIG_H

View File

@ -5,3 +5,4 @@ sha256 92588998dbb79002c38f65f84602b5659f0d0ef1cd36b1a568a2e40269b66816 v1.13.0.
sha256 48e7cf64a757d62a3edf214e1b93b74d99f090ca924f956ede2494a260eab2db v1.13.1.tar.gz
sha256 7435c4745017f06c260973b049440d924efe65b0df008d14175dfb8f5e23b599 v1.14.0.tar.gz
sha256 1f6f72b1f89d4286b2d5b54a48f4d5ed4c0c01065d484635dcb343a706feb743 v1.14.1.tar.gz
sha256 f7041a92e2d3a4c341be8df58f1076ba57ecb5daa02b6c65e652530c5f242739 v1.15.0.tar.gz

View File

@ -4,9 +4,9 @@
#
################################################################################
CRIO_BIN_VERSION = v1.14.1
CRIO_BIN_COMMIT = b7644f67e6383cc862b3e37fb74fba334b0b2721
CRIO_BIN_SITE = https://github.com/kubernetes-sigs/cri-o/archive
CRIO_BIN_VERSION = v1.15.0
CRIO_BIN_COMMIT = 485227d727401fa0472a449b5df3b0537e314ebb
CRIO_BIN_SITE = https://github.com/cri-o/cri-o/archive
CRIO_BIN_SOURCE = $(CRIO_BIN_VERSION).tar.gz
CRIO_BIN_DEPENDENCIES = host-go libgpgme
CRIO_BIN_GOPATH = $(@D)/_output
@ -23,8 +23,10 @@ define CRIO_BIN_USERS
endef
define CRIO_BIN_CONFIGURE_CMDS
mkdir -p $(CRIO_BIN_GOPATH)/src/github.com/kubernetes-sigs
ln -sf $(@D) $(CRIO_BIN_GOPATH)/src/github.com/kubernetes-sigs/cri-o
mkdir -p $(CRIO_BIN_GOPATH)/src/github.com/cri-o
ln -sf $(@D) $(CRIO_BIN_GOPATH)/src/github.com/cri-o/cri-o
# Generate conmon/config.h with a simplified bin/crio-config
$(CRIO_BIN_ENV) $(MAKE) $(TARGET_CONFIGURE_OPTS) -C $(@D) BUILDTAGS="containers_image_ostree_stub exclude_graphdriver_btrfs exclude_graphdriver_devicemapper containers_image_openpgp" conmon/config.h
endef
define CRIO_BIN_BUILD_CMDS
@ -45,9 +47,6 @@ define CRIO_BIN_INSTALL_TARGET_CMDS
$(INSTALL) -Dm755 \
$(@D)/bin/pause \
$(TARGET_DIR)/usr/libexec/crio/pause
$(INSTALL) -Dm644 \
$(@D)/seccomp.json \
$(TARGET_DIR)/etc/crio/seccomp.json
$(INSTALL) -Dm644 \
$(BR2_EXTERNAL_MINIKUBE_PATH)/package/crio-bin/crio.conf \
$(TARGET_DIR)/etc/crio/crio.conf

View File

@ -0,0 +1,39 @@
diff --git a/Makefile b/Makefile
index de79f63..a9ea13b 100644
--- a/Makefile
+++ b/Makefile
@@ -136,7 +136,7 @@ crio.conf: bin/crio
release-note: ${RELEASE_TOOL}
${RELEASE_TOOL} -n $(release)
-conmon/config.h: git-vars cmd/crio-config/config.go oci/oci.go
+conmon/config.h: cmd/crio-config/config.go oci/oci.go
$(GO) build $(LDFLAGS) -tags "$(BUILDTAGS)" -o bin/crio-config $(PROJECT)/cmd/crio-config
( cd conmon && $(CURDIR)/bin/crio-config )
diff --git a/cmd/crio/main.go b/cmd/crio/main.go
index 0bc347a..ab9969f 100644
--- a/cmd/crio/main.go
+++ b/cmd/crio/main.go
@@ -207,7 +207,7 @@ func mergeConfig(config *server.Config, ctx *cli.Context) (string, error) {
}
func writeCrioGoroutineStacks() {
- path := filepath.Join("/tmp", fmt.Sprintf("crio-goroutine-stacks-%s.log", strings.ReplaceAll(time.Now().Format(time.RFC3339), ":", "")))
+ path := filepath.Join("/tmp", fmt.Sprintf("crio-goroutine-stacks-%s.log", strings.Replace(time.Now().Format(time.RFC3339), ":", "", -1)))
if err := utils.WriteGoroutineStacksToFile(path); err != nil {
logrus.Warnf("Failed to write goroutine stacks: %s", err)
}
diff --git a/oci/runtime_vm.go b/oci/runtime_vm.go
index 57a1fde..64f853f 100644
--- a/oci/runtime_vm.go
+++ b/oci/runtime_vm.go
@@ -172,7 +172,7 @@ func (r *runtimeVM) startRuntimeDaemon(c *Container) error {
args = append(args, "start")
// Modify the runtime path so that it complies with v2 shim API
- newRuntimePath := strings.ReplaceAll(r.path, "-", ".")
+ newRuntimePath := strings.Replace(r.path, "-", ".", -1)
// Setup default namespace
r.ctx = namespaces.WithNamespace(r.ctx, namespaces.Default)

View File

@ -5,6 +5,11 @@
#
# Please refer to crio.conf(5) for details of all configuration options.
# CRI-O supports partial configuration reload during runtime, which can be
# done by sending SIGHUP to the running process. Currently supported options
# are explicitly mentioned with: 'This option supports live configuration
# reload'.
# CRI-O reads its storage defaults from the containers-storage.conf(5) file
# located at /etc/containers/storage.conf. Modify this storage configuration if
# you want to change the system's defaults. If you want to modify storage just
@ -28,9 +33,11 @@ storage_driver = "overlay"
#]
# If set to false, in-memory locking will be used instead of file-based locking.
file_locking = true
# **Deprecated** this option will be removed in the future.
file_locking = false
# Path to the lock file.
# **Deprecated** this option will be removed in the future.
file_locking_path = "/run/crio.lock"
@ -90,6 +97,9 @@ no_pivot = true
# Path to the conmon binary, used for monitoring the OCI runtime.
conmon = "/usr/libexec/crio/conmon"
# Cgroup setting for conmon
conmon_cgroup = "pod"
# Environment variable list for the conmon process, used for passing necessary
# environment variables to conmon or the runtime.
conmon_env = [
@ -100,8 +110,9 @@ conmon_env = [
selinux = false
# Path to the seccomp.json profile which is used as the default seccomp profile
# for the runtime.
seccomp_profile = "/etc/crio/seccomp.json"
# for the runtime. If not specified, then the internal default seccomp profile
# will be used.
seccomp_profile = ""
# Used to change the name of the default AppArmor profile of CRI-O. The default
# profile name is "crio-default-" followed by the version string of CRI-O.
@ -185,9 +196,13 @@ container_attach_socket_dir = "/var/run/crio"
read_only = false
# Changes the verbosity of the logs based on the level it is set to. Options
# are fatal, panic, error, warn, info, and debug.
# are fatal, panic, error, warn, info, and debug. This option supports live
# configuration reload.
log_level = "error"
# The default log directory where all logs will go unless directly specified by the kubelet
log_dir = "/var/log/crio/pods"
# The UID mappings for the user namespace of each container. A range is
# specified in the form containerUID:HostUID:Size. Multiple ranges must be
# separated by comma.
@ -202,6 +217,10 @@ gid_mappings = ""
# regarding the proper termination of the container.
ctr_stop_timeout = 0
# ManageNetworkNSLifecycle determines whether we pin and remove network namespace
# and manage its lifecycle.
manage_network_ns_lifecycle = false
# The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes.
# The runtime to use is picked based on the runtime_handler provided by the CRI.
# If no runtime_handler is provided, the runtime will be picked based on the level
@ -210,7 +229,7 @@ ctr_stop_timeout = 0
[crio.runtime.runtimes.runc]
runtime_path = "/usr/bin/runc"
runtime_type = "oci"
runtime_root = "/run/runc"
# The crio.image table contains settings pertaining to the management of OCI images.
@ -225,14 +244,21 @@ ctr_stop_timeout = 0
# Default transport for pulling images from a remote container storage.
default_transport = "docker://"
# The path to a file containing credentials necessary for pulling images from
# secure registries. The file is similar to that of /var/lib/kubelet/config.json
global_auth_file = ""
# The image used to instantiate infra containers.
# This option supports live configuration reload.
pause_image = "k8s.gcr.io/pause:3.1"
# If not empty, the path to a docker/config.json-like file containing credentials
# necessary for pulling the image specified by pause_image above.
# The path to a file containing credentials specific for pulling the pause_image from
# above. The file is similar to that of /var/lib/kubelet/config.json
# This option supports live configuration reload.
pause_image_auth_file = ""
# The command to run to have a container stay in the paused state.
# This option supports live configuration reload.
pause_command = "/pause"
# Path to the file which decides what sort of policy we use when deciding
@ -264,6 +290,6 @@ registries = [
network_dir = "/etc/cni/net.d/"
# Paths to directories where CNI plugin binaries are located.
plugin_dir = [
plugin_dirs = [
"/opt/cni/bin/",
]

View File

@ -5,6 +5,11 @@
#
# Please refer to crio.conf(5) for details of all configuration options.
# CRI-O supports partial configuration reload during runtime, which can be
# done by sending SIGHUP to the running process. Currently supported options
# are explicitly mentioned with: 'This option supports live configuration
# reload'.
# CRI-O reads its storage defaults from the containers-storage.conf(5) file
# located at /etc/containers/storage.conf. Modify this storage configuration if
# you want to change the system's defaults. If you want to modify storage just
@ -20,7 +25,7 @@
# Storage driver used to manage the storage of images and containers. Please
# refer to containers-storage.conf(5) to see all available storage drivers.
#storage_driver = "overlay"
#storage_driver = ""
# List to pass options to the storage driver. Please refer to
# containers-storage.conf(5) to see all available storage options.
@ -28,9 +33,11 @@
#]
# If set to false, in-memory locking will be used instead of file-based locking.
file_locking = true
# **Deprecated** this option will be removed in the future.
file_locking = false
# Path to the lock file.
# **Deprecated** this option will be removed in the future.
file_locking_path = "/run/crio.lock"
@ -90,6 +97,9 @@ no_pivot = false
# Path to the conmon binary, used for monitoring the OCI runtime.
conmon = "/usr/local/libexec/crio/conmon"
# Cgroup setting for conmon
conmon_cgroup = "pod"
# Environment variable list for the conmon process, used for passing necessary
# environment variables to conmon or the runtime.
conmon_env = [
@ -100,8 +110,9 @@ conmon_env = [
selinux = false
# Path to the seccomp.json profile which is used as the default seccomp profile
# for the runtime.
seccomp_profile = "/etc/crio/seccomp.json"
# for the runtime. If not specified, then the internal default seccomp profile
# will be used.
seccomp_profile = ""
# Used to change the name of the default AppArmor profile of CRI-O. The default
# profile name is "crio-default-" followed by the version string of CRI-O.
@ -185,9 +196,13 @@ container_attach_socket_dir = "/var/run/crio"
read_only = false
# Changes the verbosity of the logs based on the level it is set to. Options
# are fatal, panic, error, warn, info, and debug.
# are fatal, panic, error, warn, info, and debug. This option supports live
# configuration reload.
log_level = "error"
# The default log directory where all logs will go unless directly specified by the kubelet
log_dir = "/var/log/crio/pods"
# The UID mappings for the user namespace of each container. A range is
# specified in the form containerUID:HostUID:Size. Multiple ranges must be
# separated by comma.
@ -202,15 +217,19 @@ gid_mappings = ""
# regarding the proper termination of the container.
ctr_stop_timeout = 0
# ManageNetworkNSLifecycle determines whether we pin and remove network namespace
# and manage its lifecycle.
manage_network_ns_lifecycle = false
# The "crio.runtime.runtimes" table defines a list of OCI compatible runtimes.
# The runtime to use is picked based on the runtime_handler provided by the CRI.
# If no runtime_handler is provided, the runtime will be picked based on the level
# of trust of the workload.
[crio.runtime.runtimes.runc]
runtime_path = "/usr/bin/runc"
runtime_path = ""
runtime_type = "oci"
runtime_root = "/run/runc"
# The crio.image table contains settings pertaining to the management of OCI images.
@ -225,14 +244,21 @@ ctr_stop_timeout = 0
# Default transport for pulling images from a remote container storage.
default_transport = "docker://"
# The path to a file containing credentials necessary for pulling images from
# secure registries. The file is similar to that of /var/lib/kubelet/config.json
global_auth_file = ""
# The image used to instantiate infra containers.
# This option supports live configuration reload.
pause_image = "k8s.gcr.io/pause:3.1"
# If not empty, the path to a docker/config.json-like file containing credentials
# necessary for pulling the image specified by pause_image above.
# The path to a file containing credentials specific for pulling the pause_image from
# above. The file is similar to that of /var/lib/kubelet/config.json
# This option supports live configuration reload.
pause_image_auth_file = ""
# The command to run to have a container stay in the paused state.
# This option supports live configuration reload.
pause_command = "/pause"
# Path to the file which decides what sort of policy we use when deciding
@ -263,6 +289,6 @@ image_volumes = "mkdir"
network_dir = "/etc/cni/net.d/"
# Paths to directories where CNI plugin binaries are located.
plugin_dir = [
plugin_dirs = [
"/opt/cni/bin/",
]

View File

@ -11,7 +11,7 @@ the host PATH:
* [KVM2](#kvm2-driver)
* [Hyperkit](#hyperkit-driver)
* [HyperV](#hyperv-driver)
* [Hyper-V](#hyper-v-driver)
* [VMware](#vmware-unified-driver)
* [Parallels](#parallels-driver)
Make sure you are running the latest version of your driver.
docker-machine-driver-hyperkit version
```
## HyperV driver
## Hyper-V driver
Hyper-v users may need to create a new external network switch as described [here](https://docs.docker.com/machine/drivers/hyper-v/). This step may prevent a problem in which `minikube start` hangs indefinitely, unable to ssh into the minikube virtual machine. In this add, add the `--hyperv-virtual-switch=switch-name` argument to the `minikube start` command.
Hyper-V users will need to create a new external network switch as described [here](https://docs.docker.com/machine/drivers/hyper-v/). This step may prevent a problem in which `minikube start` hangs indefinitely, unable to ssh into the minikube virtual machine. In this case, add the `--hyperv-virtual-switch=switch-name` argument to the `minikube start` command.
On some machines, having **dynamic memory management** turned on for the minikube VM can cause problems of unexpected and random restarts which manifests itself in simply losing the connection to the cluster, after which `minikube status` would simply state `stopped`. Machine restarts are caused due to following Hyper-V error: `The dynamic memory balancer could not add memory to the virtual machine 'minikube' because its configured maximum has been reached`. **Solution**: turned the dynamic memory management in hyper-v settings off (and allocate a fixed amount of memory to the machine).
Older Hyper-V VMs may have **dynamic memory management** enabled, which can cause unexpected and random restarts that manifest as a lost connection to the cluster, after which `minikube status` simply reports `stopped`. **Solution**: run `minikube delete` to delete the old VM.
To use the driver:

View File

@ -37,7 +37,9 @@ cat Makefile | grep "VERSION_MINOR ?=" | grep $VERSION_MINOR
cat Makefile | grep "VERSION_BUILD ?=" | grep $VERSION_BUILD
# Build and upload
BUILD_IN_DOCKER=y make -j 16 all out/minikube-installer.exe out/minikube_${DEB_VERSION}.deb out/minikube-${RPM_VERSION}.rpm
BUILD_IN_DOCKER=y make -j 16 all out/minikube-installer.exe \
out/minikube_${DEB_VERSION}.deb out/minikube-${RPM_VERSION}.rpm \
out/docker-machine-driver-kvm2_${DEB_VERSION}.deb out/docker-machine-driver-kvm2-${RPM_VERSION}.rpm
make checksum
gsutil -m cp out/* gs://$BUCKET/releases/$TAGNAME/

View File

@ -20,11 +20,12 @@
https://k8s-testgrid.appspot.com
"""
from __future__ import print_function
import os, sys, json, re, argparse, calendar, time, subprocess, shlex
def get_classname(test_script):
""" parse out the test classname from the full path of the test script"""
classname = os.path.basename(test).split('.')[0]
classname = os.path.basename(test_script).split('.')[0]
return classname
def write_results(outdir, started, finished, test_results):
@ -86,8 +87,8 @@ def upload_results(outdir, test_script, buildnum, bucket):
classname = get_classname(test_script)
args = shlex.split("gsutil cp -R gcs_out/ gs://%s/logs/%s/%s" % (bucket, classname, buildnum))
p = subprocess.Popen(args, stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in p.stdout:
print line
for line in p.stdout:
print(line)
def run_tests(test_script, log_path, exit_status, started, finished, test_results):
""" execute the test script, grab the start time, finish time, build logs and exit status
@ -109,9 +110,9 @@ def run_tests(test_script, log_path, exit_status, started, finished, test_result
classname = get_classname(test_script)
build_log_file = open(log_path, 'w')
p = subprocess.Popen(['bash','-x',test_script], stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
for line in p.stdout:
for line in p.stdout:
build_log_file.write(line)
print line.rstrip()
print(line.rstrip())
if '--- PASS' in line:
match = re.match('.*--- PASS: ([^ ]+) \(([0-9.]+)s\)', line)
(name, seconds) = match.group(1, 2)

View File

@ -0,0 +1,12 @@
Package: docker-machine-driver-kvm2
Version: --VERSION--
Section: base
Priority: optional
Architecture: amd64
Depends: libvirt0 (>= 1.3.1)
Recommends: minikube
Maintainer: Thomas Strömberg <t+minikube@stromberg.org>
Description: Machine driver for KVM
minikube uses Docker Machine to manage the Kubernetes VM so it benefits
from the driver plugin architecture that Docker Machine uses to provide
a consistent way to manage various VM providers.

View File

@ -4,9 +4,9 @@ Section: base
Priority: optional
Architecture: amd64
Recommends: virtualbox
Maintainer: Aaron Prindle <aaprindle@gmail.com>
Maintainer: Thomas Strömberg <t+minikube@stromberg.org>
Description: Minikube
Minikube is a tool that makes it easy to run Kubernetes locally.
Minikube runs a single-node Kubernetes cluster inside a VM on your
minikube is a tool that makes it easy to run Kubernetes locally.
minikube runs a single-node Kubernetes cluster inside a VM on your
laptop for users looking to try out Kubernetes or develop with it
day-to-day.

View File

@ -0,0 +1,29 @@
Name: docker-machine-driver-kvm2
Version: --VERSION--
Release: 0
Summary: Machine driver for KVM
License: ASL 2.0
Group: Development/Tools
URL: https://github.com/kubernetes/minikube
#Requires: <determined automatically by rpm>
# Needed for older versions of RPM
BuildRoot: %{_tmppath}%{name}-buildroot
%description
Minikube uses Docker Machine to manage the Kubernetes VM so it benefits
from the driver plugin architecture that Docker Machine uses to provide
a consistent way to manage various VM providers.
%prep
mkdir -p %{name}-%{version}
cd %{name}-%{version}
cp --OUT--/docker-machine-driver-kvm2 .
%install
cd %{name}-%{version}
mkdir -p %{buildroot}%{_bindir}
install -m 755 docker-machine-driver-kvm2 %{buildroot}%{_bindir}/%{name}
%files
%{_bindir}/%{name}

View File

@ -47,6 +47,8 @@ var blacklist = []string{
"opt %s",
}
const ErrMapFile string = "pkg/minikube/problem/err_map.go"
// state is a struct that represents the current state of the extraction process
type state struct {
// The list of functions to check for
@ -112,16 +114,6 @@ func setParentFunc(e *state, f string) {
// TranslatableStrings finds all strings that need to be translated in paths and prints them out to all json files in output
func TranslatableStrings(paths []string, functions []string, output string) error {
cwd, err := os.Getwd()
if err != nil {
return errors.Wrap(err, "Getting current working directory")
}
if strings.Contains(cwd, "cmd") {
fmt.Println("Run extract.go from the minikube root directory.")
os.Exit(1)
}
e, err := newExtractor(functions)
if err != nil {
@ -173,6 +165,10 @@ func inspectFile(e *state) error {
return err
}
if e.filename == ErrMapFile {
return extractAdvice(file, e)
}
ast.Inspect(file, func(x ast.Node) bool {
if fi, ok := x.(*ast.File); ok {
e.currentPackage = fi.Name.String()
@ -394,3 +390,30 @@ func addParentFuncToList(e *state) {
e.fs.Push(e.parentFunc)
}
}
// extractAdvice specifically extracts Advice strings in err_map.go, since they don't conform to our normal translatable string format.
func extractAdvice(f ast.Node, e *state) error {
ast.Inspect(f, func(x ast.Node) bool {
// We want the "Advice: <advice string>" key-value pair
// First make sure we're looking at a kvp
kvp, ok := x.(*ast.KeyValueExpr)
if !ok {
return true
}
// Now make sure we're looking at an Advice kvp
i, ok := kvp.Key.(*ast.Ident)
if !ok {
return true
}
if i.Name == "Advice" {
// At this point we know the value in the kvp is guaranteed to be a string
advice, _ := kvp.Value.(*ast.BasicLit)
addStringToList(advice.Value, e)
}
return true
})
return nil
}
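`extractAdvice` walks the AST of `err_map.go` looking for `Advice:` key-value pairs rather than function calls. A minimal, self-contained sketch of that walk over an inline snippet (the `src` literal below is hypothetical, not the real err_map.go):

```go
package main

import (
	"fmt"
	"go/ast"
	"go/parser"
	"go/token"
)

const src = `package problem

var maps = map[string]Problem{
	"APISERVER_TIMEOUT": {
		Advice: "Run 'minikube delete'. If the problem persists, check your proxy or firewall configuration",
	},
}`

func main() {
	fset := token.NewFileSet()
	file, err := parser.ParseFile(fset, "err_map.go", src, 0)
	if err != nil {
		panic(err)
	}
	ast.Inspect(file, func(x ast.Node) bool {
		// Only key-value pairs inside composite literals are interesting.
		kvp, ok := x.(*ast.KeyValueExpr)
		if !ok {
			return true
		}
		key, ok := kvp.Key.(*ast.Ident)
		if !ok || key.Name != "Advice" {
			return true
		}
		// The value of an Advice field is a string literal in err_map.go.
		if lit, ok := kvp.Value.(*ast.BasicLit); ok {
			fmt.Println("found advice string:", lit.Value)
		}
		return true
	})
}
```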

View File

@ -16,15 +16,24 @@
"'none' driver does not support 'minikube docker-env' command": "",
"'none' driver does not support 'minikube mount' command": "",
"'none' driver does not support 'minikube ssh' command": "",
"A firewall is blocking Docker within the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "",
"A firewall is interfering with minikube's ability to make outgoing HTTPS requests. You may need to change the value of the HTTPS_PROXY environment variable.": "",
"A firewall is likely blocking minikube from reaching the internet. You may need to configure minikube to use a proxy.": "",
"Advice: %s": "",
"Alternatively, you may delete the existing VM using `minikube delete -p %s`": "",
"Cannot find directory %s for mount": "",
"Check that minikube is running and that you have specified the correct namespace (-n flag) if required.": "",
"Check that your --kubernetes-version has a leading 'v'. For example: 'v1.1.14'": "",
"Configure an external network switch following the official documentation, then add `--hyperv-virtual-switch=\u003cswitch-name\u003e` to `minikube start`": "",
"Configuring environment for Kubernetes {{.k8sVersion}} on {{.runtime}} {{.runtimeVersion}}": "Configurant l'environment pour Kubernetes {{.k8sVersion}} sur {{.runtime}} {{.runtimeVersion}}",
"Configuring local host environment ...": "",
"Creating %s VM (CPUs=%d, Memory=%dMB, Disk=%dMB) ...": "Créant un VM %s (CPUs=%d, Mémoire=%dMB, Disque=%dMB)",
"Creating mount {{.name}} ...": "",
"Deleting %q from %s ...": "",
"Disable Hyper-V when you want to run VirtualBox to boot the VM": "",
"Disable dynamic memory in your VM manager, or pass in a larger --memory value": "",
"Disable real-time anti-virus software, reboot, and reinstall VirtualBox if the problem continues.": "",
"Docker inside the VM is unavailable. Try running 'minikube delete' to reset the VM.": "",
"Documentation: %s": "",
"Done! kubectl is now configured to use {{.name}}": "Fini! kubectl est maintenant configuré pour utiliser {{.name}}.",
"Download complete!": "",
@ -34,6 +43,7 @@
"ERROR creating `registry-creds-ecr` secret: %v": "",
"ERROR creating `registry-creds-gcr` secret: %v": "",
"Enabling dashboard ...": "",
"Error checking driver version: %v": "",
"Error creating list template": "",
"Error creating minikube directory": "",
"Error creating status template": "",
@ -61,6 +71,8 @@
"Error killing mount process": "",
"Error loading api": "",
"Error opening service": "",
"Error parsing minukube version: %v": "",
"Error parsing vmDriver version: %v": "",
"Error reading %s: %v": "",
"Error restarting cluster": "",
"Error setting shell variables": "",
@ -103,8 +115,14 @@
"For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "",
"For more information, see:": "",
"Found network options:": "",
"Have you set up libvirt correctly?": "",
"If the above advice does not help, please let us know: ": "",
"If using the none driver, ensure that systemctl is installed": "",
"Ignoring --vm-driver=%s, as the existing %q VM was created using the %s driver.": "",
"In some environments, this message is incorrect. Try 'minikube start --no-vtx-check'": "",
"Install VirtualBox, ensure that VBoxManage is executable and in path, or select an alternative value for --vm-driver": "",
"Install the latest kvm2 driver and run 'virt-host-validate'": "",
"Install the latest minikube hyperkit driver, and run 'minikube delete'": "",
"Invalid size passed in argument: %v": "",
"IsEnabled failed": "",
"Kubernetes downgrade is not supported, will continue to use {{.version}}": "",
@ -119,20 +137,32 @@
"Opening %s in your default browser...": "",
"Opening kubernetes service %s/%s in default browser...": "",
"Options: %s": "",
"Please check your BIOS, and ensure that you are running without HyperV or other nested virtualization that may interfere": "",
"Please don't run minikube as root or with 'sudo' privileges. It isn't necessary with {{.driver}} driver.": "",
"Please enter a value:": "",
"Please install the minikube kvm2 VM driver, or select an alternative --vm-driver": "",
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "",
"Please run with sudo. the vm-driver %q requires sudo.": "",
"Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "",
"Please upgrade the 'docker-machine-driver-kvm2'. %s": "",
"Powering off %q via SSH ...": "",
"Problems detected in %q:": "",
"Pulling images ...": "Extrayant les images ... ",
"Re-run 'minikube start' with --alsologtostderr -v=8 to see the VM driver error message": "",
"Re-using the currently running %s VM for %q ...": "",
"Reboot to complete VirtualBox installation, and verify that VirtualBox is not blocked by your system": "",
"Rebuild libvirt with virt-network support": "",
"Reinstall VirtualBox and verify that it is not blocked: System Preferences -\u003e Security \u0026 Privacy -\u003e General -\u003e Some system software was blocked from loading": "",
"Related issues:": "",
"Relaunching Kubernetes {{.version}} using {{.bootstrapper}} ... ": "",
"Requested disk size (%dMB) is less than minimum of (%dMB)": "",
"Requested memory allocation (%dMB) is less than the minimum allowed of %dMB": "",
"Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "",
"Restarting existing %s VM for %q ...": "",
"Run 'minikube delete' to delete the stale VM": "",
"Run 'minikube delete'. If the problem persists, check your proxy or firewall configuration": "",
"Run 'sudo modprobe vboxdrv' and reinstall VirtualBox if it fails.": "",
"Run minikube from the C: drive.": "",
"Set failed": "",
"Setting profile failed": "",
"Skipped switching kubectl context for %s , because --keep-context": "",
@ -140,14 +170,20 @@
"Sorry, completion support is not yet implemented for %q": "",
"Sorry, the kubeadm.%s parameter is currently not supported by --extra-config": "",
"Sorry, url provided with --registry-mirror flag is invalid %q": "",
"Specify --kubernetes-version in v\u003cmajor\u003e.\u003cminor.\u003cbuild\u003e form. example: 'v1.1.14'": "",
"Specify an alternate --host-only-cidr value, such as 172.16.0.1/24": "",
"Stopping %q in %s ...": "",
"Successfully mounted %s to %s": "",
"Target directory %q must be an absolute path": "",
"The %q cluster has been deleted.": "",
"The 'docker-machine-driver-kvm2' version is old. Please consider upgrading. %s": "",
"The 'none' driver provides limited isolation and may reduce system security and reliability.": "",
"The KVM driver is unable to resurrect this old VM. Please run `minikube delete` to delete it and try again.": "",
"The VM driver exited with an error, and may be corrupt. Run 'minikube start' with --alsologtostderr -v=8 to see the error": "",
"The docker host is currently not running": "",
"The docker service is currently not active": "",
"The kvm driver is deprecated and support for it will be removed in a future release.\n\t\t\t\tPlease consider switching to the kvm2 driver, which is intended to replace the kvm driver.\n\t\t\t\tSee https://github.com/kubernetes/minikube/blob/master/docs/drivers.md#kvm2-driver for more information.\n\t\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "",
"The minikube VM is offline. Please run 'minikube start' to start it again.": "",
"The value passed to --format is invalid": "",
"The value passed to --format is invalid: %s": "",
"The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\t\tSee https://github.com/kubernetes/minikube/blob/master/docs/drivers.md#vmware-unified-driver for more information.\n\t\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "",
@ -178,10 +214,12 @@
"Uninstalling Kubernetes %s using %s ...": "",
"Unmounting %s ...": "",
"Update server returned an empty list": "",
"Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "",
"Usage: minikube completion SHELL": "",
"Userspace file server is shutdown": "",
"Userspace file server: ": "",
"Using image repository {{.name}}": "",
"Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "",
"Verifying dashboard health ...": "",
"Verifying proxy health ...": "",
"Verifying:": "Vérifiant:",
@ -191,6 +229,8 @@
"Waiting for SSH access ...": "Attendant l'accès SSH ...",
"You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP (%s). Please see https://github.com/kubernetes/minikube/blob/master/docs/http_proxy.md for more details": "",
"You must specify a service name": "",
"Your host does not support KVM virtualization. Ensure that qemu-kvm is installed, and run 'virt-host-validate' to debug the problem": "",
"Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "",
"addon '%s' is currently not enabled.\nTo enable this addon run:\nminikube addons enable %s": "",
"addon '%s' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "",
"addon list failed": "",
@ -211,12 +251,14 @@
"kubectl proxy": "",
"logdir set failed": "",
"minikube is not running, so the service cannot be accessed": "",
"minikube is unable to access the Google Container Registry. You may need to configure it to use a HTTP proxy.": "",
"minikube profile was successfully set to %s": "",
"minikube will upgrade the local cluster from Kubernetes {{.old}} to {{.new}}": "",
"minikube {{.version}} on {{.os}} ({{.arch}})": "minikube {{.version}} sur {{.os}} ({{.arch}})",
"mount argument %q must be in form: \u003csource directory\u003e:\u003ctarget directory\u003e": "",
"mount failed": "",
"need to relocate them. For example, to overwrite your own settings:": "",
"service %s/%s has no node port": "",
"stat failed": "",
"unable to bind flags": "",
"unable to set logtostderr": "",

View File

@ -16,15 +16,24 @@
"'none' driver does not support 'minikube docker-env' command": "",
"'none' driver does not support 'minikube mount' command": "",
"'none' driver does not support 'minikube ssh' command": "",
"A firewall is blocking Docker within the minikube VM from reaching the internet. You may need to configure it to use a proxy.": "",
"A firewall is interfering with minikube's ability to make outgoing HTTPS requests. You may need to change the value of the HTTPS_PROXY environment variable.": "",
"A firewall is likely blocking minikube from reaching the internet. You may need to configure minikube to use a proxy.": "",
"Advice: %s": "",
"Alternatively, you may delete the existing VM using `minikube delete -p %s`": "",
"Cannot find directory %s for mount": "",
"Check that minikube is running and that you have specified the correct namespace (-n flag) if required.": "",
"Check that your --kubernetes-version has a leading 'v'. For example: 'v1.1.14'": "",
"Configure an external network switch following the official documentation, then add `--hyperv-virtual-switch=\u003cswitch-name\u003e` to `minikube start`": "",
"Configuring environment for Kubernetes {{.k8sVersion}} on {{.runtime}} {{.runtimeVersion}}": "开始为Kubernetes {{.k8sVersion}}{{.runtime}} {{.runtimeVersion}} 配置环境变量",
"Configuring local host environment ...": "",
"Creating %s VM (CPUs=%d, Memory=%dMB, Disk=%dMB) ...": "正在创建%s虚拟机CPU=%d内存=%dMB磁盘=%dMB...",
"Creating mount {{.name}} ...": "",
"Deleting %q from %s ...": "",
"Disable Hyper-V when you want to run VirtualBox to boot the VM": "",
"Disable dynamic memory in your VM manager, or pass in a larger --memory value": "",
"Disable real-time anti-virus software, reboot, and reinstall VirtualBox if the problem continues.": "",
"Docker inside the VM is unavailable. Try running 'minikube delete' to reset the VM.": "",
"Documentation: %s": "",
"Done! kubectl is now configured to use {{.name}}": "完成kubectl已经配置至{{.name}}",
"Download complete!": "",
@ -34,6 +43,7 @@
"ERROR creating `registry-creds-ecr` secret: %v": "",
"ERROR creating `registry-creds-gcr` secret: %v": "",
"Enabling dashboard ...": "",
"Error checking driver version: %v": "",
"Error creating list template": "",
"Error creating minikube directory": "",
"Error creating status template": "",
@ -61,6 +71,8 @@
"Error killing mount process": "",
"Error loading api": "",
"Error opening service": "",
"Error parsing minukube version: %v": "",
"Error parsing vmDriver version: %v": "",
"Error reading %s: %v": "",
"Error restarting cluster": "",
"Error setting shell variables": "",
@ -103,8 +115,14 @@
"For best results, install kubectl: https://kubernetes.io/docs/tasks/tools/install-kubectl/": "",
"For more information, see:": "",
"Found network options:": "",
"Have you set up libvirt correctly?": "",
"If the above advice does not help, please let us know: ": "",
"If using the none driver, ensure that systemctl is installed": "",
"Ignoring --vm-driver=%s, as the existing %q VM was created using the %s driver.": "",
"In some environments, this message is incorrect. Try 'minikube start --no-vtx-check'": "",
"Install VirtualBox, ensure that VBoxManage is executable and in path, or select an alternative value for --vm-driver": "",
"Install the latest kvm2 driver and run 'virt-host-validate'": "",
"Install the latest minikube hyperkit driver, and run 'minikube delete'": "",
"Invalid size passed in argument: %v": "",
"IsEnabled failed": "",
"Kubernetes downgrade is not supported, will continue to use {{.version}}": "",
@ -119,20 +137,32 @@
"Opening %s in your default browser...": "",
"Opening kubernetes service %s/%s in default browser...": "",
"Options: %s": "",
"Please check your BIOS, and ensure that you are running without HyperV or other nested virtualization that may interfere": "",
"Please don't run minikube as root or with 'sudo' privileges. It isn't necessary with {{.driver}} driver.": "",
"Please enter a value:": "",
"Please install the minikube kvm2 VM driver, or select an alternative --vm-driver": "",
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "",
"Please run with sudo. the vm-driver %q requires sudo.": "",
"Please specify the directory to be mounted: \n\tminikube mount \u003csource directory\u003e:\u003ctarget directory\u003e (example: \"/host-home:/vm-home\")": "",
"Please upgrade the 'docker-machine-driver-kvm2'. %s": "",
"Powering off %q via SSH ...": "",
"Problems detected in %q:": "",
"Pulling images ...": "拉取镜像 ...",
"Re-run 'minikube start' with --alsologtostderr -v=8 to see the VM driver error message": "",
"Re-using the currently running %s VM for %q ...": "",
"Reboot to complete VirtualBox installation, and verify that VirtualBox is not blocked by your system": "",
"Rebuild libvirt with virt-network support": "",
"Reinstall VirtualBox and verify that it is not blocked: System Preferences -\u003e Security \u0026 Privacy -\u003e General -\u003e Some system software was blocked from loading": "",
"Related issues:": "",
"Relaunching Kubernetes {{.version}} using {{.bootstrapper}} ... ": "",
"Requested disk size (%dMB) is less than minimum of (%dMB)": "",
"Requested memory allocation (%dMB) is less than the minimum allowed of %dMB": "",
"Requested memory allocation ({{.memory}}MB) is less than the default memory allocation of {{.default}}MB. Beware that minikube might not work correctly or crash unexpectedly.": "",
"Restarting existing %s VM for %q ...": "",
"Run 'minikube delete' to delete the stale VM": "",
"Run 'minikube delete'. If the problem persists, check your proxy or firewall configuration": "",
"Run 'sudo modprobe vboxdrv' and reinstall VirtualBox if it fails.": "",
"Run minikube from the C: drive.": "",
"Set failed": "",
"Setting profile failed": "",
"Skipped switching kubectl context for %s , because --keep-context": "",
@ -140,14 +170,20 @@
"Sorry, completion support is not yet implemented for %q": "",
"Sorry, the kubeadm.%s parameter is currently not supported by --extra-config": "",
"Sorry, url provided with --registry-mirror flag is invalid %q": "",
"Specify --kubernetes-version in v\u003cmajor\u003e.\u003cminor.\u003cbuild\u003e form. example: 'v1.1.14'": "",
"Specify an alternate --host-only-cidr value, such as 172.16.0.1/24": "",
"Stopping %q in %s ...": "",
"Successfully mounted %s to %s": "",
"Target directory %q must be an absolute path": "",
"The %q cluster has been deleted.": "",
"The 'docker-machine-driver-kvm2' version is old. Please consider upgrading. %s": "",
"The 'none' driver provides limited isolation and may reduce system security and reliability.": "",
"The KVM driver is unable to resurrect this old VM. Please run `minikube delete` to delete it and try again.": "",
"The VM driver exited with an error, and may be corrupt. Run 'minikube start' with --alsologtostderr -v=8 to see the error": "",
"The docker host is currently not running": "",
"The docker service is currently not active": "",
"The kvm driver is deprecated and support for it will be removed in a future release.\n\t\t\t\tPlease consider switching to the kvm2 driver, which is intended to replace the kvm driver.\n\t\t\t\tSee https://github.com/kubernetes/minikube/blob/master/docs/drivers.md#kvm2-driver for more information.\n\t\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "",
"The minikube VM is offline. Please run 'minikube start' to start it again.": "",
"The value passed to --format is invalid": "",
"The value passed to --format is invalid: %s": "",
"The vmwarefusion driver is deprecated and support for it will be removed in a future release.\n\t\t\t\tPlease consider switching to the new vmware unified driver, which is intended to replace the vmwarefusion driver.\n\t\t\t\tSee https://github.com/kubernetes/minikube/blob/master/docs/drivers.md#vmware-unified-driver for more information.\n\t\t\t\tTo disable this message, run [minikube config set ShowDriverDeprecationNotification false]": "",
@ -178,10 +214,12 @@
"Uninstalling Kubernetes %s using %s ...": "",
"Unmounting %s ...": "",
"Update server returned an empty list": "",
"Upgrade to QEMU v3.1.0+, run 'virt-host-validate', or ensure that you are not running in a nested VM environment.": "",
"Usage: minikube completion SHELL": "",
"Userspace file server is shutdown": "",
"Userspace file server: ": "",
"Using image repository {{.name}}": "",
"Verify that your HTTP_PROXY and HTTPS_PROXY environment variables are set correctly.": "",
"Verifying dashboard health ...": "",
"Verifying proxy health ...": "",
"Verifying:": "正在验证:",
@ -191,6 +229,8 @@
"Waiting for SSH access ...": "",
"You appear to be using a proxy, but your NO_PROXY environment does not include the minikube IP (%s). Please see https://github.com/kubernetes/minikube/blob/master/docs/http_proxy.md for more details": "",
"You must specify a service name": "",
"Your host does not support KVM virtualization. Ensure that qemu-kvm is installed, and run 'virt-host-validate' to debug the problem": "",
"Your host is failing to route packets to the minikube VM. If you have VPN software, try turning it off or configuring it so that it does not re-route traffic to the VM IP. If not, check your VM environment routing options.": "",
"addon '%s' is currently not enabled.\nTo enable this addon run:\nminikube addons enable %s": "",
"addon '%s' is not a valid addon packaged with minikube.\nTo see the list of available addons run:\nminikube addons list": "",
"addon list failed": "",
@ -211,12 +251,14 @@
"kubectl proxy": "",
"logdir set failed": "",
"minikube is not running, so the service cannot be accessed": "",
"minikube is unable to access the Google Container Registry. You may need to configure it to use a HTTP proxy.": "",
"minikube profile was successfully set to %s": "",
"minikube will upgrade the local cluster from Kubernetes {{.old}} to {{.new}}": "",
"minikube {{.version}} on {{.os}} ({{.arch}})": "您正在使用minikube {{.version}} 运行平台:{{.os}} ({{.arch}})",
"mount argument %q must be in form: \u003csource directory\u003e:\u003ctarget directory\u003e": "",
"mount failed": "",
"need to relocate them. For example, to overwrite your own settings:": "",
"service %s/%s has no node port": "",
"stat failed": "",
"unable to bind flags": "",
"unable to set logtostderr": "",