Merge branch 'master' into add_desc_node_to_minikube_logs

pull/7105/head
Prasad Katti 2020-03-21 12:27:17 -07:00
commit 376271afca
18 changed files with 449 additions and 191 deletions

View File

@@ -88,6 +88,11 @@ jobs:
docker info || true
docker version || true
docker ps || true
- name: install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Install gopogh
shell: bash
run: |
@@ -150,6 +155,11 @@ jobs:
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
needs: [build_minikube]
steps:
- name: install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Docker Info
shell: bash
run: |
@@ -218,6 +228,11 @@ jobs:
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
runs-on: ubuntu-16.04
steps:
- name: install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Install gopogh
shell: bash
run: |
@@ -280,6 +295,11 @@ jobs:
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
runs-on: ubuntu-18.04
steps:
- name: install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: Install gopogh
shell: bash
run: |
@@ -342,6 +362,11 @@ jobs:
SHELL: "/bin/bash" # To prevent https://github.com/kubernetes/minikube/issues/6643
runs-on: ubuntu-18.04
steps:
- name: install lz4
shell: bash
run: |
sudo apt-get update -qq
sudo apt-get -qq -y install liblz4-tool
- name: install podman
shell: bash
run: |

View File

@@ -17,6 +17,7 @@ limitations under the License.
package cmd
import (
"errors"
"fmt"
"net/url"
"os"
@@ -104,6 +105,11 @@ var serviceCmd = &cobra.Command{
urls, err := service.WaitForService(api, namespace, svc, serviceURLTemplate, serviceURLMode, https, wait, interval)
if err != nil {
var s *service.SVCNotFoundError
if errors.As(err, &s) {
exit.WithCodeT(exit.Data, `Service '{{.service}}' was not found in '{{.namespace}}' namespace.
You may select another namespace by using 'minikube service {{.service}} -n <namespace>'. Or list out all the services using 'minikube service list'`, out.V{"service": svc, "namespace": namespace})
}
exit.WithError("Error opening service", err)
}

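The hunk above leans on Go 1.13 error wrapping: WaitForService now returns a *service.SVCNotFoundError, and errors.As walks the error chain to decide whether to print the friendly "service not found" message. A minimal, self-contained sketch of that pattern follows; the type and helper here are local stand-ins, not the real minikube packages.

package main

import (
	"errors"
	"fmt"
)

// SVCNotFoundError mirrors the custom error type added to the service package.
type SVCNotFoundError struct {
	Err error
}

func (t SVCNotFoundError) Error() string {
	return "Service not found"
}

// waitForService is a stand-in that fails the way WaitForService does.
func waitForService(svc string) ([]string, error) {
	return nil, &SVCNotFoundError{Err: fmt.Errorf("no service %q", svc)}
}

func main() {
	_, err := waitForService("hello-node")
	var s *SVCNotFoundError
	if errors.As(err, &s) {
		// This is the branch the new serviceCmd code takes before exiting
		// with the "was not found in namespace" message.
		fmt.Println("service not found:", s.Err)
		return
	}
	if err != nil {
		fmt.Println("other error:", err)
	}
}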
View File

@@ -46,21 +46,18 @@ spec:
hostPath:
path: /dev
containers:
- image: "{{default "k8s.gcr.io" .ImageRepository}}/nvidia-gpu-device-plugin@sha256:0842734032018be107fa2490c98156992911e3e1f2a21e059ff0105b07dd8e9e"
command: ["/usr/bin/nvidia-gpu-device-plugin", "-logtostderr"]
- image: "nvidia/k8s-device-plugin:1.0.0-beta4"
command: ["/usr/bin/nvidia-device-plugin", "-logtostderr"]
name: nvidia-gpu-device-plugin
resources:
requests:
cpu: 50m
memory: 10Mi
limits:
cpu: 50m
memory: 10Mi
securityContext:
privileged: true
volumeMounts:
- name: device-plugin
mountPath: /device-plugin
mountPath: /var/lib/kubelet/device-plugins
- name: dev
mountPath: /dev
updateStrategy:

View File

@@ -46,7 +46,7 @@ spec:
value: kube-system
- name: TILLER_HISTORY_MAX
value: "0"
image: gcr.io/kubernetes-helm/tiller:v2.16.1
image: gcr.io/kubernetes-helm/tiller:v2.16.3
imagePullPolicy: IfNotPresent
livenessProbe:
failureThreshold: 3

View File

@@ -52,18 +52,18 @@ func main() {
}
mode := info.Mode()
re := regexp.MustCompile(`var DefaultKubernetesVersion = .*`)
f := re.ReplaceAllString(string(cf), "var DefaultKubernetesVersion = \""+v+"\"")
re := regexp.MustCompile(`DefaultKubernetesVersion = \".*`)
f := re.ReplaceAllString(string(cf), "DefaultKubernetesVersion = \""+v+"\"")
re = regexp.MustCompile(`var NewestKubernetesVersion = .*`)
f = re.ReplaceAllString(f, "var NewestKubernetesVersion = \""+v+"\"")
re = regexp.MustCompile(`NewestKubernetesVersion = \".*`)
f = re.ReplaceAllString(f, "NewestKubernetesVersion = \""+v+"\"")
if err := ioutil.WriteFile(constantsFile, []byte(f), mode); err != nil {
fmt.Println(err)
os.Exit(1)
}
testData := "../../pkg/minikube/bootstrapper/kubeadm/testdata"
testData := "../../pkg/minikube/bootstrapper/bsutil/testdata"
err = filepath.Walk(testData, func(path string, info os.FileInfo, err error) error {
if err != nil {

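The regular expressions above were tightened to anchor on the constant name and the opening quote, so the replacement matches the declarations whether or not they begin with var. A rough standalone sketch of the same search-and-replace step, using a made-up source snippet:

package main

import (
	"fmt"
	"regexp"
)

func main() {
	src := `const (
	DefaultKubernetesVersion = "v1.17.3"
	NewestKubernetesVersion = "v1.17.3"
)`
	v := "v1.18.0"

	// Match from the constant name through the rest of the line, as the
	// updated hack script does, then rewrite it with the new version.
	re := regexp.MustCompile(`DefaultKubernetesVersion = ".*`)
	out := re.ReplaceAllString(src, `DefaultKubernetesVersion = "`+v+`"`)

	re = regexp.MustCompile(`NewestKubernetesVersion = ".*`)
	out = re.ReplaceAllString(out, `NewestKubernetesVersion = "`+v+`"`)

	fmt.Println(out)
}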
View File

@@ -296,11 +296,11 @@ var Addons = map[string]*Addon{
}, false, "nvidia-driver-installer"),
"nvidia-gpu-device-plugin": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/gpu/nvidia-gpu-device-plugin.yaml.tmpl",
"deploy/addons/gpu/nvidia-gpu-device-plugin.yaml",
vmpath.GuestAddonsDir,
"nvidia-gpu-device-plugin.yaml",
"0640",
true),
false),
}, false, "nvidia-gpu-device-plugin"),
"logviewer": NewAddon([]*BinAsset{
MustBinAsset(

View File

@@ -50,6 +50,11 @@ func TransferBinaries(cfg config.KubernetesConfig, c command.Runner) error {
return err
}
// stop kubelet to avoid "Text File Busy" error
if _, err := c.RunCmd(exec.Command("/bin/bash", "-c", "pgrep kubelet && sudo systemctl stop kubelet")); err != nil {
glog.Warningf("unable to stop kubelet: %s", err)
}
var g errgroup.Group
for _, name := range constants.KubernetesReleaseBinaries {
name := name

View File

@@ -40,9 +40,9 @@ const (
// ConfigFileAssets returns configuration file assets
func ConfigFileAssets(cfg config.KubernetesConfig, kubeadm []byte, kubelet []byte, kubeletSvc []byte, defaultCNIConfig []byte) []assets.CopyableFile {
fs := []assets.CopyableFile{
assets.NewMemoryAssetTarget(kubeadm, KubeadmYamlPath, "0640"),
assets.NewMemoryAssetTarget(kubelet, KubeletSystemdConfFile, "0644"),
assets.NewMemoryAssetTarget(kubeletSvc, KubeletServiceFile, "0644"),
assets.NewMemoryAssetTarget(kubeadm, KubeadmYamlPath+".new", "0640"),
assets.NewMemoryAssetTarget(kubelet, KubeletSystemdConfFile+".new", "0644"),
assets.NewMemoryAssetTarget(kubeletSvc, KubeletServiceFile+".new", "0644"),
}
// Copy the default CNI config (k8s.conf), so that kubelet can successfully
// start a Pod in the case a user hasn't manually installed any CNI plugin

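Staging the kubeadm and kubelet configs under .new paths lets later steps diff them against what is already on the node and only move them into place (and restart services) when something actually changed; the reloadKubelet change further down does exactly that. A rough sketch of the check-then-swap idea, using plain os/exec and placeholder paths rather than minikube's command.Runner:

package main

import (
	"fmt"
	"os/exec"
)

// syncConfig installs conf.new over conf and restarts the unit, but only
// when the staged copy differs from what is already in place.
func syncConfig(conf, unit string) error {
	// `diff -u` exits non-zero when the files differ (or conf.new is missing).
	check := exec.Command("/bin/bash", "-c",
		fmt.Sprintf("diff -u %s %s.new", conf, conf))
	if err := check.Run(); err == nil {
		fmt.Println("config unchanged, leaving", unit, "alone")
		return nil
	}

	swap := exec.Command("/bin/bash", "-c",
		fmt.Sprintf("sudo mv %s.new %s && sudo systemctl daemon-reload && sudo systemctl restart %s", conf, conf, unit))
	if out, err := swap.CombinedOutput(); err != nil {
		return fmt.Errorf("swapping %s: %v: %s", conf, err, out)
	}
	return nil
}

func main() {
	// Illustrative path and unit only; the real code goes through bsutil
	// constants and the cluster's command.Runner instead of local exec.
	if err := syncConfig("/lib/systemd/system/kubelet.service", "kubelet"); err != nil {
		fmt.Println(err)
	}
}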
View File

@@ -30,6 +30,7 @@ import (
"github.com/docker/machine/libmachine/state"
"github.com/golang/glog"
core "k8s.io/api/core/v1"
meta "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/wait"
"k8s.io/client-go/kubernetes"
@@ -79,6 +80,68 @@ func apiServerPID(cr command.Runner) (int, error) {
return strconv.Atoi(s)
}
// ExpectedComponentsRunning returns whether or not all expected components are running
func ExpectedComponentsRunning(cs *kubernetes.Clientset) error {
expected := []string{
"kube-dns", // coredns
"etcd",
"kube-apiserver",
"kube-controller-manager",
"kube-proxy",
"kube-scheduler",
}
found := map[string]bool{}
pods, err := cs.CoreV1().Pods("kube-system").List(meta.ListOptions{})
if err != nil {
return err
}
for _, pod := range pods.Items {
glog.Infof("found pod: %s", podStatusMsg(pod))
if pod.Status.Phase != core.PodRunning {
continue
}
for k, v := range pod.ObjectMeta.Labels {
if k == "component" || k == "k8s-app" {
found[v] = true
}
}
}
missing := []string{}
for _, e := range expected {
if !found[e] {
missing = append(missing, e)
}
}
if len(missing) > 0 {
return fmt.Errorf("missing components: %v", strings.Join(missing, ", "))
}
return nil
}
// podStatusMsg returns a human-readable pod status, for generating debug status
func podStatusMsg(pod core.Pod) string {
var sb strings.Builder
sb.WriteString(fmt.Sprintf("%q [%s] %s", pod.ObjectMeta.GetName(), pod.ObjectMeta.GetUID(), pod.Status.Phase))
for i, c := range pod.Status.Conditions {
if c.Reason != "" {
if i == 0 {
sb.WriteString(": ")
} else {
sb.WriteString(" / ")
}
sb.WriteString(fmt.Sprintf("%s:%s", c.Type, c.Reason))
}
if c.Message != "" {
sb.WriteString(fmt.Sprintf(" (%s)", c.Message))
}
}
return sb.String()
}
// WaitForSystemPods verifies that the essential pods for running Kubernetes are running
func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg config.ClusterConfig, cr command.Runner, client *kubernetes.Clientset, start time.Time, timeout time.Duration) error {
glog.Info("waiting for kube-system pods to appear ...")
@@ -100,6 +163,10 @@ func WaitForSystemPods(r cruntime.Manager, bs bootstrapper.Bootstrapper, cfg con
return false, nil
}
glog.Infof("%d kube-system pods found", len(pods.Items))
for _, pod := range pods.Items {
glog.Infof(podStatusMsg(pod))
}
if len(pods.Items) < 2 {
return false, nil
}
@@ -160,7 +227,7 @@ func APIServerStatus(cr command.Runner, ip net.IP, port int) (state.State, error
pid, err := apiServerPID(cr)
if err != nil {
glog.Warningf("unable to get apiserver pid: %v", err)
glog.Warningf("stopped: unable to get apiserver pid: %v", err)
return state.Stopped, nil
}
@@ -206,6 +273,7 @@ func apiServerHealthz(ip net.IP, port int) (state.State, error) {
resp, err := client.Get(url)
// Connection refused, usually.
if err != nil {
glog.Infof("stopped: %s: %v", url, err)
return state.Stopped, nil
}
if resp.StatusCode == http.StatusUnauthorized {

View File

@@ -17,6 +17,7 @@ limitations under the License.
package bootstrapper
import (
"crypto/sha1"
"encoding/pem"
"fmt"
"io/ioutil"
@@ -25,9 +26,11 @@ import (
"os/exec"
"path"
"path/filepath"
"sort"
"strings"
"github.com/golang/glog"
"github.com/otiai10/copy"
"github.com/pkg/errors"
"k8s.io/apimachinery/pkg/runtime"
"k8s.io/client-go/tools/clientcmd/api"
@@ -40,63 +43,50 @@ import (
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/vmpath"
"k8s.io/minikube/pkg/util"
"k8s.io/minikube/pkg/util/lock"
"github.com/juju/mutex"
)
var (
certs = []string{
"ca.crt", "ca.key", "apiserver.crt", "apiserver.key", "proxy-client-ca.crt",
"proxy-client-ca.key", "proxy-client.crt", "proxy-client.key",
}
)
// SetupCerts gets the generated credentials required to talk to the APIServer.
func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) error {
localPath := localpath.MiniPath()
func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node) ([]assets.CopyableFile, error) {
localPath := localpath.Profile(k8s.ClusterName)
glog.Infof("Setting up %s for IP: %s\n", localPath, n.IP)
// WARNING: This function was not designed for multiple profiles, so it is VERY racey:
//
// It updates a shared certificate file and uploads it to the apiserver before launch.
//
// If another process updates the shared certificate, it's invalid.
// TODO: Instead of racey manipulation of a shared certificate, use per-profile certs
spec := lock.PathMutexSpec(filepath.Join(localPath, "certs"))
glog.Infof("acquiring lock: %+v", spec)
releaser, err := mutex.Acquire(spec)
ccs, err := generateSharedCACerts()
if err != nil {
return errors.Wrapf(err, "unable to acquire lock for %+v", spec)
return nil, errors.Wrap(err, "shared CA certs")
}
defer releaser.Release()
if err := generateCerts(k8s, n); err != nil {
return errors.Wrap(err, "Error generating certs")
xfer, err := generateProfileCerts(k8s, n, ccs)
if err != nil {
return nil, errors.Wrap(err, "profile certs")
}
xfer = append(xfer, ccs.caCert)
xfer = append(xfer, ccs.caKey)
xfer = append(xfer, ccs.proxyCert)
xfer = append(xfer, ccs.proxyKey)
copyableFiles := []assets.CopyableFile{}
for _, cert := range certs {
p := filepath.Join(localPath, cert)
for _, p := range xfer {
cert := filepath.Base(p)
perms := "0644"
if strings.HasSuffix(cert, ".key") {
perms = "0600"
}
certFile, err := assets.NewFileAsset(p, vmpath.GuestKubernetesCertsDir, cert, perms)
if err != nil {
return err
return nil, errors.Wrapf(err, "key asset %s", cert)
}
copyableFiles = append(copyableFiles, certFile)
}
caCerts, err := collectCACerts()
if err != nil {
return err
return nil, err
}
for src, dst := range caCerts {
certFile, err := assets.NewFileAsset(src, path.Dir(dst), path.Base(dst), "0644")
if err != nil {
return err
return nil, errors.Wrapf(err, "ca asset %s", src)
}
copyableFiles = append(copyableFiles, certFile)
@@ -114,11 +104,11 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node)
kubeCfg := api.NewConfig()
err = kubeconfig.PopulateFromSettings(kcs, kubeCfg)
if err != nil {
return errors.Wrap(err, "populating kubeconfig")
return nil, errors.Wrap(err, "populating kubeconfig")
}
data, err := runtime.Encode(latest.Codec, kubeCfg)
if err != nil {
return errors.Wrap(err, "encoding kubeconfig")
return nil, errors.Wrap(err, "encoding kubeconfig")
}
if n.ControlPlane {
@@ -128,46 +118,74 @@ func SetupCerts(cmd command.Runner, k8s config.KubernetesConfig, n config.Node)
for _, f := range copyableFiles {
if err := cmd.Copy(f); err != nil {
return errors.Wrapf(err, "Copy %s", f.GetAssetName())
return nil, errors.Wrapf(err, "Copy %s", f.GetAssetName())
}
}
if err := installCertSymlinks(cmd, caCerts); err != nil {
return errors.Wrapf(err, "certificate symlinks")
return nil, errors.Wrapf(err, "certificate symlinks")
}
return nil
return copyableFiles, nil
}
func generateCerts(k8s config.KubernetesConfig, n config.Node) error {
serviceIP, err := util.GetServiceClusterIP(k8s.ServiceCIDR)
if err != nil {
return errors.Wrap(err, "getting service cluster ip")
type CACerts struct {
caCert string
caKey string
proxyCert string
proxyKey string
}
// generateSharedCACerts generates CA certs shared among profiles, but only if missing
func generateSharedCACerts() (CACerts, error) {
globalPath := localpath.MiniPath()
cc := CACerts{
caCert: localpath.CACert(),
caKey: filepath.Join(globalPath, "ca.key"),
proxyCert: filepath.Join(globalPath, "proxy-client-ca.crt"),
proxyKey: filepath.Join(globalPath, "proxy-client-ca.key"),
}
localPath := localpath.MiniPath()
caCertPath := filepath.Join(localPath, "ca.crt")
caKeyPath := filepath.Join(localPath, "ca.key")
proxyClientCACertPath := filepath.Join(localPath, "proxy-client-ca.crt")
proxyClientCAKeyPath := filepath.Join(localPath, "proxy-client-ca.key")
caCertSpecs := []struct {
certPath string
keyPath string
subject string
}{
{ // client / apiserver CA
certPath: caCertPath,
keyPath: caKeyPath,
certPath: cc.caCert,
keyPath: cc.caKey,
subject: "minikubeCA",
},
{ // proxy-client CA
certPath: proxyClientCACertPath,
keyPath: proxyClientCAKeyPath,
certPath: cc.proxyCert,
keyPath: cc.proxyKey,
subject: "proxyClientCA",
},
}
for _, ca := range caCertSpecs {
if canRead(ca.certPath) && canRead(ca.keyPath) {
glog.Infof("skipping %s CA generation: %s", ca.subject, ca.keyPath)
continue
}
glog.Infof("generating %s CA: %s", ca.subject, ca.keyPath)
if err := util.GenerateCACert(ca.certPath, ca.keyPath, ca.subject); err != nil {
return cc, errors.Wrap(err, "generate ca cert")
}
}
return cc, nil
}
// generateProfileCerts generates profile certs for a profile
func generateProfileCerts(k8s config.KubernetesConfig, n config.Node, ccs CACerts) ([]string, error) {
profilePath := localpath.Profile(k8s.ClusterName)
serviceIP, err := util.GetServiceClusterIP(k8s.ServiceCIDR)
if err != nil {
return nil, errors.Wrap(err, "getting service cluster ip")
}
apiServerIPs := append(
k8s.APIServerIPs,
[]net.IP{net.ParseIP(n.IP), serviceIP, net.ParseIP(oci.DefaultBindIPV4), net.ParseIP("10.0.0.1")}...)
@@ -176,9 +194,19 @@ func generateCerts(k8s config.KubernetesConfig, n config.Node) error {
apiServerNames,
util.GetAlternateDNS(k8s.DNSDomain)...)
signedCertSpecs := []struct {
certPath string
keyPath string
// Generate a hash input for certs that depend on ip/name combinations
hi := []string{}
hi = append(hi, apiServerAlternateNames...)
for _, ip := range apiServerIPs {
hi = append(hi, ip.String())
}
sort.Strings(hi)
specs := []struct {
certPath string
keyPath string
hash string
subject string
ips []net.IP
alternateNames []string
@@ -186,56 +214,77 @@ func generateCerts(k8s config.KubernetesConfig, n config.Node) error {
caKeyPath string
}{
{ // Client cert
certPath: filepath.Join(localPath, "client.crt"),
keyPath: filepath.Join(localPath, "client.key"),
certPath: localpath.ClientCert(k8s.ClusterName),
keyPath: localpath.ClientKey(k8s.ClusterName),
subject: "minikube-user",
ips: []net.IP{},
alternateNames: []string{},
caCertPath: caCertPath,
caKeyPath: caKeyPath,
caCertPath: ccs.caCert,
caKeyPath: ccs.caKey,
},
{ // apiserver serving cert
certPath: filepath.Join(localPath, "apiserver.crt"),
keyPath: filepath.Join(localPath, "apiserver.key"),
hash: fmt.Sprintf("%x", sha1.Sum([]byte(strings.Join(hi, "/"))))[0:8],
certPath: filepath.Join(profilePath, "apiserver.crt"),
keyPath: filepath.Join(profilePath, "apiserver.key"),
subject: "minikube",
ips: apiServerIPs,
alternateNames: apiServerAlternateNames,
caCertPath: caCertPath,
caKeyPath: caKeyPath,
caCertPath: ccs.caCert,
caKeyPath: ccs.caKey,
},
{ // aggregator proxy-client cert
certPath: filepath.Join(localPath, "proxy-client.crt"),
keyPath: filepath.Join(localPath, "proxy-client.key"),
certPath: filepath.Join(profilePath, "proxy-client.crt"),
keyPath: filepath.Join(profilePath, "proxy-client.key"),
subject: "aggregator",
ips: []net.IP{},
alternateNames: []string{},
caCertPath: proxyClientCACertPath,
caKeyPath: proxyClientCAKeyPath,
caCertPath: ccs.proxyCert,
caKeyPath: ccs.proxyKey,
},
}
for _, caCertSpec := range caCertSpecs {
if !(canReadFile(caCertSpec.certPath) &&
canReadFile(caCertSpec.keyPath)) {
if err := util.GenerateCACert(
caCertSpec.certPath, caCertSpec.keyPath, caCertSpec.subject,
); err != nil {
return errors.Wrap(err, "Error generating CA certificate")
xfer := []string{}
for _, spec := range specs {
if spec.subject != "minikube-user" {
xfer = append(xfer, spec.certPath)
xfer = append(xfer, spec.keyPath)
}
cp := spec.certPath
kp := spec.keyPath
if spec.hash != "" {
cp = cp + "." + spec.hash
kp = kp + "." + spec.hash
}
if canRead(cp) && canRead(kp) {
glog.Infof("skipping %s signed cert generation: %s", spec.subject, kp)
continue
}
glog.Infof("generating %s signed cert: %s", spec.subject, kp)
err := util.GenerateSignedCert(
cp, kp, spec.subject,
spec.ips, spec.alternateNames,
spec.caCertPath, spec.caKeyPath,
)
if err != nil {
return xfer, errors.Wrapf(err, "generate signed cert for %q", spec.subject)
}
if spec.hash != "" {
glog.Infof("copying %s -> %s", cp, spec.certPath)
if err := copy.Copy(cp, spec.certPath); err != nil {
return xfer, errors.Wrap(err, "copy cert")
}
glog.Infof("copying %s -> %s", kp, spec.keyPath)
if err := copy.Copy(kp, spec.keyPath); err != nil {
return xfer, errors.Wrap(err, "copy key")
}
}
}
for _, signedCertSpec := range signedCertSpecs {
if err := util.GenerateSignedCert(
signedCertSpec.certPath, signedCertSpec.keyPath, signedCertSpec.subject,
signedCertSpec.ips, signedCertSpec.alternateNames,
signedCertSpec.caCertPath, signedCertSpec.caKeyPath,
); err != nil {
return errors.Wrap(err, "Error generating signed apiserver serving cert")
}
}
return nil
return xfer, nil
}
// isValidPEMCertificate checks whether the input file is a valid PEM certificate (with at least one CERTIFICATE block)
@@ -357,9 +406,9 @@ func installCertSymlinks(cr command.Runner, caCerts map[string]string) error {
return nil
}
// canReadFile returns true if the file represented
// canRead returns true if the file represented
// by path exists and is readable, otherwise false.
func canReadFile(path string) bool {
func canRead(path string) bool {
f, err := os.Open(path)
if err != nil {
return false

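The hash suffix in generateProfileCerts above exists so the apiserver certificate is regenerated whenever the set of IPs or alternate names changes: the sorted SAN list is hashed, the cert is generated under a hash-suffixed name, and then copied to the canonical apiserver.crt path. A compact sketch of just that naming step; the helper name below is invented for illustration.

package main

import (
	"crypto/sha1"
	"fmt"
	"sort"
	"strings"
)

// certNameForSANs returns the hash-suffixed path used to cache an apiserver
// cert for a specific set of IPs and DNS names, mirroring the scheme above.
func certNameForSANs(base string, ips, names []string) string {
	hi := append([]string{}, names...)
	hi = append(hi, ips...)
	sort.Strings(hi)
	hash := fmt.Sprintf("%x", sha1.Sum([]byte(strings.Join(hi, "/"))))[0:8]
	return base + "." + hash
}

func main() {
	sans := []string{"kubernetes.default", "kubernetes.default.svc"}
	ips := []string{"192.168.39.2", "10.96.0.1"}
	// Same inputs always map to the same cached file; a changed IP or name
	// produces a new suffix, which forces regeneration.
	fmt.Println(certNameForSANs("apiserver.crt", ips, sans))
}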
View File

@@ -24,7 +24,6 @@ import (
"k8s.io/minikube/pkg/minikube/command"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/localpath"
"k8s.io/minikube/pkg/minikube/tests"
"k8s.io/minikube/pkg/util"
)
@@ -58,20 +57,8 @@ func TestSetupCerts(t *testing.T) {
f := command.NewFakeCommandRunner()
f.SetCommandToOutput(expected)
var filesToBeTransferred []string
for _, cert := range certs {
filesToBeTransferred = append(filesToBeTransferred, filepath.Join(localpath.MiniPath(), cert))
}
filesToBeTransferred = append(filesToBeTransferred, filepath.Join(localpath.MiniPath(), "ca.crt"))
filesToBeTransferred = append(filesToBeTransferred, filepath.Join(localpath.MiniPath(), "certs", "mycert.pem"))
if err := SetupCerts(f, k8s, config.Node{}); err != nil {
_, err := SetupCerts(f, k8s, config.Node{})
if err != nil {
t.Fatalf("Error starting cluster: %v", err)
}
for _, cert := range filesToBeTransferred {
_, err := f.GetFileToContents(cert)
if err != nil {
t.Errorf("Cert not generated: %s", cert)
}
}
}

View File

@@ -215,7 +215,8 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error {
}
c = exec.Command("/bin/bash", "-c", fmt.Sprintf("%s init --config %s %s --ignore-preflight-errors=%s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), bsutil.KubeadmYamlPath, extraFlags, strings.Join(ignore, ",")))
conf := bsutil.KubeadmYamlPath
c = exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo mv %s.new %s && %s init --config %s %s --ignore-preflight-errors=%s", conf, conf, bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), conf, extraFlags, strings.Join(ignore, ",")))
rr, err := k.c.RunCmd(c)
if err != nil {
return errors.Wrapf(err, "init failed. output: %q", rr.Output())
@@ -242,6 +243,20 @@ func (k *Bootstrapper) StartCluster(cfg config.ClusterConfig) error {
return nil
}
func (k *Bootstrapper) controlPlaneEndpoint(cfg config.ClusterConfig) (string, int, error) {
cp, err := config.PrimaryControlPlane(&cfg)
if err != nil {
return "", 0, err
}
if driver.IsKIC(cfg.Driver) {
ip := oci.DefaultBindIPV4
port, err := oci.ForwardedPort(cfg.Driver, cfg.Name, cp.Port)
return ip, port, err
}
return cp.IP, cp.Port, nil
}
// client sets and returns a Kubernetes client to use to speak to a kubeadm launched apiserver
func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error) {
if k.k8sClient != nil {
@@ -268,34 +283,28 @@ func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error
// WaitForNode blocks until the node appears to be healthy
func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, timeout time.Duration) error {
start := time.Now()
out.T(out.Waiting, "Waiting for cluster to come online ...")
if !n.ControlPlane {
glog.Infof("%s is not a control plane, nothing to wait for", n.Name)
return nil
}
cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c})
if err != nil {
return err
}
if n.ControlPlane {
if err := kverify.WaitForAPIServerProcess(cr, k, cfg, k.c, start, timeout); err != nil {
return err
}
if err := kverify.WaitForAPIServerProcess(cr, k, cfg, k.c, start, timeout); err != nil {
return err
}
ip := n.IP
port := n.Port
if driver.IsKIC(cfg.Driver) {
ip = oci.DefaultBindIPV4
p, err := oci.ForwardedPort(cfg.Driver, driver.MachineName(cfg, n), port)
if err != nil {
return errors.Wrapf(err, "get host-bind port %d for container %s", port, cfg.Name)
}
port = p
ip, port, err := k.controlPlaneEndpoint(cfg)
if err != nil {
return err
}
if n.ControlPlane {
if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, start, ip, port, timeout); err != nil {
return err
}
if err := kverify.WaitForHealthyAPIServer(cr, k, cfg, k.c, start, ip, port, timeout); err != nil {
return err
}
c, err := k.client(ip, port)
@@ -309,6 +318,31 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
return nil
}
// needsReset returns whether or not the cluster needs to be reconfigured
func (k *Bootstrapper) needsReset(conf string, ip string, port int, client *kubernetes.Clientset) bool {
if _, err := k.c.RunCmd(exec.Command("sudo", "diff", "-u", conf, conf+".new")); err != nil {
glog.Infof("needs reset: configs differ")
return true
}
st, err := kverify.APIServerStatus(k.c, net.ParseIP(ip), port)
if err != nil {
glog.Infof("needs reset: apiserver error: %v", err)
return true
}
if st != state.Running {
glog.Infof("needs reset: apiserver in state %s", st)
return true
}
if err := kverify.ExpectedComponentsRunning(client); err != nil {
glog.Infof("needs reset: %v", err)
return true
}
return false
}
// restartCluster restarts the Kubernetes cluster configured by kubeadm
func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error {
glog.Infof("restartCluster start")
@@ -334,14 +368,36 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error {
glog.Errorf("failed to create compat symlinks: %v", err)
}
baseCmd := fmt.Sprintf("%s %s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), phase)
cmds := []string{
fmt.Sprintf("%s phase certs all --config %s", baseCmd, bsutil.KubeadmYamlPath),
fmt.Sprintf("%s phase kubeconfig all --config %s", baseCmd, bsutil.KubeadmYamlPath),
fmt.Sprintf("%s phase %s all --config %s", baseCmd, controlPlane, bsutil.KubeadmYamlPath),
fmt.Sprintf("%s phase etcd local --config %s", baseCmd, bsutil.KubeadmYamlPath),
ip, port, err := k.controlPlaneEndpoint(cfg)
if err != nil {
return errors.Wrap(err, "control plane")
}
client, err := k.client(ip, port)
if err != nil {
return errors.Wrap(err, "getting k8s client")
}
// If the cluster is running, check if we have any work to do.
conf := bsutil.KubeadmYamlPath
if !k.needsReset(conf, ip, port, client) {
glog.Infof("Taking a shortcut, as the cluster seems to be properly configured")
return nil
}
if _, err := k.c.RunCmd(exec.Command("sudo", "mv", conf+".new", conf)); err != nil {
return errors.Wrap(err, "mv")
}
baseCmd := fmt.Sprintf("%s %s", bsutil.InvokeKubeadm(cfg.KubernetesConfig.KubernetesVersion), phase)
cmds := []string{
fmt.Sprintf("%s phase certs all --config %s", baseCmd, conf),
fmt.Sprintf("%s phase kubeconfig all --config %s", baseCmd, conf),
fmt.Sprintf("%s phase %s all --config %s", baseCmd, controlPlane, conf),
fmt.Sprintf("%s phase etcd local --config %s", baseCmd, conf),
}
glog.Infof("resetting cluster from %s", conf)
// Run commands one at a time so that it is easier to root cause failures.
for _, c := range cmds {
rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", c))
@@ -352,7 +408,7 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error {
cr, err := cruntime.New(cruntime.Config{Type: cfg.KubernetesConfig.ContainerRuntime, Runner: k.c})
if err != nil {
return err
return errors.Wrap(err, "runtime")
}
// We must ensure that the apiserver is healthy before proceeding
@@ -360,30 +416,11 @@ func (k *Bootstrapper) restartCluster(cfg config.ClusterConfig) error {
return errors.Wrap(err, "apiserver healthz")
}
cp, err := config.PrimaryControlPlane(&cfg)
if err != nil {
return errors.Wrap(err, "getting control plane")
}
ip := cp.IP
port := cp.Port
if driver.IsKIC(cfg.Driver) {
ip = oci.DefaultBindIPV4
port, err = oci.ForwardedPort(cfg.Driver, driver.MachineName(cfg, cp), port)
if err != nil {
return errors.Wrapf(err, "get host-bind port %d for container %s", port, driver.MachineName(cfg, cp))
}
}
client, err := k.client(ip, port)
if err != nil {
return errors.Wrap(err, "getting k8s client")
}
if err := kverify.WaitForSystemPods(cr, k, cfg, k.c, client, time.Now(), kconst.DefaultControlPlaneTimeout); err != nil {
return errors.Wrap(err, "system pods")
}
// Explicitly re-enable kubeadm addons (proxy, coredns) so that they will check for IP or configuration changes.
if rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, bsutil.KubeadmYamlPath))); err != nil {
if rr, err := k.c.RunCmd(exec.Command("/bin/bash", "-c", fmt.Sprintf("%s phase addon all --config %s", baseCmd, conf))); err != nil {
return errors.Wrapf(err, fmt.Sprintf("addon phase cmd:%q", rr.Command()))
}
@@ -451,7 +488,8 @@ func (k *Bootstrapper) DeleteCluster(k8s config.KubernetesConfig) error {
// SetupCerts sets up certificates within the cluster.
func (k *Bootstrapper) SetupCerts(k8s config.KubernetesConfig, n config.Node) error {
return bootstrapper.SetupCerts(k.c, k8s, n)
_, err := bootstrapper.SetupCerts(k.c, k8s, n)
return err
}
// UpdateCluster updates the cluster.
@@ -501,11 +539,6 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru
glog.Infof("kubelet %s config:\n%+v", kubeletCfg, cfg.KubernetesConfig)
// stop kubelet to avoid "Text File Busy" error
if err := stopKubelet(k.c); err != nil {
glog.Warningf("unable to stop kubelet: %s", err)
}
if err := bsutil.TransferBinaries(cfg.KubernetesConfig, k.c); err != nil {
return errors.Wrap(err, "downloading binaries")
}
@@ -514,25 +547,19 @@ func (k *Bootstrapper) UpdateNode(cfg config.ClusterConfig, n config.Node, r cru
if cfg.KubernetesConfig.EnableDefaultCNI {
cniFile = []byte(defaultCNIConfig)
}
// Install assets into temporary files
files := bsutil.ConfigFileAssets(cfg.KubernetesConfig, kubeadmCfg, kubeletCfg, kubeletService, cniFile)
if err := copyFiles(k.c, files); err != nil {
return err
}
if err := startKubelet(k.c); err != nil {
if err := reloadKubelet(k.c); err != nil {
return err
}
return nil
}
func stopKubelet(runner command.Runner) error {
stopCmd := exec.Command("/bin/bash", "-c", "pgrep kubelet && sudo systemctl stop kubelet")
if rr, err := runner.RunCmd(stopCmd); err != nil {
return errors.Wrapf(err, "command: %q output: %q", rr.Command(), rr.Output())
}
return nil
}
func copyFiles(runner command.Runner, files []assets.CopyableFile) error {
// Combine mkdir request into a single call to reduce load
dirs := []string{}
@@ -552,8 +579,17 @@ func copyFiles(runner command.Runner, files []assets.CopyableFile) error {
return nil
}
func startKubelet(runner command.Runner) error {
startCmd := exec.Command("/bin/bash", "-c", "sudo systemctl daemon-reload && sudo systemctl start kubelet")
func reloadKubelet(runner command.Runner) error {
svc := bsutil.KubeletServiceFile
conf := bsutil.KubeletSystemdConfFile
checkCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("pgrep kubelet && diff -u %s %s.new && diff -u %s %s.new", svc, svc, conf, conf))
if _, err := runner.RunCmd(checkCmd); err == nil {
glog.Infof("kubelet is already running with the right configs")
return nil
}
startCmd := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo mv %s.new %s && sudo mv %s.new %s && sudo systemctl daemon-reload && sudo systemctl restart kubelet", svc, svc, conf, conf))
if _, err := runner.RunCmd(startCmd); err != nil {
return errors.Wrap(err, "starting kubelet")
}

View File

@@ -54,6 +54,26 @@ func MakeMiniPath(fileName ...string) string {
return filepath.Join(args...)
}
// Profile returns the path to a profile
func Profile(name string) string {
return filepath.Join(MiniPath(), "profiles", name)
}
// ClientCert returns client certificate path, used by kubeconfig
func ClientCert(name string) string {
return filepath.Join(Profile(name), "client.crt")
}
// ClientKey returns the client key path, used by kubeconfig
func ClientKey(name string) string {
return filepath.Join(Profile(name), "client.key")
}
// CACert returns the minikube CA certificate shared between profiles
func CACert() string {
return filepath.Join(MiniPath(), "ca.crt")
}
// MachinePath returns the Minikube machine path of a machine
func MachinePath(machine string, miniHome ...string) string {
miniPath := MiniPath()

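A tiny worked example of the layout these helpers produce, using a made-up home directory in place of localpath.MiniPath():

package main

import (
	"fmt"
	"path/filepath"
)

func main() {
	// Stand-in for localpath.MiniPath(); normally this is $MINIKUBE_HOME or ~/.minikube.
	miniPath := "/home/user/.minikube"

	profile := filepath.Join(miniPath, "profiles", "p1") // localpath.Profile("p1")
	fmt.Println(filepath.Join(profile, "client.crt"))    // localpath.ClientCert("p1")
	fmt.Println(filepath.Join(profile, "client.key"))    // localpath.ClientKey("p1")
	fmt.Println(filepath.Join(miniPath, "ca.crt"))       // localpath.CACert(), shared across profiles
}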
View File

@@ -66,8 +66,9 @@ func DeleteHost(api libmachine.API, machineName string) error {
// Get the status of the host. Ensure that it exists before proceeding ahead.
status, err := Status(api, machineName)
if err != nil {
// Warn, but proceed
out.WarningT(`Unable to get host status for "{{.name}}": {{.error}}`, out.V{"name": machineName, "error": err})
// Assume that the host has already been deleted, log and return
glog.Infof("Unable to get host status for %s, assuming it has already been deleted: %v", machineName, err)
return nil
}
if status == state.None.String() {

View File

@@ -250,9 +250,9 @@ func setupKubeconfig(h *host.Host, cc *config.ClusterConfig, n *config.Node, clu
kcs := &kubeconfig.Settings{
ClusterName: clusterName,
ClusterServerAddress: addr,
ClientCertificate: localpath.MakeMiniPath("client.crt"),
ClientKey: localpath.MakeMiniPath("client.key"),
CertificateAuthority: localpath.MakeMiniPath("ca.crt"),
ClientCertificate: localpath.ClientCert(cc.Name),
ClientKey: localpath.ClientKey(cc.Name),
CertificateAuthority: localpath.CACert(),
KeepContext: viper.GetBool(keepContext),
EmbedCerts: viper.GetBool(embedCerts),
}

View File

@@ -264,19 +264,34 @@ func PrintServiceList(writer io.Writer, data [][]string) {
table.Render()
}
// SVCNotFoundError error type handles 'service not found' scenarios
type SVCNotFoundError struct {
Err error
}
// Error method for SVCNotFoundError type
func (t SVCNotFoundError) Error() string {
return "Service not found"
}
// WaitForService waits for a service, and returns the URLs when available
func WaitForService(api libmachine.API, namespace string, service string, urlTemplate *template.Template, urlMode bool, https bool,
wait int, interval int) ([]string, error) {
var urlList []string
// Convert "Amount of time to wait" and "interval of each check" to attempts
if interval == 0 {
interval = 1
}
err := CheckService(namespace, service)
if err != nil {
return nil, &SVCNotFoundError{err}
}
chkSVC := func() error { return CheckService(namespace, service) }
if err := retry.Expo(chkSVC, time.Duration(interval)*time.Second, time.Duration(wait)*time.Second); err != nil {
return urlList, errors.Wrapf(err, "Service %s was not found in %q namespace. You may select another namespace by using 'minikube service %s -n <namespace>", service, namespace, service)
return nil, &SVCNotFoundError{err}
}
serviceURL, err := GetServiceURLsForService(api, namespace, service, urlTemplate)

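WaitForService now wraps CheckService in an exponential-backoff retry rather than failing on the first probe. A self-contained sketch of that polling idea; the expoRetry helper below is a local illustration, not minikube's retry.Expo.

package main

import (
	"errors"
	"fmt"
	"time"
)

// expoRetry keeps calling check with exponentially growing pauses until it
// succeeds or the total budget is spent. Stand-in for the retry helper used above.
func expoRetry(check func() error, interval, total time.Duration) error {
	deadline := time.Now().Add(total)
	var err error
	for {
		if err = check(); err == nil {
			return nil
		}
		if time.Now().After(deadline) {
			return err
		}
		time.Sleep(interval)
		interval *= 2
	}
}

func main() {
	attempts := 0
	checkService := func() error {
		attempts++
		if attempts < 3 {
			return errors.New("service not ready")
		}
		return nil
	}
	if err := expoRetry(checkService, 100*time.Millisecond, 2*time.Second); err != nil {
		fmt.Println("gave up:", err)
		return
	}
	fmt.Printf("service became ready after %d checks\n", attempts)
}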
View File

@@ -40,7 +40,7 @@ func TestAddons(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), Minutes(40))
defer CleanupWithLogs(t, profile, cancel)
args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "-v=1", "--addons=ingress", "--addons=registry", "--addons=metrics-server"}, StartArgs()...)
args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "-v=1", "--addons=ingress", "--addons=registry", "--addons=metrics-server", "--addons=helm-tiller"}, StartArgs()...)
rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
if err != nil {
t.Fatalf("%s failed: %v", rr.Args, err)
@@ -55,6 +55,7 @@ func TestAddons(t *testing.T) {
{"Registry", validateRegistryAddon},
{"Ingress", validateIngressAddon},
{"MetricsServer", validateMetricsServerAddon},
{"HelmTiller", validateHelmTillerAddon},
}
for _, tc := range tests {
tc := tc
@@ -249,3 +250,45 @@ func validateMetricsServerAddon(ctx context.Context, t *testing.T, profile strin
t.Errorf("%s failed: %v", rr.Args, err)
}
}
func validateHelmTillerAddon(ctx context.Context, t *testing.T, profile string) {
client, err := kapi.Client(profile)
if err != nil {
t.Fatalf("kubernetes client: %v", client)
}
start := time.Now()
if err := kapi.WaitForDeploymentToStabilize(client, "kube-system", "tiller-deploy", Minutes(6)); err != nil {
t.Errorf("waiting for tiller-deploy deployment to stabilize: %v", err)
}
t.Logf("tiller-deploy stabilized in %s", time.Since(start))
if _, err := PodWait(ctx, t, profile, "kube-system", "app=helm", Minutes(6)); err != nil {
t.Fatalf("wait: %v", err)
}
want := "Server: &version.Version"
// Test from inside the cluster (`helm version` uses the pod list permission; we use the tiller serviceaccount in kube-system to list pods)
checkHelmTiller := func() error {
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "run", "--rm", "helm-test", "--restart=Never", "--image=alpine/helm:2.16.3", "-it", "--namespace=kube-system", "--serviceaccount=tiller", "--", "version"))
if err != nil {
return err
}
if rr.Stderr.String() != "" {
t.Logf("%v: unexpected stderr: %s", rr.Args, rr.Stderr)
}
if !strings.Contains(rr.Stdout.String(), want) {
return fmt.Errorf("%v stdout = %q, want %q", rr.Args, rr.Stdout, want)
}
return nil
}
if err := retry.Expo(checkHelmTiller, 500*time.Millisecond, Minutes(2)); err != nil {
t.Errorf(err.Error())
}
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "helm-tiller", "--alsologtostderr", "-v=1"))
if err != nil {
t.Errorf("%s failed: %v", rr.Args, err)
}
}

View File

@@ -77,7 +77,13 @@ func TestChangeNoneUser(t *testing.T) {
t.Errorf("Failed to convert uid to int: %v", err)
}
for _, p := range []string{localpath.MiniPath(), filepath.Join(u.HomeDir, ".kube/config")} {
// Retrieve the kube config from env
kubeConfig := os.Getenv("KUBECONFIG")
if kubeConfig == "" {
kubeConfig = filepath.Join(u.HomeDir, ".kube/config")
}
for _, p := range []string{localpath.MiniPath(), kubeConfig} {
info, err := os.Stat(p)
if err != nil {
t.Errorf("stat(%s): %v", p, err)