Merge branch 'master' of github.com:kubernetes/minikube into release

pull/7832/head
Priya Wadhwa 2020-04-22 16:53:25 -07:00
commit 90730a0ea7
11 changed files with 190 additions and 72 deletions

View File

@@ -75,7 +75,6 @@ var nodeStartCmd = &cobra.Command{
 }

 func init() {
-	nodeStartCmd.Flags().String("name", "", "The name of the node to start")
 	nodeStartCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
 	nodeCmd.AddCommand(nodeStartCmd)
 }
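With the `--name` flag gone here (and from `node stop` below), the node subcommands identify their target by positional argument; the integration tests later in this commit invoke `minikube node stop m03`. A minimal sketch of that positional-argument pattern with cobra, assuming cobra's standard `Args` validators rather than minikube's actual command wiring:

```go
// Sketch only: reads the node name as a positional argument instead of a
// --name flag, using the same cobra API as the command above.
package main

import (
	"fmt"

	"github.com/spf13/cobra"
)

func main() {
	nodeStopCmd := &cobra.Command{
		Use:   "stop <node-name>",
		Short: "Stops a node in the cluster",
		Args:  cobra.ExactArgs(1), // require exactly one positional argument
		Run: func(cmd *cobra.Command, args []string) {
			name := args[0] // e.g. "m03", as in the integration tests below
			fmt.Printf("stopping node %s\n", name)
		},
	}
	if err := nodeStopCmd.Execute(); err != nil {
		fmt.Println(err)
	}
}
```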

View File

@@ -53,6 +53,5 @@ var nodeStopCmd = &cobra.Command{
 }

 func init() {
-	nodeStopCmd.Flags().String("name", "", "The name of the node to delete")
 	nodeCmd.AddCommand(nodeStopCmd)
 }

View File

@@ -104,12 +104,11 @@ var statusCmd = &cobra.Command{
 		cname := ClusterFlagValue()
 		api, cc := mustload.Partial(cname)

-		var st *Status
-		var err error
+		var statuses []*Status
 		for _, n := range cc.Nodes {
 			glog.Infof("checking status of %s ...", n.Name)
 			machineName := driver.MachineName(*cc, n)
-			st, err = status(api, *cc, n)
+			st, err := status(api, *cc, n)
 			glog.Infof("%s status: %+v", machineName, st)

 			if err != nil {
@@ -118,36 +117,40 @@ var statusCmd = &cobra.Command{
 			if st.Host == Nonexistent {
 				glog.Errorf("The %q host does not exist!", machineName)
 			}
+			statuses = append(statuses, st)
+		}

-			switch strings.ToLower(output) {
-			case "text":
+		switch strings.ToLower(output) {
+		case "text":
+			for _, st := range statuses {
 				if err := statusText(st, os.Stdout); err != nil {
 					exit.WithError("status text failure", err)
 				}
-			case "json":
-				if err := statusJSON(st, os.Stdout); err != nil {
-					exit.WithError("status json failure", err)
-				}
-			default:
-				exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output))
 			}
-		}
-
-		// TODO: Update for multi-node
-		os.Exit(exitCode(st))
+		case "json":
+			if err := statusJSON(statuses, os.Stdout); err != nil {
+				exit.WithError("status json failure", err)
+			}
+		default:
+			exit.WithCodeT(exit.BadUsage, fmt.Sprintf("invalid output format: %s. Valid values: 'text', 'json'", output))
+		}
+
+		os.Exit(exitCode(statuses))
 	},
 }

-func exitCode(st *Status) int {
+func exitCode(statuses []*Status) int {
 	c := 0
-	if st.Host != state.Running.String() {
-		c |= minikubeNotRunningStatusFlag
-	}
-	if (st.APIServer != state.Running.String() && st.APIServer != Irrelevant) || st.Kubelet != state.Running.String() {
-		c |= clusterNotRunningStatusFlag
-	}
-	if st.Kubeconfig != Configured && st.Kubeconfig != Irrelevant {
-		c |= k8sNotRunningStatusFlag
+	for _, st := range statuses {
+		if st.Host != state.Running.String() {
+			c |= minikubeNotRunningStatusFlag
+		}
+		if (st.APIServer != state.Running.String() && st.APIServer != Irrelevant) || st.Kubelet != state.Running.String() {
+			c |= clusterNotRunningStatusFlag
+		}
+		if st.Kubeconfig != Configured && st.Kubeconfig != Irrelevant {
+			c |= k8sNotRunningStatusFlag
+		}
 	}
 	return c
 }
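`exitCode` now ORs each node's problems into a single bitmask, so one unhealthy node taints the exit code for the whole cluster. A self-contained sketch of that aggregation; the single-bit flag values are an assumption here, chosen to be consistent with the integration tests below, which expect exit code 7 when one host is fully stopped:

```go
// Sketch of the bitmask aggregation above. The flag constants are assumed
// single-bit values (1, 2, 4), not copied from minikube's source.
package main

import "fmt"

const (
	minikubeNotRunningStatusFlag = 1 << 0 // host not running
	clusterNotRunningStatusFlag  = 1 << 1 // apiserver or kubelet not running
	k8sNotRunningStatusFlag      = 1 << 2 // kubeconfig not configured
)

type Status struct {
	Host, Kubelet, APIServer, Kubeconfig string
}

// exitCode mirrors the merged function: each node ORs its problems into one code.
func exitCode(statuses []*Status) int {
	c := 0
	for _, st := range statuses {
		if st.Host != "Running" {
			c |= minikubeNotRunningStatusFlag
		}
		if (st.APIServer != "Running" && st.APIServer != "Irrelevant") || st.Kubelet != "Running" {
			c |= clusterNotRunningStatusFlag
		}
		if st.Kubeconfig != "Configured" && st.Kubeconfig != "Irrelevant" {
			c |= k8sNotRunningStatusFlag
		}
	}
	return c
}

func main() {
	running := &Status{Host: "Running", Kubelet: "Running", APIServer: "Running", Kubeconfig: "Configured"}
	stopped := &Status{Host: "Stopped", Kubelet: "Stopped", APIServer: "Stopped", Kubeconfig: "Stopped"}

	// All nodes healthy: exit code 0.
	fmt.Println(exitCode([]*Status{running, running})) // 0

	// One stopped node ORs in all three bits: 1|2|4 = 7, the exit code the
	// StopNode integration test below expects.
	fmt.Println(exitCode([]*Status{running, running, stopped})) // 7
}
```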
@@ -270,8 +273,15 @@ func statusText(st *Status, w io.Writer) error {
 	return nil
 }

-func statusJSON(st *Status, w io.Writer) error {
-	js, err := json.Marshal(st)
+func statusJSON(st []*Status, w io.Writer) error {
+	var js []byte
+	var err error
+	// Keep backwards compat with single node clusters to not break anyone
+	if len(st) == 1 {
+		js, err = json.Marshal(st[0])
+	} else {
+		js, err = json.Marshal(st)
+	}
 	if err != nil {
 		return err
 	}
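The `len(st) == 1` branch keeps single-node clusters emitting a bare JSON object while multi-node clusters emit an array, so existing consumers of the old format keep working. A runnable sketch of the difference, using a stripped-down stand-in for minikube's `Status` type:

```go
// Sketch of the backwards-compatibility branch above: one node marshals to a
// JSON object, several nodes to an array. Status is a two-field stand-in.
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"io"
)

type Status struct {
	Name string
	Host string
}

func statusJSON(st []*Status, w io.Writer) error {
	var js []byte
	var err error
	// Keep backwards compat with single node clusters to not break anyone
	if len(st) == 1 {
		js, err = json.Marshal(st[0])
	} else {
		js, err = json.Marshal(st)
	}
	if err != nil {
		return err
	}
	_, err = w.Write(js)
	return err
}

func main() {
	var single, multi bytes.Buffer
	_ = statusJSON([]*Status{{"minikube", "Running"}}, &single)
	_ = statusJSON([]*Status{{"minikube", "Running"}, {"minikube-m02", "Running"}}, &multi)
	fmt.Println(single.String()) // {"Name":"minikube","Host":"Running"}
	fmt.Println(multi.String())  // [{"Name":"minikube","Host":"Running"},{"Name":"minikube-m02","Host":"Running"}]
}
```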

View File

@@ -35,7 +35,7 @@ func TestExitCode(t *testing.T) {
 	}
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
-			got := exitCode(tc.state)
+			got := exitCode([]*Status{tc.state})
 			if got != tc.want {
 				t.Errorf("exitcode(%+v) = %d, want: %d", tc.state, got, tc.want)
 			}
@@ -93,7 +93,7 @@ func TestStatusJSON(t *testing.T) {
 	for _, tc := range tests {
 		t.Run(tc.name, func(t *testing.T) {
 			var b bytes.Buffer
-			err := statusJSON(tc.state, &b)
+			err := statusJSON([]*Status{tc.state}, &b)
 			if err != nil {
 				t.Errorf("json(%+v) error: %v", tc.state, err)
 			}

View File

@@ -101,6 +101,10 @@ if [ -n "$BOOT2DOCKER_DATA" ]; then
 	mkdir -p /var/lib/docker
 	mount --bind /mnt/$PARTNAME/var/lib/docker /var/lib/docker

 	mkdir -p /mnt/$PARTNAME/var/lib/containerd
 	mkdir -p /var/lib/containerd
 	mount --bind /mnt/$PARTNAME/var/lib/containerd /var/lib/containerd
+
+	mkdir -p /mnt/$PARTNAME/var/lib/containers
+	mkdir -p /var/lib/containers
+	mount --bind /mnt/$PARTNAME/var/lib/containers /var/lib/containers

View File

@@ -30,7 +30,7 @@ endef
 define PODMAN_INSTALL_TARGET_CMDS
 	$(INSTALL) -Dm755 $(@D)/bin/podman $(TARGET_DIR)/usr/bin/podman
 	$(INSTALL) -d -m 755 $(TARGET_DIR)/etc/cni/net.d/
-	$(INSTALL) -m 644 cni/87-podman-bridge.conflist $(TARGET_DIR)/etc/cni/net.d/87-podman-bridge.conflist
+	$(INSTALL) -m 644 $(@D)/cni/87-podman-bridge.conflist $(TARGET_DIR)/etc/cni/net.d/87-podman-bridge.conflist
 endef

 $(eval $(generic-package))

View File

@@ -27,7 +27,7 @@ set -e

 OS_ARCH="linux-amd64"
 VM_DRIVER="podman"
-JOB_NAME="Podman_Linux"
+JOB_NAME="Experimental_Podman_Linux"

 mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
 sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP"

View File

@@ -75,34 +75,13 @@ github-release -v release ${RELEASE_FLAGS} \
     --name "${TAGNAME}" \
     --description "${DESCRIPTION}"

 # Uploading the files into github
-FILES_TO_UPLOAD=(
-    'minikube-linux-amd64'
-    'minikube-linux-amd64.sha256'
-    'minikube-darwin-amd64'
-    'minikube-darwin-amd64.sha256'
-    'minikube-windows-amd64.exe'
-    'minikube-windows-amd64.exe.sha256'
-    'minikube-installer.exe'
-    "minikube_${DEB_VERSION}-0_amd64.deb"
-    "minikube-${RPM_VERSION}-0.x86_64.rpm"
-    'docker-machine-driver-kvm2'
-    'docker-machine-driver-kvm2.sha256'
-    'docker-machine-driver-hyperkit'
-    'docker-machine-driver-hyperkit.sha256'
-)
-
-# ISO files are special, as they are generated pre-release tagging
-ISO_FILES=("minikube-v${VERSION}.iso" "minikube-v${VERSION}.iso.sha256")
-for DOWNLOAD in "${ISO_FILES[@]}"
-do
-    gsutil cp "gs://${ISO_BUCKET}/${DOWNLOAD}" out/ \
-        && FILES_TO_UPLOAD+=("${DOWNLOAD}") \
-        || echo "${DOWNLOAD} was not generated for this release"
+# ISO files are built from a separate process, and may not be included in this release
+for path in $(gsutil ls "gs://${ISO_BUCKET}/minikube-v${VERSION}*" || true); do
+    gsutil cp "${path}" out/
 done

-for UPLOAD in "${FILES_TO_UPLOAD[@]}"
-do
+# Upload all end-user assets other than preload files, as they are release independent
+for file in out/minikube[_-]* out/docker-machine-*; do
     n=0
     until [ $n -ge 5 ]
     do
@@ -110,8 +89,8 @@ do
         --user "${GITHUB_ORGANIZATION}" \
         --repo "${GITHUB_REPO}" \
         --tag "${TAGNAME}" \
-        --name "$UPLOAD" \
-        --file "out/$UPLOAD" && break
+        --name "$(basename ${file})" \
+        --file "${file}" && break
     n=$((n+1))
     sleep 15
 done
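The loop above retries each github-release upload up to five times with a 15 second sleep between attempts. The same bounded-retry pattern, sketched in Go; `uploadAsset` is a hypothetical stand-in for the actual upload command:

```go
// Sketch of a bounded retry with a fixed pause, mirroring the shell loop above.
package main

import (
	"errors"
	"fmt"
	"time"
)

// uploadAsset is a placeholder for the real github-release invocation.
func uploadAsset(path string) error {
	return errors.New("upload failed") // simulate a flaky upload
}

// retry attempts fn up to attempts times, pausing between failures.
func retry(attempts int, pause time.Duration, fn func() error) error {
	var err error
	for n := 0; n < attempts; n++ {
		if err = fn(); err == nil {
			return nil
		}
		time.Sleep(pause)
	}
	return fmt.Errorf("giving up after %d attempts: %w", attempts, err)
}

func main() {
	err := retry(5, 15*time.Second, func() error {
		return uploadAsset("out/minikube-linux-amd64")
	})
	fmt.Println(err)
}
```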

View File

@@ -31,7 +31,6 @@ import (
 	"k8s.io/minikube/pkg/minikube/command"
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/cruntime"
-	"k8s.io/minikube/pkg/minikube/driver"
 	"k8s.io/minikube/pkg/minikube/localpath"
 	"k8s.io/minikube/pkg/minikube/sysinit"
 	"k8s.io/minikube/pkg/util"
@@ -41,7 +40,7 @@ import (
 func generateTarball(kubernetesVersion, containerRuntime, tarballFilename string) error {
 	driver := kic.NewDriver(kic.Config{
 		KubernetesVersion: kubernetesVersion,
-		ContainerRuntime:  driver.Docker,
+		ContainerRuntime:  containerRuntime,
 		OCIBinary:         oci.Docker,
 		MachineName:       profile,
 		ImageDigest:       kic.BaseImage,
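Taking `ContainerRuntime` from the parameter instead of hardcoding `driver.Docker` lets one generator produce preload tarballs for every runtime. A sketch of a plausible call site; the runtime list and tarball naming here are illustrative assumptions, not minikube's exact code:

```go
// Sketch of iterating runtimes through the now-parameterized generator.
package main

import "fmt"

// generateTarball stands in for the function patched above.
func generateTarball(kubernetesVersion, containerRuntime, tarballFilename string) error {
	fmt.Printf("generating %s for %s on %s\n", tarballFilename, kubernetesVersion, containerRuntime)
	return nil
}

func main() {
	kubernetesVersion := "v1.18.0" // assumed version, for illustration
	for _, runtime := range []string{"docker", "containerd", "cri-o"} {
		tarball := fmt.Sprintf("preloaded-images-k8s-%s-%s.tar.lz4", kubernetesVersion, runtime)
		if err := generateTarball(kubernetesVersion, runtime, tarball); err != nil {
			fmt.Printf("failed to generate tarball for %s: %v\n", runtime, err)
		}
	}
}
```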

View File

@@ -155,7 +155,6 @@ minikube node start [flags]
 ```
       --delete-on-failure   If set, delete the current cluster if start fails and try again. Defaults to false.
   -h, --help                help for start
-      --name string         The name of the node to start
 ```

 ### Options inherited from parent commands
@@ -187,8 +186,7 @@ minikube node stop [flags]
 ### Options

 ```
-  -h, --help          help for stop
-      --name string   The name of the node to delete
+  -h, --help   help for stop
 ```

 ### Options inherited from parent commands

View File

@@ -29,24 +29,154 @@ func TestMultiNode(t *testing.T) {
 	if NoneDriver() {
 		t.Skip("none driver does not support multinode")
 	}

 	MaybeParallel(t)
+
+	type validatorFunc func(context.Context, *testing.T, string)
 	profile := UniqueProfileName("multinode")
 	ctx, cancel := context.WithTimeout(context.Background(), Minutes(30))
 	defer CleanupWithLogs(t, profile, cancel)

-	startArgs := append([]string{"start", "-p", profile, "--wait=true"}, StartArgs()...)
+	t.Run("serial", func(t *testing.T) {
+		tests := []struct {
+			name      string
+			validator validatorFunc
+		}{
+			{"FreshStart2Nodes", validateMultiNodeStart},
+			{"AddNode", validateAddNodeToMultiNode},
+			{"StopNode", validateStopRunningNode},
+			{"StartAfterStop", validateStartNodeAfterStop},
+			{"DeleteNode", validateDeleteNodeFromMultiNode},
+		}
+		for _, tc := range tests {
+			tc := tc
+			t.Run(tc.name, func(t *testing.T) {
+				tc.validator(ctx, t, profile)
+			})
+		}
+	})
+}
+
+func validateMultiNodeStart(ctx context.Context, t *testing.T, profile string) {
+	// Start a 2 node cluster with the --nodes param
+	startArgs := append([]string{"start", "-p", profile, "--wait=true", "--nodes=2"}, StartArgs()...)
 	rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
 	if err != nil {
 		t.Fatalf("failed to start cluster. args %q : %v", rr.Command(), err)
 	}

-	// Add a node to the current cluster
-	addArgs := []string{"node", "add", "-p", profile, "-v", "3", "--alsologtostderr"}
-	rr, err = Run(t, exec.CommandContext(ctx, Target(), addArgs...))
-	if err != nil {
-		t.Fatalf("failed to add node to current cluster. args %q : %v", rr.Command(), err)
-	}
-
 	// Make sure minikube status shows 2 nodes
 	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
 	if err != nil {
 		t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
 	}

 	if strings.Count(rr.Stdout.String(), "host: Running") != 2 {
 		t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String())
 	}

 	if strings.Count(rr.Stdout.String(), "kubelet: Running") != 2 {
 		t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String())
 	}
 }
+
+func validateAddNodeToMultiNode(ctx context.Context, t *testing.T, profile string) {
+	// Add a node to the current cluster
+	addArgs := []string{"node", "add", "-p", profile, "-v", "3", "--alsologtostderr"}
+	rr, err := Run(t, exec.CommandContext(ctx, Target(), addArgs...))
+	if err != nil {
+		t.Fatalf("failed to add node to current cluster. args %q : %v", rr.Command(), err)
+	}
+
+	// Make sure minikube status shows 3 nodes
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
+	if err != nil {
+		t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
+	}
+
+	if strings.Count(rr.Stdout.String(), "host: Running") != 3 {
+		t.Errorf("status says all hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String())
+	}
+
+	if strings.Count(rr.Stdout.String(), "kubelet: Running") != 3 {
+		t.Errorf("status says all kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String())
+	}
+}
+
+func validateStopRunningNode(ctx context.Context, t *testing.T, profile string) {
+	// Names are autogenerated using the node.Name() function
+	name := "m03"
+
+	// Run minikube node stop on that node
+	rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "stop", name))
+	if err != nil {
+		t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err)
+	}
+
+	// Run status again to see the stopped host
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
+	// Exit code 7 means one host is stopped, which we are expecting
+	if err != nil && rr.ExitCode != 7 {
+		t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
+	}
+
+	// Make sure minikube status shows 2 running nodes and 1 stopped one
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
+	if err != nil && rr.ExitCode != 7 {
+		t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
+	}
+
+	if strings.Count(rr.Stdout.String(), "kubelet: Running") != 2 {
+		t.Errorf("incorrect number of running kubelets: args %q: %v", rr.Command(), rr.Stdout.String())
+	}
+
+	if strings.Count(rr.Stdout.String(), "host: Stopped") != 1 {
+		t.Errorf("incorrect number of stopped hosts: args %q: %v", rr.Command(), rr.Stdout.String())
+	}
+
+	if strings.Count(rr.Stdout.String(), "kubelet: Stopped") != 1 {
+		t.Errorf("incorrect number of stopped kubelets: args %q: %v", rr.Command(), rr.Stdout.String())
+	}
+}
+
+func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile string) {
+	// TODO (#7496): remove skip once restarts work
+	t.Skip("Restarting nodes is broken :(")
+
+	// Grab the stopped node
+	name := "m03"
+
+	// Start the node back up
+	rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "start", name))
+	if err != nil {
+		t.Errorf("node start returned an error. args %q: %v", rr.Command(), err)
+	}
+
+	// Make sure minikube status shows 3 running hosts
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
+	if err != nil {
+		t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)
+	}
+
+	if strings.Count(rr.Stdout.String(), "host: Running") != 3 {
+		t.Errorf("status says all hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String())
+	}
+
+	if strings.Count(rr.Stdout.String(), "kubelet: Running") != 3 {
+		t.Errorf("status says all kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String())
+	}
+}
+
+func validateDeleteNodeFromMultiNode(ctx context.Context, t *testing.T, profile string) {
+	name := "m03"
+
+	// Delete the node
+	rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "node", "delete", name))
+	if err != nil {
+		t.Errorf("node delete returned an error. args %q: %v", rr.Command(), err)
+	}
+
+	// Make sure status is back down to 2 hosts
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "status"))
+	if err != nil {
+		t.Fatalf("failed to run minikube status. args %q : %v", rr.Command(), err)