Fix KVM driver (tests) timeouts (#20852)
* Fix KVM driver tests timeouts: rewrite the KVM driver waiting logic for domain start, getting the IP address, and shutting the domain down; add more config/state outputs to aid future debugging; bump go/libvirt to v1.11002.0 and set the minimum memory required for running all tests to 3GB to avoid some really weird system behaviour.
* Revert the reduction of the time limit for the TestCert tests run.
* Set memory and debug output in the TestNoKubernetes tests.
* Extend the KVM waitForStaticIP timeout.
* Add the console log to the debug output.
* Update the ISO to v1.36.0-1748823857-20852.

Co-authored-by: minikube-bot <minikube-bot@google.com>
parent 2ca8686064
commit 4da3cedc84

Makefile (14 changes)
@@ -24,7 +24,7 @@ KIC_VERSION ?= $(shell grep -E "Version =" pkg/drivers/kic/types.go | cut -d \"
 HUGO_VERSION ?= $(shell grep -E "HUGO_VERSION = \"" netlify.toml | cut -d \" -f2)
 
 # Default to .0 for higher cache hit rates, as build increments typically don't require new ISO versions
-ISO_VERSION ?= v1.36.0
+ISO_VERSION ?= v1.36.0-1748823857-20852
 
 # Dashes are valid in semver, but not Linux packaging. Use ~ to delimit alpha/beta
 DEB_VERSION ?= $(subst -,~,$(RAW_VERSION))
@@ -103,7 +103,7 @@ $(shell mkdir -p $(BUILD_DIR))
 CURRENT_GIT_BRANCH ?= $(shell git branch | grep \* | cut -d ' ' -f2)
 
 # Use system python if it exists, otherwise use Docker.
-PYTHON := $(shell command -v python || echo "docker run --rm -it -v $(shell pwd):/minikube:Z -w /minikube python python")
+PYTHON := $(shell command -v python || echo "docker run --rm -it -v $(shell pwd):/minikube -w /minikube python python")
 BUILD_OS := $(shell uname -s)
 
 SHA512SUM=$(shell command -v sha512sum || echo "shasum -a 512")
@@ -189,7 +189,7 @@ endef
 
 # $(call DOCKER, image, command)
 define DOCKER
-    docker run --rm -e GOCACHE=/app/.cache -e IN_DOCKER=1 --user $(shell id -u):$(shell id -g) -w /app -v $(PWD):/app:Z -v $(GOPATH):/go --init $(1) /bin/bash -c '$(2)'
+    docker run --rm -e GOCACHE=/app/.cache -e IN_DOCKER=1 --user $(shell id -u):$(shell id -g) -w /app -v $(PWD):/app -v $(GOPATH):/go --init $(1) /bin/bash -c '$(2)'
 endef
 
 ifeq ($(BUILD_IN_DOCKER),y)
@@ -341,13 +341,13 @@ out/minikube-%.iso: $(shell find "deploy/iso/minikube-iso" -type f)
 ifeq ($(IN_DOCKER),1)
     $(MAKE) minikube-iso-$*
 else
-    docker run --rm --workdir /mnt --volume $(CURDIR):/mnt:Z $(ISO_DOCKER_EXTRA_ARGS) \
+    docker run --rm --workdir /mnt --volume $(CURDIR):/mnt $(ISO_DOCKER_EXTRA_ARGS) \
         --user $(shell id -u):$(shell id -g) --env HOME=/tmp --env IN_DOCKER=1 \
         $(ISO_BUILD_IMAGE) /bin/bash -lc '/usr/bin/make minikube-iso-$*'
 endif
 
 iso_in_docker:
-    docker run -it --rm --workdir /mnt --volume $(CURDIR):/mnt:Z $(ISO_DOCKER_EXTRA_ARGS) \
+    docker run -it --rm --workdir /mnt --volume $(CURDIR):/mnt $(ISO_DOCKER_EXTRA_ARGS) \
         --user $(shell id -u):$(shell id -g) --env HOME=/tmp --env IN_DOCKER=1 \
         $(ISO_BUILD_IMAGE) /bin/bash
@@ -523,7 +523,7 @@ out/linters/golangci-lint-$(GOLINT_VERSION):
 .PHONY: lint
 ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y)
 lint:
-    docker run --rm -v `pwd`:/app:Z -w /app golangci/golangci-lint:$(GOLINT_VERSION) \
+    docker run --rm -v `pwd`:/app -w /app golangci/golangci-lint:$(GOLINT_VERSION) \
     golangci-lint run ${GOLINT_OPTIONS} ./..."
     # --skip-dirs "cmd/drivers/kvm|cmd/drivers/hyperkit|pkg/drivers/kvm|pkg/drivers/hyperkit"
     # The "--skip-dirs" parameter is no longer supported in the V2 version. If you need to skip the directory,
@@ -657,7 +657,7 @@ out/docker-machine-driver-hyperkit:
 ifeq ($(MINIKUBE_BUILD_IN_DOCKER),y)
     docker run --rm -e GOCACHE=/app/.cache -e IN_DOCKER=1 \
         --user $(shell id -u):$(shell id -g) -w /app \
-        -v $(PWD):/app:Z -v $(GOPATH):/go:Z --init --entrypoint "" \
+        -v $(PWD):/app -v $(GOPATH):/go --init --entrypoint "" \
         $(HYPERKIT_BUILD_IMAGE) /bin/bash -c 'CC=o64-clang CXX=o64-clang++ /usr/bin/make $@'
 else
     $(if $(quiet),@echo "  GO       $@")
@@ -1098,8 +1098,8 @@ func suggestMemoryAllocation(sysLimit, containerLimit, nodes int) int {
         return mem
     }
 
-    const fallback = 2200
-    maximum := 6000
+    const fallback = 3072
+    maximum := 6144
 
     if sysLimit > 0 && fallback > sysLimit {
         return sysLimit
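The defaults move from 2200 MB/6000 MB to the power-of-two values 3072 MB (the 3GB test minimum called out in the commit message) and 6144 MB. A standalone sketch of how the fallback and the cap interact; the division heuristic in the middle is illustrative only, not minikube's actual formula:

package main

import "fmt"

// suggestMemory is a simplified sketch of suggestMemoryAllocation: pick a
// default, never exceed the system limit, and cap the suggestion.
func suggestMemory(sysLimitMB int) int {
	const fallback = 3072 // new default: 3GB
	const maximum = 6144  // new cap: 6GB

	if sysLimitMB > 0 && fallback > sysLimitMB {
		return sysLimitMB // less memory than the default; use all of it
	}
	suggested := sysLimitMB / 4 // illustrative heuristic, not the real formula
	if suggested < fallback {
		suggested = fallback
	}
	if suggested > maximum {
		suggested = maximum
	}
	return suggested
}

func main() {
	for _, sys := range []int{2048, 4096, 128000} {
		fmt.Printf("sys=%dMB -> suggest %dMB\n", sys, suggestMemory(sys))
	}
}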
@@ -277,26 +277,26 @@ func TestSuggestMemoryAllocation(t *testing.T) {
         nodes int
         want  int
     }{
-        {"128GB sys", 128000, 0, 1, 6000},
-        {"64GB sys", 64000, 0, 1, 6000},
-        {"32GB sys", 32768, 0, 1, 6000},
+        {"128GB sys", 128000, 0, 1, 6144},
+        {"64GB sys", 64000, 0, 1, 6144},
+        {"32GB sys", 32768, 0, 1, 6144},
         {"16GB sys", 16384, 0, 1, 4000},
         {"odd sys", 14567, 0, 1, 3600},
-        {"4GB sys", 4096, 0, 1, 2200},
+        {"4GB sys", 4096, 0, 1, 3072},
         {"2GB sys", 2048, 0, 1, 2048},
-        {"Unable to poll sys", 0, 0, 1, 2200},
+        {"Unable to poll sys", 0, 0, 1, 3072},
         {"128GB sys, 16GB container", 128000, 16384, 1, 16336},
         {"64GB sys, 16GB container", 64000, 16384, 1, 16000},
         {"16GB sys, 4GB container", 16384, 4096, 1, 4000},
         {"4GB sys, 3.5GB container", 16384, 3500, 1, 3452},
         {"16GB sys, 2GB container", 16384, 2048, 1, 2048},
         {"16GB sys, unable to poll container", 16384, 0, 1, 4000},
-        {"128GB sys 2 nodes", 128000, 0, 2, 6000},
-        {"8GB sys 3 nodes", 8192, 0, 3, 2200},
-        {"16GB sys 2 nodes", 16384, 0, 2, 2200},
+        {"128GB sys 2 nodes", 128000, 0, 2, 6144},
+        {"8GB sys 3 nodes", 8192, 0, 3, 3072},
+        {"16GB sys 2 nodes", 16384, 0, 2, 3072},
         {"32GB sys 2 nodes", 32768, 0, 2, 4050},
-        {"odd sys 2 nodes", 14567, 0, 2, 2200},
-        {"4GB sys 2 nodes", 4096, 0, 2, 2200},
+        {"odd sys 2 nodes", 14567, 0, 2, 3072},
+        {"4GB sys 2 nodes", 4096, 0, 2, 3072},
         {"2GB sys 3 nodes", 2048, 0, 3, 2048},
     }
     for _, test := range tests {
go.mod (2 changes)

@@ -87,7 +87,7 @@ require (
     k8s.io/klog/v2 v2.130.1
     k8s.io/kubectl v0.32.2
     k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738
-    libvirt.org/go/libvirt v1.11001.0
+    libvirt.org/go/libvirt v1.11002.0
     sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.3.0
 )
go.sum (4 changes)

@@ -3219,8 +3219,8 @@ k8s.io/utils v0.0.0-20210930125809-cb0fa318a74b/go.mod h1:jPW/WVKK9YHAvNhRxK0md/
 k8s.io/utils v0.0.0-20211116205334-6203023598ed/go.mod h1:jPW/WVKK9YHAvNhRxK0md/EJ228hCsBRufyofKtW8HA=
 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro=
 k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0=
-libvirt.org/go/libvirt v1.11001.0 h1:QJgpslxY7qkpXZIDxdMHpkDl7FfhgQJwqRTGBbg/S8E=
-libvirt.org/go/libvirt v1.11001.0/go.mod h1:1WiFE8EjZfq+FCVog+rvr1yatKbKZ9FaFMZgEqxEJqQ=
+libvirt.org/go/libvirt v1.11002.0 h1:cb8KJG3D97pc/hxQ2n6P82hRX3rlgdzO7bih6W1AAQ8=
+libvirt.org/go/libvirt v1.11002.0/go.mod h1:1WiFE8EjZfq+FCVog+rvr1yatKbKZ9FaFMZgEqxEJqQ=
 lukechampine.com/uint128 v1.1.1/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
 lukechampine.com/uint128 v1.2.0/go.mod h1:c4eWIwlEGaxC/+H1VguhU4PHXNWDCDMUlWdIWl2j1gk=
 modernc.org/cc/v3 v3.36.0/go.mod h1:NFUHyPn4ekoC/JHeZFfZurN6ixxawE1BnVonP/oahEI=
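The libvirt Go binding moves up one minor release (v1.11001.0 to v1.11002.0, tracking libvirt 11.1 to 11.2). When debugging version-sensitive behaviour like the waits above, it can help to log the library version the binary is actually linked against; a minimal sketch using the binding's GetLibVersion call (the connection URI is a placeholder):

package main

import (
	"fmt"
	"log"

	"libvirt.org/go/libvirt"
)

func main() {
	conn, err := libvirt.NewConnect("qemu:///system") // placeholder URI
	if err != nil {
		log.Fatalf("failed connecting to libvirt socket: %v", err)
	}
	defer conn.Close()

	// GetLibVersion encodes the version as major*1000000 + minor*1000 + release.
	v, err := conn.GetLibVersion()
	if err != nil {
		log.Fatalf("failed getting libvirt version: %v", err)
	}
	fmt.Printf("libvirt %d.%d.%d\n", v/1000000, (v/1000)%1000, v%1000)
}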
@@ -145,7 +145,7 @@ func getLabels(containerRuntime string) *stackdriver.Labels {
 func minikubeStartTime(ctx context.Context, projectID, minikubePath, containerRuntime string) (float64, error) {
     defer deleteMinikube(ctx, minikubePath)
 
-    cmd := exec.CommandContext(ctx, minikubePath, "start", "--driver=docker", "-p", profile, "--memory=2048", "--trace=gcp", fmt.Sprintf("--container-runtime=%s", containerRuntime))
+    cmd := exec.CommandContext(ctx, minikubePath, "start", "--driver=docker", "-p", profile, "--memory=3072", "--trace=gcp", fmt.Sprintf("--container-runtime=%s", containerRuntime))
     cmd.Env = append(os.Environ(), fmt.Sprintf("%s=%s", pkgtrace.ProjectEnvVar, projectID))
     cmd.Stdout = os.Stderr
     cmd.Stderr = os.Stderr
@@ -31,12 +31,12 @@ import (
 func (d *Driver) getDomain() (*libvirt.Domain, *libvirt.Connect, error) {
     conn, err := getConnection(d.ConnectionURI)
     if err != nil {
-        return nil, nil, errors.Wrap(err, "getting libvirt connection")
+        return nil, nil, fmt.Errorf("failed opening libvirt connection: %w", err)
     }
 
     dom, err := conn.LookupDomainByName(d.MachineName)
     if err != nil {
-        return nil, nil, errors.Wrap(err, "looking up domain")
+        return nil, nil, fmt.Errorf("failed looking up domain: %w", lvErr(err))
     }
 
     return dom, conn, nil
@@ -45,13 +45,17 @@ func (d *Driver) getDomain() (*libvirt.Domain, *libvirt.Connect, error) {
 func getConnection(connectionURI string) (*libvirt.Connect, error) {
     conn, err := libvirt.NewConnect(connectionURI)
     if err != nil {
-        return nil, errors.Wrap(err, "connecting to libvirt socket")
+        return nil, fmt.Errorf("failed connecting to libvirt socket: %w", lvErr(err))
     }
 
     return conn, nil
 }
 
 func closeDomain(dom *libvirt.Domain, conn *libvirt.Connect) error {
+    if dom == nil {
+        return fmt.Errorf("nil domain, cannot close")
+    }
+
     if err := dom.Free(); err != nil {
         return err
     }
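Nearly every wrap site in this file now routes the raw error through lvErr() before formatting. The helper itself is not shown in this diff; the sketch below is one plausible shape for it, an assumption built only on the fact that the Go binding returns structured libvirt.Error values (the name lvErr comes from the diff, the body here is guessed):

package main

import (
	"fmt"

	"libvirt.org/go/libvirt"
)

// lvErr is a guess at the helper used throughout the diff: expose the
// structured fields of a libvirt.Error instead of its default formatting.
func lvErr(err error) error {
	if lverr, ok := err.(libvirt.Error); ok {
		return fmt.Errorf("libvirt error code %d: %s", lverr.Code, lverr.Message)
	}
	return err
}

func main() {
	if _, err := libvirt.NewConnect("qemu:///nonexistent"); err != nil {
		fmt.Println(lvErr(err))
	}
}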
@@ -62,25 +66,31 @@ func closeDomain(dom *libvirt.Domain, conn *libvirt.Connect) error {
         return err
     }
 
-// create the XML for the domain using our domainTmpl template
-func (d *Driver) createDomain() (*libvirt.Domain, error) {
+// defineDomain defines the XML for the domain using our domainTmpl template
+func (d *Driver) defineDomain() (*libvirt.Domain, error) {
     tmpl := template.Must(template.New("domain").Parse(domainTmpl))
     var domainXML bytes.Buffer
-    if err := tmpl.Execute(&domainXML, d); err != nil {
+    dlog := struct {
+        Driver
+        ConsoleLogPath string
+    }{
+        Driver:         *d,
+        ConsoleLogPath: consoleLogPath(*d),
+    }
+    if err := tmpl.Execute(&domainXML, dlog); err != nil {
         return nil, errors.Wrap(err, "executing domain xml")
     }
     conn, err := getConnection(d.ConnectionURI)
     if err != nil {
-        return nil, errors.Wrap(err, "getting libvirt connection")
+        return nil, fmt.Errorf("failed opening libvirt connection: %w", err)
     }
     defer func() {
         if _, err := conn.Close(); err != nil {
-            log.Errorf("unable to close libvirt connection: %v", err)
+            log.Errorf("failed closing libvirt connection: %v", lvErr(err))
         }
     }()
 
-    log.Infof("define libvirt domain using xml: %v", domainXML.String())
+    // define the domain in libvirt using the generated XML
+    log.Infof("defining domain using XML: %v", domainXML.String())
     dom, err := conn.DomainDefineXML(domainXML.String())
     if err != nil {
         return nil, errors.Wrapf(err, "error defining domain xml: %s", domainXML.String())
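The dlog value above uses a handy Go pattern: embed the Driver in an anonymous struct to expose one extra field (ConsoleLogPath) to the template without changing the Driver type itself. A minimal self-contained sketch of the same technique; the types and values here are illustrative, not the driver's actual ones:

package main

import (
	"os"
	"text/template"
)

type Driver struct {
	MachineName string
	Memory      int
}

const tmplText = `name={{.MachineName}} memory={{.Memory}} console={{.ConsoleLogPath}}
`

func main() {
	d := Driver{MachineName: "minikube", Memory: 3072}

	// Embed the driver and add one extra, template-only field.
	dlog := struct {
		Driver
		ConsoleLogPath string
	}{
		Driver:         d,
		ConsoleLogPath: "/tmp/minikube-console.log",
	}

	tmpl := template.Must(template.New("domain").Parse(tmplText))
	// Promoted fields (MachineName, Memory) and the extra field are all visible.
	if err := tmpl.Execute(os.Stdout, dlog); err != nil {
		panic(err)
	}
}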
@@ -27,16 +27,16 @@ const domainTmpl = `
       <acpi/>
       <apic/>
       <pae/>
-      {{if .Hidden}}
+      {{- if .Hidden}}
       <kvm>
         <hidden state='on'/>
       </kvm>
-      {{end}}
+      {{- end}}
     </features>
     <cpu mode='host-passthrough'>
-    {{if gt .NUMANodeCount 1}}
+    {{- if gt .NUMANodeCount 1}}
     {{.NUMANodeXML}}
-    {{end}}
+    {{- end}}
     </cpu>
     <os>
       <type machine='virt-4.2' arch='aarch64'>hvm</type>
@@ -75,12 +75,12 @@ const domainTmpl = `
       <rng model='virtio'>
         <backend model='random'>/dev/random</backend>
       </rng>
-      {{if .GPU}}
+      {{- if .GPU}}
       {{.DevicesXML}}
-      {{end}}
-      {{if gt .ExtraDisks 0}}
+      {{- end}}
+      {{- if gt .ExtraDisks 0}}
       {{.ExtraDisksXML}}
-      {{end}}
+      {{- end}}
     </devices>
 </domain>
 `
@@ -27,16 +27,16 @@ const domainTmpl = `
       <acpi/>
       <apic/>
       <pae/>
-      {{if .Hidden}}
+      {{- if .Hidden}}
      <kvm>
         <hidden state='on'/>
       </kvm>
-      {{end}}
+      {{- end}}
     </features>
     <cpu mode='host-passthrough'>
-    {{if gt .NUMANodeCount 1}}
+    {{- if gt .NUMANodeCount 1}}
     {{.NUMANodeXML}}
-    {{end}}
+    {{- end}}
     </cpu>
     <os>
       <type>hvm</type>
@@ -55,6 +55,7 @@ const domainTmpl = `
       <source file='{{.DiskPath}}'/>
       <target dev='hda' bus='virtio'/>
     </disk>
+    <controller type='virtio-serial'/>
     <interface type='network'>
       <source network='{{.PrivateNetwork}}'/>
       <model type='virtio'/>
@@ -65,19 +66,23 @@ const domainTmpl = `
     </interface>
     <serial type='pty'>
       <target port='0'/>
+      <log file='{{.ConsoleLogPath}}' append='on'/>
     </serial>
     <console type='pty'>
       <target type='serial' port='0'/>
     </console>
+    <console type='pty'>
+      <target type="virtio" port="1"/>
+    </console>
     <rng model='virtio'>
       <backend model='random'>/dev/random</backend>
     </rng>
-    {{if .GPU}}
+    {{- if .GPU}}
     {{.DevicesXML}}
-    {{end}}
-    {{if gt .ExtraDisks 0}}
+    {{- end}}
+    {{- if gt .ExtraDisks 0}}
     {{.ExtraDisksXML}}
-    {{end}}
+    {{- end}}
     </devices>
 </domain>
 `
@@ -141,11 +141,11 @@ func (d *Driver) GetURL() (string, error) {
 func (d *Driver) PreCommandCheck() error {
     conn, err := getConnection(d.ConnectionURI)
     if err != nil {
-        return errors.Wrap(err, "getting libvirt connection")
+        return fmt.Errorf("failed opening libvirt connection: %w", err)
     }
     defer func() {
         if _, err := conn.Close(); err != nil {
-            log.Errorf("unable to close libvirt connection: %v", err)
+            log.Errorf("failed closing libvirt connection: %v", lvErr(err))
         }
     }()
 
@@ -167,7 +167,7 @@ func (d *Driver) GetState() (state.State, error) {
     }
     defer func() {
         if err := closeDomain(dom, conn); err != nil {
-            log.Errorf("unable to close domain: %v", err)
+            log.Errorf("failed closing domain: %v", err)
         }
     }()
 
@@ -225,11 +225,11 @@ func (d *Driver) GetIP() (string, error) {
 
     conn, err := getConnection(d.ConnectionURI)
     if err != nil {
-        return "", errors.Wrap(err, "getting libvirt connection")
+        return "", fmt.Errorf("failed opening libvirt connection: %w", err)
     }
     defer func() {
         if _, err := conn.Close(); err != nil {
-            log.Errorf("unable to close libvirt connection: %v", err)
+            log.Errorf("failed closing libvirt connection: %v", lvErr(err))
         }
     }()
 
@@ -265,7 +265,7 @@ func (d *Driver) Kill() error {
     }
     defer func() {
         if err := closeDomain(dom, conn); err != nil {
-            log.Errorf("unable to close domain: %v", err)
+            log.Errorf("failed closing domain: %v", err)
         }
     }()
 
@@ -303,17 +303,47 @@ func (d *Driver) Start() error {
     }
     defer func() {
         if err := closeDomain(dom, conn); err != nil {
-            log.Errorf("unable to close domain: %v", err)
+            log.Errorf("failed closing domain: %v", err)
         }
     }()
 
+    domXML, err := dom.GetXMLDesc(libvirt.DOMAIN_XML_SECURE)
+    if err != nil {
+        log.Debugf("failed to get domain XML: %v", lvErr(err))
+    } else {
+        log.Debugf("starting domain XML:\n%s", domXML)
+    }
+
+    // libvirt/qemu creates a console log file owned by root:root and permissions 0600,
+    // so we pre-create it (and close it immediately), just to be able to read it later
+    logPath := consoleLogPath(*d)
+    f, err := os.Create(logPath)
+    if err != nil {
+        log.Debugf("failed to create console log file %q: %v", logPath, err)
+    } else {
+        f.Close()
+    }
+    // ensure console log file is cleaned up
+    defer func() {
+        if _, err := os.Stat(logPath); err == nil {
+            if err := os.Remove(logPath); err != nil {
+                log.Debugf("failed removing console log file %q: %v", logPath, err)
+            }
+        }
+    }()
+
     log.Info("creating domain...")
     if err := dom.Create(); err != nil {
         return errors.Wrap(err, "creating domain")
     }
 
+    log.Info("waiting for domain to start...")
+    if err := d.waitForDomainState(state.Running, 30*time.Second); err != nil {
+        return errors.Wrap(err, "waiting for domain to start")
+    }
+    log.Info("domain is now running")
+
     log.Info("waiting for IP...")
-    if err := d.waitForStaticIP(conn); err != nil {
+    if err := d.waitForStaticIP(conn, 90*time.Second); err != nil {
         return errors.Wrap(err, "waiting for IP")
     }
 
@@ -325,8 +355,51 @@ func (d *Driver) Start() error {
     return nil
 }
 
+// consoleLogPath returns the path to the console log file for the given machine name.
+func consoleLogPath(d Driver) string {
+    // return fmt.Sprintf("%s-console.log", machineName)
+    return d.ResolveStorePath("console.log")
+}
+
+// waitForDomainState waits maxTime for the domain to reach a target state.
+func (d *Driver) waitForDomainState(targetState state.State, maxTime time.Duration) error {
+    query := func() error {
+        currentState, err := d.GetState()
+        if err != nil {
+            return fmt.Errorf("failed getting domain state: %w", err)
+        }
+
+        if currentState == targetState {
+            return nil
+        }
+
+        log.Debugf("current domain state is %q, will retry", currentState.String())
+        return fmt.Errorf("last domain state: %q", currentState.String())
+    }
+    if err := retry.Local(query, maxTime); err != nil {
+        dumpConsoleLogs(consoleLogPath(*d))
+        return fmt.Errorf("timed out waiting %v for domain to reach %q state: %w", maxTime, targetState.String(), err)
+    }
+    return nil
+}
+
+// dumpConsoleLogs prints out the console log.
+func dumpConsoleLogs(logPath string) {
+    if _, err := os.Stat(logPath); err != nil {
+        log.Debugf("failed checking console log file %q: %v", logPath, err)
+        return
+    }
+
+    data, err := os.ReadFile(logPath)
+    if err != nil {
+        log.Debugf("failed dumping console log file %q: %v", logPath, err)
+        return
+    }
+    log.Debugf("console log:\n%s", data)
+}
+
 // waitForStaticIP waits for IP address of domain that has been created & starting and then makes that IP static.
-func (d *Driver) waitForStaticIP(conn *libvirt.Connect) error {
+func (d *Driver) waitForStaticIP(conn *libvirt.Connect, maxTime time.Duration) error {
     query := func() error {
         sip, err := ipFromAPI(conn, d.MachineName, d.PrivateNetwork)
         if err != nil {
@@ -342,8 +415,9 @@ func (d *Driver) waitForStaticIP(conn *libvirt.Connect) error {
 
         return nil
     }
-    if err := retry.Local(query, 1*time.Minute); err != nil {
-        return fmt.Errorf("domain %s didn't return IP after 1 minute", d.MachineName)
+    if err := retry.Local(query, maxTime); err != nil {
+        dumpConsoleLogs(consoleLogPath(*d))
+        return fmt.Errorf("domain %s didn't return IP after %v", d.MachineName, maxTime)
     }
 
     log.Info("reserving static IP address...")
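Both waits now funnel through the same shape: a query closure that returns nil on success, retried under a caller-supplied deadline, with the console log dumped on timeout. A standalone sketch of that shape; retry.Local is minikube's pkg/util/retry helper, and the stub below only imitates its contract of retrying a func() error until maxTime elapses:

package main

import (
	"errors"
	"fmt"
	"time"
)

// retryLocal imitates the contract of minikube's retry.Local: keep calling
// query until it returns nil or maxTime elapses.
func retryLocal(query func() error, maxTime time.Duration) error {
	deadline := time.Now().Add(maxTime)
	err := errors.New("not attempted")
	for time.Now().Before(deadline) {
		if err = query(); err == nil {
			return nil
		}
		time.Sleep(time.Second)
	}
	return fmt.Errorf("timed out after %v: %w", maxTime, err)
}

func main() {
	start := time.Now()
	// A query that "succeeds" after three seconds, standing in for
	// "domain reached the target state" or "domain returned an IP".
	query := func() error {
		if time.Since(start) < 3*time.Second {
			return errors.New("not ready yet")
		}
		return nil
	}
	if err := retryLocal(query, 90*time.Second); err != nil {
		fmt.Println("would dump console log here:", err)
		return
	}
	fmt.Println("ready")
}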
@@ -358,7 +432,7 @@ func (d *Driver) waitForStaticIP(conn *libvirt.Connect) error {
 
 // Create a host using the driver's config
 func (d *Driver) Create() error {
-    log.Info("creating KVM machine...")
+    log.Info("creating domain...")
 
     log.Info("creating network...")
     if err := d.createNetwork(); err != nil {
@@ -418,15 +492,16 @@ func (d *Driver) Create() error {
         log.Errorf("unable to ensure permissions on %s: %v", store, err)
     }
 
-    log.Info("creating domain...")
-
-    dom, err := d.createDomain()
+    log.Info("defining domain...")
+    dom, err := d.defineDomain()
     if err != nil {
-        return errors.Wrap(err, "creating domain")
+        return errors.Wrap(err, "defining domain")
     }
     defer func() {
-        if err := dom.Free(); err != nil {
-            log.Errorf("unable to free domain: %v", err)
+        if dom == nil {
+            log.Warnf("nil domain, cannot free")
+        } else if err := dom.Free(); err != nil {
+            log.Errorf("failed freeing %s domain: %v", d.MachineName, lvErr(err))
         }
     }()
 
@@ -434,7 +509,7 @@ func (d *Driver) Create() error {
         return errors.Wrap(err, "starting domain")
     }
 
-    log.Infof("KVM machine creation complete")
+    log.Infof("domain creation complete")
     return nil
 }
 
@@ -470,28 +545,29 @@ func ensureDirPermissions(store string) error {
 
 // Stop a host gracefully or forcefully otherwise.
 func (d *Driver) Stop() error {
-    log.Info("stopping domain...")
-
     s, err := d.GetState()
     if err != nil {
-        return errors.Wrap(err, "getting domain state")
+        return fmt.Errorf("getting domain state: %w", err)
     }
 
     if s == state.Stopped {
+        log.Info("domain already stopped, nothing to do")
         return nil
     }
 
+    log.Info("stopping domain...")
+
     dom, conn, err := d.getDomain()
     if err != nil {
-        return errors.Wrap(err, "getting domain")
+        return fmt.Errorf("getting domain: %w", err)
     }
     defer func() {
         if err := closeDomain(dom, conn); err != nil {
-            log.Errorf("unable to close domain: %v", err)
+            log.Errorf("failed closing domain: %v", err)
         }
     }()
 
-    log.Info("gracefully shutting down domain...")
+    log.Info("gracefully shutting domain down...")
 
     // ref: https://libvirt.org/html/libvirt-libvirt-domain.html#virDomainShutdownFlags
     // note: "The order in which the hypervisor tries each shutdown method is undefined, and a hypervisor is not required to support all methods."
@@ -508,52 +584,25 @@ func (d *Driver) Stop() error {
     }
 
     if err := dom.Shutdown(); err != nil {
-        return errors.Wrap(err, "gracefully shutting down domain")
+        return fmt.Errorf("gracefully shutting domain down: %w", err)
     }
 
-    if s, err = d.waitForStopState(90, "graceful shutdown"); err == nil {
+    if err = d.waitForDomainState(state.Stopped, 90*time.Second); err == nil {
         log.Info("domain gracefully shut down")
         return nil
     }
+    log.Warn("failed graceful domain shut down, will try to force-stop")
 
-    // could not get domain state
-    if s == state.None {
-        return err
-    }
-
-    // at this point shutdown failed, so we try with a little bit of force
-    log.Warn("waiting for domain graceful shutdown failed, will try to force-stop")
     if err := d.Kill(); err != nil {
-        log.Warnf("force-stopping domain request failed: %v", err)
+        return fmt.Errorf("force-stopping domain request failed: %w", err)
     }
 
-    if s, err := d.waitForStopState(30, "force-stop"); err != nil {
-        return fmt.Errorf("unable to stop domain %s, current state is %q", d.MachineName, s.String())
+    if err = d.waitForDomainState(state.Stopped, 30*time.Second); err == nil {
+        log.Info("domain force-stopped")
+        return nil
     }
 
-    log.Info("domain force-stopped")
-
-    return nil
-}
-
-// waitForStopState waits maxsec for the domain to reach a stopped state.
-func (d *Driver) waitForStopState(maxsec int, method string) (state.State, error) {
-    var s state.State
-    var err error
-    for i := 0; i < maxsec; i++ {
-        if s, err = d.GetState(); err != nil {
-            return s, errors.Wrap(err, "getting domain state")
-        }
-
-        if s == state.Stopped {
-            return state.Stopped, nil
-        }
-
-        log.Infof("waiting for domain %s %d/%d", method, i, maxsec)
-        time.Sleep(1 * time.Second)
-    }
-
-    return s, fmt.Errorf("timed out waiting for domain %s, current state is %q", method, s)
+    return fmt.Errorf("unable to stop domain: %w", err)
 }
 
 // Remove a host
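Stop() now escalates in two bounded phases — request a guest-cooperative shutdown and wait, then kill and wait — reusing one state-polling helper for both deadlines (90s and 30s in the driver). A toy sketch of that escalation shape with injected callbacks and shortened budgets; every name here is illustrative:

package main

import (
	"errors"
	"fmt"
	"time"
)

// waitStopped polls a state probe until it reports stopped or maxTime
// elapses, mirroring waitForDomainState(state.Stopped, maxTime).
func waitStopped(stopped func() bool, maxTime time.Duration) error {
	deadline := time.Now().Add(maxTime)
	for time.Now().Before(deadline) {
		if stopped() {
			return nil
		}
		time.Sleep(100 * time.Millisecond)
	}
	return errors.New("timed out waiting for stopped state")
}

// stop escalates like the rewritten Stop(): a graceful request with a long
// budget, then a kill with a short one.
func stop(shutdown, kill func() error, stopped func() bool, graceful, force time.Duration) error {
	if err := shutdown(); err != nil {
		return fmt.Errorf("gracefully shutting domain down: %w", err)
	}
	if err := waitStopped(stopped, graceful); err == nil {
		fmt.Println("domain gracefully shut down")
		return nil
	}
	fmt.Println("failed graceful domain shut down, will try to force-stop")

	if err := kill(); err != nil {
		return fmt.Errorf("force-stopping domain request failed: %w", err)
	}
	if err := waitStopped(stopped, force); err == nil {
		fmt.Println("domain force-stopped")
		return nil
	}
	return errors.New("unable to stop domain")
}

func main() {
	down := false
	err := stop(
		func() error { return nil },              // shutdown request accepted...
		func() error { down = true; return nil }, // ...but only the kill lands
		func() bool { return down },
		2*time.Second, time.Second,
	)
	fmt.Println("result:", err)
}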
@@ -562,11 +611,11 @@ func (d *Driver) Remove() error {
 
     conn, err := getConnection(d.ConnectionURI)
     if err != nil {
-        return errors.Wrap(err, "getting libvirt connection")
+        return fmt.Errorf("failed opening libvirt connection: %w", err)
     }
     defer func() {
         if _, err := conn.Close(); err != nil {
-            log.Errorf("unable to close libvirt connection: %v", err)
+            log.Errorf("failed closing libvirt connection: %v", lvErr(err))
         }
     }()
 
@@ -39,13 +39,13 @@ const networkTmpl = `
 <network>
   <name>{{.Name}}</name>
   <dns enable='no'/>
-  {{with .Parameters}}
+  {{- with .Parameters}}
   <ip address='{{.Gateway}}' netmask='{{.Netmask}}'>
     <dhcp>
       <range start='{{.ClientMin}}' end='{{.ClientMax}}'/>
     </dhcp>
   </ip>
-  {{end}}
+  {{- end}}
 </network>
 `
 
@@ -84,9 +84,15 @@ const firstSubnetAddr = "192.168.39.0"
 func setupNetwork(conn *libvirt.Connect, name string) error {
     n, err := conn.LookupNetworkByName(name)
     if err != nil {
-        return errors.Wrapf(err, "checking network %s", name)
+        return fmt.Errorf("failed looking up network %s: %w", name, lvErr(err))
     }
-    defer func() { _ = n.Free() }()
+    defer func() {
+        if n == nil {
+            log.Warnf("nil network, cannot free")
+        } else if err := n.Free(); err != nil {
+            log.Errorf("failed freeing %s network: %v", name, lvErr(err))
+        }
+    }()
 
     // always ensure autostart is set on the network
     autostart, err := n.GetAutostart()
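The PR replaces every fire-and-forget `defer func() { _ = x.Free() }()` with a nil-guarded variant that logs failures instead of discarding them — leaked or unfreed libvirt handles are exactly the kind of thing the added debug output is meant to surface. The pattern in isolation, with an illustrative Resource type standing in for *libvirt.Network or *libvirt.Domain:

package main

import "log"

// Resource stands in for a libvirt object handle such as *libvirt.Network.
type Resource struct{ name string }

func (r *Resource) Free() error {
	log.Printf("freeing %s", r.name)
	return nil
}

func lookup(name string) (*Resource, error) {
	return &Resource{name: name}, nil
}

func use(name string) error {
	r, err := lookup(name)
	if err != nil {
		return err
	}
	// Instead of `defer func() { _ = r.Free() }()`, guard against nil and
	// surface Free errors in the logs rather than silently dropping them.
	defer func() {
		if r == nil {
			log.Printf("nil resource, cannot free")
		} else if err := r.Free(); err != nil {
			log.Printf("failed freeing %s resource: %v", name, err)
		}
	}()

	// ... work with r ...
	return nil
}

func main() {
	if err := use("mk-minikube"); err != nil {
		log.Fatal(err)
	}
}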
@@ -104,7 +110,9 @@ func setupNetwork(conn *libvirt.Connect, name string) error {
     if err != nil {
         return errors.Wrapf(err, "checking network status for %s", name)
     }
+
     if !active {
+        log.Debugf("network %s is not active, trying to start it...", name)
         if err := n.Create(); err != nil {
             return errors.Wrapf(err, "starting network %s", name)
         }
@@ -116,11 +124,11 @@ func setupNetwork(conn *libvirt.Connect, name string) error {
 func (d *Driver) ensureNetwork() error {
     conn, err := getConnection(d.ConnectionURI)
     if err != nil {
-        return errors.Wrap(err, "getting libvirt connection")
+        return fmt.Errorf("failed opening libvirt connection: %w", err)
     }
     defer func() {
         if _, err := conn.Close(); err != nil {
-            log.Errorf("unable to close libvirt connection: %v", err)
+            log.Errorf("failed closing libvirt connection: %v", lvErr(err))
         }
     }()
 
@@ -164,11 +172,11 @@ func (d *Driver) createNetwork() error {
 
     conn, err := getConnection(d.ConnectionURI)
     if err != nil {
-        return errors.Wrap(err, "getting libvirt connection")
+        return fmt.Errorf("failed opening libvirt connection: %w", err)
     }
     defer func() {
         if _, err := conn.Close(); err != nil {
-            log.Errorf("unable to close libvirt connection: %v", err)
+            log.Errorf("failed closing libvirt connection: %v", lvErr(err))
         }
     }()
 
@@ -176,23 +184,34 @@ func (d *Driver) createNetwork() error {
     // It is assumed that the libvirt/kvm installation has already created this network
     netd, err := conn.LookupNetworkByName(d.Network)
     if err != nil {
-        return errors.Wrapf(err, "%s KVM network doesn't exist", d.Network)
+        return fmt.Errorf("failed looking up network %s, cannot continue: %w", d.Network, lvErr(err))
     }
-    log.Debugf("found existing %s KVM network", d.Network)
-    if netd != nil {
-        _ = netd.Free()
+    log.Debugf("found existing %s network", d.Network)
+
+    if netdXML, err := netd.GetXMLDesc(0); err != nil {
+        log.Debugf("failed getting %s network XML: %v", d.Network, lvErr(err))
+    } else {
+        log.Debug(netdXML)
+    }
+
+    if err := netd.Free(); err != nil {
+        log.Errorf("failed freeing %s network: %v", d.Network, lvErr(err))
     }
 
     // network: private
     // Only create the private network if it does not already exist
-    netp, err := conn.LookupNetworkByName(d.PrivateNetwork)
-    defer func() {
-        if netp != nil {
-            _ = netp.Free()
+    if netp, err := conn.LookupNetworkByName(d.PrivateNetwork); err == nil {
+        log.Warnf("found existing %s private network, skipping creation", d.PrivateNetwork)
+
+        if netpXML, err := netp.GetXMLDesc(0); err != nil {
+            log.Debugf("failed getting %s private network XML: %v", d.PrivateNetwork, lvErr(err))
+        } else {
+            log.Debug(netpXML)
+        }
+
+        if err := netp.Free(); err != nil {
+            log.Errorf("failed freeing %s private network: %v", d.PrivateNetwork, lvErr(err))
         }
-    }()
-    if err == nil {
-        log.Debugf("found existing private KVM network %s", d.PrivateNetwork)
+
         return nil
     }
 
@@ -203,7 +222,7 @@ func (d *Driver) createNetwork() error {
     var subnet *network.Parameters
     subnet, err = network.FreeSubnet(subnetAddr, 11, 20)
     if err != nil {
-        log.Debugf("failed to find free subnet for private KVM network %s after %d attempts: %v", d.PrivateNetwork, 20, err)
+        log.Debugf("failed finding free subnet for private network %s after %d attempts: %v", d.PrivateNetwork, 20, err)
         return fmt.Errorf("un-retryable: %w", err)
     }
 
@@ -220,37 +239,42 @@ func (d *Driver) createNetwork() error {
         tmpl := template.Must(template.New("network").Parse(networkTmpl))
         var networkXML bytes.Buffer
         if err = tmpl.Execute(&networkXML, tryNet); err != nil {
-            return fmt.Errorf("executing private KVM network template: %w", err)
+            return fmt.Errorf("executing private network template: %w", err)
         }
-        log.Debugf("created network xml: %s", networkXML.String())
 
         // define the network using our template
-        var libvirtNet *libvirt.Network
-        libvirtNet, err = conn.NetworkDefineXML(networkXML.String())
+        log.Debugf("defining private network:\n%s", networkXML.String())
+        libvirtNet, err := conn.NetworkDefineXML(networkXML.String())
         if err != nil {
-            return fmt.Errorf("defining private KVM network %s %s from xml %s: %w", d.PrivateNetwork, subnet.CIDR, networkXML.String(), err)
+            return fmt.Errorf("defining private network %s %s from xml %s: %w", d.PrivateNetwork, subnet.CIDR, networkXML.String(), err)
         }
 
         // and finally create & start it
-        log.Debugf("trying to create private KVM network %s %s...", d.PrivateNetwork, subnet.CIDR)
+        log.Debugf("creating private network %s %s...", d.PrivateNetwork, subnet.CIDR)
         if err = libvirtNet.Create(); err == nil {
-            log.Debugf("private KVM network %s %s created", d.PrivateNetwork, subnet.CIDR)
+            log.Debugf("private network %s %s created", d.PrivateNetwork, subnet.CIDR)
+            if netpXML, err := libvirtNet.GetXMLDesc(0); err != nil {
+                log.Debugf("failed getting %s private network XML: %v", d.PrivateNetwork, lvErr(err))
+            } else {
+                log.Debug(netpXML)
+            }
+
             return nil
         }
-        log.Debugf("failed to create private KVM network %s %s, will retry: %v", d.PrivateNetwork, subnet.CIDR, err)
+        log.Debugf("failed creating private network %s %s, will retry: %v", d.PrivateNetwork, subnet.CIDR, err)
         subnetAddr = subnet.IP
     }
-    return fmt.Errorf("failed to create private KVM network %s: %w", d.PrivateNetwork, err)
+    return fmt.Errorf("failed creating private network %s: %w", d.PrivateNetwork, err)
 }
 
 func (d *Driver) deleteNetwork() error {
     conn, err := getConnection(d.ConnectionURI)
     if err != nil {
-        return errors.Wrap(err, "getting libvirt connection")
+        return fmt.Errorf("failed opening libvirt connection: %w", err)
     }
     defer func() {
         if _, err := conn.Close(); err != nil {
-            log.Errorf("unable to close libvirt connection: %v", err)
+            log.Errorf("failed closing libvirt connection: %v", lvErr(err))
         }
     }()
 
@@ -271,7 +295,14 @@ func (d *Driver) deleteNetwork() error {
         }
         return errors.Wrapf(err, "failed looking up network %s", d.PrivateNetwork)
     }
-    defer func() { _ = libvirtNet.Free() }()
+    defer func() {
+        if libvirtNet == nil {
+            log.Warnf("nil network, cannot free")
+        } else if err := libvirtNet.Free(); err != nil {
+            log.Errorf("failed freeing %s network: %v", d.PrivateNetwork, lvErr(err))
+        }
+    }()
 
     log.Debugf("Network %s exists", d.PrivateNetwork)
 
     err = d.checkDomains(conn)
@@ -405,7 +436,13 @@ func addStaticIP(conn *libvirt.Connect, networkName, hostname, mac, ip string) e
     if err != nil {
         return fmt.Errorf("failed looking up network %s: %w", networkName, err)
     }
-    defer func() { _ = libvirtNet.Free() }()
+    defer func() {
+        if libvirtNet == nil {
+            log.Warnf("nil network, cannot free")
+        } else if err := libvirtNet.Free(); err != nil {
+            log.Errorf("failed freeing %s network: %v", networkName, lvErr(err))
+        }
+    }()
 
     return libvirtNet.Update(
         libvirt.NETWORK_UPDATE_COMMAND_ADD_LAST,
@@ -431,7 +468,13 @@ func delStaticIP(conn *libvirt.Connect, networkName, hostname, mac, ip string) e
     if err != nil {
         return fmt.Errorf("failed looking up network %s: %w", networkName, err)
     }
-    defer func() { _ = libvirtNet.Free() }()
+    defer func() {
+        if libvirtNet == nil {
+            log.Warnf("nil network, cannot free")
+        } else if err := libvirtNet.Free(); err != nil {
+            log.Errorf("failed freeing %s network: %v", networkName, lvErr(err))
+        }
+    }()
 
     return libvirtNet.Update(
         libvirt.NETWORK_UPDATE_COMMAND_DELETE,
@@ -451,7 +494,13 @@ func dhcpLease(conn *libvirt.Connect, networkName, hostname, mac, ip string) (le
     if err != nil {
         return nil, fmt.Errorf("failed looking up network %s: %w", networkName, err)
     }
-    defer func() { _ = libvirtNet.Free() }()
+    defer func() {
+        if libvirtNet == nil {
+            log.Warnf("nil network, cannot free")
+        } else if err := libvirtNet.Free(); err != nil {
+            log.Errorf("failed freeing %s network: %v", networkName, lvErr(err))
+        }
+    }()
 
     leases, err := libvirtNet.GetDHCPLeases()
     if err != nil {
@@ -491,7 +540,7 @@ func ipFromAPI(conn *libvirt.Connect, domain, networkName string) (string, error
         }
     }
 
-    log.Debugf("unable to find current IP address of domain %s in network %s", domain, networkName)
+    log.Debugf("unable to find current IP address of domain %s in network %s (interfaces detected: %+v)", domain, networkName, ifaces)
     return "", nil
 }
 
@@ -499,22 +548,27 @@ func ipFromAPI(conn *libvirt.Connect, domain, networkName string) (string, error
 func ifListFromAPI(conn *libvirt.Connect, domain string) ([]libvirt.DomainInterface, error) {
     dom, err := conn.LookupDomainByName(domain)
     if err != nil {
-        return nil, fmt.Errorf("failed looking up domain %s: %w", domain, err)
+        return nil, fmt.Errorf("failed looking up domain %s: %w", domain, lvErr(err))
     }
-    defer func() { _ = dom.Free() }()
+    defer func() {
+        if dom == nil {
+            log.Warnf("nil domain, cannot free")
+        } else if err := dom.Free(); err != nil {
+            log.Errorf("failed freeing %s domain: %v", domain, lvErr(err))
+        }
+    }()
 
-    ifs, err := dom.ListAllInterfaceAddresses(libvirt.DOMAIN_INTERFACE_ADDRESSES_SRC_ARP)
-    if ifs == nil {
+    ifs, err := dom.ListAllInterfaceAddresses(libvirt.DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE)
+    if len(ifs) == 0 {
         if err != nil {
-            log.Debugf("failed listing network interface addresses of domain %s(source=arp): %w", domain, err)
+            log.Debugf("failed listing network interface addresses of domain %s (source=lease): %v", domain, lvErr(err))
         } else {
-            log.Debugf("No network interface addresses found for domain %s(source=arp)", domain)
+            log.Debugf("no network interface addresses found for domain %s (source=lease)", domain)
         }
-        log.Debugf("trying to list again with source=lease")
+        log.Debugf("trying to list again with source=arp")
 
-        ifs, err = dom.ListAllInterfaceAddresses(libvirt.DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE)
-        if err != nil {
-            return nil, fmt.Errorf("failed listing network interface addresses of domain %s(source=lease): %w", domain, err)
+        if ifs, err = dom.ListAllInterfaceAddresses(libvirt.DOMAIN_INTERFACE_ADDRESSES_SRC_ARP); err != nil {
+            return nil, fmt.Errorf("failed listing network interface addresses of domain %s (source=arp): %w", domain, lvErr(err))
         }
     }
 
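Note the lookup order flip: DHCP leases (authoritative for the driver's own dnsmasq-managed private network) are now tried first, with the guest ARP table kept only as a fallback. A hedged sketch of the same two-source fallback against the libvirt Go binding; the connection URI and domain name are placeholders:

package main

import (
	"fmt"
	"log"

	"libvirt.org/go/libvirt"
)

// interfaceAddrs lists a domain's interfaces, preferring DHCP-lease data and
// falling back to the guest ARP table when no lease information comes back.
func interfaceAddrs(conn *libvirt.Connect, name string) ([]libvirt.DomainInterface, error) {
	dom, err := conn.LookupDomainByName(name)
	if err != nil {
		return nil, fmt.Errorf("failed looking up domain %s: %w", name, err)
	}
	defer dom.Free()

	ifs, err := dom.ListAllInterfaceAddresses(libvirt.DOMAIN_INTERFACE_ADDRESSES_SRC_LEASE)
	if len(ifs) == 0 {
		log.Printf("no lease data for %s (err=%v), retrying with source=arp", name, err)
		if ifs, err = dom.ListAllInterfaceAddresses(libvirt.DOMAIN_INTERFACE_ADDRESSES_SRC_ARP); err != nil {
			return nil, fmt.Errorf("failed listing interfaces of %s (source=arp): %w", name, err)
		}
	}
	return ifs, nil
}

func main() {
	conn, err := libvirt.NewConnect("qemu:///system") // placeholder URI
	if err != nil {
		log.Fatal(err)
	}
	defer conn.Close()

	ifs, err := interfaceAddrs(conn, "minikube") // placeholder domain name
	if err != nil {
		log.Fatal(err)
	}
	for _, i := range ifs {
		fmt.Printf("%s: %+v\n", i.Name, i.Addrs)
	}
}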
@@ -564,7 +618,13 @@ func ifListFromXML(conn *libvirt.Connect, domain string) ([]kvmIface, error) {
     if err != nil {
         return nil, fmt.Errorf("failed looking up domain %s: %w", domain, err)
     }
-    defer func() { _ = dom.Free() }()
+    defer func() {
+        if dom == nil {
+            log.Warnf("nil domain, cannot free")
+        } else if err := dom.Free(); err != nil {
+            log.Errorf("failed freeing %s domain: %v", domain, lvErr(err))
+        }
+    }()
 
     domXML, err := dom.GetXMLDesc(0)
     if err != nil {
@@ -41,7 +41,7 @@ const fileScheme = "file"
 // DefaultISOURLs returns a list of ISO URL's to consult by default, in priority order
 func DefaultISOURLs() []string {
     v := version.GetISOVersion()
-    isoBucket := "minikube/iso"
+    isoBucket := "minikube-builds/iso/20852"
 
     return []string{
         fmt.Sprintf("https://storage.googleapis.com/%s/minikube-%s-%s.iso", isoBucket, v, runtime.GOARCH),
@@ -43,7 +43,7 @@ func TestOffline(t *testing.T) {
     ctx, cancel := context.WithTimeout(context.Background(), Minutes(15))
     defer CleanupWithLogs(t, profile, cancel)
 
-    startArgs := []string{"start", "-p", profile, "--alsologtostderr", "-v=1", "--memory=2048", "--wait=true"}
+    startArgs := []string{"start", "-p", profile, "--alsologtostderr", "-v=1", "--memory=3072", "--wait=true"}
     startArgs = append(startArgs, StartArgs()...)
     c := exec.CommandContext(ctx, Target(), startArgs...)
     env := os.Environ()
@@ -101,7 +101,7 @@ func TestAddons(t *testing.T) {
     // so we override that here to let minikube auto-detect appropriate cgroup driver
     os.Setenv(constants.MinikubeForceSystemdEnv, "")
 
-    args := append([]string{"start", "-p", profile, "--wait=true", "--memory=4000", "--alsologtostderr", "--addons=registry", "--addons=registry-creds", "--addons=metrics-server", "--addons=volumesnapshots", "--addons=csi-hostpath-driver", "--addons=gcp-auth", "--addons=cloud-spanner", "--addons=inspektor-gadget", "--addons=nvidia-device-plugin", "--addons=yakd", "--addons=volcano", "--addons=amd-gpu-device-plugin"}, StartArgs()...)
+    args := append([]string{"start", "-p", profile, "--wait=true", "--memory=4096", "--alsologtostderr", "--addons=registry", "--addons=registry-creds", "--addons=metrics-server", "--addons=volumesnapshots", "--addons=csi-hostpath-driver", "--addons=gcp-auth", "--addons=cloud-spanner", "--addons=inspektor-gadget", "--addons=nvidia-device-plugin", "--addons=yakd", "--addons=volcano", "--addons=amd-gpu-device-plugin"}, StartArgs()...)
     if !NoneDriver() {
         args = append(args, "--addons=ingress", "--addons=ingress-dns", "--addons=storage-provisioner-rancher")
     }
@@ -39,7 +39,7 @@ func TestCertOptions(t *testing.T) {
     ctx, cancel := context.WithTimeout(context.Background(), Minutes(30))
     defer CleanupWithLogs(t, profile, cancel)
 
-    args := append([]string{"start", "-p", profile, "--memory=2048", "--apiserver-ips=127.0.0.1", "--apiserver-ips=192.168.15.15", "--apiserver-names=localhost", "--apiserver-names=www.google.com", "--apiserver-port=8555"}, StartArgs()...)
+    args := append([]string{"start", "-p", profile, "--memory=3072", "--apiserver-ips=127.0.0.1", "--apiserver-ips=192.168.15.15", "--apiserver-names=localhost", "--apiserver-names=www.google.com", "--apiserver-port=8555"}, StartArgs()...)
 
     // We can safely override --apiserver-name with
     if NeedsPortForward() {
@@ -118,7 +118,7 @@ func TestCertExpiration(t *testing.T) {
     ctx, cancel := context.WithTimeout(context.Background(), Minutes(30))
     defer CleanupWithLogs(t, profile, cancel)
 
-    args := append([]string{"start", "-p", profile, "--memory=2048", "--cert-expiration=3m"}, StartArgs()...)
+    args := append([]string{"start", "-p", profile, "--memory=3072", "--cert-expiration=3m"}, StartArgs()...)
 
     rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
     if err != nil {
@@ -127,7 +127,7 @@ func TestCertExpiration(t *testing.T) {
 
     // Now wait 3 minutes for the certs to expire and make sure minikube starts properly
     time.Sleep(time.Minute * 3)
-    args = append([]string{"start", "-p", profile, "--memory=2048", "--cert-expiration=8760h"}, StartArgs()...)
+    args = append([]string{"start", "-p", profile, "--memory=3072", "--cert-expiration=8760h"}, StartArgs()...)
     rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
     if err != nil {
         t.Errorf("failed to start minikube after cert expiration: %q : %v", rr.Command(), err)
@@ -47,7 +47,7 @@ func TestDockerFlags(t *testing.T) {
     defer CleanupWithLogs(t, profile, cancel)
 
     // Use the most verbose logging for the simplest test. If it fails, something is very wrong.
-    args := append([]string{"start", "-p", profile, "--cache-images=false", "--memory=2048", "--install-addons=false", "--wait=false", "--docker-env=FOO=BAR", "--docker-env=BAZ=BAT", "--docker-opt=debug", "--docker-opt=icc=true", "--alsologtostderr", "-v=5"}, StartArgs()...)
+    args := append([]string{"start", "-p", profile, "--cache-images=false", "--memory=3072", "--install-addons=false", "--wait=false", "--docker-env=FOO=BAR", "--docker-env=BAZ=BAT", "--docker-opt=debug", "--docker-opt=icc=true", "--alsologtostderr", "-v=5"}, StartArgs()...)
     rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
     if err != nil {
         t.Errorf("failed to start minikube with args: %q : %v", rr.Command(), err)
@@ -87,7 +87,7 @@ func TestForceSystemdFlag(t *testing.T) {
     defer CleanupWithLogs(t, profile, cancel)
 
     // Use the most verbose logging for the simplest test. If it fails, something is very wrong.
-    args := append([]string{"start", "-p", profile, "--memory=2048", "--force-systemd", "--alsologtostderr", "-v=5"}, StartArgs()...)
+    args := append([]string{"start", "-p", profile, "--memory=3072", "--force-systemd", "--alsologtostderr", "-v=5"}, StartArgs()...)
     rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
     if err != nil {
         t.Errorf("failed to start minikube with args: %q : %v", rr.Command(), err)
@@ -149,7 +149,7 @@ func TestForceSystemdEnv(t *testing.T) {
     ctx, cancel := context.WithTimeout(context.Background(), Minutes(30))
     defer CleanupWithLogs(t, profile, cancel)
 
-    args := append([]string{"start", "-p", profile, "--memory=2048", "--alsologtostderr", "-v=5"}, StartArgs()...)
+    args := append([]string{"start", "-p", profile, "--memory=3072", "--alsologtostderr", "-v=5"}, StartArgs()...)
     cmd := exec.CommandContext(ctx, Target(), args...)
     cmd.Env = append(os.Environ(), "MINIKUBE_FORCE_SYSTEMD=true")
     rr, err := Run(t, cmd)
@@ -76,7 +76,7 @@ func TestErrorSpam(t *testing.T) {
     t.Run("setup", func(t *testing.T) {
         // This should likely use multi-node once it's ready
        // use `--log_dir` flag to run isolated and avoid race condition - ie, failing to clean up (locked) log files created by other concurently-run tests, or counting them in results
-        args := append([]string{"start", "-p", profile, "-n=1", "--memory=2250", "--wait=false", fmt.Sprintf("--log_dir=%s", logDir)}, StartArgs()...)
+        args := append([]string{"start", "-p", profile, "-n=1", "--memory=3072", "--wait=false", fmt.Sprintf("--log_dir=%s", logDir)}, StartArgs()...)
 
         rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
         if err != nil {
@@ -2236,10 +2236,10 @@ func startHTTPProxy(t *testing.T) (*http.Server, error) {
 
 func startMinikubeWithProxy(ctx context.Context, t *testing.T, profile string, proxyEnv string, addr string) {
     // Use more memory so that we may reliably fit MySQL and nginx
-    memoryFlag := "--memory=4000"
+    memoryFlag := "--memory=4096"
     // to avoid failure for mysq/pv on virtualbox on darwin on free github actions,
     if detect.GithubActionRunner() && VirtualboxDriver() {
-        memoryFlag = "--memory=6000"
+        memoryFlag = "--memory=6144"
     }
     // passing --api-server-port so later verify it didn't change in soft start.
     startArgs := append([]string{"start", "-p", profile, memoryFlag, fmt.Sprintf("--apiserver-port=%d", apiPortTest), "--wait=all"}, StartArgsWithContext(ctx)...)
@@ -22,6 +22,7 @@ import (
     "context"
+    "fmt"
     "os/exec"
     "strings"
     "testing"
 
     "k8s.io/minikube/pkg/minikube/vmpath"
@@ -36,7 +37,7 @@ func TestGuestEnvironment(t *testing.T) {
     defer CleanupWithLogs(t, profile, cancel)
 
     t.Run("Setup", func(t *testing.T) {
-        args := append([]string{"start", "-p", profile, "--install-addons=false", "--memory=2048", "--wait=false", "--disable-metrics=true"}, StartArgs()...)
+        args := append([]string{"start", "-p", profile, "--install-addons=false", "--memory=3072", "--wait=false", "--disable-metrics=true"}, StartArgs()...)
         rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
         if err != nil {
             t.Errorf("failed to start minikube: args %q: %v", rr.Command(), err)
@@ -48,7 +48,7 @@ func TestGvisorAddon(t *testing.T) {
         CleanupWithLogs(t, profile, cancel)
     }()
 
-    startArgs := append([]string{"start", "-p", profile, "--memory=2200", "--container-runtime=containerd", "--docker-opt", "containerd=/var/run/containerd/containerd.sock"}, StartArgs()...)
+    startArgs := append([]string{"start", "-p", profile, "--memory=3072", "--container-runtime=containerd", "--docker-opt", "containerd=/var/run/containerd/containerd.sock"}, StartArgs()...)
     rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
     if err != nil {
         t.Fatalf("failed to start minikube: args %q: %v", rr.Command(), err)
@@ -97,7 +97,7 @@ func TestMultiControlPlane(t *testing.T) {
 // validateHAStartCluster ensures ha (multi-control plane) cluster can start.
 func validateHAStartCluster(ctx context.Context, t *testing.T, profile string) {
     // start ha (multi-control plane) cluster
-    startArgs := append([]string{"-p", profile, "start", "--ha", "--memory", "2200", "--wait", "true", "--alsologtostderr", "-v", "5"}, StartArgs()...)
+    startArgs := append([]string{"-p", profile, "start", "--ha", "--memory", "3072", "--wait", "true", "--alsologtostderr", "-v", "5"}, StartArgs()...)
     rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
     if err != nil {
         t.Fatalf("failed to fresh-start ha (multi-control plane) cluster. args %q : %v", rr.Command(), err)
@@ -43,7 +43,7 @@ func TestJSONOutput(t *testing.T) {
     }{
         {
             command: "start",
-            args: append([]string{"--memory=2200", "--wait=true"}, StartArgs()...),
+            args: append([]string{"--memory=3072", "--wait=true"}, StartArgs()...),
         }, {
             command: "pause",
         }, {
@@ -155,7 +155,7 @@ func TestErrorJSONOutput(t *testing.T) {
 
     // force a failure via --driver=fail so that we can make sure errors
     // are printed as expected
-    startArgs := []string{"start", "-p", profile, "--memory=2200", "--output=json", "--wait=true", "--driver=fail"}
+    startArgs := []string{"start", "-p", profile, "--memory=3072", "--output=json", "--wait=true", "--driver=fail"}
 
     rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
     if err == nil {
@@ -93,7 +93,7 @@ func validateStartWithMount(ctx context.Context, t *testing.T, profile string) {
     // We have to increment this because if you have two mounts with the same port, when you kill one cluster the mount will break for the other
     mountStartPort++
 
-    args := []string{"start", "-p", profile, "--memory=2048", "--mount", "--mount-gid", mountGID, "--mount-msize", mountMSize, "--mount-port", mountPort(), "--mount-uid", mountUID, "--no-kubernetes"}
+    args := []string{"start", "-p", profile, "--memory=3072", "--mount", "--mount-gid", mountGID, "--mount-msize", mountMSize, "--mount-port", mountPort(), "--mount-uid", mountUID, "--no-kubernetes"}
     args = append(args, StartArgs()...)
     rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
     if err != nil {
@@ -92,7 +92,7 @@ func TestMultiNode(t *testing.T) {
 // validateMultiNodeStart makes sure a 2 node cluster can start
 func validateMultiNodeStart(ctx context.Context, t *testing.T, profile string) {
     // Start a 2 node cluster with the --nodes param
-    startArgs := append([]string{"start", "-p", profile, "--wait=true", "--memory=2200", "--nodes=2", "-v=5", "--alsologtostderr"}, StartArgs()...)
+    startArgs := append([]string{"start", "-p", profile, "--wait=true", "--memory=3072", "--nodes=2", "-v=5", "--alsologtostderr"}, StartArgs()...)
     rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
     if err != nil {
         t.Fatalf("failed to start cluster. args %q : %v", rr.Command(), err)
@@ -239,7 +239,7 @@ func validateFalseCNI(ctx context.Context, t *testing.T, profile string) {
         cr = "crio"
     }
 
-    startArgs := []string{"start", "-p", profile, "--memory=2048", "--alsologtostderr", "--cni=false"}
+    startArgs := []string{"start", "-p", profile, "--memory=3072", "--alsologtostderr", "--cni=false"}
     startArgs = append(startArgs, StartArgs()...)
 
     mkCmd := exec.CommandContext(ctx, Target(), startArgs...)
@@ -91,7 +91,7 @@ func validateStartWithK8S(ctx context.Context, t *testing.T, profile string) {
     defer PostMortemLogs(t, profile)
 
     // docs: start minikube with Kubernetes.
-    args := append([]string{"start", "-p", profile}, StartArgs()...)
+    args := append([]string{"start", "-p", profile, "--memory=3072", "--alsologtostderr", "-v=5"}, StartArgs()...)
     rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
     if err != nil {
         t.Fatalf("failed to start minikube with args: %q : %v", rr.Command(), err)
@@ -108,7 +108,7 @@ func validateStartWithStopK8s(ctx context.Context, t *testing.T, profile string)
     defer PostMortemLogs(t, profile)
 
     // docs: start minikube with no Kubernetes.
-    args := append([]string{"start", "-p", profile, "--no-kubernetes"}, StartArgs()...)
+    args := append([]string{"start", "-p", profile, "--no-kubernetes", "--memory=3072", "--alsologtostderr", "-v=5"}, StartArgs()...)
     rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
     if err != nil {
         t.Fatalf("failed to start minikube with args: %q : %v", rr.Command(), err)
@@ -132,7 +132,7 @@ func validateStartNoK8S(ctx context.Context, t *testing.T, profile string) {
     defer PostMortemLogs(t, profile)
 
     // docs: start minikube with no Kubernetes.
-    args := append([]string{"start", "-p", profile, "--no-kubernetes"}, StartArgs()...)
+    args := append([]string{"start", "-p", profile, "--no-kubernetes", "--memory=3072", "--alsologtostderr", "-v=5"}, StartArgs()...)
     rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
     if err != nil {
         t.Fatalf("failed to start minikube with args: %q : %v", rr.Command(), err)
@@ -76,7 +76,7 @@ func TestPause(t *testing.T) {
 func validateFreshStart(ctx context.Context, t *testing.T, profile string) {
     defer PostMortemLogs(t, profile)
 
-    args := append([]string{"start", "-p", profile, "--memory=2048", "--install-addons=false", "--wait=all"}, StartArgs()...)
+    args := append([]string{"start", "-p", profile, "--memory=3072", "--install-addons=false", "--wait=all"}, StartArgs()...)
     rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
     if err != nil {
         t.Fatalf("failed to start minikube with args: %q : %v", rr.Command(), err)
@@ -36,7 +36,7 @@ func TestPreload(t *testing.T) {
     ctx, cancel := context.WithTimeout(context.Background(), Minutes(40))
     defer CleanupWithLogs(t, profile, cancel)
 
-    startArgs := []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "--wait=true", "--preload=false"}
+    startArgs := []string{"start", "-p", profile, "--memory=3072", "--alsologtostderr", "--wait=true", "--preload=false"}
     startArgs = append(startArgs, StartArgs()...)
     k8sVersion := "v1.24.4"
     startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", k8sVersion))
@@ -61,7 +61,7 @@ func TestPreload(t *testing.T) {
     }
 
     // re-start the cluster and check if image is preserved
-    startArgs = []string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=1", "--wait=true"}
+    startArgs = []string{"start", "-p", profile, "--memory=3072", "--alsologtostderr", "-v=1", "--wait=true"}
     startArgs = append(startArgs, StartArgs()...)
     rr, err = Run(t, exec.CommandContext(ctx, Target(), startArgs...))
     if err != nil {
@@ -124,7 +124,7 @@ func TestScheduledStopUnix(t *testing.T) {
 }
 
 func startMinikube(ctx context.Context, t *testing.T, profile string) {
-    args := append([]string{"start", "-p", profile, "--memory=2048"}, StartArgs()...)
+    args := append([]string{"start", "-p", profile, "--memory=3072"}, StartArgs()...)
     rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
     if err != nil {
         t.Fatalf("starting minikube: %v\n%s", err, rr.Output())
@@ -62,7 +62,7 @@ func TestSkaffold(t *testing.T) {
     }
     t.Logf("skaffold version: %s", rr.Stdout.Bytes())
 
-    args := append([]string{"start", "-p", profile, "--memory=2600"}, StartArgs()...)
+    args := append([]string{"start", "-p", profile, "--memory=3072"}, StartArgs()...)
     rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
     if err != nil {
         t.Fatalf("starting minikube: %v\n%s", err, rr.Output())
@@ -106,7 +106,7 @@ func TestStartStop(t *testing.T) {
         waitFlag = "--wait=apiserver,system_pods,default_sa"
     }
 
-    startArgs := append([]string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", waitFlag}, tc.args...)
+    startArgs := append([]string{"start", "-p", profile, "--memory=3072", "--alsologtostderr", waitFlag}, tc.args...)
     startArgs = append(startArgs, StartArgs()...)
     startArgs = append(startArgs, fmt.Sprintf("--kubernetes-version=%s", tc.version))
 
@@ -41,7 +41,7 @@ func TestInsufficientStorage(t *testing.T) {
     ctx, cancel := context.WithTimeout(context.Background(), Minutes(5))
     defer Cleanup(t, profile, cancel)
 
-    startArgs := []string{"start", "-p", profile, "--memory=2048", "--output=json", "--wait=true"}
+    startArgs := []string{"start", "-p", profile, "--memory=3072", "--output=json", "--wait=true"}
     startArgs = append(startArgs, StartArgs()...)
     c := exec.CommandContext(ctx, Target(), startArgs...)
     // artificially set /var to 100% capacity
@@ -95,7 +95,7 @@ func TestRunningBinaryUpgrade(t *testing.T) {
     }
     defer os.Remove(tf.Name())
 
-    args := append([]string{"start", "-p", profile, "--memory=2200"}, legacyStartArgs()...)
+    args := append([]string{"start", "-p", profile, "--memory=3072"}, legacyStartArgs()...)
     rr := &RunResult{}
     r := func() error {
         c := exec.CommandContext(ctx, tf.Name(), args...)
@@ -126,7 +126,7 @@ func TestRunningBinaryUpgrade(t *testing.T) {
         t.Fatalf("legacy %s start failed: %v", desiredLegacyVersion, err)
     }
 
-    args = append([]string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=1"}, StartArgs()...)
+    args = append([]string{"start", "-p", profile, "--memory=3072", "--alsologtostderr", "-v=1"}, StartArgs()...)
     rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
     if err != nil {
         t.Fatalf("upgrade from %s to HEAD failed: %s: %v", desiredLegacyVersion, rr.Command(), err)
@@ -158,7 +158,7 @@ func TestStoppedBinaryUpgrade(t *testing.T) {
     defer os.Remove(tf.Name())
 
     t.Run("Upgrade", func(t *testing.T) {
-        args := append([]string{"start", "-p", profile, "--memory=2200"}, legacyStartArgs()...)
+        args := append([]string{"start", "-p", profile, "--memory=3072"}, legacyStartArgs()...)
         rr := &RunResult{}
         r := func() error {
             c := exec.CommandContext(ctx, tf.Name(), args...)
@@ -194,7 +194,7 @@ func TestStoppedBinaryUpgrade(t *testing.T) {
         t.Errorf("failed to stop cluster: %s: %v", rr.Command(), err)
     }
 
-    args = append([]string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=1"}, StartArgs()...)
+    args = append([]string{"start", "-p", profile, "--memory=3072", "--alsologtostderr", "-v=1"}, StartArgs()...)
     rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
     if err != nil {
         t.Fatalf("upgrade from %s to HEAD failed: %s: %v", desiredLegacyVersion, rr.Command(), err)
@@ -218,7 +218,7 @@ func TestKubernetesUpgrade(t *testing.T) {
 
     defer CleanupWithLogs(t, profile, cancel)
 
-    args := append([]string{"start", "-p", profile, "--memory=2200", fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...)
+    args := append([]string{"start", "-p", profile, "--memory=3072", fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...)
     rr, err := Run(t, exec.CommandContext(ctx, Target(), args...))
     if err != nil {
         t.Errorf("failed to start minikube HEAD with oldest k8s version: %s: %v", rr.Command(), err)
@@ -239,7 +239,7 @@ func TestKubernetesUpgrade(t *testing.T) {
         t.Errorf("FAILED: status = %q; want = %q", got, state.Stopped.String())
     }
 
-    args = append([]string{"start", "-p", profile, "--memory=2200", fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...)
+    args = append([]string{"start", "-p", profile, "--memory=3072", fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...)
     rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
     if err != nil {
         t.Errorf("failed to upgrade with newest k8s version. args: %s : %v", rr.Command(), err)
@@ -265,13 +265,13 @@ func TestKubernetesUpgrade(t *testing.T) {
     }
 
     t.Logf("Attempting to downgrade Kubernetes (should fail)")
-    args = append([]string{"start", "-p", profile, "--memory=2200", fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion)}, StartArgs()...)
+    args = append([]string{"start", "-p", profile, "--memory=3072", fmt.Sprintf("--kubernetes-version=%s", constants.OldestKubernetesVersion)}, StartArgs()...)
     if rr, err := Run(t, exec.CommandContext(ctx, Target(), args...)); err == nil {
         t.Fatalf("downgrading Kubernetes should not be allowed. expected to see error but got %v for %q", err, rr.Command())
     }
 
     t.Logf("Attempting restart after unsuccessful downgrade")
-    args = append([]string{"start", "-p", profile, "--memory=2200", fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...)
+    args = append([]string{"start", "-p", profile, "--memory=3072", fmt.Sprintf("--kubernetes-version=%s", constants.NewestKubernetesVersion), "--alsologtostderr", "-v=1"}, StartArgs()...)
     rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
     if err != nil {
         t.Errorf("start after failed upgrade: %s: %v", rr.Command(), err)
@@ -303,7 +303,7 @@ func TestMissingContainerUpgrade(t *testing.T) {
     }
     defer os.Remove(tf.Name())
 
-    args := append([]string{"start", "-p", profile, "--memory=2200"}, StartArgs()...)
+    args := append([]string{"start", "-p", profile, "--memory=3072"}, StartArgs()...)
     rr := &RunResult{}
     r := func() error {
         rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), args...))
@@ -325,7 +325,7 @@ func TestMissingContainerUpgrade(t *testing.T) {
         t.Fatalf("%s failed: %v", rr.Command(), err)
     }
 
-    args = append([]string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=1"}, StartArgs()...)
+    args = append([]string{"start", "-p", profile, "--memory=3072", "--alsologtostderr", "-v=1"}, StartArgs()...)
     rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
     if err != nil {
         t.Errorf("failed missing container upgrade from %s. args: %s : %v", legacyVersion, rr.Command(), err)