Merge branch 'master' of github.com:kubernetes/minikube into v1.12.2
commit a4cd184361

Makefile
@@ -87,8 +87,7 @@ SHA512SUM=$(shell command -v sha512sum || echo "shasum -a 512")
 GVISOR_TAG ?= latest
 
 # storage provisioner tag to push changes to
 # to update minikubes default, update pkg/minikube/bootstrapper/images
-STORAGE_PROVISIONER_TAG ?= latest
+STORAGE_PROVISIONER_TAG ?= v2
 
 # TODO: multi-arch manifest
 ifeq ($(GOARCH),amd64)
@@ -98,7 +97,7 @@ STORAGE_PROVISIONER_IMAGE ?= $(REGISTRY)/storage-provisioner-$(GOARCH):$(STORAGE
 endif
 
 # Set the version information for the Kubernetes servers
-MINIKUBE_LDFLAGS := -X k8s.io/minikube/pkg/version.version=$(VERSION) -X k8s.io/minikube/pkg/version.isoVersion=$(ISO_VERSION) -X k8s.io/minikube/pkg/version.isoPath=$(ISO_BUCKET) -X k8s.io/minikube/pkg/version.gitCommitID=$(COMMIT)
+MINIKUBE_LDFLAGS := -X k8s.io/minikube/pkg/version.version=$(VERSION) -X k8s.io/minikube/pkg/version.isoVersion=$(ISO_VERSION) -X k8s.io/minikube/pkg/version.isoPath=$(ISO_BUCKET) -X k8s.io/minikube/pkg/version.gitCommitID=$(COMMIT) -X k8s.io/minikube/pkg/version.storageProvisionerVersion=$(STORAGE_PROVISIONER_TAG)
 PROVISIONER_LDFLAGS := "-X k8s.io/minikube/pkg/storage.version=$(STORAGE_PROVISIONER_TAG) -s -w -extldflags '-static'"
 
 MINIKUBEFILES := ./cmd/minikube/
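The extra -X flag above bakes STORAGE_PROVISIONER_TAG into the binary at link time. As a minimal, self-contained sketch of that mechanism (the package and variable name here are illustrative; minikube's real variable lives in k8s.io/minikube/pkg/version):

// sketch.go - built with: go build -ldflags="-X main.defaultTag=v2" sketch.go
package main

import "fmt"

// defaultTag is overwritten by the -X linker flag; without it the value below is kept.
var defaultTag = "unset"

func main() {
	fmt.Println("storage provisioner tag:", defaultTag)
}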
@@ -126,7 +126,7 @@ func runDelete(cmd *cobra.Command, args []string) {
 	if len(args) > 0 {
 		exit.UsageT("Usage: minikube delete")
 	}
-	register.SetEventLogPath(localpath.EventLog(ClusterFlagValue()))
+	//register.SetEventLogPath(localpath.EventLog(ClusterFlagValue()))
 	register.Reg.SetStep(register.Deleting)
 
 	validProfiles, invalidProfiles, err := config.ListProfiles()
@@ -40,7 +40,7 @@ subjects:
 apiVersion: rbac.authorization.k8s.io/v1
 kind: Role
 metadata:
-  name: system::leader-locking-storage-provisioner
+  name: system:persistent-volume-provisioner
   namespace: kube-system
   labels:
     addonmanager.kubernetes.io/mode: EnsureExists
@@ -51,6 +51,7 @@ rules:
   - endpoints
   verbs:
   - watch
+  - create
 - apiGroups:
   - ""
   resourceNames:
@@ -60,18 +61,19 @@ rules:
   verbs:
   - get
   - update
+  - create
 ---
 apiVersion: rbac.authorization.k8s.io/v1
 kind: RoleBinding
 metadata:
-  name: leader-locking-storage-provisioner
+  name: system:persistent-volume-provisioner
   namespace: kube-system
   labels:
     addonmanager.kubernetes.io/mode: EnsureExists
 roleRef:
   apiGroup: rbac.authorization.k8s.io
   kind: Role
-  name: system::leader-locking-storage-provisioner
+  name: system:persistent-volume-provisioner
 subjects:
 - kind: ServiceAccount
   name: storage-provisioner
@@ -98,7 +100,7 @@ spec:
   hostNetwork: true
   containers:
   - name: storage-provisioner
-    image: {{default "gcr.io/k8s-minikube" .ImageRepository}}/storage-provisioner{{.ExoticArch}}:v1.8.1
+    image: {{default "gcr.io/k8s-minikube" .ImageRepository}}/storage-provisioner{{.ExoticArch}}:{{.StorageProvisionerVersion}}
     command: ["/storage-provisioner"]
     imagePullPolicy: IfNotPresent
     volumeMounts:
@@ -22,6 +22,7 @@ import (
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/constants"
 	"k8s.io/minikube/pkg/minikube/vmpath"
+	"k8s.io/minikube/pkg/version"
 )
 
 // Addon is a named list of assets, that can be enabled
@@ -452,17 +453,19 @@ func GenerateTemplateData(cfg config.KubernetesConfig) interface{} {
 		ea = "-" + runtime.GOARCH
 	}
 	opts := struct {
-		Arch                string
-		ExoticArch          string
-		ImageRepository     string
-		LoadBalancerStartIP string
-		LoadBalancerEndIP   string
+		Arch                      string
+		ExoticArch                string
+		ImageRepository           string
+		LoadBalancerStartIP       string
+		LoadBalancerEndIP         string
+		StorageProvisionerVersion string
 	}{
-		Arch:                a,
-		ExoticArch:          ea,
-		ImageRepository:     cfg.ImageRepository,
-		LoadBalancerStartIP: cfg.LoadBalancerStartIP,
-		LoadBalancerEndIP:   cfg.LoadBalancerEndIP,
+		Arch:                      a,
+		ExoticArch:                ea,
+		ImageRepository:           cfg.ImageRepository,
+		LoadBalancerStartIP:       cfg.LoadBalancerStartIP,
+		LoadBalancerEndIP:         cfg.LoadBalancerEndIP,
+		StorageProvisionerVersion: version.GetStorageProvisionerVersion(),
 	}
 
 	return opts
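GenerateTemplateData is what feeds the {{.StorageProvisionerVersion}} placeholder in the addon YAML above. A hedged sketch of how that image line renders through Go's text/template; the "default" helper below is an assumption standing in for whatever minikube registers for the {{default ...}} call, not the project's actual plumbing:

package main

import (
	"os"
	"text/template"
)

func main() {
	// Assumed stand-in for the {{default x y}} helper used in the addon template.
	funcs := template.FuncMap{"default": func(def, val string) string {
		if val == "" {
			return def
		}
		return val
	}}
	line := `image: {{default "gcr.io/k8s-minikube" .ImageRepository}}/storage-provisioner{{.ExoticArch}}:{{.StorageProvisionerVersion}}`
	tmpl := template.Must(template.New("img").Funcs(funcs).Parse(line))
	data := struct {
		ImageRepository           string
		ExoticArch                string
		StorageProvisionerVersion string
	}{ImageRepository: "", ExoticArch: "", StorageProvisionerVersion: "v2"}
	// Prints: image: gcr.io/k8s-minikube/storage-provisioner:v2
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}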
@@ -23,6 +23,7 @@ import (
 	"runtime"
 
 	"github.com/blang/semver"
+	"k8s.io/minikube/pkg/version"
 )
 
 // Pause returns the image name to pull for a given Kubernetes version
@@ -127,7 +128,7 @@ func auxiliary(mirror string) []string {
 
 // storageProvisioner returns the minikube storage provisioner image
 func storageProvisioner(mirror string) string {
-	return path.Join(minikubeRepo(mirror), "storage-provisioner"+archTag(false)+"v1.8.1")
+	return path.Join(minikubeRepo(mirror), "storage-provisioner"+archTag(false)+version.GetStorageProvisionerVersion())
 }
 
 // dashboardFrontend returns the image used for the dashboard frontend
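The rewritten storageProvisioner helper composes the image reference from the repository, an architecture suffix, and the linker-injected tag. A small sketch of that composition; the minikubeRepo and archTag return values are assumptions here, not quotes of the real helpers:

package main

import (
	"fmt"
	"path"
)

func main() {
	repo := "gcr.io/k8s-minikube" // assumed result of minikubeRepo(mirror) with no mirror set
	arch := ":"                   // assumed result of archTag(false) on amd64; exotic arches add "-<arch>:"
	tag := "v2"                   // version.GetStorageProvisionerVersion() after the ldflags change
	// Prints: gcr.io/k8s-minikube/storage-provisioner:v2
	fmt.Println(path.Join(repo, "storage-provisioner"+arch+tag))
}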
@@ -24,7 +24,7 @@ import (
 
 func TestAuxiliary(t *testing.T) {
 	want := []string{
-		"gcr.io/k8s-minikube/storage-provisioner:v1.8.1",
+		"gcr.io/k8s-minikube/storage-provisioner:v2",
 		"kubernetesui/dashboard:v2.0.1",
 		"kubernetesui/metrics-scraper:v1.0.4",
 	}
@@ -36,7 +36,7 @@ func TestAuxiliary(t *testing.T) {
 
 func TestAuxiliaryMirror(t *testing.T) {
 	want := []string{
-		"test.mirror/storage-provisioner:v1.8.1",
+		"test.mirror/storage-provisioner:v2",
 		"test.mirror/dashboard:v2.0.1",
 		"test.mirror/metrics-scraper:v1.0.4",
 	}
@@ -37,7 +37,7 @@ func TestKubeadmImages(t *testing.T) {
 			"k8s.gcr.io/coredns:1.6.5",
 			"k8s.gcr.io/etcd:3.4.3-0",
 			"k8s.gcr.io/pause:3.1",
-			"gcr.io/k8s-minikube/storage-provisioner:v1.8.1",
+			"gcr.io/k8s-minikube/storage-provisioner:v2",
 			"kubernetesui/dashboard:v2.0.1",
 			"kubernetesui/metrics-scraper:v1.0.4",
 		}},
@@ -49,7 +49,7 @@ func TestKubeadmImages(t *testing.T) {
 			"mirror.k8s.io/coredns:1.6.2",
 			"mirror.k8s.io/etcd:3.3.15-0",
 			"mirror.k8s.io/pause:3.1",
-			"mirror.k8s.io/storage-provisioner:v1.8.1",
+			"mirror.k8s.io/storage-provisioner:v2",
 			"mirror.k8s.io/dashboard:v2.0.1",
 			"mirror.k8s.io/metrics-scraper:v1.0.4",
 		}},
@@ -61,7 +61,7 @@ func TestKubeadmImages(t *testing.T) {
 			"k8s.gcr.io/coredns:1.3.1",
 			"k8s.gcr.io/etcd:3.3.10",
 			"k8s.gcr.io/pause:3.1",
-			"gcr.io/k8s-minikube/storage-provisioner:v1.8.1",
+			"gcr.io/k8s-minikube/storage-provisioner:v2",
 			"kubernetesui/dashboard:v2.0.1",
 			"kubernetesui/metrics-scraper:v1.0.4",
 		}},
@@ -73,7 +73,7 @@ func TestKubeadmImages(t *testing.T) {
 			"k8s.gcr.io/coredns:1.3.1",
 			"k8s.gcr.io/etcd:3.3.10",
 			"k8s.gcr.io/pause:3.1",
-			"gcr.io/k8s-minikube/storage-provisioner:v1.8.1",
+			"gcr.io/k8s-minikube/storage-provisioner:v2",
 			"kubernetesui/dashboard:v2.0.1",
 			"kubernetesui/metrics-scraper:v1.0.4",
 		}},
@@ -85,7 +85,7 @@ func TestKubeadmImages(t *testing.T) {
 			"k8s.gcr.io/coredns:1.2.6",
 			"k8s.gcr.io/etcd:3.2.24",
 			"k8s.gcr.io/pause:3.1",
-			"gcr.io/k8s-minikube/storage-provisioner:v1.8.1",
+			"gcr.io/k8s-minikube/storage-provisioner:v2",
 			"kubernetesui/dashboard:v2.0.1",
 			"kubernetesui/metrics-scraper:v1.0.4",
 		}},
@@ -97,7 +97,7 @@ func TestKubeadmImages(t *testing.T) {
 			"k8s.gcr.io/coredns:1.2.2",
 			"k8s.gcr.io/etcd:3.2.24",
 			"k8s.gcr.io/pause:3.1",
-			"gcr.io/k8s-minikube/storage-provisioner:v1.8.1",
+			"gcr.io/k8s-minikube/storage-provisioner:v2",
 			"kubernetesui/dashboard:v2.0.1",
 			"kubernetesui/metrics-scraper:v1.0.4",
 		}},
@@ -27,7 +27,7 @@ func TestAddRepoTagToImageName(t *testing.T) {
 	}{
 		{"kubernetesui/dashboard:v2.0.1", "docker.io/kubernetesui/dashboard:v2.0.1"},
 		{"kubernetesui/metrics-scraper:v1.0.4", "docker.io/kubernetesui/metrics-scraper:v1.0.4"},
-		{"gcr.io/k8s-minikube/storage-provisioner:v1.8.1", "gcr.io/k8s-minikube/storage-provisioner:v1.8.1"},
+		{"gcr.io/k8s-minikube/storage-provisioner:v2", "gcr.io/k8s-minikube/storage-provisioner:v2"},
 	}
 	for _, tc := range tests {
 		t.Run(tc.imgName, func(t *testing.T) {
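The updated case keeps the expectation that fully-qualified references pass through unchanged while Docker Hub shorthand gets the docker.io/ prefix. A hedged sketch of that behavior, written as an illustrative helper rather than minikube's actual implementation:

package main

import (
	"fmt"
	"strings"
)

func addRepoTag(img string) string {
	first := strings.Split(img, "/")[0]
	// A registry host contains a dot or a port; bare repo names are assumed to live on Docker Hub.
	if strings.ContainsAny(first, ".:") || first == "localhost" {
		return img
	}
	return "docker.io/" + img
}

func main() {
	fmt.Println(addRepoTag("kubernetesui/dashboard:v2.0.1"))              // docker.io/kubernetesui/dashboard:v2.0.1
	fmt.Println(addRepoTag("gcr.io/k8s-minikube/storage-provisioner:v2")) // gcr.io/k8s-minikube/storage-provisioner:v2
}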
@@ -40,7 +40,7 @@ const (
 	// PreloadVersion is the current version of the preloaded tarball
 	//
 	// NOTE: You may need to bump this version up when upgrading auxiliary docker images
-	PreloadVersion = "v4"
+	PreloadVersion = "v5"
 	// PreloadBucket is the name of the GCS bucket where preloaded volume tarballs exist
 	PreloadBucket = "minikube-preloaded-volume-tarballs"
 )
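Bumping PreloadVersion changes the generated tarball name, so clusters download a fresh preload that contains the v2 provisioner image instead of reusing a cached v4 artifact. A sketch of how such a versioned object name is typically assembled; the exact naming scheme here is an assumption, not minikube's real generator:

package main

import "fmt"

// tarballName mirrors the general shape of minikube's preload object names (assumed format).
func tarballName(preloadVersion, k8sVersion, runtime, arch string) string {
	return fmt.Sprintf("preloaded-images-k8s-%s-%s-%s-overlay2-%s.tar.lz4",
		preloadVersion, k8sVersion, runtime, arch)
}

func main() {
	// v4 -> v5 yields a different object name in the GCS bucket, which is what invalidates older caches.
	fmt.Println(tarballName("v5", "v1.18.3", "docker", "amd64"))
}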
@@ -34,6 +34,9 @@ var gitCommitID = ""
 // isoVersion is a private field and should be set when compiling with --ldflags="-X k8s.io/minikube/pkg/version.isoVersion=vX.Y.Z"
 var isoVersion = "v0.0.0-unset"
 
+// storageProvisionerVersion is a private field and should be set when compiling with --ldflags="-X k8s.io/minikube/pkg/version.storageProvisionerVersion=<storage-provisioner-version>"
+var storageProvisionerVersion = ""
+
 // GetVersion returns the current minikube version
 func GetVersion() string {
 	return version
@@ -53,3 +56,8 @@ func GetISOVersion() string {
 func GetSemverVersion() (semver.Version, error) {
 	return semver.Make(strings.TrimPrefix(GetVersion(), VersionPrefix))
 }
+
+// GetStorageProvisionerVersion returns the storage provisioner version
+func GetStorageProvisionerVersion() string {
+	return storageProvisionerVersion
+}
@@ -24,7 +24,8 @@ import (
 	"encoding/json"
 	"fmt"
 	"os/exec"
-	"path/filepath"
+	"path"
+	"strings"
 	"testing"
 	"time"
 
@@ -64,13 +65,14 @@ func validatePersistentVolumeClaim(ctx context.Context, t *testing.T, profile string) {
 	}
 
 	// Now create a testpvc
-	rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "apply", "-f", filepath.Join(*testdataDir, "pvc.yaml")))
+	rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "apply", "-f", path.Join(*testdataDir, "storage-provisioner", "pvc.yaml")))
 	if err != nil {
 		t.Fatalf("kubectl apply pvc.yaml failed: args %q: %v", rr.Command(), err)
 	}
 
 	// make sure the pvc is Bound
 	checkStoragePhase := func() error {
-		rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "pvc", "testpvc", "-o=json"))
+		rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "pvc", "myclaim", "-o=json"))
 		if err != nil {
 			return err
 		}
@@ -88,4 +90,43 @@ func validatePersistentVolumeClaim(ctx context.Context, t *testing.T, profile string) {
 	if err := retry.Expo(checkStoragePhase, 2*time.Second, Minutes(4)); err != nil {
 		t.Fatalf("failed to check storage phase: %v", err)
 	}
+
+	// create a test pod that will mount the persistent volume
+	createPVTestPod(ctx, t, profile)
+
+	// write to the persistent volume
+	podName := "sp-pod"
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", podName, "--", "touch", "/tmp/mount/foo"))
+	if err != nil {
+		t.Fatalf("creating file in pv: args %q: %v", rr.Command(), err)
+	}
+
+	// kill the pod
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "-f", path.Join(*testdataDir, "storage-provisioner", "pod.yaml")))
+	if err != nil {
+		t.Fatalf("kubectl delete pod.yaml failed: args %q: %v", rr.Command(), err)
+	}
+	// recreate the pod
+	createPVTestPod(ctx, t, profile)
+
+	// make sure the file we previously wrote to the persistent volume still exists
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "exec", podName, "--", "ls", "/tmp/mount"))
+	if err != nil {
+		t.Fatalf("creating file in pv: args %q: %v", rr.Command(), err)
+	}
+	if !strings.Contains(rr.Output(), "foo") {
+		t.Fatalf("expected file foo to persist in pvc, instead got [%v] as files in pv", rr.Output())
+	}
 }
+
+func createPVTestPod(ctx context.Context, t *testing.T, profile string) {
+	// Deploy a pod that will mount the PV
+	rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "apply", "-f", path.Join(*testdataDir, "storage-provisioner", "pod.yaml")))
+	if err != nil {
+		t.Fatalf("kubectl apply pvc.yaml failed: args %q: %v", rr.Command(), err)
+	}
+	// wait for pod to be running
+	if _, err := PodWait(ctx, t, profile, "default", "test=storage-provisioner", Minutes(1)); err != nil {
+		t.Fatalf("failed waiting for pod: %v", err)
+	}
+}
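The body of checkStoragePhase is elided in the hunk above; it presumably decodes the kubectl JSON and verifies the claim reports phase Bound. A hedged sketch of that kind of check, using the Kubernetes PersistentVolumeClaim status layout (illustrative helper, not the test's actual code):

package main

import (
	"encoding/json"
	"fmt"
)

// pvcStatus captures only the field the check needs from `kubectl get pvc myclaim -o=json`.
type pvcStatus struct {
	Status struct {
		Phase string `json:"phase"`
	} `json:"status"`
}

func isBound(kubectlJSON []byte) (bool, error) {
	var claim pvcStatus
	if err := json.Unmarshal(kubectlJSON, &claim); err != nil {
		return false, err
	}
	return claim.Status.Phase == "Bound", nil
}

func main() {
	ok, err := isBound([]byte(`{"status":{"phase":"Bound"}}`))
	fmt.Println(ok, err) // true <nil>
}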
@@ -0,0 +1,17 @@
+apiVersion: v1
+kind: Pod
+metadata:
+  name: sp-pod
+  labels:
+    test: storage-provisioner
+spec:
+  containers:
+  - name: myfrontend
+    image: nginx
+    volumeMounts:
+    - mountPath: "/tmp/mount"
+      name: mypd
+  volumes:
+  - name: mypd
+    persistentVolumeClaim:
+      claimName: myclaim
@@ -1,10 +1,11 @@
-kind: PersistentVolumeClaim
 apiVersion: v1
+kind: PersistentVolumeClaim
 metadata:
-  name: testpvc
+  name: myclaim
 spec:
   accessModes:
     - ReadWriteOnce
+  volumeMode: Filesystem
   resources:
     requests:
-      storage: 2Gi
+      storage: 500Mi