csi-hostpath-driver & volumesnapshots addons docs and test
parent ae92c289b8
commit b696eb614a

@@ -0,0 +1,46 @@
---
title: "CSI Driver and Volume Snapshots"
linkTitle: "CSI Driver and Volume Snapshots"
weight: 1
date: 2020-08-06
description: >
  CSI Driver and Volume Snapshots
---

## Overview

This tutorial explains how to set up the CSI Hostpath Driver in minikube and create volume snapshots.

## Prerequisites

- latest version of minikube

## Tutorial

Support for volume snapshots in minikube is provided through the `volumesnapshots` addon. This addon provisions the required CRDs and deploys the Volume Snapshot Controller. It is <b>disabled by default</b>.

Furthermore, the default storage provider in minikube does not implement the CSI interface and thus is NOT capable of creating/handling volume snapshots. For that, you must first deploy a CSI driver. To make this step easy, minikube offers the `csi-hostpath-driver` addon, which deploys the [CSI Hostpath Driver](https://github.com/kubernetes-csi/csi-driver-host-path). This addon is <b>disabled by default</b> as well.

Thus, to utilize the volume snapshots functionality, you must:

1\) enable the `volumesnapshots` addon, AND\
2a\) either enable the `csi-hostpath-driver` addon, OR\
2b\) deploy your own CSI driver

You can enable/disable either of the above-mentioned addons using:
```shell
minikube addons enable [ADDON_NAME]
minikube addons disable [ADDON_NAME]
```
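
For example, to enable both addons for this tutorial and verify that the driver pods have come up (the namespace and label below are the ones the integration test added in this commit waits on), you could run something like:
```shell
minikube addons enable volumesnapshots
minikube addons enable csi-hostpath-driver

# wait until the CSI hostpath driver pods are running in kube-system
kubectl get pods -n kube-system -l kubernetes.io/minikube-addons=csi-hostpath-driver
```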

The `csi-hostpath-driver` addon deploys its required resources into the `kube-system` namespace and sets up a dedicated storage class called `csi-hostpath-sc` that you need to reference in your PVCs. The driver itself is registered under the name `hostpath.csi.k8s.io`. Use this name wherever the driver has to be referenced, e.g. in volume snapshot class definitions.
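
As a minimal sketch of how these names are used (mirroring the `pvc.yaml` and `snapshotclass.yaml` test data added in this commit), a PVC bound to the addon's storage class and a snapshot class backed by the driver look roughly like this:
```yaml
# a PVC provisioned through the csi-hostpath-sc storage class
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: hpvc
spec:
  storageClassName: csi-hostpath-sc
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
---
# a snapshot class that references the CSI hostpath driver by name
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshotClass
metadata:
  name: csi-hostpath-snapclass
driver: hostpath.csi.k8s.io
deletionPolicy: Delete
```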

Once both addons are enabled, you can create persistent volumes and snapshots in the standard Kubernetes way (for a quick test of volume snapshots, you can find example yaml files along with a step-by-step guide [here](https://kubernetes-csi.github.io/docs/snapshot-restore-feature.html)). The driver stores all persistent volumes in the `/var/lib/csi-hostpath-data/` directory of minikube's host.
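
For instance, the `snapshot.yaml` and `pvc-restore.yaml` test data added in this commit take a snapshot of the `hpvc` claim and then provision a new claim from that snapshot via `dataSource`:
```yaml
# snapshot the hpvc claim using the snapshot class above
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshot
metadata:
  name: new-snapshot-demo
spec:
  volumeSnapshotClassName: csi-hostpath-snapclass
  source:
    persistentVolumeClaimName: hpvc
---
# a new PVC restored from the snapshot above
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: hpvc-restore
spec:
  storageClassName: csi-hostpath-sc
  dataSource:
    name: new-snapshot-demo
    kind: VolumeSnapshot
    apiGroup: snapshot.storage.k8s.io
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
```
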
@@ -40,7 +40,7 @@ func TestAddons(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), Minutes(40))
	defer Cleanup(t, profile, cancel)

-	args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "--addons=registry", "--addons=metrics-server", "--addons=helm-tiller", "--addons=olm"}, StartArgs()...)
+	args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "--addons=registry", "--addons=metrics-server", "--addons=helm-tiller", "--addons=olm", "--addons=volumesnapshots", "--addons=csi-hostpath-driver"}, StartArgs()...)
	if !NoneDriver() { // none doesn't support ingress
		args = append(args, "--addons=ingress")
	}

@@ -60,6 +60,7 @@ func TestAddons(t *testing.T) {
		{"MetricsServer", validateMetricsServerAddon},
		{"HelmTiller", validateHelmTillerAddon},
		{"Olm", validateOlmAddon},
+		{"CSI", validateCSIDriverAndSnapshots},
	}
	for _, tc := range tests {
		tc := tc

@@ -398,3 +399,108 @@ func validateOlmAddon(ctx context.Context, t *testing.T, profile string) {
		t.Errorf("failed checking operator installed: %v", err.Error())
	}
}

// validateCSIDriverAndSnapshots makes sure the csi-hostpath-driver and volumesnapshots addons work
func validateCSIDriverAndSnapshots(ctx context.Context, t *testing.T, profile string) {
	defer PostMortemLogs(t, profile)

	client, err := kapi.Client(profile)
	if err != nil {
		t.Fatalf("failed to get Kubernetes client for %s: %v", profile, err)
	}

	start := time.Now()
	if err := kapi.WaitForPods(client, "kube-system", "kubernetes.io/minikube-addons=csi-hostpath-driver", Minutes(6)); err != nil {
		t.Errorf("failed waiting for csi-hostpath-driver pods to stabilize: %v", err)
	}
	t.Logf("csi-hostpath-driver pods stabilized in %s", time.Since(start))

	// create sample PVC
	rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "pvc.yaml")))
	if err != nil {
		t.Logf("creating sample PVC with %s failed: %v", rr.Command(), err)
	}

	if err := PVCWait(ctx, t, profile, "default", "hpvc", Minutes(6)); err != nil {
		t.Fatalf("failed waiting for PVC hpvc: %v", err)
	}

	// create sample pod with the PVC
	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "pv-pod.yaml")))
	if err != nil {
		t.Logf("creating pod with %s failed: %v", rr.Command(), err)
	}

	if _, err := PodWait(ctx, t, profile, "default", "app=task-pv-pod", Minutes(6)); err != nil {
		t.Fatalf("failed waiting for pod task-pv-pod: %v", err)
	}

	// create sample snapshotclass
	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "snapshotclass.yaml")))
	if err != nil {
		t.Logf("creating snapshotclass with %s failed: %v", rr.Command(), err)
	}

	// create volume snapshot
	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "snapshot.yaml")))
	if err != nil {
		t.Logf("creating volume snapshot with %s failed: %v", rr.Command(), err)
	}

	if err := VolumeSnapshotWait(ctx, t, profile, "default", "new-snapshot-demo", Minutes(6)); err != nil {
		t.Fatalf("failed waiting for volume snapshot new-snapshot-demo: %v", err)
	}

	// delete pod
	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "pod", "task-pv-pod"))
	if err != nil {
		t.Logf("deleting pod with %s failed: %v", rr.Command(), err)
	}

	// delete pvc
	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "pvc", "hpvc"))
	if err != nil {
		t.Logf("deleting pvc with %s failed: %v", rr.Command(), err)
	}

	// restore the PVC from the snapshot
	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "pvc-restore.yaml")))
	if err != nil {
		t.Logf("creating pvc with %s failed: %v", rr.Command(), err)
	}

	if err = PVCWait(ctx, t, profile, "default", "hpvc-restore", Minutes(6)); err != nil {
		t.Fatalf("failed waiting for PVC hpvc-restore: %v", err)
	}

	// create a pod using the restored PVC
	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "pv-pod-restore.yaml")))
	if err != nil {
		t.Logf("creating pod with %s failed: %v", rr.Command(), err)
	}

	if _, err := PodWait(ctx, t, profile, "default", "app=task-pv-pod-restore", Minutes(6)); err != nil {
		t.Fatalf("failed waiting for pod task-pv-pod-restore: %v", err)
	}

	// CLEANUP
	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "pod", "task-pv-pod-restore"))
	if err != nil {
		t.Logf("cleanup with %s failed: %v", rr.Command(), err)
	}
	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "pvc", "hpvc-restore"))
	if err != nil {
		t.Logf("cleanup with %s failed: %v", rr.Command(), err)
	}
	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "volumesnapshot", "new-snapshot-demo"))
	if err != nil {
		t.Logf("cleanup with %s failed: %v", rr.Command(), err)
	}
	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "csi-hostpath-driver", "--alsologtostderr", "-v=1"))
	if err != nil {
		t.Errorf("failed to disable csi-hostpath-driver addon: args %q: %v", rr.Command(), err)
	}
	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "volumesnapshots", "--alsologtostderr", "-v=1"))
	if err != nil {
		t.Errorf("failed to disable volumesnapshots addon: args %q: %v", rr.Command(), err)
	}
}

@@ -29,6 +29,7 @@ import (
	"fmt"
	"io/ioutil"
	"os/exec"
+	"strconv"
	"strings"
	"testing"
	"time"

@@ -372,6 +373,56 @@ func PodWait(ctx context.Context, t *testing.T, profile string, ns string, selec
	return names, fmt.Errorf("%s: %v", fmt.Sprintf("%s within %s", selector, timeout), err)
}

// PVCWait waits for a persistent volume claim to reach the Bound phase
func PVCWait(ctx context.Context, t *testing.T, profile string, ns string, name string, timeout time.Duration) error {
	t.Helper()

	t.Logf("(dbg) %s: waiting %s for pvc %q in namespace %q ...", t.Name(), timeout, name, ns)

	f := func() (bool, error) {
		ret, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "pvc", name, "-o", "jsonpath={.status.phase}", "-n", ns))
		if err != nil {
			t.Logf("%s: WARNING: PVC get for %q %q returned: %v", t.Name(), ns, name, err)
			return false, nil
		}

		pvc := strings.TrimSpace(ret.Stdout.String())
		if pvc == string(core.ClaimBound) {
			return true, nil
		} else if pvc == string(core.ClaimLost) {
			return true, fmt.Errorf("PVC %q is LOST", name)
		}
		return false, nil
	}

	return wait.PollImmediate(1*time.Second, timeout, f)
}

// VolumeSnapshotWait waits for a volume snapshot to become ready to use
func VolumeSnapshotWait(ctx context.Context, t *testing.T, profile string, ns string, name string, timeout time.Duration) error {
	t.Helper()

	t.Logf("(dbg) %s: waiting %s for volume snapshot %q in namespace %q ...", t.Name(), timeout, name, ns)

	f := func() (bool, error) {
		res, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "volumesnapshot", name, "-o", "jsonpath={.status.readyToUse}", "-n", ns))
		if err != nil {
			t.Logf("%s: WARNING: volume snapshot get for %q %q returned: %v", t.Name(), ns, name, err)
			return false, nil
		}

		isReady, err := strconv.ParseBool(strings.TrimSpace(res.Stdout.String()))
		if err != nil {
			t.Logf("%s: WARNING: volume snapshot get for %q %q returned unexpected output: %q", t.Name(), ns, name, res.Stdout.String())
			return false, nil
		}

		return isReady, nil
	}

	return wait.PollImmediate(1*time.Second, timeout, f)
}

// Status returns a minikube component status as a string
func Status(ctx context.Context, t *testing.T, path string, profile string, key string, node string) string {
	t.Helper()

@@ -0,0 +1,22 @@
apiVersion: v1
kind: Pod
metadata:
  name: task-pv-pod-restore
  labels:
    app: task-pv-pod-restore
spec:
  volumes:
    - name: task-pv-storage
      persistentVolumeClaim:
        claimName: hpvc-restore
  containers:
    - name: task-pv-container
      image: nginx
      ports:
        - containerPort: 80
          name: "http-server"
      volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: task-pv-storage

@@ -0,0 +1,22 @@
apiVersion: v1
kind: Pod
metadata:
  name: task-pv-pod
  labels:
    app: task-pv-pod
spec:
  volumes:
    - name: task-pv-storage
      persistentVolumeClaim:
        claimName: hpvc
  containers:
    - name: task-pv-container
      image: nginx
      ports:
        - containerPort: 80
          name: "http-server"
      volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: task-pv-storage

@@ -0,0 +1,15 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: hpvc-restore
spec:
  storageClassName: csi-hostpath-sc
  dataSource:
    name: new-snapshot-demo
    kind: VolumeSnapshot
    apiGroup: snapshot.storage.k8s.io
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi

@@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: hpvc
spec:
  storageClassName: csi-hostpath-sc
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi

@@ -0,0 +1,8 @@
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshot
metadata:
  name: new-snapshot-demo
spec:
  volumeSnapshotClassName: csi-hostpath-snapclass
  source:
    persistentVolumeClaimName: hpvc

@@ -0,0 +1,6 @@
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshotClass
metadata:
  name: csi-hostpath-snapclass
driver: hostpath.csi.k8s.io #csi-hostpath
deletionPolicy: Delete