Merge pull request #8461 from 11janci/jjanik-enable-volumesnapshots

CSI Hostpath Driver & VolumeSnapshots addons
Sharif Elgamal 2020-09-16 11:57:42 -07:00 committed by GitHub
commit 070cbb99ee
28 changed files with 1748 additions and 6 deletions

@ -0,0 +1,63 @@
kind: Service
apiVersion: v1
metadata:
name: csi-hostpath-attacher
namespace: kube-system
labels:
app: csi-hostpath-attacher
spec:
selector:
app: csi-hostpath-attacher
ports:
- name: dummy
port: 12345
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-hostpath-attacher
namespace: kube-system
spec:
serviceName: "csi-hostpath-attacher"
replicas: 1
selector:
matchLabels:
app: csi-hostpath-attacher
template:
metadata:
labels:
app: csi-hostpath-attacher
kubernetes.io/minikube-addons: csi-hostpath-driver
spec:
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- csi-hostpathplugin
topologyKey: kubernetes.io/hostname
serviceAccountName: csi-attacher
containers:
- name: csi-attacher
image: quay.io/k8scsi/csi-attacher:v3.0.0-rc1
args:
- --v=5
- --csi-address=/csi/csi.sock
securityContext:
# This is necessary only for systems with SELinux, where
# non-privileged sidecar containers cannot access unix domain socket
# created by privileged CSI driver container.
privileged: true
volumeMounts:
- mountPath: /csi
name: socket-dir
volumes:
- hostPath:
path: /var/lib/kubelet/plugins/csi-hostpath
type: DirectoryOrCreate
name: socket-dir

@ -0,0 +1,13 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
name: hostpath.csi.k8s.io
namespace: kube-system
spec:
# Supports persistent and ephemeral inline volumes.
volumeLifecycleModes:
- Persistent
- Ephemeral
# To determine at runtime which mode a volume uses, pod info and its
# "csi.storage.k8s.io/ephemeral" entry are needed.
podInfoOnMount: true

@ -0,0 +1,143 @@
# Service defined here, plus serviceName below in StatefulSet,
# are needed only because of condition explained in
# https://github.com/kubernetes/kubernetes/issues/69608
kind: Service
apiVersion: v1
metadata:
name: csi-hostpathplugin
namespace: kube-system
labels:
app: csi-hostpathplugin
spec:
selector:
app: csi-hostpathplugin
ports:
- name: dummy
port: 12345
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-hostpathplugin
namespace: kube-system
spec:
serviceName: "csi-hostpathplugin"
# One replica only:
# Host path driver only works when everything runs
# on a single node. We achieve that by starting it once and then
# co-locate all other pods via inter-pod affinity
replicas: 1
selector:
matchLabels:
app: csi-hostpathplugin
template:
metadata:
labels:
app: csi-hostpathplugin
kubernetes.io/minikube-addons: csi-hostpath-driver
spec:
containers:
- name: node-driver-registrar
image: quay.io/k8scsi/csi-node-driver-registrar:v1.3.0
args:
- --v=5
- --csi-address=/csi/csi.sock
- --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock
securityContext:
# This is necessary only for systems with SELinux, where
# non-privileged sidecar containers cannot access unix domain socket
# created by privileged CSI driver container.
privileged: true
env:
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /registration
name: registration-dir
- mountPath: /csi-data-dir
name: csi-data-dir
- name: hostpath
image: quay.io/k8scsi/hostpathplugin:v1.4.0-rc2
args:
- "--drivername=hostpath.csi.k8s.io"
- "--v=5"
- "--endpoint=$(CSI_ENDPOINT)"
- "--nodeid=$(KUBE_NODE_NAME)"
env:
- name: CSI_ENDPOINT
value: unix:///csi/csi.sock
- name: KUBE_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
securityContext:
privileged: true
ports:
- containerPort: 9898
name: healthz
protocol: TCP
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: healthz
initialDelaySeconds: 10
timeoutSeconds: 3
periodSeconds: 2
volumeMounts:
- mountPath: /csi
name: socket-dir
- mountPath: /var/lib/kubelet/pods
mountPropagation: Bidirectional
name: mountpoint-dir
- mountPath: /var/lib/kubelet/plugins
mountPropagation: Bidirectional
name: plugins-dir
- mountPath: /csi-data-dir
name: csi-data-dir
- mountPath: /dev
name: dev-dir
- name: liveness-probe
volumeMounts:
- mountPath: /csi
name: socket-dir
image: quay.io/k8scsi/livenessprobe:v1.1.0
args:
- --csi-address=/csi/csi.sock
- --health-port=9898
volumes:
- hostPath:
path: /var/lib/kubelet/plugins/csi-hostpath
type: DirectoryOrCreate
name: socket-dir
- hostPath:
path: /var/lib/kubelet/pods
type: DirectoryOrCreate
name: mountpoint-dir
- hostPath:
path: /var/lib/kubelet/plugins_registry
type: Directory
name: registration-dir
- hostPath:
path: /var/lib/kubelet/plugins
type: Directory
name: plugins-dir
- hostPath:
# 'path' is where PV data is persisted on host.
# using /tmp is also possible, but then the PVs will not be available after plugin container recreation or a host reboot
path: /var/lib/csi-hostpath-data/
type: DirectoryOrCreate
name: csi-data-dir
- hostPath:
path: /dev
type: Directory
name: dev-dir

@ -0,0 +1,63 @@
kind: Service
apiVersion: v1
metadata:
name: csi-hostpath-provisioner
namespace: kube-system
labels:
app: csi-hostpath-provisioner
spec:
selector:
app: csi-hostpath-provisioner
ports:
- name: dummy
port: 12345
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-hostpath-provisioner
namespace: kube-system
spec:
serviceName: "csi-hostpath-provisioner"
replicas: 1
selector:
matchLabels:
app: csi-hostpath-provisioner
template:
metadata:
labels:
app: csi-hostpath-provisioner
kubernetes.io/minikube-addons: csi-hostpath-driver
spec:
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- csi-hostpathplugin
topologyKey: kubernetes.io/hostname
serviceAccountName: csi-provisioner
containers:
- name: csi-provisioner
image: gcr.io/k8s-staging-sig-storage/csi-provisioner:v2.0.0-rc2
args:
- -v=5
- --csi-address=/csi/csi.sock
- --feature-gates=Topology=true
securityContext:
# This is necessary only for systems with SELinux, where
# non-privileged sidecar containers cannot access unix domain socket
# created by privileged CSI driver container.
privileged: true
volumeMounts:
- mountPath: /csi
name: socket-dir
volumes:
- hostPath:
path: /var/lib/kubelet/plugins/csi-hostpath
type: DirectoryOrCreate
name: socket-dir

@ -0,0 +1,62 @@
kind: Service
apiVersion: v1
metadata:
name: csi-hostpath-resizer
namespace: kube-system
labels:
app: csi-hostpath-resizer
spec:
selector:
app: csi-hostpath-resizer
ports:
- name: dummy
port: 12345
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-hostpath-resizer
namespace: kube-system
spec:
serviceName: "csi-hostpath-resizer"
replicas: 1
selector:
matchLabels:
app: csi-hostpath-resizer
template:
metadata:
labels:
app: csi-hostpath-resizer
kubernetes.io/minikube-addons: csi-hostpath-driver
spec:
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- csi-hostpathplugin
topologyKey: kubernetes.io/hostname
serviceAccountName: csi-resizer
containers:
- name: csi-resizer
image: quay.io/k8scsi/csi-resizer:v0.6.0-rc1
args:
- -v=5
- -csi-address=/csi/csi.sock
securityContext:
# This is necessary only for systems with SELinux, where
# non-privileged sidecar containers cannot access unix domain socket
# created by privileged CSI driver container.
privileged: true
volumeMounts:
- mountPath: /csi
name: socket-dir
volumes:
- hostPath:
path: /var/lib/kubelet/plugins/csi-hostpath
type: DirectoryOrCreate
name: socket-dir

@ -0,0 +1,62 @@
kind: Service
apiVersion: v1
metadata:
name: csi-hostpath-snapshotter
namespace: kube-system
labels:
app: csi-hostpath-snapshotter
spec:
selector:
app: csi-hostpath-snapshotter
ports:
- name: dummy
port: 12345
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: csi-hostpath-snapshotter
namespace: kube-system
spec:
serviceName: "csi-hostpath-snapshotter"
replicas: 1
selector:
matchLabels:
app: csi-hostpath-snapshotter
template:
metadata:
labels:
app: csi-hostpath-snapshotter
kubernetes.io/minikube-addons: csi-hostpath-driver
spec:
affinity:
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: app
operator: In
values:
- csi-hostpathplugin
topologyKey: kubernetes.io/hostname
serviceAccount: csi-snapshotter
containers:
- name: csi-snapshotter
image: quay.io/k8scsi/csi-snapshotter:v2.1.0
args:
- -v=5
- --csi-address=/csi/csi.sock
securityContext:
# This is necessary only for systems with SELinux, where
# non-privileged sidecar containers cannot access unix domain socket
# created by privileged CSI driver container.
privileged: true
volumeMounts:
- mountPath: /csi
name: socket-dir
volumes:
- hostPath:
path: /var/lib/kubelet/plugins/csi-hostpath
type: DirectoryOrCreate
name: socket-dir

@ -0,0 +1,7 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
name: csi-hostpath-sc
provisioner: hostpath.csi.k8s.io #csi-hostpath
reclaimPolicy: Delete
volumeBindingMode: Immediate

@ -0,0 +1,84 @@
# This YAML file contains all RBAC objects that are necessary to run external
# CSI attacher.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use non-default namespace and different names
# for non-namespaced entities like the ClusterRole
# - decide whether the deployment replicates the external CSI
# attacher, in which case leadership election must be enabled;
# this influences the RBAC setup, see below
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-attacher
namespace: kube-system
---
# Attacher must be able to work with PVs, CSINodes and VolumeAttachments
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-attacher-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments/status"]
verbs: ["patch"]
#Secret permission is optional.
#Enable it if you need value from secret.
#For example, you have key `csi.storage.k8s.io/controller-publish-secret-name` in StorageClass.parameters
#see https://kubernetes-csi.github.io/docs/secrets-and-credentials.html
# - apiGroups: [""]
# resources: ["secrets"]
# verbs: ["get", "list"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role
subjects:
- kind: ServiceAccount
name: csi-attacher
namespace: kube-system
roleRef:
kind: ClusterRole
name: external-attacher-runner
apiGroup: rbac.authorization.k8s.io
---
# Attacher must be able to work with configmaps or leases in the current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: kube-system
name: external-attacher-cfg
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-attacher-role-cfg
namespace: kube-system
subjects:
- kind: ServiceAccount
name: csi-attacher
namespace: kube-system
roleRef:
kind: Role
name: external-attacher-cfg
apiGroup: rbac.authorization.k8s.io

@ -0,0 +1,101 @@
# This YAML file contains all RBAC objects that are necessary to run external
# CSI provisioner.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use non-default namespace and different names
# for non-namespaced entities like the ClusterRole
# - decide whether the deployment replicates the external CSI
# provisioner, in which case leadership election must be enabled;
# this influences the RBAC setup, see below
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-provisioner
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-provisioner-runner
rules:
# The following rule should be uncommented for plugins that require secrets
# for provisioning.
# - apiGroups: [""]
# resources: ["secrets"]
# verbs: ["get", "list"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "create", "delete"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["get", "list"]
- apiGroups: ["storage.k8s.io"]
resources: ["csinodes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["nodes"]
verbs: ["get", "list", "watch"]
- apiGroups: ["storage.k8s.io"]
resources: ["volumeattachments"]
verbs: ["get", "list", "watch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-role
subjects:
- kind: ServiceAccount
name: csi-provisioner
namespace: kube-system
roleRef:
kind: ClusterRole
name: external-provisioner-runner
apiGroup: rbac.authorization.k8s.io
---
# Provisioner must be able to work with endpoints in the current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: kube-system
name: external-provisioner-cfg
rules:
# Only one of the following rules for endpoints or leases is required based on
# what is set for `--leader-election-type`. Endpoints are deprecated in favor of Leases.
- apiGroups: [""]
resources: ["endpoints"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-provisioner-role-cfg
namespace: kube-system
subjects:
- kind: ServiceAccount
name: csi-provisioner
namespace: kube-system
roleRef:
kind: Role
name: external-provisioner-cfg
apiGroup: rbac.authorization.k8s.io

@ -0,0 +1,85 @@
# This YAML file contains all RBAC objects that are necessary to run external
# CSI resizer.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use non-default namespace and different names
# for non-namespaced entities like the ClusterRole
# - decide whether the deployment replicates the external CSI
# resizer, in which case leadership election must be enabled;
# this influences the RBAC setup, see below
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-resizer
namespace: kube-system
---
# Resizer must be able to work with PVCs, PVs, SCs.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: external-resizer-runner
rules:
# The following rule should be uncommented for plugins that require secrets
# for provisioning.
# - apiGroups: [""]
# resources: ["secrets"]
# verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch", "patch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["pods"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims/status"]
verbs: ["patch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-resizer-role
subjects:
- kind: ServiceAccount
name: csi-resizer
namespace: kube-system
roleRef:
kind: ClusterRole
name: external-resizer-runner
apiGroup: rbac.authorization.k8s.io
---
# Resizer must be able to work with endpoints in the current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: kube-system
name: external-resizer-cfg
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-resizer-role-cfg
namespace: kube-system
subjects:
- kind: ServiceAccount
name: csi-resizer
namespace: kube-system
roleRef:
kind: Role
name: external-resizer-cfg
apiGroup: rbac.authorization.k8s.io

@ -0,0 +1,88 @@
# RBAC file for the snapshot controller.
#
# The snapshot controller implements the control loop for CSI snapshot functionality.
# It should be installed as part of the base Kubernetes distribution in an appropriate
# namespace for components implementing base system functionality. For installing with
# Vanilla Kubernetes, kube-system makes sense for the namespace.
apiVersion: v1
kind: ServiceAccount
metadata:
name: csi-snapshotter
namespace: kube-system
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
# rename if there are conflicts
name: csi-snapshotter-runner
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents/status"]
verbs: ["update"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots/status"]
verbs: ["update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-snapshotter-role
subjects:
- kind: ServiceAccount
name: csi-snapshotter
namespace: kube-system
roleRef:
kind: ClusterRole
# change the name also here if the ClusterRole gets renamed
name: csi-snapshotter-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
namespace: kube-system
name: csi-snapshotter-leaderelection
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: csi-snapshotter-leaderelection
namespace: kube-system
subjects:
- kind: ServiceAccount
name: csi-snapshotter
namespace: kube-system
roleRef:
kind: Role
name: csi-snapshotter-leaderelection
apiGroup: rbac.authorization.k8s.io

@ -0,0 +1,99 @@
# RBAC file for the volume snapshot controller.
apiVersion: v1
kind: ServiceAccount
metadata:
name: volume-snapshot-controller
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
# rename if there are conflicts
name: volume-snapshot-controller-runner
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: [""]
resources: ["persistentvolumes"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["persistentvolumeclaims"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["storage.k8s.io"]
resources: ["storageclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: [""]
resources: ["events"]
verbs: ["list", "watch", "create", "update", "patch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotclasses"]
verbs: ["get", "list", "watch"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshotcontents"]
verbs: ["create", "get", "list", "watch", "update", "delete"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots"]
verbs: ["get", "list", "watch", "update"]
- apiGroups: ["snapshot.storage.k8s.io"]
resources: ["volumesnapshots/status"]
verbs: ["update"]
- apiGroups: ["apiextensions.k8s.io"]
resources: ["customresourcedefinitions"]
verbs: ["create", "list", "watch", "delete", "get", "update"]
---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: volume-snapshot-controller-role
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: volume-snapshot-controller
namespace: kube-system
roleRef:
kind: ClusterRole
# change the name also here if the ClusterRole gets renamed
name: volume-snapshot-controller-runner
apiGroup: rbac.authorization.k8s.io
---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: volume-snapshot-controller-leaderelection
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
rules:
- apiGroups: ["coordination.k8s.io"]
resources: ["leases"]
verbs: ["get", "watch", "list", "delete", "update", "create"]
---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: volume-snapshot-controller-leaderelection
namespace: kube-system
labels:
kubernetes.io/cluster-service: "true"
addonmanager.kubernetes.io/mode: Reconcile
subjects:
- kind: ServiceAccount
name: volume-snapshot-controller
namespace: kube-system
roleRef:
kind: Role
name: volume-snapshot-controller-leaderelection
apiGroup: rbac.authorization.k8s.io

@ -0,0 +1,68 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: volumesnapshotclasses.snapshot.storage.k8s.io
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
group: snapshot.storage.k8s.io
names:
kind: VolumeSnapshotClass
listKind: VolumeSnapshotClassList
plural: volumesnapshotclasses
singular: volumesnapshotclass
scope: Cluster
preserveUnknownFields: false
validation:
openAPIV3Schema:
description: VolumeSnapshotClass specifies parameters that an underlying storage
system uses when creating a volume snapshot. A specific VolumeSnapshotClass
is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses
are non-namespaced
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
type: string
deletionPolicy:
description: deletionPolicy determines whether a VolumeSnapshotContent created
through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot
is deleted. Supported values are "Retain" and "Delete". "Retain" means
that the VolumeSnapshotContent and its physical snapshot on underlying
storage system are kept. "Delete" means that the VolumeSnapshotContent
and its physical snapshot on underlying storage system are deleted. Required.
enum:
- Delete
- Retain
type: string
driver:
description: driver is the name of the storage driver that handles this
VolumeSnapshotClass. Required.
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
parameters:
additionalProperties:
type: string
description: parameters is a key-value map with storage driver specific
parameters for creating snapshots. These values are opaque to Kubernetes.
type: object
required:
- deletionPolicy
- driver
type: object
version: v1beta1
versions:
- name: v1beta1
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []

@ -0,0 +1,197 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: volumesnapshotcontents.snapshot.storage.k8s.io
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
group: snapshot.storage.k8s.io
names:
kind: VolumeSnapshotContent
listKind: VolumeSnapshotContentList
plural: volumesnapshotcontents
singular: volumesnapshotcontent
scope: Cluster
subresources:
status: {}
preserveUnknownFields: false
validation:
openAPIV3Schema:
description: VolumeSnapshotContent represents the actual "on-disk" snapshot
object in the underlying storage system
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
spec:
description: spec defines properties of a VolumeSnapshotContent created
by the underlying storage system. Required.
properties:
deletionPolicy:
description: deletionPolicy determines whether this VolumeSnapshotContent
and its physical snapshot on the underlying storage system should
be deleted when its bound VolumeSnapshot is deleted. Supported values
are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent
and its physical snapshot on underlying storage system are kept. "Delete"
means that the VolumeSnapshotContent and its physical snapshot on
underlying storage system are deleted. In dynamic snapshot creation
case, this field will be filled in with the "DeletionPolicy" field
defined in the VolumeSnapshotClass the VolumeSnapshot refers to. For
pre-existing snapshots, users MUST specify this field when creating
the VolumeSnapshotContent object. Required.
enum:
- Delete
- Retain
type: string
driver:
description: driver is the name of the CSI driver used to create the
physical snapshot on the underlying storage system. This MUST be the
same as the name returned by the CSI GetPluginName() call for that
driver. Required.
type: string
source:
description: source specifies from where a snapshot will be created.
This field is immutable after creation. Required.
properties:
snapshotHandle:
description: snapshotHandle specifies the CSI "snapshot_id" of a
pre-existing snapshot on the underlying storage system. This field
is immutable.
type: string
volumeHandle:
description: volumeHandle specifies the CSI "volume_id" of the volume
from which a snapshot should be dynamically taken. This field
is immutable.
type: string
type: object
volumeSnapshotClassName:
description: name of the VolumeSnapshotClass to which this snapshot
belongs.
type: string
volumeSnapshotRef:
description: volumeSnapshotRef specifies the VolumeSnapshot object to
which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName
field must reference this VolumeSnapshotContent's name for the
bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent
object, name and namespace of the VolumeSnapshot object MUST be provided
for binding to happen. This field is immutable after creation. Required.
properties:
apiVersion:
description: API version of the referent.
type: string
fieldPath:
description: 'If referring to a piece of an object instead of an
entire object, this string should contain a valid JSON/Go field
access statement, such as desiredState.manifest.containers[2].
For example, if the object reference is to a container within
a pod, this would take on a value like: "spec.containers{name}"
(where "name" refers to the name of the container that triggered
the event) or if no container name is specified "spec.containers[2]"
(container with index 2 in this pod). This syntax is chosen only
to have some well-defined way of referencing a part of an object.
TODO: this design is not final and this field is subject to change
in the future.'
type: string
kind:
description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
name:
description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
type: string
namespace:
description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
type: string
resourceVersion:
description: 'Specific resourceVersion to which this reference is
made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency'
type: string
uid:
description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
type: string
type: object
required:
- deletionPolicy
- driver
- source
- volumeSnapshotRef
type: object
status:
description: status represents the current information of a snapshot.
properties:
creationTime:
description: creationTime is the timestamp when the point-in-time snapshot
is taken by the underlying storage system. In dynamic snapshot creation
case, this field will be filled in with the "creation_time" value
returned from CSI "CreateSnapshotRequest" gRPC call. For a pre-existing
snapshot, this field will be filled with the "creation_time" value
returned from the CSI "ListSnapshots" gRPC call if the driver supports
it. If not specified, it indicates the creation time is unknown. The
format of this field is a Unix nanoseconds time encoded as an int64.
On Unix, the command `date +%s%N` returns the current time in nanoseconds
since 1970-01-01 00:00:00 UTC.
format: int64
type: integer
error:
description: error is the latest observed error during snapshot creation,
if any.
properties:
message:
description: 'message is a string detailing the encountered error
during snapshot creation if specified. NOTE: message may be logged,
and it should not contain sensitive information.'
type: string
time:
description: time is the timestamp when the error was encountered.
format: date-time
type: string
type: object
readyToUse:
description: readyToUse indicates if a snapshot is ready to be used
to restore a volume. In dynamic snapshot creation case, this field
will be filled in with the "ready_to_use" value returned from CSI
"CreateSnapshotRequest" gRPC call. For a pre-existing snapshot, this
field will be filled with the "ready_to_use" value returned from the
CSI "ListSnapshots" gRPC call if the driver supports it, otherwise,
this field will be set to "True". If not specified, it means the readiness
of a snapshot is unknown.
type: boolean
restoreSize:
description: restoreSize represents the complete size of the snapshot
in bytes. In dynamic snapshot creation case, this field will be filled
in with the "size_bytes" value returned from CSI "CreateSnapshotRequest"
gRPC call. For a pre-existing snapshot, this field will be filled
with the "size_bytes" value returned from the CSI "ListSnapshots"
gRPC call if the driver supports it. When restoring a volume from
this snapshot, the size of the volume MUST NOT be smaller than the
restoreSize if it is specified, otherwise the restoration will fail.
If not specified, it indicates that the size is unknown.
format: int64
minimum: 0
type: integer
snapshotHandle:
description: snapshotHandle is the CSI "snapshot_id" of a snapshot on
the underlying storage system. If not specified, it indicates that
dynamic snapshot creation has either failed or it is still in progress.
type: string
type: object
required:
- spec
type: object
version: v1beta1
versions:
- name: v1beta1
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []

@ -0,0 +1,144 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
name: volumesnapshots.snapshot.storage.k8s.io
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
group: snapshot.storage.k8s.io
names:
kind: VolumeSnapshot
listKind: VolumeSnapshotList
plural: volumesnapshots
singular: volumesnapshot
scope: Namespaced
subresources:
status: {}
preserveUnknownFields: false
validation:
openAPIV3Schema:
description: VolumeSnapshot is a user's request for either creating a point-in-time
snapshot of a persistent volume, or binding to a pre-existing snapshot.
properties:
apiVersion:
description: 'APIVersion defines the versioned schema of this representation
of an object. Servers should convert recognized schemas to the latest
internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
type: string
kind:
description: 'Kind is a string value representing the REST resource this
object represents. Servers may infer this from the endpoint the client
submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
type: string
spec:
description: 'spec defines the desired characteristics of a snapshot requested
by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots
Required.'
properties:
source:
description: source specifies where a snapshot will be created from.
This field is immutable after creation. Required.
properties:
persistentVolumeClaimName:
description: persistentVolumeClaimName specifies the name of the
PersistentVolumeClaim object in the same namespace as the VolumeSnapshot
object where the snapshot should be dynamically taken from. This
field is immutable.
type: string
volumeSnapshotContentName:
description: volumeSnapshotContentName specifies the name of a pre-existing
VolumeSnapshotContent object. This field is immutable.
type: string
type: object
volumeSnapshotClassName:
description: 'volumeSnapshotClassName is the name of the VolumeSnapshotClass
requested by the VolumeSnapshot. If not specified, the default snapshot
class will be used if one exists. If not specified, and there is no
default snapshot class, dynamic snapshot creation will fail. Empty
string is not allowed for this field. TODO(xiangqian): a webhook validation
on empty string. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes'
type: string
required:
- source
type: object
status:
description: 'status represents the current information of a snapshot. NOTE:
status can be modified by sources other than system controllers, and must
not be depended upon for accuracy. Controllers should only use information
from the VolumeSnapshotContent object after verifying that the binding
is accurate and complete.'
properties:
boundVolumeSnapshotContentName:
description: 'boundVolumeSnapshotContentName represents the name of
the VolumeSnapshotContent object to which the VolumeSnapshot object
is bound. If not specified, it indicates that the VolumeSnapshot object
has not been successfully bound to a VolumeSnapshotContent object
yet. NOTE: Specified boundVolumeSnapshotContentName alone does not
mean binding is valid. Controllers MUST always verify bidirectional
binding between VolumeSnapshot and VolumeSnapshotContent to
avoid possible security issues.'
type: string
creationTime:
description: creationTime is the timestamp when the point-in-time snapshot
is taken by the underlying storage system. In dynamic snapshot creation
case, this field will be filled in with the "creation_time" value
returned from CSI "CreateSnapshotRequest" gRPC call. For a pre-existing
snapshot, this field will be filled with the "creation_time" value
returned from the CSI "ListSnapshots" gRPC call if the driver supports
it. If not specified, it indicates that the creation time of the snapshot
is unknown.
format: date-time
type: string
error:
description: error is the last observed error during snapshot creation,
if any. This field could be helpful to upper level controllers(i.e.,
application controller) to decide whether they should continue on
waiting for the snapshot to be created based on the type of error
reported.
properties:
message:
description: 'message is a string detailing the encountered error
during snapshot creation if specified. NOTE: message may be logged,
and it should not contain sensitive information.'
type: string
time:
description: time is the timestamp when the error was encountered.
format: date-time
type: string
type: object
readyToUse:
description: readyToUse indicates if a snapshot is ready to be used
to restore a volume. In dynamic snapshot creation case, this field
will be filled in with the "ready_to_use" value returned from CSI
"CreateSnapshotRequest" gRPC call. For a pre-existing snapshot, this
field will be filled with the "ready_to_use" value returned from the
CSI "ListSnapshots" gRPC call if the driver supports it, otherwise,
this field will be set to "True". If not specified, it means the readiness
of a snapshot is unknown.
type: boolean
restoreSize:
description: restoreSize represents the complete size of the snapshot
in bytes. In dynamic snapshot creation case, this field will be filled
in with the "size_bytes" value returned from CSI "CreateSnapshotRequest"
gRPC call. For a pre-existing snapshot, this field will be filled
with the "size_bytes" value returned from the CSI "ListSnapshots"
gRPC call if the driver supports it. When restoring a volume from
this snapshot, the size of the volume MUST NOT be smaller than the
restoreSize if it is specified, otherwise the restoration will fail.
If not specified, it indicates that the size is unknown.
type: string
type: object
required:
- spec
type: object
version: v1beta1
versions:
- name: v1beta1
served: true
storage: true
status:
acceptedNames:
kind: ""
plural: ""
conditions: []
storedVersions: []

@ -0,0 +1,29 @@
# This YAML file shows how to deploy the volume snapshot controller
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
name: volume-snapshot-controller
namespace: kube-system
labels:
addonmanager.kubernetes.io/mode: Reconcile
spec:
serviceName: "volume-snapshot-controller"
replicas: 1
selector:
matchLabels:
app: volume-snapshot-controller
template:
metadata:
labels:
app: volume-snapshot-controller
spec:
serviceAccount: volume-snapshot-controller
containers:
- name: volume-snapshot-controller
# TODO(xyang): Replace with an official image when it is released
image: gcr.io/k8s-staging-csi/snapshot-controller:v2.0.0-rc2
args:
- "--v=5"
imagePullPolicy: Always

@ -33,10 +33,11 @@ type Addon struct {
// addonPodLabels holds the pod label that will be used to verify if the addon is enabled
var addonPodLabels = map[string]string{
"ingress": "app.kubernetes.io/name=ingress-nginx",
"registry": "kubernetes.io/minikube-addons=registry",
"gvisor": "kubernetes.io/minikube-addons=gvisor",
"gcp-auth": "kubernetes.io/minikube-addons=gcp-auth",
"ingress": "app.kubernetes.io/name=ingress-nginx",
"registry": "kubernetes.io/minikube-addons=registry",
"gvisor": "kubernetes.io/minikube-addons=gvisor",
"gcp-auth": "kubernetes.io/minikube-addons=gcp-auth",
"csi-hostpath-driver": "kubernetes.io/minikube-addons=csi-hostpath-driver",
}
// Addons is a list of all addons
@ -170,4 +171,15 @@ var Addons = []*Addon{
set: SetBool,
callbacks: []setFn{gcpauth.EnableOrDisable, enableOrDisableAddon, verifyGCPAuthAddon, gcpauth.DisplayAddonMessage},
},
{
name: "volumesnapshots",
set: SetBool,
callbacks: []setFn{enableOrDisableAddon},
},
{
name: "csi-hostpath-driver",
set: SetBool,
validations: []setFn{IsVolumesnapshotsEnabled},
callbacks: []setFn{enableOrDisableAddon, verifyAddonStatus},
},
}

@ -18,11 +18,16 @@ package addons
import (
"fmt"
"strconv"
"k8s.io/minikube/pkg/minikube/assets"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/cruntime"
"k8s.io/minikube/pkg/minikube/out"
)
const volumesnapshotsAddon = "volumesnapshots"
// containerdOnlyMsg is the message shown when a containerd-only addon is enabled
const containerdOnlyAddonMsg = `
This addon can only be enabled with the containerd runtime backend. To enable this backend, please first stop minikube with:
@ -33,6 +38,12 @@ and then start minikube again with the following flags:
minikube start --container-runtime=containerd --docker-opt containerd=/var/run/containerd/containerd.sock`
// volumesnapshotsDisabledMsg is the message shown when csi-hostpath-driver addon is enabled without the volumesnapshots addon
const volumesnapshotsDisabledMsg = `[WARNING] For full functionality, the 'csi-hostpath-driver' addon requires the 'volumesnapshots' addon to be enabled.
You can enable 'volumesnapshots' addon by running: 'minikube addons enable volumesnapshots'
`
// IsRuntimeContainerd is a validator which returns an error if the current runtime is not containerd
func IsRuntimeContainerd(cc *config.ClusterConfig, _, _ string) error {
r, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime})
@ -46,6 +57,21 @@ func IsRuntimeContainerd(cc *config.ClusterConfig, _, _ string) error {
return nil
}
// IsVolumesnapshotsEnabled is a validator that prints out a warning if the volumesnapshots addon
// is disabled (does not return any errors!)
func IsVolumesnapshotsEnabled(cc *config.ClusterConfig, _, value string) error {
isCsiDriverEnabled, _ := strconv.ParseBool(value)
// assets.Addons[].IsEnabled() returns the current status of the addon or default value.
// config.AddonList contains list of addons to be enabled.
isVolumesnapshotsEnabled := assets.Addons[volumesnapshotsAddon].IsEnabled(cc) || contains(config.AddonList, volumesnapshotsAddon)
if isCsiDriverEnabled && !isVolumesnapshotsEnabled {
// just print out a warning directly, we don't want to return any errors since
// that would prevent the addon from being enabled (callbacks wouldn't be run)
out.WarningT(volumesnapshotsDisabledMsg)
}
return nil
}
// isAddonValid returns the addon, true if it is valid
// otherwise returns nil, false
func isAddonValid(name string) (*Addon, bool) {
@ -56,3 +82,12 @@ func isAddonValid(name string) (*Addon, bool) {
}
return nil, false
}
// contains reports whether slice contains val
func contains(slice []string, val string) bool {
for _, item := range slice {
if item == val {
return true
}
}
return false
}

@ -416,7 +416,7 @@ var Addons = map[string]*Addon{
MustBinAsset(
"deploy/addons/ambassador/ambassadorinstallation.yaml",
vmpath.GuestAddonsDir,
"ambassadorinstallation.yaml.yaml",
"ambassadorinstallation.yaml",
"0640",
false),
}, false, "ambassador"),
@ -440,6 +440,106 @@ var Addons = map[string]*Addon{
"0640",
false),
}, false, "gcp-auth"),
"volumesnapshots": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml",
vmpath.GuestAddonsDir,
"snapshot.storage.k8s.io_volumesnapshotclasses.yaml",
"0640",
false),
MustBinAsset(
"deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml",
vmpath.GuestAddonsDir,
"snapshot.storage.k8s.io_volumesnapshotcontents.yaml",
"0640",
false),
MustBinAsset(
"deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml",
vmpath.GuestAddonsDir,
"snapshot.storage.k8s.io_volumesnapshots.yaml",
"0640",
false),
MustBinAsset(
"deploy/addons/volumesnapshots/rbac-volume-snapshot-controller.yaml",
vmpath.GuestAddonsDir,
"rbac-volume-snapshot-controller.yaml",
"0640",
false),
MustBinAsset(
"deploy/addons/volumesnapshots/volume-snapshot-controller-deployment.yaml",
vmpath.GuestAddonsDir,
"volume-snapshot-controller-deployment.yaml",
"0640",
false),
}, false, "volumesnapshots"),
"csi-hostpath-driver": NewAddon([]*BinAsset{
MustBinAsset(
"deploy/addons/csi-hostpath-driver/rbac/rbac-external-attacher.yaml",
vmpath.GuestAddonsDir,
"rbac-external-attacher.yaml",
"0640",
false),
MustBinAsset(
"deploy/addons/csi-hostpath-driver/rbac/rbac-external-provisioner.yaml",
vmpath.GuestAddonsDir,
"rbac-external-provisioner.yaml",
"0640",
false),
MustBinAsset(
"deploy/addons/csi-hostpath-driver/rbac/rbac-external-resizer.yaml",
vmpath.GuestAddonsDir,
"rbac-external-resizer.yaml",
"0640",
false),
MustBinAsset(
"deploy/addons/csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml",
vmpath.GuestAddonsDir,
"rbac-external-snapshotter.yaml",
"0640",
false),
MustBinAsset(
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-attacher.yaml",
vmpath.GuestAddonsDir,
"csi-hostpath-attacher.yaml",
"0640",
false),
MustBinAsset(
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml",
vmpath.GuestAddonsDir,
"csi-hostpath-driverinfo.yaml",
"0640",
false),
MustBinAsset(
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-plugin.yaml",
vmpath.GuestAddonsDir,
"csi-hostpath-plugin.yaml",
"0640",
false),
MustBinAsset(
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-provisioner.yaml",
vmpath.GuestAddonsDir,
"csi-hostpath-provisioner.yaml",
"0640",
false),
MustBinAsset(
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-resizer.yaml",
vmpath.GuestAddonsDir,
"csi-hostpath-resizer.yaml",
"0640",
false),
MustBinAsset(
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-snapshotter.yaml",
vmpath.GuestAddonsDir,
"csi-hostpath-snapshotter.yaml",
"0640",
false),
MustBinAsset(
"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml",
vmpath.GuestAddonsDir,
"csi-hostpath-storageclass.yaml",
"0640",
false),
}, false, "csi-hostpath-driver"),
}
// GenerateTemplateData generates template data for template assets

@ -0,0 +1,46 @@
---
title: "CSI Driver and Volume Snapshots"
linkTitle: "CSI Driver and Volume Snapshots"
weight: 1
date: 2020-08-06
description: >
CSI Driver and Volume Snapshots
---
## Overview
This tutorial explains how to set up the CSI Hostpath Driver in minikube and create volume snapshots.
## Prerequisites
- latest version of minikube
## Tutorial
Support for volume snapshots in minikube is provided through the `volumesnapshots` addon. This addon provisions the required
CRDs and deploys the Volume Snapshot Controller. It is **disabled by default**.

Furthermore, the default storage provider in minikube does not implement the CSI interface and thus is NOT capable of creating/handling
volume snapshots. For that, you must first deploy a CSI driver. To make this step easy, minikube offers the `csi-hostpath-driver` addon,
which deploys the [CSI Hostpath Driver](https://github.com/kubernetes-csi/csi-driver-host-path). This addon is **disabled**
by default as well.
Thus, to utilize the volume snapshots functionality, you must:
1\) enable the `volumesnapshots` addon AND\
2a\) either enable the `csi-hostpath-driver` addon OR\
2b\) deploy your own CSI driver
You can enable/disable either of the above-mentioned addons using
```shell
minikube addons enable [ADDON_NAME]
minikube addons disable [ADDON_NAME]
```
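For this tutorial, that means running `minikube addons enable volumesnapshots` followed by `minikube addons enable csi-hostpath-driver`.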
The `csi-hostpath-driver` addon deploys its required resources into the `kube-system` namespace and sets up a dedicated
storage class called `csi-hostpath-sc` that you need to reference in your PVCs. The driver itself is created under the
name `hostpath.csi.k8s.io`. Use this wherever necessary (e.g. snapshot class definitions).
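For example, a minimal PVC that consumes this storage class (mirroring the `pvc.yaml` test fixture added in this PR) looks like:

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: hpvc
spec:
  storageClassName: csi-hostpath-sc
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
```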
Once both addons are enabled, you can create persistent volumes and snapshots in the usual way (for a quick test of
volume snapshots, you can find example yaml files along with a step-by-step guide [here](https://kubernetes-csi.github.io/docs/snapshot-restore-feature.html)).

The driver stores all persistent volumes in the `/var/lib/csi-hostpath-data/` directory on the minikube host.
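For instance, a VolumeSnapshotClass for the `hostpath.csi.k8s.io` driver and a VolumeSnapshot taken from the PVC above (both mirroring this PR's test fixtures) look like:

```yaml
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshotClass
metadata:
  name: csi-hostpath-snapclass
driver: hostpath.csi.k8s.io
deletionPolicy: Delete
---
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshot
metadata:
  name: new-snapshot-demo
spec:
  volumeSnapshotClassName: csi-hostpath-snapclass
  source:
    persistentVolumeClaimName: hpvc
```

To restore, create a new PVC whose `dataSource` references the snapshot (`kind: VolumeSnapshot`, `apiGroup: snapshot.storage.k8s.io`), as the `pvc-restore.yaml` test fixture in this PR does.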

@ -40,7 +40,7 @@ func TestAddons(t *testing.T) {
ctx, cancel := context.WithTimeout(context.Background(), Minutes(40))
defer Cleanup(t, profile, cancel)
args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "--addons=registry", "--addons=metrics-server", "--addons=helm-tiller", "--addons=olm"}, StartArgs()...)
args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "--addons=registry", "--addons=metrics-server", "--addons=helm-tiller", "--addons=olm", "--addons=volumesnapshots", "--addons=csi-hostpath-driver"}, StartArgs()...)
if !NoneDriver() { // none doesn't support ingress
args = append(args, "--addons=ingress")
}
@ -60,6 +60,7 @@ func TestAddons(t *testing.T) {
{"MetricsServer", validateMetricsServerAddon},
{"HelmTiller", validateHelmTillerAddon},
{"Olm", validateOlmAddon},
{"CSI", validateCSIDriverAndSnapshots},
}
for _, tc := range tests {
tc := tc
@ -398,3 +399,108 @@ func validateOlmAddon(ctx context.Context, t *testing.T, profile string) {
t.Errorf("failed checking operator installed: %v", err.Error())
}
}
func validateCSIDriverAndSnapshots(ctx context.Context, t *testing.T, profile string) {
defer PostMortemLogs(t, profile)
client, err := kapi.Client(profile)
if err != nil {
t.Fatalf("failed to get Kubernetes client for %s: %v", profile, err)
}
start := time.Now()
if err := kapi.WaitForPods(client, "kube-system", "kubernetes.io/minikube-addons=csi-hostpath-driver", Minutes(6)); err != nil {
t.Errorf("failed waiting for csi-hostpath-driver pods to stabilize: %v", err)
}
t.Logf("csi-hostpath-driver pods stabilized in %s", time.Since(start))
// create sample PVC
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "pvc.yaml")))
if err != nil {
t.Logf("creating sample PVC with %s failed: %v", rr.Command(), err)
}
if err := PVCWait(ctx, t, profile, "default", "hpvc", Minutes(6)); err != nil {
t.Fatalf("failed waiting for PVC hpvc: %v", err)
}
// create sample pod with the PVC
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "pv-pod.yaml")))
if err != nil {
t.Logf("creating pod with %s failed: %v", rr.Command(), err)
}
if _, err := PodWait(ctx, t, profile, "default", "app=task-pv-pod", Minutes(6)); err != nil {
t.Fatalf("failed waiting for pod task-pv-pod: %v", err)
}
// create sample snapshotclass
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "snapshotclass.yaml")))
if err != nil {
t.Logf("creating snapshostclass with %s failed: %v", rr.Command(), err)
}
// create volume snapshot
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "snapshot.yaml")))
if err != nil {
t.Logf("creating pod with %s failed: %v", rr.Command(), err)
}
if err := VolumeSnapshotWait(ctx, t, profile, "default", "new-snapshot-demo", Minutes(6)); err != nil {
t.Fatalf("failed waiting for volume snapshot new-snapshot-demo: %v", err)
}
// delete pod
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "pod", "task-pv-pod"))
if err != nil {
t.Logf("deleting pod with %s failed: %v", rr.Command(), err)
}
// delete pvc
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "pvc", "hpvc"))
if err != nil {
t.Logf("deleting pod with %s failed: %v", rr.Command(), err)
}
// restore pv from snapshot
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "pvc-restore.yaml")))
if err != nil {
t.Logf("creating pvc with %s failed: %v", rr.Command(), err)
}
if err = PVCWait(ctx, t, profile, "default", "hpvc-restore", Minutes(6)); err != nil {
t.Fatalf("failed waiting for PVC hpvc-restore: %v", err)
}
// create pod from restored snapshot
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "pv-pod-restore.yaml")))
if err != nil {
t.Logf("creating pod with %s failed: %v", rr.Command(), err)
}
if _, err := PodWait(ctx, t, profile, "default", "app=task-pv-pod-restore", Minutes(6)); err != nil {
t.Fatalf("failed waiting for pod task-pv-pod-restore: %v", err)
}
// CLEANUP
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "pod", "task-pv-pod-restore"))
if err != nil {
t.Logf("cleanup with %s failed: %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "pvc", "hpvc-restore"))
if err != nil {
t.Logf("cleanup with %s failed: %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "volumesnapshot", "new-snapshot-demo"))
if err != nil {
t.Logf("cleanup with %s failed: %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "csi-hostpath-driver", "--alsologtostderr", "-v=1"))
if err != nil {
t.Errorf("failed to disable csi-hostpath-driver addon: args %q: %v", rr.Command(), err)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "volumesnapshots", "--alsologtostderr", "-v=1"))
if err != nil {
t.Errorf("failed to disable volumesnapshots addon: args %q: %v", rr.Command(), err)
}
}

@ -29,6 +29,7 @@ import (
"fmt"
"io/ioutil"
"os/exec"
"strconv"
"strings"
"testing"
"time"
@ -372,6 +373,56 @@ func PodWait(ctx context.Context, t *testing.T, profile string, ns string, selec
return names, fmt.Errorf("%s: %v", fmt.Sprintf("%s within %s", selector, timeout), err)
}
// PVCWait waits for persistent volume claim to reach bound state
func PVCWait(ctx context.Context, t *testing.T, profile string, ns string, name string, timeout time.Duration) error {
t.Helper()
t.Logf("(dbg) %s: waiting %s for pvc %q in namespace %q ...", t.Name(), timeout, name, ns)
f := func() (bool, error) {
ret, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "pvc", name, "-o", "jsonpath={.status.phase}", "-n", ns))
if err != nil {
t.Logf("%s: WARNING: PVC get for %q %q returned: %v", t.Name(), ns, name, err)
return false, nil
}
pvc := strings.TrimSpace(ret.Stdout.String())
if pvc == string(core.ClaimBound) {
return true, nil
} else if pvc == string(core.ClaimLost) {
return true, fmt.Errorf("PVC %q is LOST", name)
}
return false, nil
}
return wait.PollImmediate(1*time.Second, timeout, f)
}
// VolumeSnapshotWait waits for volume snapshot to be ready to use
func VolumeSnapshotWait(ctx context.Context, t *testing.T, profile string, ns string, name string, timeout time.Duration) error {
t.Helper()
t.Logf("(dbg) %s: waiting %s for volume snapshot %q in namespace %q ...", t.Name(), timeout, name, ns)
f := func() (bool, error) {
res, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "volumesnapshot", name, "-o", "jsonpath={.status.readyToUse}", "-n", ns))
if err != nil {
t.Logf("%s: WARNING: volume snapshot get for %q %q returned: %v", t.Name(), ns, name, err)
return false, nil
}
isReady, err := strconv.ParseBool(strings.TrimSpace(res.Stdout.String()))
if err != nil {
t.Logf("%s: WARNING: volume snapshot get for %q %q returned: %v", t.Name(), ns, name, res.Stdout.String())
return false, nil
}
return isReady, nil
}
return wait.PollImmediate(1*time.Second, timeout, f)
}
// Status returns a minikube component status as a string
func Status(ctx context.Context, t *testing.T, path string, profile string, key string, node string) string {
t.Helper()

@ -0,0 +1,22 @@
apiVersion: v1
kind: Pod
metadata:
name: task-pv-pod-restore
labels:
app: task-pv-pod-restore
spec:
volumes:
- name: task-pv-storage
persistentVolumeClaim:
claimName: hpvc-restore
containers:
- name: task-pv-container
image: nginx
ports:
- containerPort: 80
name: "http-server"
volumeMounts:
- mountPath: "/usr/share/nginx/html"
name: task-pv-storage

@ -0,0 +1,22 @@
apiVersion: v1
kind: Pod
metadata:
name: task-pv-pod
labels:
app: task-pv-pod
spec:
volumes:
- name: task-pv-storage
persistentVolumeClaim:
claimName: hpvc
containers:
- name: task-pv-container
image: nginx
ports:
- containerPort: 80
name: "http-server"
volumeMounts:
- mountPath: "/usr/share/nginx/html"
name: task-pv-storage

@ -0,0 +1,15 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: hpvc-restore
spec:
storageClassName: csi-hostpath-sc
dataSource:
name: new-snapshot-demo
kind: VolumeSnapshot
apiGroup: snapshot.storage.k8s.io
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi

@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
name: hpvc
spec:
storageClassName: csi-hostpath-sc
accessModes:
- ReadWriteOnce
resources:
requests:
storage: 1Gi

@ -0,0 +1,8 @@
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshot
metadata:
name: new-snapshot-demo
spec:
volumeSnapshotClassName: csi-hostpath-snapclass
source:
persistentVolumeClaimName: hpvc

@ -0,0 +1,6 @@
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshotClass
metadata:
name: csi-hostpath-snapclass
driver: hostpath.csi.k8s.io #csi-hostpath
deletionPolicy: Delete