Merge branch 'master' of github.com:kubernetes/minikube into revert-native
commit 1168b1aa7d

@@ -252,7 +252,9 @@ func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing *
 	validateFlags(cmd, driverName)
 	validateUser(driverName)
-	validateDockerStorageDriver(driverName)
+	if driverName == oci.Docker {
+		validateDockerStorageDriver(driverName)
+	}
 
 	// Download & update the driver, even in --download-only mode
 	if !viper.GetBool(dryRun) {

@@ -184,7 +184,7 @@ func initDriverFlags() {
 	startCmd.Flags().Bool(hostDNSResolver, true, "Enable host resolver for NAT DNS requests (virtualbox driver only)")
 	startCmd.Flags().Bool(noVTXCheck, false, "Disable checking for the availability of hardware virtualization before the vm is started (virtualbox driver only)")
 	startCmd.Flags().String(hostOnlyNicType, "virtio", "NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)")
-	startCmd.Flags().String(natNicType, "virtio", "NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)")
+	startCmd.Flags().String(natNicType, "virtio", "NIC Type used for nat network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)")
 
 	// hyperkit
 	startCmd.Flags().StringSlice(vsockPorts, []string{}, "List of guest VSock ports that should be exposed as sockets on the host (hyperkit driver only)")

@@ -0,0 +1,63 @@
kind: Service
apiVersion: v1
metadata:
  name: csi-hostpath-attacher
  namespace: kube-system
  labels:
    app: csi-hostpath-attacher
spec:
  selector:
    app: csi-hostpath-attacher
  ports:
    - name: dummy
      port: 12345

---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-hostpath-attacher
  namespace: kube-system
spec:
  serviceName: "csi-hostpath-attacher"
  replicas: 1
  selector:
    matchLabels:
      app: csi-hostpath-attacher
  template:
    metadata:
      labels:
        app: csi-hostpath-attacher
        kubernetes.io/minikube-addons: csi-hostpath-driver
    spec:
      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - csi-hostpathplugin
              topologyKey: kubernetes.io/hostname
      serviceAccountName: csi-attacher
      containers:
        - name: csi-attacher
          image: quay.io/k8scsi/csi-attacher:v3.0.0-rc1
          args:
            - --v=5
            - --csi-address=/csi/csi.sock
          securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access unix domain socket
            # created by privileged CSI driver container.
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir

      volumes:
        - hostPath:
            path: /var/lib/kubelet/plugins/csi-hostpath
            type: DirectoryOrCreate
          name: socket-dir

@@ -0,0 +1,13 @@
apiVersion: storage.k8s.io/v1
kind: CSIDriver
metadata:
  name: hostpath.csi.k8s.io
  namespace: kube-system
spec:
  # Supports persistent and ephemeral inline volumes.
  volumeLifecycleModes:
    - Persistent
    - Ephemeral
  # To determine at runtime which mode a volume uses, pod info and its
  # "csi.storage.k8s.io/ephemeral" entry are needed.
  podInfoOnMount: true

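For illustration only (not part of this patch): since the CSIDriver above advertises the Ephemeral lifecycle mode with podInfoOnMount enabled, a pod could consume the driver through an inline CSI volume. A minimal sketch follows; the pod and volume names are hypothetical.

# Hypothetical example, not part of the patch: an ephemeral inline CSI
# volume served by the hostpath driver registered above. Because
# podInfoOnMount is true, the driver receives pod info plus the
# "csi.storage.k8s.io/ephemeral" entry to detect this mode at runtime.
kind: Pod
apiVersion: v1
metadata:
  name: inline-demo                   # hypothetical name
spec:
  containers:
    - name: app
      image: busybox
      command: ["sleep", "3600"]
      volumeMounts:
        - mountPath: /data
          name: scratch
  volumes:
    - name: scratch
      csi:
        driver: hostpath.csi.k8s.io   # matches the CSIDriver name above
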
@@ -0,0 +1,143 @@
# Service defined here, plus serviceName below in StatefulSet,
# are needed only because of condition explained in
# https://github.com/kubernetes/kubernetes/issues/69608

kind: Service
apiVersion: v1
metadata:
  name: csi-hostpathplugin
  namespace: kube-system
  labels:
    app: csi-hostpathplugin
spec:
  selector:
    app: csi-hostpathplugin
  ports:
    - name: dummy
      port: 12345
---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-hostpathplugin
  namespace: kube-system
spec:
  serviceName: "csi-hostpathplugin"
  # One replica only:
  # Host path driver only works when everything runs
  # on a single node. We achieve that by starting it once and then
  # co-locate all other pods via inter-pod affinity
  replicas: 1
  selector:
    matchLabels:
      app: csi-hostpathplugin
  template:
    metadata:
      labels:
        app: csi-hostpathplugin
        kubernetes.io/minikube-addons: csi-hostpath-driver
    spec:
      containers:
        - name: node-driver-registrar
          image: quay.io/k8scsi/csi-node-driver-registrar:v1.3.0
          args:
            - --v=5
            - --csi-address=/csi/csi.sock
            - --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock
          securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access unix domain socket
            # created by privileged CSI driver container.
            privileged: true
          env:
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
            - mountPath: /registration
              name: registration-dir
            - mountPath: /csi-data-dir
              name: csi-data-dir

        - name: hostpath
          image: quay.io/k8scsi/hostpathplugin:v1.4.0-rc2
          args:
            - "--drivername=hostpath.csi.k8s.io"
            - "--v=5"
            - "--endpoint=$(CSI_ENDPOINT)"
            - "--nodeid=$(KUBE_NODE_NAME)"
          env:
            - name: CSI_ENDPOINT
              value: unix:///csi/csi.sock
            - name: KUBE_NODE_NAME
              valueFrom:
                fieldRef:
                  apiVersion: v1
                  fieldPath: spec.nodeName
          securityContext:
            privileged: true
          ports:
            - containerPort: 9898
              name: healthz
              protocol: TCP
          livenessProbe:
            failureThreshold: 5
            httpGet:
              path: /healthz
              port: healthz
            initialDelaySeconds: 10
            timeoutSeconds: 3
            periodSeconds: 2
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
            - mountPath: /var/lib/kubelet/pods
              mountPropagation: Bidirectional
              name: mountpoint-dir
            - mountPath: /var/lib/kubelet/plugins
              mountPropagation: Bidirectional
              name: plugins-dir
            - mountPath: /csi-data-dir
              name: csi-data-dir
            - mountPath: /dev
              name: dev-dir
        - name: liveness-probe
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
          image: quay.io/k8scsi/livenessprobe:v1.1.0
          args:
            - --csi-address=/csi/csi.sock
            - --health-port=9898

      volumes:
        - hostPath:
            path: /var/lib/kubelet/plugins/csi-hostpath
            type: DirectoryOrCreate
          name: socket-dir
        - hostPath:
            path: /var/lib/kubelet/pods
            type: DirectoryOrCreate
          name: mountpoint-dir
        - hostPath:
            path: /var/lib/kubelet/plugins_registry
            type: Directory
          name: registration-dir
        - hostPath:
            path: /var/lib/kubelet/plugins
            type: Directory
          name: plugins-dir
        - hostPath:
            # 'path' is where PV data is persisted on host.
            # using /tmp is also possible while the PVs will not available after plugin container recreation or host reboot
            path: /var/lib/csi-hostpath-data/
            type: DirectoryOrCreate
          name: csi-data-dir
        - hostPath:
            path: /dev
            type: Directory
          name: dev-dir

@@ -0,0 +1,63 @@
kind: Service
apiVersion: v1
metadata:
  name: csi-hostpath-provisioner
  namespace: kube-system
  labels:
    app: csi-hostpath-provisioner
spec:
  selector:
    app: csi-hostpath-provisioner
  ports:
    - name: dummy
      port: 12345

---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-hostpath-provisioner
  namespace: kube-system
spec:
  serviceName: "csi-hostpath-provisioner"
  replicas: 1
  selector:
    matchLabels:
      app: csi-hostpath-provisioner
  template:
    metadata:
      labels:
        app: csi-hostpath-provisioner
        kubernetes.io/minikube-addons: csi-hostpath-driver
    spec:
      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - csi-hostpathplugin
              topologyKey: kubernetes.io/hostname
      serviceAccountName: csi-provisioner
      containers:
        - name: csi-provisioner
          image: gcr.io/k8s-staging-sig-storage/csi-provisioner:v2.0.0-rc2
          args:
            - -v=5
            - --csi-address=/csi/csi.sock
            - --feature-gates=Topology=true
          securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access unix domain socket
            # created by privileged CSI driver container.
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
      volumes:
        - hostPath:
            path: /var/lib/kubelet/plugins/csi-hostpath
            type: DirectoryOrCreate
          name: socket-dir

@@ -0,0 +1,62 @@
kind: Service
apiVersion: v1
metadata:
  name: csi-hostpath-resizer
  namespace: kube-system
  labels:
    app: csi-hostpath-resizer
spec:
  selector:
    app: csi-hostpath-resizer
  ports:
    - name: dummy
      port: 12345

---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-hostpath-resizer
  namespace: kube-system
spec:
  serviceName: "csi-hostpath-resizer"
  replicas: 1
  selector:
    matchLabels:
      app: csi-hostpath-resizer
  template:
    metadata:
      labels:
        app: csi-hostpath-resizer
        kubernetes.io/minikube-addons: csi-hostpath-driver
    spec:
      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - csi-hostpathplugin
              topologyKey: kubernetes.io/hostname
      serviceAccountName: csi-resizer
      containers:
        - name: csi-resizer
          image: quay.io/k8scsi/csi-resizer:v0.6.0-rc1
          args:
            - -v=5
            - -csi-address=/csi/csi.sock
          securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access unix domain socket
            # created by privileged CSI driver container.
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
      volumes:
        - hostPath:
            path: /var/lib/kubelet/plugins/csi-hostpath
            type: DirectoryOrCreate
          name: socket-dir

@@ -0,0 +1,62 @@
kind: Service
apiVersion: v1
metadata:
  name: csi-hostpath-snapshotter
  namespace: kube-system
  labels:
    app: csi-hostpath-snapshotter
spec:
  selector:
    app: csi-hostpath-snapshotter
  ports:
    - name: dummy
      port: 12345

---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: csi-hostpath-snapshotter
  namespace: kube-system
spec:
  serviceName: "csi-hostpath-snapshotter"
  replicas: 1
  selector:
    matchLabels:
      app: csi-hostpath-snapshotter
  template:
    metadata:
      labels:
        app: csi-hostpath-snapshotter
        kubernetes.io/minikube-addons: csi-hostpath-driver
    spec:
      affinity:
        podAffinity:
          requiredDuringSchedulingIgnoredDuringExecution:
            - labelSelector:
                matchExpressions:
                  - key: app
                    operator: In
                    values:
                      - csi-hostpathplugin
              topologyKey: kubernetes.io/hostname
      serviceAccount: csi-snapshotter
      containers:
        - name: csi-snapshotter
          image: quay.io/k8scsi/csi-snapshotter:v2.1.0
          args:
            - -v=5
            - --csi-address=/csi/csi.sock
          securityContext:
            # This is necessary only for systems with SELinux, where
            # non-privileged sidecar containers cannot access unix domain socket
            # created by privileged CSI driver container.
            privileged: true
          volumeMounts:
            - mountPath: /csi
              name: socket-dir
      volumes:
        - hostPath:
            path: /var/lib/kubelet/plugins/csi-hostpath
            type: DirectoryOrCreate
          name: socket-dir

@@ -0,0 +1,7 @@
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: csi-hostpath-sc
provisioner: hostpath.csi.k8s.io #csi-hostpath
reclaimPolicy: Delete
volumeBindingMode: Immediate

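For illustration only (not part of this patch): a minimal sketch of a claim provisioned through the csi-hostpath-sc class above. With volumeBindingMode: Immediate, the external provisioner creates the backing PV as soon as the claim is submitted. The claim name and size below are hypothetical.

# Hypothetical example, not part of the patch: a PVC bound to the
# csi-hostpath-sc StorageClass defined above.
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: csi-pvc              # hypothetical name
spec:
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi           # hypothetical size
  storageClassName: csi-hostpath-sc
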
@@ -0,0 +1,84 @@
# This YAML file contains all RBAC objects that are necessary to run external
# CSI attacher.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use non-default namespace and different names
#   for non-namespaced entities like the ClusterRole
# - decide whether the deployment replicates the external CSI
#   attacher, in which case leadership election must be enabled;
#   this influences the RBAC setup, see below

apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-attacher
  namespace: kube-system

---
# Attacher must be able to work with PVs, CSINodes and VolumeAttachments
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-attacher-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch", "patch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments/status"]
    verbs: ["patch"]
  # Secret permission is optional.
  # Enable it if you need value from secret.
  # For example, you have key `csi.storage.k8s.io/controller-publish-secret-name` in StorageClass.parameters
  # see https://kubernetes-csi.github.io/docs/secrets-and-credentials.html
  # - apiGroups: [""]
  #   resources: ["secrets"]
  #   verbs: ["get", "list"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-attacher-role
subjects:
  - kind: ServiceAccount
    name: csi-attacher
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: external-attacher-runner
  apiGroup: rbac.authorization.k8s.io

---
# Attacher must be able to work with configmaps or leases in the current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: kube-system
  name: external-attacher-cfg
rules:
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-attacher-role-cfg
  namespace: kube-system
subjects:
  - kind: ServiceAccount
    name: csi-attacher
    namespace: kube-system
roleRef:
  kind: Role
  name: external-attacher-cfg
  apiGroup: rbac.authorization.k8s.io

@@ -0,0 +1,101 @@
# This YAML file contains all RBAC objects that are necessary to run external
# CSI provisioner.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use non-default namespace and different names
#   for non-namespaced entities like the ClusterRole
# - decide whether the deployment replicates the external CSI
#   provisioner, in which case leadership election must be enabled;
#   this influences the RBAC setup, see below

apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-provisioner
  namespace: kube-system

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-provisioner-runner
rules:
  # The following rule should be uncommented for plugins that require secrets
  # for provisioning.
  # - apiGroups: [""]
  #   resources: ["secrets"]
  #   verbs: ["get", "list"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "create", "delete"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["get", "list"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["csinodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["nodes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["volumeattachments"]
    verbs: ["get", "list", "watch"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-provisioner-role
subjects:
  - kind: ServiceAccount
    name: csi-provisioner
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: external-provisioner-runner
  apiGroup: rbac.authorization.k8s.io

---
# Provisioner must be able to work with endpoints in current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: kube-system
  name: external-provisioner-cfg
rules:
  # Only one of the following rules for endpoints or leases is required based on
  # what is set for `--leader-election-type`. Endpoints are deprecated in favor of Leases.
  - apiGroups: [""]
    resources: ["endpoints"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-provisioner-role-cfg
  namespace: kube-system
subjects:
  - kind: ServiceAccount
    name: csi-provisioner
    namespace: kube-system
roleRef:
  kind: Role
  name: external-provisioner-cfg
  apiGroup: rbac.authorization.k8s.io

@@ -0,0 +1,85 @@
# This YAML file contains all RBAC objects that are necessary to run external
# CSI resizer.
#
# In production, each CSI driver deployment has to be customized:
# - to avoid conflicts, use non-default namespace and different names
#   for non-namespaced entities like the ClusterRole
# - decide whether the deployment replicates the external CSI
#   resizer, in which case leadership election must be enabled;
#   this influences the RBAC setup, see below

apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-resizer
  namespace: kube-system

---
# Resizer must be able to work with PVCs, PVs, SCs.
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: external-resizer-runner
rules:
  # The following rule should be uncommented for plugins that require secrets
  # for provisioning.
  # - apiGroups: [""]
  #   resources: ["secrets"]
  #   verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch", "patch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["pods"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims/status"]
    verbs: ["patch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-resizer-role
subjects:
  - kind: ServiceAccount
    name: csi-resizer
    namespace: kube-system
roleRef:
  kind: ClusterRole
  name: external-resizer-runner
  apiGroup: rbac.authorization.k8s.io

---
# Resizer must be able to work with end point in current namespace
# if (and only if) leadership election is enabled
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: kube-system
  name: external-resizer-cfg
rules:
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-resizer-role-cfg
  namespace: kube-system
subjects:
  - kind: ServiceAccount
    name: csi-resizer
    namespace: kube-system
roleRef:
  kind: Role
  name: external-resizer-cfg
  apiGroup: rbac.authorization.k8s.io

@@ -0,0 +1,88 @@
# RBAC file for the snapshot controller.
#
# The snapshot controller implements the control loop for CSI snapshot functionality.
# It should be installed as part of the base Kubernetes distribution in an appropriate
# namespace for components implementing base system functionality. For installing with
# Vanilla Kubernetes, kube-system makes sense for the namespace.

apiVersion: v1
kind: ServiceAccount
metadata:
  name: csi-snapshotter
  namespace: kube-system

---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # rename if there are conflicts
  name: csi-snapshotter-runner
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["create", "get", "list", "watch", "update", "delete"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents/status"]
    verbs: ["update"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots/status"]
    verbs: ["update"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-snapshotter-role
subjects:
  - kind: ServiceAccount
    name: csi-snapshotter
    namespace: kube-system
roleRef:
  kind: ClusterRole
  # change the name also here if the ClusterRole gets renamed
  name: csi-snapshotter-runner
  apiGroup: rbac.authorization.k8s.io

---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  namespace: kube-system
  name: csi-snapshotter-leaderelection
rules:
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: csi-snapshotter-leaderelection
  namespace: kube-system
subjects:
  - kind: ServiceAccount
    name: csi-snapshotter
    namespace: kube-system
roleRef:
  kind: Role
  name: csi-snapshotter-leaderelection
  apiGroup: rbac.authorization.k8s.io

@@ -0,0 +1,99 @@
# RBAC file for the volume snapshot controller.
apiVersion: v1
kind: ServiceAccount
metadata:
  name: volume-snapshot-controller
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  # rename if there are conflicts
  name: volume-snapshot-controller-runner
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  - apiGroups: [""]
    resources: ["persistentvolumes"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["persistentvolumeclaims"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["storage.k8s.io"]
    resources: ["storageclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: [""]
    resources: ["events"]
    verbs: ["list", "watch", "create", "update", "patch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotclasses"]
    verbs: ["get", "list", "watch"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshotcontents"]
    verbs: ["create", "get", "list", "watch", "update", "delete"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots"]
    verbs: ["get", "list", "watch", "update"]
  - apiGroups: ["snapshot.storage.k8s.io"]
    resources: ["volumesnapshots/status"]
    verbs: ["update"]
  - apiGroups: ["apiextensions.k8s.io"]
    resources: ["customresourcedefinitions"]
    verbs: ["create", "list", "watch", "delete", "get", "update"]

---
kind: ClusterRoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: volume-snapshot-controller-role
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
  - kind: ServiceAccount
    name: volume-snapshot-controller
    namespace: kube-system
roleRef:
  kind: ClusterRole
  # change the name also here if the ClusterRole gets renamed
  name: volume-snapshot-controller-runner
  apiGroup: rbac.authorization.k8s.io

---
kind: Role
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: volume-snapshot-controller-leaderelection
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
rules:
  - apiGroups: ["coordination.k8s.io"]
    resources: ["leases"]
    verbs: ["get", "watch", "list", "delete", "update", "create"]

---
kind: RoleBinding
apiVersion: rbac.authorization.k8s.io/v1
metadata:
  name: volume-snapshot-controller-leaderelection
  namespace: kube-system
  labels:
    kubernetes.io/cluster-service: "true"
    addonmanager.kubernetes.io/mode: Reconcile
subjects:
  - kind: ServiceAccount
    name: volume-snapshot-controller
    namespace: kube-system
roleRef:
  kind: Role
  name: volume-snapshot-controller-leaderelection
  apiGroup: rbac.authorization.k8s.io

@@ -0,0 +1,68 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: volumesnapshotclasses.snapshot.storage.k8s.io
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  group: snapshot.storage.k8s.io
  names:
    kind: VolumeSnapshotClass
    listKind: VolumeSnapshotClassList
    plural: volumesnapshotclasses
    singular: volumesnapshotclass
  scope: Cluster
  preserveUnknownFields: false
  validation:
    openAPIV3Schema:
      description: VolumeSnapshotClass specifies parameters that a underlying storage
        system uses when creating a volume snapshot. A specific VolumeSnapshotClass
        is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses
        are non-namespaced
      properties:
        apiVersion:
          description: 'APIVersion defines the versioned schema of this representation
            of an object. Servers should convert recognized schemas to the latest
            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
          type: string
        deletionPolicy:
          description: deletionPolicy determines whether a VolumeSnapshotContent created
            through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot
            is deleted. Supported values are "Retain" and "Delete". "Retain" means
            that the VolumeSnapshotContent and its physical snapshot on underlying
            storage system are kept. "Delete" means that the VolumeSnapshotContent
            and its physical snapshot on underlying storage system are deleted. Required.
          enum:
          - Delete
          - Retain
          type: string
        driver:
          description: driver is the name of the storage driver that handles this
            VolumeSnapshotClass. Required.
          type: string
        kind:
          description: 'Kind is a string value representing the REST resource this
            object represents. Servers may infer this from the endpoint the client
            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
          type: string
        parameters:
          additionalProperties:
            type: string
          description: parameters is a key-value map with storage driver specific
            parameters for creating snapshots. These values are opaque to Kubernetes.
          type: object
      required:
      - deletionPolicy
      - driver
      type: object
  version: v1beta1
  versions:
  - name: v1beta1
    served: true
    storage: true
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: []
  storedVersions: []

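For illustration only (not part of this patch): a minimal sketch of a VolumeSnapshotClass instance conforming to the CRD above. deletionPolicy and driver are the two required fields enforced by the schema; the object name is hypothetical.

# Hypothetical example, not part of the patch: a snapshot class for the
# hostpath driver deployed by this addon.
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshotClass
metadata:
  name: csi-hostpath-snapclass   # hypothetical name
driver: hostpath.csi.k8s.io
deletionPolicy: Delete
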
@@ -0,0 +1,197 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: volumesnapshotcontents.snapshot.storage.k8s.io
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  group: snapshot.storage.k8s.io
  names:
    kind: VolumeSnapshotContent
    listKind: VolumeSnapshotContentList
    plural: volumesnapshotcontents
    singular: volumesnapshotcontent
  scope: Cluster
  subresources:
    status: {}
  preserveUnknownFields: false
  validation:
    openAPIV3Schema:
      description: VolumeSnapshotContent represents the actual "on-disk" snapshot
        object in the underlying storage system
      properties:
        apiVersion:
          description: 'APIVersion defines the versioned schema of this representation
            of an object. Servers should convert recognized schemas to the latest
            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
          type: string
        kind:
          description: 'Kind is a string value representing the REST resource this
            object represents. Servers may infer this from the endpoint the client
            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
          type: string
        spec:
          description: spec defines properties of a VolumeSnapshotContent created
            by the underlying storage system. Required.
          properties:
            deletionPolicy:
              description: deletionPolicy determines whether this VolumeSnapshotContent
                and its physical snapshot on the underlying storage system should
                be deleted when its bound VolumeSnapshot is deleted. Supported values
                are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent
                and its physical snapshot on underlying storage system are kept. "Delete"
                means that the VolumeSnapshotContent and its physical snapshot on
                underlying storage system are deleted. In dynamic snapshot creation
                case, this field will be filled in with the "DeletionPolicy" field
                defined in the VolumeSnapshotClass the VolumeSnapshot refers to. For
                pre-existing snapshots, users MUST specify this field when creating
                the VolumeSnapshotContent object. Required.
              enum:
              - Delete
              - Retain
              type: string
            driver:
              description: driver is the name of the CSI driver used to create the
                physical snapshot on the underlying storage system. This MUST be the
                same as the name returned by the CSI GetPluginName() call for that
                driver. Required.
              type: string
            source:
              description: source specifies from where a snapshot will be created.
                This field is immutable after creation. Required.
              properties:
                snapshotHandle:
                  description: snapshotHandle specifies the CSI "snapshot_id" of a
                    pre-existing snapshot on the underlying storage system. This field
                    is immutable.
                  type: string
                volumeHandle:
                  description: volumeHandle specifies the CSI "volume_id" of the volume
                    from which a snapshot should be dynamically taken from. This field
                    is immutable.
                  type: string
              type: object
            volumeSnapshotClassName:
              description: name of the VolumeSnapshotClass to which this snapshot
                belongs.
              type: string
            volumeSnapshotRef:
              description: volumeSnapshotRef specifies the VolumeSnapshot object to
                which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName
                field must reference to this VolumeSnapshotContent's name for the
                bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent
                object, name and namespace of the VolumeSnapshot object MUST be provided
                for binding to happen. This field is immutable after creation. Required.
              properties:
                apiVersion:
                  description: API version of the referent.
                  type: string
                fieldPath:
                  description: 'If referring to a piece of an object instead of an
                    entire object, this string should contain a valid JSON/Go field
                    access statement, such as desiredState.manifest.containers[2].
                    For example, if the object reference is to a container within
                    a pod, this would take on a value like: "spec.containers{name}"
                    (where "name" refers to the name of the container that triggered
                    the event) or if no container name is specified "spec.containers[2]"
                    (container with index 2 in this pod). This syntax is chosen only
                    to have some well-defined way of referencing a part of an object.
                    TODO: this design is not final and this field is subject to change
                    in the future.'
                  type: string
                kind:
                  description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
                  type: string
                name:
                  description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names'
                  type: string
                namespace:
                  description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/'
                  type: string
                resourceVersion:
                  description: 'Specific resourceVersion to which this reference is
                    made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency'
                  type: string
                uid:
                  description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids'
                  type: string
              type: object
          required:
          - deletionPolicy
          - driver
          - source
          - volumeSnapshotRef
          type: object
        status:
          description: status represents the current information of a snapshot.
          properties:
            creationTime:
              description: creationTime is the timestamp when the point-in-time snapshot
                is taken by the underlying storage system. In dynamic snapshot creation
                case, this field will be filled in with the "creation_time" value
                returned from CSI "CreateSnapshotRequest" gRPC call. For a pre-existing
                snapshot, this field will be filled with the "creation_time" value
                returned from the CSI "ListSnapshots" gRPC call if the driver supports
                it. If not specified, it indicates the creation time is unknown. The
                format of this field is a Unix nanoseconds time encoded as an int64.
                On Unix, the command `date +%s%N` returns the current time in nanoseconds
                since 1970-01-01 00:00:00 UTC.
              format: int64
              type: integer
            error:
              description: error is the latest observed error during snapshot creation,
                if any.
              properties:
                message:
                  description: 'message is a string detailing the encountered error
                    during snapshot creation if specified. NOTE: message may be logged,
                    and it should not contain sensitive information.'
                  type: string
                time:
                  description: time is the timestamp when the error was encountered.
                  format: date-time
                  type: string
              type: object
            readyToUse:
              description: readyToUse indicates if a snapshot is ready to be used
                to restore a volume. In dynamic snapshot creation case, this field
                will be filled in with the "ready_to_use" value returned from CSI
                "CreateSnapshotRequest" gRPC call. For a pre-existing snapshot, this
                field will be filled with the "ready_to_use" value returned from the
                CSI "ListSnapshots" gRPC call if the driver supports it, otherwise,
                this field will be set to "True". If not specified, it means the readiness
                of a snapshot is unknown.
              type: boolean
            restoreSize:
              description: restoreSize represents the complete size of the snapshot
                in bytes. In dynamic snapshot creation case, this field will be filled
                in with the "size_bytes" value returned from CSI "CreateSnapshotRequest"
                gRPC call. For a pre-existing snapshot, this field will be filled
                with the "size_bytes" value returned from the CSI "ListSnapshots"
                gRPC call if the driver supports it. When restoring a volume from
                this snapshot, the size of the volume MUST NOT be smaller than the
                restoreSize if it is specified, otherwise the restoration will fail.
                If not specified, it indicates that the size is unknown.
              format: int64
              minimum: 0
              type: integer
            snapshotHandle:
              description: snapshotHandle is the CSI "snapshot_id" of a snapshot on
                the underlying storage system. If not specified, it indicates that
                dynamic snapshot creation has either failed or it is still in progress.
              type: string
          type: object
      required:
      - spec
      type: object
  version: v1beta1
  versions:
  - name: v1beta1
    served: true
    storage: true
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: []
  storedVersions: []

@@ -0,0 +1,144 @@
apiVersion: apiextensions.k8s.io/v1beta1
kind: CustomResourceDefinition
metadata:
  name: volumesnapshots.snapshot.storage.k8s.io
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  group: snapshot.storage.k8s.io
  names:
    kind: VolumeSnapshot
    listKind: VolumeSnapshotList
    plural: volumesnapshots
    singular: volumesnapshot
  scope: Namespaced
  subresources:
    status: {}
  preserveUnknownFields: false
  validation:
    openAPIV3Schema:
      description: VolumeSnapshot is a user's request for either creating a point-in-time
        snapshot of a persistent volume, or binding to a pre-existing snapshot.
      properties:
        apiVersion:
          description: 'APIVersion defines the versioned schema of this representation
            of an object. Servers should convert recognized schemas to the latest
            internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources'
          type: string
        kind:
          description: 'Kind is a string value representing the REST resource this
            object represents. Servers may infer this from the endpoint the client
            submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds'
          type: string
        spec:
          description: 'spec defines the desired characteristics of a snapshot requested
            by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots
            Required.'
          properties:
            source:
              description: source specifies where a snapshot will be created from.
                This field is immutable after creation. Required.
              properties:
                persistentVolumeClaimName:
                  description: persistentVolumeClaimName specifies the name of the
                    PersistentVolumeClaim object in the same namespace as the VolumeSnapshot
                    object where the snapshot should be dynamically taken from. This
                    field is immutable.
                  type: string
                volumeSnapshotContentName:
                  description: volumeSnapshotContentName specifies the name of a pre-existing
                    VolumeSnapshotContent object. This field is immutable.
                  type: string
              type: object
            volumeSnapshotClassName:
              description: 'volumeSnapshotClassName is the name of the VolumeSnapshotClass
                requested by the VolumeSnapshot. If not specified, the default snapshot
                class will be used if one exists. If not specified, and there is no
                default snapshot class, dynamic snapshot creation will fail. Empty
                string is not allowed for this field. TODO(xiangqian): a webhook validation
                on empty string. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes'
              type: string
          required:
          - source
          type: object
        status:
          description: 'status represents the current information of a snapshot. NOTE:
            status can be modified by sources other than system controllers, and must
            not be depended upon for accuracy. Controllers should only use information
            from the VolumeSnapshotContent object after verifying that the binding
            is accurate and complete.'
          properties:
            boundVolumeSnapshotContentName:
              description: 'boundVolumeSnapshotContentName represents the name of
                the VolumeSnapshotContent object to which the VolumeSnapshot object
                is bound. If not specified, it indicates that the VolumeSnapshot object
                has not been successfully bound to a VolumeSnapshotContent object
                yet. NOTE: Specified boundVolumeSnapshotContentName alone does not
                mean binding is valid. Controllers MUST always verify bidirectional
                binding between VolumeSnapshot and VolumeSnapshotContent to
                avoid possible security issues.'
              type: string
            creationTime:
              description: creationTime is the timestamp when the point-in-time snapshot
                is taken by the underlying storage system. In dynamic snapshot creation
                case, this field will be filled in with the "creation_time" value
                returned from CSI "CreateSnapshotRequest" gRPC call. For a pre-existing
                snapshot, this field will be filled with the "creation_time" value
                returned from the CSI "ListSnapshots" gRPC call if the driver supports
                it. If not specified, it indicates that the creation time of the snapshot
                is unknown.
              format: date-time
              type: string
            error:
              description: error is the last observed error during snapshot creation,
                if any. This field could be helpful to upper level controllers(i.e.,
                application controller) to decide whether they should continue on
                waiting for the snapshot to be created based on the type of error
                reported.
              properties:
                message:
                  description: 'message is a string detailing the encountered error
                    during snapshot creation if specified. NOTE: message may be logged,
                    and it should not contain sensitive information.'
                  type: string
                time:
                  description: time is the timestamp when the error was encountered.
                  format: date-time
                  type: string
              type: object
            readyToUse:
              description: readyToUse indicates if a snapshot is ready to be used
                to restore a volume. In dynamic snapshot creation case, this field
                will be filled in with the "ready_to_use" value returned from CSI
                "CreateSnapshotRequest" gRPC call. For a pre-existing snapshot, this
                field will be filled with the "ready_to_use" value returned from the
                CSI "ListSnapshots" gRPC call if the driver supports it, otherwise,
                this field will be set to "True". If not specified, it means the readiness
                of a snapshot is unknown.
              type: boolean
            restoreSize:
              description: restoreSize represents the complete size of the snapshot
                in bytes. In dynamic snapshot creation case, this field will be filled
                in with the "size_bytes" value returned from CSI "CreateSnapshotRequest"
                gRPC call. For a pre-existing snapshot, this field will be filled
                with the "size_bytes" value returned from the CSI "ListSnapshots"
                gRPC call if the driver supports it. When restoring a volume from
                this snapshot, the size of the volume MUST NOT be smaller than the
                restoreSize if it is specified, otherwise the restoration will fail.
                If not specified, it indicates that the size is unknown.
              type: string
          type: object
      required:
      - spec
      type: object
  version: v1beta1
  versions:
  - name: v1beta1
    served: true
    storage: true
status:
  acceptedNames:
    kind: ""
    plural: ""
  conditions: []
  storedVersions: []

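For illustration only (not part of this patch): a minimal sketch of a VolumeSnapshot conforming to the CRD above. source is the only required spec field; the snapshot controller fills in status and binds a VolumeSnapshotContent object. The names below are hypothetical and reuse the earlier hypothetical claim and class.

# Hypothetical example, not part of the patch: a snapshot request for an
# existing claim, using the hypothetical class from the earlier example.
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshot
metadata:
  name: csi-pvc-snapshot                           # hypothetical name
spec:
  volumeSnapshotClassName: csi-hostpath-snapclass  # hypothetical class
  source:
    persistentVolumeClaimName: csi-pvc             # hypothetical claim
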
@@ -0,0 +1,29 @@
# This YAML file shows how to deploy the volume snapshot controller

---
kind: StatefulSet
apiVersion: apps/v1
metadata:
  name: volume-snapshot-controller
  namespace: kube-system
  labels:
    addonmanager.kubernetes.io/mode: Reconcile
spec:
  serviceName: "volume-snapshot-controller"
  replicas: 1
  selector:
    matchLabels:
      app: volume-snapshot-controller
  template:
    metadata:
      labels:
        app: volume-snapshot-controller
    spec:
      serviceAccount: volume-snapshot-controller
      containers:
        - name: volume-snapshot-controller
          # TODO(xyang): Replace with an official image when it is released
          image: gcr.io/k8s-staging-csi/snapshot-controller:v2.0.0-rc2
          args:
            - "--v=5"
          imagePullPolicy: Always

@@ -33,10 +33,11 @@ type Addon struct {
 
 // addonPodLabels holds the pod label that will be used to verify if the addon is enabled
 var addonPodLabels = map[string]string{
 	"ingress":  "app.kubernetes.io/name=ingress-nginx",
 	"registry": "kubernetes.io/minikube-addons=registry",
 	"gvisor":   "kubernetes.io/minikube-addons=gvisor",
 	"gcp-auth": "kubernetes.io/minikube-addons=gcp-auth",
+	"csi-hostpath-driver": "kubernetes.io/minikube-addons=csi-hostpath-driver",
 }
 
 // Addons is a list of all addons

@@ -170,4 +171,15 @@ var Addons = []*Addon{
 		set:       SetBool,
 		callbacks: []setFn{gcpauth.EnableOrDisable, enableOrDisableAddon, verifyGCPAuthAddon, gcpauth.DisplayAddonMessage},
 	},
+	{
+		name:      "volumesnapshots",
+		set:       SetBool,
+		callbacks: []setFn{enableOrDisableAddon},
+	},
+	{
+		name:        "csi-hostpath-driver",
+		set:         SetBool,
+		validations: []setFn{IsVolumesnapshotsEnabled},
+		callbacks:   []setFn{enableOrDisableAddon, verifyAddonStatus},
+	},
 }

@ -18,11 +18,16 @@ package addons

import (
	"fmt"
+	"strconv"
+
+	"k8s.io/minikube/pkg/minikube/assets"
	"k8s.io/minikube/pkg/minikube/config"
	"k8s.io/minikube/pkg/minikube/cruntime"
+	"k8s.io/minikube/pkg/minikube/out"
)

+const volumesnapshotsAddon = "volumesnapshots"
+
// containerdOnlyMsg is the message shown when a containerd-only addon is enabled
const containerdOnlyAddonMsg = `
This addon can only be enabled with the containerd runtime backend. To enable this backend, please first stop minikube with:

@ -33,6 +38,12 @@ and then start minikube again with the following flags:

minikube start --container-runtime=containerd --docker-opt containerd=/var/run/containerd/containerd.sock`

+// volumesnapshotsDisabledMsg is the message shown when the csi-hostpath-driver addon is enabled without the volumesnapshots addon
+const volumesnapshotsDisabledMsg = `[WARNING] For full functionality, the 'csi-hostpath-driver' addon requires the 'volumesnapshots' addon to be enabled.
+
+You can enable 'volumesnapshots' addon by running: 'minikube addons enable volumesnapshots'
+`
+
// IsRuntimeContainerd is a validator which returns an error if the current runtime is not containerd
func IsRuntimeContainerd(cc *config.ClusterConfig, _, _ string) error {
	r, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime})

@ -46,6 +57,21 @@ func IsRuntimeContainerd(cc *config.ClusterConfig, _, _ string) error {
	return nil
}

+// IsVolumesnapshotsEnabled is a validator that prints out a warning if the volumesnapshots addon
+// is disabled (does not return any errors!)
+func IsVolumesnapshotsEnabled(cc *config.ClusterConfig, _, value string) error {
+	isCsiDriverEnabled, _ := strconv.ParseBool(value)
+	// assets.Addons[].IsEnabled() returns the current status of the addon or default value.
+	// config.AddonList contains list of addons to be enabled.
+	isVolumesnapshotsEnabled := assets.Addons[volumesnapshotsAddon].IsEnabled(cc) || contains(config.AddonList, volumesnapshotsAddon)
+	if isCsiDriverEnabled && !isVolumesnapshotsEnabled {
+		// just print out a warning directly, we don't want to return any errors since
+		// that would prevent the addon from being enabled (callbacks wouldn't be run)
+		out.WarningT(volumesnapshotsDisabledMsg)
+	}
+	return nil
+}
+
// isAddonValid returns the addon, true if it is valid
// otherwise returns nil, false
func isAddonValid(name string) (*Addon, bool) {

@ -56,3 +82,12 @@ func isAddonValid(name string) (*Addon, bool) {
	}
	return nil, false
}

+func contains(slice []string, val string) bool {
+	for _, item := range slice {
+		if item == val {
+			return true
+		}
+	}
+	return false
+}

@ -24,6 +24,7 @@ import (
	"strconv"
	"strings"

+	"github.com/blang/semver"
	"github.com/golang/glog"
	"github.com/pkg/errors"
)

@ -124,8 +125,22 @@ func containerGatewayIP(ociBin, containerName string) (net.IP, error) {
func ForwardedPort(ociBin string, ociID string, contPort int) (int, error) {
	var rr *RunResult
	var err error
+	var v semver.Version

	if ociBin == Podman {
+		rr, err = runCmd(exec.Command(Podman, "version", "--format", "{{.Version}}"))
+		if err != nil {
+			return 0, errors.Wrapf(err, "podman version")
+		}
+		output := strings.TrimSpace(rr.Stdout.String())
+		v, err = semver.Make(output)
+		if err != nil {
+			return 0, errors.Wrapf(err, "podman version")
+		}
+	}
+
+	// podman 2.0.1 introduced docker syntax for .NetworkSettings.Ports (podman#5380)
+	if ociBin == Podman && v.LT(semver.Version{Major: 2, Minor: 0, Patch: 1}) {
		rr, err = runCmd(exec.Command(ociBin, "container", "inspect", "-f", fmt.Sprintf("{{range .NetworkSettings.Ports}}{{if eq .ContainerPort %s}}{{.HostPort}}{{end}}{{end}}", fmt.Sprint(contPort)), ociID))
		if err != nil {
			return 0, errors.Wrapf(err, "get port %d for %q", contPort, ociID)

@ -145,19 +145,6 @@ func CreateContainerNode(p CreateParams) error {
		// label th enode wuth the node ID
		"--label", p.NodeLabel,
	}

-	if p.OCIBinary == Podman { // enable execing in /var
-		// podman mounts var/lib with no-exec by default https://github.com/containers/libpod/issues/5103
-		runArgs = append(runArgs, "--volume", fmt.Sprintf("%s:/var:exec", p.Name))
-	}
-	if p.OCIBinary == Docker {
-		runArgs = append(runArgs, "--volume", fmt.Sprintf("%s:/var", p.Name))
-		// ignore apparmore github actions docker: https://github.com/kubernetes/minikube/issues/7624
-		runArgs = append(runArgs, "--security-opt", "apparmor=unconfined")
-	}
-
-	runArgs = append(runArgs, fmt.Sprintf("--cpus=%s", p.CPUs))
-
	memcgSwap := true
	if runtime.GOOS == "linux" {
		if _, err := os.Stat("/sys/fs/cgroup/memory/memsw.limit_in_bytes"); os.IsNotExist(err) {

@ -167,26 +154,51 @@ func CreateContainerNode(p CreateParams) error {
		}
	}

-	if p.OCIBinary == Podman && memcgSwap { // swap is required for memory
-		runArgs = append(runArgs, fmt.Sprintf("--memory=%s", p.Memory))
-		// Disable swap by setting the value to match
-		runArgs = append(runArgs, fmt.Sprintf("--memory-swap=%s", p.Memory))
-	}
-
-	if p.OCIBinary == Docker {
-		runArgs = append(runArgs, fmt.Sprintf("--memory=%s", p.Memory))
-		// Disable swap by setting the value to match
-		runArgs = append(runArgs, fmt.Sprintf("--memory-swap=%s", p.Memory))
-	}
-
	// https://www.freedesktop.org/wiki/Software/systemd/ContainerInterface/
	var virtualization string
-	if p.OCIBinary == Podman {
+	if p.OCIBinary == Podman { // enable execing in /var
+		// podman mounts var/lib with no-exec by default https://github.com/containers/libpod/issues/5103
+		runArgs = append(runArgs, "--volume", fmt.Sprintf("%s:/var:exec", p.Name))
+
+		if memcgSwap {
+			runArgs = append(runArgs, fmt.Sprintf("--memory=%s", p.Memory))
+			// Disable swap by setting the value to match
+			runArgs = append(runArgs, fmt.Sprintf("--memory-swap=%s", p.Memory))
+		}
+
		virtualization = "podman" // VIRTUALIZATION_PODMAN
	}
	if p.OCIBinary == Docker {
+		runArgs = append(runArgs, "--volume", fmt.Sprintf("%s:/var", p.Name))
+		// ignore apparmore github actions docker: https://github.com/kubernetes/minikube/issues/7624
+		runArgs = append(runArgs, "--security-opt", "apparmor=unconfined")
+
+		runArgs = append(runArgs, fmt.Sprintf("--memory=%s", p.Memory))
+		// Disable swap by setting the value to match
+		runArgs = append(runArgs, fmt.Sprintf("--memory-swap=%s", p.Memory))
+
		virtualization = "docker" // VIRTUALIZATION_DOCKER
	}

+	cpuCfsPeriod := true
+	cpuCfsQuota := true
+	if runtime.GOOS == "linux" {
+		if _, err := os.Stat("/sys/fs/cgroup/cpu/cpu.cfs_period_us"); os.IsNotExist(err) {
+			cpuCfsPeriod = false
+		}
+		if _, err := os.Stat("/sys/fs/cgroup/cpu/cpu.cfs_quota_us"); os.IsNotExist(err) {
+			cpuCfsQuota = false
+		}
+		if !cpuCfsPeriod || !cpuCfsQuota {
+			// requires CONFIG_CFS_BANDWIDTH
+			glog.Warning("Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.")
+		}
+	}
+
+	if cpuCfsPeriod && cpuCfsQuota {
+		runArgs = append(runArgs, fmt.Sprintf("--cpus=%s", p.CPUs))
+	}
+
	runArgs = append(runArgs, "-e", fmt.Sprintf("%s=%s", "container", virtualization))

	for key, val := range p.Envs {

@ -416,7 +416,7 @@ var Addons = map[string]*Addon{
		MustBinAsset(
			"deploy/addons/ambassador/ambassadorinstallation.yaml",
			vmpath.GuestAddonsDir,
-			"ambassadorinstallation.yaml.yaml",
+			"ambassadorinstallation.yaml",
			"0640",
			false),
	}, false, "ambassador"),

@ -440,6 +440,106 @@ var Addons = map[string]*Addon{
			"0640",
			false),
	}, false, "gcp-auth"),
+	"volumesnapshots": NewAddon([]*BinAsset{
+		MustBinAsset(
+			"deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml",
+			vmpath.GuestAddonsDir,
+			"snapshot.storage.k8s.io_volumesnapshotclasses.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml",
+			vmpath.GuestAddonsDir,
+			"snapshot.storage.k8s.io_volumesnapshotcontents.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml",
+			vmpath.GuestAddonsDir,
+			"snapshot.storage.k8s.io_volumesnapshots.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/volumesnapshots/rbac-volume-snapshot-controller.yaml",
+			vmpath.GuestAddonsDir,
+			"rbac-volume-snapshot-controller.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/volumesnapshots/volume-snapshot-controller-deployment.yaml",
+			vmpath.GuestAddonsDir,
+			"volume-snapshot-controller-deployment.yaml",
+			"0640",
+			false),
+	}, false, "volumesnapshots"),
+	"csi-hostpath-driver": NewAddon([]*BinAsset{
+		MustBinAsset(
+			"deploy/addons/csi-hostpath-driver/rbac/rbac-external-attacher.yaml",
+			vmpath.GuestAddonsDir,
+			"rbac-external-attacher.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/csi-hostpath-driver/rbac/rbac-external-provisioner.yaml",
+			vmpath.GuestAddonsDir,
+			"rbac-external-provisioner.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/csi-hostpath-driver/rbac/rbac-external-resizer.yaml",
+			vmpath.GuestAddonsDir,
+			"rbac-external-resizer.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml",
+			vmpath.GuestAddonsDir,
+			"rbac-external-snapshotter.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-attacher.yaml",
+			vmpath.GuestAddonsDir,
+			"csi-hostpath-attacher.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml",
+			vmpath.GuestAddonsDir,
+			"csi-hostpath-driverinfo.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-plugin.yaml",
+			vmpath.GuestAddonsDir,
+			"csi-hostpath-plugin.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-provisioner.yaml",
+			vmpath.GuestAddonsDir,
+			"csi-hostpath-provisioner.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-resizer.yaml",
+			vmpath.GuestAddonsDir,
+			"csi-hostpath-resizer.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-snapshotter.yaml",
+			vmpath.GuestAddonsDir,
+			"csi-hostpath-snapshotter.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml",
+			vmpath.GuestAddonsDir,
+			"csi-hostpath-storageclass.yaml",
+			"0640",
+			false),
+	}, false, "csi-hostpath-driver"),
}

// GenerateTemplateData generates template data for template assets

@ -96,7 +96,8 @@ func etcd(v semver.Version, mirror string) string {

	// Should match `DefaultEtcdVersion` in:
	// https://github.com/kubernetes/kubernetes/blob/master/cmd/kubeadm/app/constants/constants.go
-	ev := "3.4.9-1"
+	ev := "3.4.13-0"

	switch v.Minor {
	case 17, 18:
		ev = "3.4.3-0"

@ -109,6 +110,12 @@ func etcd(v semver.Version, mirror string) string {
	case 11:
		ev = "3.2.18"
	}

+	// An awkward special case for v1.19.0 - do not imitate unless necessary
+	if v.Equals(semver.MustParse("1.19.0")) {
+		ev = "3.4.9-1"
+	}
+
	return path.Join(kubernetesRepo(mirror), "etcd"+archTag(needsArchSuffix)+ev)
}

@ -263,7 +263,7 @@ func (s *SSHRunner) Copy(f assets.CopyableFile) error {
	mtime, err := f.GetModTime()
	if err != nil {
		glog.Infof("error getting modtime for %s: %v", dst, err)
-	} else {
+	} else if mtime != (time.Time{}) {
		scp += fmt.Sprintf(" && sudo touch -d \"%s\" %s", mtime.Format(layout), dst)
	}
	out, err := sess.CombinedOutput(scp)

@ -27,10 +27,10 @@ import (

const (
	// DefaultKubernetesVersion is the default Kubernetes version
-	DefaultKubernetesVersion = "v1.19.0"
+	DefaultKubernetesVersion = "v1.19.1"
	// NewestKubernetesVersion is the newest Kubernetes version to test against
	// NOTE: You may need to update coreDNS & etcd versions in pkg/minikube/bootstrapper/images/images.go
-	NewestKubernetesVersion = "v1.19.0"
+	NewestKubernetesVersion = "v1.19.1"
	// OldestKubernetesVersion is the oldest Kubernetes version to test against
	OldestKubernetesVersion = "v1.13.0"
	// DefaultClusterName is the default nane for the k8s cluster

@ -39,8 +39,12 @@ func MaybeDisplayAdvice(err error, driver string) {

	if errors.Is(err, oci.ErrExitedUnexpectedly) || errors.Is(err, oci.ErrDaemonInfo) {
		out.T(style.Tip, "If you are still interested to make {{.driver_name}} driver work. The following suggestions might help you get passed this issue:", out.V{"driver_name": driver})
-		out.T(style.Empty, `
-	- Prune unused {{.driver_name}} images, volumes and abandoned containers.`, out.V{"driver_name": driver})
+		if driver == oci.Docker || driver == oci.Podman {
+			out.T(style.Empty, `
+	- Prune unused {{.driver_name}} images, volumes, networks and abandoned containers.
+
+		{{.driver_name}} system prune --volumes`, out.V{"driver_name": driver})
+		}
		out.T(style.Empty, `
	- Restart your {{.driver_name}} service`, out.V{"driver_name": driver})
		if runtime.GOOS != "linux" {

@ -36,6 +36,7 @@ import (
	"github.com/juju/mutex"
	"github.com/pkg/errors"
	"github.com/spf13/viper"
+	"k8s.io/minikube/pkg/drivers/kic/oci"
	"k8s.io/minikube/pkg/minikube/command"
	"k8s.io/minikube/pkg/minikube/config"
	"k8s.io/minikube/pkg/minikube/constants"

@ -218,17 +219,32 @@ func postStartValidations(h *host.Host, drvName string) {
		glog.Warningf("error getting command runner: %v", err)
	}

+	var kind reason.Kind
+	var name string
+	if drvName == oci.Docker {
+		kind = reason.RsrcInsufficientDockerStorage
+		name = "Docker"
+	}
+	if drvName == oci.Podman {
+		kind = reason.RsrcInsufficientPodmanStorage
+		name = "Podman"
+	}
+	if name == "" {
+		glog.Warningf("unknown KIC driver: %v", drvName)
+		return
+	}
+
	// make sure /var isn't full, as pod deployments will fail if it is
	percentageFull, err := DiskUsed(r, "/var")
	if err != nil {
		glog.Warningf("error getting percentage of /var that is free: %v", err)
	}
	if percentageFull >= 99 {
-		exit.Message(reason.RsrcInsufficientDockerStorage, `Docker is out of disk space! (/var is at {{.p}}% of capacity)`, out.V{"p": percentageFull})
+		exit.Message(kind, `{{.n}} is out of disk space! (/var is at {{.p}}% of capacity)`, out.V{"n": name, "p": percentageFull})
	}

	if percentageFull >= 85 {
-		out.WarnReason(reason.RsrcInsufficientDockerStorage, `Docker is nearly out of disk space, which may cause deployments to fail! ({{.p}}% of capacity)`, out.V{"p": percentageFull})
+		out.WarnReason(kind, `{{.n}} is nearly out of disk space, which may cause deployments to fail! ({{.p}}% of capacity)`, out.V{"n": name, "p": percentageFull})
	}
}

@ -176,6 +176,15 @@ var (
3. Run "minikube ssh -- docker system prune" if using the docker container runtime`,
		Issues: []int{9024},
	}
+	RsrcInsufficientPodmanStorage = Kind{
+		ID:       "RSRC_PODMAN_STORAGE",
+		ExitCode: ExInsufficientStorage,
+		Advice: `Try at least one of the following to free up space on the device:
+
+	1. Run "sudo podman system prune" to remove unused podman data
+	2. Run "minikube ssh -- docker system prune" if using the docker container runtime`,
+		Issues: []int{9024},
+	}

	RsrcInsufficientStorage = Kind{ID: "RSRC_INSUFFICIENT_STORAGE", ExitCode: ExInsufficientStorage, Style: style.UnmetRequirement}

@ -67,7 +67,7 @@ minikube start [flags]
      --interactive                Allow user prompts for more information (default true)
      --iso-url strings            Locations to fetch the minikube ISO from. (default [https://storage.googleapis.com/minikube/iso/minikube-v1.13.0.iso,https://github.com/kubernetes/minikube/releases/download/v1.13.0/minikube-v1.13.0.iso,https://kubernetes.oss-cn-hangzhou.aliyuncs.com/minikube/iso/minikube-v1.13.0.iso])
      --keep-context               This will keep the existing kubectl context and will create a minikube context.
-     --kubernetes-version string  The Kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.19.0, 'latest' for v1.19.0). Defaults to 'stable'.
+     --kubernetes-version string  The Kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.19.1, 'latest' for v1.19.1). Defaults to 'stable'.
      --kvm-gpu                    Enable experimental NVIDIA GPU support in minikube
      --kvm-hidden                 Hide the hypervisor signature from the guest in minikube (kvm2 driver only)
      --kvm-network string         The KVM network name. (kvm2 driver only) (default "default")

@ -75,7 +75,7 @@ minikube start [flags]
      --memory string              Amount of RAM to allocate to Kubernetes (format: <number>[<unit>], where unit = b, k, m or g).
      --mount                      This will start the mount daemon and automatically mount files into minikube.
      --mount-string string        The argument to pass the minikube mount command on start.
-     --nat-nic-type string        NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only) (default "virtio")
+     --nat-nic-type string        NIC Type used for nat network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only) (default "virtio")
      --native-ssh                 Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'. (default true)
      --network-plugin string      Kubelet network plug-in to use (default: auto)
      --nfs-share strings          Local folders to share with Guest via NFS mounts (hyperkit driver only)

@ -20,13 +20,12 @@ description: >

See [ISO release instructions]({{<ref "iso.md">}})

-## Tag KIC base image
+## Release new kicbase image

-for container drivers (docker,podman), if there has been any change in Dockerfile
-(and there is a -snapshot image), should tag with latest release and push to gcr and docker hub and github packages.
+If there are changes to the Dockerfile for the docker and/or podman drivers
+(and there is a -snapshot image), you should retag it as a new version and push it to GCR, dockerhub and github packages.

-for example if you are releasing v0.0.13 and latest kicbase image is v0.0.12-snapshot
-should tag v0.0.13 and change the [kic/types.go](https://github.com/medyagh/minikube/blob/635ff53a63e5bb1be4e1abb9067ebe502a16224e/pkg/drivers/kic/types.go#L29-L30) file as well.
+For example, if you are releasing v0.0.13 and the current kicbase image tag is v0.0.12-snapshot, you should tag v0.0.13 and change [kic/types.go](https://github.com/medyagh/minikube/blob/635ff53a63e5bb1be4e1abb9067ebe502a16224e/pkg/drivers/kic/types.go#L29-L30) as well.

## Update Release Notes

@ -96,7 +96,7 @@ minikube start --container-runtime=docker
Other options available are:

* [containerd](https://github.com/containerd/containerd)
-* [crio](https://github.com/kubernetes-sigs/cri-o)
+* [cri-o](https://github.com/cri-o/cri-o)

## Environment variables

@ -0,0 +1,46 @@
---
title: "CSI Driver and Volume Snapshots"
linkTitle: "CSI Driver and Volume Snapshots"
weight: 1
date: 2020-08-06
description: >
  CSI Driver and Volume Snapshots
---

## Overview

This tutorial explains how to set up the CSI Hostpath Driver in minikube and create volume snapshots.

## Prerequisites

- latest version of minikube

## Tutorial

Support for volume snapshots in minikube is provided through the `volumesnapshots` addon. This addon provisions the required
CRDs and deploys the Volume Snapshot Controller. It is <b>disabled by default</b>.

Furthermore, the default storage provider in minikube does not implement the CSI interface and thus is NOT capable of creating/handling
volume snapshots. For that, you must first deploy a CSI driver. To make this step easy, minikube offers the `csi-hostpath-driver` addon,
which deploys the [CSI Hostpath Driver](https://github.com/kubernetes-csi/csi-driver-host-path). This addon is <b>disabled</b>
by default as well.

Thus, to utilize the volume snapshots functionality, you must:

1\) enable the `volumesnapshots` addon AND\
2a\) either enable the `csi-hostpath-driver` addon OR\
2b\) deploy your own CSI driver

You can enable/disable either of the above-mentioned addons using
```shell script
minikube addons enable [ADDON_NAME]
minikube addons disable [ADDON_NAME]
```

The `csi-hostpath-driver` addon deploys its required resources into the `kube-system` namespace and sets up a dedicated
storage class called `csi-hostpath-sc` that you need to reference in your PVCs. The driver itself is created under the
name `hostpath.csi.k8s.io`. Use this wherever necessary (e.g. snapshot class definitions), as shown in the sketch below.

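As a minimal sketch, a PVC bound to the `csi-hostpath-sc` storage class together with a `VolumeSnapshotClass` wired to the `hostpath.csi.k8s.io` driver could look like the following (the resource names `hpvc` and `csi-hostpath-snapclass` mirror the testdata files this change adds for the integration tests):

```yaml
# PVC provisioned by the CSI Hostpath Driver via the dedicated storage class
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: hpvc
spec:
  storageClassName: csi-hostpath-sc
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
---
# Snapshot class referencing the driver by its registered name
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshotClass
metadata:
  name: csi-hostpath-snapclass
driver: hostpath.csi.k8s.io
deletionPolicy: Delete
```
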
Once both addons are enabled, you can create persistent volumes and snapshots using standard ways (for a quick test of
volume snapshots, you can find some example yaml files along with a step-by-step [here](https://kubernetes-csi.github.io/docs/snapshot-restore-feature.html)).
The driver stores all persistent volumes in the `/var/lib/csi-hostpath-data/` directory of minikube's host.

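As a quick sketch of that flow (again mirroring this change's integration-test testdata, with the hypothetical snapshot name `new-snapshot-demo`), you can snapshot the claim above and later restore it through a `dataSource` reference:

```yaml
# Take a snapshot of the hpvc claim using the snapshot class defined earlier
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshot
metadata:
  name: new-snapshot-demo
spec:
  volumeSnapshotClassName: csi-hostpath-snapclass
  source:
    persistentVolumeClaimName: hpvc
---
# Restore the snapshot into a fresh claim; per the CRD, the requested size
# must not be smaller than the snapshot's restoreSize
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: hpvc-restore
spec:
  storageClassName: csi-hostpath-sc
  dataSource:
    name: new-snapshot-demo
    kind: VolumeSnapshot
    apiGroup: snapshot.storage.k8s.io
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi
```
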
@ -40,7 +40,7 @@ func TestAddons(t *testing.T) {
	ctx, cancel := context.WithTimeout(context.Background(), Minutes(40))
	defer Cleanup(t, profile, cancel)

-	args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "--addons=registry", "--addons=metrics-server", "--addons=helm-tiller", "--addons=olm"}, StartArgs()...)
+	args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "--addons=registry", "--addons=metrics-server", "--addons=helm-tiller", "--addons=olm", "--addons=volumesnapshots", "--addons=csi-hostpath-driver"}, StartArgs()...)
	if !NoneDriver() { // none doesn't support ingress
		args = append(args, "--addons=ingress")
	}

@ -60,6 +60,7 @@ func TestAddons(t *testing.T) {
		{"MetricsServer", validateMetricsServerAddon},
		{"HelmTiller", validateHelmTillerAddon},
		{"Olm", validateOlmAddon},
+		{"CSI", validateCSIDriverAndSnapshots},
	}
	for _, tc := range tests {
		tc := tc

@ -398,3 +399,108 @@ func validateOlmAddon(ctx context.Context, t *testing.T, profile string) {
		t.Errorf("failed checking operator installed: %v", err.Error())
	}
}

+func validateCSIDriverAndSnapshots(ctx context.Context, t *testing.T, profile string) {
+	defer PostMortemLogs(t, profile)
+
+	client, err := kapi.Client(profile)
+	if err != nil {
+		t.Fatalf("failed to get Kubernetes client for %s: %v", profile, err)
+	}
+
+	start := time.Now()
+	if err := kapi.WaitForPods(client, "kube-system", "kubernetes.io/minikube-addons=csi-hostpath-driver", Minutes(6)); err != nil {
+		t.Errorf("failed waiting for csi-hostpath-driver pods to stabilize: %v", err)
+	}
+	t.Logf("csi-hostpath-driver pods stabilized in %s", time.Since(start))
+
+	// create sample PVC
+	rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "pvc.yaml")))
+	if err != nil {
+		t.Logf("creating sample PVC with %s failed: %v", rr.Command(), err)
+	}
+
+	if err := PVCWait(ctx, t, profile, "default", "hpvc", Minutes(6)); err != nil {
+		t.Fatalf("failed waiting for PVC hpvc: %v", err)
+	}
+
+	// create sample pod with the PVC
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "pv-pod.yaml")))
+	if err != nil {
+		t.Logf("creating pod with %s failed: %v", rr.Command(), err)
+	}
+
+	if _, err := PodWait(ctx, t, profile, "default", "app=task-pv-pod", Minutes(6)); err != nil {
+		t.Fatalf("failed waiting for pod task-pv-pod: %v", err)
+	}
+
+	// create sample snapshotclass
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "snapshotclass.yaml")))
+	if err != nil {
+		t.Logf("creating snapshostclass with %s failed: %v", rr.Command(), err)
+	}
+
+	// create volume snapshot
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "snapshot.yaml")))
+	if err != nil {
+		t.Logf("creating pod with %s failed: %v", rr.Command(), err)
+	}
+
+	if err := VolumeSnapshotWait(ctx, t, profile, "default", "new-snapshot-demo", Minutes(6)); err != nil {
+		t.Fatalf("failed waiting for volume snapshot new-snapshot-demo: %v", err)
+	}
+
+	// delete pod
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "pod", "task-pv-pod"))
+	if err != nil {
+		t.Logf("deleting pod with %s failed: %v", rr.Command(), err)
+	}
+
+	// delete pvc
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "pvc", "hpvc"))
+	if err != nil {
+		t.Logf("deleting pod with %s failed: %v", rr.Command(), err)
+	}
+
+	// restore pv from snapshot
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "pvc-restore.yaml")))
+	if err != nil {
+		t.Logf("creating pvc with %s failed: %v", rr.Command(), err)
+	}
+
+	if err = PVCWait(ctx, t, profile, "default", "hpvc-restore", Minutes(6)); err != nil {
+		t.Fatalf("failed waiting for PVC hpvc-restore: %v", err)
+	}
+
+	// create pod from restored snapshot
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "pv-pod-restore.yaml")))
+	if err != nil {
+		t.Logf("creating pod with %s failed: %v", rr.Command(), err)
+	}
+
+	if _, err := PodWait(ctx, t, profile, "default", "app=task-pv-pod-restore", Minutes(6)); err != nil {
+		t.Fatalf("failed waiting for pod task-pv-pod-restore: %v", err)
+	}
+
+	// CLEANUP
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "pod", "task-pv-pod-restore"))
+	if err != nil {
+		t.Logf("cleanup with %s failed: %v", rr.Command(), err)
+	}
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "pvc", "hpvc-restore"))
+	if err != nil {
+		t.Logf("cleanup with %s failed: %v", rr.Command(), err)
+	}
+	rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "volumesnapshot", "new-snapshot-demo"))
+	if err != nil {
+		t.Logf("cleanup with %s failed: %v", rr.Command(), err)
+	}
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "csi-hostpath-driver", "--alsologtostderr", "-v=1"))
+	if err != nil {
+		t.Errorf("failed to disable csi-hostpath-driver addon: args %q: %v", rr.Command(), err)
+	}
+	rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "volumesnapshots", "--alsologtostderr", "-v=1"))
+	if err != nil {
+		t.Errorf("failed to disable volumesnapshots addon: args %q: %v", rr.Command(), err)
+	}
+}

@ -496,11 +496,11 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) {
	}

	t.Run("cache", func(t *testing.T) {
-		t.Run("add", func(t *testing.T) {
-			for _, img := range []string{"busybox:latest", "busybox:1.28.4-glibc", "k8s.gcr.io/pause:latest"} {
+		t.Run("add_remote", func(t *testing.T) {
+			for _, img := range []string{"k8s.gcr.io/pause:3.1", "k8s.gcr.io/pause:3.3", "k8s.gcr.io/pause:latest"} {
				rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", img))
				if err != nil {
-					t.Errorf("failed to cache add image %q. args %q err %v", img, rr.Command(), err)
+					t.Errorf("failed to 'cache add' remote image %q. args %q err %v", img, rr.Command(), err)
				}
			}
		})

@ -514,7 +514,7 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) {
		message := []byte("FROM scratch\nADD Dockerfile /x")
		err = ioutil.WriteFile(filepath.Join(dname, "Dockerfile"), message, 0644)
		if err != nil {
-			t.Fatalf("unable to writefile: %v", err)
+			t.Fatalf("unable to write Dockerfile: %v", err)
		}

		img := "minikube-local-cache-test:" + profile

@ -525,14 +525,14 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) {

		rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", img))
		if err != nil {
-			t.Errorf("failed to add local image %q. args %q err %v", img, rr.Command(), err)
+			t.Errorf("failed to 'cache add' local image %q. args %q err %v", img, rr.Command(), err)
		}
	})

-	t.Run("delete_busybox:1.28.4-glibc", func(t *testing.T) {
-		rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", "busybox:1.28.4-glibc"))
+	t.Run("delete_k8s.gcr.io/pause:3.3", func(t *testing.T) {
+		rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", "k8s.gcr.io/pause:3.3"))
		if err != nil {
-			t.Errorf("failed to delete image busybox:1.28.4-glibc from cache. args %q: %v", rr.Command(), err)
+			t.Errorf("failed to delete image k8s.gcr.io/pause:3.3 from cache. args %q: %v", rr.Command(), err)
		}
	})

@ -542,10 +542,10 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) {
			t.Errorf("failed to do cache list. args %q: %v", rr.Command(), err)
		}
		if !strings.Contains(rr.Output(), "k8s.gcr.io/pause") {
-			t.Errorf("expected 'cache list' output to include 'k8s.gcr.io/pause' but got:\n ***%s***", rr.Output())
+			t.Errorf("expected 'cache list' output to include 'k8s.gcr.io/pause' but got: ***%s***", rr.Output())
		}
-		if strings.Contains(rr.Output(), "busybox:1.28.4-glibc") {
-			t.Errorf("expected 'cache list' output not to include busybox:1.28.4-glibc but got:\n ***%s***", rr.Output())
+		if strings.Contains(rr.Output(), "k8s.gcr.io/pause:3.3") {
+			t.Errorf("expected 'cache list' output not to include k8s.gcr.io/pause:3.3 but got: ***%s***", rr.Output())
		}
	})

@ -554,24 +554,24 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) {
		if err != nil {
			t.Errorf("failed to get images by %q ssh %v", rr.Command(), err)
		}
-		if !strings.Contains(rr.Output(), "1.28.4-glibc") {
-			t.Errorf("expected '1.28.4-glibc' to be in the output but got *%s*", rr.Output())
+		if !strings.Contains(rr.Output(), "0184c1613d929") {
+			t.Errorf("expected sha for pause:3.3 '0184c1613d929' to be in the output but got *%s*", rr.Output())
		}
	})

	t.Run("cache_reload", func(t *testing.T) { // deleting image inside minikube node manually and expecting reload to bring it back
-		img := "busybox:latest"
+		img := "k8s.gcr.io/pause:latest"
		// deleting image inside minikube node manually
		rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo", "docker", "rmi", img))

		if err != nil {
-			t.Errorf("failed to delete inside the node %q : %v", rr.Command(), err)
+			t.Errorf("failed to manually delete image %q : %v", rr.Command(), err)
		}
		// make sure the image is deleted.
		rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo", "crictl", "inspecti", img))
		if err == nil {
-			t.Errorf("expected an error. because image should not exist. but got *nil error* ! cmd: %q", rr.Command())
+			t.Errorf("expected an error but got no error. image should not exist. ! cmd: %q", rr.Command())
		}
		// minikube cache reload.
		rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "reload"))

@ -587,7 +587,7 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) {

	// delete will clean up the cached images since they are global and all other tests will load it for no reason
	t.Run("delete", func(t *testing.T) {
-		for _, img := range []string{"busybox:latest", "k8s.gcr.io/pause:latest"} {
+		for _, img := range []string{"k8s.gcr.io/pause:3.1", "k8s.gcr.io/pause:latest"} {
			rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", img))
			if err != nil {
				t.Errorf("failed to delete %s from cache. args %q: %v", img, rr.Command(), err)

@ -29,6 +29,7 @@ import (
	"fmt"
	"io/ioutil"
	"os/exec"
+	"strconv"
	"strings"
	"testing"
	"time"

@ -372,6 +373,56 @@ func PodWait(ctx context.Context, t *testing.T, profile string, ns string, selec
	return names, fmt.Errorf("%s: %v", fmt.Sprintf("%s within %s", selector, timeout), err)
}

+// PVCWait waits for a persistent volume claim to reach the bound state
+func PVCWait(ctx context.Context, t *testing.T, profile string, ns string, name string, timeout time.Duration) error {
+	t.Helper()
+
+	t.Logf("(dbg) %s: waiting %s for pvc %q in namespace %q ...", t.Name(), timeout, name, ns)
+
+	f := func() (bool, error) {
+		ret, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "pvc", name, "-o", "jsonpath={.status.phase}", "-n", ns))
+		if err != nil {
+			t.Logf("%s: WARNING: PVC get for %q %q returned: %v", t.Name(), ns, name, err)
+			return false, nil
+		}
+
+		pvc := strings.TrimSpace(ret.Stdout.String())
+		if pvc == string(core.ClaimBound) {
+			return true, nil
+		} else if pvc == string(core.ClaimLost) {
+			return true, fmt.Errorf("PVC %q is LOST", name)
+		}
+		return false, nil
+	}
+
+	return wait.PollImmediate(1*time.Second, timeout, f)
+}
+
+// VolumeSnapshotWait waits for a volume snapshot to be ready to use
+func VolumeSnapshotWait(ctx context.Context, t *testing.T, profile string, ns string, name string, timeout time.Duration) error {
+	t.Helper()
+
+	t.Logf("(dbg) %s: waiting %s for volume snapshot %q in namespace %q ...", t.Name(), timeout, name, ns)
+
+	f := func() (bool, error) {
+		res, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "volumesnapshot", name, "-o", "jsonpath={.status.readyToUse}", "-n", ns))
+		if err != nil {
+			t.Logf("%s: WARNING: volume snapshot get for %q %q returned: %v", t.Name(), ns, name, err)
+			return false, nil
+		}
+
+		isReady, err := strconv.ParseBool(strings.TrimSpace(res.Stdout.String()))
+		if err != nil {
+			t.Logf("%s: WARNING: volume snapshot get for %q %q returned: %v", t.Name(), ns, name, res.Stdout.String())
+			return false, nil
+		}
+
+		return isReady, nil
+	}
+
+	return wait.PollImmediate(1*time.Second, timeout, f)
+}
+
// Status returns a minikube component status as a string
func Status(ctx context.Context, t *testing.T, path string, profile string, key string, node string) string {
	t.Helper()

@ -0,0 +1,22 @@
apiVersion: v1
kind: Pod
metadata:
  name: task-pv-pod-restore
  labels:
    app: task-pv-pod-restore
spec:
  volumes:
    - name: task-pv-storage
      persistentVolumeClaim:
        claimName: hpvc-restore
  containers:
    - name: task-pv-container
      image: nginx
      ports:
        - containerPort: 80
          name: "http-server"
      volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: task-pv-storage

@ -0,0 +1,22 @@
apiVersion: v1
kind: Pod
metadata:
  name: task-pv-pod
  labels:
    app: task-pv-pod
spec:
  volumes:
    - name: task-pv-storage
      persistentVolumeClaim:
        claimName: hpvc
  containers:
    - name: task-pv-container
      image: nginx
      ports:
        - containerPort: 80
          name: "http-server"
      volumeMounts:
        - mountPath: "/usr/share/nginx/html"
          name: task-pv-storage

@ -0,0 +1,15 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: hpvc-restore
spec:
  storageClassName: csi-hostpath-sc
  dataSource:
    name: new-snapshot-demo
    kind: VolumeSnapshot
    apiGroup: snapshot.storage.k8s.io
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi

@ -0,0 +1,11 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: hpvc
spec:
  storageClassName: csi-hostpath-sc
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 1Gi

@ -0,0 +1,8 @@
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshot
metadata:
  name: new-snapshot-demo
spec:
  volumeSnapshotClassName: csi-hostpath-snapclass
  source:
    persistentVolumeClaimName: hpvc

@ -0,0 +1,6 @@
apiVersion: snapshot.storage.k8s.io/v1beta1
kind: VolumeSnapshotClass
metadata:
  name: csi-hostpath-snapclass
driver: hostpath.csi.k8s.io #csi-hostpath
deletionPolicy: Delete

@ -279,6 +279,7 @@
    "Multiple errors deleting profiles": "",
    "Multiple minikube profiles were found -": "",
    "NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)": "",
+   "NIC Type used for nat network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)": "",
    "NOTE: This process must stay alive for the mount to be accessible ...": "",
    "Networking and Connectivity Commands:": "",
    "No changes required for the \"{{.context}}\" context": "",