From e4a8e59cfa9d1ab87da7b76658a559c3d3884f26 Mon Sep 17 00:00:00 2001 From: Dean Coakley Date: Wed, 5 Aug 2020 02:06:06 +0100 Subject: [PATCH 01/21] Add prune docker images instructions --- pkg/minikube/machine/advice.go | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/pkg/minikube/machine/advice.go b/pkg/minikube/machine/advice.go index dc08d9a667..a7d4cd7787 100644 --- a/pkg/minikube/machine/advice.go +++ b/pkg/minikube/machine/advice.go @@ -40,7 +40,8 @@ func MaybeDisplayAdvice(err error, driver string) { if errors.Is(err, oci.ErrExitedUnexpectedly) || errors.Is(err, oci.ErrDaemonInfo) { out.T(style.Tip, "If you are still interested to make {{.driver_name}} driver work. The following suggestions might help you get passed this issue:", out.V{"driver_name": driver}) out.T(style.Empty, ` - - Prune unused {{.driver_name}} images, volumes and abandoned containers.`, out.V{"driver_name": driver}) + - Prune unused {{.driver_name}} images, volumes, networks and abandoned containers. + docker system prune --volumes`, out.V{"driver_name": driver}) out.T(style.Empty, ` - Restart your {{.driver_name}} service`, out.V{"driver_name": driver}) if runtime.GOOS != "linux" { From 481cb50f7492ab108cad8ce003075844d5462ad0 Mon Sep 17 00:00:00 2001 From: Dean Coakley Date: Tue, 1 Sep 2020 15:05:10 +0100 Subject: [PATCH 02/21] Add leading newline for clarity --- pkg/minikube/machine/advice.go | 1 + 1 file changed, 1 insertion(+) diff --git a/pkg/minikube/machine/advice.go b/pkg/minikube/machine/advice.go index a7d4cd7787..0f802567ae 100644 --- a/pkg/minikube/machine/advice.go +++ b/pkg/minikube/machine/advice.go @@ -41,6 +41,7 @@ func MaybeDisplayAdvice(err error, driver string) { out.T(style.Tip, "If you are still interested to make {{.driver_name}} driver work. The following suggestions might help you get passed this issue:", out.V{"driver_name": driver}) out.T(style.Empty, ` - Prune unused {{.driver_name}} images, volumes, networks and abandoned containers. 
+ docker system prune --volumes`, out.V{"driver_name": driver}) out.T(style.Empty, ` - Restart your {{.driver_name}} service`, out.V{"driver_name": driver}) From c5ae7b95806366f36c7caff0f58a1a4d2870c55e Mon Sep 17 00:00:00 2001 From: jjanik <11janci@seznam.cz> Date: Fri, 12 Jun 2020 11:59:36 +0200 Subject: [PATCH 03/21] add volumesnapshots addon --- .../rbac-volume-snapshot-controller.yaml | 99 +++++++++ ....storage.k8s.io_volumesnapshotclasses.yaml | 68 ++++++ ...storage.k8s.io_volumesnapshotcontents.yaml | 197 ++++++++++++++++++ ...apshot.storage.k8s.io_volumesnapshots.yaml | 144 +++++++++++++ ...volume-snapshot-controller-deployment.yaml | 29 +++ pkg/addons/config.go | 5 + pkg/minikube/assets/addons.go | 34 ++- 7 files changed, 575 insertions(+), 1 deletion(-) create mode 100644 deploy/addons/volumesnapshots/rbac-volume-snapshot-controller.yaml create mode 100644 deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml create mode 100644 deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml create mode 100644 deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml create mode 100644 deploy/addons/volumesnapshots/volume-snapshot-controller-deployment.yaml diff --git a/deploy/addons/volumesnapshots/rbac-volume-snapshot-controller.yaml b/deploy/addons/volumesnapshots/rbac-volume-snapshot-controller.yaml new file mode 100644 index 0000000000..8d92502d34 --- /dev/null +++ b/deploy/addons/volumesnapshots/rbac-volume-snapshot-controller.yaml @@ -0,0 +1,99 @@ +# RBAC file for the volume snapshot controller. +apiVersion: v1 +kind: ServiceAccount +metadata: + name: volume-snapshot-controller + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + # rename if there are conflicts + name: volume-snapshot-controller-runner + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update"] + - apiGroups: ["apiextensions.k8s.io"] + resources: ["customresourcedefinitions"] + verbs: ["create", "list", "watch", "delete", "get", "update"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: volume-snapshot-controller-role + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +subjects: + - kind: ServiceAccount + name: volume-snapshot-controller + namespace: kube-system +roleRef: + kind: ClusterRole + # change the name also here if the ClusterRole gets renamed + name: 
volume-snapshot-controller-runner + apiGroup: rbac.authorization.k8s.io + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: volume-snapshot-controller-leaderelection + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +rules: +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: volume-snapshot-controller-leaderelection + namespace: kube-system + labels: + kubernetes.io/cluster-service: "true" + addonmanager.kubernetes.io/mode: Reconcile +subjects: + - kind: ServiceAccount + name: volume-snapshot-controller + namespace: kube-system +roleRef: + kind: Role + name: volume-snapshot-controller-leaderelection + apiGroup: rbac.authorization.k8s.io + diff --git a/deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml b/deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml new file mode 100644 index 0000000000..90f6b6bd7f --- /dev/null +++ b/deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml @@ -0,0 +1,68 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: volumesnapshotclasses.snapshot.storage.k8s.io + labels: + addonmanager.kubernetes.io/mode: Reconcile +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotClass + listKind: VolumeSnapshotClassList + plural: volumesnapshotclasses + singular: volumesnapshotclass + scope: Cluster + preserveUnknownFields: false + validation: + openAPIV3Schema: + description: VolumeSnapshotClass specifies parameters that a underlying storage + system uses when creating a volume snapshot. A specific VolumeSnapshotClass + is used by specifying its name in a VolumeSnapshot object. VolumeSnapshotClasses + are non-namespaced + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + deletionPolicy: + description: deletionPolicy determines whether a VolumeSnapshotContent created + through the VolumeSnapshotClass should be deleted when its bound VolumeSnapshot + is deleted. Supported values are "Retain" and "Delete". "Retain" means + that the VolumeSnapshotContent and its physical snapshot on underlying + storage system are kept. "Delete" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are deleted. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the storage driver that handles this + VolumeSnapshotClass. Required. + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + parameters: + additionalProperties: + type: string + description: parameters is a key-value map with storage driver specific + parameters for creating snapshots. These values are opaque to Kubernetes. 
+ type: object + required: + - deletionPolicy + - driver + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml b/deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml new file mode 100644 index 0000000000..8114e8e543 --- /dev/null +++ b/deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml @@ -0,0 +1,197 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: volumesnapshotcontents.snapshot.storage.k8s.io + labels: + addonmanager.kubernetes.io/mode: Reconcile +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshotContent + listKind: VolumeSnapshotContentList + plural: volumesnapshotcontents + singular: volumesnapshotcontent + scope: Cluster + subresources: + status: {} + preserveUnknownFields: false + validation: + openAPIV3Schema: + description: VolumeSnapshotContent represents the actual "on-disk" snapshot + object in the underlying storage system + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + spec: + description: spec defines properties of a VolumeSnapshotContent created + by the underlying storage system. Required. + properties: + deletionPolicy: + description: deletionPolicy determines whether this VolumeSnapshotContent + and its physical snapshot on the underlying storage system should + be deleted when its bound VolumeSnapshot is deleted. Supported values + are "Retain" and "Delete". "Retain" means that the VolumeSnapshotContent + and its physical snapshot on underlying storage system are kept. "Delete" + means that the VolumeSnapshotContent and its physical snapshot on + underlying storage system are deleted. In dynamic snapshot creation + case, this field will be filled in with the "DeletionPolicy" field + defined in the VolumeSnapshotClass the VolumeSnapshot refers to. For + pre-existing snapshots, users MUST specify this field when creating + the VolumeSnapshotContent object. Required. + enum: + - Delete + - Retain + type: string + driver: + description: driver is the name of the CSI driver used to create the + physical snapshot on the underlying storage system. This MUST be the + same as the name returned by the CSI GetPluginName() call for that + driver. Required. + type: string + source: + description: source specifies from where a snapshot will be created. + This field is immutable after creation. Required. + properties: + snapshotHandle: + description: snapshotHandle specifies the CSI "snapshot_id" of a + pre-existing snapshot on the underlying storage system. This field + is immutable. + type: string + volumeHandle: + description: volumeHandle specifies the CSI "volume_id" of the volume + from which a snapshot should be dynamically taken from. 
This field + is immutable. + type: string + type: object + volumeSnapshotClassName: + description: name of the VolumeSnapshotClass to which this snapshot + belongs. + type: string + volumeSnapshotRef: + description: volumeSnapshotRef specifies the VolumeSnapshot object to + which this VolumeSnapshotContent object is bound. VolumeSnapshot.Spec.VolumeSnapshotContentName + field must reference to this VolumeSnapshotContent's name for the + bidirectional binding to be valid. For a pre-existing VolumeSnapshotContent + object, name and namespace of the VolumeSnapshot object MUST be provided + for binding to happen. This field is immutable after creation. Required. + properties: + apiVersion: + description: API version of the referent. + type: string + fieldPath: + description: 'If referring to a piece of an object instead of an + entire object, this string should contain a valid JSON/Go field + access statement, such as desiredState.manifest.containers[2]. + For example, if the object reference is to a container within + a pod, this would take on a value like: "spec.containers{name}" + (where "name" refers to the name of the container that triggered + the event) or if no container name is specified "spec.containers[2]" + (container with index 2 in this pod). This syntax is chosen only + to have some well-defined way of referencing a part of an object. + TODO: this design is not final and this field is subject to change + in the future.' + type: string + kind: + description: 'Kind of the referent. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + name: + description: 'Name of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#names' + type: string + namespace: + description: 'Namespace of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/namespaces/' + type: string + resourceVersion: + description: 'Specific resourceVersion to which this reference is + made, if any. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#concurrency-control-and-consistency' + type: string + uid: + description: 'UID of the referent. More info: https://kubernetes.io/docs/concepts/overview/working-with-objects/names/#uids' + type: string + type: object + required: + - deletionPolicy + - driver + - source + - volumeSnapshotRef + type: object + status: + description: status represents the current information of a snapshot. + properties: + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot + is taken by the underlying storage system. In dynamic snapshot creation + case, this field will be filled in with the "creation_time" value + returned from CSI "CreateSnapshotRequest" gRPC call. For a pre-existing + snapshot, this field will be filled with the "creation_time" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. If not specified, it indicates the creation time is unknown. The + format of this field is a Unix nanoseconds time encoded as an int64. + On Unix, the command `date +%s%N` returns the current time in nanoseconds + since 1970-01-01 00:00:00 UTC. + format: int64 + type: integer + error: + description: error is the latest observed error during snapshot creation, + if any. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. 
NOTE: message may be logged, + and it should not contain sensitive information.' + type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in with the "ready_to_use" value returned from CSI + "CreateSnapshotRequest" gRPC call. For a pre-existing snapshot, this + field will be filled with the "ready_to_use" value returned from the + CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, + this field will be set to "True". If not specified, it means the readiness + of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be filled + in with the "size_bytes" value returned from CSI "CreateSnapshotRequest" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "size_bytes" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. When restoring a volume from + this snapshot, the size of the volume MUST NOT be smaller than the + restoreSize if it is specified, otherwise the restoration will fail. + If not specified, it indicates that the size is unknown. + format: int64 + minimum: 0 + type: integer + snapshotHandle: + description: snapshotHandle is the CSI "snapshot_id" of a snapshot on + the underlying storage system. If not specified, it indicates that + dynamic snapshot creation has either failed or it is still in progress. + type: string + type: object + required: + - spec + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml b/deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml new file mode 100644 index 0000000000..1e16e3e7a6 --- /dev/null +++ b/deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml @@ -0,0 +1,144 @@ +apiVersion: apiextensions.k8s.io/v1beta1 +kind: CustomResourceDefinition +metadata: + name: volumesnapshots.snapshot.storage.k8s.io + labels: + addonmanager.kubernetes.io/mode: Reconcile +spec: + group: snapshot.storage.k8s.io + names: + kind: VolumeSnapshot + listKind: VolumeSnapshotList + plural: volumesnapshots + singular: volumesnapshot + scope: Namespaced + subresources: + status: {} + preserveUnknownFields: false + validation: + openAPIV3Schema: + description: VolumeSnapshot is a user's request for either creating a point-in-time + snapshot of a persistent volume, or binding to a pre-existing snapshot. + properties: + apiVersion: + description: 'APIVersion defines the versioned schema of this representation + of an object. Servers should convert recognized schemas to the latest + internal value, and may reject unrecognized values. More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#resources' + type: string + kind: + description: 'Kind is a string value representing the REST resource this + object represents. Servers may infer this from the endpoint the client + submits requests to. Cannot be updated. In CamelCase. 
More info: https://git.k8s.io/community/contributors/devel/api-conventions.md#types-kinds' + type: string + spec: + description: 'spec defines the desired characteristics of a snapshot requested + by a user. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshots#volumesnapshots + Required.' + properties: + source: + description: source specifies where a snapshot will be created from. + This field is immutable after creation. Required. + properties: + persistentVolumeClaimName: + description: persistentVolumeClaimName specifies the name of the + PersistentVolumeClaim object in the same namespace as the VolumeSnapshot + object where the snapshot should be dynamically taken from. This + field is immutable. + type: string + volumeSnapshotContentName: + description: volumeSnapshotContentName specifies the name of a pre-existing + VolumeSnapshotContent object. This field is immutable. + type: string + type: object + volumeSnapshotClassName: + description: 'volumeSnapshotClassName is the name of the VolumeSnapshotClass + requested by the VolumeSnapshot. If not specified, the default snapshot + class will be used if one exists. If not specified, and there is no + default snapshot class, dynamic snapshot creation will fail. Empty + string is not allowed for this field. TODO(xiangqian): a webhook validation + on empty string. More info: https://kubernetes.io/docs/concepts/storage/volume-snapshot-classes' + type: string + required: + - source + type: object + status: + description: 'status represents the current information of a snapshot. NOTE: + status can be modified by sources other than system controllers, and must + not be depended upon for accuracy. Controllers should only use information + from the VolumeSnapshotContent object after verifying that the binding + is accurate and complete.' + properties: + boundVolumeSnapshotContentName: + description: 'boundVolumeSnapshotContentName represents the name of + the VolumeSnapshotContent object to which the VolumeSnapshot object + is bound. If not specified, it indicates that the VolumeSnapshot object + has not been successfully bound to a VolumeSnapshotContent object + yet. NOTE: Specified boundVolumeSnapshotContentName alone does not + mean binding is valid. Controllers MUST always verify bidirectional + binding between VolumeSnapshot and VolumeSnapshotContent to + avoid possible security issues.' + type: string + creationTime: + description: creationTime is the timestamp when the point-in-time snapshot + is taken by the underlying storage system. In dynamic snapshot creation + case, this field will be filled in with the "creation_time" value + returned from CSI "CreateSnapshotRequest" gRPC call. For a pre-existing + snapshot, this field will be filled with the "creation_time" value + returned from the CSI "ListSnapshots" gRPC call if the driver supports + it. If not specified, it indicates that the creation time of the snapshot + is unknown. + format: date-time + type: string + error: + description: error is the last observed error during snapshot creation, + if any. This field could be helpful to upper level controllers(i.e., + application controller) to decide whether they should continue on + waiting for the snapshot to be created based on the type of error + reported. + properties: + message: + description: 'message is a string detailing the encountered error + during snapshot creation if specified. NOTE: message may be logged, + and it should not contain sensitive information.' 
+ type: string + time: + description: time is the timestamp when the error was encountered. + format: date-time + type: string + type: object + readyToUse: + description: readyToUse indicates if a snapshot is ready to be used + to restore a volume. In dynamic snapshot creation case, this field + will be filled in with the "ready_to_use" value returned from CSI + "CreateSnapshotRequest" gRPC call. For a pre-existing snapshot, this + field will be filled with the "ready_to_use" value returned from the + CSI "ListSnapshots" gRPC call if the driver supports it, otherwise, + this field will be set to "True". If not specified, it means the readiness + of a snapshot is unknown. + type: boolean + restoreSize: + description: restoreSize represents the complete size of the snapshot + in bytes. In dynamic snapshot creation case, this field will be filled + in with the "size_bytes" value returned from CSI "CreateSnapshotRequest" + gRPC call. For a pre-existing snapshot, this field will be filled + with the "size_bytes" value returned from the CSI "ListSnapshots" + gRPC call if the driver supports it. When restoring a volume from + this snapshot, the size of the volume MUST NOT be smaller than the + restoreSize if it is specified, otherwise the restoration will fail. + If not specified, it indicates that the size is unknown. + type: string + type: object + required: + - spec + type: object + version: v1beta1 + versions: + - name: v1beta1 + served: true + storage: true +status: + acceptedNames: + kind: "" + plural: "" + conditions: [] + storedVersions: [] diff --git a/deploy/addons/volumesnapshots/volume-snapshot-controller-deployment.yaml b/deploy/addons/volumesnapshots/volume-snapshot-controller-deployment.yaml new file mode 100644 index 0000000000..32d7f2a391 --- /dev/null +++ b/deploy/addons/volumesnapshots/volume-snapshot-controller-deployment.yaml @@ -0,0 +1,29 @@ +# This YAML file shows how to deploy the volume snapshot controller + +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: volume-snapshot-controller + namespace: kube-system + labels: + addonmanager.kubernetes.io/mode: Reconcile +spec: + serviceName: "volume-snapshot-controller" + replicas: 1 + selector: + matchLabels: + app: volume-snapshot-controller + template: + metadata: + labels: + app: volume-snapshot-controller + spec: + serviceAccount: volume-snapshot-controller + containers: + - name: volume-snapshot-controller + # TODO(xyang): Replace with an official image when it is released + image: gcr.io/k8s-staging-csi/snapshot-controller:v2.0.0-rc2 + args: + - "--v=5" + imagePullPolicy: Always diff --git a/pkg/addons/config.go b/pkg/addons/config.go index bcc382a86d..6805d5e83b 100644 --- a/pkg/addons/config.go +++ b/pkg/addons/config.go @@ -170,4 +170,9 @@ var Addons = []*Addon{ set: SetBool, callbacks: []setFn{gcpauth.EnableOrDisable, enableOrDisableAddon, verifyGCPAuthAddon, gcpauth.DisplayAddonMessage}, }, + { + name: "volumesnapshots", + set: SetBool, + callbacks: []setFn{enableOrDisableAddon}, + }, } diff --git a/pkg/minikube/assets/addons.go b/pkg/minikube/assets/addons.go index e557a96b2c..597b4057c8 100644 --- a/pkg/minikube/assets/addons.go +++ b/pkg/minikube/assets/addons.go @@ -416,7 +416,7 @@ var Addons = map[string]*Addon{ MustBinAsset( "deploy/addons/ambassador/ambassadorinstallation.yaml", vmpath.GuestAddonsDir, - "ambassadorinstallation.yaml.yaml", + "ambassadorinstallation.yaml", "0640", false), }, false, "ambassador"), @@ -440,6 +440,38 @@ var Addons = map[string]*Addon{ "0640", false), }, false, 
"gcp-auth"), + "volumesnapshots": NewAddon([]*BinAsset{ + MustBinAsset( + "deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshotclasses.yaml", + vmpath.GuestAddonsDir, + "snapshot.storage.k8s.io_volumesnapshotclasses.yaml", + "0640", + false), + MustBinAsset( + "deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshotcontents.yaml", + vmpath.GuestAddonsDir, + "snapshot.storage.k8s.io_volumesnapshotcontents.yaml", + "0640", + false), + MustBinAsset( + "deploy/addons/volumesnapshots/snapshot.storage.k8s.io_volumesnapshots.yaml", + vmpath.GuestAddonsDir, + "snapshot.storage.k8s.io_volumesnapshots.yaml", + "0640", + false), + MustBinAsset( + "deploy/addons/volumesnapshots/rbac-volume-snapshot-controller.yaml", + vmpath.GuestAddonsDir, + "rbac-volume-snapshot-controller.yaml", + "0640", + false), + MustBinAsset( + "deploy/addons/volumesnapshots/volume-snapshot-controller-deployment.yaml", + vmpath.GuestAddonsDir, + "volume-snapshot-controller-deployment.yaml", + "0640", + false), + }, false, "volumesnapshots"), } // GenerateTemplateData generates template data for template assets From ae92c289b88ecdea55082e8b5c8bfe97e28453d0 Mon Sep 17 00:00:00 2001 From: jjanik <11janci@seznam.cz> Date: Thu, 6 Aug 2020 17:25:06 +0200 Subject: [PATCH 04/21] add csi-hostpath-driver addon --- .../deploy/csi-hostpath-attacher.yaml | 63 ++++++++ .../deploy/csi-hostpath-driverinfo.yaml | 13 ++ .../deploy/csi-hostpath-plugin.yaml | 143 ++++++++++++++++++ .../deploy/csi-hostpath-provisioner.yaml | 63 ++++++++ .../deploy/csi-hostpath-resizer.yaml | 62 ++++++++ .../deploy/csi-hostpath-snapshotter.yaml | 62 ++++++++ .../deploy/csi-hostpath-storageclass.yaml | 7 + .../rbac/rbac-external-attacher.yaml | 84 ++++++++++ .../rbac/rbac-external-provisioner.yaml | 101 +++++++++++++ .../rbac/rbac-external-resizer.yaml | 85 +++++++++++ .../rbac/rbac-external-snapshotter.yaml | 88 +++++++++++ pkg/addons/config.go | 15 +- pkg/addons/validations.go | 35 +++++ pkg/minikube/assets/addons.go | 68 +++++++++ 14 files changed, 885 insertions(+), 4 deletions(-) create mode 100644 deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-attacher.yaml create mode 100644 deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml create mode 100644 deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-plugin.yaml create mode 100644 deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-provisioner.yaml create mode 100644 deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-resizer.yaml create mode 100644 deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-snapshotter.yaml create mode 100644 deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml create mode 100644 deploy/addons/csi-hostpath-driver/rbac/rbac-external-attacher.yaml create mode 100644 deploy/addons/csi-hostpath-driver/rbac/rbac-external-provisioner.yaml create mode 100644 deploy/addons/csi-hostpath-driver/rbac/rbac-external-resizer.yaml create mode 100644 deploy/addons/csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml diff --git a/deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-attacher.yaml b/deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-attacher.yaml new file mode 100644 index 0000000000..bde8522ab4 --- /dev/null +++ b/deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-attacher.yaml @@ -0,0 +1,63 @@ +kind: Service +apiVersion: v1 +metadata: + name: csi-hostpath-attacher + namespace: kube-system + labels: + app: csi-hostpath-attacher +spec: + selector: + app: csi-hostpath-attacher + ports: + - name: dummy 
+ port: 12345 + +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: csi-hostpath-attacher + namespace: kube-system +spec: + serviceName: "csi-hostpath-attacher" + replicas: 1 + selector: + matchLabels: + app: csi-hostpath-attacher + template: + metadata: + labels: + app: csi-hostpath-attacher + kubernetes.io/minikube-addons: csi-hostpath-driver + spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - csi-hostpathplugin + topologyKey: kubernetes.io/hostname + serviceAccountName: csi-attacher + containers: + - name: csi-attacher + image: quay.io/k8scsi/csi-attacher:v3.0.0-rc1 + args: + - --v=5 + - --csi-address=/csi/csi.sock + securityContext: + # This is necessary only for systems with SELinux, where + # non-privileged sidecar containers cannot access unix domain socket + # created by privileged CSI driver container. + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/csi-hostpath + type: DirectoryOrCreate + name: socket-dir diff --git a/deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml b/deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml new file mode 100644 index 0000000000..79a09ced34 --- /dev/null +++ b/deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml @@ -0,0 +1,13 @@ +apiVersion: storage.k8s.io/v1 +kind: CSIDriver +metadata: + name: hostpath.csi.k8s.io + namespace: kube-system +spec: + # Supports persistent and ephemeral inline volumes. + volumeLifecycleModes: + - Persistent + - Ephemeral + # To determine at runtime which mode a volume uses, pod info and its + # "csi.storage.k8s.io/ephemeral" entry are needed. + podInfoOnMount: true diff --git a/deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-plugin.yaml b/deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-plugin.yaml new file mode 100644 index 0000000000..b1f379cfe3 --- /dev/null +++ b/deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-plugin.yaml @@ -0,0 +1,143 @@ +# Service defined here, plus serviceName below in StatefulSet, +# are needed only because of condition explained in +# https://github.com/kubernetes/kubernetes/issues/69608 + +kind: Service +apiVersion: v1 +metadata: + name: csi-hostpathplugin + namespace: kube-system + labels: + app: csi-hostpathplugin +spec: + selector: + app: csi-hostpathplugin + ports: + - name: dummy + port: 12345 +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: csi-hostpathplugin + namespace: kube-system +spec: + serviceName: "csi-hostpathplugin" + # One replica only: + # Host path driver only works when everything runs + # on a single node. We achieve that by starting it once and then + # co-locate all other pods via inter-pod affinity + replicas: 1 + selector: + matchLabels: + app: csi-hostpathplugin + template: + metadata: + labels: + app: csi-hostpathplugin + kubernetes.io/minikube-addons: csi-hostpath-driver + spec: + containers: + - name: node-driver-registrar + image: quay.io/k8scsi/csi-node-driver-registrar:v1.3.0 + args: + - --v=5 + - --csi-address=/csi/csi.sock + - --kubelet-registration-path=/var/lib/kubelet/plugins/csi-hostpath/csi.sock + securityContext: + # This is necessary only for systems with SELinux, where + # non-privileged sidecar containers cannot access unix domain socket + # created by privileged CSI driver container. 
+ privileged: true + env: + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /registration + name: registration-dir + - mountPath: /csi-data-dir + name: csi-data-dir + + - name: hostpath + image: quay.io/k8scsi/hostpathplugin:v1.4.0-rc2 + args: + - "--drivername=hostpath.csi.k8s.io" + - "--v=5" + - "--endpoint=$(CSI_ENDPOINT)" + - "--nodeid=$(KUBE_NODE_NAME)" + env: + - name: CSI_ENDPOINT + value: unix:///csi/csi.sock + - name: KUBE_NODE_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: spec.nodeName + securityContext: + privileged: true + ports: + - containerPort: 9898 + name: healthz + protocol: TCP + livenessProbe: + failureThreshold: 5 + httpGet: + path: /healthz + port: healthz + initialDelaySeconds: 10 + timeoutSeconds: 3 + periodSeconds: 2 + volumeMounts: + - mountPath: /csi + name: socket-dir + - mountPath: /var/lib/kubelet/pods + mountPropagation: Bidirectional + name: mountpoint-dir + - mountPath: /var/lib/kubelet/plugins + mountPropagation: Bidirectional + name: plugins-dir + - mountPath: /csi-data-dir + name: csi-data-dir + - mountPath: /dev + name: dev-dir + - name: liveness-probe + volumeMounts: + - mountPath: /csi + name: socket-dir + image: quay.io/k8scsi/livenessprobe:v1.1.0 + args: + - --csi-address=/csi/csi.sock + - --health-port=9898 + + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/csi-hostpath + type: DirectoryOrCreate + name: socket-dir + - hostPath: + path: /var/lib/kubelet/pods + type: DirectoryOrCreate + name: mountpoint-dir + - hostPath: + path: /var/lib/kubelet/plugins_registry + type: Directory + name: registration-dir + - hostPath: + path: /var/lib/kubelet/plugins + type: Directory + name: plugins-dir + - hostPath: + # 'path' is where PV data is persisted on host. 
+ # using /tmp is also possible while the PVs will not available after plugin container recreation or host reboot + path: /var/lib/csi-hostpath-data/ + type: DirectoryOrCreate + name: csi-data-dir + - hostPath: + path: /dev + type: Directory + name: dev-dir diff --git a/deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-provisioner.yaml b/deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-provisioner.yaml new file mode 100644 index 0000000000..27b78e37ee --- /dev/null +++ b/deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-provisioner.yaml @@ -0,0 +1,63 @@ +kind: Service +apiVersion: v1 +metadata: + name: csi-hostpath-provisioner + namespace: kube-system + labels: + app: csi-hostpath-provisioner +spec: + selector: + app: csi-hostpath-provisioner + ports: + - name: dummy + port: 12345 + +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: csi-hostpath-provisioner + namespace: kube-system +spec: + serviceName: "csi-hostpath-provisioner" + replicas: 1 + selector: + matchLabels: + app: csi-hostpath-provisioner + template: + metadata: + labels: + app: csi-hostpath-provisioner + kubernetes.io/minikube-addons: csi-hostpath-driver + spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - csi-hostpathplugin + topologyKey: kubernetes.io/hostname + serviceAccountName: csi-provisioner + containers: + - name: csi-provisioner + image: gcr.io/k8s-staging-sig-storage/csi-provisioner:v2.0.0-rc2 + args: + - -v=5 + - --csi-address=/csi/csi.sock + - --feature-gates=Topology=true + securityContext: + # This is necessary only for systems with SELinux, where + # non-privileged sidecar containers cannot access unix domain socket + # created by privileged CSI driver container. + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/csi-hostpath + type: DirectoryOrCreate + name: socket-dir diff --git a/deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-resizer.yaml b/deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-resizer.yaml new file mode 100644 index 0000000000..683d2c8f88 --- /dev/null +++ b/deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-resizer.yaml @@ -0,0 +1,62 @@ +kind: Service +apiVersion: v1 +metadata: + name: csi-hostpath-resizer + namespace: kube-system + labels: + app: csi-hostpath-resizer +spec: + selector: + app: csi-hostpath-resizer + ports: + - name: dummy + port: 12345 + +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: csi-hostpath-resizer + namespace: kube-system +spec: + serviceName: "csi-hostpath-resizer" + replicas: 1 + selector: + matchLabels: + app: csi-hostpath-resizer + template: + metadata: + labels: + app: csi-hostpath-resizer + kubernetes.io/minikube-addons: csi-hostpath-driver + spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - csi-hostpathplugin + topologyKey: kubernetes.io/hostname + serviceAccountName: csi-resizer + containers: + - name: csi-resizer + image: quay.io/k8scsi/csi-resizer:v0.6.0-rc1 + args: + - -v=5 + - -csi-address=/csi/csi.sock + securityContext: + # This is necessary only for systems with SELinux, where + # non-privileged sidecar containers cannot access unix domain socket + # created by privileged CSI driver container. 
+ privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/csi-hostpath + type: DirectoryOrCreate + name: socket-dir diff --git a/deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-snapshotter.yaml b/deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-snapshotter.yaml new file mode 100644 index 0000000000..6fd4e107ca --- /dev/null +++ b/deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-snapshotter.yaml @@ -0,0 +1,62 @@ +kind: Service +apiVersion: v1 +metadata: + name: csi-hostpath-snapshotter + namespace: kube-system + labels: + app: csi-hostpath-snapshotter +spec: + selector: + app: csi-hostpath-snapshotter + ports: + - name: dummy + port: 12345 + +--- +kind: StatefulSet +apiVersion: apps/v1 +metadata: + name: csi-hostpath-snapshotter + namespace: kube-system +spec: + serviceName: "csi-hostpath-snapshotter" + replicas: 1 + selector: + matchLabels: + app: csi-hostpath-snapshotter + template: + metadata: + labels: + app: csi-hostpath-snapshotter + kubernetes.io/minikube-addons: csi-hostpath-driver + spec: + affinity: + podAffinity: + requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchExpressions: + - key: app + operator: In + values: + - csi-hostpathplugin + topologyKey: kubernetes.io/hostname + serviceAccount: csi-snapshotter + containers: + - name: csi-snapshotter + image: quay.io/k8scsi/csi-snapshotter:v2.1.0 + args: + - -v=5 + - --csi-address=/csi/csi.sock + securityContext: + # This is necessary only for systems with SELinux, where + # non-privileged sidecar containers cannot access unix domain socket + # created by privileged CSI driver container. + privileged: true + volumeMounts: + - mountPath: /csi + name: socket-dir + volumes: + - hostPath: + path: /var/lib/kubelet/plugins/csi-hostpath + type: DirectoryOrCreate + name: socket-dir diff --git a/deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml b/deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml new file mode 100644 index 0000000000..59999a8cca --- /dev/null +++ b/deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml @@ -0,0 +1,7 @@ +apiVersion: storage.k8s.io/v1 +kind: StorageClass +metadata: + name: csi-hostpath-sc +provisioner: hostpath.csi.k8s.io #csi-hostpath +reclaimPolicy: Delete +volumeBindingMode: Immediate diff --git a/deploy/addons/csi-hostpath-driver/rbac/rbac-external-attacher.yaml b/deploy/addons/csi-hostpath-driver/rbac/rbac-external-attacher.yaml new file mode 100644 index 0000000000..a5593809f5 --- /dev/null +++ b/deploy/addons/csi-hostpath-driver/rbac/rbac-external-attacher.yaml @@ -0,0 +1,84 @@ +# This YAML file contains all RBAC objects that are necessary to run external +# CSI attacher. 
+# +# In production, each CSI driver deployment has to be customized: +# - to avoid conflicts, use non-default namespace and different names +# for non-namespaced entities like the ClusterRole +# - decide whether the deployment replicates the external CSI +# attacher, in which case leadership election must be enabled; +# this influences the RBAC setup, see below + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-attacher + namespace: kube-system + +--- +# Attacher must be able to work with PVs, CSINodes and VolumeAttachments +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: external-attacher-runner +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments/status"] + verbs: ["patch"] +#Secret permission is optional. +#Enable it if you need value from secret. +#For example, you have key `csi.storage.k8s.io/controller-publish-secret-name` in StorageClass.parameters +#see https://kubernetes-csi.github.io/docs/secrets-and-credentials.html +# - apiGroups: [""] +# resources: ["secrets"] +# verbs: ["get", "list"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-attacher-role +subjects: + - kind: ServiceAccount + name: csi-attacher + namespace: kube-system +roleRef: + kind: ClusterRole + name: external-attacher-runner + apiGroup: rbac.authorization.k8s.io + +--- +# Attacher must be able to work with configmaps or leases in the current namespace +# if (and only if) leadership election is enabled +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: kube-system + name: external-attacher-cfg +rules: +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-attacher-role-cfg + namespace: kube-system +subjects: + - kind: ServiceAccount + name: csi-attacher + namespace: kube-system +roleRef: + kind: Role + name: external-attacher-cfg + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/addons/csi-hostpath-driver/rbac/rbac-external-provisioner.yaml b/deploy/addons/csi-hostpath-driver/rbac/rbac-external-provisioner.yaml new file mode 100644 index 0000000000..07226c75f4 --- /dev/null +++ b/deploy/addons/csi-hostpath-driver/rbac/rbac-external-provisioner.yaml @@ -0,0 +1,101 @@ +# This YAML file contains all RBAC objects that are necessary to run external +# CSI provisioner. +# +# In production, each CSI driver deployment has to be customized: +# - to avoid conflicts, use non-default namespace and different names +# for non-namespaced entities like the ClusterRole +# - decide whether the deployment replicates the external CSI +# provisioner, in which case leadership election must be enabled; +# this influences the RBAC setup, see below + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-provisioner + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: external-provisioner-runner +rules: + # The following rule should be uncommented for plugins that require secrets + # for provisioning. 
+ # - apiGroups: [""] + # resources: ["secrets"] + # verbs: ["get", "list"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "create", "delete"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["get", "list"] + - apiGroups: ["storage.k8s.io"] + resources: ["csinodes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["nodes"] + verbs: ["get", "list", "watch"] + - apiGroups: ["storage.k8s.io"] + resources: ["volumeattachments"] + verbs: ["get", "list", "watch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-provisioner-role +subjects: + - kind: ServiceAccount + name: csi-provisioner + namespace: kube-system +roleRef: + kind: ClusterRole + name: external-provisioner-runner + apiGroup: rbac.authorization.k8s.io + +--- +# Provisioner must be able to work with endpoints in current namespace +# if (and only if) leadership election is enabled +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: kube-system + name: external-provisioner-cfg +rules: +# Only one of the following rules for endpoints or leases is required based on +# what is set for `--leader-election-type`. Endpoints are deprecated in favor of Leases. +- apiGroups: [""] + resources: ["endpoints"] + verbs: ["get", "watch", "list", "delete", "update", "create"] +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-provisioner-role-cfg + namespace: kube-system +subjects: + - kind: ServiceAccount + name: csi-provisioner + namespace: kube-system +roleRef: + kind: Role + name: external-provisioner-cfg + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/addons/csi-hostpath-driver/rbac/rbac-external-resizer.yaml b/deploy/addons/csi-hostpath-driver/rbac/rbac-external-resizer.yaml new file mode 100644 index 0000000000..5bdee8f39d --- /dev/null +++ b/deploy/addons/csi-hostpath-driver/rbac/rbac-external-resizer.yaml @@ -0,0 +1,85 @@ +# This YAML file contains all RBAC objects that are necessary to run external +# CSI resizer. +# +# In production, each CSI driver deployment has to be customized: +# - to avoid conflicts, use non-default namespace and different names +# for non-namespaced entities like the ClusterRole +# - decide whether the deployment replicates the external CSI +# resizer, in which case leadership election must be enabled; +# this influences the RBAC setup, see below + +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-resizer + namespace: kube-system + +--- +# Resizer must be able to work with PVCs, PVs, SCs. +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: external-resizer-runner +rules: + # The following rule should be uncommented for plugins that require secrets + # for provisioning. 
+ # - apiGroups: [""] + # resources: ["secrets"] + # verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch", "patch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["pods"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims/status"] + verbs: ["patch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-resizer-role +subjects: + - kind: ServiceAccount + name: csi-resizer + namespace: kube-system +roleRef: + kind: ClusterRole + name: external-resizer-runner + apiGroup: rbac.authorization.k8s.io + +--- +# Resizer must be able to work with end point in current namespace +# if (and only if) leadership election is enabled +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: kube-system + name: external-resizer-cfg +rules: +- apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-resizer-role-cfg + namespace: kube-system +subjects: + - kind: ServiceAccount + name: csi-resizer + namespace: kube-system +roleRef: + kind: Role + name: external-resizer-cfg + apiGroup: rbac.authorization.k8s.io diff --git a/deploy/addons/csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml b/deploy/addons/csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml new file mode 100644 index 0000000000..68c1c559ff --- /dev/null +++ b/deploy/addons/csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml @@ -0,0 +1,88 @@ +# RBAC file for the snapshot controller. +# +# The snapshot controller implements the control loop for CSI snapshot functionality. +# It should be installed as part of the base Kubernetes distribution in an appropriate +# namespace for components implementing base system functionality. For installing with +# Vanilla Kubernetes, kube-system makes sense for the namespace. 
+ +apiVersion: v1 +kind: ServiceAccount +metadata: + name: csi-snapshotter + namespace: kube-system + +--- +kind: ClusterRole +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + # rename if there are conflicts + name: csi-snapshotter-runner +rules: + - apiGroups: [""] + resources: ["persistentvolumes"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["persistentvolumeclaims"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["storage.k8s.io"] + resources: ["storageclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: [""] + resources: ["events"] + verbs: ["list", "watch", "create", "update", "patch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotclasses"] + verbs: ["get", "list", "watch"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents"] + verbs: ["create", "get", "list", "watch", "update", "delete"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshotcontents/status"] + verbs: ["update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots"] + verbs: ["get", "list", "watch", "update"] + - apiGroups: ["snapshot.storage.k8s.io"] + resources: ["volumesnapshots/status"] + verbs: ["update"] + +--- +kind: ClusterRoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshotter-role +subjects: + - kind: ServiceAccount + name: csi-snapshotter + namespace: kube-system +roleRef: + kind: ClusterRole + # change the name also here if the ClusterRole gets renamed + name: csi-snapshotter-runner + apiGroup: rbac.authorization.k8s.io + +--- +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + namespace: kube-system + name: csi-snapshotter-leaderelection +rules: + - apiGroups: ["coordination.k8s.io"] + resources: ["leases"] + verbs: ["get", "watch", "list", "delete", "update", "create"] + +--- +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: csi-snapshotter-leaderelection + namespace: kube-system +subjects: + - kind: ServiceAccount + name: csi-snapshotter + namespace: kube-system +roleRef: + kind: Role + name: csi-snapshotter-leaderelection + apiGroup: rbac.authorization.k8s.io \ No newline at end of file diff --git a/pkg/addons/config.go b/pkg/addons/config.go index 6805d5e83b..088698d91a 100644 --- a/pkg/addons/config.go +++ b/pkg/addons/config.go @@ -33,10 +33,11 @@ type Addon struct { // addonPodLabels holds the pod label that will be used to verify if the addon is enabled var addonPodLabels = map[string]string{ - "ingress": "app.kubernetes.io/name=ingress-nginx", - "registry": "kubernetes.io/minikube-addons=registry", - "gvisor": "kubernetes.io/minikube-addons=gvisor", - "gcp-auth": "kubernetes.io/minikube-addons=gcp-auth", + "ingress": "app.kubernetes.io/name=ingress-nginx", + "registry": "kubernetes.io/minikube-addons=registry", + "gvisor": "kubernetes.io/minikube-addons=gvisor", + "gcp-auth": "kubernetes.io/minikube-addons=gcp-auth", + "csi-hostpath-driver": "kubernetes.io/minikube-addons=csi-hostpath-driver", } // Addons is a list of all addons @@ -175,4 +176,10 @@ var Addons = []*Addon{ set: SetBool, callbacks: []setFn{enableOrDisableAddon}, }, + { + name: "csi-hostpath-driver", + set: SetBool, + validations: []setFn{IsVolumesnapshotsEnabled}, + callbacks: []setFn{enableOrDisableAddon, verifyAddonStatus}, + }, } diff --git a/pkg/addons/validations.go b/pkg/addons/validations.go index 2661ac8197..aad44e3cf0 100644 --- a/pkg/addons/validations.go +++ b/pkg/addons/validations.go @@ -18,11 +18,16 @@ 
package addons import ( "fmt" + "strconv" + "k8s.io/minikube/pkg/minikube/assets" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/cruntime" + "k8s.io/minikube/pkg/minikube/out" ) +const volumesnapshotsAddon = "volumesnapshots" + // containerdOnlyMsg is the message shown when a containerd-only addon is enabled const containerdOnlyAddonMsg = ` This addon can only be enabled with the containerd runtime backend. To enable this backend, please first stop minikube with: @@ -33,6 +38,12 @@ and then start minikube again with the following flags: minikube start --container-runtime=containerd --docker-opt containerd=/var/run/containerd/containerd.sock` +// volumesnapshotsDisabledMsg is the message shown when csi-hostpath-driver addon is enabled without the volumesnapshots addon +const volumesnapshotsDisabledMsg = `[WARNING] For full functionality, the 'csi-hostpath-driver' addon requires the 'volumesnapshots' addon to be enabled. + +You can enable 'volumesnapshots' addon by running: 'minikube addons enable volumesnapshots' +` + // IsRuntimeContainerd is a validator which returns an error if the current runtime is not containerd func IsRuntimeContainerd(cc *config.ClusterConfig, _, _ string) error { r, err := cruntime.New(cruntime.Config{Type: cc.KubernetesConfig.ContainerRuntime}) @@ -46,6 +57,21 @@ func IsRuntimeContainerd(cc *config.ClusterConfig, _, _ string) error { return nil } +// IsVolumesnapshotsEnabled is a validator that prints out a warning if the volumesnapshots addon +// is disabled (does not return any errors!) +func IsVolumesnapshotsEnabled(cc *config.ClusterConfig, _, value string) error { + isCsiDriverEnabled, _ := strconv.ParseBool(value) + // assets.Addons[].IsEnabled() returns the current status of the addon or default value. + // config.AddonList contains list of addons to be enabled. 
+ isVolumesnapshotsEnabled := assets.Addons[volumesnapshotsAddon].IsEnabled(cc) || contains(config.AddonList, volumesnapshotsAddon) + if isCsiDriverEnabled && !isVolumesnapshotsEnabled { + // just print out a warning directly, we don't want to return any errors since + // that would prevent the addon from being enabled (callbacks wouldn't be run) + out.WarningT(volumesnapshotsDisabledMsg) + } + return nil +} + // isAddonValid returns the addon, true if it is valid // otherwise returns nil, false func isAddonValid(name string) (*Addon, bool) { @@ -56,3 +82,12 @@ func isAddonValid(name string) (*Addon, bool) { } return nil, false } + +func contains(slice []string, val string) bool { + for _, item := range slice { + if item == val { + return true + } + } + return false +} diff --git a/pkg/minikube/assets/addons.go b/pkg/minikube/assets/addons.go index 597b4057c8..8670a39ffe 100644 --- a/pkg/minikube/assets/addons.go +++ b/pkg/minikube/assets/addons.go @@ -472,6 +472,74 @@ var Addons = map[string]*Addon{ "0640", false), }, false, "volumesnapshots"), + "csi-hostpath-driver": NewAddon([]*BinAsset{ + MustBinAsset( + "deploy/addons/csi-hostpath-driver/rbac/rbac-external-attacher.yaml", + vmpath.GuestAddonsDir, + "rbac-external-attacher.yaml", + "0640", + false), + MustBinAsset( + "deploy/addons/csi-hostpath-driver/rbac/rbac-external-provisioner.yaml", + vmpath.GuestAddonsDir, + "rbac-external-provisioner.yaml", + "0640", + false), + MustBinAsset( + "deploy/addons/csi-hostpath-driver/rbac/rbac-external-resizer.yaml", + vmpath.GuestAddonsDir, + "rbac-external-resizer.yaml", + "0640", + false), + MustBinAsset( + "deploy/addons/csi-hostpath-driver/rbac/rbac-external-snapshotter.yaml", + vmpath.GuestAddonsDir, + "rbac-external-snapshotter.yaml", + "0640", + false), + MustBinAsset( + "deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-attacher.yaml", + vmpath.GuestAddonsDir, + "csi-hostpath-attacher.yaml", + "0640", + false), + MustBinAsset( + "deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-driverinfo.yaml", + vmpath.GuestAddonsDir, + "csi-hostpath-driverinfo.yaml", + "0640", + false), + MustBinAsset( + "deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-plugin.yaml", + vmpath.GuestAddonsDir, + "csi-hostpath-plugin.yaml", + "0640", + false), + MustBinAsset( + "deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-provisioner.yaml", + vmpath.GuestAddonsDir, + "csi-hostpath-provisioner.yaml", + "0640", + false), + MustBinAsset( + "deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-resizer.yaml", + vmpath.GuestAddonsDir, + "csi-hostpath-resizer.yaml", + "0640", + false), + MustBinAsset( + "deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-snapshotter.yaml", + vmpath.GuestAddonsDir, + "csi-hostpath-snapshotter.yaml", + "0640", + false), + MustBinAsset( + "deploy/addons/csi-hostpath-driver/deploy/csi-hostpath-storageclass.yaml", + vmpath.GuestAddonsDir, + "csi-hostpath-storageclass.yaml", + "0640", + false), + }, false, "csi-hostpath-driver"), } // GenerateTemplateData generates template data for template assets From b696eb614a675d91f1bbef5fa9d53d98497c890a Mon Sep 17 00:00:00 2001 From: jjanik <11janci@seznam.cz> Date: Mon, 10 Aug 2020 17:24:23 +0200 Subject: [PATCH 05/21] csi-hostpath-driver & volumesnapshots addons docs and test --- .../tutorials/volume_snapshots_and_csi.md | 46 ++++++++ test/integration/addons_test.go | 108 +++++++++++++++++- test/integration/helpers_test.go | 51 +++++++++ .../csi-hostpath-driver/pv-pod-restore.yaml | 22 ++++ 
.../testdata/csi-hostpath-driver/pv-pod.yaml | 22 ++++ .../csi-hostpath-driver/pvc-restore.yaml | 15 +++ .../testdata/csi-hostpath-driver/pvc.yaml | 11 ++ .../csi-hostpath-driver/snapshot.yaml | 8 ++ .../csi-hostpath-driver/snapshotclass.yaml | 6 + 9 files changed, 288 insertions(+), 1 deletion(-) create mode 100644 site/content/en/docs/tutorials/volume_snapshots_and_csi.md create mode 100644 test/integration/testdata/csi-hostpath-driver/pv-pod-restore.yaml create mode 100644 test/integration/testdata/csi-hostpath-driver/pv-pod.yaml create mode 100644 test/integration/testdata/csi-hostpath-driver/pvc-restore.yaml create mode 100644 test/integration/testdata/csi-hostpath-driver/pvc.yaml create mode 100644 test/integration/testdata/csi-hostpath-driver/snapshot.yaml create mode 100644 test/integration/testdata/csi-hostpath-driver/snapshotclass.yaml diff --git a/site/content/en/docs/tutorials/volume_snapshots_and_csi.md b/site/content/en/docs/tutorials/volume_snapshots_and_csi.md new file mode 100644 index 0000000000..53c3ade5a8 --- /dev/null +++ b/site/content/en/docs/tutorials/volume_snapshots_and_csi.md @@ -0,0 +1,46 @@ +--- +title: "CSI Driver and Volume Snapshots" +linkTitle: "CSI Driver and Volume Snapshots" +weight: 1 +date: 2020-08-06 +description: > + CSI Driver and Volume Snapshots +--- + +## Overview + +This tutorial explains how to set up the CSI Hostpath Driver in minikube and create volume snapshots. + +## Prerequisites + +- latest version of minikube + +## Tutorial + +Support for volume snapshots in minikube is provided through the `volumesnapshots` addon. This addon provisions the required +CRDs and deploys the Volume Snapshot Controller. It is disabled by default. + +Furthermore, the default storage provider in minikube does not implement the CSI interface and thus is NOT capable of creating/handling +volume snapshots. For that, you must first deploy a CSI driver. To make this step easy, minikube offers the `csi-hostpath-driver` addon, +which deploys the [CSI Hostpath Driver](https://github.com/kubernetes-csi/csi-driver-host-path). This addon is disabled +by default as well. + +Thus, to utilize the volume snapshots functionality, you must: + +1\) enable the `volumesnapshots` addon AND\ +2a\) either enable the `csi-hostpath-driver` addon OR\ +2b\) deploy your own CSI driver + +You can enable/disable either of the above-mentioned addons using +```shell script +minikube addons enable [ADDON_NAME] +minikube addons disable [ADDON_NAME] +``` + +The `csi-hostpath-driver` addon deploys its required resources into the `kube-system` namespace and sets up a dedicated +storage class called `csi-hostpath-sc` that you need to reference in your PVCs. The driver itself is created under the +name `hostpath.csi.k8s.io`. Use this wherever necessary (e.g. snapshot class definitions). + +Once both addons are enabled, you can create persistent volumes and snapshots in the standard way (for a quick test of +volume snapshots, you can find some example yaml files along with a step-by-step guide [here](https://kubernetes-csi.github.io/docs/snapshot-restore-feature.html)). +The driver stores all persistent volumes in the `/var/lib/csi-hostpath-data/` directory inside the minikube node.
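+
+For a quick end-to-end sketch of the workflow (the `pvc.yaml` and `snapshot.yaml` file names below are hypothetical placeholders for manifests you would write yourself, e.g. following the linked examples):
+
+```shell script
+# Enable snapshot support first, then the CSI driver that implements it.
+minikube addons enable volumesnapshots
+minikube addons enable csi-hostpath-driver
+
+# Create a PVC that references the csi-hostpath-sc storage class,
+# then snapshot it with a VolumeSnapshot object.
+kubectl apply -f pvc.yaml
+kubectl apply -f snapshot.yaml
+
+# Check whether the snapshot is ready to use.
+kubectl get volumesnapshot -o jsonpath='{.items[*].status.readyToUse}'
+```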
diff --git a/test/integration/addons_test.go b/test/integration/addons_test.go index e90336b54a..b0d03fe5d6 100644 --- a/test/integration/addons_test.go +++ b/test/integration/addons_test.go @@ -40,7 +40,7 @@ func TestAddons(t *testing.T) { ctx, cancel := context.WithTimeout(context.Background(), Minutes(40)) defer Cleanup(t, profile, cancel) - args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "--addons=registry", "--addons=metrics-server", "--addons=helm-tiller", "--addons=olm"}, StartArgs()...) + args := append([]string{"start", "-p", profile, "--wait=false", "--memory=2600", "--alsologtostderr", "--addons=registry", "--addons=metrics-server", "--addons=helm-tiller", "--addons=olm", "--addons=volumesnapshots", "--addons=csi-hostpath-driver"}, StartArgs()...) if !NoneDriver() { // none doesn't support ingress args = append(args, "--addons=ingress") } @@ -60,6 +60,7 @@ func TestAddons(t *testing.T) { {"MetricsServer", validateMetricsServerAddon}, {"HelmTiller", validateHelmTillerAddon}, {"Olm", validateOlmAddon}, + {"CSI", validateCSIDriverAndSnapshots}, } for _, tc := range tests { tc := tc @@ -398,3 +399,108 @@ func validateOlmAddon(ctx context.Context, t *testing.T, profile string) { t.Errorf("failed checking operator installed: %v", err.Error()) } } + +func validateCSIDriverAndSnapshots(ctx context.Context, t *testing.T, profile string) { + defer PostMortemLogs(t, profile) + + client, err := kapi.Client(profile) + if err != nil { + t.Fatalf("failed to get Kubernetes client for %s: %v", profile, err) + } + + start := time.Now() + if err := kapi.WaitForPods(client, "kube-system", "kubernetes.io/minikube-addons=csi-hostpath-driver", Minutes(6)); err != nil { + t.Errorf("failed waiting for csi-hostpath-driver pods to stabilize: %v", err) + } + t.Logf("csi-hostpath-driver pods stabilized in %s", time.Since(start)) + + // create sample PVC + rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "pvc.yaml"))) + if err != nil { + t.Logf("creating sample PVC with %s failed: %v", rr.Command(), err) + } + + if err := PVCWait(ctx, t, profile, "default", "hpvc", Minutes(6)); err != nil { + t.Fatalf("failed waiting for PVC hpvc: %v", err) + } + + // create sample pod with the PVC + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "pv-pod.yaml"))) + if err != nil { + t.Logf("creating pod with %s failed: %v", rr.Command(), err) + } + + if _, err := PodWait(ctx, t, profile, "default", "app=task-pv-pod", Minutes(6)); err != nil { + t.Fatalf("failed waiting for pod task-pv-pod: %v", err) + } + + // create sample snapshotclass + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "snapshotclass.yaml"))) + if err != nil { + t.Logf("creating snapshotclass with %s failed: %v", rr.Command(), err) + } + + // create volume snapshot + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "snapshot.yaml"))) + if err != nil { + t.Logf("creating volume snapshot with %s failed: %v", rr.Command(), err) + } + + if err := VolumeSnapshotWait(ctx, t, profile, "default", "new-snapshot-demo", Minutes(6)); err != nil { + t.Fatalf("failed waiting for volume snapshot new-snapshot-demo: %v", err) + } + +
// delete pod + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "pod", "task-pv-pod")) + if err != nil { + t.Logf("deleting pod with %s failed: %v", rr.Command(), err) + } + + // delete pvc + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "pvc", "hpvc")) + if err != nil { + t.Logf("deleting pvc with %s failed: %v", rr.Command(), err) + } + + // restore pvc from snapshot + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "pvc-restore.yaml"))) + if err != nil { + t.Logf("creating pvc with %s failed: %v", rr.Command(), err) + } + + if err = PVCWait(ctx, t, profile, "default", "hpvc-restore", Minutes(6)); err != nil { + t.Fatalf("failed waiting for PVC hpvc-restore: %v", err) + } + + // create pod from restored snapshot + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "csi-hostpath-driver", "pv-pod-restore.yaml"))) + if err != nil { + t.Logf("creating pod with %s failed: %v", rr.Command(), err) + } + + if _, err := PodWait(ctx, t, profile, "default", "app=task-pv-pod-restore", Minutes(6)); err != nil { + t.Fatalf("failed waiting for pod task-pv-pod-restore: %v", err) + } + + // CLEANUP + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "pod", "task-pv-pod-restore")) + if err != nil { + t.Logf("cleanup with %s failed: %v", rr.Command(), err) + } + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "pvc", "hpvc-restore")) + if err != nil { + t.Logf("cleanup with %s failed: %v", rr.Command(), err) + } + rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "delete", "volumesnapshot", "new-snapshot-demo")) + if err != nil { + t.Logf("cleanup with %s failed: %v", rr.Command(), err) + } + rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "csi-hostpath-driver", "--alsologtostderr", "-v=1")) + if err != nil { + t.Errorf("failed to disable csi-hostpath-driver addon: args %q: %v", rr.Command(), err) + } + rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "addons", "disable", "volumesnapshots", "--alsologtostderr", "-v=1")) + if err != nil { + t.Errorf("failed to disable volumesnapshots addon: args %q: %v", rr.Command(), err) + } +} diff --git a/test/integration/helpers_test.go b/test/integration/helpers_test.go index 75b6f9d22e..c1bbae42c3 100644 --- a/test/integration/helpers_test.go +++ b/test/integration/helpers_test.go @@ -29,6 +29,7 @@ import ( "fmt" "io/ioutil" "os/exec" + "strconv" "strings" "testing" "time" @@ -372,6 +373,56 @@ func PodWait(ctx context.Context, t *testing.T, profile string, ns string, selec return names, fmt.Errorf("%s: %v", fmt.Sprintf("%s within %s", selector, timeout), err) } +// PVCWait waits for a persistent volume claim to reach the Bound phase +func PVCWait(ctx context.Context, t *testing.T, profile string, ns string, name string, timeout time.Duration) error { + t.Helper() + + t.Logf("(dbg) %s: waiting %s for pvc %q in namespace %q ...", t.Name(), timeout, name, ns) + + f := func() (bool, error) { + ret, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "pvc", name, "-o", "jsonpath={.status.phase}", "-n", ns)) + if err != nil { + t.Logf("%s: WARNING: PVC get for %q %q returned: %v", t.Name(), ns, name, err) + return false, nil + } + + pvc :=
strings.TrimSpace(ret.Stdout.String()) + if pvc == string(core.ClaimBound) { + return true, nil + } else if pvc == string(core.ClaimLost) { + return true, fmt.Errorf("PVC %q is LOST", name) + } + return false, nil + } + + return wait.PollImmediate(1*time.Second, timeout, f) +} + +// VolumeSnapshotWait waits for volume snapshot to be ready to use +func VolumeSnapshotWait(ctx context.Context, t *testing.T, profile string, ns string, name string, timeout time.Duration) error { + t.Helper() + + t.Logf("(dbg) %s: waiting %s for volume snapshot %q in namespace %q ...", t.Name(), timeout, name, ns) + + f := func() (bool, error) { + res, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "get", "volumesnapshot", name, "-o", "jsonpath={.status.readyToUse}", "-n", ns)) + if err != nil { + t.Logf("%s: WARNING: volume snapshot get for %q %q returned: %v", t.Name(), ns, name, err) + return false, nil + } + + isReady, err := strconv.ParseBool(strings.TrimSpace(res.Stdout.String())) + if err != nil { + t.Logf("%s: WARNING: volume snapshot get for %q %q returned: %v", t.Name(), ns, name, res.Stdout.String()) + return false, nil + } + + return isReady, nil + } + + return wait.PollImmediate(1*time.Second, timeout, f) +} + // Status returns a minikube component status as a string func Status(ctx context.Context, t *testing.T, path string, profile string, key string, node string) string { t.Helper() diff --git a/test/integration/testdata/csi-hostpath-driver/pv-pod-restore.yaml b/test/integration/testdata/csi-hostpath-driver/pv-pod-restore.yaml new file mode 100644 index 0000000000..6a544d18d3 --- /dev/null +++ b/test/integration/testdata/csi-hostpath-driver/pv-pod-restore.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Pod +metadata: + name: task-pv-pod-restore + labels: + app: task-pv-pod-restore +spec: + volumes: + - name: task-pv-storage + persistentVolumeClaim: + claimName: hpvc-restore + containers: + - name: task-pv-container + image: nginx + ports: + - containerPort: 80 + name: "http-server" + volumeMounts: + - mountPath: "/usr/share/nginx/html" + name: task-pv-storage + + diff --git a/test/integration/testdata/csi-hostpath-driver/pv-pod.yaml b/test/integration/testdata/csi-hostpath-driver/pv-pod.yaml new file mode 100644 index 0000000000..62df999647 --- /dev/null +++ b/test/integration/testdata/csi-hostpath-driver/pv-pod.yaml @@ -0,0 +1,22 @@ +apiVersion: v1 +kind: Pod +metadata: + name: task-pv-pod + labels: + app: task-pv-pod +spec: + volumes: + - name: task-pv-storage + persistentVolumeClaim: + claimName: hpvc + containers: + - name: task-pv-container + image: nginx + ports: + - containerPort: 80 + name: "http-server" + volumeMounts: + - mountPath: "/usr/share/nginx/html" + name: task-pv-storage + + diff --git a/test/integration/testdata/csi-hostpath-driver/pvc-restore.yaml b/test/integration/testdata/csi-hostpath-driver/pvc-restore.yaml new file mode 100644 index 0000000000..942d0cf8a8 --- /dev/null +++ b/test/integration/testdata/csi-hostpath-driver/pvc-restore.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: hpvc-restore +spec: + storageClassName: csi-hostpath-sc + dataSource: + name: new-snapshot-demo + kind: VolumeSnapshot + apiGroup: snapshot.storage.k8s.io + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/test/integration/testdata/csi-hostpath-driver/pvc.yaml b/test/integration/testdata/csi-hostpath-driver/pvc.yaml new file mode 100644 index 0000000000..cb3c4560dd --- /dev/null +++
b/test/integration/testdata/csi-hostpath-driver/pvc.yaml @@ -0,0 +1,11 @@ +apiVersion: v1 +kind: PersistentVolumeClaim +metadata: + name: hpvc +spec: + storageClassName: csi-hostpath-sc + accessModes: + - ReadWriteOnce + resources: + requests: + storage: 1Gi diff --git a/test/integration/testdata/csi-hostpath-driver/snapshot.yaml b/test/integration/testdata/csi-hostpath-driver/snapshot.yaml new file mode 100644 index 0000000000..86a102b88d --- /dev/null +++ b/test/integration/testdata/csi-hostpath-driver/snapshot.yaml @@ -0,0 +1,8 @@ +apiVersion: snapshot.storage.k8s.io/v1beta1 +kind: VolumeSnapshot +metadata: + name: new-snapshot-demo +spec: + volumeSnapshotClassName: csi-hostpath-snapclass + source: + persistentVolumeClaimName: hpvc diff --git a/test/integration/testdata/csi-hostpath-driver/snapshotclass.yaml b/test/integration/testdata/csi-hostpath-driver/snapshotclass.yaml new file mode 100644 index 0000000000..892dfd0c83 --- /dev/null +++ b/test/integration/testdata/csi-hostpath-driver/snapshotclass.yaml @@ -0,0 +1,6 @@ +apiVersion: snapshot.storage.k8s.io/v1beta1 +kind: VolumeSnapshotClass +metadata: + name: csi-hostpath-snapclass +driver: hostpath.csi.k8s.io #csi-hostpath +deletionPolicy: Delete From dcd00b2150f3555c3a52bd6250af880790185a90 Mon Sep 17 00:00:00 2001 From: Dean Coakley Date: Tue, 8 Sep 2020 23:37:25 +0100 Subject: [PATCH 06/21] Make prune conditional. Add podman to prune instruction --- pkg/minikube/machine/advice.go | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pkg/minikube/machine/advice.go b/pkg/minikube/machine/advice.go index 0f802567ae..268ab56788 100644 --- a/pkg/minikube/machine/advice.go +++ b/pkg/minikube/machine/advice.go @@ -39,10 +39,12 @@ func MaybeDisplayAdvice(err error, driver string) { if errors.Is(err, oci.ErrExitedUnexpectedly) || errors.Is(err, oci.ErrDaemonInfo) { out.T(style.Tip, "If you are still interested to make {{.driver_name}} driver work. The following suggestions might help you get passed this issue:", out.V{"driver_name": driver}) - out.T(style.Empty, ` + if driver == oci.Docker || driver == oci.Podman { + out.T(style.Empty, ` - Prune unused {{.driver_name}} images, volumes, networks and abandoned containers. 
- docker system prune --volumes`, out.V{"driver_name": driver}) + {{.driver_name}} system prune --volumes`, out.V{"driver_name": driver}) + } out.T(style.Empty, ` - Restart your {{.driver_name}} service`, out.V{"driver_name": driver}) if runtime.GOOS != "linux" { From c401b426225c7daf32834a11d82608fa3795a721 Mon Sep 17 00:00:00 2001 From: Thomas Stromberg Date: Wed, 9 Sep 2020 11:20:45 -0700 Subject: [PATCH 07/21] Update default Kubernetes release to v1.19.1 --- pkg/minikube/bootstrapper/images/images.go | 9 ++++++++- pkg/minikube/constants/constants.go | 4 ++-- site/content/en/docs/commands/start.md | 2 +- 3 files changed, 11 insertions(+), 4 deletions(-) diff --git a/pkg/minikube/bootstrapper/images/images.go b/pkg/minikube/bootstrapper/images/images.go index 631e62510e..7c685777f1 100644 --- a/pkg/minikube/bootstrapper/images/images.go +++ b/pkg/minikube/bootstrapper/images/images.go @@ -96,7 +96,8 @@ func etcd(v semver.Version, mirror string) string { // Should match `DefaultEtcdVersion` in: // https://github.com/kubernetes/kubernetes/blob/master/cmd/kubeadm/app/constants/constants.go - ev := "3.4.9-1" + ev := "3.4.13-0" + switch v.Minor { case 17, 18: ev = "3.4.3-0" @@ -109,6 +110,12 @@ func etcd(v semver.Version, mirror string) string { case 11: ev = "3.2.18" } + + // An awkward special case for v1.19.0 - do not imitate unless necessary + if v.Equals(semver.MustParse("1.19.0")) { + ev = "3.4.9-1" + } + return path.Join(kubernetesRepo(mirror), "etcd"+archTag(needsArchSuffix)+ev) } diff --git a/pkg/minikube/constants/constants.go b/pkg/minikube/constants/constants.go index ab48a890a1..7953edfb3b 100644 --- a/pkg/minikube/constants/constants.go +++ b/pkg/minikube/constants/constants.go @@ -27,10 +27,10 @@ import ( const ( // DefaultKubernetesVersion is the default Kubernetes version - DefaultKubernetesVersion = "v1.19.0" + DefaultKubernetesVersion = "v1.19.1" // NewestKubernetesVersion is the newest Kubernetes version to test against // NOTE: You may need to update coreDNS & etcd versions in pkg/minikube/bootstrapper/images/images.go - NewestKubernetesVersion = "v1.19.0" + NewestKubernetesVersion = "v1.19.1" // OldestKubernetesVersion is the oldest Kubernetes version to test against OldestKubernetesVersion = "v1.13.0" // DefaultClusterName is the default nane for the k8s cluster diff --git a/site/content/en/docs/commands/start.md b/site/content/en/docs/commands/start.md index 863ed01e15..7d58537904 100644 --- a/site/content/en/docs/commands/start.md +++ b/site/content/en/docs/commands/start.md @@ -67,7 +67,7 @@ minikube start [flags] --interactive Allow user prompts for more information (default true) --iso-url strings Locations to fetch the minikube ISO from. (default [https://storage.googleapis.com/minikube/iso/minikube-v1.13.0.iso,https://github.com/kubernetes/minikube/releases/download/v1.13.0/minikube-v1.13.0.iso,https://kubernetes.oss-cn-hangzhou.aliyuncs.com/minikube/iso/minikube-v1.13.0.iso]) --keep-context This will keep the existing kubectl context and will create a minikube context. - --kubernetes-version string The Kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.19.0, 'latest' for v1.19.0). Defaults to 'stable'. + --kubernetes-version string The Kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.19.1, 'latest' for v1.19.1). Defaults to 'stable'. 
--kvm-gpu Enable experimental NVIDIA GPU support in minikube --kvm-hidden Hide the hypervisor signature from the guest in minikube (kvm2 driver only) --kvm-network string The KVM network name. (kvm2 driver only) (default "default") From bc182ce207a1f148182d4c15d009f257dcf348d4 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Thu, 10 Sep 2020 12:58:14 -0700 Subject: [PATCH 08/21] remove docker hub images --- test/integration/functional_test.go | 34 ++++++++++++++--------------- 1 file changed, 17 insertions(+), 17 deletions(-) diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index 3fbbfdb26f..a419f25e08 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -496,11 +496,11 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) { } t.Run("cache", func(t *testing.T) { - t.Run("add", func(t *testing.T) { - for _, img := range []string{"busybox:latest", "busybox:1.28.4-glibc", "k8s.gcr.io/pause:latest"} { + t.Run("add_remote", func(t *testing.T) { + for _, img := range []string{"k8s.gcr.io/pause:3.0", "k8s.gcr.io/pause:3.3", "k8s.gcr.io/pause:latest"} { rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", img)) if err != nil { - t.Errorf("failed to cache add image %q. args %q err %v", img, rr.Command(), err) + t.Errorf("failed to 'cache add' remote image %q. args %q err %v", img, rr.Command(), err) } } }) @@ -514,7 +514,7 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) { message := []byte("FROM scratch\nADD Dockerfile /x") err = ioutil.WriteFile(filepath.Join(dname, "Dockerfile"), message, 0644) if err != nil { - t.Fatalf("unable to writefile: %v", err) + t.Fatalf("unable to write Dockerfile: %v", err) } img := "minikube-local-cache-test:" + profile @@ -525,14 +525,14 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", img)) if err != nil { - t.Errorf("failed to add local image %q. args %q err %v", img, rr.Command(), err) + t.Errorf("failed to 'cache add' local image %q. args %q err %v", img, rr.Command(), err) } }) - t.Run("delete_busybox:1.28.4-glibc", func(t *testing.T) { - rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", "busybox:1.28.4-glibc")) + t.Run("delete_k8s.gcr.io/pause:3.3", func(t *testing.T) { + rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", "k8s.gcr.io/pause:3.3")) if err != nil { - t.Errorf("failed to delete image busybox:1.28.4-glibc from cache. args %q: %v", rr.Command(), err) + t.Errorf("failed to delete image k8s.gcr.io/pause:3.3 from cache. args %q: %v", rr.Command(), err) } }) @@ -542,10 +542,10 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) { t.Errorf("failed to do cache list. 
args %q: %v", rr.Command(), err) } if !strings.Contains(rr.Output(), "k8s.gcr.io/pause") { - t.Errorf("expected 'cache list' output to include 'k8s.gcr.io/pause' but got:\n ***%s***", rr.Output()) + t.Errorf("expected 'cache list' output to include 'k8s.gcr.io/pause' but got: ***%s***", rr.Output()) } - if strings.Contains(rr.Output(), "busybox:1.28.4-glibc") { - t.Errorf("expected 'cache list' output not to include busybox:1.28.4-glibc but got:\n ***%s***", rr.Output()) + if strings.Contains(rr.Output(), "k8s.gcr.io/pause:3.3") { + t.Errorf("expected 'cache list' output not to include k8s.gcr.io/pause:3.3 but got: ***%s***", rr.Output()) } }) @@ -554,24 +554,24 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) { if err != nil { t.Errorf("failed to get images by %q ssh %v", rr.Command(), err) } - if !strings.Contains(rr.Output(), "1.28.4-glibc") { - t.Errorf("expected '1.28.4-glibc' to be in the output but got *%s*", rr.Output()) + if !strings.Contains(rr.Output(), "0184c1613d929") { + t.Errorf("expected sha for pause:3.3 '0184c1613d929' to be in the output but got *%s*", rr.Output()) } }) t.Run("cache_reload", func(t *testing.T) { // deleting image inside minikube node manually and expecting reload to bring it back - img := "busybox:latest" + img := "k8s.gcr.io/pause:latest" // deleting image inside minikube node manually rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo", "docker", "rmi", img)) if err != nil { - t.Errorf("failed to delete inside the node %q : %v", rr.Command(), err) + t.Errorf("failed to manualy delete image %q : %v", rr.Command(), err) } // make sure the image is deleted. rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo", "crictl", "inspecti", img)) if err == nil { - t.Errorf("expected an error. because image should not exist. but got *nil error* ! cmd: %q", rr.Command()) + t.Errorf("expected an error but got no error. image should not exist. ! cmd: %q", rr.Command()) } // minikube cache reload. rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "reload")) @@ -587,7 +587,7 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) { // delete will clean up the cached images since they are global and all other tests will load it for no reason t.Run("delete", func(t *testing.T) { - for _, img := range []string{"busybox:latest", "k8s.gcr.io/pause:latest"} { + for _, img := range []string{"k8s.gcr.io/pause:3.0", "k8s.gcr.io/pause:latest"} { rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", img)) if err != nil { t.Errorf("failed to delete %s from cache. args %q: %v", img, rr.Command(), err) From 37e521272c90461270bb0d06ff8f39ccee5a6470 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anders=20F=20Bj=C3=B6rklund?= Date: Sun, 13 Sep 2020 17:24:19 +0200 Subject: [PATCH 09/21] Fix copy/paste error in virtualbox nic-type help It used type "host only" twice, instead of "nat". 
--- cmd/minikube/cmd/start_flags.go | 2 +- site/content/en/docs/commands/start.md | 2 +- translations/strings.txt | 1 + 3 files changed, 3 insertions(+), 2 deletions(-) diff --git a/cmd/minikube/cmd/start_flags.go b/cmd/minikube/cmd/start_flags.go index e0bc8af450..4323b69d14 100644 --- a/cmd/minikube/cmd/start_flags.go +++ b/cmd/minikube/cmd/start_flags.go @@ -184,7 +184,7 @@ func initDriverFlags() { startCmd.Flags().Bool(hostDNSResolver, true, "Enable host resolver for NAT DNS requests (virtualbox driver only)") startCmd.Flags().Bool(noVTXCheck, false, "Disable checking for the availability of hardware virtualization before the vm is started (virtualbox driver only)") startCmd.Flags().String(hostOnlyNicType, "virtio", "NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)") - startCmd.Flags().String(natNicType, "virtio", "NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)") + startCmd.Flags().String(natNicType, "virtio", "NIC Type used for nat network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)") // hyperkit startCmd.Flags().StringSlice(vsockPorts, []string{}, "List of guest VSock ports that should be exposed as sockets on the host (hyperkit driver only)") diff --git a/site/content/en/docs/commands/start.md b/site/content/en/docs/commands/start.md index 863ed01e15..48b2f53caa 100644 --- a/site/content/en/docs/commands/start.md +++ b/site/content/en/docs/commands/start.md @@ -75,7 +75,7 @@ minikube start [flags] --memory string Amount of RAM to allocate to Kubernetes (format: [], where unit = b, k, m or g). --mount This will start the mount daemon and automatically mount files into minikube. --mount-string string The argument to pass the minikube mount command on start. - --nat-nic-type string NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only) (default "virtio") + --nat-nic-type string NIC Type used for nat network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only) (default "virtio") --native-ssh Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'. (default true) --network-plugin string Kubelet network plug-in to use (default: auto) --nfs-share strings Local folders to share with Guest via NFS mounts (hyperkit driver only) diff --git a/translations/strings.txt b/translations/strings.txt index cb12df24b5..c80809c9d9 100644 --- a/translations/strings.txt +++ b/translations/strings.txt @@ -279,6 +279,7 @@ "Multiple errors deleting profiles": "", "Multiple minikube profiles were found -": "", "NIC Type used for host only network. One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)": "", + "NIC Type used for nat network. 
One of Am79C970A, Am79C973, 82540EM, 82543GC, 82545EM, or virtio (virtualbox driver only)": "", "NOTE: This process must stay alive for the mount to be accessible ...": "", "Networking and Connectivity Commands:": "", "No changes required for the \"{{.context}}\" context": "", From 3dec9944ded6e81880df1542c353c04a97f4d866 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anders=20F=20Bj=C3=B6rklund?= Date: Sun, 13 Sep 2020 17:33:05 +0200 Subject: [PATCH 10/21] The cri-o repository moved to a separate organization --- site/content/en/docs/handbook/config.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/site/content/en/docs/handbook/config.md b/site/content/en/docs/handbook/config.md index 8d6e2995de..9012eb1444 100644 --- a/site/content/en/docs/handbook/config.md +++ b/site/content/en/docs/handbook/config.md @@ -96,7 +96,7 @@ minikube start --container-runtime=docker Other options available are: * [containerd](https://github.com/containerd/containerd) -* [crio](https://github.com/kubernetes-sigs/cri-o) +* [cri-o](https://github.com/cri-o/cri-o) ## Environment variables From af5d6d060b9737dcfbb58490f47573c14bc5aeb1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anders=20F=20Bj=C3=B6rklund?= Date: Sun, 13 Sep 2020 22:17:10 +0200 Subject: [PATCH 11/21] Fix ForwardedPort for podman version 2.0.1 and up Increasing Docker compatibility broke Podman v1 --- pkg/drivers/kic/oci/network.go | 15 +++++++++++++++ 1 file changed, 15 insertions(+) diff --git a/pkg/drivers/kic/oci/network.go b/pkg/drivers/kic/oci/network.go index cafaa6a2b3..ca9e53bdb6 100644 --- a/pkg/drivers/kic/oci/network.go +++ b/pkg/drivers/kic/oci/network.go @@ -24,6 +24,7 @@ import ( "strconv" "strings" + "github.com/blang/semver" "github.com/golang/glog" "github.com/pkg/errors" ) @@ -124,8 +125,22 @@ func containerGatewayIP(ociBin, containerName string) (net.IP, error) { func ForwardedPort(ociBin string, ociID string, contPort int) (int, error) { var rr *RunResult var err error + var v semver.Version if ociBin == Podman { + rr, err = runCmd(exec.Command(Podman, "version", "--format", "{{.Version}}")) + if err != nil { + return 0, errors.Wrapf(err, "podman version") + } + output := strings.TrimSpace(rr.Stdout.String()) + v, err = semver.Make(output) + if err != nil { + return 0, errors.Wrapf(err, "podman version") + } + } + + // podman 2.0.1 introduced docker syntax for .NetworkSettings.Ports (podman#5380) + if ociBin == Podman && v.LT(semver.Version{Major: 2, Minor: 0, Patch: 1}) { rr, err = runCmd(exec.Command(ociBin, "container", "inspect", "-f", fmt.Sprintf("{{range .NetworkSettings.Ports}}{{if eq .ContainerPort %s}}{{.HostPort}}{{end}}{{end}}", fmt.Sprint(contPort)), ociID)) if err != nil { return 0, errors.Wrapf(err, "get port %d for %q", contPort, ociID) From 48d44221a9868bc853a036e64f272ef2a0b6540a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anders=20F=20Bj=C3=B6rklund?= Date: Sun, 13 Sep 2020 22:28:00 +0200 Subject: [PATCH 12/21] Don't validate Docker storage driver for Podman For podman, "overlay" and "overlay2" are the same --- cmd/minikube/cmd/start.go | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/cmd/minikube/cmd/start.go b/cmd/minikube/cmd/start.go index 21214cc48c..fc3acd592d 100644 --- a/cmd/minikube/cmd/start.go +++ b/cmd/minikube/cmd/start.go @@ -252,7 +252,9 @@ func provisionWithDriver(cmd *cobra.Command, ds registry.DriverState, existing * validateFlags(cmd, driverName) validateUser(driverName) - validateDockerStorageDriver(driverName) + if driverName == oci.Docker { + 
validateDockerStorageDriver(driverName) + } // Download & update the driver, even in --download-only mode if !viper.GetBool(dryRun) { From b4d4be8e23e48518bbdfdff26ff8d0ad160c6ded Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anders=20F=20Bj=C3=B6rklund?= Date: Sun, 13 Sep 2020 23:03:36 +0200 Subject: [PATCH 13/21] Add podman storage warning to match docker --- pkg/minikube/machine/start.go | 20 ++++++++++++++++++-- pkg/minikube/reason/reason.go | 9 +++++++++ 2 files changed, 27 insertions(+), 2 deletions(-) diff --git a/pkg/minikube/machine/start.go b/pkg/minikube/machine/start.go index ae0bebc0b8..81461f4880 100644 --- a/pkg/minikube/machine/start.go +++ b/pkg/minikube/machine/start.go @@ -36,6 +36,7 @@ import ( "github.com/juju/mutex" "github.com/pkg/errors" "github.com/spf13/viper" + "k8s.io/minikube/pkg/drivers/kic/oci" "k8s.io/minikube/pkg/minikube/command" "k8s.io/minikube/pkg/minikube/config" "k8s.io/minikube/pkg/minikube/constants" @@ -218,17 +219,32 @@ func postStartValidations(h *host.Host, drvName string) { glog.Warningf("error getting command runner: %v", err) } + var kind reason.Kind + var name string + if drvName == oci.Docker { + kind = reason.RsrcInsufficientDockerStorage + name = "Docker" + } + if drvName == oci.Podman { + kind = reason.RsrcInsufficientPodmanStorage + name = "Podman" + } + if name == "" { + glog.Warningf("unknown KIC driver: %v", drvName) + return + } + // make sure /var isn't full, as pod deployments will fail if it is percentageFull, err := DiskUsed(r, "/var") if err != nil { glog.Warningf("error getting percentage of /var that is free: %v", err) } if percentageFull >= 99 { - exit.Message(reason.RsrcInsufficientDockerStorage, `Docker is out of disk space! (/var is at {{.p}}% of capacity)`, out.V{"p": percentageFull}) + exit.Message(kind, `{{.n}} is out of disk space! (/var is at {{.p}}% of capacity)`, out.V{"n": name, "p": percentageFull}) } if percentageFull >= 85 { - out.WarnReason(reason.RsrcInsufficientDockerStorage, `Docker is nearly out of disk space, which may cause deployments to fail! ({{.p}}% of capacity)`, out.V{"p": percentageFull}) + out.WarnReason(kind, `{{.n}} is nearly out of disk space, which may cause deployments to fail! ({{.p}}% of capacity)`, out.V{"n": name, "p": percentageFull}) } } diff --git a/pkg/minikube/reason/reason.go b/pkg/minikube/reason/reason.go index 21b4bb8b95..ee9a3faf6b 100644 --- a/pkg/minikube/reason/reason.go +++ b/pkg/minikube/reason/reason.go @@ -176,6 +176,15 @@ var ( 3. Run "minikube ssh -- docker system prune" if using the docker container runtime`, Issues: []int{9024}, } + RsrcInsufficientPodmanStorage = Kind{ + ID: "RSRC_PODMAN_STORAGE", + ExitCode: ExInsufficientStorage, + Advice: `Try at least one of the following to free up space on the device: + + 1. Run "sudo podman system prune" to remove unused podman data + 2. 
Run "minikube ssh -- docker system prune" if using the docker container runtime`, + Issues: []int{9024}, + } RsrcInsufficientStorage = Kind{ID: "RSRC_INSUFFICIENT_STORAGE", ExitCode: ExInsufficientStorage, Style: style.UnmetRequirement} From f718e39ab962a3ee74324fe1275d7ded6a099ae2 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anders=20F=20Bj=C3=B6rklund?= Date: Tue, 15 Sep 2020 12:54:39 +0200 Subject: [PATCH 14/21] Make sure CFS_BANDWIDTH is available for --cpus --- pkg/drivers/kic/oci/oci.go | 19 ++++++++++++++++++- 1 file changed, 18 insertions(+), 1 deletion(-) diff --git a/pkg/drivers/kic/oci/oci.go b/pkg/drivers/kic/oci/oci.go index b388140f71..ebc32b6af4 100644 --- a/pkg/drivers/kic/oci/oci.go +++ b/pkg/drivers/kic/oci/oci.go @@ -156,7 +156,24 @@ func CreateContainerNode(p CreateParams) error { runArgs = append(runArgs, "--security-opt", "apparmor=unconfined") } - runArgs = append(runArgs, fmt.Sprintf("--cpus=%s", p.CPUs)) + cpuCfsPeriod := true + cpuCfsQuota := true + if runtime.GOOS == "linux" { + if _, err := os.Stat("/sys/fs/cgroup/cpu/cpu.cfs_period_us"); os.IsNotExist(err) { + cpuCfsPeriod = false + } + if _, err := os.Stat("/sys/fs/cgroup/cpu/cpu.cfs_quota_us"); os.IsNotExist(err) { + cpuCfsQuota = false + } + if !cpuCfsPeriod || !cpuCfsQuota { + // requires CONFIG_CFS_BANDWIDTH + glog.Warning("Your kernel does not support CPU cfs period/quota or the cgroup is not mounted.") + } + } + + if cpuCfsPeriod && cpuCfsQuota { + runArgs = append(runArgs, fmt.Sprintf("--cpus=%s", p.CPUs)) + } memcgSwap := true if runtime.GOOS == "linux" { From 359efd68b280b02f1476ab6848517e131e5939cb Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Anders=20F=20Bj=C3=B6rklund?= Date: Tue, 15 Sep 2020 17:13:39 +0200 Subject: [PATCH 15/21] Avoid setting time for memory assets These do not have any file modification time Previously used '0001-01-01 00:00:00 +0000' --- pkg/minikube/command/ssh_runner.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/pkg/minikube/command/ssh_runner.go b/pkg/minikube/command/ssh_runner.go index e8b79cb031..0a25bb9bc5 100644 --- a/pkg/minikube/command/ssh_runner.go +++ b/pkg/minikube/command/ssh_runner.go @@ -263,7 +263,7 @@ func (s *SSHRunner) Copy(f assets.CopyableFile) error { mtime, err := f.GetModTime() if err != nil { glog.Infof("error getting modtime for %s: %v", dst, err) - } else { + } else if mtime != (time.Time{}) { scp += fmt.Sprintf(" && sudo touch -d \"%s\" %s", mtime.Format(layout), dst) } out, err := sess.CombinedOutput(scp) From f04a7c5c16ad488940da5400c379907540484980 Mon Sep 17 00:00:00 2001 From: Medya Gh Date: Wed, 16 Sep 2020 15:01:15 -0700 Subject: [PATCH 16/21] spell --- test/integration/functional_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index a419f25e08..baedabd59e 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -566,7 +566,7 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) { rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo", "docker", "rmi", img)) if err != nil { - t.Errorf("failed to manualy delete image %q : %v", rr.Command(), err) + t.Errorf("failed to manually delete image %q : %v", rr.Command(), err) } // make sure the image is deleted. 
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", "sudo", "crictl", "inspecti", img)) From eab92dc4557fdea5ec4c091e876d6122d0d59220 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Thu, 17 Sep 2020 10:22:28 -0700 Subject: [PATCH 17/21] Reduce cyclomatic complexity of CreateContainerNode to fix lint --- pkg/drivers/kic/oci/oci.go | 46 ++++++++++++++------------------------ 1 file changed, 17 insertions(+), 29 deletions(-) diff --git a/pkg/drivers/kic/oci/oci.go b/pkg/drivers/kic/oci/oci.go index ebc32b6af4..b5a98d877d 100644 --- a/pkg/drivers/kic/oci/oci.go +++ b/pkg/drivers/kic/oci/oci.go @@ -146,14 +146,31 @@ func CreateContainerNode(p CreateParams) error { "--label", p.NodeLabel, } + // https://www.freedesktop.org/wiki/Software/systemd/ContainerInterface/ + var virtualization string if p.OCIBinary == Podman { // enable execing in /var // podman mounts var/lib with no-exec by default https://github.com/containers/libpod/issues/5103 runArgs = append(runArgs, "--volume", fmt.Sprintf("%s:/var:exec", p.Name)) + if _, err := os.Stat("/sys/fs/cgroup/memory/memsw.limit_in_bytes"); runtime.GOOS == "linux" && os.IsNotExist(err) { + // requires CONFIG_MEMCG_SWAP_ENABLED or cgroup_enable=memory in grub + glog.Warning("Your kernel does not support swap limit capabilities or the cgroup is not mounted.") + } else { + runArgs = append(runArgs, fmt.Sprintf("--memory=%s", p.Memory)) + // Disable swap by setting the value to match + runArgs = append(runArgs, fmt.Sprintf("--memory-swap=%s", p.Memory)) + } + virtualization = "podman" // VIRTUALIZATION_PODMAN } if p.OCIBinary == Docker { runArgs = append(runArgs, "--volume", fmt.Sprintf("%s:/var", p.Name)) // ignore apparmore github actions docker: https://github.com/kubernetes/minikube/issues/7624 runArgs = append(runArgs, "--security-opt", "apparmor=unconfined") + + runArgs = append(runArgs, fmt.Sprintf("--memory=%s", p.Memory)) + // Disable swap by setting the value to match + runArgs = append(runArgs, fmt.Sprintf("--memory-swap=%s", p.Memory)) + + virtualization = "docker" // VIRTUALIZATION_DOCKER } cpuCfsPeriod := true @@ -175,35 +192,6 @@ func CreateContainerNode(p CreateParams) error { runArgs = append(runArgs, fmt.Sprintf("--cpus=%s", p.CPUs)) } - memcgSwap := true - if runtime.GOOS == "linux" { - if _, err := os.Stat("/sys/fs/cgroup/memory/memsw.limit_in_bytes"); os.IsNotExist(err) { - // requires CONFIG_MEMCG_SWAP_ENABLED or cgroup_enable=memory in grub - glog.Warning("Your kernel does not support swap limit capabilities or the cgroup is not mounted.") - memcgSwap = false - } - } - - if p.OCIBinary == Podman && memcgSwap { // swap is required for memory - runArgs = append(runArgs, fmt.Sprintf("--memory=%s", p.Memory)) - // Disable swap by setting the value to match - runArgs = append(runArgs, fmt.Sprintf("--memory-swap=%s", p.Memory)) - } - - if p.OCIBinary == Docker { - runArgs = append(runArgs, fmt.Sprintf("--memory=%s", p.Memory)) - // Disable swap by setting the value to match - runArgs = append(runArgs, fmt.Sprintf("--memory-swap=%s", p.Memory)) - } - - // https://www.freedesktop.org/wiki/Software/systemd/ContainerInterface/ - var virtualization string - if p.OCIBinary == Podman { - virtualization = "podman" // VIRTUALIZATION_PODMAN - } - if p.OCIBinary == Docker { - virtualization = "docker" // VIRTUALIZATION_DOCKER - } runArgs = append(runArgs, "-e", fmt.Sprintf("%s=%s", "container", virtualization)) for key, val := range p.Envs { From b98580c820125573a113d3a485bc76f558e87908 Mon Sep 17 00:00:00 2001 From: Sharif
Elgamal Date: Thu, 17 Sep 2020 10:31:10 -0700 Subject: [PATCH 18/21] fix up if statement --- pkg/drivers/kic/oci/oci.go | 15 +++++++++++---- 1 file changed, 11 insertions(+), 4 deletions(-) diff --git a/pkg/drivers/kic/oci/oci.go b/pkg/drivers/kic/oci/oci.go index b5a98d877d..878189e387 100644 --- a/pkg/drivers/kic/oci/oci.go +++ b/pkg/drivers/kic/oci/oci.go @@ -145,20 +145,27 @@ func CreateContainerNode(p CreateParams) error { // label th enode wuth the node ID "--label", p.NodeLabel, } + memcgSwap := true + if runtime.GOOS == "linux" { + if _, err := os.Stat("/sys/fs/cgroup/memory/memsw.limit_in_bytes"); os.IsNotExist(err) { + // requires CONFIG_MEMCG_SWAP_ENABLED or cgroup_enable=memory in grub + glog.Warning("Your kernel does not support swap limit capabilities or the cgroup is not mounted.") + memcgSwap = false + } + } // https://www.freedesktop.org/wiki/Software/systemd/ContainerInterface/ var virtualization string if p.OCIBinary == Podman { // enable execing in /var // podman mounts var/lib with no-exec by default https://github.com/containers/libpod/issues/5103 runArgs = append(runArgs, "--volume", fmt.Sprintf("%s:/var:exec", p.Name)) - if _, err := os.Stat("/sys/fs/cgroup/memory/memsw.limit_in_bytes"); runtime.GOOS == "linux" && os.IsNotExist(err) { - // requires CONFIG_MEMCG_SWAP_ENABLED or cgroup_enable=memory in grub - glog.Warning("Your kernel does not support swap limit capabilities or the cgroup is not mounted.") - } else { + + if memcgSwap { runArgs = append(runArgs, fmt.Sprintf("--memory=%s", p.Memory)) + // Disable swap by setting the value to match runArgs = append(runArgs, fmt.Sprintf("--memory-swap=%s", p.Memory)) + } + virtualization = "podman" // VIRTUALIZATION_PODMAN } if p.OCIBinary == Docker { From 2001d205103f2ad1e8bc047da58ddd4e7d7a17d0 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Thu, 17 Sep 2020 13:29:19 -0700 Subject: [PATCH 19/21] Update binaries.md --- site/content/en/docs/contrib/releasing/binaries.md | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/site/content/en/docs/contrib/releasing/binaries.md b/site/content/en/docs/contrib/releasing/binaries.md index 2a320004cd..cd0dd72056 100644 --- a/site/content/en/docs/contrib/releasing/binaries.md +++ b/site/content/en/docs/contrib/releasing/binaries.md @@ -20,13 +20,12 @@ description: > See [ISO release instructions]({{}}) -## Tag KIC base image +## Release new kicbase image -for container drivers (docker,podman), if there has been any change in Dockerfile -(and there is a -snapshot image), should tag with latest release and push to gcr and docker hub and github packages. +If there are changes to the Dockerfile for the docker and/or podman drivers +(and there is a -snapshot image), you should retag it as a new version and push it to GCR, Docker Hub, and GitHub Packages. -for example if you are releasing v0.0.13 and latest kicbase image is v0.0.12-snapshot -should tag v0.0.13 and change the [kic/types.go](https://github.com/medyagh/minikube/blob/635ff53a63e5bb1be4e1abb9067ebe502a16224e/pkg/drivers/kic/types.go#L29-L30) file as well. +For example, if you are releasing v0.0.13 and the current kicbase image tag is v0.0.12-snapshot, you should tag v0.0.13 and change [kic/types.go](https://github.com/medyagh/minikube/blob/635ff53a63e5bb1be4e1abb9067ebe502a16224e/pkg/drivers/kic/types.go#L29-L30) as well.
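+
+A sketch of the retag-and-push step, assuming the image lives at `gcr.io/k8s-minikube/kicbase` (substitute the real repository paths and the actual release version):
+
+```shell
+# Pull the snapshot tag, retag it as the release version, and push it.
+docker pull gcr.io/k8s-minikube/kicbase:v0.0.12-snapshot
+docker tag gcr.io/k8s-minikube/kicbase:v0.0.12-snapshot gcr.io/k8s-minikube/kicbase:v0.0.13
+docker push gcr.io/k8s-minikube/kicbase:v0.0.13
+```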
## Update Release Notes From 474c9c2ced24da04fc1d538a7f049f387a12eac8 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Thu, 17 Sep 2020 13:57:06 -0700 Subject: [PATCH 20/21] Fix cache functional test --- test/integration/functional_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index baedabd59e..3a042a13fd 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -497,7 +497,7 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) { t.Run("cache", func(t *testing.T) { t.Run("add_remote", func(t *testing.T) { - for _, img := range []string{"k8s.gcr.io/pause:3.0", "k8s.gcr.io/pause:3.3", "k8s.gcr.io/pause:latest"} { + for _, img := range []string{"k8s.gcr.io/pause:3.1", "k8s.gcr.io/pause:3.3", "k8s.gcr.io/pause:latest"} { rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "cache", "add", img)) if err != nil { t.Errorf("failed to 'cache add' remote image %q. args %q err %v", img, rr.Command(), err) From 4226decf841a064538c8ec6f9edf1390b52b2e15 Mon Sep 17 00:00:00 2001 From: Sharif Elgamal Date: Thu, 17 Sep 2020 14:12:48 -0700 Subject: [PATCH 21/21] delete the correct image --- test/integration/functional_test.go | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/test/integration/functional_test.go b/test/integration/functional_test.go index 3a042a13fd..52d188190a 100644 --- a/test/integration/functional_test.go +++ b/test/integration/functional_test.go @@ -587,7 +587,7 @@ func validateCacheCmd(ctx context.Context, t *testing.T, profile string) { // delete will clean up the cached images since they are global and all other tests will load it for no reason t.Run("delete", func(t *testing.T) { - for _, img := range []string{"k8s.gcr.io/pause:3.0", "k8s.gcr.io/pause:latest"} { + for _, img := range []string{"k8s.gcr.io/pause:3.1", "k8s.gcr.io/pause:latest"} { rr, err := Run(t, exec.CommandContext(ctx, Target(), "cache", "delete", img)) if err != nil { t.Errorf("failed to delete %s from cache. args %q: %v", img, rr.Command(), err)