Merge branch 'master' of github.com:kubernetes/minikube into gcp-cs

pull/12231/head
Sharif Elgamal 2021-08-26 13:32:10 -07:00
commit 55c40f8220
65 changed files with 8862 additions and 12456 deletions

View File

@ -27,6 +27,7 @@ minikube runs the latest stable release of Kubernetes, with support for standard
* [Dashboard](https://minikube.sigs.k8s.io/docs/handbook/dashboard/) - `minikube dashboard`
* [Container runtimes](https://minikube.sigs.k8s.io/docs/handbook/config/#runtime-configuration) - `minikube start --container-runtime`
* [Configure apiserver and kubelet options](https://minikube.sigs.k8s.io/docs/handbook/config/#modifying-kubernetes-defaults) via command-line flags
* Supports common [CI environments](https://github.com/minikube-ci/examples)
As well as developer-friendly features:

View File

@ -144,6 +144,77 @@ var loadImageCmd = &cobra.Command{
},
}
// readFile copies the contents of the file at path tmp to w.
func readFile(w io.Writer, tmp string) error {
r, err := os.Open(tmp)
if err != nil {
return err
}
_, err = io.Copy(w, r)
if err != nil {
return err
}
err = r.Close()
if err != nil {
return err
}
return nil
}
// saveImageCmd represents the image save command
var saveImageCmd = &cobra.Command{
Use: "save IMAGE [ARCHIVE | -]",
Short: "Save a image from minikube",
Long: "Save a image from minikube",
Example: "minikube image save image\nminikube image save image image.tar",
Run: func(cmd *cobra.Command, args []string) {
if len(args) == 0 {
exit.Message(reason.Usage, "Please provide an image in the container runtime to save from minikube via <minikube image save IMAGE_NAME>")
}
// Save images from container runtime
profile, err := config.LoadProfile(viper.GetString(config.ProfileName))
if err != nil {
exit.Error(reason.Usage, "loading profile", err)
}
if len(args) > 1 {
output = args[1]
if args[1] == "-" {
tmp, err := ioutil.TempFile("", "image.*.tar")
if err != nil {
exit.Error(reason.GuestImageSave, "Failed to get temp", err)
}
tmp.Close()
output = tmp.Name()
}
if err := machine.DoSaveImages([]string{args[0]}, output, []*config.Profile{profile}, ""); err != nil {
exit.Error(reason.GuestImageSave, "Failed to save image", err)
}
if args[1] == "-" {
err := readFile(os.Stdout, output)
if err != nil {
exit.Error(reason.GuestImageSave, "Failed to read temp", err)
}
os.Remove(output)
}
} else {
if err := machine.SaveAndCacheImages([]string{args[0]}, []*config.Profile{profile}); err != nil {
exit.Error(reason.GuestImageSave, "Failed to save image", err)
}
if imgDaemon || imgRemote {
image.UseDaemon(imgDaemon)
image.UseRemote(imgRemote)
err := image.UploadCachedImage(args[0])
if err != nil {
exit.Error(reason.GuestImageSave, "Failed to save image", err)
}
}
}
},
}
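When the archive argument is `-`, the command above materializes the image in a temporary tarball first and only then streams it to stdout, because the save helpers take a file path rather than a writer. A minimal sketch of that pattern, with a hypothetical `produce` callback standing in for `machine.DoSaveImages`:

package main

import (
    "io"
    "log"
    "os"
)

// streamThroughTemp materializes an archive at a temp path via produce,
// then copies it to stdout and removes it, mirroring the
// "minikube image save IMAGE -" branch above.
func streamThroughTemp(produce func(path string) error) error {
    tmp, err := os.CreateTemp("", "image.*.tar")
    if err != nil {
        return err
    }
    name := tmp.Name()
    tmp.Close() // produce reopens the path itself
    defer os.Remove(name)

    if err := produce(name); err != nil {
        return err
    }
    f, err := os.Open(name)
    if err != nil {
        return err
    }
    defer f.Close()
    _, err = io.Copy(os.Stdout, f)
    return err
}

func main() {
    // hypothetical producer; the real command calls machine.DoSaveImages here
    err := streamThroughTemp(func(path string) error {
        return os.WriteFile(path, []byte("pretend tar bytes\n"), 0o644)
    })
    if err != nil {
        log.Fatal(err)
    }
}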
var removeImageCmd = &cobra.Command{
Use: "rm IMAGE [IMAGE...]",
Short: "Remove one or more images",
@ -317,6 +388,9 @@ func init() {
buildImageCmd.Flags().StringArrayVar(&buildEnv, "build-env", nil, "Environment variables to pass to the build. (format: key=value)")
buildImageCmd.Flags().StringArrayVar(&buildOpt, "build-opt", nil, "Specify arbitrary flags to pass to the build. (format: key=value)")
imageCmd.AddCommand(buildImageCmd)
saveImageCmd.Flags().BoolVar(&imgDaemon, "daemon", false, "Cache image to docker daemon")
saveImageCmd.Flags().BoolVar(&imgRemote, "remote", false, "Cache image to remote registry")
imageCmd.AddCommand(saveImageCmd)
imageCmd.AddCommand(listImageCmd)
imageCmd.AddCommand(tagImageCmd)
imageCmd.AddCommand(pushImageCmd)

View File

@ -31,6 +31,7 @@ import (
"github.com/spf13/cobra"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/cluster"
"k8s.io/minikube/pkg/minikube/detect"
"k8s.io/minikube/pkg/minikube/driver"
"k8s.io/minikube/pkg/minikube/exit"
"k8s.io/minikube/pkg/minikube/mustload"
@ -111,7 +112,19 @@ var mountCmd = &cobra.Command{
var ip net.IP
var err error
if mountIP == "" {
ip, err = cluster.HostIP(co.CP.Host, co.Config.Name)
if detect.IsMicrosoftWSL() {
klog.Infof("Selecting IP for WSL. This may be incorrect...")
ip, err = func() (net.IP, error) {
conn, err := net.Dial("udp", "8.8.8.8:80")
if err != nil {
return nil, err
}
defer conn.Close()
return conn.LocalAddr().(*net.UDPAddr).IP, nil
}()
} else {
ip, err = cluster.HostIP(co.CP.Host, co.Config.Name)
}
if err != nil {
exit.Error(reason.IfHostIP, "Error getting the host IP address to use from within the VM", err)
}
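Dialing a UDP socket, as the WSL branch above does, is a common trick for discovering the host's outbound address: no packet is sent, the kernel merely selects the local address it would route through to reach 8.8.8.8. The same idea as a self-contained sketch:

package main

import (
    "fmt"
    "log"
    "net"
)

// outboundIP returns the local address the OS would use to reach target.
// No traffic is sent: a UDP "connect" only fixes the route.
func outboundIP(target string) (net.IP, error) {
    conn, err := net.Dial("udp", target)
    if err != nil {
        return nil, err
    }
    defer conn.Close()
    return conn.LocalAddr().(*net.UDPAddr).IP, nil
}

func main() {
    ip, err := outboundIP("8.8.8.8:80")
    if err != nil {
        log.Fatal(err)
    }
    fmt.Println(ip) // e.g. 172.20.240.10 inside a WSL VM
}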

View File

@ -19,7 +19,8 @@ metadata:
namespace: gcp-auth
spec:
ports:
- port: 443
- name: https
port: 443
targetPort: 8443
protocol: TCP
selector:

View File

@ -68,7 +68,7 @@ spec:
serviceAccountName: minikube-gcp-auth-certs
containers:
- name: create
image: {{.CustomRegistries.KubeWebhookCertgen | default .ImageRepository | default .Registries.KubeWebhookCertgen }}{{.Images.KubeWebhookCertgen}}
image: {{.CustomRegistries.KubeWebhookCertgen | default .ImageRepository | default .Registries.KubeWebhookCertgen}}{{.Images.KubeWebhookCertgen}}
imagePullPolicy: IfNotPresent
args:
- create
@ -94,7 +94,7 @@ spec:
spec:
containers:
- name: gcp-auth
image: {{.CustomRegistries.GCPAuthWebhook | default .ImageRepository | default .Registries.GCPAuthWebhook }}{{.Images.GCPAuthWebhook}}
image: {{.CustomRegistries.GCPAuthWebhook | default .ImageRepository | default .Registries.GCPAuthWebhook}}{{.Images.GCPAuthWebhook}}
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8443
@ -127,7 +127,7 @@ spec:
serviceAccountName: minikube-gcp-auth-certs
containers:
- name: patch
image: {{.CustomRegistries.KubeWebhookCertgen | default .ImageRepository | default .Registries.KubeWebhookCertgen }}{{.Images.KubeWebhookCertgen}}
image: {{.CustomRegistries.KubeWebhookCertgen | default .ImageRepository | default .Registries.KubeWebhookCertgen}}{{.Images.KubeWebhookCertgen}}
imagePullPolicy: IfNotPresent
args:
- patch
@ -183,4 +183,4 @@ webhooks:
apiGroups: ["*"]
apiVersions: ["*"]
resources: ["serviceaccounts"]
scope: "*"
scope: "*"

View File

@ -38,6 +38,9 @@ kind: Ingress
metadata:
name: example-ingress
namespace: kube-system
annotations:
# use the shared ingress-nginx
kubernetes.io/ingress.class: nginx
spec:
rules:
- host: hello-john.test

View File

@ -22,7 +22,6 @@ metadata:
app: minikube-ingress-dns
kubernetes.io/bootstrapping: rbac-defaults
app.kubernetes.io/part-of: kube-system
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
@ -33,7 +32,6 @@ metadata:
app: minikube-ingress-dns
kubernetes.io/bootstrapping: rbac-defaults
app.kubernetes.io/part-of: kube-system
addonmanager.kubernetes.io/mode: Reconcile
gcp-auth-skip-secret: "true"
rules:
- apiGroups:
@ -56,7 +54,6 @@ metadata:
app: minikube-ingress-dns
kubernetes.io/bootstrapping: rbac-defaults
app.kubernetes.io/part-of: kube-system
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
@ -74,13 +71,12 @@ metadata:
labels:
app: minikube-ingress-dns
app.kubernetes.io/part-of: kube-system
addonmanager.kubernetes.io/mode: Reconcile
spec:
serviceAccountName: minikube-ingress-dns
hostNetwork: true
containers:
- name: minikube-ingress-dns
image: {{.CustomRegistries.IngressDNS | default .ImageRepository | default .Registries.IngressDNS }}{{.Images.IngressDNS}}
image: {{.CustomRegistries.IngressDNS | default .ImageRepository | default .Registries.IngressDNS }}{{.Images.IngressDNS}}
imagePullPolicy: IfNotPresent
ports:
- containerPort: 53
@ -91,4 +87,4 @@ spec:
- name: POD_IP
valueFrom:
fieldRef:
fieldPath: status.podIP
fieldPath: status.podIP

View File

@ -1,58 +0,0 @@
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
addonmanager.kubernetes.io/mode: Reconcile
---
apiVersion: v1
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
addonmanager.kubernetes.io/mode: EnsureExists
name: ingress-nginx-controller
namespace: ingress-nginx
data:
# see https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/configmap.md for all possible options and their descriptions
hsts: "false"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: tcp-services
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
addonmanager.kubernetes.io/mode: EnsureExists
---
apiVersion: v1
kind: ConfigMap
metadata:
name: udp-services
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
addonmanager.kubernetes.io/mode: EnsureExists

View File

@ -0,0 +1,632 @@
# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
apiVersion: v1
kind: Namespace
metadata:
name: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
---
# Source: ingress-nginx/templates/controller-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
automountServiceAccountToken: true
---
# Source: ingress-nginx/templates/controller-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
data:
# see https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/configmap.md for all possible options and their descriptions
hsts: "false"
---
apiVersion: v1
kind: ConfigMap
metadata:
name: tcp-services
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
apiVersion: v1
kind: ConfigMap
metadata:
name: udp-services
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
name: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
verbs:
- list
- watch
- apiGroups:
- ''
resources:
- nodes
verbs:
- get
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- extensions
- networking.k8s.io # k8s 1.14+
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
- apiGroups:
- extensions
- networking.k8s.io # k8s 1.14+
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io # k8s 1.14+
resources:
- ingressclasses
verbs:
- get
- list
- watch
---
# Source: ingress-nginx/templates/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
name: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- namespaces
verbs:
- get
- apiGroups:
- ''
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- extensions
- networking.k8s.io # k8s 1.14+
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- extensions
- networking.k8s.io # k8s 1.14+
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io # k8s 1.14+
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- configmaps
resourceNames:
- ingress-controller-leader-nginx
verbs:
- get
- update
- apiGroups:
- ''
resources:
- configmaps
verbs:
- create
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
---
# Source: ingress-nginx/templates/controller-rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/controller-service-webhook.yaml
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
name: ingress-nginx-controller-admission
namespace: ingress-nginx
spec:
type: ClusterIP
ports:
- name: https-webhook
port: 443
targetPort: webhook
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-service.yaml
apiVersion: v1
kind: Service
metadata:
annotations:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
type: NodePort
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
# Source: ingress-nginx/templates/controller-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
revisionHistoryLimit: 10
strategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
minReadySeconds: 0
template:
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
gcp-auth-skip-secret: "true"
spec:
dnsPolicy: ClusterFirst
containers:
- name: controller
image: {{.CustomRegistries.IngressController | default .ImageRepository | default .Registries.IngressController}}{{.Images.IngressController}}
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
args:
- /nginx-ingress-controller
- --election-id=ingress-controller-leader
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
- --report-node-internal-ip-address
- --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
- --udp-services-configmap=$(POD_NAMESPACE)/udp-services
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
{{- if .CustomIngressCert}}
- --default-ssl-certificate={{ .CustomIngressCert }}
{{- end}}
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
runAsUser: 101
allowPrivilegeEscalation: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
livenessProbe:
failureThreshold: 5
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
readinessProbe:
failureThreshold: 3
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
successThreshold: 1
timeoutSeconds: 1
ports:
- name: http
containerPort: 80
protocol: TCP
hostPort: 80
- name: https
containerPort: 443
protocol: TCP
hostPort: 443
- name: webhook
containerPort: 8443
protocol: TCP
volumeMounts:
- name: webhook-cert
mountPath: /usr/local/certificates/
readOnly: true
resources:
requests:
cpu: 100m
memory: 90Mi
serviceAccountName: ingress-nginx
volumes:
- name: webhook-cert
secret:
secretName: ingress-nginx-admission
---
# Source: ingress-nginx/templates/admission-webhooks/validating-webhook.yaml
# before changing this value, check the required kubernetes version
# https://kubernetes.io/docs/reference/access-authn-authz/extensible-admission-controllers/#prerequisites
# Currently (v0.49.0), this ValidatingWebhookConfiguration validates v1beta1 requests
# TODO(govargo): check this after upstream ingress-nginx can validate the v1 version
# https://github.com/kubernetes/ingress-nginx/blob/controller-v0.49.0/internal/admission/controller/main.go#L46-L52
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: admission-webhook
name: ingress-nginx-admission
webhooks:
- name: validate.nginx.ingress.kubernetes.io
matchPolicy: Equivalent
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- {{.IngressAPIVersion}}
operations:
- CREATE
- UPDATE
resources:
- ingresses
failurePolicy: Fail
sideEffects: None
admissionReviewVersions:
{{- if eq .IngressAPIVersion "v1beta1"}}
- v1
{{- end}}
- {{.IngressAPIVersion}}
clientConfig:
service:
namespace: ingress-nginx
name: ingress-nginx-controller-admission
path: /networking/{{.IngressAPIVersion}}/ingresses
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: admission-webhook
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: ingress-nginx-admission
annotations:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ingress-nginx-admission
annotations:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/role.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: admission-webhook
rules:
- apiGroups:
- ''
resources:
- secrets
verbs:
- get
- create
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/rolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: ingress-nginx-admission
namespace: ingress-nginx
annotations:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: admission-webhook
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-createSecret.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: ingress-nginx-admission-create
namespace: ingress-nginx
annotations:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: admission-webhook
spec:
template:
metadata:
name: ingress-nginx-admission-create
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: create
image: {{.CustomRegistries.KubeWebhookCertgenCreate | default .ImageRepository | default .Registries.KubeWebhookCertgenCreate }}{{.Images.KubeWebhookCertgenCreate}}
imagePullPolicy: IfNotPresent
args:
- create
- --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=ingress-nginx-admission
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
securityContext:
runAsNonRoot: true
runAsUser: 2000
---
# Source: ingress-nginx/templates/admission-webhooks/job-patch/job-patchWebhook.yaml
apiVersion: batch/v1
kind: Job
metadata:
name: ingress-nginx-admission-patch
namespace: ingress-nginx
annotations:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: admission-webhook
spec:
template:
metadata:
name: ingress-nginx-admission-patch
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: admission-webhook
spec:
containers:
- name: patch
image: {{.CustomRegistries.KubeWebhookCertgenPatch | default .ImageRepository | default .Registries.KubeWebhookCertgenPatch }}{{.Images.KubeWebhookCertgenPatch}}
imagePullPolicy: IfNotPresent
args:
- patch
- --webhook-name=ingress-nginx-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=ingress-nginx-admission
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
securityContext:
runAsNonRoot: true
runAsUser: 2000

View File

@ -1,299 +0,0 @@
# Copyright 2016 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
addonmanager.kubernetes.io/mode: Reconcile
name: ingress-nginx-controller-admission
namespace: ingress-nginx
spec:
type: ClusterIP
ports:
- name: https-webhook
port: 443
targetPort: webhook
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
apiVersion: v1
kind: Service
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
addonmanager.kubernetes.io/mode: Reconcile
name: ingress-nginx-controller
namespace: ingress-nginx
spec:
type: NodePort
ports:
- name: http
port: 80
protocol: TCP
targetPort: http
- name: https
port: 443
protocol: TCP
targetPort: https
selector:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: ingress-nginx-controller
namespace: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
addonmanager.kubernetes.io/mode: Reconcile
spec:
selector:
matchLabels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
addonmanager.kubernetes.io/mode: Reconcile
revisionHistoryLimit: 10
strategy:
rollingUpdate:
maxUnavailable: 1
type: RollingUpdate
minReadySeconds: 0
template:
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
addonmanager.kubernetes.io/mode: Reconcile
gcp-auth-skip-secret: "true"
spec:
dnsPolicy: ClusterFirst
containers:
- name: controller
image: {{.CustomRegistries.IngressController | default .ImageRepository | default .Registries.IngressController }}{{.Images.IngressController}}
imagePullPolicy: IfNotPresent
lifecycle:
preStop:
exec:
command:
- /wait-shutdown
args:
- /nginx-ingress-controller
- --ingress-class=nginx
- --configmap=$(POD_NAMESPACE)/ingress-nginx-controller
- --report-node-internal-ip-address
- --tcp-services-configmap=$(POD_NAMESPACE)/tcp-services
- --udp-services-configmap=$(POD_NAMESPACE)/udp-services
- --validating-webhook=:8443
- --validating-webhook-certificate=/usr/local/certificates/cert
- --validating-webhook-key=/usr/local/certificates/key
{{if .CustomIngressCert}}
- --default-ssl-certificate={{ .CustomIngressCert }}
{{end}}
securityContext:
capabilities:
drop:
- ALL
add:
- NET_BIND_SERVICE
runAsUser: 101
allowPrivilegeEscalation: true
env:
- name: POD_NAME
valueFrom:
fieldRef:
fieldPath: metadata.name
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
- name: LD_PRELOAD
value: /usr/local/lib/libmimalloc.so
livenessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 5
readinessProbe:
httpGet:
path: /healthz
port: 10254
scheme: HTTP
initialDelaySeconds: 10
periodSeconds: 10
timeoutSeconds: 1
successThreshold: 1
failureThreshold: 3
ports:
- name: http
containerPort: 80
protocol: TCP
hostPort: 80
- name: https
containerPort: 443
protocol: TCP
hostPort: 443
- name: webhook
containerPort: 8443
protocol: TCP
volumeMounts:
- name: webhook-cert
mountPath: /usr/local/certificates/
readOnly: true
resources:
requests:
cpu: 100m
memory: 90Mi
serviceAccountName: ingress-nginx
volumes:
- name: webhook-cert
secret:
secretName: ingress-nginx-admission
---
# Currently (v0.44.0), this ValidatingWebhookConfiguration validates v1beta1 requests
# TODO(govargo): check this after upstream ingress-nginx can validate the v1 version
# https://github.com/kubernetes/ingress-nginx/blob/controller-v0.44.0/internal/admission/controller/main.go#L46-L52
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingWebhookConfiguration
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: admission-webhook
addonmanager.kubernetes.io/mode: Reconcile
name: ingress-nginx-admission
webhooks:
- name: validate.nginx.ingress.kubernetes.io
matchPolicy: Equivalent
rules:
- apiGroups:
- networking.k8s.io
apiVersions:
- v1beta1
operations:
- CREATE
- UPDATE
resources:
- ingresses
failurePolicy: Fail
sideEffects: None
admissionReviewVersions:
- v1
- v1beta1
clientConfig:
service:
namespace: ingress-nginx
name: ingress-nginx-controller-admission
path: /networking/v1beta1/ingresses
---
apiVersion: batch/v1
kind: Job
metadata:
name: ingress-nginx-admission-create
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: admission-webhook
addonmanager.kubernetes.io/mode: Reconcile
namespace: ingress-nginx
spec:
template:
metadata:
name: ingress-nginx-admission-create
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: admission-webhook
addonmanager.kubernetes.io/mode: Reconcile
spec:
containers:
- name: create
image: {{.CustomRegistries.KubeWebhookCertgenCreate | default .ImageRepository | default .Registries.KubeWebhookCertgenCreate }}{{.Images.KubeWebhookCertgenCreate}}
imagePullPolicy: IfNotPresent
args:
- create
- --host=ingress-nginx-controller-admission,ingress-nginx-controller-admission.$(POD_NAMESPACE).svc
- --namespace=$(POD_NAMESPACE)
- --secret-name=ingress-nginx-admission
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
securityContext:
runAsNonRoot: true
runAsUser: 2000
---
apiVersion: batch/v1
kind: Job
metadata:
name: ingress-nginx-admission-patch
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: admission-webhook
addonmanager.kubernetes.io/mode: Reconcile
namespace: ingress-nginx
spec:
template:
metadata:
name: ingress-nginx-admission-patch
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: admission-webhook
addonmanager.kubernetes.io/mode: Reconcile
spec:
containers:
- name: patch
image: {{.CustomRegistries.KubeWebhookCertgenPatch | default .ImageRepository | default .Registries.KubeWebhookCertgenPatch }}{{.Images.KubeWebhookCertgenPatch}}
imagePullPolicy: IfNotPresent
args:
- patch
- --webhook-name=ingress-nginx-admission
- --namespace=$(POD_NAMESPACE)
- --patch-mutating=false
- --secret-name=ingress-nginx-admission
- --patch-failure-policy=Fail
env:
- name: POD_NAMESPACE
valueFrom:
fieldRef:
fieldPath: metadata.namespace
restartPolicy: OnFailure
serviceAccountName: ingress-nginx-admission
securityContext:
runAsNonRoot: true
runAsUser: 2000

View File

@ -1,283 +0,0 @@
---
apiVersion: v1
kind: ServiceAccount
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
addonmanager.kubernetes.io/mode: Reconcile
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
addonmanager.kubernetes.io/mode: Reconcile
name: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- configmaps
- endpoints
- nodes
- pods
- secrets
verbs:
- list
- watch
- apiGroups:
- ''
resources:
- nodes
verbs:
- get
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- extensions
- networking.k8s.io # k8s 1.14+
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
- apiGroups:
- extensions
- networking.k8s.io # k8s 1.14+
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io # k8s 1.18+
resources:
- ingressclasses
verbs:
- get
- list
- watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ingress-nginx
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
addonmanager.kubernetes.io/mode: Reconcile
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
addonmanager.kubernetes.io/mode: Reconcile
name: ingress-nginx
namespace: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- namespaces
verbs:
- get
- apiGroups:
- ''
resources:
- configmaps
- pods
- secrets
- endpoints
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- services
verbs:
- get
- list
- watch
- apiGroups:
- extensions
- networking.k8s.io # k8s 1.14+
resources:
- ingresses
verbs:
- get
- list
- watch
- apiGroups:
- extensions
- networking.k8s.io # k8s 1.14+
resources:
- ingresses/status
verbs:
- update
- apiGroups:
- networking.k8s.io # k8s 1.18+
resources:
- ingressclasses
verbs:
- get
- list
- watch
- apiGroups:
- ''
resources:
- configmaps
resourceNames:
- ingress-controller-leader-nginx
verbs:
- get
- update
- apiGroups:
- ''
resources:
- configmaps
verbs:
- create
- apiGroups:
- ''
resources:
- events
verbs:
- create
- patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: controller
addonmanager.kubernetes.io/mode: Reconcile
name: ingress-nginx
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx
subjects:
- kind: ServiceAccount
name: ingress-nginx
namespace: ingress-nginx
---
apiVersion: v1
kind: ServiceAccount
metadata:
name: ingress-nginx-admission
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: admission-webhook
addonmanager.kubernetes.io/mode: Reconcile
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
name: ingress-nginx-admission
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: admission-webhook
addonmanager.kubernetes.io/mode: Reconcile
namespace: ingress-nginx
rules:
- apiGroups:
- admissionregistration.k8s.io
resources:
- validatingwebhookconfigurations
verbs:
- get
- update
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
name: ingress-nginx-admission
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: admission-webhook
addonmanager.kubernetes.io/mode: Reconcile
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: ClusterRole
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
name: ingress-nginx-admission
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: admission-webhook
addonmanager.kubernetes.io/mode: Reconcile
namespace: ingress-nginx
rules:
- apiGroups:
- ''
resources:
- secrets
verbs:
- get
- create
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
name: ingress-nginx-admission
labels:
app.kubernetes.io/name: ingress-nginx
app.kubernetes.io/instance: ingress-nginx
app.kubernetes.io/component: admission-webhook
addonmanager.kubernetes.io/mode: Reconcile
namespace: ingress-nginx
roleRef:
apiGroup: rbac.authorization.k8s.io
kind: Role
name: ingress-nginx-admission
subjects:
- kind: ServiceAccount
name: ingress-nginx-admission
namespace: ingress-nginx

File diff suppressed because it is too large

View File

@ -1,4 +1,4 @@
# Copyright 2016 The Kubernetes Authors All rights reserved.
# Copyright 2021 The Kubernetes Authors All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
@ -12,6 +12,8 @@
# See the License for the specific language governing permissions and
# limitations under the License.
# ref: https://github.com/operator-framework/operator-lifecycle-manager/releases/download/v0.18.3/olm.yaml
---
apiVersion: v1
kind: Namespace
@ -82,7 +84,7 @@ spec:
- $(OPERATOR_NAMESPACE)
- --writeStatusName
- ""
image: {{.CustomRegistries.OLM | default .ImageRepository | default .Registries.OLM }}{{.Images.OLM}}
image: {{.CustomRegistries.OLM | default .ImageRepository | default .Registries.OLM}}{{.Images.OLM}}
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8080
@ -99,7 +101,6 @@ spec:
port: 8080
terminationMessagePolicy: FallbackToLogsOnError
env:
- name: OPERATOR_NAMESPACE
valueFrom:
fieldRef:
@ -110,8 +111,6 @@ spec:
requests:
cpu: 10m
memory: 160Mi
nodeSelector:
kubernetes.io/os: linux
---
@ -144,8 +143,8 @@ spec:
- olm
- -configmapServerImage=quay.io/operator-framework/configmap-operator-registry:latest
- -util-image
- {{.CustomRegistries.OLM | default .ImageRepository | default .Registries.OLM }}{{.Images.OLM}}
image: {{.CustomRegistries.OLM | default .ImageRepository | default .Registries.OLM }}{{.Images.OLM}}
- {{.CustomRegistries.OLM | default .ImageRepository | default .Registries.OLM}}{{.Images.OLM}}
image: {{.CustomRegistries.OLM | default .ImageRepository | default .Registries.OLM}}{{.Images.OLM}}
imagePullPolicy: IfNotPresent
ports:
- containerPort: 8080
@ -161,14 +160,10 @@ spec:
path: /healthz
port: 8080
terminationMessagePolicy: FallbackToLogsOnError
env:
resources:
requests:
cpu: 10m
memory: 80Mi
nodeSelector:
kubernetes.io/os: linux
---
@ -224,7 +219,7 @@ metadata:
name: packageserver
namespace: olm
labels:
olm.version: 0.17.0
olm.version: 0.18.3
spec:
displayName: Package Server
description: Represents an Operator package that is available from a given CatalogSource which will resolve to a ClusterServiceVersion.
@ -288,6 +283,9 @@ spec:
spec:
strategy:
type: RollingUpdate
rollingUpdate:
maxUnavailable: 1
maxSurge: 1
replicas: 2
selector:
matchLabels:
@ -309,7 +307,7 @@ spec:
- "5443"
- --global-namespace
- olm
image: {{.CustomRegistries.OLM | default .ImageRepository | default .Registries.OLM }}{{.Images.OLM}}
image: {{.CustomRegistries.OLM | default .ImageRepository | default .Registries.OLM}}{{.Images.OLM}}
imagePullPolicy: Always
ports:
- containerPort: 5443
@ -337,7 +335,7 @@ spec:
- name: tmpfs
emptyDir: {}
maturity: alpha
version: 0.17.0
version: 0.18.3
apiservicedefinitions:
owned:
- group: packages.operators.coreos.com
@ -356,6 +354,9 @@ metadata:
namespace: olm
spec:
sourceType: grpc
image: {{.CustomRegistries.UpstreamCommunityOperators | default .ImageRepository | default .Registries.UpstreamCommunityOperators }}{{.Images.UpstreamCommunityOperators}}
image: {{.CustomRegistries.UpstreamCommunityOperators | default .ImageRepository | default .Registries.UpstreamCommunityOperators}}{{.Images.UpstreamCommunityOperators}}
displayName: Community Operators
publisher: OperatorHub.io
updateStrategy:
registryPoll:
interval: 60m

10
go.mod
View File

@ -5,7 +5,7 @@ go 1.16
require (
cloud.google.com/go/storage v1.15.0
contrib.go.opencensus.io/exporter/stackdriver v0.12.1
github.com/Delta456/box-cli-maker/v2 v2.2.1
github.com/Delta456/box-cli-maker/v2 v2.2.2
github.com/GoogleCloudPlatform/docker-credential-gcr v0.0.0-20210713212222-faed5e8b8ca2
github.com/GoogleCloudPlatform/opentelemetry-operations-go/exporter/trace v0.16.0
github.com/Microsoft/hcsshim v0.8.17 // indirect
@ -29,7 +29,6 @@ require (
github.com/google/go-github/v36 v36.0.0
github.com/google/slowjam v1.0.0
github.com/google/uuid v1.3.0
github.com/gookit/color v1.4.2 // indirect
github.com/hashicorp/go-getter v1.5.7
github.com/hashicorp/go-retryablehttp v0.7.0
github.com/hashicorp/golang-lru v0.5.3 // indirect
@ -53,7 +52,6 @@ require (
github.com/machine-drivers/docker-machine-driver-vmware v0.1.3
github.com/mattbaird/jsonpatch v0.0.0-20200820163806-098863c1fc24
github.com/mattn/go-isatty v0.0.13
github.com/mattn/go-runewidth v0.0.13 // indirect
github.com/mitchellh/go-ps v1.0.0
github.com/moby/hyperkit v0.0.0-20210108224842-2f061e447e14
github.com/moby/sys/mount v0.2.0 // indirect
@ -84,18 +82,18 @@ require (
golang.org/x/mod v0.5.0
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf
golang.org/x/term v0.0.0-20210406210042-72f3dc4e9b72
golang.org/x/text v0.3.7
gonum.org/v1/plot v0.9.0
google.golang.org/api v0.52.0
gopkg.in/mgo.v2 v2.0.0-20190816093944-a6b53ec6cb22 // indirect
gopkg.in/yaml.v2 v2.4.0
k8s.io/api v0.22.0
k8s.io/api v0.22.1
k8s.io/apimachinery v0.22.0
k8s.io/client-go v0.22.0
k8s.io/klog/v2 v2.10.0
k8s.io/kubectl v0.22.0
k8s.io/kubectl v0.22.1
k8s.io/kubernetes v1.21.3
sigs.k8s.io/sig-storage-lib-external-provisioner/v6 v6.3.0
)

11
go.sum
View File

@ -68,8 +68,8 @@ github.com/Azure/go-autorest/logger v0.2.0/go.mod h1:T9E3cAhj2VqvPOtCYAvby9aBXkZ
github.com/Azure/go-autorest/tracing v0.6.0/go.mod h1:+vhtPC754Xsa23ID7GlGsrdKBpUA79WCAKPPZVC2DeU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/Delta456/box-cli-maker/v2 v2.2.1 h1:uTcuvT6Ty+LBHuRUdFrJBpqP9RhtLxI5+5ZpKYAUuVw=
github.com/Delta456/box-cli-maker/v2 v2.2.1/go.mod h1:R7jxZHK2wGBR2Luz/Vgi8jP5fz1ljUXgu2o2JQNmvFU=
github.com/Delta456/box-cli-maker/v2 v2.2.2 h1:CpSLcPgi5pY4+arzpyuWN2+nU8gHqto2Y+OO7VbELQ0=
github.com/Delta456/box-cli-maker/v2 v2.2.2/go.mod h1:idItIMZeyx3bg73XwSgsLeZd+gdpD2IvGbR3FC8a9TU=
github.com/GoogleCloudPlatform/docker-credential-gcr v0.0.0-20210713212222-faed5e8b8ca2 h1:rMamBsR6iCT9Y5m2Il6vFGJvY7FAgck4AoA/LobheKU=
github.com/GoogleCloudPlatform/docker-credential-gcr v0.0.0-20210713212222-faed5e8b8ca2/go.mod h1:BB1eHdMLYEFuFdBlRMb0N7YGVdM5s6Pt0njxgvfbGGs=
github.com/GoogleCloudPlatform/k8s-cloud-provider v0.0.0-20200415212048-7901bc822317/go.mod h1:DF8FZRxMHMGv/vP2lQP6h+dYzzjpuRn24VeRiYn3qjQ=
@ -594,7 +594,6 @@ github.com/googleapis/gnostic v0.4.1 h1:DLJCy1n/vrD4HPjOvYcT8aYQXpPIzoRZONaYwyyc
github.com/googleapis/gnostic v0.4.1/go.mod h1:LRhVm6pbyptWbWbuZ38d1eyptfvIytN3ir6b65WBswg=
github.com/googleinterns/cloud-operations-api-mock v0.0.0-20200709193332-a1e58c29bdd3 h1:eHv/jVY/JNop1xg2J9cBb4EzyMpWZoNCP1BslSAIkOI=
github.com/googleinterns/cloud-operations-api-mock v0.0.0-20200709193332-a1e58c29bdd3/go.mod h1:h/KNeRx7oYU4SpA4SoY7W2/NxDKEEVuwA6j9A27L4OI=
github.com/gookit/color v1.3.6/go.mod h1:R3ogXq2B9rTbXoSHJ1HyUVAZ3poOJHpd9nQmyGZsfvQ=
github.com/gookit/color v1.4.2 h1:tXy44JFSFkKnELV6WaMo/lLfu/meqITX3iAV52do7lk=
github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQHCoQ=
github.com/gophercloud/gophercloud v0.1.0/go.mod h1:vxM41WHh5uqHVBMZHzuwNOHh8XEoIEcSTewFxm1c5g8=
@ -1115,7 +1114,6 @@ github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f h1:mvXjJIHRZy
github.com/xeipuuv/gojsonschema v0.0.0-20180618132009-1d523034197f/go.mod h1:5yf86TLmAcydyeJq5YvxkGPE2fm/u4myDekKRoLuqhs=
github.com/xiang90/probing v0.0.0-20190116061207-43a291ad63a2/go.mod h1:UETIi67q53MR2AWcXfiuqkDkRtnGDLqkBTpCHuJHxtU=
github.com/xlab/treeprint v0.0.0-20181112141820-a009c3971eca/go.mod h1:ce1O1j6UtZfjr22oyGxGLbauSBp2YVXpARAosm7dHBg=
github.com/xo/terminfo v0.0.0-20200218205459-454e5b68f9e8/go.mod h1:6Yhx5ZJl5942QrNRWLwITArVT9okUXc5c3brgWJMoDc=
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778 h1:QldyIu/L63oPpyvQmHgvgickp1Yw510KJOqX7H24mg8=
github.com/xo/terminfo v0.0.0-20210125001918-ca9a967f8778/go.mod h1:2MuV+tbUrU1zIOPMxZ5EncGwgmMJsa+9ucAQZXxsObs=
github.com/xordataexchange/crypt v0.0.3-0.20170626215501-b2862e3d0a77/go.mod h1:aYKd//L2LvnjZzWKhF00oedf4jCCReLcmhLdhm1A27Q=
@ -1423,7 +1421,6 @@ golang.org/x/sys v0.0.0-20201117170446-d9b008d0a637/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201202213521-69691e467435/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201223074533-0d417f636930/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@ -1442,8 +1439,9 @@ golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c h1:F1jZWGFhYfh0Ci55sIpILtKKK8p3i2/krTr0H1rg74I=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf h1:2ucpDCmfkl8Bd/FsLtiD653Wf96cW37s+iGx93zsu4k=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210220032956-6a3ed077a48d/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
@ -1734,7 +1732,6 @@ gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20200615113413-eeeca48fe776/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b h1:h8qDotaEPuJATrMmW04NCwg7v22aHH28wwpauUhK9Oo=
gopkg.in/yaml.v3 v3.0.0-20210107192922-496545a6307b/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gotest.tools v2.2.0+incompatible h1:VsBPFP1AI068pPrMxtb/S8Zkgf9xEmTLJjfM+P5UIEo=

View File

@ -46,7 +46,7 @@ ec=$?
if [ $ec -gt 0 ]; then
if [ "$release" = false ]; then
gh pr comment ${ghprbPullId} --body "Hi ${ghprbPullAuthorLoginMention}, building a new ISO failed.
See the logs at: https://storage.cloud.google.com/minikube-builds/logs/${ghprbPullId}/iso-${BUILD_NUMBER}/iso_build.txt
See the logs at: https://storage.cloud.google.com/minikube-builds/logs/${ghprbPullId}/${ghprbActualCommit::7}/iso_build.txt
"
fi
exit $ec

View File

@ -68,7 +68,7 @@ ec=$?
if [ $ec -gt 0 ]; then
if [ "$release" = false ]; then
gh pr comment ${ghprbPullId} --body "Hi ${ghprbPullAuthorLoginMention}, building a new kicbase image failed.
See the logs at: https://storage.cloud.google.com/minikube-builds/logs/${ghprbPullId}/kicbase-${BUILD_NUMBER}/kic_image_build.txt
See the logs at: https://storage.cloud.google.com/minikube-builds/logs/${ghprbPullId}/${ghprbActualCommit::7}/kic_image_build.txt
"
fi
exit $ec

View File

@ -27,6 +27,7 @@ import (
"sync"
"time"
"github.com/blang/semver/v4"
"github.com/pkg/errors"
"github.com/spf13/viper"
@ -45,6 +46,7 @@ import (
"k8s.io/minikube/pkg/minikube/reason"
"k8s.io/minikube/pkg/minikube/style"
"k8s.io/minikube/pkg/minikube/sysinit"
"k8s.io/minikube/pkg/util"
"k8s.io/minikube/pkg/util/retry"
)
@ -226,7 +228,9 @@ func addonSpecificChecks(cc *config.ClusterConfig, name string, enable bool, run
out.V{"driver_name": cc.Driver, "addon_name": name})
}
}
return false, nil
if err := supportLegacyIngress(cc); err != nil {
return false, err
}
}
if strings.HasPrefix(name, "istio") && enable {
@ -238,7 +242,6 @@ func addonSpecificChecks(cc *config.ClusterConfig, name string, enable bool, run
if cc.CPUs < minCPUs {
out.WarningT("Istio needs {{.minCPUs}} CPUs -- your configuration only allocates {{.cpus}} CPUs", out.V{"minCPUs": minCPUs, "cpus": cc.CPUs})
}
return false, nil
}
if name == "registry" {
@ -286,6 +289,33 @@ func isAddonAlreadySet(cc *config.ClusterConfig, addon *assets.Addon, enable boo
return false
}
// maintain backwards compatibility with k8s < v1.19
// by replacing images with old versions if custom ones are not already provided
func supportLegacyIngress(cc *config.ClusterConfig) error {
v, err := util.ParseKubernetesVersion(cc.KubernetesConfig.KubernetesVersion)
if err != nil {
return errors.Wrap(err, "parsing Kubernetes version")
}
if semver.MustParseRange("<1.19.0")(v) {
imgs := map[string]string{
// https://github.com/kubernetes/ingress-nginx/blob/f3c50698d98299b1a61f83cb6c4bb7de0b71fb4b/deploy/static/provider/kind/deploy.yaml#L327
"IngressController": "ingress-nginx/controller:v0.49.0@sha256:e9707504ad0d4c119036b6d41ace4a33596139d3feb9ccb6617813ce48c3eeef",
// issues: https://github.com/kubernetes/ingress-nginx/issues/7418 and https://github.com/jet/kube-webhook-certgen/issues/30
"KubeWebhookCertgenCreate": "docker.io/jettech/kube-webhook-certgen:v1.5.1@sha256:950833e19ade18cd389d647efb88992a7cc077abedef343fa59e012d376d79b7",
"KubeWebhookCertgenPatch": "docker.io/jettech/kube-webhook-certgen:v1.5.1@sha256:950833e19ade18cd389d647efb88992a7cc077abedef343fa59e012d376d79b7",
}
if cc.CustomAddonImages == nil {
cc.CustomAddonImages = map[string]string{}
}
for name, path := range imgs {
if _, exists := cc.CustomAddonImages[name]; !exists {
cc.CustomAddonImages[name] = path
}
}
}
return nil
}
func enableOrDisableAddonInternal(cc *config.ClusterConfig, addon *assets.Addon, runner command.Runner, data interface{}, enable bool) error {
deployFiles := []string{}

View File

@ -21,12 +21,15 @@ import (
"runtime"
"strings"
"github.com/blang/semver/v4"
"github.com/pkg/errors"
"github.com/spf13/viper"
"k8s.io/minikube/deploy/addons"
"k8s.io/minikube/pkg/minikube/config"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/out"
"k8s.io/minikube/pkg/minikube/vmpath"
"k8s.io/minikube/pkg/util"
"k8s.io/minikube/pkg/version"
)
@ -230,24 +233,17 @@ var Addons = map[string]*Addon{
}),
"ingress": NewAddon([]*BinAsset{
MustBinAsset(addons.IngressAssets,
"ingress/ingress-configmap.yaml.tmpl",
"ingress/ingress-deploy.yaml.tmpl",
vmpath.GuestAddonsDir,
"ingress-configmap.yaml",
"0640"),
MustBinAsset(addons.IngressAssets,
"ingress/ingress-rbac.yaml.tmpl",
vmpath.GuestAddonsDir,
"ingress-rbac.yaml",
"0640"),
MustBinAsset(addons.IngressAssets,
"ingress/ingress-dp.yaml.tmpl",
vmpath.GuestAddonsDir,
"ingress-dp.yaml",
"ingress-deploy.yaml",
"0640"),
}, false, "ingress", "", map[string]string{
"IngressController": "ingress-nginx/controller:v0.44.0@sha256:3dd0fac48073beaca2d67a78c746c7593f9c575168a17139a9955a82c63c4b9a",
"KubeWebhookCertgenCreate": "docker.io/jettech/kube-webhook-certgen:v1.5.1@sha256:950833e19ade18cd389d647efb88992a7cc077abedef343fa59e012d376d79b7",
"KubeWebhookCertgenPatch": "docker.io/jettech/kube-webhook-certgen:v1.5.1@sha256:950833e19ade18cd389d647efb88992a7cc077abedef343fa59e012d376d79b7",
// https://github.com/kubernetes/ingress-nginx/blob/557604f4ef526f7755d36089b617bc7686c389f9/deploy/static/provider/kind/deploy.yaml#L323
"IngressController": "ingress-nginx/controller:v1.0.0-beta.3@sha256:44a7a06b71187a4529b0a9edee5cc22bdf71b414470eff696c3869ea8d90a695",
// https://github.com/kubernetes/ingress-nginx/blob/557604f4ef526f7755d36089b617bc7686c389f9/deploy/static/provider/kind/deploy.yaml#L612
"KubeWebhookCertgenCreate": "k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0@sha256:f3b6b39a6062328c095337b4cadcefd1612348fdd5190b1dcbcb9b9e90bd8068",
// https://github.com/kubernetes/ingress-nginx/blob/557604f4ef526f7755d36089b617bc7686c389f9/deploy/static/provider/kind/deploy.yaml#L660
"KubeWebhookCertgenPatch": "k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0@sha256:f3b6b39a6062328c095337b4cadcefd1612348fdd5190b1dcbcb9b9e90bd8068",
}, map[string]string{
"IngressController": "k8s.gcr.io",
}),
@ -314,8 +310,9 @@ var Addons = map[string]*Addon{
"olm.yaml",
"0640"),
}, false, "olm", "", map[string]string{
"OLM": "operator-framework/olm:v0.17.0@sha256:de396b540b82219812061d0d753440d5655250c621c753ed1dc67d6154741607",
"UpstreamCommunityOperators": "operator-framework/upstream-community-operators:07bbc13@sha256:cc7b3fdaa1ccdea5866fcd171669dc0ed88d3477779d8ed32e3712c827e38cc0",
"OLM": "operator-framework/olm@sha256:e74b2ac57963c7f3ba19122a8c31c9f2a0deb3c0c5cac9e5323ccffd0ca198ed",
// operator-framework/community-operators was deprecated: https://github.com/operator-framework/community-operators#repository-is-obsolete; switching to OperatorHub.io instead
"UpstreamCommunityOperators": "operatorhubio/catalog:latest",
}, map[string]string{
"OLM": "quay.io",
"UpstreamCommunityOperators": "quay.io",
@ -531,13 +528,13 @@ var Addons = map[string]*Addon{
"gcp-auth-service.yaml",
"0640"),
MustBinAsset(addons.GcpAuthAssets,
"gcp-auth/gcp-auth-webhook.yaml.tmpl.tmpl",
"gcp-auth/gcp-auth-webhook.yaml.tmpl",
vmpath.GuestAddonsDir,
"gcp-auth-webhook.yaml",
"0640"),
}, false, "gcp-auth", "google", map[string]string{
"KubeWebhookCertgen": "jettech/kube-webhook-certgen:v1.3.0@sha256:ff01fba91131ed260df3f3793009efbf9686f5a5ce78a85f81c386a4403f7689",
"GCPAuthWebhook": "k8s-minikube/gcp-auth-webhook:v0.0.6@sha256:c407ad6ee97d8a0e8a21c713e2d9af66aaf73315e4a123874c00b786f962f3cd",
"KubeWebhookCertgen": "k8s.gcr.io/ingress-nginx/kube-webhook-certgen:v1.0@sha256:f3b6b39a6062328c095337b4cadcefd1612348fdd5190b1dcbcb9b9e90bd8068",
"GCPAuthWebhook": "k8s-minikube/gcp-auth-webhook:v0.0.7@sha256:be9661afbd47e4042bee1cb48cae858cc2f4b4e121340ee69fdc0013aeffcca4",
}, map[string]string{
"GCPAuthWebhook": "gcr.io",
}),
@ -795,6 +792,7 @@ func GenerateTemplateData(addon *Addon, cfg config.KubernetesConfig, netInfo Net
LoadBalancerStartIP string
LoadBalancerEndIP string
CustomIngressCert string
IngressAPIVersion string
ContainerRuntime string
Images map[string]string
Registries map[string]string
@ -807,6 +805,7 @@ func GenerateTemplateData(addon *Addon, cfg config.KubernetesConfig, netInfo Net
LoadBalancerStartIP: cfg.LoadBalancerStartIP,
LoadBalancerEndIP: cfg.LoadBalancerEndIP,
CustomIngressCert: cfg.CustomIngressCert,
IngressAPIVersion: "v1", // api version for ingress (eg, "v1beta1"; defaults to "v1" for k8s 1.19+)
ContainerRuntime: cfg.ContainerRuntime,
Images: images,
Registries: addon.Registries,
@ -820,6 +819,16 @@ func GenerateTemplateData(addon *Addon, cfg config.KubernetesConfig, netInfo Net
opts.Registries = make(map[string]string)
}
// maintain backwards compatibility with k8s < v1.19
// by using v1beta1 instead of v1 api version for ingress
v, err := util.ParseKubernetesVersion(cfg.KubernetesVersion)
if err != nil {
return errors.Wrap(err, "parsing Kubernetes version")
}
if semver.MustParseRange("<1.19.0")(v) {
opts.IngressAPIVersion = "v1beta1"
}
// Network info for generating template
opts.NetworkInfo["ControlPlaneNodeIP"] = netInfo.ControlPlaneNodeIP
opts.NetworkInfo["ControlPlaneNodePort"] = fmt.Sprint(netInfo.ControlPlaneNodePort)

View File

@ -24,6 +24,7 @@ import (
"io"
"os"
"path"
"strconv"
"time"
"github.com/pkg/errors"
@ -37,8 +38,11 @@ const MemorySource = "memory"
// CopyableFile is something that can be copied
type CopyableFile interface {
io.Reader
io.Writer
GetLength() int
SetLength(int)
GetSourcePath() string
GetTargetPath() string
GetTargetDir() string
GetTargetName() string
@ -62,6 +66,11 @@ func (b *BaseAsset) GetSourcePath() string {
return b.SourcePath
}
// GetTargetPath returns target path
func (b *BaseAsset) GetTargetPath() string {
return path.Join(b.GetTargetDir(), b.GetTargetName())
}
// GetTargetDir returns target dir
func (b *BaseAsset) GetTargetDir() string {
return b.TargetDir
@ -86,6 +95,7 @@ func (b *BaseAsset) GetModTime() (time.Time, error) {
type FileAsset struct {
BaseAsset
reader io.ReadSeeker
writer io.Writer
file *os.File // Optional pointer to close file through FileAsset.Close()
}
@ -134,6 +144,14 @@ func (f *FileAsset) GetLength() (flen int) {
return int(fi.Size())
}
// SetLength sets the file length
func (f *FileAsset) SetLength(flen int) {
err := os.Truncate(f.SourcePath, int64(flen))
if err != nil {
klog.Errorf("truncate(%q) failed: %v", f.SourcePath, err)
}
}
// GetModTime returns modification time of the file
func (f *FileAsset) GetModTime() (time.Time, error) {
fi, err := os.Stat(f.SourcePath)
@ -152,6 +170,23 @@ func (f *FileAsset) Read(p []byte) (int, error) {
return f.reader.Read(p)
}
// Write writes the asset
func (f *FileAsset) Write(p []byte) (int, error) {
if f.writer == nil {
f.file.Close()
perms, err := strconv.ParseUint(f.Permissions, 8, 32)
if err != nil {
return 0, err
}
if perms > 07777 {
// guard separately so an out-of-range mode doesn't return a nil error
return 0, errors.Errorf("invalid permissions %q", f.Permissions)
}
f.file, err = os.OpenFile(f.SourcePath, os.O_RDWR|os.O_CREATE, os.FileMode(perms))
if err != nil {
return 0, err
}
f.writer = io.Writer(f.file)
}
return f.writer.Write(p)
}
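// NOTE: a minimal sketch (hypothetical helper, not part of this change) of the
// consumer pattern that the new SetLength/Write pair enables:
//
//	func writeBack(f CopyableFile, r io.Reader, n int) error {
//		f.SetLength(n)                     // pre-size the destination
//		_, err := io.CopyN(f, r, int64(n)) // stream exactly n bytes into the asset
//		return err
//	}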
// Seek resets the reader to offset
func (f *FileAsset) Seek(offset int64, whence int) (int64, error) {
return f.reader.Seek(offset, whence)
@ -177,11 +212,23 @@ func (m *MemoryAsset) GetLength() int {
return m.length
}
// SetLength sets the length
func (m *MemoryAsset) SetLength(len int) {
m.length = len
}
// Read reads the asset
func (m *MemoryAsset) Read(p []byte) (int, error) {
return m.reader.Read(p)
}
// Write writes the asset, replacing its contents and resetting the reader
func (m *MemoryAsset) Write(p []byte) (int, error) {
m.length = len(p)
m.reader = bytes.NewReader(p)
return len(p), nil
}
// Seek resets the reader to offset
func (m *MemoryAsset) Seek(offset int64, whence int) (int64, error) {
return m.reader.Seek(offset, whence)
@ -298,6 +345,11 @@ func (m *BinAsset) GetLength() int {
return m.length
}
// SetLength sets length
func (m *BinAsset) SetLength(len int) {
m.length = len
}
// Read reads the asset
func (m *BinAsset) Read(p []byte) (int, error) {
if m.GetLength() == 0 {
@ -306,6 +358,13 @@ func (m *BinAsset) Read(p []byte) (int, error) {
return m.reader.Read(p)
}
// Write writes the asset
func (m *BinAsset) Write(p []byte) (int, error) {
m.length = len(p)
m.reader = bytes.NewReader(p)
return len(p), nil
}
// Seek resets the reader to offset
func (m *BinAsset) Seek(offset int64, whence int) (int64, error) {
return m.reader.Seek(offset, whence)

View File

@ -40,7 +40,7 @@ etcd:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.22.0-rc.0
kubernetesVersion: v1.22.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"

View File

@ -40,7 +40,7 @@ etcd:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.22.0-rc.0
kubernetesVersion: v1.22.0
networking:
dnsDomain: cluster.local
podSubnet: "192.168.32.0/20"

View File

@ -40,7 +40,7 @@ etcd:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.22.0-rc.0
kubernetesVersion: v1.22.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"

View File

@ -46,7 +46,7 @@ etcd:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.22.0-rc.0
kubernetesVersion: v1.22.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"

View File

@ -40,7 +40,7 @@ etcd:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.22.0-rc.0
kubernetesVersion: v1.22.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"

View File

@ -40,7 +40,7 @@ etcd:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.22.0-rc.0
kubernetesVersion: v1.22.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"

View File

@ -40,7 +40,7 @@ etcd:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.22.0-rc.0
kubernetesVersion: v1.22.0
networking:
dnsDomain: minikube.local
podSubnet: "10.244.0.0/16"

View File

@ -41,7 +41,7 @@ etcd:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.22.0-rc.0
kubernetesVersion: v1.22.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"

View File

@ -43,7 +43,7 @@ etcd:
dataDir: /var/lib/minikube/etcd
extraArgs:
proxy-refresh-interval: "70000"
kubernetesVersion: v1.22.0-rc.0
kubernetesVersion: v1.22.0
networking:
dnsDomain: cluster.local
podSubnet: "10.244.0.0/16"

View File

@ -23,23 +23,23 @@ import (
"k8s.io/minikube/pkg/minikube/config"
)
// From https://raw.githubusercontent.com/cilium/cilium/v1.8/install/kubernetes/quick-install.yaml
// From https://raw.githubusercontent.com/cilium/cilium/v1.9/install/kubernetes/quick-install.yaml
var ciliumTmpl = `---
# Source: cilium/charts/agent/templates/serviceaccount.yaml
# Source: cilium/templates/cilium-agent-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium
namespace: kube-system
---
# Source: cilium/charts/operator/templates/serviceaccount.yaml
# Source: cilium/templates/cilium-operator-serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: cilium-operator
namespace: kube-system
---
# Source: cilium/charts/config/templates/configmap.yaml
# Source: cilium/templates/cilium-configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
@ -58,9 +58,14 @@ data:
# the kvstore by commenting out the identity-allocation-mode below, or
# setting it to "kvstore".
identity-allocation-mode: crd
cilium-endpoint-gc-interval: "5m0s"
# If you want to run cilium in debug mode change this value to true
debug: "false"
# The agent can be put into the following three policy enforcement modes
# default, always and never.
# https://docs.cilium.io/en/latest/policy/intro/#policy-enforcement-modes
enable-policy: "default"
# Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
# address.
@ -69,8 +74,10 @@ data:
# Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6
# address.
enable-ipv6: "false"
# Users who wish to specify their own custom CNI configuration file must set
# custom-cni-conf to "true", otherwise Cilium may overwrite the configuration.
custom-cni-conf: "false"
enable-bpf-clock-probe: "true"
# If you want cilium monitor to aggregate tracing for packets, set this level
# to "low", "medium", or "maximum". The higher the level, the less packets
# that will be seen in monitor output.
@ -87,13 +94,15 @@ data:
#
# Only effective when monitor aggregation is set to "medium" or higher.
monitor-aggregation-flags: all
# bpf-policy-map-max specified the maximum number of entries in endpoint
# policy map (per endpoint)
bpf-policy-map-max: "16384"
# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
bpf-map-dynamic-size-ratio: "0.0025"
# bpf-policy-map-max specifies the maximum number of entries in endpoint
# policy map (per endpoint)
bpf-policy-map-max: "16384"
# bpf-lb-map-max specifies the maximum number of entries in bpf lb service,
# backend and affinity maps.
bpf-lb-map-max: "65536"
# Pre-allocation of map entries allows per-packet latency to be reduced, at
# the expense of up-front memory allocation for the entries in the maps. The
# default value below will minimize memory usage in the default installation;
@ -104,9 +113,8 @@ data:
#
# If this value is modified, then during the next Cilium startup the restore
# of existing endpoints and tracking of ongoing connections may be disrupted.
# This may lead to policy drops or a change in loadbalancing decisions for a
# connection for some time. Endpoints may need to be recreated to restore
# connectivity.
# As a result, reply packets may be dropped and the load-balancing decisions
# for established connections may change.
#
# If this option is set to "false" during an upgrade from 1.3 or earlier to
# 1.4 or later, then it may cause one-time disruptions during the upgrade.
@ -116,61 +124,63 @@ data:
# container image names
sidecar-istio-proxy-image: "cilium/istio_proxy"
# Name of the cluster. Only relevant when building a mesh of clusters.
cluster-name: default
# Unique ID of the cluster. Must be unique across all connected clusters and
# in the range of 1 and 255. Only relevant when building a mesh of clusters.
cluster-id: ""
# Encapsulation mode for communication between nodes
# Possible values:
# - disabled
# - vxlan (default)
# - geneve
tunnel: vxlan
# Name of the cluster. Only relevant when building a mesh of clusters.
cluster-name: default
# DNS Polling periodically issues a DNS lookup for each 'matchName' from
# cilium-agent. The result is used to regenerate endpoint policy.
# DNS lookups are repeated with an interval of 5 seconds, and are made for
# A(IPv4) and AAAA(IPv6) addresses. Should a lookup fail, the most recent IP
# data is used instead. An IP change will trigger a regeneration of the Cilium
# policy for each endpoint and increment the per cilium-agent policy
# repository revision.
#
# This option is disabled by default starting from version 1.4.x in favor
# of a more powerful DNS proxy-based implementation, see [0] for details.
# Enable this option if you want to use FQDN policies but do not want to use
# the DNS proxy.
#
# To ease upgrade, users may opt to set this option to "true".
# Otherwise please refer to the Upgrade Guide [1] which explains how to
# prepare policy rules for upgrade.
#
# [0] http://docs.cilium.io/en/stable/policy/language/#dns-based
# [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
tofqdns-enable-poller: "false"
# Enables L7 proxy for L7 policy enforcement and visibility
enable-l7-proxy: "true"
# wait-bpf-mount makes init container wait until bpf filesystem is mounted
wait-bpf-mount: "false"
masquerade: "true"
enable-bpf-masquerade: "true"
enable-xt-socket-fallback: "true"
install-iptables-rules: "true"
auto-direct-node-routes: "false"
enable-bandwidth-manager: "false"
enable-local-redirect-policy: "false"
kube-proxy-replacement: "probe"
kube-proxy-replacement-healthz-bind-address: ""
enable-health-check-nodeport: "true"
node-port-bind-protection: "true"
enable-auto-protect-node-port-range: "true"
enable-session-affinity: "true"
k8s-require-ipv4-pod-cidr: "true"
k8s-require-ipv6-pod-cidr: "false"
enable-endpoint-health-checking: "true"
enable-health-checking: "true"
enable-well-known-identities: "false"
enable-remote-node-identity: "true"
operator-api-serve-addr: "127.0.0.1:9234"
# Enable Hubble gRPC service.
enable-hubble: "true"
# UNIX domain socket for Hubble server to listen to.
hubble-socket-path: "/var/run/cilium/hubble.sock"
# An additional address for Hubble server to listen to (e.g. ":4244").
hubble-listen-address: ":4244"
hubble-disable-tls: "false"
hubble-tls-cert-file: /var/lib/cilium/tls/hubble/server.crt
hubble-tls-key-file: /var/lib/cilium/tls/hubble/server.key
hubble-tls-client-ca-files: /var/lib/cilium/tls/hubble/client-ca.crt
ipam: "cluster-pool"
cluster-pool-ipv4-cidr: "10.0.0.0/8"
cluster-pool-ipv4-mask-size: "24"
disable-cnp-status-updates: "true"
cgroup-root: "/run/cilium/cgroupv2"
---
# Source: cilium/charts/agent/templates/clusterrole.yaml
# Source: cilium/templates/cilium-agent-clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
@ -207,6 +217,16 @@ rules:
- ""
resources:
- pods
- pods/finalizers
verbs:
- get
- list
- watch
- update
- delete
- apiGroups:
- ""
resources:
- nodes
verbs:
- get
@ -225,27 +245,40 @@ rules:
resources:
- customresourcedefinitions
verbs:
# Deprecated for removal in v1.10
- create
- get
- list
- watch
- update
# This is used when validating policies in preflight. This will need to stay
# until we figure out how to avoid "get" inside the preflight, and then
# should be removed ideally.
- get
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumnetworkpolicies/finalizers
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumclusterwidenetworkpolicies/finalizers
- ciliumendpoints
- ciliumendpoints/status
- ciliumendpoints/finalizers
- ciliumnodes
- ciliumnodes/status
- ciliumnodes/finalizers
- ciliumidentities
- ciliumidentities/finalizers
- ciliumlocalredirectpolicies
- ciliumlocalredirectpolicies/status
- ciliumlocalredirectpolicies/finalizers
verbs:
- '*'
---
# Source: cilium/charts/operator/templates/clusterrole.yaml
# Source: cilium/templates/cilium-operator-clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
@ -287,14 +320,22 @@ rules:
resources:
- ciliumnetworkpolicies
- ciliumnetworkpolicies/status
- ciliumnetworkpolicies/finalizers
- ciliumclusterwidenetworkpolicies
- ciliumclusterwidenetworkpolicies/status
- ciliumclusterwidenetworkpolicies/finalizers
- ciliumendpoints
- ciliumendpoints/status
- ciliumendpoints/finalizers
- ciliumnodes
- ciliumnodes/status
- ciliumnodes/finalizers
- ciliumidentities
- ciliumidentities/status
- ciliumidentities/finalizers
- ciliumlocalredirectpolicies
- ciliumlocalredirectpolicies/status
- ciliumlocalredirectpolicies/finalizers
verbs:
- '*'
- apiGroups:
@ -302,11 +343,30 @@ rules:
resources:
- customresourcedefinitions
verbs:
- create
- get
- list
- update
- watch
# For cilium-operator running in HA mode.
#
# Cilium operator running in HA mode requires the use of ResourceLock for Leader Election
# between multiple running instances.
# The preferred way of doing this is to use LeasesResourceLock as edits to Leases are less
# common and fewer objects in the cluster watch "all Leases".
# The support for leases was introduced in coordination.k8s.io/v1 during Kubernetes 1.14 release.
# In Cilium we currently don't support HA mode for K8s version < 1.14. This condition makes sure
# that we only authorize access to leases resources in supported K8s versions.
- apiGroups:
- coordination.k8s.io
resources:
- leases
verbs:
- create
- get
- update
---
# Source: cilium/charts/agent/templates/clusterrolebinding.yaml
# Source: cilium/templates/cilium-agent-clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
@ -320,7 +380,7 @@ subjects:
name: cilium
namespace: kube-system
---
# Source: cilium/charts/operator/templates/clusterrolebinding.yaml
# Source: cilium/templates/cilium-operator-clusterrolebinding.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
@ -334,7 +394,7 @@ subjects:
name: cilium-operator
namespace: kube-system
---
# Source: cilium/charts/agent/templates/daemonset.yaml
# Source: cilium/templates/cilium-agent-daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
@ -346,6 +406,10 @@ spec:
selector:
matchLabels:
k8s-app: cilium
updateStrategy:
rollingUpdate:
maxUnavailable: 2
type: RollingUpdate
template:
metadata:
annotations:
@ -414,16 +478,16 @@ spec:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
- name: CILIUM_Cilium_MASTER_DEVICE
- name: CILIUM_FLANNEL_MASTER_DEVICE
valueFrom:
configMapKeyRef:
key: Cilium-master-device
key: flannel-master-device
name: cilium-config
optional: true
- name: CILIUM_Cilium_UNINSTALL_ON_EXIT
- name: CILIUM_FLANNEL_UNINSTALL_ON_EXIT
valueFrom:
configMapKeyRef:
key: Cilium-uninstall-on-exit
key: flannel-uninstall-on-exit
name: cilium-config
optional: true
- name: CILIUM_CLUSTERMESH_CONFIG
@ -440,7 +504,7 @@ spec:
key: custom-cni-conf
name: cilium-config
optional: true
image: "docker.io/cilium/cilium:v1.8.0"
image: "quay.io/cilium/cilium:v1.9.9@sha256:a85d5cff13f8231c2e267d9fc3c6e43d24be4a75dac9f641c11ec46e7f17624d"
imagePullPolicy: IfNotPresent
lifecycle:
postStart:
@ -480,8 +544,37 @@ spec:
readOnly: true
- mountPath: /run/xtables.lock
name: xtables-lock
- mountPath: /var/lib/cilium/tls/hubble
name: hubble-tls
readOnly: true
hostNetwork: true
initContainers:
# Required to mount cgroup2 filesystem on the underlying Kubernetes node.
# We use nsenter command with host's cgroup and mount namespaces enabled.
- name: mount-cgroup
env:
- name: CGROUP_ROOT
value: /run/cilium/cgroupv2
- name: BIN_PATH
value: /opt/cni/bin
command:
- sh
- -c
# The statically linked Go program binary is invoked to avoid any
# dependency on utilities like sh and mount that can be missing on certain
# distros installed on the underlying host. Copy the binary to the
# same directory where we install cilium cni plugin so that exec permissions
# are available.
- 'cp /usr/bin/cilium-mount /hostbin/cilium-mount && nsenter --cgroup=/hostproc/1/ns/cgroup --mount=/hostproc/1/ns/mnt "${BIN_PATH}/cilium-mount" $CGROUP_ROOT; rm /hostbin/cilium-mount'
image: "quay.io/cilium/cilium:v1.9.9@sha256:a85d5cff13f8231c2e267d9fc3c6e43d24be4a75dac9f641c11ec46e7f17624d"
imagePullPolicy: IfNotPresent
volumeMounts:
- mountPath: /hostproc
name: hostproc
- mountPath: /hostbin
name: cni-path
securityContext:
privileged: true
- command:
- /init-container.sh
env:
@ -503,7 +596,7 @@ spec:
key: wait-bpf-mount
name: cilium-config
optional: true
image: "docker.io/cilium/cilium:v1.8.0"
image: "quay.io/cilium/cilium:v1.9.9@sha256:a85d5cff13f8231c2e267d9fc3c6e43d24be4a75dac9f641c11ec46e7f17624d"
imagePullPolicy: IfNotPresent
name: clean-cilium-state
securityContext:
@ -515,6 +608,10 @@ spec:
- mountPath: /sys/fs/bpf
name: bpf-maps
mountPropagation: HostToContainer
# Required to mount cgroup filesystem from the host to cilium agent pod
- mountPath: /run/cilium/cgroupv2
name: cilium-cgroup
mountPropagation: HostToContainer
- mountPath: /var/run/cilium
name: cilium-run
resources:
@ -539,6 +636,16 @@ spec:
path: /sys/fs/bpf
type: DirectoryOrCreate
name: bpf-maps
# To mount cgroup2 filesystem on the host
- hostPath:
path: /proc
type: Directory
name: hostproc
# To keep state between restarts / upgrades for cgroup2 filesystem
- hostPath:
path: /run/cilium/cgroupv2
type: DirectoryOrCreate
name: cilium-cgroup
# To install cilium cni plugin in the host
- hostPath:
path: /opt/cni/bin
@ -568,12 +675,25 @@ spec:
- configMap:
name: cilium-config
name: cilium-config-path
updateStrategy:
rollingUpdate:
maxUnavailable: 2
type: RollingUpdate
- name: hubble-tls
projected:
sources:
- secret:
name: hubble-server-certs
items:
- key: tls.crt
path: server.crt
- key: tls.key
path: server.key
optional: true
- configMap:
name: hubble-ca-cert
items:
- key: ca.crt
path: client-ca.crt
optional: true
---
# Source: cilium/charts/operator/templates/deployment.yaml
# Source: cilium/templates/cilium-operator-deployment.yaml
apiVersion: apps/v1
kind: Deployment
metadata:
@ -583,6 +703,9 @@ metadata:
name: cilium-operator
namespace: kube-system
spec:
# We support HA mode only for Kubernetes version > 1.14
# See docs on ServerCapabilities.LeasesResourceLock in file pkg/k8s/version/version.go
# for more details.
replicas: 1
selector:
matchLabels:
@ -600,6 +723,18 @@ spec:
io.cilium/app: operator
name: cilium-operator
spec:
# In HA mode, cilium-operator pods must not be scheduled on the same
# node as they will clash with each other.
affinity:
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchExpressions:
- key: io.cilium/app
operator: In
values:
- operator
topologyKey: kubernetes.io/hostname
containers:
- args:
- --config-dir=/tmp/cilium/config-map
@ -623,25 +758,7 @@ spec:
key: debug
name: cilium-config
optional: true
- name: AWS_ACCESS_KEY_ID
valueFrom:
secretKeyRef:
key: AWS_ACCESS_KEY_ID
name: cilium-aws
optional: true
- name: AWS_SECRET_ACCESS_KEY
valueFrom:
secretKeyRef:
key: AWS_SECRET_ACCESS_KEY
name: cilium-aws
optional: true
- name: AWS_DEFAULT_REGION
valueFrom:
secretKeyRef:
key: AWS_DEFAULT_REGION
name: cilium-aws
optional: true
image: "docker.io/cilium/operator-generic:v1.8.0"
image: "quay.io/cilium/operator-generic:v1.9.9@sha256:3726a965cd960295ca3c5e7f2b543c02096c0912c6652eb8bbb9ce54bcaa99d8"
imagePullPolicy: IfNotPresent
name: cilium-operator
livenessProbe:
@ -662,6 +779,8 @@ spec:
priorityClassName: system-cluster-critical
serviceAccount: cilium-operator
serviceAccountName: cilium-operator
tolerations:
- operator: Exists
volumes:
# To read the configuration from the config map
- configMap:

View File

@ -75,6 +75,9 @@ type Runner interface {
// Copy is a convenience method that runs a command to copy a file
Copy(assets.CopyableFile) error
// CopyFrom is a convenience method that runs a command to copy a file back
CopyFrom(assets.CopyableFile) error
// Remove is a convenience method that runs a command to remove a file
Remove(assets.CopyableFile) error
}

View File

@ -184,6 +184,24 @@ func (e *execRunner) Copy(f assets.CopyableFile) error {
return writeFile(dst, f, os.FileMode(perms))
}
// CopyFrom copies a file
func (e *execRunner) CopyFrom(f assets.CopyableFile) error {
src := path.Join(f.GetTargetDir(), f.GetTargetName())
dst := f.GetSourcePath()
klog.Infof("cp: %s --> %s (%d bytes)", src, dst, f.GetLength())
if f.GetLength() == 0 {
klog.Warningf("0 byte asset: %+v", f)
}
perms, err := strconv.ParseInt(f.GetPermissions(), 8, 0)
if err != nil {
return errors.Wrapf(err, "converting permissions %s to integer", f.GetPermissions())
}
if perms > 07777 {
// errors.Wrapf(nil, ...) returns nil, so the range check needs its own error
return errors.Errorf("invalid permissions %s", f.GetPermissions())
}
return writeFile(dst, f, os.FileMode(perms))
}
// Remove removes a file
func (e *execRunner) Remove(f assets.CopyableFile) error {
dst := filepath.Join(f.GetTargetDir(), f.GetTargetName())

View File

@ -142,6 +142,19 @@ func (f *FakeCommandRunner) Copy(file assets.CopyableFile) error {
return nil
}
func (f *FakeCommandRunner) CopyFrom(file assets.CopyableFile) error {
v, ok := f.fileMap.Load(file.GetSourcePath())
if !ok {
return fmt.Errorf("not found in map")
}
b := v.(bytes.Buffer)
_, err := io.Copy(file, &b)
if err != nil {
return errors.Wrapf(err, "error writing file: %+v", file)
}
return nil
}
// Remove removes the filename, file contents key value pair from the stored map
func (f *FakeCommandRunner) Remove(file assets.CopyableFile) error {
f.fileMap.Delete(file.GetSourcePath())

View File

@ -204,6 +204,15 @@ func (k *kicRunner) Copy(f assets.CopyableFile) error {
return k.copy(tf.Name(), dst)
}
// CopyFrom copies a file
func (k *kicRunner) CopyFrom(f assets.CopyableFile) error {
src := f.GetTargetPath()
dst := f.GetSourcePath()
klog.Infof("%s (direct): %s --> %s", k.ociBin, src, dst)
return k.copyFrom(src, dst)
}
// tempDirectory returns the directory to use as the temp directory
// or an empty string if it should use the os default temp directory.
func tempDirectory(isMinikubeSnap bool, isDockerSnap bool) (string, error) {
@ -229,6 +238,14 @@ func (k *kicRunner) copy(src string, dst string) error {
return copyToDocker(src, fullDest)
}
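// copyFrom reuses the same cp helpers as copy: placing the container-name
// prefix on the source argument makes `docker cp`/`podman cp` copy out of the
// container instead of into it.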
func (k *kicRunner) copyFrom(src string, dst string) error {
fullSource := fmt.Sprintf("%s:%s", k.nameOrID, src)
if k.ociBin == oci.Podman {
return copyToPodman(fullSource, dst)
}
return copyToDocker(fullSource, dst)
}
func (k *kicRunner) chmod(dst string, perm string) error {
_, err := k.RunCmd(exec.Command("sudo", "chmod", perm, dst))
return err

View File

@ -17,11 +17,14 @@ limitations under the License.
package command
import (
"bufio"
"bytes"
"fmt"
"io"
"os/exec"
"path"
"strconv"
"strings"
"sync"
"time"
@ -373,3 +376,82 @@ func (s *SSHRunner) Copy(f assets.CopyableFile) error {
}
return g.Wait()
}
// CopyFrom copies a file from the remote over SSH.
func (s *SSHRunner) CopyFrom(f assets.CopyableFile) error {
dst := path.Join(f.GetTargetDir(), f.GetTargetName())
sess, err := s.session()
if err != nil {
return errors.Wrap(err, "NewSession")
}
defer func() {
if err := sess.Close(); err != nil {
if err != io.EOF {
klog.Errorf("session close: %v", err)
}
}
}()
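// ask the remote side for the file size first so we know how many payload bytes to expect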
cmd := exec.Command("stat", "-c", "%s", dst)
rr, err := s.RunCmd(cmd)
if err != nil {
return fmt.Errorf("%s: %v", cmd, err)
}
length, err := strconv.Atoi(strings.TrimSuffix(rr.Stdout.String(), "\n"))
if err != nil {
return err
}
src := f.GetSourcePath()
klog.Infof("scp %s --> %s (%d bytes)", dst, src, length)
f.SetLength(length)
r, err := sess.StdoutPipe()
if err != nil {
return errors.Wrap(err, "StdoutPipe")
}
w, err := sess.StdinPipe()
if err != nil {
return errors.Wrap(err, "StdinPipe")
}
// The scpcmd below *should not* return until all data is copied and the
// StdinPipe is closed. But let's use errgroup to make it explicit.
var g errgroup.Group
var copied int64
g.Go(func() error {
defer w.Close()
br := bufio.NewReader(r)
// scp source mode: send a null byte to signal readiness, then expect a
// "C<mode> <length> <filename>" header line before the file payload
fmt.Fprint(w, "\x00")
b, err := br.ReadBytes('\n')
if err != nil {
return errors.Wrap(err, "ReadBytes")
}
if b[0] != 'C' {
return fmt.Errorf("unexpected header: %v", b)
}
// ack the header, then stream exactly length payload bytes into the asset
fmt.Fprint(w, "\x00")
copied = 0
for copied < int64(length) {
// copy only the remaining bytes so a short read cannot over-copy
n, err := io.CopyN(f, br, int64(length)-copied)
if err != nil {
return errors.Wrap(err, "io.CopyN")
}
copied += n
}
// final ack so the remote scp can exit cleanly
fmt.Fprint(w, "\x00")
err = sess.Wait()
if err != nil {
return err
}
return nil
})
scp := fmt.Sprintf("sudo scp -f %s", f.GetTargetPath())
err = sess.Start(scp)
if err != nil {
return fmt.Errorf("%s: %s", scp, err)
}
return g.Wait()
}

View File

@ -34,10 +34,10 @@ var (
const (
// DefaultKubernetesVersion is the default Kubernetes version
// don't update till #10545 is solved
DefaultKubernetesVersion = "v1.21.3"
DefaultKubernetesVersion = "v1.22.1"
// NewestKubernetesVersion is the newest Kubernetes version to test against
// NOTE: You may need to update coreDNS & etcd versions in pkg/minikube/bootstrapper/images/images.go
NewestKubernetesVersion = "v1.22.0-rc.0"
NewestKubernetesVersion = "v1.22.2-rc.0"
// OldestKubernetesVersion is the oldest Kubernetes version to test against
OldestKubernetesVersion = "v1.14.0"
// DefaultClusterName is the default name for the k8s cluster

View File

@ -248,10 +248,14 @@ func (r *Containerd) Disable() error {
return r.Init.ForceStop("containerd")
}
// ImageExists checks if an image exists, expected input format
// ImageExists checks if image exists based on image name and optionally image sha
func (r *Containerd) ImageExists(name string, sha string) bool {
c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo ctr -n=k8s.io images check | grep %s | grep %s", name, sha))
if _, err := r.Runner.RunCmd(c); err != nil {
c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo ctr -n=k8s.io images check | grep %s", name))
rr, err := r.Runner.RunCmd(c)
if err != nil {
return false
}
if sha != "" && !strings.Contains(rr.Output(), sha) {
return false
}
return true

View File

@ -162,7 +162,7 @@ func (r *CRIO) Disable() error {
return r.Init.ForceStop("crio")
}
// ImageExists checks if an image exists
// ImageExists checks if image exists based on image name and optionally image sha
func (r *CRIO) ImageExists(name string, sha string) bool {
// expected output looks like [NAME@sha256:SHA]
c := exec.Command("sudo", "podman", "image", "inspect", "--format", "{{.Id}}", name)
@ -170,7 +170,7 @@ func (r *CRIO) ImageExists(name string, sha string) bool {
if err != nil {
return false
}
if !strings.Contains(rr.Output(), sha) {
if sha != "" && !strings.Contains(rr.Output(), sha) {
return false
}
return true

View File

@ -65,6 +65,8 @@ type CommandRunner interface {
WaitCmd(sc *command.StartedCmd) (*command.RunResult, error)
// Copy is a convenience method that runs a command to copy a file
Copy(assets.CopyableFile) error
// CopyFrom is a convenience method that runs a command to copy a file back
CopyFrom(assets.CopyableFile) error
// Remove is a convenience method that runs a command to remove a file
Remove(assets.CopyableFile) error
}
@ -106,7 +108,7 @@ type Manager interface {
// Push an image from the runtime to the container registry
PushImage(string) error
// ImageExists takes image name and image sha checks if an it exists
// ImageExists takes image name and optionally image sha to check if an image exists
ImageExists(string, string) bool
// ListImages returns a list of images managed by this container runtime
ListImages(ListImagesOptions) ([]string, error)

View File

@ -236,6 +236,10 @@ func (f *FakeRunner) Copy(assets.CopyableFile) error {
return nil
}
func (f *FakeRunner) CopyFrom(assets.CopyableFile) error {
return nil
}
func (f *FakeRunner) Remove(assets.CopyableFile) error {
return nil
}

View File

@ -165,7 +165,7 @@ func (r *Docker) Disable() error {
return r.Init.Mask("docker.service")
}
// ImageExists checks if an image exists
// ImageExists checks if image exists based on image name and optionally image sha
func (r *Docker) ImageExists(name string, sha string) bool {
// expected output looks like [SHA_ALGO:SHA]
c := exec.Command("docker", "image", "inspect", "--format", "{{.Id}}", name)
@ -173,7 +173,7 @@ func (r *Docker) ImageExists(name string, sha string) bool {
if err != nil {
return false
}
if !strings.Contains(rr.Output(), sha) {
if sha != "" && !strings.Contains(rr.Output(), sha) {
return false
}
return true
@ -201,7 +201,7 @@ func (r *Docker) ListImages(ListImagesOptions) ([]string, error) {
// LoadImage loads an image into this runtime
func (r *Docker) LoadImage(path string) error {
klog.Infof("Loading image: %s", path)
c := exec.Command("docker", "load", "-i", path)
c := exec.Command("/bin/bash", "-c", fmt.Sprintf("sudo cat %s | docker load", path))
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrap(err, "loadimage docker.")
}
@ -224,7 +224,7 @@ func (r *Docker) PullImage(name string) error {
// SaveImage saves an image from this runtime
func (r *Docker) SaveImage(name string, path string) error {
klog.Infof("Saving image %s: %s", name, path)
c := exec.Command("docker", "save", name, "-o", path)
c := exec.Command("/bin/bash", "-c", fmt.Sprintf("docker save '%s' | sudo tee %s >/dev/null", name, path))
if _, err := r.Runner.RunCmd(c); err != nil {
return errors.Wrap(err, "saveimage docker.")
}

View File

@ -33,10 +33,12 @@ import (
"github.com/google/go-containerregistry/pkg/v1/daemon"
"github.com/google/go-containerregistry/pkg/v1/mutate"
"github.com/google/go-containerregistry/pkg/v1/remote"
"github.com/google/go-containerregistry/pkg/v1/tarball"
"github.com/pkg/errors"
"k8s.io/klog/v2"
"k8s.io/minikube/pkg/minikube/constants"
"k8s.io/minikube/pkg/minikube/localpath"
)
const (
@ -191,6 +193,62 @@ func retrieveRemote(ref name.Reference, p v1.Platform) (v1.Image, error) {
return img, err
}
// imagePathInCache returns path in local cache directory
func imagePathInCache(img string) string {
f := filepath.Join(constants.ImageCacheDir, img)
f = localpath.SanitizeCacheDir(f)
return f
}
func UploadCachedImage(imgName string) error {
tag, err := name.NewTag(imgName, name.WeakValidation)
if err != nil {
klog.Infof("error parsing image name %s tag %v ", imgName, err)
return err
}
return uploadImage(tag, imagePathInCache(imgName))
}
func uploadImage(tag name.Tag, p string) error {
var err error
var img v1.Image
if !useDaemon && !useRemote {
return fmt.Errorf("neither daemon nor remote")
}
img, err = tarball.ImageFromPath(p, &tag)
if err != nil {
return errors.Wrap(err, "tarball")
}
ref := name.Reference(tag)
klog.Infof("uploading image: %+v from: %s", ref, p)
if useDaemon {
return uploadDaemon(tag, img)
}
if useRemote {
return uploadRemote(ref, img, defaultPlatform)
}
return nil
}
func uploadDaemon(tag name.Tag, img v1.Image) error {
resp, err := daemon.Write(tag, img)
if err != nil {
klog.Warningf("daemon load for %s: %v\n%s", tag, err, resp)
}
return err
}
func uploadRemote(ref name.Reference, img v1.Image, p v1.Platform) error {
err := remote.Write(ref, img, remote.WithAuthFromKeychain(authn.DefaultKeychain), remote.WithPlatform(p))
if err != nil {
klog.Warningf("remote push for %s: %v", ref, err)
}
return err
}
// See https://github.com/kubernetes/minikube/issues/10402
// check if downloaded image Architecture field matches the requested and fix it otherwise
func fixPlatform(ref name.Reference, img v1.Image, p v1.Platform) (v1.Image, error) {

View File

@ -19,6 +19,7 @@ package machine
import (
"fmt"
"os"
"os/exec"
"path"
"path/filepath"
"sort"
@ -48,6 +49,9 @@ var loadRoot = path.Join(vmpath.GuestPersistentDir, "images")
// loadImageLock is used to serialize image loads to avoid overloading the guest VM
var loadImageLock sync.Mutex
// saveRoot is where images should be saved from within the guest VM
var saveRoot = path.Join(vmpath.GuestPersistentDir, "images")
// CacheImagesForBootstrapper will cache images for a bootstrapper
func CacheImagesForBootstrapper(imageRepository string, version string, clusterBootstrapper string) error {
images, err := bootstrapper.GetCachedImageList(imageRepository, version, clusterBootstrapper)
@ -326,6 +330,177 @@ func removeExistingImage(r cruntime.Manager, src string, imgName string) error {
return nil
}
// SaveCachedImages saves from the container runtime to the cache
func SaveCachedImages(cc *config.ClusterConfig, runner command.Runner, images []string, cacheDir string) error {
klog.Infof("SaveImages start: %s", images)
start := time.Now()
defer func() {
klog.Infof("SaveImages completed in %s", time.Since(start))
}()
var g errgroup.Group
for _, image := range images {
image := image
g.Go(func() error {
return transferAndSaveCachedImage(runner, cc.KubernetesConfig, image, cacheDir)
})
}
if err := g.Wait(); err != nil {
return errors.Wrap(err, "saving cached images")
}
klog.Infoln("Successfully saved all cached images")
return nil
}
// SaveLocalImages saves images from the container runtime
func SaveLocalImages(cc *config.ClusterConfig, runner command.Runner, images []string, output string) error {
var g errgroup.Group
for _, image := range images {
image := image
g.Go(func() error {
return transferAndSaveImage(runner, cc.KubernetesConfig, output, image)
})
}
if err := g.Wait(); err != nil {
return errors.Wrap(err, "saving images")
}
klog.Infoln("Successfully saved all images")
return nil
}
// SaveAndCacheImages saves images from all profiles into the cache
func SaveAndCacheImages(images []string, profiles []*config.Profile) error {
if len(images) == 0 {
return nil
}
return DoSaveImages(images, "", profiles, constants.ImageCacheDir)
}
// DoSaveImages saves images from all profiles
func DoSaveImages(images []string, output string, profiles []*config.Profile, cacheDir string) error {
api, err := NewAPIClient()
if err != nil {
return errors.Wrap(err, "api")
}
defer api.Close()
klog.Infof("Save images: %q", images)
succeeded := []string{}
failed := []string{}
for _, p := range profiles { // saving images from all running profiles
pName := p.Name // capture the loop variable
c, err := config.Load(pName)
if err != nil {
// Non-fatal because it may race with profile deletion
klog.Errorf("Failed to load profile %q: %v", pName, err)
failed = append(failed, pName)
continue
}
for _, n := range c.Nodes {
m := config.MachineName(*c, n)
status, err := Status(api, m)
if err != nil {
klog.Warningf("error getting status for %s: %v", m, err)
failed = append(failed, m)
continue
}
if status == state.Running.String() { // the not running hosts will load on next start
h, err := api.Load(m)
if err != nil {
klog.Warningf("Failed to load machine %q: %v", m, err)
failed = append(failed, m)
continue
}
cr, err := CommandRunner(h)
if err != nil {
return err
}
if cacheDir != "" {
// saving image names to the cache
err = SaveCachedImages(c, cr, images, cacheDir)
} else {
// saving image files to the given output
err = SaveLocalImages(c, cr, images, output)
}
if err != nil {
failed = append(failed, m)
klog.Warningf("Failed to load cached images for profile %s. make sure the profile is running. %v", pName, err)
continue
}
succeeded = append(succeeded, m)
}
}
}
klog.Infof("succeeded pulling from : %s", strings.Join(succeeded, " "))
klog.Infof("failed pulling from : %s", strings.Join(failed, " "))
// Live pushes are not considered a failure
return nil
}
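// NOTE: a minimal calling sketch (the profile name and image are examples):
//
//	p, err := config.LoadProfile("minikube")
//	if err != nil {
//		return err
//	}
//	// empty cacheDir writes the archive to output; a non-empty cacheDir
//	// (with an empty output) saves into the local image cache instead
//	err = DoSaveImages([]string{"docker.io/library/busybox:1.29"},
//		"/tmp/busybox.tar", []*config.Profile{p}, "")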
// transferAndSaveCachedImage transfers and loads a single image from the cache
func transferAndSaveCachedImage(cr command.Runner, k8s config.KubernetesConfig, imgName string, cacheDir string) error {
dst := filepath.Join(cacheDir, imgName)
dst = localpath.SanitizeCacheDir(dst)
return transferAndSaveImage(cr, k8s, dst, imgName)
}
// transferAndSaveImage transfers and loads a single image
func transferAndSaveImage(cr command.Runner, k8s config.KubernetesConfig, dst string, imgName string) error {
r, err := cruntime.New(cruntime.Config{Type: k8s.ContainerRuntime, Runner: cr})
if err != nil {
return errors.Wrap(err, "runtime")
}
if !r.ImageExists(imgName, "") {
return errors.Errorf("image %s not found", imgName)
}
klog.Infof("Saving image to: %s", dst)
filename := filepath.Base(dst)
// ensure the destination file exists before wrapping it in a file asset,
// and close the handle so it doesn't leak
fh, err := os.OpenFile(dst, os.O_CREATE|os.O_WRONLY, 0777)
if err != nil {
return err
}
fh.Close()
f, err := assets.NewFileAsset(dst, saveRoot, filename, "0644")
if err != nil {
return errors.Wrapf(err, "creating copyable file asset: %s", filename)
}
defer func() {
if err := f.Close(); err != nil {
klog.Warningf("error closing the file %s: %v", f.GetSourcePath(), err)
}
}()
src := path.Join(saveRoot, filename)
args := append([]string{"rm", "-f"}, src)
if _, err := cr.RunCmd(exec.Command("sudo", args...)); err != nil {
return err
}
err = r.SaveImage(imgName, src)
if err != nil {
return errors.Wrapf(err, "%s save %s", r.Name(), src)
}
if err := cr.CopyFrom(f); err != nil {
return errors.Wrap(err, "transferring cached image")
}
klog.Infof("Transferred and saved %s to cache", dst)
return nil
}
// pullImages pulls images to the container run time
func pullImages(cruntime cruntime.Manager, images []string) error {
klog.Infof("PullImages start: %s", images)

View File

@ -317,10 +317,12 @@ var (
GuestImageRemove = Kind{ID: "GUEST_IMAGE_REMOVE", ExitCode: ExGuestError}
// minikube failed to pull an image
GuestImagePull = Kind{ID: "GUEST_IMAGE_PULL", ExitCode: ExGuestError}
// minikube failed to push an image
GuestImagePush = Kind{ID: "GUEST_IMAGE_PUSH", ExitCode: ExGuestError}
// minikube failed to build an image
GuestImageBuild = Kind{ID: "GUEST_IMAGE_BUILD", ExitCode: ExGuestError}
// minikube failed to push or save an image
GuestImageSave = Kind{ID: "GUEST_IMAGE_SAVE", ExitCode: ExGuestError}
// minikube failed to push an image
GuestImagePush = Kind{ID: "GUEST_IMAGE_PUSH", ExitCode: ExGuestError}
// minikube failed to tag an image
GuestImageTag = Kind{ID: "GUEST_IMAGE_TAG", ExitCode: ExGuestError}
// minikube failed to load host

View File

@ -125,6 +125,17 @@ REM @FOR /f "tokens=*" %%i IN ('%s') DO @%%i
`, s...)
},
},
"tcsh": {
prefix: "setenv ",
suffix: "\";\n",
delimiter: " \"",
unsetPrefix: "unsetenv ",
unsetSuffix: ";\n",
unsetDelimiter: "",
usageHint: func(s ...interface{}) string {
return fmt.Sprintf("\n: \"%s\"\n: eval `%s`\n", s...)
},
},
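// renders as e.g. `setenv DOCKER_TLS_VERIFY "1";` and `unsetenv DOCKER_TLS_VERIFY;`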
"none": {
prefix: "",
suffix: "\n",

View File

@ -41,6 +41,7 @@ func TestGenerateUsageHint(t *testing.T) {
{EnvConfig{"fish"}, `# foo
# bar | source`},
{EnvConfig{"none"}, ``},
{EnvConfig{"tcsh"}, "\n: \"foo\"\n: eval `bar`\n"},
}
for _, tc := range testCases {
tc := tc
@ -67,6 +68,7 @@ func TestCfgSet(t *testing.T) {
{"", "eval", EnvConfig{"emacs"}, `")`},
{"", "eval", EnvConfig{"none"}, ``},
{"", "eval", EnvConfig{"fish"}, `";`},
{"", "eval", EnvConfig{"tcsh"}, `";`},
}
for _, tc := range testCases {
tc := tc
@ -100,6 +102,7 @@ set -e bar;`},
{[]string{"baz", "bar"}, EnvConfig{"emacs"}, `(setenv "baz" nil)
(setenv "bar" nil)`},
{[]string{"baz", "bar"}, EnvConfig{"none"}, "baz\nbar"},
{[]string{"baz", "bar"}, EnvConfig{"tcsh"}, "unsetenv baz;\nunsetenv bar;"},
}
for _, tc := range testCases {
tc := tc

View File

@ -22,6 +22,8 @@ minikube quickly sets up a local Kubernetes cluster on macOS, Linux, and Windows
* Docker API endpoint for blazing fast [image pushes]({{< ref "/docs/handbook/pushing.md#pushing-directly-to-the-in-cluster-docker-daemon" >}})
* Advanced features such as [LoadBalancer]({{< ref "/docs/handbook/accessing.md#loadbalancer-access" >}}), filesystem mounts, and FeatureGates
* [Addons]({{< ref "/docs/handbook/deploying.md#addons" >}}) for easily installed Kubernetes applications
* Supports common [CI environments](https://github.com/minikube-ci/examples)
## Survey

View File

@ -348,6 +348,54 @@ $ minikube image unload image busybox
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
## minikube image save
Save an image from minikube
### Synopsis
Save an image from minikube
```shell
minikube image save IMAGE [ARCHIVE | -] [flags]
```
### Examples
```
minikube image save image
minikube image save image image.tar
```
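`ARCHIVE` may also be `-` to write the tarball to standard output, e.g. `minikube image save image - > image.tar`.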
### Options
```
--daemon Cache image to docker daemon
--remote Cache image to remote registry
```
### Options inherited from parent commands
```
--add_dir_header If true, adds the file directory to the header of the log messages
--alsologtostderr log to standard error as well as files
-b, --bootstrapper string The name of the cluster bootstrapper that will set up the Kubernetes cluster. (default "kubeadm")
-h, --help
--log_backtrace_at traceLocation when logging hits line file:N, emit a stack trace (default :0)
--log_dir string If non-empty, write log files in this directory
--log_file string If non-empty, use this log file
--log_file_max_size uint Defines the maximum size a log file can grow to. Unit is megabytes. If the value is 0, the maximum file size is unlimited. (default 1800)
--logtostderr log to standard error instead of files
--one_output If true, only write logs to their native severity level (vs also writing to each lower severity level)
-p, --profile string The name of the minikube VM being used. This can be set to allow having multiple instances of minikube independently. (default "minikube")
--skip_headers If true, avoid header prefixes in the log messages
--skip_log_headers If true, avoid headers when opening log files
--stderrthreshold severity logs at or above this threshold go to stderr (default 2)
--user string Specifies the user executing the operation. Useful for auditing operations executed by 3rd party tools. Defaults to the operating system username.
-v, --v Level number for the log level verbosity
--vmodule moduleSpec comma-separated list of pattern=N settings for file-filtered logging
```
## minikube image tag
Tag images

View File

@ -67,7 +67,7 @@ minikube start [flags]
--interactive Allow user prompts for more information (default true)
--iso-url strings Locations to fetch the minikube ISO from. (default [https://storage.googleapis.com/minikube-builds/iso/12268/minikube-v1.22.0-1628974786-12268.iso,https://github.com/kubernetes/minikube/releases/download/v1.22.0-1628974786-12268/minikube-v1.22.0-1628974786-12268.iso,https://kubernetes.oss-cn-hangzhou.aliyuncs.com/minikube/iso/minikube-v1.22.0-1628974786-12268.iso])
--keep-context This will keep the existing kubectl context and will create a minikube context.
--kubernetes-version string The Kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.21.3, 'latest' for v1.22.0-rc.0). Defaults to 'stable'.
--kubernetes-version string The Kubernetes version that the minikube VM will use (ex: v1.2.3, 'stable' for v1.22.1, 'latest' for v1.22.2-rc.0). Defaults to 'stable'.
--kvm-gpu Enable experimental NVIDIA GPU support in minikube
--kvm-hidden Hide the hypervisor signature from the guest in minikube (kvm2 driver only)
--kvm-network string The KVM default network name. (kvm2 driver only) (default "default")

View File

@ -381,12 +381,15 @@ minikube failed to remove an image
"GUEST_IMAGE_PULL" (Exit code ExGuestError)
minikube failed to pull an image
"GUEST_IMAGE_PUSH" (Exit code ExGuestError)
minikube failed to push an image
"GUEST_IMAGE_BUILD" (Exit code ExGuestError)
minikube failed to build an image
"GUEST_IMAGE_SAVE" (Exit code ExGuestError)
minikube failed to push or save an image
"GUEST_IMAGE_PUSH" (Exit code ExGuestError)
minikube failed to push an image
"GUEST_IMAGE_TAG" (Exit code ExGuestError)
minikube failed to tag an image

View File

@ -84,6 +84,12 @@ makes sure that `minikube image load` works from a local file
#### validateRemoveImage
makes sure that `minikube image rm` works as expected
#### validateSaveImage
makes sure that `minikube image save` works as expected
#### validateSaveImageToFile
makes sure that `minikube image save` works to a local file
#### validateBuildImage
makes sure that `minikube image build` works as expected

View File

@ -2,14 +2,35 @@
title: "Continuous Integration"
weight: 1
description: >
Using minikube for Continuous Integration
How to run minikube in CI (Continuous Integration)
---
## Overview
Most continuous integration environments are already running inside a VM, and may not support nested virtualization.
The `docker` driver was designed for this use case, as well as the older `none` driver.
Most continuous integration environments are already running inside a VM, and may not support nested virtualization.
You can use either the `none` or the `docker` driver in CI.
For working examples of running minikube in CI, check out [minikube-ci/examples](https://github.com/minikube-ci/examples).
## Supported / Tested CI Platforms
For any platform not yet listed, we are looking for your help! Please file Pull Requests and/or Issues for missing CI platforms 😄
| Platform | Known to Work? | Status |
|---|---|--|
| [Prow](https://github.com/kubernetes/test-infra/tree/master/prow) | [Yes](https://github.com/kubernetes/test-infra/tree/master/config/jobs/kubernetes/minikube) ✔️ | [![Prow](https://prow.k8s.io/badge.svg?jobs=pull-minikube-build)](https://prow.k8s.io/?job=pull-minikube-build) |
| [Google Cloud Build](https://cloud.google.com/cloud-build/) | [Yes](https://github.com/minikube-ci/examples/blob/master/gcb.md) ✔️ | [![cloud build status](https://storage.googleapis.com/minikube-ci-example/build/working.svg)](https://pantheon.corp.google.com/cloud-build/dashboard?project=k8s-minikube) |
| [Github](https://help.github.com/en/actions/automating-your-workflow-with-github-actions/about-continuous-integration) | [Yes](https://github.com/minikube-ci/examples/blob/master/.github/workflows/minikube.yml) ✔️ | [![Github](https://github.com/minikube-ci/examples/workflows/Minikube/badge.svg)](https://github.com/minikube-ci/examples/actions) |
| [Azure Pipelines](https://azure.microsoft.com/en-us/services/devops/pipelines/) | [Yes](https://github.com/minikube-ci/examples/blob/master/azure-pipelines.yml) ✔️ | [![Azure Pipelines](https://dev.azure.com/medyagh0825/minikube-ci/_apis/build/status/examples?api-version=5.1-preview.1)](https://dev.azure.com/medyagh0825/minikube-ci/_build)
| [Travis CI](https://travis-ci.com/) | [Yes](https://github.com/minikube-ci/examples/blob/master/.travis.yml) ✔️ | [![Travis CI](https://travis-ci.com/minikube-ci/examples.svg?branch=master)](https://travis-ci.com/minikube-ci/examples/) |
| [CircleCI](https://circleci.com/) | [Yes](https://github.com/minikube-ci/examples/blob/master/.circleci) ✔️ | [![CircleCI](https://circleci.com/gh/minikube-ci/examples.svg?style=svg)](https://circleci.com/gh/minikube-ci/examples) |
| [Gitlab](https://about.gitlab.com/product/continuous-integration/) | [Yes](https://github.com/minikube-ci/examples/blob/master/.gitlab-ci.yml) ✔️ | Gitlab |
## Example

View File

@ -163,16 +163,16 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) {
t.Fatalf("failed to get Kubernetes client: %v", client)
}
if err := kapi.WaitForDeploymentToStabilize(client, "ingress-nginx", "ingress-nginx-controller", Minutes(6)); err != nil {
t.Errorf("failed waiting for ingress-controller deployment to stabilize: %v", err)
}
if _, err := PodWait(ctx, t, profile, "ingress-nginx", "app.kubernetes.io/name=ingress-nginx", Minutes(12)); err != nil {
// avoid timeouts like:
// Error from server (InternalError): Internal error occurred: failed calling webhook "validate.nginx.ingress.kubernetes.io": Post "https://ingress-nginx-controller-admission.ingress-nginx.svc:443/networking/v1/ingresses?timeout=10s": dial tcp 10.107.218.58:443: i/o timeout
// Error from server (InternalError): Internal error occurred: failed calling webhook "validate.nginx.ingress.kubernetes.io": Post "https://ingress-nginx-controller-admission.ingress-nginx.svc:443/networking/v1/ingresses?timeout=10s": context deadline exceeded
if _, err = Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "wait", "--for=condition=ready", "--namespace=ingress-nginx", "pod", "--selector=app.kubernetes.io/component=controller", "--timeout=90s")); err != nil {
t.Fatalf("failed waititing for ingress-nginx-controller : %v", err)
}
// create networking.k8s.io/v1 ingress
createv1Ingress := func() error {
// apply networking.k8s.io/v1beta1 ingress
// apply networking.k8s.io/v1 ingress
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "replace", "--force", "-f", filepath.Join(*testdataDir, "nginx-ingv1.yaml")))
if err != nil {
return err
@ -182,8 +182,6 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) {
}
return nil
}
// create networking.k8s.io/v1 ingress
if err := retry.Expo(createv1Ingress, 1*time.Second, Seconds(90)); err != nil {
t.Errorf("failed to create ingress: %v", err)
}
@ -202,43 +200,6 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) {
want := "Welcome to nginx!"
addr := "http://127.0.0.1/"
// check if the ingress can route nginx app with networking.k8s.io/v1beta1 ingress
checkv1betaIngress := func() error {
var rr *RunResult
var err error
if NoneDriver() { // just run curl directly on the none driver
rr, err = Run(t, exec.CommandContext(ctx, "curl", "-s", addr, "-H", "'Host: nginx.example.com'"))
if err != nil {
return err
}
} else {
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "ssh", fmt.Sprintf("curl -s %s -H 'Host: nginx.example.com'", addr)))
if err != nil {
return err
}
}
stderr := rr.Stderr.String()
if rr.Stderr.String() != "" {
t.Logf("debug: unexpected stderr for %v:\n%s", rr.Command(), stderr)
}
stdout := rr.Stdout.String()
if !strings.Contains(stdout, want) {
return fmt.Errorf("%v stdout = %q, want %q", rr.Command(), stdout, want)
}
return nil
}
// check if the ingress can route nginx app with networking.k8s.io/v1beta1 ingress
if err := retry.Expo(checkv1betaIngress, 500*time.Millisecond, Seconds(90)); err != nil {
t.Errorf("failed to get expected response from %s within minikube: %v", addr, err)
}
// create networking.k8s.io/v1 ingress
if err := retry.Expo(createv1Ingress, 1*time.Second, Seconds(90)); err != nil {
t.Errorf("failed to create ingress: %v", err)
}
// check if the ingress can route nginx app with networking.k8s.io/v1 ingress
checkv1Ingress := func() error {
@ -255,20 +216,16 @@ func validateIngressAddon(ctx context.Context, t *testing.T, profile string) {
return err
}
}
stderr := rr.Stderr.String()
if rr.Stderr.String() != "" {
t.Logf("debug: unexpected stderr for %v:\n%s", rr.Command(), stderr)
}
stdout := rr.Stdout.String()
if !strings.Contains(stdout, want) {
return fmt.Errorf("%v stdout = %q, want %q", rr.Command(), stdout, want)
}
return nil
}
// check if the ingress can route nginx app with networking.k8s.io/v1 ingress
if err := retry.Expo(checkv1Ingress, 500*time.Millisecond, Seconds(90)); err != nil {
t.Errorf("failed to get expected response from %s within minikube: %v", addr, err)
}
@ -462,37 +419,27 @@ func validateHelmTillerAddon(ctx context.Context, t *testing.T, profile string)
func validateOlmAddon(ctx context.Context, t *testing.T, profile string) {
defer PostMortemLogs(t, profile)
client, err := kapi.Client(profile)
if err != nil {
t.Fatalf("failed to get Kubernetes client for %s: %v", profile, err)
}
start := time.Now()
if err := kapi.WaitForDeploymentToStabilize(client, "olm", "catalog-operator", Minutes(6)); err != nil {
t.Errorf("failed waiting for catalog-operator deployment to stabilize: %v", err)
if _, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "wait", "--for=condition=ready", "--namespace=olm", "pod", "--selector=app=catalog-operator", "--timeout=90s")); err != nil {
t.Fatalf("failed waititing for pod catalog-operator: %v", err)
}
t.Logf("catalog-operator stabilized in %s", time.Since(start))
if err := kapi.WaitForDeploymentToStabilize(client, "olm", "olm-operator", Minutes(6)); err != nil {
t.Errorf("failed waiting for olm-operator deployment to stabilize: %v", err)
if _, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "wait", "--for=condition=ready", "--namespace=olm", "pod", "--selector=app=olm-operator", "--timeout=90s")); err != nil {
t.Fatalf("failed waititing for pod olm-operator: %v", err)
}
t.Logf("olm-operator stabilized in %s", time.Since(start))
if err := kapi.WaitForDeploymentToStabilize(client, "olm", "packageserver", Minutes(6)); err != nil {
t.Errorf("failed waiting for packageserver deployment to stabilize: %v", err)
if _, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "wait", "--for=condition=ready", "--namespace=olm", "pod", "--selector=app=packageserver", "--timeout=90s")); err != nil {
t.Fatalf("failed waititing for pod olm-operator: %v", err)
}
t.Logf("packageserver stabilized in %s", time.Since(start))
if _, err := PodWait(ctx, t, profile, "olm", "app=catalog-operator", Minutes(6)); err != nil {
t.Fatalf("failed waiting for pod catalog-operator: %v", err)
}
if _, err := PodWait(ctx, t, profile, "olm", "app=olm-operator", Minutes(6)); err != nil {
t.Fatalf("failed waiting for pod olm-operator: %v", err)
}
if _, err := PodWait(ctx, t, profile, "olm", "app=packageserver", Minutes(6)); err != nil {
t.Fatalf("failed waiting for pod packageserver: %v", err)
}
if _, err := PodWait(ctx, t, profile, "olm", "olm.catalogSource=operatorhubio-catalog", Minutes(6)); err != nil {
t.Fatalf("failed waiting for pod operatorhubio-catalog: %v", err)
if _, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "wait", "--for=condition=ready", "--namespace=olm", "pod", "--selector=olm.catalogSource=operatorhubio-catalog", "--timeout=90s")); err != nil {
t.Fatalf("failed waititing for pod operatorhubio-catalog: %v", err)
}
t.Logf("operatorhubio-catalog stabilized in %s", time.Since(start))
// Install one sample Operator such as etcd
rr, err := Run(t, exec.CommandContext(ctx, "kubectl", "--context", profile, "create", "-f", filepath.Join(*testdataDir, "etcd.yaml")))
@ -514,7 +461,6 @@ func validateOlmAddon(ctx context.Context, t *testing.T, profile string) {
}
return nil
}
// Operator installation takes a while
if err := retry.Expo(checkOperatorInstalled, time.Second*3, Minutes(10)); err != nil {
t.Errorf("failed checking operator installed: %v", err.Error())

View File

@ -151,8 +151,10 @@ func TestFunctional(t *testing.T) {
{"PodmanEnv", validatePodmanEnv},
{"NodeLabels", validateNodeLabels},
{"LoadImage", validateLoadImage},
{"SaveImage", validateSaveImage},
{"RemoveImage", validateRemoveImage},
{"LoadImageFromFile", validateLoadImageFromFile},
{"SaveImageToFile", validateSaveImageToFile},
{"BuildImage", validateBuildImage},
{"ListImages", validateListImages},
{"NonActiveRuntimeDisabled", validateNotActiveRuntimeDisabled},
@ -206,7 +208,6 @@ func cleanupUnwantedImages(ctx context.Context, t *testing.T, profile string) {
}
})
}
}
// validateNodeLabels checks if minikube cluster is created with correct kubernetes's node label
@ -249,7 +250,7 @@ func validateLoadImage(ctx context.Context, t *testing.T, profile string) {
}
// try to load the new image into minikube
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "load", newImage))
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "load", "--daemon", newImage))
if err != nil {
t.Fatalf("loading image into minikube: %v\n%s", err, rr.Output())
}
@ -289,7 +290,7 @@ func validateLoadImageFromFile(ctx context.Context, t *testing.T, profile string
}
// save image to file
imageFile := "busybox.tar"
imageFile := "busybox-load.tar"
rr, err = Run(t, exec.CommandContext(ctx, "docker", "save", "-o", imageFile, taggedImage))
if err != nil {
t.Fatalf("failed to save image to file: %v\n%s", err, rr.Output())
@ -302,7 +303,7 @@ func validateLoadImageFromFile(ctx context.Context, t *testing.T, profile string
t.Fatalf("failed to get absolute path of file %q: %v", imageFile, err)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "load", imagePath))
if err != nil {
if err != nil || rr.Stderr.String() != "" {
t.Fatalf("loading image into minikube: %v\n%s", err, rr.Output())
}
@ -312,7 +313,7 @@ func validateLoadImageFromFile(ctx context.Context, t *testing.T, profile string
t.Fatalf("listing images: %v\n%s", err, rr.Output())
}
if !strings.Contains(rr.Output(), tag) {
t.Fatalf("expected %s to be loaded into minikube but the image is not there", taggedImage)
t.Fatalf("expected %s to be loaded into minikube but the image is not there: %v", taggedImage, rr.Output())
}
}
@ -363,6 +364,101 @@ func validateRemoveImage(ctx context.Context, t *testing.T, profile string) {
}
// validateSaveImage makes sure that `minikube image save` works as expected
func validateSaveImage(ctx context.Context, t *testing.T, profile string) {
if NoneDriver() {
t.Skip("load image not available on none driver")
}
if GithubActionRunner() && runtime.GOOS == "darwin" {
t.Skip("skipping on github actions and darwin, as this test requires a running docker daemon")
}
defer PostMortemLogs(t, profile)
// pull busybox
busyboxImage := "docker.io/library/busybox:1.29"
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "pull", busyboxImage))
if err != nil {
t.Fatalf("failed to setup test (pull image): %v\n%s", err, rr.Output())
}
// tag busybox
name := "busybox"
tag := fmt.Sprintf("save-%s", profile)
newImage := fmt.Sprintf("docker.io/library/%s:%s", name, tag)
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "tag", busyboxImage, newImage))
if err != nil {
t.Fatalf("failed to setup test (tag image) : %v\n%s", err, rr.Output())
}
// try to save the new image from minikube
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "save", "--daemon", newImage))
if err != nil {
t.Fatalf("loading image into minikube: %v\n%s", err, rr.Output())
}
// make sure the image was correctly saved to the local docker daemon
rr, err = Run(t, exec.CommandContext(ctx, "docker", "images", name))
if err != nil {
t.Fatalf("listing images: %v\n%s", err, rr.Output())
}
if !strings.Contains(rr.Output(), tag) {
t.Fatalf("expected %s to be saved from minikube but the image is not there", newImage)
}
}
}
// validateSaveImageToFile makes sure that `minikube image save` works to a local file
func validateSaveImageToFile(ctx context.Context, t *testing.T, profile string) {
if NoneDriver() {
t.Skip("save image not available on none driver")
}
if GithubActionRunner() && runtime.GOOS == "darwin" {
t.Skip("skipping on github actions and darwin, as this test requires a running docker daemon")
}
defer PostMortemLogs(t, profile)
// pull busybox
busyboxImage := "docker.io/library/busybox:1.30"
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "pull", busyboxImage))
if err != nil {
t.Fatalf("failed to setup test (pull image): %v\n%s", err, rr.Output())
}
name := "busybox"
tag := fmt.Sprintf("save-to-file-%s", profile)
taggedImage := fmt.Sprintf("docker.io/library/%s:%s", name, tag)
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "tag", busyboxImage, taggedImage))
if err != nil {
t.Fatalf("failed to setup test (tag image) : %v\n%s", err, rr.Output())
}
// try to save the new image from minikube
imageFile := "busybox-save.tar"
imagePath, err := filepath.Abs(imageFile)
if err != nil {
t.Fatalf("failed to get absolute path of file %q: %v", imageFile, err)
}
rr, err = Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "image", "save", taggedImage, imagePath))
if err != nil {
t.Fatalf("saving image from minikube: %v\n%s", err, rr.Output())
}
defer os.Remove(imageFile)
// load the image back from the file
rr, err = Run(t, exec.CommandContext(ctx, "docker", "load", "-i", imagePath))
if err != nil {
t.Fatalf("failed to load image from file: %v\n%s", err, rr.Output())
}
// make sure the image was correctly loaded
rr, err = Run(t, exec.CommandContext(ctx, "docker", "images", name))
if err != nil {
t.Fatalf("listing images: %v\n%s", err, rr.Output())
}
if !strings.Contains(rr.Output(), tag) {
t.Fatalf("expected %s to be loaded but the image is not there", taggedImage)
}
}
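The assertion above only greps `docker images` for the tag after a round-trip through `docker load`. As a complementary check, a docker-style image archive written by `image save` should carry a top-level manifest.json entry; the sketch below scans the tar for it. This is a standalone illustration assuming that archive layout, not part of the test suite:

package main

import (
	"archive/tar"
	"fmt"
	"io"
	"os"
)

// hasManifest reports whether the tarball at path contains a top-level
// manifest.json, as docker-style image archives do.
func hasManifest(path string) (bool, error) {
	f, err := os.Open(path)
	if err != nil {
		return false, err
	}
	defer f.Close()
	tr := tar.NewReader(f)
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return false, nil // reached the end without finding it
		}
		if err != nil {
			return false, err
		}
		if hdr.Name == "manifest.json" {
			return true, nil
		}
	}
}

func main() {
	ok, err := hasManifest("busybox-save.tar")
	fmt.Println(ok, err)
}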
func inspectImage(ctx context.Context, t *testing.T, profile string, image string) (*RunResult, error) {
var cmd *exec.Cmd
if ContainerRuntime() == "docker" {
@ -467,53 +563,76 @@ func validateDockerEnv(ctx context.Context, t *testing.T, profile string) {
t.Skipf("only validate docker env with docker container runtime, currently testing %s", cr)
}
defer PostMortemLogs(t, profile)
mctx, cancel := context.WithTimeout(ctx, Seconds(120))
defer cancel()
var rr *RunResult
var err error
type ShellTest struct {
name string
commandPrefix []string
formatArg string
}
windowsTests := []ShellTest{
{"powershell", []string{"powershell.exe", "-NoProfile", "-NonInteractive"}, "%[1]s -p %[2]s docker-env | Invoke-Expression ; "},
}
posixTests := []ShellTest{
{"bash", []string{"/bin/bash", "-c"}, "eval $(%[1]s -p %[2]s docker-env) && "},
}
tests := posixTests
if runtime.GOOS == "windows" {
c := exec.CommandContext(mctx, "powershell.exe", "-NoProfile", "-NonInteractive", Target()+" -p "+profile+" docker-env | Invoke-Expression ;"+Target()+" status -p "+profile)
rr, err = Run(t, c)
} else {
c := exec.CommandContext(mctx, "/bin/bash", "-c", "eval $("+Target()+" -p "+profile+" docker-env) && "+Target()+" status -p "+profile)
// we should be able to get minikube status from a bash session that has eval'd docker-env
rr, err = Run(t, c)
}
if mctx.Err() == context.DeadlineExceeded {
t.Errorf("failed to run the command by deadline. exceeded timeout. %s", rr.Command())
}
if err != nil {
t.Fatalf("failed to do status after eval-ing docker-env. error: %v", err)
}
if !strings.Contains(rr.Output(), "Running") {
t.Fatalf("expected status output to include 'Running' after eval docker-env but got: *%s*", rr.Output())
}
if !strings.Contains(rr.Output(), "in-use") {
t.Fatalf("expected status output to include `in-use` after eval docker-env but got *%s*", rr.Output())
tests = windowsTests
}
for _, tc := range tests {
t.Run(tc.name, func(t *testing.T) {
mctx, cancel := context.WithTimeout(ctx, Seconds(120))
defer cancel()
mctx, cancel = context.WithTimeout(ctx, Seconds(60))
defer cancel()
// do an eval $(minikube -p profile docker-env) and check that we are pointed at the docker daemon inside minikube
if runtime.GOOS == "windows" { // testing docker-env eval in powershell
c := exec.CommandContext(mctx, "powershell.exe", "-NoProfile", "-NonInteractive", Target()+" -p "+profile+" docker-env | Invoke-Expression ; docker images")
rr, err = Run(t, c)
} else {
c := exec.CommandContext(mctx, "/bin/bash", "-c", "eval $("+Target()+" -p "+profile+" docker-env) && docker images")
rr, err = Run(t, c)
}
command := make([]string, len(tc.commandPrefix)+1)
// Would use "copy" built-in here, but that is shadowed by "copy" package
for i, v := range tc.commandPrefix {
command[i] = v
}
if mctx.Err() == context.DeadlineExceeded {
t.Errorf("failed to run the command in 30 seconds. exceeded 30s timeout. %s", rr.Command())
}
formattedArg := fmt.Sprintf(tc.formatArg, Target(), profile)
if err != nil {
t.Fatalf("failed to run minikube docker-env. args %q : %v ", rr.Command(), err)
}
// we should be able to get minikube status from a shell that has eval'd docker-env
command[len(command)-1] = formattedArg + Target() + " status -p " + profile
c := exec.CommandContext(mctx, command[0], command[1:]...)
rr, err := Run(t, c)
expectedImgInside := "gcr.io/k8s-minikube/storage-provisioner"
if !strings.Contains(rr.Output(), expectedImgInside) {
t.Fatalf("expected 'docker images' to have %q inside minikube. but the output is: *%s*", expectedImgInside, rr.Output())
if mctx.Err() == context.DeadlineExceeded {
t.Errorf("failed to run the command by deadline. exceeded timeout. %s", rr.Command())
}
if err != nil {
t.Fatalf("failed to do status after eval-ing docker-env. error: %v", err)
}
if !strings.Contains(rr.Output(), "Running") {
t.Fatalf("expected status output to include 'Running' after eval docker-env but got: *%s*", rr.Output())
}
if !strings.Contains(rr.Output(), "in-use") {
t.Fatalf("expected status output to include `in-use` after eval docker-env but got *%s*", rr.Output())
}
mctx, cancel = context.WithTimeout(ctx, Seconds(60))
defer cancel()
// do an eval $(minikube -p profile docker-env) and check that we are pointed at the docker daemon inside minikube
command[len(command)-1] = formattedArg + "docker images"
c = exec.CommandContext(mctx, command[0], command[1:]...)
rr, err = Run(t, c)
if mctx.Err() == context.DeadlineExceeded {
t.Errorf("failed to run the command in 30 seconds. exceeded 30s timeout. %s", rr.Command())
}
if err != nil {
t.Fatalf("failed to run minikube docker-env. args %q : %v ", rr.Command(), err)
}
expectedImgInside := "gcr.io/k8s-minikube/storage-provisioner"
if !strings.Contains(rr.Output(), expectedImgInside) {
t.Fatalf("expected 'docker images' to have %q inside minikube. but the output is: *%s*", expectedImgInside, rr.Output())
}
})
}
}
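The %[1]s and %[2]s verbs in formatArg above are indexed format arguments, so a single Sprintf call fills both the powershell and bash templates from the same Target() and profile values. A quick standalone illustration, assuming Target() resolves to minikube and a profile named functional-test:

package main

import "fmt"

func main() {
	formatArg := "eval $(%[1]s -p %[2]s docker-env) && "
	prefix := fmt.Sprintf(formatArg, "minikube", "functional-test")
	fmt.Println(prefix + "minikube status -p functional-test")
	// prints: eval $(minikube -p functional-test docker-env) && minikube status -p functional-test
}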

View File

@ -146,7 +146,7 @@ func DockerDriver() bool {
// PodmanDriver returns whether or not this test is using the podman driver
func PodmanDriver() bool {
return strings.Contains(*startArgs, "--vm-driver=podman") || strings.Contains(*startArgs, "driver=podman")
return strings.Contains(*startArgs, "--driver=podman") || strings.Contains(*startArgs, "--vm-driver=podman")
}
// KicDriver returns whether or not this test is using the docker or podman driver

View File

@ -1,3 +1,5 @@
# ref: https://operatorhub.io/install/etcd.yaml
apiVersion: v1
kind: Namespace
metadata:
@ -12,13 +14,28 @@ spec:
targetNamespaces:
- my-etcd
---
# # etcd v0.9.4 uses 'apiVersion: apiextensions.k8s.io/v1beta1', which is deprecated in k8s v1.22+
# # ref: https://github.com/k8s-operatorhub/community-operators/blob/834fa9b5a58f75fbf1ae2ed5e37db2efe1cab483/operators/etcd/0.9.4/etcdclusters.etcd.database.coreos.com.crd.yaml#L1
# # keep it disabled until updated; temporarily use the 'cluster-manager' below instead
# apiVersion: operators.coreos.com/v1alpha1
# kind: Subscription
# metadata:
# name: my-etcd
# namespace: my-etcd
# spec:
# channel: singlenamespace-alpha
# name: etcd
# source: operatorhubio-catalog
# sourceNamespace: olm
# ---
apiVersion: operators.coreos.com/v1alpha1
kind: Subscription
metadata:
name: my-etcd
name: cluster-manager
namespace: my-etcd
spec:
channel: singlenamespace-alpha
name: etcd
channel: stable
name: cluster-manager
source: operatorhubio-catalog
sourceNamespace: olm
installPlanApproval: Automatic

View File

@ -3,7 +3,8 @@ kind: Ingress
metadata:
name: nginx-ingress
annotations:
kubernetes.io/ingress.class: "nginx"
# use the shared ingress-nginx
kubernetes.io/ingress.class: nginx
labels:
integration-test: ingress
spec:
@ -11,10 +12,10 @@ spec:
- host: nginx.example.com
http:
paths:
- path: "/"
- path: /
pathType: Prefix
backend:
service:
name: nginx
port:
number: 80
number: 80

View File

@ -74,6 +74,8 @@
"CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto)": "",
"Cache image from docker daemon": "",
"Cache image from remote registry": "",
"Cache image to docker daemon": "",
"Cache image to remote registry": "",
"Cannot find directory {{.path}} for copy": "",
"Cannot find directory {{.path}} for mount": "",
"Cannot use both --output and --format options": "",
@ -244,6 +246,7 @@
"Failed to get command runner": "",
"Failed to get image map": "",
"Failed to get service URL: {{.error}}": "",
"Failed to get temp": "",
"Failed to kill mount process: {{.error}}": "Fehler beim Beenden des Bereitstellungsprozesses: {{.error}}",
"Failed to list cached images": "",
"Failed to list images": "",
@ -252,10 +255,12 @@
"Failed to pull image": "",
"Failed to pull images": "",
"Failed to push images": "",
"Failed to read temp": "",
"Failed to reload cached images": "",
"Failed to remove image": "",
"Failed to save config {{.profile}}": "",
"Failed to save dir": "",
"Failed to save image": "",
"Failed to save stdin": "",
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}": "NO_PROXY Env konnte nicht festgelegt werden. Benutzen Sie `export NO_PROXY = $ NO_PROXY, {{.ip}}",
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "",
@ -436,6 +441,7 @@
"Please install the minikube kvm2 VM driver, or select an alternative --driver": "",
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "",
"Please provide a path or url to build": "",
"Please provide an image in the container runtime to save from minikube via \u003cminikube image save IMAGE_NAME\u003e": "",
"Please provide an image in your local daemon to load into minikube via \u003cminikube image load IMAGE_NAME\u003e": "",
"Please provide source and target image": "",
"Please re-eval your docker-env, To ensure your environment variables have updated ports:\n\n\t'minikube -p {{.profile_name}} docker-env'\n\n\t": "",
@ -524,6 +530,7 @@
"SSH key (ssh driver only)": "",
"SSH port (ssh driver only)": "",
"SSH user (ssh driver only)": "",
"Save a image from minikube": "",
"Select a valid value for --dnsdomain": "",
"Send trace events. Options include: [gcp]": "",
"Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. Or list out all the services using 'minikube service list'": "",

View File

@ -75,6 +75,8 @@
"CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto)": "Plug-in CNI para usar. Opciones validas: auto, bridge, calico, cilium, flannel, kindnet, o ruta a un manifiesto CNI (Por defecto: auto)",
"Cache image from docker daemon": "",
"Cache image from remote registry": "",
"Cache image to docker daemon": "",
"Cache image to remote registry": "",
"Cannot find directory {{.path}} for copy": "",
"Cannot find directory {{.path}} for mount": "No se pudo encontrar el directorio {{.path}} para montar",
"Cannot use both --output and --format options": "No se pueden usar ambas opciones (--output y --path)",
@ -250,6 +252,7 @@
"Failed to get command runner": "",
"Failed to get image map": "",
"Failed to get service URL: {{.error}}": "",
"Failed to get temp": "",
"Failed to kill mount process: {{.error}}": "No se ha podido detener el proceso de activación: {{.error}}",
"Failed to list cached images": "",
"Failed to list images": "",
@ -258,10 +261,12 @@
"Failed to pull image": "",
"Failed to pull images": "",
"Failed to push images": "",
"Failed to read temp": "",
"Failed to reload cached images": "",
"Failed to remove image": "",
"Failed to save config {{.profile}}": "",
"Failed to save dir": "",
"Failed to save image": "",
"Failed to save stdin": "",
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}": "No se ha podido definir la variable de entorno NO_PROXY. Utiliza export NO_PROXY=$NO_PROXY,{{.ip}}",
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "",
@ -442,6 +447,7 @@
"Please install the minikube kvm2 VM driver, or select an alternative --driver": "",
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "",
"Please provide a path or url to build": "",
"Please provide an image in the container runtime to save from minikube via \u003cminikube image save IMAGE_NAME\u003e": "",
"Please provide an image in your local daemon to load into minikube via \u003cminikube image load IMAGE_NAME\u003e": "",
"Please provide source and target image": "",
"Please re-eval your docker-env, To ensure your environment variables have updated ports:\n\n\t'minikube -p {{.profile_name}} docker-env'\n\n\t": "",
@ -530,6 +536,7 @@
"SSH key (ssh driver only)": "",
"SSH port (ssh driver only)": "",
"SSH user (ssh driver only)": "",
"Save a image from minikube": "",
"Select a valid value for --dnsdomain": "",
"Send trace events. Options include: [gcp]": "",
"Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. Or list out all the services using 'minikube service list'": "",

View File

@ -76,6 +76,8 @@
"CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto)": "Plug-in CNI à utiliser. Options valides : auto, bridge, calico, cilium, flannel, kindnet ou chemin vers un manifeste CNI (par défaut : auto)",
"Cache image from docker daemon": "Cacher l'image du démon docker",
"Cache image from remote registry": "Cacher l'image du registre distant",
"Cache image to docker daemon": "",
"Cache image to remote registry": "",
"Cannot find directory {{.path}} for copy": "Impossible de trouver le répertoire {{.path}} pour la copie",
"Cannot find directory {{.path}} for mount": "Impossible de trouver le répertoire {{.path}} pour le montage",
"Cannot use both --output and --format options": "Impossible d'utiliser à la fois les options --output et --format",
@ -248,6 +250,7 @@
"Failed to get command runner": "Impossible d'obtenir le lanceur de commandes",
"Failed to get image map": "Échec de l'obtention de la carte d'image",
"Failed to get service URL: {{.error}}": "Échec de l'obtention de l'URL du service : {{.error}}",
"Failed to get temp": "",
"Failed to kill mount process: {{.error}}": "Échec de l'arrêt du processus d'installation : {{.error}}",
"Failed to list cached images": "Échec de l'obtention de la liste des images mises en cache",
"Failed to list images": "Échec de l'obtention de la liste des images",
@ -256,10 +259,12 @@
"Failed to pull image": "Échec de l'extraction de l'image",
"Failed to pull images": "",
"Failed to push images": "",
"Failed to read temp": "",
"Failed to reload cached images": "Échec du rechargement des images mises en cache",
"Failed to remove image": "Échec de la suppression de l'image",
"Failed to save config {{.profile}}": "Échec de l'enregistrement de la configuration {{.profile}}",
"Failed to save dir": "Échec de l'enregistrement du répertoire",
"Failed to save image": "",
"Failed to save stdin": "Échec de l'enregistrement de l'entrée standard",
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}": "Échec de la définition la variable d'environnement NO_PROXY. Veuillez utiliser `export NO_PROXY=$NO_PROXY,{{.ip}}.",
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "Échec de la définition de la variable d'environnement NO_PROXY. Veuillez utiliser `export NO_PROXY=$NO_PROXY,{{.ip}}`.",
@ -442,6 +447,7 @@
"Please install the minikube kvm2 VM driver, or select an alternative --driver": "Veuillez installer le pilote minikube kvm2 VM, ou sélectionnez un --driver alternatif",
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "Veuillez vous assurer que le service que vous recherchez est déployé ou se trouve dans le bon espace de noms.",
"Please provide a path or url to build": "Veuillez fournir un chemin ou une URL à construire",
"Please provide an image in the container runtime to save from minikube via \u003cminikube image save IMAGE_NAME\u003e": "",
"Please provide an image in your local daemon to load into minikube via \u003cminikube image load IMAGE_NAME\u003e": "Veuillez fournir une image dans votre démon local à charger dans minikube via \u003cminikube image load IMAGE_NAME\u003e",
"Please provide source and target image": "",
"Please re-eval your docker-env, To ensure your environment variables have updated ports:\n\n\t'minikube -p {{.profile_name}} docker-env'\n\n\t": "Veuillez réévaluer votre docker-env, pour vous assurer que vos variables d'environnement ont des ports mis à jour :\n\n\t'minikube -p {{.profile_name}} docker-env'\n\n\t",
@ -531,6 +537,7 @@
"SSH key (ssh driver only)": "Clé SSH (pilote ssh uniquement)",
"SSH port (ssh driver only)": "Port SSH (pilote ssh uniquement)",
"SSH user (ssh driver only)": "Utilisateur SSH (pilote ssh uniquement)",
"Save a image from minikube": "",
"Select a valid value for --dnsdomain": "Sélectionnez une valeur valide pour --dnsdomain",
"Send trace events. Options include: [gcp]": "Envoyer des événements de trace. Les options incluent : [gcp]",
"Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. Or list out all the services using 'minikube service list'": "Le service '{{.service}}' n'a pas été trouvé dans l'espace de noms '{{.namespace}}'.\nVous pouvez sélectionner un autre espace de noms en utilisant 'minikube service {{.service}} -n \u003cnamespace\u003e'. Ou répertoriez tous les services à l'aide de 'minikube service list'",

View File

@ -74,6 +74,8 @@
"CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto)": "",
"Cache image from docker daemon": "",
"Cache image from remote registry": "",
"Cache image to docker daemon": "",
"Cache image to remote registry": "",
"Cannot find directory {{.path}} for copy": "",
"Cannot find directory {{.path}} for mount": "マウントのためのディレクトリ{{.path}}が見つかりません",
"Cannot use both --output and --format options": "",
@ -238,6 +240,7 @@
"Failed to get command runner": "",
"Failed to get image map": "",
"Failed to get service URL: {{.error}}": "",
"Failed to get temp": "",
"Failed to kill mount process: {{.error}}": "マウント プロセスを強制終了できませんでした。{{.error}}",
"Failed to list cached images": "",
"Failed to list images": "",
@ -246,10 +249,12 @@
"Failed to pull image": "",
"Failed to pull images": "",
"Failed to push images": "",
"Failed to read temp": "",
"Failed to reload cached images": "",
"Failed to remove image": "",
"Failed to save config {{.profile}}": "",
"Failed to save dir": "",
"Failed to save image": "",
"Failed to save stdin": "",
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "NO_PROXY 環境変数を設定できませんでした。「export NO_PROXY=$NO_PROXY,{{.ip}}」を使用してください。",
"Failed to setup certs": "",
@ -433,6 +438,7 @@
"Please install the minikube kvm2 VM driver, or select an alternative --driver": "",
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "",
"Please provide a path or url to build": "",
"Please provide an image in the container runtime to save from minikube via \u003cminikube image save IMAGE_NAME\u003e": "",
"Please provide an image in your local daemon to load into minikube via \u003cminikube image load IMAGE_NAME\u003e": "",
"Please provide source and target image": "",
"Please re-eval your docker-env, To ensure your environment variables have updated ports:\n\n\t'minikube -p {{.profile_name}} docker-env'\n\n\t": "",
@ -523,6 +529,7 @@
"SSH key (ssh driver only)": "",
"SSH port (ssh driver only)": "",
"SSH user (ssh driver only)": "",
"Save a image from minikube": "",
"Select a valid value for --dnsdomain": "",
"Send trace events. Options include: [gcp]": "",
"Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. Or list out all the services using 'minikube service list'": "",

View File

@ -79,6 +79,8 @@
"CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto)": "",
"Cache image from docker daemon": "도커 데몬의 캐시 이미지",
"Cache image from remote registry": "원격 레지스트리의 캐시 이미지",
"Cache image to docker daemon": "",
"Cache image to remote registry": "",
"Cannot find directory {{.path}} for copy": "복사하기 위한 디렉토리 {{.path}} 를 찾을 수 없습니다.",
"Cannot find directory {{.path}} for mount": "마운트하기 위한 디렉토리 {{.path}} 를 찾을 수 없습니다",
"Cannot use both --output and --format options": "--output 과 --format 옵션을 함께 사용할 수 없습니다",
@ -265,6 +267,7 @@
"Failed to get driver URL": "드라이버 URL 조회에 실패하였습니다",
"Failed to get image map": "",
"Failed to get service URL: {{.error}}": "서비스 URL 조회에 실패하였습니다: {{.error}}",
"Failed to get temp": "",
"Failed to kill mount process: {{.error}}": "마운트 프로세스 중지에 실패하였습니다: {{.error}}",
"Failed to list cached images": "캐시된 이미지를 조회하는 데 실패하였습니다",
"Failed to list images": "",
@ -273,11 +276,13 @@
"Failed to pull image": "",
"Failed to pull images": "",
"Failed to push images": "",
"Failed to read temp": "",
"Failed to reload cached images": "캐시된 이미지를 다시 불러오는 데 실패하였습니다",
"Failed to remove image": "",
"Failed to save config": "컨피그 저장에 실패하였습니다",
"Failed to save config {{.profile}}": "",
"Failed to save dir": "",
"Failed to save image": "",
"Failed to save stdin": "",
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "",
"Failed to setup certs": "",
@ -457,6 +462,7 @@
"Please install the minikube kvm2 VM driver, or select an alternative --driver": "",
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "",
"Please provide a path or url to build": "",
"Please provide an image in the container runtime to save from minikube via \u003cminikube image save IMAGE_NAME\u003e": "",
"Please provide an image in your local daemon to load into minikube via \u003cminikube image load IMAGE_NAME\u003e": "",
"Please provide source and target image": "",
"Please re-eval your docker-env, To ensure your environment variables have updated ports:\n\n\t'minikube -p {{.profile_name}} docker-env'\n\n\t": "",
@ -543,6 +549,7 @@
"SSH key (ssh driver only)": "",
"SSH port (ssh driver only)": "",
"SSH user (ssh driver only)": "",
"Save a image from minikube": "",
"Select a valid value for --dnsdomain": "",
"Send trace events. Options include: [gcp]": "",
"Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. Or list out all the services using 'minikube service list'": "",

View File

@ -76,6 +76,8 @@
"CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto)": "",
"Cache image from docker daemon": "",
"Cache image from remote registry": "",
"Cache image to docker daemon": "",
"Cache image to remote registry": "",
"Cannot find directory {{.path}} for copy": "Nie znaleziono katalogu {{.path}} do skopiowania",
"Cannot find directory {{.path}} for mount": "Nie można odnaleźć folderu {{.path}} do zamontowania",
"Cannot use both --output and --format options": "Nie można użyć obydwu opcji --output i --format jednocześnie",
@ -252,6 +254,7 @@
"Failed to get command runner": "",
"Failed to get image map": "",
"Failed to get service URL: {{.error}}": "",
"Failed to get temp": "",
"Failed to kill mount process: {{.error}}": "Zabicie procesu nie powiodło się: {{.error}}",
"Failed to list cached images": "",
"Failed to list images": "",
@ -260,12 +263,14 @@
"Failed to pull image": "",
"Failed to pull images": "",
"Failed to push images": "",
"Failed to read temp": "",
"Failed to reload cached images": "",
"Failed to remove image": "",
"Failed to remove profile": "Usunięcie profilu nie powiodło się",
"Failed to save config": "Zapisywanie konfiguracji nie powiodło się",
"Failed to save config {{.profile}}": "",
"Failed to save dir": "",
"Failed to save image": "",
"Failed to save stdin": "",
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "",
"Failed to setup certs": "Konfiguracja certyfikatów nie powiodła się",
@ -450,6 +455,7 @@
"Please install the minikube kvm2 VM driver, or select an alternative --driver": "Zainstaluj sterownik kvm2 lub wybierz inny sterownik używając flagi --driver",
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "Proszę upewnij się, że serwis którego szukasz znajduje się w prawidłowej przestrzeni nazw",
"Please provide a path or url to build": "",
"Please provide an image in the container runtime to save from minikube via \u003cminikube image save IMAGE_NAME\u003e": "",
"Please provide an image in your local daemon to load into minikube via \u003cminikube image load IMAGE_NAME\u003e": "",
"Please provide source and target image": "",
"Please re-eval your docker-env, To ensure your environment variables have updated ports:\n\n\t'minikube -p {{.profile_name}} docker-env'\n\n\t": "",
@ -542,6 +548,7 @@
"SSH key (ssh driver only)": "",
"SSH port (ssh driver only)": "",
"SSH user (ssh driver only)": "",
"Save a image from minikube": "",
"Select a valid value for --dnsdomain": "",
"Send trace events. Options include: [gcp]": "",
"Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. Or list out all the services using 'minikube service list'": "",

View File

@ -70,6 +70,8 @@
"CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto)": "",
"Cache image from docker daemon": "",
"Cache image from remote registry": "",
"Cache image to docker daemon": "",
"Cache image to remote registry": "",
"Cannot find directory {{.path}} for copy": "",
"Cannot find directory {{.path}} for mount": "",
"Cannot use both --output and --format options": "",
@ -229,6 +231,7 @@
"Failed to get command runner": "",
"Failed to get image map": "",
"Failed to get service URL: {{.error}}": "",
"Failed to get temp": "",
"Failed to kill mount process: {{.error}}": "",
"Failed to list cached images": "",
"Failed to list images": "",
@ -237,10 +240,12 @@
"Failed to pull image": "",
"Failed to pull images": "",
"Failed to push images": "",
"Failed to read temp": "",
"Failed to reload cached images": "",
"Failed to remove image": "",
"Failed to save config {{.profile}}": "",
"Failed to save dir": "",
"Failed to save image": "",
"Failed to save stdin": "",
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "",
"Failed to setup certs": "",
@ -411,6 +416,7 @@
"Please install the minikube kvm2 VM driver, or select an alternative --driver": "",
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "",
"Please provide a path or url to build": "",
"Please provide an image in the container runtime to save from minikube via \u003cminikube image save IMAGE_NAME\u003e": "",
"Please provide an image in your local daemon to load into minikube via \u003cminikube image load IMAGE_NAME\u003e": "",
"Please provide source and target image": "",
"Please re-eval your docker-env, To ensure your environment variables have updated ports:\n\n\t'minikube -p {{.profile_name}} docker-env'\n\n\t": "",
@ -495,6 +501,7 @@
"SSH key (ssh driver only)": "",
"SSH port (ssh driver only)": "",
"SSH user (ssh driver only)": "",
"Save a image from minikube": "",
"Select a valid value for --dnsdomain": "",
"Send trace events. Options include: [gcp]": "",
"Service '{{.service}}' was not found in '{{.namespace}}' namespace.\nYou may select another namespace by using 'minikube service {{.service}} -n \u003cnamespace\u003e'. Or list out all the services using 'minikube service list'": "",

View File

@ -90,6 +90,8 @@
"CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto)": "",
"Cache image from docker daemon": "",
"Cache image from remote registry": "",
"Cache image to docker daemon": "",
"Cache image to remote registry": "",
"Cannot find directory {{.path}} for copy": "",
"Cannot find directory {{.path}} for mount": "找不到用来挂载的 {{.path}} 目录",
"Cannot use both --output and --format options": "不能同时使用 --output 和 --format 选项",
@ -315,6 +317,7 @@
"Failed to get driver URL": "获取 driver URL 失败",
"Failed to get image map": "",
"Failed to get service URL: {{.error}}": "获取 service URL 失败:{{.error}}",
"Failed to get temp": "",
"Failed to kill mount process: {{.error}}": "未能终止装载进程:{{.error}}",
"Failed to list cached images": "无法列出缓存镜像",
"Failed to list images": "",
@ -323,12 +326,14 @@
"Failed to pull image": "",
"Failed to pull images": "",
"Failed to push images": "",
"Failed to read temp": "",
"Failed to reload cached images": "重新加载缓存镜像失败",
"Failed to remove image": "",
"Failed to remove profile": "无法删除配置文件",
"Failed to save config": "无法保存配置",
"Failed to save config {{.profile}}": "",
"Failed to save dir": "",
"Failed to save image": "",
"Failed to save stdin": "",
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}": "未能设置 NO_PROXY 环境变量。请使用“export NO_PROXY=$NO_PROXY,{{.ip}}”",
"Failed to set NO_PROXY Env. Please use `export NO_PROXY=$NO_PROXY,{{.ip}}`.": "未能设置 NO_PROXY 环境变量。请使用“export NO_PROXY=$NO_PROXY,{{.ip}}”。",
@ -524,6 +529,7 @@
"Please install the minikube kvm2 VM driver, or select an alternative --driver": "",
"Please make sure the service you are looking for is deployed or is in the correct namespace.": "",
"Please provide a path or url to build": "",
"Please provide an image in the container runtime to save from minikube via \u003cminikube image save IMAGE_NAME\u003e": "",
"Please provide an image in your local daemon to load into minikube via \u003cminikube image load IMAGE_NAME\u003e": "",
"Please provide source and target image": "",
"Please re-eval your docker-env, To ensure your environment variables have updated ports:\n\n\t'minikube -p {{.profile_name}} docker-env'\n\n\t": "",
@ -621,6 +627,7 @@
"SSH key (ssh driver only)": "",
"SSH port (ssh driver only)": "",
"SSH user (ssh driver only)": "",
"Save a image from minikube": "",
"Select a valid value for --dnsdomain": "",
"Selecting '{{.driver}}' driver from existing profile (alternates: {{.alternates}})": "从现有配置文件中选择 '{{.driver}}' 驱动程序 (可选:{{.alternates}}",
"Selecting '{{.driver}}' driver from user configuration (alternates: {{.alternates}})": "从用户配置中选择 {{.driver}}' 驱动程序(可选:{{.alternates}}",