Merge branch 'master' into download-tests-suck
commit 0cd6a01180

Makefile
@@ -529,7 +529,7 @@ storage-provisioner-image: out/storage-provisioner-$(GOARCH) ## Build storage-pr
 .PHONY: kic-base-image
 kic-base-image: ## builds the base image used for kic.
 	docker rmi -f $(REGISTRY)/kicbase:$(KIC_VERSION)-snapshot || true
-	docker build -f ./hack/images/kicbase.Dockerfile -t $(REGISTRY)/kicbase:$(KIC_VERSION)-snapshot --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) --target base .
+	docker build -f ./hack/images/kicbase.Dockerfile -t $(REGISTRY)/kicbase:$(KIC_VERSION)-snapshot --build-arg COMMIT_SHA=${VERSION}-$(COMMIT) --cache-from $(REGISTRY)/kicbase:$(KIC_VERSION) --target base .

 .PHONY: upload-preloaded-images-tar
 upload-preloaded-images-tar: out/minikube # Upload the preloaded images for oldest supported, newest supported, and default kubernetes versions to GCS.

@@ -18,8 +18,10 @@ package config

 import (
 	"io/ioutil"
+	"net"

 	"github.com/spf13/cobra"
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/exit"
 	"k8s.io/minikube/pkg/minikube/out"
 	"k8s.io/minikube/pkg/minikube/service"

@@ -184,6 +186,30 @@ var addonsConfigureCmd = &cobra.Command{
 			out.WarningT("ERROR creating `registry-creds-acr` secret")
 		}

+	case "metallb":
+		profile := ClusterFlagValue()
+		cfg, err := config.Load(profile)
+		if err != nil {
+			out.ErrT(out.FatalType, "Failed to load config {{.profile}}", out.V{"profile": profile})
+		}
+
+		validator := func(s string) bool {
+			return net.ParseIP(s) != nil
+		}
+
+		if cfg.KubernetesConfig.LoadBalancerStartIP == "" {
+			cfg.KubernetesConfig.LoadBalancerStartIP = AskForStaticValidatedValue("-- Enter Load Balancer Start IP: ", validator)
+		}
+
+		if cfg.KubernetesConfig.LoadBalancerEndIP == "" {
+			cfg.KubernetesConfig.LoadBalancerEndIP = AskForStaticValidatedValue("-- Enter Load Balancer End IP: ", validator)
+		}
+
+		err = config.SaveProfile(profile, cfg)
+		if err != nil {
+			out.ErrT(out.FatalType, "Failed to save config {{.profile}}", out.V{"profile": profile})
+		}
+
 	default:
 		out.FailureT("{{.name}} has no available configuration options", out.V{"name": addon})
 		return

@@ -153,3 +153,23 @@ func posString(slice []string, element string) int {
 func containsString(slice []string, element string) bool {
 	return posString(slice, element) != -1
 }
+
+// AskForStaticValidatedValue asks for a single value to enter and checks it for valid input
+func AskForStaticValidatedValue(s string, validator func(s string) bool) string {
+	reader := bufio.NewReader(os.Stdin)
+
+	for {
+		response := getStaticValue(reader, s)
+
+		// Can't have zero length
+		if len(response) == 0 {
+			out.Err("--Error, please enter a value:")
+			continue
+		}
+		if !validator(response) {
+			out.Err("--Invalid input, please enter a value:")
+			continue
+		}
+		return response
+	}
+}
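
For reviewers who want to poke at the prompt logic in isolation, here is a minimal, self-contained sketch of the same validate-and-retry loop. `getStaticValue` is replaced by a plain line read, so this illustrates the pattern rather than reproducing the exact minikube helper:

```go
package main

import (
	"bufio"
	"fmt"
	"net"
	"os"
	"strings"
)

// askValidated re-prompts until the validator accepts the input,
// mirroring AskForStaticValidatedValue above.
func askValidated(prompt string, validator func(string) bool) string {
	reader := bufio.NewReader(os.Stdin)
	for {
		fmt.Print(prompt)
		line, _ := reader.ReadString('\n')
		response := strings.TrimSpace(line)
		if response == "" {
			fmt.Println("--Error, please enter a value:")
			continue
		}
		if !validator(response) {
			fmt.Println("--Invalid input, please enter a value:")
			continue
		}
		return response
	}
}

func main() {
	isIP := func(s string) bool { return net.ParseIP(s) != nil }
	fmt.Println("start:", askValidated("-- Enter Load Balancer Start IP: ", isIP))
}
```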
@@ -106,7 +106,7 @@ var mountCmd = &cobra.Command{
 	var ip net.IP
 	var err error
 	if mountIP == "" {
-		ip, err = cluster.GetVMHostIP(co.CP.Host)
+		ip, err = cluster.HostIP(co.CP.Host)
 		if err != nil {
 			exit.WithError("Error getting the host IP address to use from within the VM", err)
 		}

@@ -27,6 +27,6 @@ var nodeCmd = &cobra.Command{
 	Short: "Node operations",
 	Long:  "Operations on nodes",
 	Run: func(cmd *cobra.Command, args []string) {
-		exit.UsageT("Usage: minikube node [add|start|stop|delete]")
+		exit.UsageT("Usage: minikube node [add|start|stop|delete|list]")
 	},
 }

@@ -18,6 +18,7 @@ package cmd

 import (
 	"github.com/spf13/cobra"
+	"github.com/spf13/viper"
 	"k8s.io/minikube/pkg/minikube/config"
 	"k8s.io/minikube/pkg/minikube/driver"
 	"k8s.io/minikube/pkg/minikube/exit"

@@ -54,6 +55,11 @@ var nodeAddCmd = &cobra.Command{
 		KubernetesVersion: cc.KubernetesConfig.KubernetesVersion,
 	}

+	// Make sure to decrease the default amount of memory we use per VM if this is the first worker node
+	if len(cc.Nodes) == 1 && viper.GetString(memory) == "" {
+		cc.Memory = 2200
+	}
+
 	if err := node.Add(cc, n); err != nil {
 		_, err := maybeDeleteAndRetry(*cc, n, nil, err)
 		if err != nil {

@@ -61,6 +67,12 @@ var nodeAddCmd = &cobra.Command{
 		}
 	}

+	// Add CNI config if it's not already there
+	// We need to run kubeadm.init here as well
+	if err := config.MultiNodeCNIConfig(cc); err != nil {
+		exit.WithError("failed to save config", err)
+	}
+
 	out.T(out.Ready, "Successfully added {{.name}} to {{.cluster}}!", out.V{"name": name, "cluster": cc.Name})
 	},
 }

@@ -0,0 +1,58 @@
/*
Copyright 2020 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package cmd

import (
	"fmt"
	"os"

	"github.com/golang/glog"
	"github.com/spf13/cobra"
	"k8s.io/minikube/pkg/minikube/driver"
	"k8s.io/minikube/pkg/minikube/exit"
	"k8s.io/minikube/pkg/minikube/mustload"
)

var nodeListCmd = &cobra.Command{
	Use:   "list",
	Short: "List nodes.",
	Long:  "List existing Minikube nodes.",
	Run: func(cmd *cobra.Command, args []string) {
		if len(args) != 0 {
			exit.UsageT("Usage: minikube node list")
		}

		cname := ClusterFlagValue()
		_, cc := mustload.Partial(cname)

		if len(cc.Nodes) < 1 {
			glog.Warningf("Did not find any Minikube nodes.")
		} else {
			glog.Infof("%v", cc.Nodes)
		}

		for _, n := range cc.Nodes {
			machineName := driver.MachineName(*cc, n)
			fmt.Printf("%s\t%s\n", machineName, n.IP)
		}
		os.Exit(0)
	},
}

func init() {
	nodeCmd.AddCommand(nodeListCmd)
}

@@ -80,11 +80,6 @@ var serviceCmd = &cobra.Command{
 	cname := ClusterFlagValue()
 	co := mustload.Healthy(cname)

-	if driver.NeedsPortForward(co.Config.Driver) {
-		startKicServiceTunnel(svc, cname)
-		return
-	}
-
 	urls, err := service.WaitForService(co.API, co.Config.Name, namespace, svc, serviceURLTemplate, serviceURLMode, https, wait, interval)
 	if err != nil {
 		var s *service.SVCNotFoundError

@@ -95,6 +90,11 @@ You may select another namespace by using 'minikube service {{.service}} -n <nam
 		exit.WithError("Error opening service", err)
 	}

+	if driver.NeedsPortForward(co.Config.Driver) {
+		startKicServiceTunnel(svc, cname)
+		return
+	}
+
 	openURLs(svc, urls)
 	},
 }

@@ -715,7 +715,7 @@ func memoryLimits(drvName string) (int, int, error) {
 }

 // suggestMemoryAllocation calculates the default memory footprint in MB
-func suggestMemoryAllocation(sysLimit int, containerLimit int) int {
+func suggestMemoryAllocation(sysLimit int, containerLimit int, nodes int) int {
 	if mem := viper.GetInt(memory); mem != 0 {
 		return mem
 	}

@@ -737,6 +737,10 @@ func suggestMemoryAllocation(sysLimit int, containerLimit int) int {
 	// Suggest 25% of RAM, rounded to nearest 100MB. Hyper-V requires an even number!
 	suggested := int(float32(sysLimit)/400.0) * 100

+	if nodes > 1 {
+		suggested /= nodes
+	}
+
 	if suggested > maximum {
 		return maximum
 	}
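
The two hunks above only show the edges of `suggestMemoryAllocation`. For orientation, here is a sketch of the whole function that is consistent with both the fragments shown and the test table further down; the 2200 MB floor and 6000 MB ceiling are inferred from those tests, and the `viper` flag short-circuit at the top of the real function is omitted to keep this runnable:

```go
package main

import "fmt"

// Reconstructed sketch; the exact constants and ordering in minikube may differ.
func suggestMemoryAllocation(sysLimit, containerLimit, nodes int) int {
	fallback := 2200 // MB floor for a usable cluster
	maximum := 6000  // MB default ceiling

	// A tiny host gets everything it has.
	if sysLimit > 0 && fallback > sysLimit {
		return sysLimit
	}

	// Inside a container, cap at the container limit (minus a little slack).
	if containerLimit > 0 {
		if fallback > containerLimit {
			return containerLimit
		}
		maximum = containerLimit - 48
	}

	// Suggest 25% of RAM, rounded to the nearest 100MB (Hyper-V needs an even number).
	suggested := int(float32(sysLimit)/400.0) * 100

	// Split the budget across nodes, the behavior this PR adds.
	if nodes > 1 {
		suggested /= nodes
	}

	if suggested > maximum {
		return maximum
	}
	if suggested < fallback {
		return fallback
	}
	return suggested
}

func main() {
	fmt.Println(suggestMemoryAllocation(16384, 0, 1)) // 4000, matching the "16GB sys" case
	fmt.Println(suggestMemoryAllocation(32768, 0, 2)) // 4050, matching "32GB sys 2 nodes"
}
```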
@@ -99,6 +99,7 @@ const (
 	nodes           = "nodes"
 	preload         = "preload"
 	deleteOnFailure = "delete-on-failure"
+	forceSystemd    = "force-systemd"
 	kicBaseImage    = "base-image"
 )

@@ -138,6 +139,7 @@ func initMinikubeFlags() {
 	startCmd.Flags().IntP(nodes, "n", 1, "The number of nodes to spin up. Defaults to 1.")
 	startCmd.Flags().Bool(preload, true, "If set, download tarball of preloaded images if available to improve start time. Defaults to true.")
 	startCmd.Flags().Bool(deleteOnFailure, false, "If set, delete the current cluster if start fails and try again. Defaults to false.")
+	startCmd.Flags().Bool(forceSystemd, false, "If set, force the container runtime to use systemd as cgroup manager. Currently available for docker and crio. Defaults to false.")
 }

 // initKubernetesFlags inits the commandline flags for kubernetes related options

@@ -218,7 +220,7 @@ func generateClusterConfig(cmd *cobra.Command, existing *config.ClusterConfig, k
 		glog.Warningf("Unable to query memory limits: %v", err)
 	}

-	mem := suggestMemoryAllocation(sysLimit, containerLimit)
+	mem := suggestMemoryAllocation(sysLimit, containerLimit, viper.GetInt(nodes))
 	if cmd.Flags().Changed(memory) {
 		mem, err = pkgutil.CalculateSizeInMB(viper.GetString(memory))
 		if err != nil {

@@ -185,25 +185,34 @@ func TestSuggestMemoryAllocation(t *testing.T) {
 		description    string
 		sysLimit       int
 		containerLimit int
+		nodes          int
 		want           int
 	}{
-		{"128GB sys", 128000, 0, 6000},
-		{"64GB sys", 64000, 0, 6000},
-		{"16GB sys", 16384, 0, 4000},
-		{"odd sys", 14567, 0, 3600},
-		{"4GB sys", 4096, 0, 2200},
-		{"2GB sys", 2048, 0, 2048},
-		{"Unable to poll sys", 0, 0, 2200},
-		{"128GB sys, 16GB container", 128000, 16384, 16336},
-		{"64GB sys, 16GB container", 64000, 16384, 16000},
-		{"16GB sys, 4GB container", 16384, 4096, 4000},
-		{"4GB sys, 3.5GB container", 16384, 3500, 3452},
-		{"2GB sys, 2GB container", 16384, 2048, 2048},
-		{"2GB sys, unable to poll container", 16384, 0, 4000},
+		{"128GB sys", 128000, 0, 1, 6000},
+		{"64GB sys", 64000, 0, 1, 6000},
+		{"32GB sys", 32768, 0, 1, 6000},
+		{"16GB sys", 16384, 0, 1, 4000},
+		{"odd sys", 14567, 0, 1, 3600},
+		{"4GB sys", 4096, 0, 1, 2200},
+		{"2GB sys", 2048, 0, 1, 2048},
+		{"Unable to poll sys", 0, 0, 1, 2200},
+		{"128GB sys, 16GB container", 128000, 16384, 1, 16336},
+		{"64GB sys, 16GB container", 64000, 16384, 1, 16000},
+		{"16GB sys, 4GB container", 16384, 4096, 1, 4000},
+		{"4GB sys, 3.5GB container", 16384, 3500, 1, 3452},
+		{"16GB sys, 2GB container", 16384, 2048, 1, 2048},
+		{"16GB sys, unable to poll container", 16384, 0, 1, 4000},
+		{"128GB sys 2 nodes", 128000, 0, 2, 6000},
+		{"8GB sys 3 nodes", 8192, 0, 3, 2200},
+		{"16GB sys 2 nodes", 16384, 0, 2, 2200},
+		{"32GB sys 2 nodes", 32768, 0, 2, 4050},
+		{"odd sys 2 nodes", 14567, 0, 2, 2200},
+		{"4GB sys 2 nodes", 4096, 0, 2, 2200},
+		{"2GB sys 3 nodes", 2048, 0, 3, 2048},
 	}
 	for _, test := range tests {
 		t.Run(test.description, func(t *testing.T) {
-			got := suggestMemoryAllocation(test.sysLimit, test.containerLimit)
+			got := suggestMemoryAllocation(test.sysLimit, test.containerLimit, test.nodes)
 			if got != test.want {
 				t.Errorf("defaultMemorySize(sys=%d, container=%d) = %d, want: %d", test.sysLimit, test.containerLimit, got, test.want)
 			}

@@ -191,7 +191,7 @@ func status(api libmachine.API, cc config.ClusterConfig, n config.Node) (*Status
 	}

 	// We have a fully operational host, now we can check for details
-	if _, err := cluster.GetHostDriverIP(api, name); err != nil {
+	if _, err := cluster.DriverIP(api, name); err != nil {
 		glog.Errorf("failed to get driver ip: %v", err)
 		st.Host = state.Error.String()
 		return st, err

@@ -0,0 +1,12 @@
apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - {{ .LoadBalancerStartIP }}-{{ .LoadBalancerEndIP }}
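
The two placeholders above are ordinary Go text/template fields, filled from the profile config saved by the `addons configure metallb` hunk earlier. A minimal sketch of that rendering step; the struct and field wiring here are illustrative, not minikube's actual addon plumbing:

```go
package main

import (
	"os"
	"text/template"
)

// A fragment of the addon template above, just enough to show the substitution.
const cm = `addresses:
- {{ .LoadBalancerStartIP }}-{{ .LoadBalancerEndIP }}
`

func main() {
	data := struct {
		LoadBalancerStartIP string
		LoadBalancerEndIP   string
	}{"192.168.99.100", "192.168.99.120"}

	tmpl := template.Must(template.New("metallb-config").Parse(cm))
	if err := tmpl.Execute(os.Stdout, data); err != nil {
		panic(err)
	}
}
```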
@@ -0,0 +1,293 @@
apiVersion: v1
kind: Namespace
metadata:
  labels:
    app: metallb
  name: metallb-system
---
apiVersion: policy/v1beta1
kind: PodSecurityPolicy
metadata:
  labels:
    app: metallb
  name: speaker
  namespace: metallb-system
spec:
  allowPrivilegeEscalation: false
  allowedCapabilities:
  - NET_ADMIN
  - NET_RAW
  - SYS_ADMIN
  fsGroup:
    rule: RunAsAny
  hostNetwork: true
  hostPorts:
  - max: 7472
    min: 7472
  privileged: true
  runAsUser:
    rule: RunAsAny
  seLinux:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:
  - '*'
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app: metallb
  name: controller
  namespace: metallb-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  labels:
    app: metallb
  name: speaker
  namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app: metallb
  name: metallb-system:controller
rules:
- apiGroups:
  - ''
  resources:
  - services
  verbs:
  - get
  - list
  - watch
  - update
- apiGroups:
  - ''
  resources:
  - services/status
  verbs:
  - update
- apiGroups:
  - ''
  resources:
  - events
  verbs:
  - create
  - patch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  labels:
    app: metallb
  name: metallb-system:speaker
rules:
- apiGroups:
  - ''
  resources:
  - services
  - endpoints
  - nodes
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ''
  resources:
  - events
  verbs:
  - create
  - patch
- apiGroups:
  - extensions
  resourceNames:
  - speaker
  resources:
  - podsecuritypolicies
  verbs:
  - use
---
apiVersion: rbac.authorization.k8s.io/v1
kind: Role
metadata:
  labels:
    app: metallb
  name: config-watcher
  namespace: metallb-system
rules:
- apiGroups:
  - ''
  resources:
  - configmaps
  verbs:
  - get
  - list
  - watch
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app: metallb
  name: metallb-system:controller
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metallb-system:controller
subjects:
- kind: ServiceAccount
  name: controller
  namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  labels:
    app: metallb
  name: metallb-system:speaker
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: metallb-system:speaker
subjects:
- kind: ServiceAccount
  name: speaker
  namespace: metallb-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: RoleBinding
metadata:
  labels:
    app: metallb
  name: config-watcher
  namespace: metallb-system
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: Role
  name: config-watcher
subjects:
- kind: ServiceAccount
  name: controller
- kind: ServiceAccount
  name: speaker
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    app: metallb
    component: speaker
  name: speaker
  namespace: metallb-system
spec:
  selector:
    matchLabels:
      app: metallb
      component: speaker
  template:
    metadata:
      annotations:
        prometheus.io/port: '7472'
        prometheus.io/scrape: 'true'
      labels:
        app: metallb
        component: speaker
    spec:
      containers:
      - args:
        - --port=7472
        - --config=config
        env:
        - name: METALLB_NODE_NAME
          valueFrom:
            fieldRef:
              fieldPath: spec.nodeName
        - name: METALLB_HOST
          valueFrom:
            fieldRef:
              fieldPath: status.hostIP
        image: metallb/speaker:v0.8.2
        imagePullPolicy: IfNotPresent
        name: speaker
        ports:
        - containerPort: 7472
          name: monitoring
        resources:
          limits:
            cpu: 100m
            memory: 100Mi
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            add:
            - NET_ADMIN
            - NET_RAW
            - SYS_ADMIN
            drop:
            - ALL
          readOnlyRootFilesystem: true
      hostNetwork: true
      nodeSelector:
        beta.kubernetes.io/os: linux
      serviceAccountName: speaker
      terminationGracePeriodSeconds: 0
      tolerations:
      - effect: NoSchedule
        key: node-role.kubernetes.io/master
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: metallb
    component: controller
  name: controller
  namespace: metallb-system
spec:
  revisionHistoryLimit: 3
  selector:
    matchLabels:
      app: metallb
      component: controller
  template:
    metadata:
      annotations:
        prometheus.io/port: '7472'
        prometheus.io/scrape: 'true'
      labels:
        app: metallb
        component: controller
    spec:
      containers:
      - args:
        - --port=7472
        - --config=config
        image: metallb/controller:v0.8.2
        imagePullPolicy: IfNotPresent
        name: controller
        ports:
        - containerPort: 7472
          name: monitoring
        resources:
          limits:
            cpu: 100m
            memory: 100Mi
        securityContext:
          allowPrivilegeEscalation: false
          capabilities:
            drop:
            - all
          readOnlyRootFilesystem: true
      nodeSelector:
        beta.kubernetes.io/os: linux
      securityContext:
        runAsNonRoot: true
        runAsUser: 65534
      serviceAccountName: controller
      terminationGracePeriodSeconds: 0

@@ -17,7 +17,7 @@ spec:
     spec:
       initContainers:
         - name: update
-          image: registry.fedoraproject.org/fedora
+          image: alpine:3.11
           volumeMounts:
             - name: etchosts
               mountPath: /host-etc/hosts

@@ -39,7 +39,7 @@ spec:
               for H in $REGISTRY_ALIASES; do
                 echo "$HOSTS" | grep "$H" || HOSTS="$HOSTS$NL$REGISTRY_SERVICE_HOST$TAB$H";
               done;
-              echo "$HOSTS" | diff -u /host-etc/hosts - || echo "$HOSTS" > /host-etc/hosts
+              echo "$HOSTS" | diff -U 3 /host-etc/hosts - || echo "$HOSTS" > /host-etc/hosts
               echo "Done."
       containers:
         - name: pause-for-update

@@ -0,0 +1,88 @@
# Scheduled shutdown and pause

* First proposed: 2020-04-20
* Authors: Thomas Stromberg (@tstromberg)

## Reviewer Priorities

Please review this proposal with the following priorities:

* Does this fit with minikube's [principles](https://minikube.sigs.k8s.io/docs/concepts/principles/)?
* Are there other approaches to consider?
* Could the implementation be made simpler?
* Are there usability, reliability, or technical debt concerns?

Please leave the above text in your proposal as instructions to the reader.

## Summary

Add the ability to schedule a future shutdown or pause event.

This is useful for two sets of users:

* command-line tools that interact with minikube on a per-invocation basis. These tools may not know which invocation is the last, but want low latency between commands.
* IDEs, which start minikube on an as-needed basis. Not all IDEs can trigger exit hooks when they close.

## Goals

* The ability to schedule a pause or shutdown event
* The ability to defer the scheduled pause or shutdown event
* "minikube start" transparently clears pending scheduled events

## Non-Goals

* Automatic idle detection: a more advanced but complementary idea that would keep users from having to invoke a keep-alive command. It could be built on the same mechanisms, but is beyond the scope of this proposal.

* Automatic re-activation: significantly more advanced, particularly for the scheduled-shutdown case. It would be possible to build, but it too is out of scope for this proposal.

## Design Details

### Proposed interface

* `minikube pause --after 5m`
* `minikube stop --after 5m`

* Each scheduled pause would overwrite the previous scheduled event.
* Each scheduled stop would overwrite the previous scheduled event.
* Each call to `minikube start` would clear scheduled events.

As a `keep-alive` implementation, tools repeat the command to reset the clock, moving the event 5 minutes into the future.

### Implementation idea #1: host-based

* If `--schedule` is used, the minikube command will daemonize, storing a pid in a well-known location, such as `$HOME/.minikube/profiles/<name>/scheduled_pause.pid`.
* If the pid already exists, the previous process will be killed, cancelling the scheduled event.

Advantages:

* Able to re-use all of the existing `pause` and `stop` implementation within minikube.
* Built-in handling for multiple architectures
* Does not consume memory reserved for the VM

Disadvantages:

* Runs a background task on the host
* Daemonization may require different handling on Windows

### Implementation idea #2: guest-based

minikube would connect to the control plane via SSH and run the equivalent of:

```shell
killall minikube-schedule
sleep 300

for node in $other-nodes; do
  ssh $node halt
done
halt
```

Advantages:

* Consistent execution environment

Disadvantages:

* Requires creation of a helper binary that runs within the VM
* Untested: some drivers may not fully release resources if shut down from inside the VM
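
For implementation idea #1, the pid-file handshake is the interesting part. A minimal sketch, under stated assumptions: the file location, permissions, and use of `syscall.Kill` (unix-only, matching the Windows caveat above) are illustrative, not the final design:

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strconv"
	"strings"
	"syscall"
)

// schedulePause cancels any previously scheduled event and records the
// new daemon's pid so the next call (or "minikube start") can cancel it.
func schedulePause(profileDir string, pid int) error {
	pidfile := filepath.Join(profileDir, "scheduled_pause.pid")

	// If a pid already exists, kill the previous process (best effort).
	if b, err := os.ReadFile(pidfile); err == nil {
		if old, err := strconv.Atoi(strings.TrimSpace(string(b))); err == nil {
			_ = syscall.Kill(old, syscall.SIGTERM)
		}
	}

	return os.WriteFile(pidfile, []byte(fmt.Sprintf("%d\n", pid)), 0o600)
}

func main() {
	if err := schedulePause(os.TempDir(), os.Getpid()); err != nil {
		fmt.Fprintln(os.Stderr, err)
	}
}
```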
@@ -15,13 +15,20 @@ RUN apt-get update && apt-get install -y --no-install-recommends \
     # libglib2.0-0 is required for conmon, which is required for podman
     libglib2.0-0=2.62.1-1 \
     && rm /etc/crictl.yaml

 # install cri-o based on https://github.com/cri-o/cri-o/commit/96b0c34b31a9fc181e46d7d8e34fb8ee6c4dc4e1#diff-04c6e90faac2675aa89e2176d2eec7d8R128
 RUN sh -c "echo 'deb http://download.opensuse.org/repositories/devel:/kubic:/libcontainers:/stable/xUbuntu_19.10/ /' > /etc/apt/sources.list.d/devel:kubic:libcontainers:stable.list" && \
     curl -LO https://download.opensuse.org/repositories/devel:kubic:libcontainers:stable/xUbuntu_19.10/Release.key && \
     apt-key add - < Release.key && apt-get update && \
-    apt-get install -y --no-install-recommends cri-o-1.17=1.17.2~1
+    apt-get install -y --no-install-recommends cri-o-1.17

 # install podman
-RUN apt-get install -y --no-install-recommends podman=1.8.2~144
+RUN apt-get install -y --no-install-recommends podman

+# install varlink
+RUN apt-get install -y --no-install-recommends varlink

 # disable non-docker runtimes by default
 RUN systemctl disable containerd && systemctl disable crio && rm /etc/crictl.yaml
 # enable docker which is default

@@ -31,7 +31,6 @@ JOB_NAME="Experimental_Podman_Linux"

 mkdir -p cron && gsutil -qm rsync "gs://minikube-builds/${MINIKUBE_LOCATION}/cron" cron || echo "FAILED TO GET CRON FILES"
 sudo install cron/cleanup_and_reboot_Linux.sh /etc/cron.hourly/cleanup_and_reboot || echo "FAILED TO INSTALL CLEANUP"
 SUDO_PREFIX="sudo -E "

 EXTRA_ARGS="--container-runtime=containerd"

@@ -87,7 +87,7 @@ func generateTarball(kubernetesVersion, containerRuntime, tarballFilename string
 	if err != nil {
 		return errors.Wrap(err, "failed create new runtime")
 	}
-	if err := cr.Enable(true); err != nil {
+	if err := cr.Enable(true, false); err != nil {
 		return errors.Wrap(err, "enable container runtime")
 	}

@@ -129,4 +129,9 @@ var Addons = []*Addon{
 		set:       SetBool,
 		callbacks: []setFn{enableOrDisableStorageClasses},
 	},
+	{
+		name:      "metallb",
+		set:       SetBool,
+		callbacks: []setFn{enableOrDisableAddon},
+	},
 }

@@ -99,7 +99,7 @@ func (d *Driver) Create() error {
 	},
 	)

-	exists, err := oci.ContainerExists(d.OCIBinary, params.Name)
+	exists, err := oci.ContainerExists(d.OCIBinary, params.Name, true)
 	if err != nil {
 		glog.Warningf("failed to check if container already exists: %v", err)
 	}

@@ -234,26 +234,7 @@ func (d *Driver) GetURL() (string, error) {

 // GetState returns the state that the host is in (running, stopped, etc)
 func (d *Driver) GetState() (state.State, error) {
-	out, err := oci.WarnIfSlow(d.NodeConfig.OCIBinary, "inspect", "-f", "{{.State.Status}}", d.MachineName)
-	if err != nil {
-		return state.Error, err
-	}
-
-	o := strings.TrimSpace(string(out))
-	switch o {
-	case "running":
-		return state.Running, nil
-	case "exited":
-		return state.Stopped, nil
-	case "paused":
-		return state.Paused, nil
-	case "restarting":
-		return state.Starting, nil
-	case "dead":
-		return state.Error, nil
-	default:
-		return state.None, fmt.Errorf("unknown state")
-	}
+	return oci.ContainerStatus(d.OCIBinary, d.MachineName, true)
 }

 // Kill stops a host forcefully, including any containers that we are managing.

@@ -269,7 +250,7 @@ func (d *Driver) Kill() error {
 	}

 	cr := command.NewExecRunner() // using exec runner for interacting with the daemon
-	if _, err := cr.RunCmd(exec.Command(d.NodeConfig.OCIBinary, "kill", d.MachineName)); err != nil {
+	if _, err := cr.RunCmd(oci.PrefixCmd(exec.Command(d.NodeConfig.OCIBinary, "kill", d.MachineName))); err != nil {
 		return errors.Wrapf(err, "killing %q", d.MachineName)
 	}
 	return nil

@@ -320,7 +301,7 @@ func (d *Driver) Restart() error {
 // Start an already created kic container
 func (d *Driver) Start() error {
 	cr := command.NewExecRunner() // using exec runner for interacting with docker/podman daemon
-	if _, err := cr.RunCmd(exec.Command(d.NodeConfig.OCIBinary, "start", d.MachineName)); err != nil {
+	if _, err := cr.RunCmd(oci.PrefixCmd(exec.Command(d.NodeConfig.OCIBinary, "start", d.MachineName))); err != nil {
 		return errors.Wrap(err, "start")
 	}
 	checkRunning := func() error {

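
A pattern worth noting in review: the PR threads the "warn if slow" behavior through as a variadic `...bool`, so existing call sites stay source-compatible while hot paths like `Create` and `GetState` opt in by passing `true`. A minimal sketch of the idiom; the names here are illustrative:

```go
package main

import "fmt"

// Optional variadic flag: zero args means "off", the first arg wins.
func containerExists(name string, warnSlow ...bool) bool {
	warn := len(warnSlow) > 0 && warnSlow[0]
	fmt.Printf("checking %s (warnSlow=%v)\n", name, warn)
	return true
}

func main() {
	containerExists("minikube")       // old call sites compile unchanged
	containerExists("minikube", true) // hot paths opt in to slowness warnings
}
```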
@@ -0,0 +1,154 @@
/*
Copyright 2019 The Kubernetes Authors All rights reserved.

Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at

    http://www.apache.org/licenses/LICENSE-2.0

Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/

package oci

import (
	"bytes"
	"context"
	"fmt"
	"io"
	"os/exec"
	"runtime"
	"strings"
	"time"

	"github.com/golang/glog"
	"k8s.io/minikube/pkg/minikube/out"
)

// RunResult holds the results of a Runner
type RunResult struct {
	Stdout   bytes.Buffer
	Stderr   bytes.Buffer
	ExitCode int
	Args     []string // the args that were passed to the Runner
}

// Command returns a human readable command string that does not induce eye fatigue
func (rr RunResult) Command() string {
	var sb strings.Builder
	sb.WriteString(rr.Args[0])
	for _, a := range rr.Args[1:] {
		if strings.Contains(a, " ") {
			sb.WriteString(fmt.Sprintf(` "%s"`, a))
			continue
		}
		sb.WriteString(fmt.Sprintf(" %s", a))
	}
	return sb.String()
}

// Output returns human-readable output for an execution result
func (rr RunResult) Output() string {
	var sb strings.Builder
	if rr.Stdout.Len() > 0 {
		sb.WriteString(fmt.Sprintf("-- stdout --\n%s\n-- /stdout --", rr.Stdout.Bytes()))
	}
	if rr.Stderr.Len() > 0 {
		sb.WriteString(fmt.Sprintf("\n** stderr ** \n%s\n** /stderr **", rr.Stderr.Bytes()))
	}
	return sb.String()
}

// PrefixCmd adds any needed prefix (such as sudo) to the command
func PrefixCmd(cmd *exec.Cmd) *exec.Cmd {
	if cmd.Args[0] == Podman && runtime.GOOS == "linux" { // want sudo when not running podman-remote
		cmdWithSudo := exec.Command("sudo", cmd.Args...)
		cmdWithSudo.Env = cmd.Env
		cmdWithSudo.Dir = cmd.Dir
		cmdWithSudo.Stdin = cmd.Stdin
		cmdWithSudo.Stdout = cmd.Stdout
		cmdWithSudo.Stderr = cmd.Stderr
		cmd = cmdWithSudo
	}
	return cmd
}

// runCmd runs an exec.Cmd against the docker daemon or podman
func runCmd(cmd *exec.Cmd, warnSlow ...bool) (*RunResult, error) {
	cmd = PrefixCmd(cmd)

	warn := false
	if len(warnSlow) > 0 {
		warn = warnSlow[0]
	}

	killTime := 19 * time.Second // this will be applied only if warnSlow is true
	warnTime := 2 * time.Second
	ctx, cancel := context.WithTimeout(context.Background(), killTime)
	defer cancel()

	if cmd.Args[1] == "volume" || cmd.Args[1] == "ps" { // volume and ps require more time than inspect
		killTime = 30 * time.Second
		warnTime = 3 * time.Second
	}

	if warn { // convert exec.Command to exec.CommandContext
		cmdWithCtx := exec.CommandContext(ctx, cmd.Args[0], cmd.Args[1:]...)
		cmdWithCtx.Stdout = cmd.Stdout // copying the original command
		cmdWithCtx.Stderr = cmd.Stderr
		cmd = cmdWithCtx
	}

	rr := &RunResult{Args: cmd.Args}
	glog.Infof("Run: %v", rr.Command())

	var outb, errb io.Writer
	if cmd.Stdout == nil {
		var so bytes.Buffer
		outb = io.MultiWriter(&so, &rr.Stdout)
	} else {
		outb = io.MultiWriter(cmd.Stdout, &rr.Stdout)
	}

	if cmd.Stderr == nil {
		var se bytes.Buffer
		errb = io.MultiWriter(&se, &rr.Stderr)
	} else {
		errb = io.MultiWriter(cmd.Stderr, &rr.Stderr)
	}

	cmd.Stdout = outb
	cmd.Stderr = errb

	start := time.Now()
	err := cmd.Run()
	elapsed := time.Since(start)
	if warn {
		if elapsed > warnTime {
			out.WarningT(`Executing "{{.command}}" took an unusually long time: {{.duration}}`, out.V{"command": rr.Command(), "duration": elapsed})
			out.ErrT(out.Tip, `Restarting the {{.name}} service may improve performance.`, out.V{"name": cmd.Args[0]})
		}

		if ctx.Err() == context.DeadlineExceeded {
			return rr, fmt.Errorf("%q timed out after %s", rr.Command(), killTime)
		}
	}

	if exitError, ok := err.(*exec.ExitError); ok {
		rr.ExitCode = exitError.ExitCode()
	}
	// Decrease log spam
	if elapsed > (1 * time.Second) {
		glog.Infof("Completed: %s: (%s)", rr.Command(), elapsed)
	}
	if err == nil {
		return rr, nil
	}

	return rr, fmt.Errorf("%s: %v\nstdout:\n%s\nstderr:\n%s", rr.Command(), err, rr.Stdout.String(), rr.Stderr.String())
}
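
Two reviewer notes on `runCmd`. First, the context is created with the 19s `killTime` before the `volume`/`ps` adjustment runs, so the longer 30s deadline never actually reaches the context; only the error message reports it. Second, the output capture relies on `io.MultiWriter` to tee the child's streams into both the caller's writer and the buffers kept on the result. That tee trick is easy to try standalone:

```go
package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
	"os/exec"
)

// Demonstrates the capture pattern runCmd uses: stream the child's stdout
// to the caller while also recording it in a buffer on the result.
func main() {
	var captured bytes.Buffer
	cmd := exec.Command("echo", "hello from the child")
	cmd.Stdout = io.MultiWriter(os.Stdout, &captured) // stream and record
	cmd.Stderr = os.Stderr
	if err := cmd.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		return
	}
	fmt.Printf("captured %d bytes: %q\n", captured.Len(), captured.String())
}
```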
@@ -216,15 +216,12 @@ type podmanSysInfo struct {
 // dockerSystemInfo returns docker system info --format '{{json .}}'
 func dockerSystemInfo() (dockerSysInfo, error) {
 	var ds dockerSysInfo
-	cmd := exec.Command(Docker, "system", "info", "--format", "{{json .}}")
-	out, err := cmd.CombinedOutput()
+	rr, err := runCmd(exec.Command(Docker, "system", "info", "--format", "{{json .}}"))
 	if err != nil {
 		return ds, errors.Wrap(err, "get docker system info")
 	}

-	if err := json.Unmarshal([]byte(strings.TrimSpace(string(out))), &ds); err != nil {
+	if err := json.Unmarshal([]byte(strings.TrimSpace(rr.Stdout.String())), &ds); err != nil {
 		return ds, errors.Wrapf(err, "unmarshal docker system info")
 	}

@@ -234,12 +231,12 @@ func dockerSystemInfo() (dockerSysInfo, error) {
 // podmanSystemInfo returns podman system info --format '{{json .}}'
 func podmanSystemInfo() (podmanSysInfo, error) {
 	var ps podmanSysInfo
-	cmd := exec.Command(Podman, "system", "info", "--format", "'{{json .}}'")
-	out, err := cmd.CombinedOutput()
+	rr, err := runCmd(exec.Command(Podman, "system", "info", "--format", "json"))
 	if err != nil {
 		return ps, errors.Wrap(err, "get podman system info")
 	}
-	if err := json.Unmarshal([]byte(strings.TrimSpace(string(out))), &ps); err != nil {
+	if err := json.Unmarshal([]byte(strings.TrimSpace(rr.Stdout.String())), &ps); err != nil {
 		return ps, errors.Wrapf(err, "unmarshal podman system info")
 	}
 	return ps, nil
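
The shape of these helpers is easy to try standalone: ask the daemon for its info as JSON and decode only the fields you need. The struct below is a small subset I'm assuming for illustration; minikube's `dockerSysInfo` carries many more fields:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os/exec"
	"strings"
)

type sysInfo struct {
	NCPU     int    `json:"NCPU"`
	MemTotal int64  `json:"MemTotal"`
	Name     string `json:"Name"`
}

func main() {
	out, err := exec.Command("docker", "system", "info", "--format", "{{json .}}").Output()
	if err != nil {
		fmt.Println("docker not available:", err)
		return
	}
	var si sysInfo
	if err := json.Unmarshal([]byte(strings.TrimSpace(string(out))), &si); err != nil {
		fmt.Println("unmarshal:", err)
		return
	}
	fmt.Printf("%s: %d CPUs, %d bytes RAM\n", si.Name, si.NCPU, si.MemTotal)
}
```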
@@ -43,12 +43,10 @@ func RoutableHostIPFromInside(ociBin string, containerName string) (net.IP, erro
 // digDNS will get the IP record for a dns
 func digDNS(ociBin, containerName, dns string) (net.IP, error) {
-	cmd := exec.Command(ociBin, "exec", "-t", containerName, "dig", "+short", dns)
-	out, err := cmd.CombinedOutput()
-	ip := net.ParseIP(strings.TrimSpace(string(out)))
+	rr, err := runCmd(exec.Command(ociBin, "exec", "-t", containerName, "dig", "+short", dns))
+	ip := net.ParseIP(strings.TrimSpace(rr.Stdout.String()))
 	if err != nil {
-		return ip, errors.Wrapf(err, "resolve dns to ip: %s", string(out))
+		return ip, errors.Wrapf(err, "resolve dns to ip")
 	}

 	glog.Infof("got host ip for mount in container by digging dns: %s", ip.String())

@@ -58,23 +56,19 @@ func digDNS(ociBin, containerName, dns string) (net.IP, error) {
 // dockerGatewayIP gets the default gateway ip for the docker bridge on the user's host machine
 // gets the ip from user's host docker
 func dockerGatewayIP() (net.IP, error) {
-	cmd := exec.Command(Docker, "network", "ls", "--filter", "name=bridge", "--format", "{{.ID}}")
-	out, err := cmd.CombinedOutput()
+	rr, err := runCmd(exec.Command(Docker, "network", "ls", "--filter", "name=bridge", "--format", "{{.ID}}"))
 	if err != nil {
-		return nil, errors.Wrapf(err, "get network bridge. output: %s", string(out))
+		return nil, errors.Wrapf(err, "get network bridge")
 	}

-	bridgeID := strings.TrimSpace(string(out))
-	cmd = exec.Command(Docker, "inspect",
-		"--format", "{{(index .IPAM.Config 0).Gateway}}", bridgeID)
-	out, err = cmd.CombinedOutput()
+	bridgeID := strings.TrimSpace(rr.Stdout.String())
+	rr, err = runCmd(exec.Command(Docker, "inspect",
+		"--format", "{{(index .IPAM.Config 0).Gateway}}", bridgeID))
 	if err != nil {
-		return nil, errors.Wrapf(err, "inspect IP gatway for bridge network: %q. output: %s", string(out), bridgeID)
+		return nil, errors.Wrapf(err, "inspect IP bridge network %q.", bridgeID)
 	}

-	ip := net.ParseIP(strings.TrimSpace(string(out)))
+	ip := net.ParseIP(strings.TrimSpace(rr.Stdout.String()))
 	glog.Infof("got host ip for mount in container by inspect docker network: %s", ip.String())
 	return ip, nil
 }

@@ -84,26 +78,24 @@ func dockerGatewayIP() (net.IP, error) {
 // will return the docker assigned port:
 // 32769, nil
 // only supports TCP ports
-func ForwardedPort(ociBinary string, ociID string, contPort int) (int, error) {
-	var out []byte
+func ForwardedPort(ociBin string, ociID string, contPort int) (int, error) {
+	var rr *RunResult
 	var err error

-	if ociBinary == Podman {
+	if ociBin == Podman {
 		//podman inspect -f "{{range .NetworkSettings.Ports}}{{if eq .ContainerPort "80"}}{{.HostPort}}{{end}}{{end}}"
-		cmd := exec.Command(ociBinary, "inspect", "-f", fmt.Sprintf("{{range .NetworkSettings.Ports}}{{if eq .ContainerPort %s}}{{.HostPort}}{{end}}{{end}}", fmt.Sprint(contPort)), ociID)
-		out, err = cmd.CombinedOutput()
+		rr, err = runCmd(exec.Command(ociBin, "inspect", "-f", fmt.Sprintf("{{range .NetworkSettings.Ports}}{{if eq .ContainerPort %s}}{{.HostPort}}{{end}}{{end}}", fmt.Sprint(contPort)), ociID))
 		if err != nil {
-			return 0, errors.Wrapf(err, "get host-bind port %d for %q, output %s", contPort, ociID, out)
+			return 0, errors.Wrapf(err, "get port %d for %q", contPort, ociID)
 		}
 	} else {
-		cmd := exec.Command(ociBinary, "inspect", "-f", fmt.Sprintf("'{{(index (index .NetworkSettings.Ports \"%d/tcp\") 0).HostPort}}'", contPort), ociID)
-		out, err = cmd.CombinedOutput()
+		rr, err = runCmd(exec.Command(ociBin, "inspect", "-f", fmt.Sprintf("'{{(index (index .NetworkSettings.Ports \"%d/tcp\") 0).HostPort}}'", contPort), ociID))
 		if err != nil {
-			return 0, errors.Wrapf(err, "get host-bind port %d for %q, output %s", contPort, ociID, out)
+			return 0, errors.Wrapf(err, "get port %d for %q", contPort, ociID)
 		}
 	}

-	o := strings.TrimSpace(string(out))
+	o := strings.TrimSpace(rr.Stdout.String())
 	o = strings.Trim(o, "'")
 	p, err := strconv.Atoi(o)

@@ -115,8 +107,8 @@ func ForwardedPort(ociBinary string, ociID string, contPort int) (int, error) {
 }

 // ContainerIPs returns ipv4,ipv6, error of a container by their name
-func ContainerIPs(ociBinary string, name string) (string, string, error) {
-	if ociBinary == Podman {
+func ContainerIPs(ociBin string, name string) (string, string, error) {
+	if ociBin == Podman {
 		return podmanConttainerIP(name)
 	}
 	return dockerContainerIP(name)

@@ -124,14 +116,13 @@ func ContainerIPs(ociBinary string, name string) (string, string, error) {
 // podmanConttainerIP returns ipv4, ipv6 of container or error
 func podmanConttainerIP(name string) (string, string, error) {
-	cmd := exec.Command(Podman, "inspect",
+	rr, err := runCmd(exec.Command(Podman, "inspect",
 		"-f", "{{.NetworkSettings.IPAddress}}",
-		name)
-	out, err := cmd.CombinedOutput()
+		name))
 	if err != nil {
 		return "", "", errors.Wrapf(err, "podman inspect ip %s", name)
 	}
-	output := strings.TrimSpace(string(out))
+	output := strings.TrimSpace(rr.Stdout.String())
 	if err == nil && output == "" { // podman returns empty for 127.0.0.1
 		return DefaultBindIPV4, "", nil
 	}

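
The two inspect templates in `ForwardedPort` are plain Go templates evaluated by docker/podman themselves. A quick way to sanity-check the docker variant from Go; the container name "minikube" is a placeholder and a running docker daemon is assumed:

```go
package main

import (
	"fmt"
	"os/exec"
	"strconv"
	"strings"
)

// Resolves the host port bound to container port 22/tcp, using the same
// template string the docker branch of ForwardedPort uses.
func main() {
	tmpl := `{{(index (index .NetworkSettings.Ports "22/tcp") 0).HostPort}}`
	out, err := exec.Command("docker", "inspect", "-f", tmpl, "minikube").Output()
	if err != nil {
		fmt.Println("inspect failed:", err)
		return
	}
	p, err := strconv.Atoi(strings.Trim(strings.TrimSpace(string(out)), "'"))
	if err != nil {
		fmt.Println("parse:", err)
		return
	}
	fmt.Println("host port:", p)
}
```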
@@ -17,9 +17,7 @@ limitations under the License.
 package oci

 import (
 	"context"
 	"os"
 	"path/filepath"
 	"time"

 	"bufio"

@@ -29,12 +27,12 @@ import (
 	"github.com/golang/glog"
 	"github.com/pkg/errors"
 	"k8s.io/minikube/pkg/minikube/constants"
 	"k8s.io/minikube/pkg/minikube/localpath"
 	"k8s.io/minikube/pkg/minikube/out"
 	"k8s.io/minikube/pkg/util/retry"

 	"fmt"
 	"os/exec"
 	"runtime"
 	"strconv"
 	"strings"
 )

@@ -64,9 +62,9 @@ func DeleteContainersByLabel(ociBin string, label string) []error {
 		if err := ShutDown(ociBin, c); err != nil {
 			glog.Infof("couldn't shut down %s (might be okay): %v ", c, err)
 		}
-		cmd := exec.Command(ociBin, "rm", "-f", "-v", c)
-		if out, err := cmd.CombinedOutput(); err != nil {
-			deleteErrs = append(deleteErrs, errors.Wrapf(err, "delete container %s: output %s", c, out))
+		if _, err := runCmd(exec.Command(ociBin, "rm", "-f", "-v", c)); err != nil {
+			deleteErrs = append(deleteErrs, errors.Wrapf(err, "delete container %s: output %s", c, err))
 		}
 	}

@@ -84,9 +82,9 @@ func DeleteContainer(ociBin string, name string) error {
 	if err := ShutDown(ociBin, name); err != nil {
 		glog.Infof("couldn't shut down %s (might be okay): %v ", name, err)
 	}
-	cmd := exec.Command(ociBin, "rm", "-f", "-v", name)
-	if out, err := cmd.CombinedOutput(); err != nil {
-		return errors.Wrapf(err, "delete container %s: output %s", name, out)
+	if _, err := runCmd(exec.Command(ociBin, "rm", "-f", "-v", name)); err != nil {
+		return errors.Wrapf(err, "delete %s", name)
 	}
 	return nil
 }

@@ -135,21 +133,41 @@ func CreateContainerNode(p CreateParams) error {
 	}

 	if p.OCIBinary == Podman { // enable execing in /var
 		// volume path in minikube home folder to mount to /var
 		hostVarVolPath := filepath.Join(localpath.MiniPath(), "machines", p.Name, "var")
 		if err := os.MkdirAll(hostVarVolPath, 0711); err != nil {
 			return errors.Wrapf(err, "create var dir %s", hostVarVolPath)
 		}
 		// podman mounts var/lib with no-exec by default https://github.com/containers/libpod/issues/5103
-		runArgs = append(runArgs, "--volume", fmt.Sprintf("%s:/var:exec", hostVarVolPath))
+		runArgs = append(runArgs, "--volume", fmt.Sprintf("%s:/var:exec", p.Name))
 	}
 	if p.OCIBinary == Docker {
 		runArgs = append(runArgs, "--volume", fmt.Sprintf("%s:/var", p.Name))
-		// setting resource limit in privileged mode is only supported by docker
-		// podman error: "Error: invalid configuration, cannot set resources with rootless containers not using cgroups v2 unified mode"
-		runArgs = append(runArgs, fmt.Sprintf("--cpus=%s", p.CPUs), fmt.Sprintf("--memory=%s", p.Memory))
 	}

+	runArgs = append(runArgs, fmt.Sprintf("--cpus=%s", p.CPUs))
+
+	memcgSwap := true
+	if runtime.GOOS == "linux" {
+		if _, err := os.Stat("/sys/fs/cgroup/memory/memsw.limit_in_bytes"); os.IsNotExist(err) {
+			// requires CONFIG_MEMCG_SWAP_ENABLED or cgroup_enable=memory in grub
+			glog.Warning("Your kernel does not support swap limit capabilities or the cgroup is not mounted.")
+			memcgSwap = false
+		}
+	}
+
+	if p.OCIBinary == Podman && memcgSwap { // swap is required for memory
+		runArgs = append(runArgs, fmt.Sprintf("--memory=%s", p.Memory))
+	}
+	if p.OCIBinary == Docker { // swap is only required for --memory-swap
+		runArgs = append(runArgs, fmt.Sprintf("--memory=%s", p.Memory))
+	}
+
+	// https://www.freedesktop.org/wiki/Software/systemd/ContainerInterface/
+	var virtualization string
+	if p.OCIBinary == Podman {
+		virtualization = "podman" // VIRTUALIZATION_PODMAN
+	}
+	if p.OCIBinary == Docker {
+		virtualization = "docker" // VIRTUALIZATION_DOCKER
+	}
+	runArgs = append(runArgs, "-e", fmt.Sprintf("%s=%s", "container", virtualization))
+
 	for key, val := range p.Envs {
 		runArgs = append(runArgs, "-e", fmt.Sprintf("%s=%s", key, val))
 	}

@@ -168,6 +186,13 @@ func CreateContainerNode(p CreateParams) error {
 	}

 	checkRunning := func() error {
+		r, err := ContainerRunning(p.OCIBinary, p.Name)
+		if err != nil {
+			return fmt.Errorf("temporary error checking running for %q : %v", p.Name, err)
+		}
+		if !r {
+			return fmt.Errorf("temporary error created container %q is not running yet", p.Name)
+		}
 		s, err := ContainerStatus(p.OCIBinary, p.Name)
 		if err != nil {
 			return fmt.Errorf("temporary error checking status for %q : %v", p.Name, err)

@@ -188,7 +213,7 @@ func CreateContainerNode(p CreateParams) error {
 }

 // CreateContainer creates a container with "docker/podman run"
-func createContainer(ociBinary string, image string, opts ...createOpt) error {
+func createContainer(ociBin string, image string, opts ...createOpt) error {
 	o := &createOpts{}
 	for _, opt := range opts {
 		o = opt(o)

@@ -205,7 +230,7 @@ func createContainer(ociBinary string, image string, opts ...createOpt) error {
 	args := []string{"run"}

 	// to run nested container from privileged container in podman https://bugzilla.redhat.com/show_bug.cgi?id=1687713
-	if ociBinary == Podman {
+	if ociBin == Podman {
 		args = append(args, "--cgroup-manager", "cgroupfs")
 	}

@@ -213,87 +238,33 @@ func createContainer(ociBinary string, image string, opts ...createOpt) error {
 	args = append(args, image)
 	args = append(args, o.ContainerArgs...)

-	out, err := exec.Command(ociBinary, args...).CombinedOutput()
-	if err != nil {
-		return errors.Wrapf(err, "failed args: %v output: %s", args, out)
-	}
-
-	return nil
-}
-
-// Copy copies a local asset into the container
-func Copy(ociBinary string, ociID string, targetDir string, fName string) error {
-	if _, err := os.Stat(fName); os.IsNotExist(err) {
-		return errors.Wrapf(err, "error source %s does not exist", fName)
-	}
-
-	destination := fmt.Sprintf("%s:%s", ociID, targetDir)
-	cmd := exec.Command(ociBinary, "cp", fName, destination)
-	if err := cmd.Run(); err != nil {
-		return errors.Wrapf(err, "error copying %s into node", fName)
+	if _, err := runCmd(exec.Command(ociBin, args...)); err != nil {
+		return err
 	}

 	return nil
 }

 // ContainerID returns id of a container name
-func ContainerID(ociBinary string, nameOrID string) (string, error) {
-	cmd := exec.Command(ociBinary, "inspect", "-f", "{{.Id}}", nameOrID)
-	out, err := cmd.CombinedOutput()
-
+func ContainerID(ociBin string, nameOrID string) (string, error) {
+	rr, err := runCmd(exec.Command(ociBin, "inspect", "-f", "{{.Id}}", nameOrID))
 	if err != nil { // don't return error if not found, only return empty string
-		if strings.Contains(string(out), "Error: No such object:") || strings.Contains(string(out), "unable to find") {
+		if strings.Contains(rr.Stdout.String(), "Error: No such object:") || strings.Contains(rr.Stdout.String(), "unable to find") {
 			err = nil
 		}
-		out = []byte{}
+		return "", err
 	}

-	return string(out), err
-}
-
-// WarnIfSlow runs an oci command, warning about performance issues
-func WarnIfSlow(args ...string) ([]byte, error) {
-	killTime := 19 * time.Second
-	warnTime := 2 * time.Second
-
-	if args[1] == "volume" || args[1] == "ps" { // volume and ps requires more time than inspect
-		killTime = 30 * time.Second
-		warnTime = 3 * time.Second
-	}
-
-	ctx, cancel := context.WithTimeout(context.Background(), killTime)
-	defer cancel()
-
-	start := time.Now()
-	glog.Infof("executing with %s timeout: %v", args, killTime)
-	cmd := exec.CommandContext(ctx, args[0], args[1:]...)
-	stdout, err := cmd.Output()
-	d := time.Since(start)
-	if d > warnTime {
-		out.WarningT(`Executing "{{.command}}" took an unusually long time: {{.duration}}`, out.V{"command": strings.Join(cmd.Args, " "), "duration": d})
-		out.ErrT(out.Tip, `Restarting the {{.name}} service may improve performance.`, out.V{"name": args[0]})
-	}
-
-	if ctx.Err() == context.DeadlineExceeded {
-		return stdout, fmt.Errorf("%q timed out after %s", strings.Join(cmd.Args, " "), killTime)
-	}
-	if err != nil {
-		if exitErr, ok := err.(*exec.ExitError); ok {
-			return stdout, fmt.Errorf("%q failed: %v: %s", strings.Join(cmd.Args, " "), exitErr, exitErr.Stderr)
-		}
-		return stdout, fmt.Errorf("%q failed: %v", strings.Join(cmd.Args, " "), err)
-	}
-	return stdout, nil
+	return rr.Stdout.String(), nil
 }

 // ContainerExists checks if container name exists (either running or exited)
-func ContainerExists(ociBin string, name string) (bool, error) {
-	out, err := WarnIfSlow(ociBin, "ps", "-a", "--format", "{{.Names}}")
+func ContainerExists(ociBin string, name string, warnSlow ...bool) (bool, error) {
+	rr, err := runCmd(exec.Command(ociBin, "ps", "-a", "--format", "{{.Names}}"), warnSlow...)
 	if err != nil {
-		return false, errors.Wrapf(err, string(out))
+		return false, err
 	}

-	containers := strings.Split(string(out), "\n")
+	containers := strings.Split(rr.Stdout.String(), "\n")
 	for _, c := range containers {
 		if strings.TrimSpace(c) == name {
 			return true, nil

@@ -305,15 +276,13 @@ func ContainerExists(ociBin string, name string) (bool, error) {

 // IsCreatedByMinikube returns true if the container was created by minikube
 // with default assumption that it is not created by minikube when we don't know for sure
-func IsCreatedByMinikube(ociBinary string, nameOrID string) bool {
-	cmd := exec.Command(ociBinary, "inspect", nameOrID, "--format", "{{.Config.Labels}}")
-	out, err := cmd.CombinedOutput()
-
+func IsCreatedByMinikube(ociBin string, nameOrID string) bool {
+	rr, err := runCmd(exec.Command(ociBin, "inspect", nameOrID, "--format", "{{.Config.Labels}}"))
 	if err != nil {
 		return false
 	}

-	if strings.Contains(string(out), fmt.Sprintf("%s:true", CreatedByLabelKey)) {
+	if strings.Contains(rr.Stdout.String(), fmt.Sprintf("%s:true", CreatedByLabelKey)) {
 		return true
 	}

@@ -321,20 +290,19 @@ func IsCreatedByMinikube(ociBinary string, nameOrID string) bool {
 }

 // ListOwnedContainers lists all the containers that the kic driver created on the user's machine using a label
-func ListOwnedContainers(ociBinary string) ([]string, error) {
-	return ListContainersByLabel(ociBinary, ProfileLabelKey)
+func ListOwnedContainers(ociBin string) ([]string, error) {
+	return ListContainersByLabel(ociBin, ProfileLabelKey)
 }

 // inspect returns low-level information on containers
-func inspect(ociBinary string, containerNameOrID, format string) ([]string, error) {
-
-	cmd := exec.Command(ociBinary, "inspect",
+func inspect(ociBin string, containerNameOrID, format string) ([]string, error) {
+	cmd := exec.Command(ociBin, "inspect",
 		"-f", format,
 		containerNameOrID) // ... against the "node" container
 	var buff bytes.Buffer
 	cmd.Stdout = &buff
 	cmd.Stderr = &buff
-	err := cmd.Run()
+	_, err := runCmd(cmd)
 	scanner := bufio.NewScanner(&buff)
 	var lines []string
 	for scanner.Scan() {

@@ -390,13 +358,13 @@ func generateMountBindings(mounts ...Mount) []string {
 }

 // isUsernsRemapEnabled checks if userns-remap is enabled in docker
-func isUsernsRemapEnabled(ociBinary string) bool {
-	cmd := exec.Command(ociBinary, "info", "--format", "'{{json .SecurityOptions}}'")
+func isUsernsRemapEnabled(ociBin string) bool {
+	cmd := exec.Command(ociBin, "info", "--format", "'{{json .SecurityOptions}}'")
 	var buff bytes.Buffer
 	cmd.Stdout = &buff
 	cmd.Stderr = &buff
-	err := cmd.Run()
-	if err != nil {
+
+	if _, err := runCmd(cmd); err != nil {
 		return false
 	}

@@ -453,12 +421,12 @@ func withPortMappings(portMappings []PortMapping) createOpt {
 }

 // ListContainersByLabel returns all the container names with a specified label
-func ListContainersByLabel(ociBinary string, label string) ([]string, error) {
-	stdout, err := WarnIfSlow(ociBinary, "ps", "-a", "--filter", fmt.Sprintf("label=%s", label), "--format", "{{.Names}}")
+func ListContainersByLabel(ociBin string, label string, warnSlow ...bool) ([]string, error) {
+	rr, err := runCmd(exec.Command(ociBin, "ps", "-a", "--filter", fmt.Sprintf("label=%s", label), "--format", "{{.Names}}"), warnSlow...)
 	if err != nil {
 		return nil, err
 	}
-	s := bufio.NewScanner(bytes.NewReader(stdout))
+	s := bufio.NewScanner(bytes.NewReader(rr.Stdout.Bytes()))
 	var names []string
 	for s.Scan() {
 		n := strings.TrimSpace(s.Text())

@@ -488,11 +456,23 @@ func PointToHostDockerDaemon() error {
 	return nil
 }

+// ContainerRunning returns the running state of a container
+func ContainerRunning(ociBin string, name string, warnSlow ...bool) (bool, error) {
+	rr, err := runCmd(exec.Command(ociBin, "inspect", name, "--format={{.State.Running}}"), warnSlow...)
+	if err != nil {
+		return false, err
+	}
+	return strconv.ParseBool(strings.TrimSpace(rr.Stdout.String()))
+}
+
 // ContainerStatus returns status of a container: running, exited, ...
-func ContainerStatus(ociBin string, name string) (state.State, error) {
-	out, err := WarnIfSlow(ociBin, "inspect", name, "--format={{.State.Status}}")
-	o := strings.TrimSpace(string(out))
+func ContainerStatus(ociBin string, name string, warnSlow ...bool) (state.State, error) {
+	cmd := exec.Command(ociBin, "inspect", name, "--format={{.State.Status}}")
+	rr, err := runCmd(cmd, warnSlow...)
+	o := strings.TrimSpace(rr.Stdout.String())
 	switch o {
 	case "configured":
 		return state.Stopped, nil
 	case "running":
 		return state.Running, nil
 	case "exited":
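
`ContainerRunning` exists mainly so `checkRunning` in `CreateContainerNode` can poll until the container actually comes up before checking its detailed status. A self-contained sketch of that polling shape, with a stub standing in for `oci.ContainerRunning` (the real code uses minikube's retry helpers rather than this hand-rolled loop):

```go
package main

import (
	"fmt"
	"time"
)

// waitRunning retries with simple capped backoff until the container
// reports Running, the way the kic driver polls after "start".
func waitRunning(name string, timeout time.Duration) error {
	deadline := time.Now().Add(timeout)
	delay := 500 * time.Millisecond
	for {
		ok, err := containerRunning(name)
		if err == nil && ok {
			return nil
		}
		if time.Now().After(deadline) {
			return fmt.Errorf("%q not running after %s (last err: %v)", name, timeout, err)
		}
		time.Sleep(delay)
		if delay < 4*time.Second {
			delay *= 2 // exponential backoff, capped
		}
	}
}

func containerRunning(name string) (bool, error) { return true, nil } // stub for oci.ContainerRunning

func main() {
	fmt.Println(waitRunning("minikube", 10*time.Second))
}
```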
@@ -508,13 +488,12 @@ func ContainerStatus(ociBin string, name string) (state.State, error) {
 	}
 }

-// Shutdown will run command to shut down the container
+// ShutDown will run command to shut down the container
 // to ensure the container's process and networking bindings are all closed
 // to avoid containers getting stuck before delete https://github.com/kubernetes/minikube/issues/7657
 func ShutDown(ociBin string, name string) error {
-	cmd := exec.Command(ociBin, "exec", "--privileged", "-t", name, "/bin/bash", "-c", "sudo init 0")
-	if out, err := cmd.CombinedOutput(); err != nil {
-		glog.Infof("error shutdown %s output %q : %v", name, out, err)
+	if _, err := runCmd(exec.Command(ociBin, "exec", "--privileged", "-t", name, "/bin/bash", "-c", "sudo init 0")); err != nil {
+		glog.Infof("error shutdown %s: %v", name, err)
 	}
 	// helps with allowing docker to realize the container has exited and report its status correctly.
 	time.Sleep(time.Second * 1)

@ -29,7 +29,7 @@ import (

 // DeleteAllVolumesByLabel deletes all volumes that have a specific label
 // if there is no volume to delete it will return nil
-func DeleteAllVolumesByLabel(ociBin string, label string) []error {
+func DeleteAllVolumesByLabel(ociBin string, label string, warnSlow ...bool) []error {
 	var deleteErrs []error
 	glog.Infof("trying to delete all %s volumes with label %s", ociBin, label)
@ -40,7 +40,7 @@ func DeleteAllVolumesByLabel(ociBin string, label string) []error {
 	}

 	for _, v := range vs {
-		if _, err := WarnIfSlow(ociBin, "volume", "rm", "--force", v); err != nil {
+		if _, err := runCmd(exec.Command(ociBin, "volume", "rm", "--force", v), warnSlow...); err != nil {
 			deleteErrs = append(deleteErrs, fmt.Errorf("deleting %q", v))
 		}
 	}
@ -51,11 +51,11 @@ func DeleteAllVolumesByLabel(ociBin string, label string) []error {
 // PruneAllVolumesByLabel deletes all volumes that have a specific label
 // if there is no volume to delete it will return nil
 // example: docker volume prune -f --filter label=name.minikube.sigs.k8s.io=minikube
-func PruneAllVolumesByLabel(ociBin string, label string) []error {
+func PruneAllVolumesByLabel(ociBin string, label string, warnSlow ...bool) []error {
 	var deleteErrs []error
 	glog.Infof("trying to prune all %s volumes with label %s", ociBin, label)

-	if _, err := WarnIfSlow(ociBin, "volume", "prune", "-f", "--filter", "label="+label); err != nil {
+	cmd := exec.Command(ociBin, "volume", "prune", "-f", "--filter", "label="+label)
+	if _, err := runCmd(cmd, warnSlow...); err != nil {
 		deleteErrs = append(deleteErrs, errors.Wrapf(err, "prune volume by label %s", label))
 	}
@ -65,9 +65,8 @@ func PruneAllVolumesByLabel(ociBin string, label string) []error {
 // allVolumesByLabel returns name of all docker volumes by a specific label
 // will not return error if there is no volume found.
 func allVolumesByLabel(ociBin string, label string) ([]string, error) {
-	cmd := exec.Command(ociBin, "volume", "ls", "--filter", "label="+label, "--format", "{{.Name}}")
-	stdout, err := cmd.Output()
-	s := bufio.NewScanner(bytes.NewReader(stdout))
+	rr, err := runCmd(exec.Command(ociBin, "volume", "ls", "--filter", "label="+label, "--format", "{{.Name}}"))
+	s := bufio.NewScanner(bytes.NewReader(rr.Stdout.Bytes()))
 	var vols []string
 	for s.Scan() {
 		v := strings.TrimSpace(s.Text())
@ -82,9 +81,8 @@ func allVolumesByLabel(ociBin string, label string) ([]string, error) {
 // to the volume named volumeName
 func ExtractTarballToVolume(tarballPath, volumeName, imageName string) error {
 	cmd := exec.Command(Docker, "run", "--rm", "--entrypoint", "/usr/bin/tar", "-v", fmt.Sprintf("%s:/preloaded.tar:ro", tarballPath), "-v", fmt.Sprintf("%s:/extractDir", volumeName), imageName, "-I", "lz4", "-xvf", "/preloaded.tar", "-C", "/extractDir")
-	glog.Infof("executing: %s", cmd.Args)
-	if out, err := cmd.CombinedOutput(); err != nil {
-		return errors.Wrapf(err, "output %s", string(out))
+	if _, err := runCmd(cmd); err != nil {
+		return err
 	}
 	return nil
 }
@ -93,10 +91,8 @@ func ExtractTarballToVolume(tarballPath, volumeName, imageName string) error {
 // Caution ! if volume already exists does NOT return an error and will not apply the minikube labels on it.
 // TODO: this should be fixed as a part of https://github.com/kubernetes/minikube/issues/6530
 func createDockerVolume(profile string, nodeName string) error {
-	cmd := exec.Command(Docker, "volume", "create", nodeName, "--label", fmt.Sprintf("%s=%s", ProfileLabelKey, profile), "--label", fmt.Sprintf("%s=%s", CreatedByLabelKey, "true"))
-	glog.Infof("executing: %s", cmd.Args)
-	if out, err := cmd.CombinedOutput(); err != nil {
-		return errors.Wrapf(err, "output %s", string(out))
+	if _, err := runCmd(exec.Command(Docker, "volume", "create", nodeName, "--label", fmt.Sprintf("%s=%s", ProfileLabelKey, profile), "--label", fmt.Sprintf("%s=%s", CreatedByLabelKey, "true"))); err != nil {
+		return err
 	}
 	return nil
 }
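Because the volume helpers return a `[]error` rather than a single error, callers can log each failure individually and still attempt every deletion. A usage sketch; the label value mirrors the example given in the `PruneAllVolumesByLabel` comment above, and the choice to pass `warnSlow` is an assumption:

```go
package main

import (
	"fmt"

	"k8s.io/minikube/pkg/drivers/kic/oci"
)

func main() {
	// warnSlow=true opts each "volume rm" into the slow-command warning.
	errs := oci.DeleteAllVolumesByLabel(oci.Docker, "name.minikube.sigs.k8s.io=minikube", true)
	for _, err := range errs {
		fmt.Println("volume cleanup:", err)
	}
}
```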
@ -23,16 +23,10 @@ import (
 )

 const (
 	// DefaultNetwork is the Docker default bridge network named "bridge"
 	// (https://docs.docker.com/network/bridge/#use-the-default-bridge-network)
 	DefaultNetwork = "bridge"
 	// DefaultPodCIDR is The CIDR to be used for pods inside the node.
 	DefaultPodCIDR = "10.244.0.0/16"

 	// Version is the current version of kic
-	Version = "v0.0.9"
+	Version = "v0.0.10"
 	// SHA of the kic base image
-	baseImageSHA = "82a826cc03c3e59ead5969b8020ca138de98f366c1907293df91fc57205dbb53"
+	baseImageSHA = "f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438"
-	// OverlayImage is the cni plugin used for overlay image, created by kind.
+	// CNI plugin image used for kic drivers created by kind.
 	OverlayImage = "kindest/kindnetd:0.5.4"
@ -355,6 +355,20 @@ var Addons = map[string]*Addon{
 			"0640",
 			false),
 	}, false, "ingress-dns"),
+	"metallb": NewAddon([]*BinAsset{
+		MustBinAsset(
+			"deploy/addons/metallb/metallb.yaml",
+			vmpath.GuestAddonsDir,
+			"metallb.yaml",
+			"0640",
+			false),
+		MustBinAsset(
+			"deploy/addons/metallb/metallb-config.yaml.tmpl",
+			vmpath.GuestAddonsDir,
+			"metallb-config.yaml",
+			"0640",
+			true),
+	}, false, "metallb"),
 }

 // GenerateTemplateData generates template data for template assets
@ -371,10 +385,14 @@ func GenerateTemplateData(cfg config.KubernetesConfig) interface{} {
 		Arch string
 		ExoticArch string
 		ImageRepository string
+		LoadBalancerStartIP string
+		LoadBalancerEndIP string
 	}{
 		Arch: a,
 		ExoticArch: ea,
 		ImageRepository: cfg.ImageRepository,
+		LoadBalancerStartIP: cfg.LoadBalancerStartIP,
+		LoadBalancerEndIP: cfg.LoadBalancerEndIP,
 	}

 	return opts
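The two new fields are consumed by the `metallb-config.yaml.tmpl` asset registered above (note its `isTemplate` flag is `true`). A sketch of how such a template renders with Go's `text/template`; the ConfigMap body is modeled on metallb's layer2 address-pool config and is an assumption, not the literal file from `deploy/addons/metallb`:

```go
package main

import (
	"os"
	"text/template"
)

// lbRange matches the two fields added to GenerateTemplateData's options struct.
type lbRange struct {
	LoadBalancerStartIP string
	LoadBalancerEndIP   string
}

// Assumed template body, modeled on a metallb layer2 ConfigMap.
const metallbConfig = `apiVersion: v1
kind: ConfigMap
metadata:
  namespace: metallb-system
  name: config
data:
  config: |
    address-pools:
    - name: default
      protocol: layer2
      addresses:
      - {{ .LoadBalancerStartIP }}-{{ .LoadBalancerEndIP }}
`

func main() {
	t := template.Must(template.New("metallb").Parse(metallbConfig))
	// Example range; in minikube these values come from the addon
	// configure prompts shown earlier in this PR.
	_ = t.Execute(os.Stdout, lbRange{
		LoadBalancerStartIP: "192.168.99.105",
		LoadBalancerEndIP:   "192.168.99.110",
	})
}
```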
@ -102,7 +102,7 @@ func GenerateKubeadmYAML(cc config.ClusterConfig, n config.Node, r cruntime.Manager) ([]byte, error) {
 		NoTaintMaster: false, // That does not work with k8s 1.12+
 		DNSDomain: k8s.DNSDomain,
 		NodeIP: n.IP,
-		ControlPlaneAddress: cp.IP,
+		ControlPlaneAddress: constants.ControlPlaneAlias,
 		KubeProxyOptions: createKubeProxyOptions(k8s.ExtraOptions),
 	}
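Every kubeadm testdata hunk below reflects this single change: the expected `controlPlaneEndpoint` switches from the literal control-plane IP (and port) to the `control-plane.minikube.internal` alias behind `constants.ControlPlaneAlias`, which minikube is presumably made to resolve on each node (for example via a hosts entry).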
@ -4,7 +4,7 @@ noTaintMaster: true
 api:
   advertiseAddress: 1.1.1.1
   bindPort: 12345
-controlPlaneEndpoint: 1.1.1.1
+controlPlaneEndpoint: control-plane.minikube.internal
 kubernetesVersion: v1.11.0
 certificatesDir: /var/lib/minikube/certs
 networking:

@ -4,7 +4,7 @@ noTaintMaster: true
 api:
   advertiseAddress: 1.1.1.1
   bindPort: 8443
-controlPlaneEndpoint: 1.1.1.1
+controlPlaneEndpoint: control-plane.minikube.internal
 kubernetesVersion: v1.11.0
 certificatesDir: /var/lib/minikube/certs
 networking:

(the same one-line change appears in six more noTaintMaster testdata files, each with bindPort: 8443)
@ -24,7 +24,7 @@ apiServerExtraArgs:
 certificatesDir: /var/lib/minikube/certs
 clusterName: mk
 apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
-controlPlaneEndpoint: 1.1.1.1:12345
+controlPlaneEndpoint: control-plane.minikube.internal:12345
 etcd:
   local:
     dataDir: /var/lib/minikube/etcd

@ -24,7 +24,7 @@ apiServerExtraArgs:
 certificatesDir: /var/lib/minikube/certs
 clusterName: mk
 apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
-controlPlaneEndpoint: 1.1.1.1:8443
+controlPlaneEndpoint: control-plane.minikube.internal:8443
 etcd:
   local:
     dataDir: /var/lib/minikube/etcd
@ -32,7 +32,7 @@ schedulerExtraArgs:
 certificatesDir: /var/lib/minikube/certs
 clusterName: mk
 apiServerCertSANs: ["127.0.0.1", "localhost", "1.1.1.1"]
-controlPlaneEndpoint: 1.1.1.1:8443
+controlPlaneEndpoint: control-plane.minikube.internal:8443
 etcd:
   local:
     dataDir: /var/lib/minikube/etcd

(the identical substitution recurs throughout the remaining apiServerExtraArgs/schedulerExtraArgs testdata hunks, with ports 8443 and 12345)
@ -25,7 +25,7 @@ apiServer:
     enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
 certificatesDir: /var/lib/minikube/certs
 clusterName: mk
-controlPlaneEndpoint: 1.1.1.1:12345
+controlPlaneEndpoint: control-plane.minikube.internal:12345
 dns:
   type: CoreDNS
 etcd:

@ -25,7 +25,7 @@ apiServer:
     enable-admission-plugins: "NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,DefaultTolerationSeconds,NodeRestriction,MutatingAdmissionWebhook,ValidatingAdmissionWebhook,ResourceQuota"
 certificatesDir: /var/lib/minikube/certs
 clusterName: mk
-controlPlaneEndpoint: 1.1.1.1:8443
+controlPlaneEndpoint: control-plane.minikube.internal:8443
 dns:
   type: CoreDNS
 etcd:
@ -35,7 +35,7 @@ scheduler:
     scheduler-name: "mini-scheduler"
 certificatesDir: /var/lib/minikube/certs
 clusterName: mk
-controlPlaneEndpoint: 1.1.1.1:8443
+controlPlaneEndpoint: control-plane.minikube.internal:8443
 dns:
   type: CoreDNS
 etcd:

(the identical substitution recurs throughout the remaining apiServer/scheduler testdata hunks for the newer kubeadm config versions)