Merge branch 'master' into kic-image-upgrade
commit 9546b1ebc6
|
@@ -135,7 +135,7 @@ func initMinikubeFlags() {
|
|||
startCmd.Flags().String(criSocket, "", "The cri socket path to be used.")
|
||||
startCmd.Flags().String(networkPlugin, "", "Kubelet network plug-in to use (default: auto)")
|
||||
startCmd.Flags().Bool(enableDefaultCNI, false, "DEPRECATED: Replaced by --cni=bridge")
|
||||
startCmd.Flags().String(cniFlag, "", "CNI plug-in to use. Valid options: auto, bridge, flannel, kindnet, or path to a CNI manifest (default: auto)")
|
||||
startCmd.Flags().String(cniFlag, "", "CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto)")
|
||||
startCmd.Flags().StringSlice(waitComponents, kverify.DefaultWaitList, fmt.Sprintf("comma separated list of Kubernetes components to verify and wait for after starting a cluster. defaults to %q, available options: %q . other acceptable values are 'all' or 'none', 'true' and 'false'", strings.Join(kverify.DefaultWaitList, ","), strings.Join(kverify.AllComponentsList, ",")))
|
||||
startCmd.Flags().Duration(waitTimeout, 6*time.Minute, "max time to wait per Kubernetes core services to be healthy.")
|
||||
startCmd.Flags().Bool(nativeSSH, true, "Use native Golang SSH client (default true). Set to 'false' to use the command line 'ssh' command when accessing the docker machine. Useful for the machine drivers when they will not start with 'Waiting for SSH'.")
|
||||
|
@@ -579,7 +579,8 @@ func updateExistingConfigFromFlags(cmd *cobra.Command, existing *config.ClusterC
|
|||
cc.VerifyComponents = interpretWaitFlag(*cmd)
|
||||
}
|
||||
|
||||
if cmd.Flags().Changed(kicBaseImage) {
|
||||
// Handle flags and legacy configuration upgrades that do not contain KicBaseImage
|
||||
if cmd.Flags().Changed(kicBaseImage) || cc.KicBaseImage == "" {
|
||||
cc.KicBaseImage = viper.GetString(kicBaseImage)
|
||||
}
|
||||
|
||||
|
|
|
@@ -396,16 +396,15 @@ func (k *Bootstrapper) client(ip string, port int) (*kubernetes.Clientset, error
|
|||
func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, timeout time.Duration) error {
|
||||
start := time.Now()
|
||||
|
||||
if !n.ControlPlane {
|
||||
glog.Infof("%s is not a control plane, nothing to wait for", n.Name)
|
||||
return nil
|
||||
}
|
||||
|
||||
register.Reg.SetStep(register.VerifyingKubernetes)
|
||||
out.T(out.HealthCheck, "Verifying Kubernetes components...")
|
||||
|
||||
// TODO: #7706: for better performance we could use k.client inside minikube to avoid asking for external IP:PORT
|
||||
hostname, _, port, err := driver.ControlPlaneEndpoint(&cfg, &n, cfg.Driver)
|
||||
cp, err := config.PrimaryControlPlane(&cfg)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "get primary control plane")
|
||||
}
|
||||
hostname, _, port, err := driver.ControlPlaneEndpoint(&cfg, &cp, cfg.Driver)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "get control plane endpoint")
|
||||
}
|
||||
|
@@ -430,6 +429,7 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
|
|||
return errors.Wrapf(err, "create runtme-manager %s", cfg.KubernetesConfig.ContainerRuntime)
|
||||
}
|
||||
|
||||
if n.ControlPlane {
|
||||
if cfg.VerifyComponents[kverify.APIServerWaitKey] {
|
||||
if err := kverify.WaitForAPIServerProcess(cr, k, cfg, k.c, start, timeout); err != nil {
|
||||
return errors.Wrap(err, "wait for apiserver proc")
|
||||
|
@@ -457,6 +457,7 @@ func (k *Bootstrapper) WaitForNode(cfg config.ClusterConfig, n config.Node, time
|
|||
return errors.Wrap(err, "waiting for apps_running")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if cfg.VerifyComponents[kverify.NodeReadyKey] {
|
||||
if err := kverify.WaitForNodeReady(client, timeout); err != nil {
|
||||
|
@@ -730,7 +731,7 @@ func (k *Bootstrapper) SetupCerts(k8s config.KubernetesConfig, n config.Node) er
|
|||
return err
|
||||
}
|
||||
|
||||
// UpdateCluster updates the cluster.
|
||||
// UpdateCluster updates the control plane with cluster-level info.
|
||||
func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error {
|
||||
images, err := images.Kubeadm(cfg.KubernetesConfig.ImageRepository, cfg.KubernetesConfig.KubernetesVersion)
|
||||
if err != nil {
|
||||
|
@@ -753,11 +754,14 @@ func (k *Bootstrapper) UpdateCluster(cfg config.ClusterConfig) error {
|
|||
}
|
||||
}
|
||||
|
||||
for _, n := range cfg.Nodes {
|
||||
err := k.UpdateNode(cfg, n, r)
|
||||
cp, err := config.PrimaryControlPlane(&cfg)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "updating node")
|
||||
return errors.Wrap(err, "getting control plane")
|
||||
}
|
||||
|
||||
err = k.UpdateNode(cfg, cp, r)
|
||||
if err != nil {
|
||||
return errors.Wrap(err, "updating control plane")
|
||||
}
|
||||
|
||||
return nil
|
||||
|
|
|
@@ -0,0 +1,886 @@
|
|||
/*
|
||||
Copyright 2020 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cni
|
||||
|
||||
import (
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
)
|
||||
|
||||
// calicoTmpl is from https://docs.projectcalico.org/manifests/calico.yaml
|
||||
var calicoTmpl = `---
|
||||
# Source: calico/templates/calico-config.yaml
|
||||
# This ConfigMap is used to configure a self-hosted Calico installation.
|
||||
kind: ConfigMap
|
||||
apiVersion: v1
|
||||
metadata:
|
||||
name: calico-config
|
||||
namespace: kube-system
|
||||
data:
|
||||
# Typha is disabled.
|
||||
typha_service_name: "none"
|
||||
# Configure the backend to use.
|
||||
calico_backend: "bird"
|
||||
# Configure the MTU to use for workload interfaces and the
|
||||
# tunnels. For IPIP, set to your network MTU - 20; for VXLAN
|
||||
# set to your network MTU - 50.
|
||||
veth_mtu: "1440"
|
||||
|
||||
# The CNI network configuration to install on each node. The special
|
||||
# values in this config will be automatically populated.
|
||||
cni_network_config: |-
|
||||
{
|
||||
"name": "k8s-pod-network",
|
||||
"cniVersion": "0.3.1",
|
||||
"plugins": [
|
||||
{
|
||||
"type": "calico",
|
||||
"log_level": "info",
|
||||
"datastore_type": "kubernetes",
|
||||
"nodename": "__KUBERNETES_NODE_NAME__",
|
||||
"mtu": __CNI_MTU__,
|
||||
"ipam": {
|
||||
"type": "calico-ipam"
|
||||
},
|
||||
"policy": {
|
||||
"type": "k8s"
|
||||
},
|
||||
"kubernetes": {
|
||||
"kubeconfig": "__KUBECONFIG_FILEPATH__"
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "portmap",
|
||||
"snat": true,
|
||||
"capabilities": {"portMappings": true}
|
||||
},
|
||||
{
|
||||
"type": "bandwidth",
|
||||
"capabilities": {"bandwidth": true}
|
||||
}
|
||||
]
|
||||
}
|
||||
|
||||
---
|
||||
# Source: calico/templates/kdd-crds.yaml
|
||||
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: bgpconfigurations.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: BGPConfiguration
|
||||
plural: bgpconfigurations
|
||||
singular: bgpconfiguration
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: bgppeers.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: BGPPeer
|
||||
plural: bgppeers
|
||||
singular: bgppeer
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: blockaffinities.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: BlockAffinity
|
||||
plural: blockaffinities
|
||||
singular: blockaffinity
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: clusterinformations.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: ClusterInformation
|
||||
plural: clusterinformations
|
||||
singular: clusterinformation
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: felixconfigurations.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: FelixConfiguration
|
||||
plural: felixconfigurations
|
||||
singular: felixconfiguration
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: globalnetworkpolicies.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: GlobalNetworkPolicy
|
||||
plural: globalnetworkpolicies
|
||||
singular: globalnetworkpolicy
|
||||
shortNames:
|
||||
- gnp
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: globalnetworksets.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: GlobalNetworkSet
|
||||
plural: globalnetworksets
|
||||
singular: globalnetworkset
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: hostendpoints.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: HostEndpoint
|
||||
plural: hostendpoints
|
||||
singular: hostendpoint
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: ipamblocks.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: IPAMBlock
|
||||
plural: ipamblocks
|
||||
singular: ipamblock
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: ipamconfigs.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: IPAMConfig
|
||||
plural: ipamconfigs
|
||||
singular: ipamconfig
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: ipamhandles.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: IPAMHandle
|
||||
plural: ipamhandles
|
||||
singular: ipamhandle
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: ippools.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: IPPool
|
||||
plural: ippools
|
||||
singular: ippool
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: kubecontrollersconfigurations.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Cluster
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: KubeControllersConfiguration
|
||||
plural: kubecontrollersconfigurations
|
||||
singular: kubecontrollersconfiguration
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: networkpolicies.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Namespaced
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: NetworkPolicy
|
||||
plural: networkpolicies
|
||||
singular: networkpolicy
|
||||
|
||||
---
|
||||
apiVersion: apiextensions.k8s.io/v1beta1
|
||||
kind: CustomResourceDefinition
|
||||
metadata:
|
||||
name: networksets.crd.projectcalico.org
|
||||
spec:
|
||||
scope: Namespaced
|
||||
group: crd.projectcalico.org
|
||||
version: v1
|
||||
names:
|
||||
kind: NetworkSet
|
||||
plural: networksets
|
||||
singular: networkset
|
||||
|
||||
---
|
||||
---
|
||||
# Source: calico/templates/rbac.yaml
|
||||
|
||||
# Include a clusterrole for the kube-controllers component,
|
||||
# and bind it to the calico-kube-controllers serviceaccount.
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: calico-kube-controllers
|
||||
rules:
|
||||
# Nodes are watched to monitor for deletions.
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- watch
|
||||
- list
|
||||
- get
|
||||
# Pods are queried to check for existence.
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
# IPAM resources are manipulated when nodes are deleted.
|
||||
- apiGroups: ["crd.projectcalico.org"]
|
||||
resources:
|
||||
- ippools
|
||||
verbs:
|
||||
- list
|
||||
- apiGroups: ["crd.projectcalico.org"]
|
||||
resources:
|
||||
- blockaffinities
|
||||
- ipamblocks
|
||||
- ipamhandles
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- create
|
||||
- update
|
||||
- delete
|
||||
# kube-controllers manages hostendpoints.
|
||||
- apiGroups: ["crd.projectcalico.org"]
|
||||
resources:
|
||||
- hostendpoints
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- create
|
||||
- update
|
||||
- delete
|
||||
# Needs access to update clusterinformations.
|
||||
- apiGroups: ["crd.projectcalico.org"]
|
||||
resources:
|
||||
- clusterinformations
|
||||
verbs:
|
||||
- get
|
||||
- create
|
||||
- update
|
||||
# KubeControllersConfiguration is where it gets its config
|
||||
- apiGroups: ["crd.projectcalico.org"]
|
||||
resources:
|
||||
- kubecontrollersconfigurations
|
||||
verbs:
|
||||
# read its own config
|
||||
- get
|
||||
# create a default if none exists
|
||||
- create
|
||||
# update status
|
||||
- update
|
||||
# watch for changes
|
||||
- watch
|
||||
---
|
||||
kind: ClusterRoleBinding
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: calico-kube-controllers
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: calico-kube-controllers
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: calico-kube-controllers
|
||||
namespace: kube-system
|
||||
---
|
||||
# Include a clusterrole for the calico-node DaemonSet,
|
||||
# and bind it to the calico-node serviceaccount.
|
||||
kind: ClusterRole
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
metadata:
|
||||
name: calico-node
|
||||
rules:
|
||||
# The CNI plugin needs to get pods, nodes, and namespaces.
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- pods
|
||||
- nodes
|
||||
- namespaces
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- endpoints
|
||||
- services
|
||||
verbs:
|
||||
# Used to discover service IPs for advertisement.
|
||||
- watch
|
||||
- list
|
||||
# Used to discover Typhas.
|
||||
- get
|
||||
# Pod CIDR auto-detection on kubeadm needs access to config maps.
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- configmaps
|
||||
verbs:
|
||||
- get
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- nodes/status
|
||||
verbs:
|
||||
# Needed for clearing NodeNetworkUnavailable flag.
|
||||
- patch
|
||||
# Calico stores some configuration information in node annotations.
|
||||
- update
|
||||
# Watch for changes to Kubernetes NetworkPolicies.
|
||||
- apiGroups: ["networking.k8s.io"]
|
||||
resources:
|
||||
- networkpolicies
|
||||
verbs:
|
||||
- watch
|
||||
- list
|
||||
# Used by Calico for policy information.
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- pods
|
||||
- namespaces
|
||||
- serviceaccounts
|
||||
verbs:
|
||||
- list
|
||||
- watch
|
||||
# The CNI plugin patches pods/status.
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- pods/status
|
||||
verbs:
|
||||
- patch
|
||||
# Calico monitors various CRDs for config.
|
||||
- apiGroups: ["crd.projectcalico.org"]
|
||||
resources:
|
||||
- globalfelixconfigs
|
||||
- felixconfigurations
|
||||
- bgppeers
|
||||
- globalbgpconfigs
|
||||
- bgpconfigurations
|
||||
- ippools
|
||||
- ipamblocks
|
||||
- globalnetworkpolicies
|
||||
- globalnetworksets
|
||||
- networkpolicies
|
||||
- networksets
|
||||
- clusterinformations
|
||||
- hostendpoints
|
||||
- blockaffinities
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
# Calico must create and update some CRDs on startup.
|
||||
- apiGroups: ["crd.projectcalico.org"]
|
||||
resources:
|
||||
- ippools
|
||||
- felixconfigurations
|
||||
- clusterinformations
|
||||
verbs:
|
||||
- create
|
||||
- update
|
||||
# Calico stores some configuration information on the node.
|
||||
- apiGroups: [""]
|
||||
resources:
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
# These permissions are only required for upgrade from v2.6, and can
|
||||
# be removed after upgrade or on fresh installations.
|
||||
- apiGroups: ["crd.projectcalico.org"]
|
||||
resources:
|
||||
- bgpconfigurations
|
||||
- bgppeers
|
||||
verbs:
|
||||
- create
|
||||
- update
|
||||
# These permissions are required for Calico CNI to perform IPAM allocations.
|
||||
- apiGroups: ["crd.projectcalico.org"]
|
||||
resources:
|
||||
- blockaffinities
|
||||
- ipamblocks
|
||||
- ipamhandles
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- create
|
||||
- update
|
||||
- delete
|
||||
- apiGroups: ["crd.projectcalico.org"]
|
||||
resources:
|
||||
- ipamconfigs
|
||||
verbs:
|
||||
- get
|
||||
# Block affinities must also be watchable by confd for route aggregation.
|
||||
- apiGroups: ["crd.projectcalico.org"]
|
||||
resources:
|
||||
- blockaffinities
|
||||
verbs:
|
||||
- watch
|
||||
# The Calico IPAM migration needs to get daemonsets. These permissions can be
|
||||
# removed if not upgrading from an installation using host-local IPAM.
|
||||
- apiGroups: ["apps"]
|
||||
resources:
|
||||
- daemonsets
|
||||
verbs:
|
||||
- get
|
||||
|
||||
---
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: calico-node
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: calico-node
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: calico-node
|
||||
namespace: kube-system
|
||||
|
||||
---
|
||||
# Source: calico/templates/calico-node.yaml
|
||||
# This manifest installs the calico-node container, as well
|
||||
# as the CNI plugins and network config on
|
||||
# each master and worker node in a Kubernetes cluster.
|
||||
kind: DaemonSet
|
||||
apiVersion: apps/v1
|
||||
metadata:
|
||||
name: calico-node
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: calico-node
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: calico-node
|
||||
updateStrategy:
|
||||
type: RollingUpdate
|
||||
rollingUpdate:
|
||||
maxUnavailable: 1
|
||||
template:
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: calico-node
|
||||
annotations:
|
||||
# This, along with the CriticalAddonsOnly toleration below,
|
||||
# marks the pod as a critical add-on, ensuring it gets
|
||||
# priority scheduling and that its resources are reserved
|
||||
# if it ever gets evicted.
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
spec:
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
hostNetwork: true
|
||||
tolerations:
|
||||
# Make sure calico-node gets scheduled on all nodes.
|
||||
- effect: NoSchedule
|
||||
operator: Exists
|
||||
# Mark the pod as a critical add-on for rescheduling.
|
||||
- key: CriticalAddonsOnly
|
||||
operator: Exists
|
||||
- effect: NoExecute
|
||||
operator: Exists
|
||||
serviceAccountName: calico-node
|
||||
# Minimize downtime during a rolling upgrade or deletion; tell Kubernetes to do a "force
|
||||
# deletion": https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods.
|
||||
terminationGracePeriodSeconds: 0
|
||||
priorityClassName: system-node-critical
|
||||
initContainers:
|
||||
# This container performs upgrade from host-local IPAM to calico-ipam.
|
||||
# It can be deleted if this is a fresh installation, or if you have already
|
||||
# upgraded to use calico-ipam.
|
||||
- name: upgrade-ipam
|
||||
image: calico/cni:v3.14.1
|
||||
command: ["/opt/cni/bin/calico-ipam", "-upgrade"]
|
||||
env:
|
||||
- name: KUBERNETES_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
- name: CALICO_NETWORKING_BACKEND
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: calico-config
|
||||
key: calico_backend
|
||||
volumeMounts:
|
||||
- mountPath: /var/lib/cni/networks
|
||||
name: host-local-net-dir
|
||||
- mountPath: /host/opt/cni/bin
|
||||
name: cni-bin-dir
|
||||
securityContext:
|
||||
privileged: true
|
||||
# This container installs the CNI binaries
|
||||
# and CNI network config file on each node.
|
||||
- name: install-cni
|
||||
image: calico/cni:v3.14.1
|
||||
command: ["/install-cni.sh"]
|
||||
env:
|
||||
# Name of the CNI config file to create.
|
||||
- name: CNI_CONF_NAME
|
||||
value: "10-calico.conflist"
|
||||
# The CNI network config to install on each node.
|
||||
- name: CNI_NETWORK_CONFIG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: calico-config
|
||||
key: cni_network_config
|
||||
# Set the hostname based on the k8s node name.
|
||||
- name: KUBERNETES_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
# CNI MTU Config variable
|
||||
- name: CNI_MTU
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: calico-config
|
||||
key: veth_mtu
|
||||
# Prevents the container from sleeping forever.
|
||||
- name: SLEEP
|
||||
value: "false"
|
||||
volumeMounts:
|
||||
- mountPath: /host/opt/cni/bin
|
||||
name: cni-bin-dir
|
||||
- mountPath: /host/etc/cni/net.d
|
||||
name: cni-net-dir
|
||||
securityContext:
|
||||
privileged: true
|
||||
# Adds a Flex Volume Driver that creates a per-pod Unix Domain Socket to allow Dikastes
|
||||
# to communicate with Felix over the Policy Sync API.
|
||||
- name: flexvol-driver
|
||||
image: calico/pod2daemon-flexvol:v3.14.1
|
||||
volumeMounts:
|
||||
- name: flexvol-driver-host
|
||||
mountPath: /host/driver
|
||||
securityContext:
|
||||
privileged: true
|
||||
containers:
|
||||
# Runs calico-node container on each Kubernetes node. This
|
||||
# container programs network policy and routes on each
|
||||
# host.
|
||||
- name: calico-node
|
||||
image: calico/node:v3.14.1
|
||||
env:
|
||||
# Use Kubernetes API as the backing datastore.
|
||||
- name: DATASTORE_TYPE
|
||||
value: "kubernetes"
|
||||
# Wait for the datastore.
|
||||
- name: WAIT_FOR_DATASTORE
|
||||
value: "true"
|
||||
# Set based on the k8s node name.
|
||||
- name: NODENAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
fieldPath: spec.nodeName
|
||||
# Choose the backend to use.
|
||||
- name: CALICO_NETWORKING_BACKEND
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: calico-config
|
||||
key: calico_backend
|
||||
# Cluster type to identify the deployment type
|
||||
- name: CLUSTER_TYPE
|
||||
value: "k8s,bgp"
|
||||
# Auto-detect the BGP IP address.
|
||||
- name: IP
|
||||
value: "autodetect"
|
||||
# Enable IPIP
|
||||
- name: CALICO_IPV4POOL_IPIP
|
||||
value: "Always"
|
||||
# Enable or Disable VXLAN on the default IP pool.
|
||||
- name: CALICO_IPV4POOL_VXLAN
|
||||
value: "Never"
|
||||
# Set MTU for tunnel device used if ipip is enabled
|
||||
- name: FELIX_IPINIPMTU
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: calico-config
|
||||
key: veth_mtu
|
||||
# Set MTU for the VXLAN tunnel device.
|
||||
- name: FELIX_VXLANMTU
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
name: calico-config
|
||||
key: veth_mtu
|
||||
# The default IPv4 pool to create on startup if none exists. Pod IPs will be
|
||||
# chosen from this range. Changing this value after installation will have
|
||||
# no effect. This should fall within --cluster-cidr
|
||||
# - name: CALICO_IPV4POOL_CIDR
|
||||
# value: "192.168.0.0/16"
|
||||
# Disable file logging so kubectl logs works.
|
||||
- name: CALICO_DISABLE_FILE_LOGGING
|
||||
value: "true"
|
||||
# Set Felix endpoint to host default action to ACCEPT.
|
||||
- name: FELIX_DEFAULTENDPOINTTOHOSTACTION
|
||||
value: "ACCEPT"
|
||||
# Disable IPv6 on Kubernetes.
|
||||
- name: FELIX_IPV6SUPPORT
|
||||
value: "false"
|
||||
# Set Felix logging to "info"
|
||||
- name: FELIX_LOGSEVERITYSCREEN
|
||||
value: "info"
|
||||
- name: FELIX_HEALTHENABLED
|
||||
value: "true"
|
||||
securityContext:
|
||||
privileged: true
|
||||
resources:
|
||||
requests:
|
||||
cpu: 250m
|
||||
livenessProbe:
|
||||
exec:
|
||||
command:
|
||||
- /bin/calico-node
|
||||
- -felix-live
|
||||
- -bird-live
|
||||
periodSeconds: 10
|
||||
initialDelaySeconds: 10
|
||||
failureThreshold: 6
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- /bin/calico-node
|
||||
- -felix-ready
|
||||
- -bird-ready
|
||||
periodSeconds: 10
|
||||
volumeMounts:
|
||||
- mountPath: /lib/modules
|
||||
name: lib-modules
|
||||
readOnly: true
|
||||
- mountPath: /run/xtables.lock
|
||||
name: xtables-lock
|
||||
readOnly: false
|
||||
- mountPath: /var/run/calico
|
||||
name: var-run-calico
|
||||
readOnly: false
|
||||
- mountPath: /var/lib/calico
|
||||
name: var-lib-calico
|
||||
readOnly: false
|
||||
- name: policysync
|
||||
mountPath: /var/run/nodeagent
|
||||
volumes:
|
||||
# Used by calico-node.
|
||||
- name: lib-modules
|
||||
hostPath:
|
||||
path: /lib/modules
|
||||
- name: var-run-calico
|
||||
hostPath:
|
||||
path: /var/run/calico
|
||||
- name: var-lib-calico
|
||||
hostPath:
|
||||
path: /var/lib/calico
|
||||
- name: xtables-lock
|
||||
hostPath:
|
||||
path: /run/xtables.lock
|
||||
type: FileOrCreate
|
||||
# Used to install CNI.
|
||||
- name: cni-bin-dir
|
||||
hostPath:
|
||||
path: /opt/cni/bin
|
||||
- name: cni-net-dir
|
||||
hostPath:
|
||||
path: /etc/cni/net.d
|
||||
# Mount in the directory for host-local IPAM allocations. This is
|
||||
# used when upgrading from host-local to calico-ipam, and can be removed
|
||||
# if not using the upgrade-ipam init container.
|
||||
- name: host-local-net-dir
|
||||
hostPath:
|
||||
path: /var/lib/cni/networks
|
||||
# Used to create per-pod Unix Domain Sockets
|
||||
- name: policysync
|
||||
hostPath:
|
||||
type: DirectoryOrCreate
|
||||
path: /var/run/nodeagent
|
||||
# Used to install Flex Volume Driver
|
||||
- name: flexvol-driver-host
|
||||
hostPath:
|
||||
type: DirectoryOrCreate
|
||||
path: /usr/libexec/kubernetes/kubelet-plugins/volume/exec/nodeagent~uds
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: calico-node
|
||||
namespace: kube-system
|
||||
|
||||
---
|
||||
# Source: calico/templates/calico-kube-controllers.yaml
|
||||
# See https://github.com/projectcalico/kube-controllers
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: calico-kube-controllers
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: calico-kube-controllers
|
||||
spec:
|
||||
# The controllers can only have a single active instance.
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: calico-kube-controllers
|
||||
strategy:
|
||||
type: Recreate
|
||||
template:
|
||||
metadata:
|
||||
name: calico-kube-controllers
|
||||
namespace: kube-system
|
||||
labels:
|
||||
k8s-app: calico-kube-controllers
|
||||
annotations:
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ''
|
||||
spec:
|
||||
nodeSelector:
|
||||
kubernetes.io/os: linux
|
||||
tolerations:
|
||||
# Mark the pod as a critical add-on for rescheduling.
|
||||
- key: CriticalAddonsOnly
|
||||
operator: Exists
|
||||
- key: node-role.kubernetes.io/master
|
||||
effect: NoSchedule
|
||||
serviceAccountName: calico-kube-controllers
|
||||
priorityClassName: system-cluster-critical
|
||||
containers:
|
||||
- name: calico-kube-controllers
|
||||
image: calico/kube-controllers:v3.14.1
|
||||
env:
|
||||
# Choose which controllers to run.
|
||||
- name: ENABLED_CONTROLLERS
|
||||
value: node
|
||||
- name: DATASTORE_TYPE
|
||||
value: kubernetes
|
||||
readinessProbe:
|
||||
exec:
|
||||
command:
|
||||
- /usr/bin/check-status
|
||||
- -r
|
||||
|
||||
---
|
||||
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: calico-kube-controllers
|
||||
namespace: kube-system
|
||||
|
||||
---
|
||||
# Source: calico/templates/calico-etcd-secrets.yaml
|
||||
|
||||
---
|
||||
# Source: calico/templates/calico-typha.yaml
|
||||
|
||||
---
|
||||
# Source: calico/templates/configure-canal.yaml
|
||||
|
||||
`
|
||||
|
||||
// Calico is the Calico CNI manager
|
||||
type Calico struct {
|
||||
cc config.ClusterConfig
|
||||
}
|
||||
|
||||
// String returns a string representation of this CNI
|
||||
func (c Calico) String() string {
|
||||
return "Calico"
|
||||
}
|
||||
|
||||
// Apply enables the CNI
|
||||
func (c Calico) Apply(r Runner) error {
|
||||
return applyManifest(c.cc, r, manifestAsset([]byte(calicoTmpl)))
|
||||
}
|
||||
|
||||
// CIDR returns the default CIDR used by this CNI
|
||||
func (c Calico) CIDR() string {
|
||||
// Calico docs specify 192.168.0.0/16 - but we do this for compatibility with other CNI's.
|
||||
return DefaultPodCIDR
|
||||
}
|
|
@@ -0,0 +1,695 @@
|
|||
/*
|
||||
Copyright 2020 The Kubernetes Authors All rights reserved.
|
||||
|
||||
Licensed under the Apache License, Version 2.0 (the "License");
|
||||
you may not use this file except in compliance with the License.
|
||||
You may obtain a copy of the License at
|
||||
|
||||
http://www.apache.org/licenses/LICENSE-2.0
|
||||
|
||||
Unless required by applicable law or agreed to in writing, software
|
||||
distributed under the License is distributed on an "AS IS" BASIS,
|
||||
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
|
||||
See the License for the specific language governing permissions and
|
||||
limitations under the License.
|
||||
*/
|
||||
|
||||
package cni
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
|
||||
"github.com/pkg/errors"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
)
|
||||
|
||||
// From https://raw.githubusercontent.com/cilium/cilium/v1.8/install/kubernetes/quick-install.yaml
|
||||
var ciliumTmpl = `---
|
||||
# Source: cilium/charts/agent/templates/serviceaccount.yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: cilium
|
||||
namespace: kube-system
|
||||
---
|
||||
# Source: cilium/charts/operator/templates/serviceaccount.yaml
|
||||
apiVersion: v1
|
||||
kind: ServiceAccount
|
||||
metadata:
|
||||
name: cilium-operator
|
||||
namespace: kube-system
|
||||
---
|
||||
# Source: cilium/charts/config/templates/configmap.yaml
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: cilium-config
|
||||
namespace: kube-system
|
||||
data:
|
||||
|
||||
# Identity allocation mode selects how identities are shared between cilium
|
||||
# nodes by setting how they are stored. The options are "crd" or "kvstore".
|
||||
# - "crd" stores identities in kubernetes as CRDs (custom resource definition).
|
||||
# These can be queried with:
|
||||
# kubectl get ciliumid
|
||||
# - "kvstore" stores identities in a kvstore, etcd or consul, that is
|
||||
# configured below. Cilium versions before 1.6 supported only the kvstore
|
||||
# backend. Upgrades from these older cilium versions should continue using
|
||||
# the kvstore by commenting out the identity-allocation-mode below, or
|
||||
# setting it to "kvstore".
|
||||
identity-allocation-mode: crd
|
||||
|
||||
# If you want to run cilium in debug mode change this value to true
|
||||
debug: "false"
|
||||
|
||||
# Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
|
||||
# address.
|
||||
enable-ipv4: "true"
|
||||
|
||||
# Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6
|
||||
# address.
|
||||
enable-ipv6: "false"
|
||||
enable-bpf-clock-probe: "true"
|
||||
|
||||
# If you want cilium monitor to aggregate tracing for packets, set this level
|
||||
# to "low", "medium", or "maximum". The higher the level, the less packets
|
||||
# that will be seen in monitor output.
|
||||
monitor-aggregation: medium
|
||||
|
||||
# The monitor aggregation interval governs the typical time between monitor
|
||||
# notification events for each allowed connection.
|
||||
#
|
||||
# Only effective when monitor aggregation is set to "medium" or higher.
|
||||
monitor-aggregation-interval: 5s
|
||||
|
||||
# The monitor aggregation flags determine which TCP flags which, upon the
|
||||
# first observation, cause monitor notifications to be generated.
|
||||
#
|
||||
# Only effective when monitor aggregation is set to "medium" or higher.
|
||||
monitor-aggregation-flags: all
|
||||
# bpf-policy-map-max specified the maximum number of entries in endpoint
|
||||
# policy map (per endpoint)
|
||||
bpf-policy-map-max: "16384"
|
||||
# Specifies the ratio (0.0-1.0) of total system memory to use for dynamic
|
||||
# sizing of the TCP CT, non-TCP CT, NAT and policy BPF maps.
|
||||
bpf-map-dynamic-size-ratio: "0.0025"
|
||||
|
||||
# Pre-allocation of map entries allows per-packet latency to be reduced, at
|
||||
# the expense of up-front memory allocation for the entries in the maps. The
|
||||
# default value below will minimize memory usage in the default installation;
|
||||
# users who are sensitive to latency may consider setting this to "true".
|
||||
#
|
||||
# This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
|
||||
# this option and behave as though it is set to "true".
|
||||
#
|
||||
# If this value is modified, then during the next Cilium startup the restore
|
||||
# of existing endpoints and tracking of ongoing connections may be disrupted.
|
||||
# This may lead to policy drops or a change in loadbalancing decisions for a
|
||||
# connection for some time. Endpoints may need to be recreated to restore
|
||||
# connectivity.
|
||||
#
|
||||
# If this option is set to "false" during an upgrade from 1.3 or earlier to
|
||||
# 1.4 or later, then it may cause one-time disruptions during the upgrade.
|
||||
preallocate-bpf-maps: "false"
|
||||
|
||||
# Regular expression matching compatible Istio sidecar istio-proxy
|
||||
# container image names
|
||||
sidecar-istio-proxy-image: "cilium/istio_proxy"
|
||||
|
||||
# Encapsulation mode for communication between nodes
|
||||
# Possible values:
|
||||
# - disabled
|
||||
# - vxlan (default)
|
||||
# - geneve
|
||||
tunnel: vxlan
|
||||
|
||||
# Name of the cluster. Only relevant when building a mesh of clusters.
|
||||
cluster-name: default
|
||||
|
||||
# DNS Polling periodically issues a DNS lookup for each 'matchName' from
|
||||
# cilium-agent. The result is used to regenerate endpoint policy.
|
||||
# DNS lookups are repeated with an interval of 5 seconds, and are made for
|
||||
# A(IPv4) and AAAA(IPv6) addresses. Should a lookup fail, the most recent IP
|
||||
# data is used instead. An IP change will trigger a regeneration of the Cilium
|
||||
# policy for each endpoint and increment the per cilium-agent policy
|
||||
# repository revision.
|
||||
#
|
||||
# This option is disabled by default starting from version 1.4.x in favor
|
||||
# of a more powerful DNS proxy-based implementation, see [0] for details.
|
||||
# Enable this option if you want to use FQDN policies but do not want to use
|
||||
# the DNS proxy.
|
||||
#
|
||||
# To ease upgrade, users may opt to set this option to "true".
|
||||
# Otherwise please refer to the Upgrade Guide [1] which explains how to
|
||||
# prepare policy rules for upgrade.
|
||||
#
|
||||
# [0] http://docs.cilium.io/en/stable/policy/language/#dns-based
|
||||
# [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
|
||||
tofqdns-enable-poller: "false"
|
||||
|
||||
# wait-bpf-mount makes init container wait until bpf filesystem is mounted
|
||||
wait-bpf-mount: "false"
|
||||
|
||||
masquerade: "true"
|
||||
enable-bpf-masquerade: "true"
|
||||
enable-xt-socket-fallback: "true"
|
||||
install-iptables-rules: "true"
|
||||
auto-direct-node-routes: "false"
|
||||
kube-proxy-replacement: "probe"
|
||||
node-port-bind-protection: "true"
|
||||
enable-auto-protect-node-port-range: "true"
|
||||
enable-session-affinity: "true"
|
||||
k8s-require-ipv4-pod-cidr: "true"
|
||||
k8s-require-ipv6-pod-cidr: "false"
|
||||
enable-endpoint-health-checking: "true"
|
||||
enable-well-known-identities: "false"
|
||||
enable-remote-node-identity: "true"
|
||||
operator-api-serve-addr: "127.0.0.1:9234"
|
||||
ipam: "cluster-pool"
|
||||
cluster-pool-ipv4-cidr: "10.0.0.0/8"
|
||||
cluster-pool-ipv4-mask-size: "24"
|
||||
disable-cnp-status-updates: "true"
|
||||
---
|
||||
# Source: cilium/charts/agent/templates/clusterrole.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: cilium
|
||||
rules:
|
||||
- apiGroups:
|
||||
- networking.k8s.io
|
||||
resources:
|
||||
- networkpolicies
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- discovery.k8s.io
|
||||
resources:
|
||||
- endpointslices
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- namespaces
|
||||
- services
|
||||
- nodes
|
||||
- endpoints
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- pods
|
||||
- nodes
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
- nodes
|
||||
- nodes/status
|
||||
verbs:
|
||||
- patch
|
||||
- apiGroups:
|
||||
- apiextensions.k8s.io
|
||||
resources:
|
||||
- customresourcedefinitions
|
||||
verbs:
|
||||
- create
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- update
|
||||
- apiGroups:
|
||||
- cilium.io
|
||||
resources:
|
||||
- ciliumnetworkpolicies
|
||||
- ciliumnetworkpolicies/status
|
||||
- ciliumclusterwidenetworkpolicies
|
||||
- ciliumclusterwidenetworkpolicies/status
|
||||
- ciliumendpoints
|
||||
- ciliumendpoints/status
|
||||
- ciliumnodes
|
||||
- ciliumnodes/status
|
||||
- ciliumidentities
|
||||
verbs:
|
||||
- '*'
|
||||
---
|
||||
# Source: cilium/charts/operator/templates/clusterrole.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRole
|
||||
metadata:
|
||||
name: cilium-operator
|
||||
rules:
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
# to automatically delete [core|kube]dns pods so that are starting to being
|
||||
# managed by Cilium
|
||||
- pods
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- delete
|
||||
- apiGroups:
|
||||
- discovery.k8s.io
|
||||
resources:
|
||||
- endpointslices
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- ""
|
||||
resources:
|
||||
# to perform the translation of a CNP that contains 'ToGroup' to its endpoints
|
||||
- services
|
||||
- endpoints
|
||||
# to check apiserver connectivity
|
||||
- namespaces
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
- apiGroups:
|
||||
- cilium.io
|
||||
resources:
|
||||
- ciliumnetworkpolicies
|
||||
- ciliumnetworkpolicies/status
|
||||
- ciliumclusterwidenetworkpolicies
|
||||
- ciliumclusterwidenetworkpolicies/status
|
||||
- ciliumendpoints
|
||||
- ciliumendpoints/status
|
||||
- ciliumnodes
|
||||
- ciliumnodes/status
|
||||
- ciliumidentities
|
||||
- ciliumidentities/status
|
||||
verbs:
|
||||
- '*'
|
||||
- apiGroups:
|
||||
- apiextensions.k8s.io
|
||||
resources:
|
||||
- customresourcedefinitions
|
||||
verbs:
|
||||
- get
|
||||
- list
|
||||
- watch
|
||||
---
|
||||
# Source: cilium/charts/agent/templates/clusterrolebinding.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: cilium
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: cilium
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: cilium
|
||||
namespace: kube-system
|
||||
---
|
||||
# Source: cilium/charts/operator/templates/clusterrolebinding.yaml
|
||||
apiVersion: rbac.authorization.k8s.io/v1
|
||||
kind: ClusterRoleBinding
|
||||
metadata:
|
||||
name: cilium-operator
|
||||
roleRef:
|
||||
apiGroup: rbac.authorization.k8s.io
|
||||
kind: ClusterRole
|
||||
name: cilium-operator
|
||||
subjects:
|
||||
- kind: ServiceAccount
|
||||
name: cilium-operator
|
||||
namespace: kube-system
|
||||
---
|
||||
# Source: cilium/charts/agent/templates/daemonset.yaml
|
||||
apiVersion: apps/v1
|
||||
kind: DaemonSet
|
||||
metadata:
|
||||
labels:
|
||||
k8s-app: cilium
|
||||
name: cilium
|
||||
namespace: kube-system
|
||||
spec:
|
||||
selector:
|
||||
matchLabels:
|
||||
k8s-app: cilium
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
# This annotation plus the CriticalAddonsOnly toleration makes
|
||||
# cilium to be a critical pod in the cluster, which ensures cilium
|
||||
# gets priority scheduling.
|
||||
# https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
|
||||
scheduler.alpha.kubernetes.io/critical-pod: ""
|
||||
labels:
|
||||
k8s-app: cilium
|
||||
spec:
|
||||
affinity:
|
||||
podAntiAffinity:
|
||||
requiredDuringSchedulingIgnoredDuringExecution:
|
||||
- labelSelector:
|
||||
matchExpressions:
|
||||
- key: k8s-app
|
||||
operator: In
|
||||
values:
|
||||
- cilium
|
||||
topologyKey: kubernetes.io/hostname
|
||||
containers:
|
||||
- args:
|
||||
- --config-dir=/tmp/cilium/config-map
|
||||
command:
|
||||
- cilium-agent
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
host: '127.0.0.1'
|
||||
path: /healthz
|
||||
port: 9876
|
||||
scheme: HTTP
|
||||
httpHeaders:
|
||||
- name: "brief"
|
||||
value: "true"
|
||||
failureThreshold: 10
|
||||
# The initial delay for the liveness probe is intentionally large to
|
||||
# avoid an endless kill & restart cycle if in the event that the initial
|
||||
# bootstrapping takes longer than expected.
|
||||
initialDelaySeconds: 120
|
||||
periodSeconds: 30
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 5
|
||||
readinessProbe:
|
||||
httpGet:
|
||||
host: '127.0.0.1'
|
||||
path: /healthz
|
||||
port: 9876
|
||||
scheme: HTTP
|
||||
httpHeaders:
|
||||
- name: "brief"
|
||||
value: "true"
|
||||
failureThreshold: 3
|
||||
initialDelaySeconds: 5
|
||||
periodSeconds: 30
|
||||
successThreshold: 1
|
||||
timeoutSeconds: 5
|
||||
env:
|
||||
- name: K8S_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: spec.nodeName
|
||||
- name: CILIUM_K8S_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: metadata.namespace
|
||||
- name: CILIUM_Cilium_MASTER_DEVICE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: Cilium-master-device
|
||||
name: cilium-config
|
||||
optional: true
|
||||
- name: CILIUM_Cilium_UNINSTALL_ON_EXIT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: Cilium-uninstall-on-exit
|
||||
name: cilium-config
|
||||
optional: true
|
||||
- name: CILIUM_CLUSTERMESH_CONFIG
|
||||
value: /var/lib/cilium/clustermesh/
|
||||
- name: CILIUM_CNI_CHAINING_MODE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: cni-chaining-mode
|
||||
name: cilium-config
|
||||
optional: true
|
||||
- name: CILIUM_CUSTOM_CNI_CONF
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: custom-cni-conf
|
||||
name: cilium-config
|
||||
optional: true
|
||||
image: "docker.io/cilium/cilium:v1.8.0"
|
||||
imagePullPolicy: IfNotPresent
|
||||
lifecycle:
|
||||
postStart:
|
||||
exec:
|
||||
command:
|
||||
- "/cni-install.sh"
|
||||
- "--enable-debug=false"
|
||||
preStop:
|
||||
exec:
|
||||
command:
|
||||
- /cni-uninstall.sh
|
||||
name: cilium-agent
|
||||
securityContext:
|
||||
capabilities:
|
||||
add:
|
||||
- NET_ADMIN
|
||||
- SYS_MODULE
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- mountPath: /sys/fs/bpf
|
||||
name: bpf-maps
|
||||
- mountPath: /var/run/cilium
|
||||
name: cilium-run
|
||||
- mountPath: /host/opt/cni/bin
|
||||
name: cni-path
|
||||
- mountPath: /host/etc/cni/net.d
|
||||
name: etc-cni-netd
|
||||
- mountPath: /var/lib/cilium/clustermesh
|
||||
name: clustermesh-secrets
|
||||
readOnly: true
|
||||
- mountPath: /tmp/cilium/config-map
|
||||
name: cilium-config-path
|
||||
readOnly: true
|
||||
# Needed to be able to load kernel modules
|
||||
- mountPath: /lib/modules
|
||||
name: lib-modules
|
||||
readOnly: true
|
||||
- mountPath: /run/xtables.lock
|
||||
name: xtables-lock
|
||||
hostNetwork: true
|
||||
initContainers:
|
||||
- command:
|
||||
- /init-container.sh
|
||||
env:
|
||||
- name: CILIUM_ALL_STATE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: clean-cilium-state
|
||||
name: cilium-config
|
||||
optional: true
|
||||
- name: CILIUM_BPF_STATE
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: clean-cilium-bpf-state
|
||||
name: cilium-config
|
||||
optional: true
|
||||
- name: CILIUM_WAIT_BPF_MOUNT
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: wait-bpf-mount
|
||||
name: cilium-config
|
||||
optional: true
|
||||
image: "docker.io/cilium/cilium:v1.8.0"
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: clean-cilium-state
|
||||
securityContext:
|
||||
capabilities:
|
||||
add:
|
||||
- NET_ADMIN
|
||||
privileged: true
|
||||
volumeMounts:
|
||||
- mountPath: /sys/fs/bpf
|
||||
name: bpf-maps
|
||||
mountPropagation: HostToContainer
|
||||
- mountPath: /var/run/cilium
|
||||
name: cilium-run
|
||||
resources:
|
||||
requests:
|
||||
cpu: 100m
|
||||
memory: 100Mi
|
||||
restartPolicy: Always
|
||||
priorityClassName: system-node-critical
|
||||
serviceAccount: cilium
|
||||
serviceAccountName: cilium
|
||||
terminationGracePeriodSeconds: 1
|
||||
tolerations:
|
||||
- operator: Exists
|
||||
volumes:
|
||||
# To keep state between restarts / upgrades
|
||||
- hostPath:
|
||||
path: /var/run/cilium
|
||||
type: DirectoryOrCreate
|
||||
name: cilium-run
|
||||
# To keep state between restarts / upgrades for bpf maps
|
||||
- hostPath:
|
||||
path: /sys/fs/bpf
|
||||
type: DirectoryOrCreate
|
||||
name: bpf-maps
|
||||
# To install cilium cni plugin in the host
|
||||
- hostPath:
|
||||
path: /opt/cni/bin
|
||||
type: DirectoryOrCreate
|
||||
name: cni-path
|
||||
# To install cilium cni configuration in the host
|
||||
- hostPath:
|
||||
path: /etc/cni/net.d
|
||||
type: DirectoryOrCreate
|
||||
name: etc-cni-netd
|
||||
# To be able to load kernel modules
|
||||
- hostPath:
|
||||
path: /lib/modules
|
||||
name: lib-modules
|
||||
# To access iptables concurrently with other processes (e.g. kube-proxy)
|
||||
- hostPath:
|
||||
path: /run/xtables.lock
|
||||
type: FileOrCreate
|
||||
name: xtables-lock
|
||||
# To read the clustermesh configuration
|
||||
- name: clustermesh-secrets
|
||||
secret:
|
||||
defaultMode: 420
|
||||
optional: true
|
||||
secretName: cilium-clustermesh
|
||||
# To read the configuration from the config map
|
||||
- configMap:
|
||||
name: cilium-config
|
||||
name: cilium-config-path
|
||||
updateStrategy:
|
||||
rollingUpdate:
|
||||
maxUnavailable: 2
|
||||
type: RollingUpdate
|
||||
---
|
||||
# Source: cilium/charts/operator/templates/deployment.yaml
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
labels:
|
||||
io.cilium/app: operator
|
||||
name: cilium-operator
|
||||
name: cilium-operator
|
||||
namespace: kube-system
|
||||
spec:
|
||||
replicas: 1
|
||||
selector:
|
||||
matchLabels:
|
||||
io.cilium/app: operator
|
||||
name: cilium-operator
|
||||
strategy:
|
||||
rollingUpdate:
|
||||
maxSurge: 1
|
||||
maxUnavailable: 1
|
||||
type: RollingUpdate
|
||||
template:
|
||||
metadata:
|
||||
annotations:
|
||||
labels:
|
||||
io.cilium/app: operator
|
||||
name: cilium-operator
|
||||
spec:
|
||||
containers:
|
||||
- args:
|
||||
- --config-dir=/tmp/cilium/config-map
|
||||
- --debug=$(CILIUM_DEBUG)
|
||||
command:
|
||||
- cilium-operator-generic
|
||||
env:
|
||||
- name: K8S_NODE_NAME
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: spec.nodeName
|
||||
- name: CILIUM_K8S_NAMESPACE
|
||||
valueFrom:
|
||||
fieldRef:
|
||||
apiVersion: v1
|
||||
fieldPath: metadata.namespace
|
||||
- name: CILIUM_DEBUG
|
||||
valueFrom:
|
||||
configMapKeyRef:
|
||||
key: debug
|
||||
name: cilium-config
|
||||
optional: true
|
||||
- name: AWS_ACCESS_KEY_ID
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: AWS_ACCESS_KEY_ID
|
||||
name: cilium-aws
|
||||
optional: true
|
||||
- name: AWS_SECRET_ACCESS_KEY
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: AWS_SECRET_ACCESS_KEY
|
||||
name: cilium-aws
|
||||
optional: true
|
||||
- name: AWS_DEFAULT_REGION
|
||||
valueFrom:
|
||||
secretKeyRef:
|
||||
key: AWS_DEFAULT_REGION
|
||||
name: cilium-aws
|
||||
optional: true
|
||||
image: "docker.io/cilium/operator-generic:v1.8.0"
|
||||
imagePullPolicy: IfNotPresent
|
||||
name: cilium-operator
|
||||
livenessProbe:
|
||||
httpGet:
|
||||
host: '127.0.0.1'
|
||||
path: /healthz
|
||||
port: 9234
|
||||
scheme: HTTP
|
||||
initialDelaySeconds: 60
|
||||
periodSeconds: 10
|
||||
timeoutSeconds: 3
|
||||
volumeMounts:
|
||||
- mountPath: /tmp/cilium/config-map
|
||||
name: cilium-config-path
|
||||
readOnly: true
|
||||
hostNetwork: true
|
||||
restartPolicy: Always
|
||||
priorityClassName: system-cluster-critical
|
||||
serviceAccount: cilium-operator
|
||||
serviceAccountName: cilium-operator
|
||||
volumes:
|
||||
# To read the configuration from the config map
|
||||
- configMap:
|
||||
name: cilium-config
|
||||
name: cilium-config-path
|
||||
`
|
||||
|
||||
// Cilium is the Cilium CNI manager
|
||||
type Cilium struct {
|
||||
cc config.ClusterConfig
|
||||
}
|
||||
|
||||
// String returns a string representation of this CNI
|
||||
func (c Cilium) String() string {
|
||||
return "Cilium"
|
||||
}
|
||||
|
||||
// Apply enables the CNI
|
||||
func (c Cilium) Apply(r Runner) error {
|
||||
// see https://kubernetes.io/docs/tasks/administer-cluster/network-policy-provider/cilium-network-policy/
|
||||
if _, err := r.RunCmd(exec.Command("sudo", "/bin/bash", "-c", "grep 'bpffs /sys/fs/bpf' /proc/mounts || sudo mount bpffs -t bpf /sys/fs/bpf")); err != nil {
|
||||
return errors.Wrap(err, "bpf mount")
|
||||
}
|
||||
|
||||
return applyManifest(c.cc, r, manifestAsset([]byte(ciliumTmpl)))
|
||||
}
|
||||
|
||||
// CIDR returns the default CIDR used by this CNI
|
||||
func (c Cilium) CIDR() string {
|
||||
return DefaultPodCIDR
|
||||
}
|
|
@@ -82,6 +82,10 @@ func New(cc config.ClusterConfig) (Manager, error) {
|
|||
return KindNet{cc: cc}, nil
|
||||
case "bridge":
|
||||
return Bridge{cc: cc}, nil
|
||||
case "calico":
|
||||
return Calico{cc: cc}, nil
|
||||
case "cilium":
|
||||
return Cilium{cc: cc}, nil
|
||||
case "flannel":
|
||||
return Flannel{cc: cc}, nil
|
||||
default:
|
||||
|
|
|
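Note on the two new CNI files above and this dispatch hunk: the following is an illustrative Go sketch (not part of this change) of how the new "calico" and "cilium" cases returned by cni.New are expected to be driven. The helper name, import paths, and the use of command.Runner for the runner argument are assumptions for illustration only.

package node

import (
	"github.com/golang/glog"
	"github.com/pkg/errors"

	"k8s.io/minikube/pkg/minikube/cni"
	"k8s.io/minikube/pkg/minikube/command"
	"k8s.io/minikube/pkg/minikube/config"
)

// applyCNI is a hypothetical helper: it asks cni.New for the manager selected
// by the cluster config (now including Calico{} and Cilium{}), logs the
// manager's default pod CIDR, and applies its embedded manifest through a
// command runner on the control plane.
func applyCNI(cc config.ClusterConfig, r command.Runner) error {
	cnm, err := cni.New(cc)
	if err != nil {
		return errors.Wrap(err, "cni manager")
	}
	glog.Infof("applying %s CNI (default pod CIDR %s)", cnm, cnm.CIDR())
	if err := cnm.Apply(r); err != nil {
		return errors.Wrap(err, "apply cni manifest")
	}
	return nil
}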
@@ -18,11 +18,13 @@ package node
|
|||
|
||||
import (
|
||||
"fmt"
|
||||
"os/exec"
|
||||
|
||||
"github.com/golang/glog"
|
||||
"github.com/pkg/errors"
|
||||
"github.com/spf13/viper"
|
||||
|
||||
"k8s.io/minikube/pkg/kapi"
|
||||
"k8s.io/minikube/pkg/minikube/config"
|
||||
"k8s.io/minikube/pkg/minikube/driver"
|
||||
"k8s.io/minikube/pkg/minikube/machine"
|
||||
|
@@ -66,12 +68,44 @@ func Delete(cc config.ClusterConfig, name string) (*config.Node, error) {
|
|||
return n, errors.Wrap(err, "retrieve")
|
||||
}
|
||||
|
||||
m := driver.MachineName(cc, *n)
|
||||
api, err := machine.NewAPIClient()
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
|
||||
err = machine.DeleteHost(api, driver.MachineName(cc, *n))
|
||||
// grab control plane to use kubeconfig
|
||||
host, err := machine.LoadHost(api, cc.Name)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
|
||||
runner, err := machine.CommandRunner(host)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
|
||||
// kubectl drain
|
||||
kubectl := kapi.KubectlBinaryPath(cc.KubernetesConfig.KubernetesVersion)
|
||||
cmd := exec.Command("sudo", "KUBECONFIG=/var/lib/minikube/kubeconfig", kubectl, "drain", m)
|
||||
if _, err := runner.RunCmd(cmd); err != nil {
|
||||
glog.Warningf("unable to scale coredns replicas to 1: %v", err)
|
||||
} else {
|
||||
glog.Infof("successfully scaled coredns replicas to 1")
|
||||
}
|
||||
|
||||
// kubectl delete
|
||||
client, err := kapi.Client(cc.Name)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
|
||||
err = client.CoreV1().Nodes().Delete(m, nil)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
|
||||
err = machine.DeleteHost(api, m)
|
||||
if err != nil {
|
||||
return n, err
|
||||
}
|
||||
|
|
|
@@ -162,11 +162,6 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) {
|
|||
prepareNone()
|
||||
}
|
||||
|
||||
glog.Infof("Will wait %s for node ...", waitTimeout)
|
||||
if err := bs.WaitForNode(*starter.Cfg, *starter.Node, viper.GetDuration(waitTimeout)); err != nil {
|
||||
return nil, errors.Wrapf(err, "wait %s for node", viper.GetDuration(waitTimeout))
|
||||
}
|
||||
|
||||
} else {
|
||||
if err := bs.UpdateNode(*starter.Cfg, *starter.Node, cr); err != nil {
|
||||
return nil, errors.Wrap(err, "update node")
|
||||
|
@@ -197,6 +192,11 @@ func Start(starter Starter, apiServer bool) (*kubeconfig.Settings, error) {
|
|||
}
|
||||
}
|
||||
|
||||
glog.Infof("Will wait %s for node ...", waitTimeout)
|
||||
if err := bs.WaitForNode(*starter.Cfg, *starter.Node, viper.GetDuration(waitTimeout)); err != nil {
|
||||
return nil, errors.Wrapf(err, "wait %s for node", viper.GetDuration(waitTimeout))
|
||||
}
|
||||
|
||||
glog.Infof("waiting for startup goroutines ...")
|
||||
wg.Wait()
|
||||
|
||||
|
|
|
@@ -154,12 +154,13 @@ func suggestFix(stderr string, err error) registry.State {
|
|||
}
|
||||
|
||||
if strings.Contains(stderr, "/pipe/docker_engine: The system cannot find the file specified.") && runtime.GOOS == "windows" {
|
||||
return registry.State{Error: err, Installed: true, Healthy: false, Fix: "Reset Docker to factory settings: under Settings > Reset.", Doc: "https://github.com/docker/for-win/issues/1825#issuecomment-450501157"}
|
||||
return registry.State{Error: err, Installed: true, Healthy: false, Fix: "Start the Docker service. If Docker is already running, you may need to reset Docker to factory settings with: Settings > Reset.", Doc: "https://github.com/docker/for-win/issues/1825#issuecomment-450501157"}
|
||||
}
|
||||
|
||||
if strings.Contains(stderr, "Cannot connect") || strings.Contains(stderr, "refused") || strings.Contains(stderr, "Is the docker daemon running") || strings.Contains(stderr, "docker daemon is not running") {
|
||||
return registry.State{Error: err, Installed: true, Healthy: false, Fix: "Start the Docker service", Doc: docURL}
|
||||
}
|
||||
|
||||
// We don't have good advice, but at least we can provide a good error message
|
||||
return registry.State{Error: err, Installed: true, Healthy: false, Doc: docURL}
|
||||
}
|
||||
|
|
|
@@ -29,7 +29,7 @@ minikube start [flags]
|
|||
--auto-update-drivers If set, automatically updates drivers to the latest version. Defaults to true. (default true)
|
||||
--base-image string The base image to use for docker/podman drivers. Intended for local development. (default "gcr.io/k8s-minikube/kicbase:v0.0.10@sha256:f58e0c4662bac8a9b5dda7984b185bad8502ade5d9fa364bf2755d636ab51438")
|
||||
--cache-images If true, cache docker images for the current bootstrapper and load them into the machine. Always false with --driver=none. (default true)
|
||||
--cni string CNI plug-in to use. Valid options: auto, bridge, flannel, kindnet, or path to a CNI manifest (default: auto)
|
||||
--cni string CNI plug-in to use. Valid options: auto, bridge, calico, cilium, flannel, kindnet, or path to a CNI manifest (default: auto)
|
||||
--container-runtime string The container runtime to be used (docker, cri-o, containerd). (default "docker")
|
||||
--cpus int Number of CPUs allocated to Kubernetes. (default 2)
|
||||
--cri-socket string The cri socket path to be used.
|
||||
|
|
|
@@ -15,7 +15,7 @@ as well.
|
|||
|
||||
`minikube kubectl -- <kubectl commands>`
|
||||
|
||||
You can also `alias kubectl=minikube kubectl --` for easier usage.
|
||||
You can also `alias kubectl="minikube kubectl --"` for easier usage.
|
||||
|
||||
Get pods
|
||||
|
||||
|
|
|
@@ -24,6 +24,7 @@ import (
|
|||
"os/exec"
|
||||
"strings"
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestMultiNode(t *testing.T) {
|
||||
|
@@ -179,7 +180,7 @@ func validateStartNodeAfterStop(ctx context.Context, t *testing.T, profile strin
|
|||
}
|
||||
|
||||
func validateStopMultiNodeCluster(ctx context.Context, t *testing.T, profile string) {
|
||||
// Run minikube node stop on that node
|
||||
// Run minikube stop on the cluster
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), "-p", profile, "stop"))
|
||||
if err != nil {
|
||||
t.Errorf("node stop returned an error. args %q: %v", rr.Command(), err)
|
||||
|
@@ -218,7 +219,7 @@ func validateRestartMultiNodeCluster(ctx context.Context, t *testing.T, profile
|
|||
}
|
||||
}
|
||||
// Restart a full cluster with minikube start
|
||||
startArgs := append([]string{"start", "-p", profile}, StartArgs()...)
|
||||
startArgs := append([]string{"start", "-p", profile, "--wait=true"}, StartArgs()...)
|
||||
rr, err := Run(t, exec.CommandContext(ctx, Target(), startArgs...))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to start cluster. args %q : %v", rr.Command(), err)
|
||||
|
@@ -231,11 +232,30 @@ func validateRestartMultiNodeCluster(ctx context.Context, t *testing.T, profile
|
|||
}
|
||||
|
||||
if strings.Count(rr.Stdout.String(), "host: Running") != 2 {
|
||||
t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Stdout.String())
|
||||
t.Errorf("status says both hosts are not running: args %q: %v", rr.Command(), rr.Output())
|
||||
}
|
||||
|
||||
if strings.Count(rr.Stdout.String(), "kubelet: Running") != 2 {
|
||||
t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Stdout.String())
|
||||
t.Errorf("status says both kubelets are not running: args %q: %v", rr.Command(), rr.Output())
|
||||
}
|
||||
|
||||
time.Sleep(Seconds(30))
|
||||
|
||||
// Make sure kubectl reports that all nodes are ready
|
||||
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes"))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to run kubectl get nodes. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
if strings.Count(rr.Stdout.String(), "NotReady") > 0 {
|
||||
t.Errorf("expected 2 nodes to be Ready, got %v", rr.Output())
|
||||
}
|
||||
|
||||
rr, err = Run(t, exec.CommandContext(ctx, "kubectl", "get", "nodes", "-o", `go-template='{{range .items}}{{range .status.conditions}}{{if eq .type "Ready"}} {{.status}}{{"\n"}}{{end}}{{end}}{{end}}'`))
|
||||
if err != nil {
|
||||
t.Fatalf("failed to run kubectl get nodes. args %q : %v", rr.Command(), err)
|
||||
}
|
||||
if strings.Count(rr.Stdout.String(), "True") != 2 {
|
||||
t.Errorf("expected 2 nodes Ready status to be True, got %v", rr.Output())
|
||||
}
|
||||
}
|
||||
|
||||
|
|
|
@@ -54,6 +54,8 @@ func TestNetworkPlugins(t *testing.T) {
|
|||
{"kindnet", []string{"--cni=kindnet"}, "cni", "app=kindnet", true},
|
||||
{"false", []string{"--cni=false"}, "", "", false},
|
||||
{"custom-weave", []string{fmt.Sprintf("--cni=%s", filepath.Join(*testdataDir, "weavenet.yaml"))}, "cni", "", true},
|
||||
{"calico", []string{"--cni=calico"}, "cni", "k8s-app=calico-node", true},
|
||||
{"cilium", []string{"--cni=cilium"}, "cni", "k8s-app=cilium", true},
|
||||
}
|
||||
|
||||
for _, tc := range tests {
|
||||
|
@@ -156,7 +158,7 @@ func TestNetworkPlugins(t *testing.T) {
|
|||
}
|
||||
|
||||
// If the coredns process was stable, this retry wouldn't be necessary.
|
||||
if err := retry.Expo(nslookup, 1*time.Second, Minutes(2)); err != nil {
|
||||
if err := retry.Expo(nslookup, 1*time.Second, Minutes(6)); err != nil {
|
||||
t.Errorf("failed to do nslookup on kubernetes.default: %v", err)
|
||||
}
|
||||
|
||||
|
|
|
@@ -267,3 +267,63 @@ func TestMissingContainerUpgrade(t *testing.T) {
|
|||
t.Errorf("failed missing container upgrade from %s. args: %s : %v", legacyVersion, rr.Command(), err)
|
||||
}
|
||||
}
|
||||
|
||||
// TestMissingContainerUpgrade tests a Docker upgrade where the underlying container is missing
|
||||
func TestMissingContainerUpgrade(t *testing.T) {
|
||||
if !DockerDriver() {
|
||||
t.Skipf("This test is only for Docker")
|
||||
}
|
||||
|
||||
MaybeParallel(t)
|
||||
profile := UniqueProfileName("missing-upgrade")
|
||||
ctx, cancel := context.WithTimeout(context.Background(), Minutes(55))
|
||||
|
||||
defer CleanupWithLogs(t, profile, cancel)
|
||||
|
||||
legacyVersion := "v1.9.1"
|
||||
tf, err := ioutil.TempFile("", fmt.Sprintf("minikube-%s.*.exe", legacyVersion))
|
||||
if err != nil {
|
||||
t.Fatalf("tempfile: %v", err)
|
||||
}
|
||||
defer os.Remove(tf.Name())
|
||||
tf.Close()
|
||||
|
||||
url := pkgutil.GetBinaryDownloadURL(legacyVersion, runtime.GOOS)
|
||||
if err := retry.Expo(func() error { return getter.GetFile(tf.Name(), url) }, 3*time.Second, Minutes(3)); err != nil {
|
||||
t.Fatalf("get failed: %v", err)
|
||||
}
|
||||
|
||||
if runtime.GOOS != "windows" {
|
||||
if err := os.Chmod(tf.Name(), 0700); err != nil {
|
||||
t.Errorf("chmod: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
args := append([]string{"start", "-p", profile, "--memory=2200"}, StartArgs()...)
|
||||
rr := &RunResult{}
|
||||
r := func() error {
|
||||
rr, err = Run(t, exec.CommandContext(ctx, tf.Name(), args...))
|
||||
return err
|
||||
}
|
||||
|
||||
// Retry up to two times, to allow flakiness for the previous release
|
||||
if err := retry.Expo(r, 1*time.Second, Minutes(30), 2); err != nil {
|
||||
t.Fatalf("release start failed: %v", err)
|
||||
}
|
||||
|
||||
rr, err = Run(t, exec.CommandContext(ctx, "docker", "stop", profile))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
rr, err = Run(t, exec.CommandContext(ctx, "docker", "rm", profile))
|
||||
if err != nil {
|
||||
t.Fatalf("%s failed: %v", rr.Command(), err)
|
||||
}
|
||||
|
||||
args = append([]string{"start", "-p", profile, "--memory=2200", "--alsologtostderr", "-v=1"}, StartArgs()...)
|
||||
rr, err = Run(t, exec.CommandContext(ctx, Target(), args...))
|
||||
if err != nil {
|
||||
t.Errorf("failed missing container upgrade from %s. args: %s : %v", legacyVersion, rr.Command(), err)
|
||||
}
|
||||
}
|
||||
|
|