CNI: Update cilium from v1.15.7 to v1.16.0

pull/19358/head
Authored by minikube-bot on 2024-07-29 10:02:08 +00:00, committed by Medya Ghazizadeh
parent cc6f0fd5b9
commit abcff17414
1 changed file with 554 additions and 23 deletions

@@ -6,6 +6,13 @@ metadata:
name: "cilium"
namespace: kube-system
---
# Source: cilium/templates/cilium-envoy/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
metadata:
name: "cilium-envoy"
namespace: kube-system
---
# Source: cilium/templates/cilium-operator/serviceaccount.yaml
apiVersion: v1
kind: ServiceAccount
@@ -36,7 +43,6 @@ data:
identity-gc-interval: "15m0s"
cilium-endpoint-gc-interval: "5m0s"
nodes-gc-interval: "5m0s"
skip-cnp-status-startup-clean: "false"
# If you want to run cilium in debug mode change this value to true
debug: "false"
@@ -46,9 +52,6 @@ data:
# https://docs.cilium.io/en/latest/security/policy/intro/#policy-enforcement-modes
enable-policy: "default"
policy-cidr-match-mode: ""
# Port to expose Envoy metrics (e.g. "9964"). Envoy metrics listener will be disabled if this
# field is not set.
proxy-prometheus-port: "9964"
# If you want metrics enabled in cilium-operator, set the port for
# which the Cilium Operator will have their metrics exposed.
# NOTE that this will open the port on the nodes where Cilium operator pod
@@ -94,6 +97,10 @@ data:
bpf-lb-map-max: "65536"
bpf-lb-external-clusterip: "false"
bpf-events-drop-enabled: "true"
bpf-events-policy-verdict-enabled: "true"
bpf-events-trace-enabled: "true"
# Pre-allocation of map entries allows per-packet latency to be reduced, at
# the expense of up-front memory allocation for the entries in the maps. The
# default value below will minimize memory usage in the default installation;
@@ -111,10 +118,6 @@ data:
# 1.4 or later, then it may cause one-time disruptions during the upgrade.
preallocate-bpf-maps: "false"
# Regular expression matching compatible Istio sidecar istio-proxy
# container image names
sidecar-istio-proxy-image: "cilium/istio_proxy"
# Name of the cluster. Only relevant when building a mesh of clusters.
cluster-name: default
# Unique ID of the cluster. Must be unique across all connected clusters and
@@ -139,20 +142,26 @@ data:
enable-ipv4-big-tcp: "false"
enable-ipv6-big-tcp: "false"
enable-ipv6-masquerade: "true"
enable-tcx: "true"
datapath-mode: "veth"
enable-masquerade-to-route-source: "false"
enable-xt-socket-fallback: "true"
install-no-conntrack-iptables-rules: "false"
auto-direct-node-routes: "false"
direct-routing-skip-unreachable: "false"
enable-local-redirect-policy: "false"
enable-runtime-device-detection: "true"
kube-proxy-replacement: "false"
kube-proxy-replacement-healthz-bind-address: ""
bpf-lb-sock: "false"
bpf-lb-sock-terminate-pod-connections: "false"
enable-host-port: "false"
enable-external-ips: "false"
enable-node-port: "false"
nodeport-addresses: ""
enable-health-check-nodeport: "true"
enable-health-check-loadbalancer-ip: "false"
node-port-bind-protection: "true"
@@ -161,6 +170,8 @@ data:
enable-svc-source-range-check: "true"
enable-l2-neigh-discovery: "true"
arping-refresh-period: "30s"
k8s-require-ipv4-pod-cidr: "false"
k8s-require-ipv6-pod-cidr: "false"
enable-k8s-networkpolicy: "true"
# Tell the agent to generate and write a CNI configuration file
write-cni-conf-when-ready: /host/etc/cni/net.d/05-cilium.conflist
@@ -169,7 +180,7 @@ data:
enable-endpoint-health-checking: "true"
enable-health-checking: "true"
enable-well-known-identities: "false"
enable-remote-node-identity: "true"
enable-node-selector-labels: "false"
synchronize-k8s-nodes: "true"
operator-api-serve-addr: "127.0.0.1:9234"
# Enable Hubble gRPC service.
@@ -194,7 +205,6 @@ data:
vtep-cidr: ""
vtep-mask: ""
vtep-mac: ""
enable-bgp-control-plane: "false"
procfs: "/host/proc"
bpf-root: "/sys/fs/bpf"
cgroup-root: "/run/cilium/cgroupv2"
@@ -209,6 +219,7 @@ data:
unmanaged-pod-watcher-interval: "15"
# default DNS proxy to transparent mode in non-chaining modes
dnsproxy-enable-transparent-mode: "true"
dnsproxy-socket-linger-timeout: "10"
tofqdns-dns-reject-response-code: "refused"
tofqdns-enable-dns-compression: "true"
tofqdns-endpoint-max-ip-per-hostname: "50"
@@ -229,12 +240,343 @@ data:
proxy-max-connection-duration-seconds: "0"
proxy-idle-timeout-seconds: "60"
external-envoy-proxy: "false"
external-envoy-proxy: "true"
envoy-base-id: "0"
envoy-keep-cap-netbindservice: "false"
max-connected-clusters: "255"
clustermesh-enable-endpoint-sync: "false"
clustermesh-enable-mcs-api: "false"
# Extra config allows adding arbitrary properties to the cilium config.
# By putting it at the end of the ConfigMap, it's also possible to override existing properties.
---
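Side note on the extra-config comment at the end of the cilium-config ConfigMap above: in the Cilium Helm chart, such arbitrary properties are typically supplied via an extraConfig value, which the chart appends to the end of this ConfigMap so later keys override earlier ones. A minimal sketch of such a values snippet follows; it is hypothetical and not part of this diff:

extraConfig:
  # example override: any key rendered earlier in cilium-config can be re-set here
  enable-ipv6: "false"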
# Source: cilium/templates/cilium-envoy/configmap.yaml
apiVersion: v1
kind: ConfigMap
metadata:
name: cilium-envoy-config
namespace: kube-system
data:
bootstrap-config.json: |
{
"node": {
"id": "host~127.0.0.1~no-id~localdomain",
"cluster": "ingress-cluster"
},
"staticResources": {
"listeners": [
{
"name": "envoy-prometheus-metrics-listener",
"address": {
"socket_address": {
"address": "0.0.0.0",
"port_value": 9964
}
},
"filter_chains": [
{
"filters": [
{
"name": "envoy.filters.network.http_connection_manager",
"typed_config": {
"@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
"stat_prefix": "envoy-prometheus-metrics-listener",
"route_config": {
"virtual_hosts": [
{
"name": "prometheus_metrics_route",
"domains": [
"*"
],
"routes": [
{
"name": "prometheus_metrics_route",
"match": {
"prefix": "/metrics"
},
"route": {
"cluster": "/envoy-admin",
"prefix_rewrite": "/stats/prometheus"
}
}
]
}
]
},
"http_filters": [
{
"name": "envoy.filters.http.router",
"typed_config": {
"@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"
}
}
],
"stream_idle_timeout": "0s"
}
}
]
}
]
},
{
"name": "envoy-health-listener",
"address": {
"socket_address": {
"address": "127.0.0.1",
"port_value": 9878
}
},
"filter_chains": [
{
"filters": [
{
"name": "envoy.filters.network.http_connection_manager",
"typed_config": {
"@type": "type.googleapis.com/envoy.extensions.filters.network.http_connection_manager.v3.HttpConnectionManager",
"stat_prefix": "envoy-health-listener",
"route_config": {
"virtual_hosts": [
{
"name": "health",
"domains": [
"*"
],
"routes": [
{
"name": "health",
"match": {
"prefix": "/healthz"
},
"route": {
"cluster": "/envoy-admin",
"prefix_rewrite": "/ready"
}
}
]
}
]
},
"http_filters": [
{
"name": "envoy.filters.http.router",
"typed_config": {
"@type": "type.googleapis.com/envoy.extensions.filters.http.router.v3.Router"
}
}
],
"stream_idle_timeout": "0s"
}
}
]
}
]
}
],
"clusters": [
{
"name": "ingress-cluster",
"type": "ORIGINAL_DST",
"connectTimeout": "2s",
"lbPolicy": "CLUSTER_PROVIDED",
"typedExtensionProtocolOptions": {
"envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
"@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
"commonHttpProtocolOptions": {
"idleTimeout": "60s",
"maxConnectionDuration": "0s",
"maxRequestsPerConnection": 0
},
"useDownstreamProtocolConfig": {}
}
},
"cleanupInterval": "2.500s"
},
{
"name": "egress-cluster-tls",
"type": "ORIGINAL_DST",
"connectTimeout": "2s",
"lbPolicy": "CLUSTER_PROVIDED",
"typedExtensionProtocolOptions": {
"envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
"@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
"commonHttpProtocolOptions": {
"idleTimeout": "60s",
"maxConnectionDuration": "0s",
"maxRequestsPerConnection": 0
},
"upstreamHttpProtocolOptions": {},
"useDownstreamProtocolConfig": {}
}
},
"cleanupInterval": "2.500s",
"transportSocket": {
"name": "cilium.tls_wrapper",
"typedConfig": {
"@type": "type.googleapis.com/cilium.UpstreamTlsWrapperContext"
}
}
},
{
"name": "egress-cluster",
"type": "ORIGINAL_DST",
"connectTimeout": "2s",
"lbPolicy": "CLUSTER_PROVIDED",
"typedExtensionProtocolOptions": {
"envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
"@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
"commonHttpProtocolOptions": {
"idleTimeout": "60s",
"maxConnectionDuration": "0s",
"maxRequestsPerConnection": 0
},
"useDownstreamProtocolConfig": {}
}
},
"cleanupInterval": "2.500s"
},
{
"name": "ingress-cluster-tls",
"type": "ORIGINAL_DST",
"connectTimeout": "2s",
"lbPolicy": "CLUSTER_PROVIDED",
"typedExtensionProtocolOptions": {
"envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
"@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
"commonHttpProtocolOptions": {
"idleTimeout": "60s",
"maxConnectionDuration": "0s",
"maxRequestsPerConnection": 0
},
"upstreamHttpProtocolOptions": {},
"useDownstreamProtocolConfig": {}
}
},
"cleanupInterval": "2.500s",
"transportSocket": {
"name": "cilium.tls_wrapper",
"typedConfig": {
"@type": "type.googleapis.com/cilium.UpstreamTlsWrapperContext"
}
}
},
{
"name": "xds-grpc-cilium",
"type": "STATIC",
"connectTimeout": "2s",
"loadAssignment": {
"clusterName": "xds-grpc-cilium",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"pipe": {
"path": "/var/run/cilium/envoy/sockets/xds.sock"
}
}
}
}
]
}
]
},
"typedExtensionProtocolOptions": {
"envoy.extensions.upstreams.http.v3.HttpProtocolOptions": {
"@type": "type.googleapis.com/envoy.extensions.upstreams.http.v3.HttpProtocolOptions",
"explicitHttpConfig": {
"http2ProtocolOptions": {}
}
}
}
},
{
"name": "/envoy-admin",
"type": "STATIC",
"connectTimeout": "2s",
"loadAssignment": {
"clusterName": "/envoy-admin",
"endpoints": [
{
"lbEndpoints": [
{
"endpoint": {
"address": {
"pipe": {
"path": "/var/run/cilium/envoy/sockets/admin.sock"
}
}
}
}
]
}
]
}
}
]
},
"dynamicResources": {
"ldsConfig": {
"apiConfigSource": {
"apiType": "GRPC",
"transportApiVersion": "V3",
"grpcServices": [
{
"envoyGrpc": {
"clusterName": "xds-grpc-cilium"
}
}
],
"setNodeOnFirstMessageOnly": true
},
"resourceApiVersion": "V3"
},
"cdsConfig": {
"apiConfigSource": {
"apiType": "GRPC",
"transportApiVersion": "V3",
"grpcServices": [
{
"envoyGrpc": {
"clusterName": "xds-grpc-cilium"
}
}
],
"setNodeOnFirstMessageOnly": true
},
"resourceApiVersion": "V3"
}
},
"bootstrapExtensions": [
{
"name": "envoy.bootstrap.internal_listener",
"typed_config": {
"@type": "type.googleapis.com/envoy.extensions.bootstrap.internal_listener.v3.InternalListener"
}
}
],
"layeredRuntime": {
"layers": [
{
"name": "static_layer_0",
"staticLayer": {
"overload": {
"global_downstream_max_connections": 50000
}
}
}
]
},
"admin": {
"address": {
"pipe": {
"path": "/var/run/cilium/envoy/sockets/admin.sock"
}
}
}
}
---
# Source: cilium/templates/cilium-agent/clusterrole.yaml
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
@@ -340,8 +682,6 @@ rules:
- apiGroups:
- cilium.io
resources:
- ciliumnetworkpolicies/status
- ciliumclusterwidenetworkpolicies/status
- ciliumendpoints/status
- ciliumendpoints
- ciliuml2announcementpolicies/status
@@ -537,6 +877,7 @@ rules:
resources:
- ciliumloadbalancerippools
- ciliumpodippools
- ciliumbgppeeringpolicies
- ciliumbgpclusterconfigs
- ciliumbgpnodeconfigoverrides
verbs:
@@ -688,7 +1029,7 @@ spec:
type: Unconfined
containers:
- name: cilium-agent
image: "quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0"
image: "quay.io/cilium/cilium:v1.16.0@sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058"
imagePullPolicy: IfNotPresent
command:
- cilium-agent
@@ -804,6 +1145,9 @@ spec:
- ALL
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- name: envoy-sockets
mountPath: /var/run/cilium/envoy/sockets
readOnly: false
# Unprivileged containers need to mount /proc/sys/net from the host
# to have write access
- mountPath: /host/proc/sys/net
@@ -839,7 +1183,7 @@ spec:
mountPath: /tmp
initContainers:
- name: config
image: "quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0"
image: "quay.io/cilium/cilium:v1.16.0@sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058"
imagePullPolicy: IfNotPresent
command:
- cilium-dbg
@@ -862,7 +1206,7 @@ spec:
# Required to mount cgroup2 filesystem on the underlying Kubernetes node.
# We use nsenter command with host's cgroup and mount namespaces enabled.
- name: mount-cgroup
image: "quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0"
image: "quay.io/cilium/cilium:v1.16.0@sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058"
imagePullPolicy: IfNotPresent
env:
- name: CGROUP_ROOT
@@ -899,7 +1243,7 @@ spec:
drop:
- ALL
- name: apply-sysctl-overwrites
image: "quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0"
image: "quay.io/cilium/cilium:v1.16.0@sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058"
imagePullPolicy: IfNotPresent
env:
- name: BIN_PATH
@@ -937,7 +1281,7 @@ spec:
# from a privileged container because the mount propagation bidirectional
# only works from privileged containers.
- name: mount-bpf-fs
image: "quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0"
image: "quay.io/cilium/cilium:v1.16.0@sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058"
imagePullPolicy: IfNotPresent
args:
- 'mount | grep "/sys/fs/bpf type bpf" || mount -t bpf bpf /sys/fs/bpf'
@@ -953,7 +1297,7 @@ spec:
mountPath: /sys/fs/bpf
mountPropagation: Bidirectional
- name: clean-cilium-state
image: "quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0"
image: "quay.io/cilium/cilium:v1.16.0@sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058"
imagePullPolicy: IfNotPresent
command:
- /init-container.sh
@@ -1000,7 +1344,7 @@ spec:
mountPath: /var/run/cilium # wait-for-kube-proxy
# Install the CNI binaries in an InitContainer so we don't have a writable host mount in the agent
- name: install-cni-binaries
image: "quay.io/cilium/cilium:v1.15.7@sha256:2e432bf6879feb8b891c497d6fd784b13e53456017d2b8e4ea734145f0282ef0"
image: "quay.io/cilium/cilium:v1.16.0@sha256:46ffa4ef3cf6d8885dcc4af5963b0683f7d59daa90d49ed9fb68d3b1627fe058"
imagePullPolicy: IfNotPresent
command:
- "/install-plugin.sh"
@@ -1021,7 +1365,6 @@ spec:
mountPath: /host/opt/cni/bin # .Values.cni.install
restartPolicy: Always
priorityClassName: system-node-critical
serviceAccount: "cilium"
serviceAccountName: "cilium"
automountServiceAccountToken: true
terminationGracePeriodSeconds: 1
@@ -1080,6 +1423,11 @@ spec:
hostPath:
path: /run/xtables.lock
type: FileOrCreate
# Sharing socket with Cilium Envoy on the same node by using a host path
- name: envoy-sockets
hostPath:
path: "/var/run/cilium/envoy/sockets"
type: DirectoryOrCreate
# To read the clustermesh configuration
- name: clustermesh-secrets
projected:
@@ -1103,6 +1451,20 @@ spec:
path: common-etcd-client.crt
- key: ca.crt
path: common-etcd-client-ca.crt
# note: we configure the volume for the kvstoremesh-specific certificate
# regardless of whether KVStoreMesh is enabled or not, so that it can be
# automatically mounted in case KVStoreMesh gets subsequently enabled,
# without requiring an agent restart.
- secret:
name: clustermesh-apiserver-local-cert
optional: true
items:
- key: tls.key
path: local-etcd-client.key
- key: tls.crt
path: local-etcd-client.crt
- key: ca.crt
path: local-etcd-client-ca.crt
- name: host-proc-sys-net
hostPath:
path: /proc/sys/net
@@ -1127,6 +1489,176 @@ spec:
- key: ca.crt
path: client-ca.crt
---
# Source: cilium/templates/cilium-envoy/daemonset.yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
name: cilium-envoy
namespace: kube-system
labels:
k8s-app: cilium-envoy
app.kubernetes.io/part-of: cilium
app.kubernetes.io/name: cilium-envoy
name: cilium-envoy
spec:
selector:
matchLabels:
k8s-app: cilium-envoy
updateStrategy:
rollingUpdate:
maxUnavailable: 2
type: RollingUpdate
template:
metadata:
annotations:
prometheus.io/port: "9964"
prometheus.io/scrape: "true"
labels:
k8s-app: cilium-envoy
name: cilium-envoy
app.kubernetes.io/name: cilium-envoy
app.kubernetes.io/part-of: cilium
spec:
securityContext:
appArmorProfile:
type: Unconfined
containers:
- name: cilium-envoy
image: "quay.io/cilium/cilium-envoy:v1.29.7-39a2a56bbd5b3a591f69dbca51d3e30ef97e0e51@sha256:bd5ff8c66716080028f414ec1cb4f7dc66f40d2fb5a009fff187f4a9b90b566b"
imagePullPolicy: IfNotPresent
command:
- /usr/bin/cilium-envoy-starter
args:
- '--'
- '-c /var/run/cilium/envoy/bootstrap-config.json'
- '--base-id 0'
- '--log-level info'
- '--log-format [%Y-%m-%d %T.%e][%t][%l][%n] [%g:%#] %v'
startupProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9878
scheme: HTTP
failureThreshold: 105
periodSeconds: 2
successThreshold: 1
initialDelaySeconds: 5
livenessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9878
scheme: HTTP
periodSeconds: 30
successThreshold: 1
failureThreshold: 10
timeoutSeconds: 5
readinessProbe:
httpGet:
host: "127.0.0.1"
path: /healthz
port: 9878
scheme: HTTP
periodSeconds: 30
successThreshold: 1
failureThreshold: 3
timeoutSeconds: 5
env:
- name: K8S_NODE_NAME
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: spec.nodeName
- name: CILIUM_K8S_NAMESPACE
valueFrom:
fieldRef:
apiVersion: v1
fieldPath: metadata.namespace
ports:
- name: envoy-metrics
containerPort: 9964
hostPort: 9964
protocol: TCP
securityContext:
seLinuxOptions:
level: s0
type: spc_t
capabilities:
add:
- NET_ADMIN
- SYS_ADMIN
drop:
- ALL
terminationMessagePolicy: FallbackToLogsOnError
volumeMounts:
- name: envoy-sockets
mountPath: /var/run/cilium/envoy/sockets
readOnly: false
- name: envoy-artifacts
mountPath: /var/run/cilium/envoy/artifacts
readOnly: true
- name: envoy-config
mountPath: /var/run/cilium/envoy/
readOnly: true
- name: bpf-maps
mountPath: /sys/fs/bpf
mountPropagation: HostToContainer
restartPolicy: Always
priorityClassName: system-node-critical
serviceAccountName: "cilium-envoy"
automountServiceAccountToken: true
terminationGracePeriodSeconds: 1
hostNetwork: true
affinity:
nodeAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
nodeSelectorTerms:
- matchExpressions:
- key: cilium.io/no-schedule
operator: NotIn
values:
- "true"
podAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium
topologyKey: kubernetes.io/hostname
podAntiAffinity:
requiredDuringSchedulingIgnoredDuringExecution:
- labelSelector:
matchLabels:
k8s-app: cilium-envoy
topologyKey: kubernetes.io/hostname
nodeSelector:
kubernetes.io/os: linux
tolerations:
- operator: Exists
volumes:
- name: envoy-sockets
hostPath:
path: "/var/run/cilium/envoy/sockets"
type: DirectoryOrCreate
- name: envoy-artifacts
hostPath:
path: "/var/run/cilium/envoy/artifacts"
type: DirectoryOrCreate
- name: envoy-config
configMap:
name: cilium-envoy-config
# note: the leading zero means this number is in octal representation: do not remove it
defaultMode: 0400
items:
- key: bootstrap-config.json
path: bootstrap-config.json
# To keep state between restarts / upgrades
# To keep state between restarts / upgrades for bpf maps
- name: bpf-maps
hostPath:
path: /sys/fs/bpf
type: DirectoryOrCreate
---
# Source: cilium/templates/cilium-operator/deployment.yaml
apiVersion: apps/v1
kind: Deployment
@@ -1168,7 +1700,7 @@ spec:
spec:
containers:
- name: cilium-operator
image: "quay.io/cilium/operator-generic:v1.15.7@sha256:6840a6dde703b3e73dd31e03390327a9184fcb888efbad9d9d098d65b9035b54"
image: "quay.io/cilium/operator-generic:v1.16.0@sha256:d6621c11c4e4943bf2998af7febe05be5ed6fdcf812b27ad4388f47022190316"
imagePullPolicy: IfNotPresent
command:
- cilium-operator-generic
@@ -1224,7 +1756,6 @@ spec:
hostNetwork: true
restartPolicy: Always
priorityClassName: system-cluster-critical
serviceAccount: "cilium-operator"
serviceAccountName: "cilium-operator"
automountServiceAccountToken: true
# In HA mode, cilium-operator pods must not be scheduled on the same