From 8365812f523416d11720268c461d53c3cca031b3 Mon Sep 17 00:00:00 2001 From: Arhell Date: Sat, 23 Dec 2023 00:43:35 +0200 Subject: [PATCH 01/18] [en] Update go lang link common-parameters.md --- .../kubernetes-api/common-parameters/common-parameters.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/en/docs/reference/kubernetes-api/common-parameters/common-parameters.md b/content/en/docs/reference/kubernetes-api/common-parameters/common-parameters.md index 95a8f5dd37..6c4c643f80 100644 --- a/content/en/docs/reference/kubernetes-api/common-parameters/common-parameters.md +++ b/content/en/docs/reference/kubernetes-api/common-parameters/common-parameters.md @@ -60,7 +60,7 @@ When present, indicates that modifications should not be persisted. An invalid o ## fieldManager {#fieldManager} -fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://golang.org/pkg/unicode/#IsPrint. +fieldManager is a name associated with the actor or entity that is making these changes. The value must be less than or 128 characters long, and only contain printable characters, as defined by https://pkg.go.dev/unicode#IsPrint.
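The hunk above documents the `fieldManager` parameter used by server-side apply. As a rough, hedged illustration of how that parameter surfaces in everyday use (not part of the original patch), the sketch below sets a field manager from kubectl and then inspects the resulting managed fields; the manager name, Deployment name, and manifest path are placeholder assumptions.

```shell
# Apply a manifest with server-side apply, identifying this client as "my-controller".
# The name only needs to be printable and within the documented length limit.
kubectl apply --server-side --field-manager=my-controller -f ./deployment.yaml

# Show which field manager owns which fields on the live object.
kubectl get deployment my-app -o yaml --show-managed-fields
```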
From 29353df17bcc1a4a4789602504bbce49deccf566 Mon Sep 17 00:00:00 2001 From: Olawale Olaleye Date: Mon, 23 Oct 2023 08:46:17 +0100 Subject: [PATCH 02/18] added commands to cheatsheet.md --- content/en/docs/reference/kubectl/quick-reference.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/content/en/docs/reference/kubectl/quick-reference.md b/content/en/docs/reference/kubectl/quick-reference.md index d0140aebf2..b205098f69 100644 --- a/content/en/docs/reference/kubectl/quick-reference.md +++ b/content/en/docs/reference/kubectl/quick-reference.md @@ -71,12 +71,19 @@ KUBECONFIG=~/.kube/config:~/.kube/kubconfig2 kubectl config view +# Show merged kubeconfig settings and raw certificate data and exposed secrets +kubectl config view --raw + # get the password for the e2e user kubectl config view -o jsonpath='{.users[?(@.name == "e2e")].user.password}' +# get the certificate for the e2e user +kubectl config view --raw -ojsonpath="{.users[?(.name == 'e2e')].user.client-certificate-data}" | base64 -d + kubectl config view -o jsonpath='{.users[].name}' # display the first user kubectl config view -o jsonpath='{.users[*].name}' # get a list of users kubectl config get-contexts # display list of contexts +kubectl config get-contexts -o name # get all context names kubectl config current-context # display the current-context kubectl config use-context my-cluster-name # set the default context to my-cluster-name From 55bcdb2c0cfc113355493f9192bb544c75441311 Mon Sep 17 00:00:00 2001 From: Olawale Olaleye Date: Sun, 21 Jan 2024 07:49:29 +0000 Subject: [PATCH 03/18] Update content/en/docs/reference/kubectl/quick-reference.md Co-authored-by: Ritika <52399571+Ritikaa96@users.noreply.github.com> --- content/en/docs/reference/kubectl/quick-reference.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/en/docs/reference/kubectl/quick-reference.md b/content/en/docs/reference/kubectl/quick-reference.md index b205098f69..5e3026299d 100644 --- a/content/en/docs/reference/kubectl/quick-reference.md +++ b/content/en/docs/reference/kubectl/quick-reference.md @@ -78,7 +78,7 @@ kubectl config view --raw kubectl config view -o jsonpath='{.users[?(@.name == "e2e")].user.password}' # get the certificate for the e2e user -kubectl config view --raw -ojsonpath="{.users[?(.name == 'e2e')].user.client-certificate-data}" | base64 -d +kubectl config view --raw -ojsonpath='{.users[?(.name == 'e2e')].user.client-certificate-data}' | base64 -d kubectl config view -o jsonpath='{.users[].name}' # display the first user kubectl config view -o jsonpath='{.users[*].name}' # get a list of users From 33dcba8aa6dd983be15816ba1ac41862e4780f69 Mon Sep 17 00:00:00 2001 From: Alexis Boissiere Date: Fri, 16 Feb 2024 17:27:27 +0100 Subject: [PATCH 04/18] Fix spelling mistake in kube scheduler --- content/en/docs/concepts/scheduling-eviction/kube-scheduler.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md b/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md index 4dc27d80b8..313d56fe34 100644 --- a/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md +++ b/content/en/docs/concepts/scheduling-eviction/kube-scheduler.md @@ -62,7 +62,7 @@ kube-scheduler selects a node for the pod in a 2-step operation: The _filtering_ step finds the set of Nodes where it's feasible to schedule the Pod. 
For example, the PodFitsResources filter checks whether a -candidate Node has enough available resource to meet a Pod's specific +candidate Node has enough available resources to meet a Pod's specific resource requests. After this step, the node list contains any suitable Nodes; often, there will be more than one. If the list is empty, that Pod isn't (yet) schedulable. From e839bf7aeeccb314ce660c2bf78389d7d47c7728 Mon Sep 17 00:00:00 2001 From: Alexis Boissiere Date: Mon, 19 Feb 2024 13:21:52 +0100 Subject: [PATCH 05/18] Fix spelling mistake in scheduling section --- .../concepts/scheduling-eviction/assign-pod-node.md | 12 ++++++------ .../dynamic-resource-allocation.md | 4 ++-- .../scheduling-eviction/node-pressure-eviction.md | 2 +- .../scheduling-eviction/pod-priority-preemption.md | 12 ++++++------ .../scheduling-eviction/pod-scheduling-readiness.md | 2 +- .../scheduling-eviction/resource-bin-packing.md | 6 +++--- .../scheduling-eviction/scheduler-perf-tuning.md | 2 +- .../scheduling-eviction/scheduling-framework.md | 2 +- .../scheduling-eviction/taint-and-toleration.md | 4 ++-- .../topology-spread-constraints.md | 12 ++++++------ 10 files changed, 29 insertions(+), 29 deletions(-) diff --git a/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md b/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md index c976faa978..3aeb05d8ec 100644 --- a/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md +++ b/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md @@ -254,13 +254,13 @@ the node label that the system uses to denote the domain. For examples, see [Well-Known Labels, Annotations and Taints](/docs/reference/labels-annotations-taints/). {{< note >}} -Inter-pod affinity and anti-affinity require substantial amount of +Inter-pod affinity and anti-affinity require substantial amounts of processing which can slow down scheduling in large clusters significantly. We do not recommend using them in clusters larger than several hundred nodes. {{< /note >}} {{< note >}} -Pod anti-affinity requires nodes to be consistently labelled, in other words, +Pod anti-affinity requires nodes to be consistently labeled, in other words, every node in the cluster must have an appropriate label matching `topologyKey`. If some or all nodes are missing the specified `topologyKey` label, it can lead to unintended behavior. @@ -364,7 +364,7 @@ null `namespaceSelector` matches the namespace of the Pod where the rule is defi {{< note >}} -The `matchLabelKeys` field is a alpha-level field and is disabled by default in +The `matchLabelKeys` field is an alpha-level field and is disabled by default in Kubernetes {{< skew currentVersion >}}. When you want to use it, you have to enable it via the `MatchLabelKeysInPodAffinity` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/). @@ -415,7 +415,7 @@ spec: {{< note >}} -The `mismatchLabelKeys` field is a alpha-level field and is disabled by default in +The `mismatchLabelKeys` field is an alpha-level field and is disabled by default in Kubernetes {{< skew currentVersion >}}. When you want to use it, you have to enable it via the `MatchLabelKeysInPodAffinity` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/). @@ -561,7 +561,7 @@ where each web server is co-located with a cache, on three separate nodes. 
| *webserver-1* | *webserver-2* | *webserver-3* | | *cache-1* | *cache-2* | *cache-3* | -The overall effect is that each cache instance is likely to be accessed by a single client, that +The overall effect is that each cache instance is likely to be accessed by a single client that is running on the same node. This approach aims to minimize both skew (imbalanced load) and latency. You might have other reasons to use Pod anti-affinity. @@ -589,7 +589,7 @@ Some of the limitations of using `nodeName` to select nodes are: {{< note >}} `nodeName` is intended for use by custom schedulers or advanced use cases where you need to bypass any configured schedulers. Bypassing the schedulers might lead to -failed Pods if the assigned Nodes get oversubscribed. You can use [node affinity](#node-affinity) or a the [`nodeselector` field](#nodeselector) to assign a Pod to a specific Node without bypassing the schedulers. +failed Pods if the assigned Nodes get oversubscribed. You can use the [node affinity](#node-affinity) or the [`nodeselector` field](#nodeselector) to assign a Pod to a specific Node without bypassing the schedulers. {{}} Here is an example of a Pod spec using the `nodeName` field: diff --git a/content/en/docs/concepts/scheduling-eviction/dynamic-resource-allocation.md b/content/en/docs/concepts/scheduling-eviction/dynamic-resource-allocation.md index 47420240d9..fdc5408a9c 100644 --- a/content/en/docs/concepts/scheduling-eviction/dynamic-resource-allocation.md +++ b/content/en/docs/concepts/scheduling-eviction/dynamic-resource-allocation.md @@ -41,14 +41,14 @@ ResourceClass driver. ResourceClaim -: Defines a particular resource instances that is required by a +: Defines a particular resource instance that is required by a workload. Created by a user (lifecycle managed manually, can be shared between different Pods) or for individual Pods by the control plane based on a ResourceClaimTemplate (automatic lifecycle, typically used by just one Pod). ResourceClaimTemplate -: Defines the spec and some meta data for creating +: Defines the spec and some metadata for creating ResourceClaims. Created by a user when deploying a workload. PodSchedulingContext diff --git a/content/en/docs/concepts/scheduling-eviction/node-pressure-eviction.md b/content/en/docs/concepts/scheduling-eviction/node-pressure-eviction.md index 1f3e84fb5e..fcadea652e 100644 --- a/content/en/docs/concepts/scheduling-eviction/node-pressure-eviction.md +++ b/content/en/docs/concepts/scheduling-eviction/node-pressure-eviction.md @@ -171,7 +171,7 @@ The kubelet has the following default hard eviction thresholds: - `nodefs.inodesFree<5%` (Linux nodes) These default values of hard eviction thresholds will only be set if none -of the parameters is changed. If you changed the value of any parameter, +of the parameters is changed. If you change the value of any parameter, then the values of other parameters will not be inherited as the default values and will be set to zero. In order to provide custom values, you should provide all the thresholds respectively. 
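The hunk above notes that overriding any hard eviction threshold means the remaining defaults are no longer inherited. As a hedged sketch (the values, scratch path, and merge step are placeholders rather than recommendations from the patch), a kubelet configuration fragment that restates every threshold might look like this:

```shell
# Hedged example: these keys would be merged into the node's existing
# kubelet configuration (the file the kubelet is started with via --config).
# Once evictionHard is set at all, list every threshold explicitly, because
# unspecified entries are not filled in with the defaults shown above.
cat <<'EOF' > /tmp/eviction-hard-example.yaml
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
evictionHard:
  memory.available: "100Mi"
  nodefs.available: "10%"
  imagefs.available: "15%"
  nodefs.inodesFree: "5%"
EOF
```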
diff --git a/content/en/docs/concepts/scheduling-eviction/pod-priority-preemption.md b/content/en/docs/concepts/scheduling-eviction/pod-priority-preemption.md index d5607f48f5..c312611564 100644 --- a/content/en/docs/concepts/scheduling-eviction/pod-priority-preemption.md +++ b/content/en/docs/concepts/scheduling-eviction/pod-priority-preemption.md @@ -182,8 +182,8 @@ When Pod priority is enabled, the scheduler orders pending Pods by their priority and a pending Pod is placed ahead of other pending Pods with lower priority in the scheduling queue. As a result, the higher priority Pod may be scheduled sooner than Pods with lower priority if -its scheduling requirements are met. If such Pod cannot be scheduled, -scheduler will continue and tries to schedule other lower priority Pods. +its scheduling requirements are met. If such Pod cannot be scheduled, the +scheduler will continue and try to schedule other lower priority Pods. ## Preemption @@ -199,7 +199,7 @@ the Pods are gone, P can be scheduled on the Node. ### User exposed information When Pod P preempts one or more Pods on Node N, `nominatedNodeName` field of Pod -P's status is set to the name of Node N. This field helps scheduler track +P's status is set to the name of Node N. This field helps the scheduler track resources reserved for Pod P and also gives users information about preemptions in their clusters. @@ -209,8 +209,8 @@ After victim Pods are preempted, they get their graceful termination period. If another node becomes available while scheduler is waiting for the victim Pods to terminate, scheduler may use the other node to schedule Pod P. As a result `nominatedNodeName` and `nodeName` of Pod spec are not always the same. Also, if -scheduler preempts Pods on Node N, but then a higher priority Pod than Pod P -arrives, scheduler may give Node N to the new higher priority Pod. In such a +the scheduler preempts Pods on Node N, but then a higher priority Pod than Pod P +arrives, the scheduler may give Node N to the new higher priority Pod. In such a case, scheduler clears `nominatedNodeName` of Pod P. By doing this, scheduler makes Pod P eligible to preempt Pods on another Node. @@ -288,7 +288,7 @@ enough demand and if we find an algorithm with reasonable performance. ## Troubleshooting -Pod priority and pre-emption can have unwanted side effects. Here are some +Pod priority and preemption can have unwanted side effects. Here are some examples of potential problems and ways to deal with them. 
### Pods are preempted unnecessarily diff --git a/content/en/docs/concepts/scheduling-eviction/pod-scheduling-readiness.md b/content/en/docs/concepts/scheduling-eviction/pod-scheduling-readiness.md index 0b671ecbfc..9b1df2851f 100644 --- a/content/en/docs/concepts/scheduling-eviction/pod-scheduling-readiness.md +++ b/content/en/docs/concepts/scheduling-eviction/pod-scheduling-readiness.md @@ -59,7 +59,7 @@ The output is: ``` To inform scheduler this Pod is ready for scheduling, you can remove its `schedulingGates` entirely -by re-applying a modified manifest: +by reapplying a modified manifest: {{% code_sample file="pods/pod-without-scheduling-gates.yaml" %}} diff --git a/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md b/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md index c666c14f21..49432b6210 100644 --- a/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md +++ b/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md @@ -57,9 +57,9 @@ the `NodeResourcesFit` score function can be controlled by the Within the `scoringStrategy` field, you can configure two parameters: `requestedToCapacityRatio` and `resources`. The `shape` in the `requestedToCapacityRatio` parameter allows the user to tune the function as least requested or most -requested based on `utilization` and `score` values. The `resources` parameter -consists of `name` of the resource to be considered during scoring and `weight` -specify the weight of each resource. +requested based on `utilization` and `score` values. The `resources` parameter +comprises both the `name` of the resource to be considered during scoring and +its corresponding `weight`, which specifies the weight of each resource. Below is an example configuration that sets the bin packing behavior for extended resources `intel.com/foo` and `intel.com/bar` diff --git a/content/en/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md b/content/en/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md index 786b6ed984..fd499c550f 100644 --- a/content/en/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md +++ b/content/en/docs/concepts/scheduling-eviction/scheduler-perf-tuning.md @@ -77,7 +77,7 @@ If you don't specify a threshold, Kubernetes calculates a figure using a linear formula that yields 50% for a 100-node cluster and yields 10% for a 5000-node cluster. The lower bound for the automatic value is 5%. -This means that, the kube-scheduler always scores at least 5% of your cluster no +This means that the kube-scheduler always scores at least 5% of your cluster no matter how large the cluster is, unless you have explicitly set `percentageOfNodesToScore` to be smaller than 5. diff --git a/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md b/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md index 618f5586f6..d68548f68e 100644 --- a/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md +++ b/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md @@ -113,7 +113,7 @@ called for that node. Nodes may be evaluated concurrently. ### PostFilter {#post-filter} -These plugins are called after Filter phase, but only when no feasible nodes +These plugins are called after the Filter phase, but only when no feasible nodes were found for the pod. Plugins are called in their configured order. If any postFilter plugin marks the node as `Schedulable`, the remaining plugins will not be called. 
A typical PostFilter implementation is preemption, which diff --git a/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md b/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md index d2e78c98a8..cfe239377c 100644 --- a/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md +++ b/content/en/docs/concepts/scheduling-eviction/taint-and-toleration.md @@ -84,7 +84,7 @@ An empty `effect` matches all effects with key `key1`. {{< /note >}} -The above example used `effect` of `NoSchedule`. Alternatively, you can use `effect` of `PreferNoSchedule`. +The above example used the `effect` of `NoSchedule`. Alternatively, you can use the `effect` of `PreferNoSchedule`. The allowed values for the `effect` field are: @@ -227,7 +227,7 @@ are true. The following taints are built in: * `node.kubernetes.io/network-unavailable`: Node's network is unavailable. * `node.kubernetes.io/unschedulable`: Node is unschedulable. * `node.cloudprovider.kubernetes.io/uninitialized`: When the kubelet is started - with "external" cloud provider, this taint is set on a node to mark it + with an "external" cloud provider, this taint is set on a node to mark it as unusable. After a controller from the cloud-controller-manager initializes this node, the kubelet removes this taint. diff --git a/content/en/docs/concepts/scheduling-eviction/topology-spread-constraints.md b/content/en/docs/concepts/scheduling-eviction/topology-spread-constraints.md index 03a0172cb2..2a8eb78d5b 100644 --- a/content/en/docs/concepts/scheduling-eviction/topology-spread-constraints.md +++ b/content/en/docs/concepts/scheduling-eviction/topology-spread-constraints.md @@ -71,7 +71,7 @@ spec: ``` You can read more about this field by running `kubectl explain Pod.spec.topologySpreadConstraints` or -refer to [scheduling](/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling) section of the API reference for Pod. +refer to the [scheduling](/docs/reference/kubernetes-api/workload-resources/pod-v1/#scheduling) section of the API reference for Pod. ### Spread constraint definition @@ -254,7 +254,7 @@ follows the API definition of the field; however, the behavior is more likely to confusing and troubleshooting is less straightforward. You need a mechanism to ensure that all the nodes in a topology domain (such as a -cloud provider region) are labelled consistently. +cloud provider region) are labeled consistently. To avoid you needing to manually label nodes, most clusters automatically populate well-known labels such as `kubernetes.io/hostname`. Check whether your cluster supports this. @@ -263,7 +263,7 @@ your cluster supports this. ### Example: one topology spread constraint {#example-one-topologyspreadconstraint} -Suppose you have a 4-node cluster where 3 Pods labelled `foo: bar` are located in +Suppose you have a 4-node cluster where 3 Pods labeled `foo: bar` are located in node1, node2 and node3 respectively: {{}} @@ -290,7 +290,7 @@ can use a manifest similar to: {{% code_sample file="pods/topology-spread-constraints/one-constraint.yaml" %}} From that manifest, `topologyKey: zone` implies the even distribution will only be applied -to nodes that are labelled `zone: ` (nodes that don't have a `zone` label +to nodes that are labeled `zone: ` (nodes that don't have a `zone` label are skipped). The field `whenUnsatisfiable: DoNotSchedule` tells the scheduler to let the incoming Pod stay pending if the scheduler can't find a way to satisfy the constraint. 
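The paragraph above explains `topologyKey: zone` and `whenUnsatisfiable: DoNotSchedule` for the one-constraint example. The sketch below is a hedged approximation of such a Pod, applied inline; it is not the exact manifest referenced by the page, and the Pod name, labels, and image are assumptions.

```shell
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: spread-example
  labels:
    foo: bar
spec:
  topologySpreadConstraints:
  - maxSkew: 1
    topologyKey: zone              # only nodes carrying a "zone" label are considered
    whenUnsatisfiable: DoNotSchedule
    labelSelector:
      matchLabels:
        foo: bar
  containers:
  - name: pause
    image: registry.k8s.io/pause:3.9
EOF
```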
@@ -494,7 +494,7 @@ There are some implicit conventions worth noting here: above example, if you remove the incoming Pod's labels, it can still be placed onto nodes in zone `B`, since the constraints are still satisfied. However, after that placement, the degree of imbalance of the cluster remains unchanged - it's still zone `A` - having 2 Pods labelled as `foo: bar`, and zone `B` having 1 Pod labelled as + having 2 Pods labeled as `foo: bar`, and zone `B` having 1 Pod labeled as `foo: bar`. If this is not what you expect, update the workload's `topologySpreadConstraints[*].labelSelector` to match the labels in the pod template. @@ -618,7 +618,7 @@ section of the enhancement proposal about Pod topology spread constraints. because, in this case, those topology domains won't be considered until there is at least one node in them. - You can work around this by using an cluster autoscaling tool that is aware of + You can work around this by using a cluster autoscaling tool that is aware of Pod topology spread constraints and is also aware of the overall set of topology domains. From 2f298d207718da6d85a886cd16f86067e6f3c5f5 Mon Sep 17 00:00:00 2001 From: Alexis Boissiere Date: Mon, 19 Feb 2024 13:54:35 +0100 Subject: [PATCH 06/18] Fix trailing whitespace in scheduler section --- .../scheduling-eviction/api-eviction.md | 14 +++--- .../scheduling-eviction/assign-pod-node.md | 50 +++++++++---------- .../pod-priority-preemption.md | 8 +-- .../pod-scheduling-readiness.md | 12 ++--- .../resource-bin-packing.md | 4 +- .../scheduling-framework.md | 2 +- .../topology-spread-constraints.md | 2 +- 7 files changed, 46 insertions(+), 46 deletions(-) diff --git a/content/en/docs/concepts/scheduling-eviction/api-eviction.md b/content/en/docs/concepts/scheduling-eviction/api-eviction.md index 5da823d566..b1aea442e8 100644 --- a/content/en/docs/concepts/scheduling-eviction/api-eviction.md +++ b/content/en/docs/concepts/scheduling-eviction/api-eviction.md @@ -11,11 +11,11 @@ using a client of the {{}}. You may be able to attempt the eviction again later. You might also see this - response because of API rate limiting. + response because of API rate limiting. * `500 Internal Server Error`: the eviction is not allowed because there is a misconfiguration, like if multiple PodDisruptionBudgets reference the same Pod. If the Pod you want to evict isn't part of a workload that has a PodDisruptionBudget, the API server always returns `200 OK` and allows the -eviction. +eviction. If the API server allows the eviction, the Pod is deleted as follows: @@ -103,12 +103,12 @@ If the API server allows the eviction, the Pod is deleted as follows: ## Troubleshooting stuck evictions In some cases, your applications may enter a broken state, where the Eviction -API will only return `429` or `500` responses until you intervene. This can -happen if, for example, a ReplicaSet creates pods for your application but new +API will only return `429` or `500` responses until you intervene. This can +happen if, for example, a ReplicaSet creates pods for your application but new pods do not enter a `Ready` state. You may also notice this behavior in cases where the last evicted Pod had a long termination grace period. -If you notice stuck evictions, try one of the following solutions: +If you notice stuck evictions, try one of the following solutions: * Abort or pause the automated operation causing the issue. Investigate the stuck application before you restart the operation. 
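For context on the API-initiated eviction behavior covered by the hunk above, one hedged way to exercise the Eviction API by hand is to POST to the Pod's `eviction` subresource through a local proxy; the namespace, Pod name, and port are placeholders.

```shell
# Start a local proxy to the API server in the background.
kubectl proxy --port=8001 &

# Request an eviction; a blocked PodDisruptionBudget would surface here as a 429 response.
curl -v -H 'Content-Type: application/json' \
  http://localhost:8001/api/v1/namespaces/default/pods/quux/eviction \
  -d '{
    "apiVersion": "policy/v1",
    "kind": "Eviction",
    "metadata": { "name": "quux", "namespace": "default" }
  }'
```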
diff --git a/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md b/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md index 3aeb05d8ec..bb6ce8d416 100644 --- a/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md +++ b/content/en/docs/concepts/scheduling-eviction/assign-pod-node.md @@ -96,7 +96,7 @@ define. Some of the benefits of affinity and anti-affinity include: The affinity feature consists of two types of affinity: - *Node affinity* functions like the `nodeSelector` field but is more expressive and - allows you to specify soft rules. + allows you to specify soft rules. - *Inter-pod affinity/anti-affinity* allows you to constrain Pods against labels on other Pods. @@ -305,22 +305,22 @@ Pod affinity rule uses the "hard" `requiredDuringSchedulingIgnoredDuringExecution`, while the anti-affinity rule uses the "soft" `preferredDuringSchedulingIgnoredDuringExecution`. -The affinity rule specifies that the scheduler is allowed to place the example Pod +The affinity rule specifies that the scheduler is allowed to place the example Pod on a node only if that node belongs to a specific [zone](/docs/concepts/scheduling-eviction/topology-spread-constraints/) -where other Pods have been labeled with `security=S1`. -For instance, if we have a cluster with a designated zone, let's call it "Zone V," -consisting of nodes labeled with `topology.kubernetes.io/zone=V`, the scheduler can -assign the Pod to any node within Zone V, as long as there is at least one Pod within -Zone V already labeled with `security=S1`. Conversely, if there are no Pods with `security=S1` +where other Pods have been labeled with `security=S1`. +For instance, if we have a cluster with a designated zone, let's call it "Zone V," +consisting of nodes labeled with `topology.kubernetes.io/zone=V`, the scheduler can +assign the Pod to any node within Zone V, as long as there is at least one Pod within +Zone V already labeled with `security=S1`. Conversely, if there are no Pods with `security=S1` labels in Zone V, the scheduler will not assign the example Pod to any node in that zone. -The anti-affinity rule specifies that the scheduler should try to avoid scheduling the Pod +The anti-affinity rule specifies that the scheduler should try to avoid scheduling the Pod on a node if that node belongs to a specific [zone](/docs/concepts/scheduling-eviction/topology-spread-constraints/) -where other Pods have been labeled with `security=S2`. -For instance, if we have a cluster with a designated zone, let's call it "Zone R," -consisting of nodes labeled with `topology.kubernetes.io/zone=R`, the scheduler should avoid -assigning the Pod to any node within Zone R, as long as there is at least one Pod within -Zone R already labeled with `security=S2`. Conversely, the anti-affinity rule does not impact +where other Pods have been labeled with `security=S2`. +For instance, if we have a cluster with a designated zone, let's call it "Zone R," +consisting of nodes labeled with `topology.kubernetes.io/zone=R`, the scheduler should avoid +assigning the Pod to any node within Zone R, as long as there is at least one Pod within +Zone R already labeled with `security=S2`. Conversely, the anti-affinity rule does not impact scheduling into Zone R if there are no Pods with `security=S2` labels. 
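The two paragraphs above describe an affinity rule keyed on `security=S1` and an anti-affinity rule keyed on `security=S2`, both scoped by zone. The following is a hedged reconstruction of a Pod along those lines, applied inline; it may differ from the manifest the surrounding text refers to, and the Pod name and image are assumptions.

```shell
kubectl apply -f - <<'EOF'
apiVersion: v1
kind: Pod
metadata:
  name: with-pod-affinity
spec:
  affinity:
    podAffinity:
      requiredDuringSchedulingIgnoredDuringExecution:
      - labelSelector:
          matchExpressions:
          - key: security
            operator: In
            values: ["S1"]
        topologyKey: topology.kubernetes.io/zone
    podAntiAffinity:
      preferredDuringSchedulingIgnoredDuringExecution:
      - weight: 100
        podAffinityTerm:
          labelSelector:
            matchExpressions:
            - key: security
              operator: In
              values: ["S2"]
          topologyKey: topology.kubernetes.io/zone
  containers:
  - name: pause
    image: registry.k8s.io/pause:3.9
EOF
```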
To get yourself more familiar with the examples of Pod affinity and anti-affinity, @@ -371,12 +371,12 @@ When you want to use it, you have to enable it via the {{< /note >}} Kubernetes includes an optional `matchLabelKeys` field for Pod affinity -or anti-affinity. The field specifies keys for the labels that should match with the incoming Pod's labels, +or anti-affinity. The field specifies keys for the labels that should match with the incoming Pod's labels, when satisfying the Pod (anti)affinity. The keys are used to look up values from the pod labels; those key-value labels are combined (using `AND`) with the match restrictions defined using the `labelSelector` field. The combined -filtering selects the set of existing pods that will be taken into Pod (anti)affinity calculation. +filtering selects the set of existing pods that will be taken into Pod (anti)affinity calculation. A common use case is to use `matchLabelKeys` with `pod-template-hash` (set on Pods managed as part of a Deployment, where the value is unique for each revision). @@ -405,7 +405,7 @@ spec: # Only Pods from a given rollout are taken into consideration when calculating pod affinity. # If you update the Deployment, the replacement Pods follow their own affinity rules # (if there are any defined in the new Pod template) - matchLabelKeys: + matchLabelKeys: - pod-template-hash ``` @@ -422,7 +422,7 @@ When you want to use it, you have to enable it via the {{< /note >}} Kubernetes includes an optional `mismatchLabelKeys` field for Pod affinity -or anti-affinity. The field specifies keys for the labels that should **not** match with the incoming Pod's labels, +or anti-affinity. The field specifies keys for the labels that should **not** match with the incoming Pod's labels, when satisfying the Pod (anti)affinity. One example use case is to ensure Pods go to the topology domain (node, zone, etc) where only Pods from the same tenant or team are scheduled in. @@ -438,22 +438,22 @@ metadata: ... spec: affinity: - podAffinity: + podAffinity: requiredDuringSchedulingIgnoredDuringExecution: # ensure that pods associated with this tenant land on the correct node pool - matchLabelKeys: - tenant topologyKey: node-pool - podAntiAffinity: + podAntiAffinity: requiredDuringSchedulingIgnoredDuringExecution: # ensure that pods associated with this tenant can't schedule to nodes used for another tenant - mismatchLabelKeys: - - tenant # whatever the value of the "tenant" label for this Pod, prevent + - tenant # whatever the value of the "tenant" label for this Pod, prevent # scheduling to nodes in any pool where any Pod from a different # tenant is running. labelSelector: # We have to have the labelSelector which selects only Pods with the tenant label, - # otherwise this Pod would hate Pods from daemonsets as well, for example, + # otherwise this Pod would hate Pods from daemonsets as well, for example, # which aren't supposed to have the tenant label. matchExpressions: - key: tenant @@ -633,13 +633,13 @@ The following operators can only be used with `nodeAffinity`. 
| Operator | Behaviour | | :------------: | :-------------: | -| `Gt` | The supplied value will be parsed as an integer, and that integer is less than the integer that results from parsing the value of a label named by this selector | -| `Lt` | The supplied value will be parsed as an integer, and that integer is greater than the integer that results from parsing the value of a label named by this selector | +| `Gt` | The supplied value will be parsed as an integer, and that integer is less than the integer that results from parsing the value of a label named by this selector | +| `Lt` | The supplied value will be parsed as an integer, and that integer is greater than the integer that results from parsing the value of a label named by this selector | {{}} -`Gt` and `Lt` operators will not work with non-integer values. If the given value -doesn't parse as an integer, the pod will fail to get scheduled. Also, `Gt` and `Lt` +`Gt` and `Lt` operators will not work with non-integer values. If the given value +doesn't parse as an integer, the pod will fail to get scheduled. Also, `Gt` and `Lt` are not available for `podAffinity`. {{}} diff --git a/content/en/docs/concepts/scheduling-eviction/pod-priority-preemption.md b/content/en/docs/concepts/scheduling-eviction/pod-priority-preemption.md index c312611564..c6b8da1838 100644 --- a/content/en/docs/concepts/scheduling-eviction/pod-priority-preemption.md +++ b/content/en/docs/concepts/scheduling-eviction/pod-priority-preemption.md @@ -64,7 +64,7 @@ and it cannot be prefixed with `system-`. A PriorityClass object can have any 32-bit integer value smaller than or equal to 1 billion. This means that the range of values for a PriorityClass object is -from -2147483648 to 1000000000 inclusive. Larger numbers are reserved for +from -2147483648 to 1000000000 inclusive. Larger numbers are reserved for built-in PriorityClasses that represent critical system Pods. A cluster admin should create one PriorityClass object for each such mapping that they want. @@ -256,9 +256,9 @@ the Node is not considered for preemption. If a pending Pod has inter-pod {{< glossary_tooltip text="affinity" term_id="affinity" >}} to one or more of the lower-priority Pods on the Node, the inter-Pod affinity -rule cannot be satisfied in the absence of those lower-priority Pods. In this case, +rule cannot be satisfied in the absence of those lower-priority Pods. In this case, the scheduler does not preempt any Pods on the Node. Instead, it looks for another -Node. The scheduler might find a suitable Node or it might not. There is no +Node. The scheduler might find a suitable Node or it might not. There is no guarantee that the pending Pod can be scheduled. Our recommended solution for this problem is to create inter-Pod affinity only @@ -361,7 +361,7 @@ to get evicted. The kubelet ranks pods for eviction based on the following facto 1. Whether the starved resource usage exceeds requests 1. Pod Priority - 1. Amount of resource usage relative to requests + 1. Amount of resource usage relative to requests See [Pod selection for kubelet eviction](/docs/concepts/scheduling-eviction/node-pressure-eviction/#pod-selection-for-kubelet-eviction) for more details. 
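To accompany the priority and preemption hunks above, here is a hedged sketch of a PriorityClass and a Pod that opts into it; the class name, value, and Pod details are illustrative assumptions rather than anything defined in the patch.

```shell
kubectl apply -f - <<'EOF'
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: high-priority
value: 1000000                 # non-system classes must stay at or below 1 billion
globalDefault: false
description: "Example class for latency-sensitive workloads."
---
apiVersion: v1
kind: Pod
metadata:
  name: important-pod
spec:
  priorityClassName: high-priority
  containers:
  - name: pause
    image: registry.k8s.io/pause:3.9
EOF
```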
diff --git a/content/en/docs/concepts/scheduling-eviction/pod-scheduling-readiness.md b/content/en/docs/concepts/scheduling-eviction/pod-scheduling-readiness.md index 9b1df2851f..e895ffd5fb 100644 --- a/content/en/docs/concepts/scheduling-eviction/pod-scheduling-readiness.md +++ b/content/en/docs/concepts/scheduling-eviction/pod-scheduling-readiness.md @@ -9,7 +9,7 @@ weight: 40 {{< feature-state for_k8s_version="v1.27" state="beta" >}} Pods were considered ready for scheduling once created. Kubernetes scheduler -does its due diligence to find nodes to place all pending Pods. However, in a +does its due diligence to find nodes to place all pending Pods. However, in a real-world case, some Pods may stay in a "miss-essential-resources" state for a long period. These Pods actually churn the scheduler (and downstream integrators like Cluster AutoScaler) in an unnecessary manner. @@ -79,7 +79,7 @@ Given the test-pod doesn't request any CPU/memory resources, it's expected that transited from previous `SchedulingGated` to `Running`: ```none -NAME READY STATUS RESTARTS AGE IP NODE +NAME READY STATUS RESTARTS AGE IP NODE test-pod 1/1 Running 0 15s 10.0.0.4 node-2 ``` @@ -94,8 +94,8 @@ scheduling. You can use `scheduler_pending_pods{queue="gated"}` to check the met {{< feature-state for_k8s_version="v1.27" state="beta" >}} You can mutate scheduling directives of Pods while they have scheduling gates, with certain constraints. -At a high level, you can only tighten the scheduling directives of a Pod. In other words, the updated -directives would cause the Pods to only be able to be scheduled on a subset of the nodes that it would +At a high level, you can only tighten the scheduling directives of a Pod. In other words, the updated +directives would cause the Pods to only be able to be scheduled on a subset of the nodes that it would previously match. More concretely, the rules for updating a Pod's scheduling directives are as follows: 1. For `.spec.nodeSelector`, only additions are allowed. If absent, it will be allowed to be set. @@ -107,8 +107,8 @@ previously match. More concretely, the rules for updating a Pod's scheduling dir or `fieldExpressions` are allowed, and no changes to existing `matchExpressions` and `fieldExpressions` will be allowed. This is because the terms in `.requiredDuringSchedulingIgnoredDuringExecution.NodeSelectorTerms`, are ORed - while the expressions in `nodeSelectorTerms[].matchExpressions` and - `nodeSelectorTerms[].fieldExpressions` are ANDed. + while the expressions in `nodeSelectorTerms[].matchExpressions` and + `nodeSelectorTerms[].fieldExpressions` are ANDed. 4. For `.preferredDuringSchedulingIgnoredDuringExecution`, all updates are allowed. This is because preferred terms are not authoritative, and so policy controllers diff --git a/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md b/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md index 49432b6210..46930cc062 100644 --- a/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md +++ b/content/en/docs/concepts/scheduling-eviction/resource-bin-packing.md @@ -57,8 +57,8 @@ the `NodeResourcesFit` score function can be controlled by the Within the `scoringStrategy` field, you can configure two parameters: `requestedToCapacityRatio` and `resources`. The `shape` in the `requestedToCapacityRatio` parameter allows the user to tune the function as least requested or most -requested based on `utilization` and `score` values. 
The `resources` parameter -comprises both the `name` of the resource to be considered during scoring and +requested based on `utilization` and `score` values. The `resources` parameter +comprises both the `name` of the resource to be considered during scoring and its corresponding `weight`, which specifies the weight of each resource. Below is an example configuration that sets diff --git a/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md b/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md index d68548f68e..63a8c7d3e6 100644 --- a/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md +++ b/content/en/docs/concepts/scheduling-eviction/scheduling-framework.md @@ -83,7 +83,7 @@ the Pod is put into the active queue or the backoff queue so that the scheduler will retry the scheduling of the Pod. {{< note >}} -QueueingHint evaluation during scheduling is a beta-level feature. +QueueingHint evaluation during scheduling is a beta-level feature. The v1.28 release series initially enabled the associated feature gate; however, after the discovery of an excessive memory footprint, the Kubernetes project set that feature gate to be disabled by default. In Kubernetes {{< skew currentVersion >}}, this feature gate is diff --git a/content/en/docs/concepts/scheduling-eviction/topology-spread-constraints.md b/content/en/docs/concepts/scheduling-eviction/topology-spread-constraints.md index 2a8eb78d5b..6ebddabd8a 100644 --- a/content/en/docs/concepts/scheduling-eviction/topology-spread-constraints.md +++ b/content/en/docs/concepts/scheduling-eviction/topology-spread-constraints.md @@ -99,7 +99,7 @@ your cluster. Those fields are: {{< note >}} The `MinDomainsInPodTopologySpread` [feature gate](/docs/reference/command-line-tools-reference/feature-gates/) enables `minDomains` for pod topology spread. Starting from v1.28, - the `MinDomainsInPodTopologySpread` gate + the `MinDomainsInPodTopologySpread` gate is enabled by default. In older Kubernetes clusters it might be explicitly disabled or the field might not be available. 
{{< /note >}} From 96ca190dee91bad3c718110f5dbd51612755ae04 Mon Sep 17 00:00:00 2001 From: Victor Morales Date: Tue, 20 Feb 2024 14:15:16 -0800 Subject: [PATCH 07/18] Add Jossemar to Spanish reviewers group --- OWNERS_ALIASES | 1 + 1 file changed, 1 insertion(+) diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index be35b977f6..3f7ade955f 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -59,6 +59,7 @@ aliases: - electrocucaracha - raelga - ramrodo + - jossemarGT sig-docs-fr-owners: # Admins for French content - awkif - feloy From 865021d2879dcec49cd3df64c9a384cbda6933f5 Mon Sep 17 00:00:00 2001 From: Victor Morales Date: Wed, 28 Feb 2024 12:35:53 -0800 Subject: [PATCH 08/18] Fix sig-docs-es name's order --- OWNERS_ALIASES | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/OWNERS_ALIASES b/OWNERS_ALIASES index 3f7ade955f..6eb5333258 100644 --- a/OWNERS_ALIASES +++ b/OWNERS_ALIASES @@ -49,17 +49,17 @@ aliases: - windsonsea sig-docs-es-owners: # Admins for Spanish content - 92nqb - - krol3 - electrocucaracha + - krol3 - raelga - ramrodo sig-docs-es-reviews: # PR reviews for Spanish content - 92nqb - - krol3 - electrocucaracha + - jossemarGT + - krol3 - raelga - ramrodo - - jossemarGT sig-docs-fr-owners: # Admins for French content - awkif - feloy From 206c52758cea4ecf7f72c1782e6cf59381f71e5b Mon Sep 17 00:00:00 2001 From: Olawale Olaleye Date: Mon, 4 Mar 2024 10:32:27 +0000 Subject: [PATCH 09/18] added commands to cheatsheet.md --- content/en/docs/reference/kubectl/quick-reference.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/content/en/docs/reference/kubectl/quick-reference.md b/content/en/docs/reference/kubectl/quick-reference.md index 1ec1991331..bc617518e3 100644 --- a/content/en/docs/reference/kubectl/quick-reference.md +++ b/content/en/docs/reference/kubectl/quick-reference.md @@ -78,7 +78,11 @@ kubectl config view --raw kubectl config view -o jsonpath='{.users[?(@.name == "e2e")].user.password}' # get the certificate for the e2e user +<<<<<<< HEAD kubectl config view --raw -ojsonpath='{.users[?(.name == 'e2e')].user.client-certificate-data}' | base64 -d +======= +kubectl config view --raw -o jsonpath="{.users[?(.name == 'e2e')].user.client-certificate-data}" | base64 -d +>>>>>>> 908a1cca4c (added commands to cheatsheet.md) kubectl config view -o jsonpath='{.users[].name}' # display the first user kubectl config view -o jsonpath='{.users[*].name}' # get a list of users From 7d987ff8a703a2236364f8fc183cb280c83df0bd Mon Sep 17 00:00:00 2001 From: Olawale Olaleye Date: Mon, 4 Mar 2024 10:40:36 +0000 Subject: [PATCH 10/18] added commands to cheatsheet.md --- content/en/docs/reference/kubectl/quick-reference.md | 6 +----- 1 file changed, 1 insertion(+), 5 deletions(-) diff --git a/content/en/docs/reference/kubectl/quick-reference.md b/content/en/docs/reference/kubectl/quick-reference.md index bc617518e3..d0af256b4d 100644 --- a/content/en/docs/reference/kubectl/quick-reference.md +++ b/content/en/docs/reference/kubectl/quick-reference.md @@ -78,11 +78,7 @@ kubectl config view --raw kubectl config view -o jsonpath='{.users[?(@.name == "e2e")].user.password}' # get the certificate for the e2e user -<<<<<<< HEAD -kubectl config view --raw -ojsonpath='{.users[?(.name == 'e2e')].user.client-certificate-data}' | base64 -d -======= -kubectl config view --raw -o jsonpath="{.users[?(.name == 'e2e')].user.client-certificate-data}" | base64 -d ->>>>>>> 908a1cca4c (added commands to cheatsheet.md) +kubectl config view --raw -o jsonpath='{.users[?(.name == 
'e2e')].user.client-certificate-data}' | base64 -d kubectl config view -o jsonpath='{.users[].name}' # display the first user kubectl config view -o jsonpath='{.users[*].name}' # get a list of users From c56e8ab1413bc4c8239e1ced0408dc556b74bb98 Mon Sep 17 00:00:00 2001 From: Abigail McCarthy <20771501+a-mccarthy@users.noreply.github.com> Date: Tue, 5 Mar 2024 15:46:17 -0500 Subject: [PATCH 11/18] Update pr-wranglers.md --- content/en/docs/contribute/participate/pr-wranglers.md | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/content/en/docs/contribute/participate/pr-wranglers.md b/content/en/docs/contribute/participate/pr-wranglers.md index cc00de9ce1..446e6e73bd 100644 --- a/content/en/docs/contribute/participate/pr-wranglers.md +++ b/content/en/docs/contribute/participate/pr-wranglers.md @@ -45,6 +45,10 @@ Each day in a week-long shift as PR Wrangler: - Using style fixups as good first issues is a good way to ensure a supply of easier tasks to help onboard new contributors. +{{< note >}} +PR Wrangler duties apply to English language PRs. Localization teams have their own processes and teams for reviewing their language PRs. However if you have time during your shift, its often helpful to make sure language PRs are labeled correctly, review small non-language dependent PRs (like a link update), or tag reviewers or contributors in long running PRs (ones opened more than 6 months ago and have not been updated in a month or more). +{{< /note >}} + ### Helpful GitHub queries for wranglers From f23d7e032729d9cc35416b7d1ba3c591718ebd52 Mon Sep 17 00:00:00 2001 From: Abigail McCarthy <20771501+a-mccarthy@users.noreply.github.com> Date: Thu, 7 Mar 2024 13:22:50 -0500 Subject: [PATCH 12/18] Update content/en/docs/contribute/participate/pr-wranglers.md Co-authored-by: Seokho Son --- content/en/docs/contribute/participate/pr-wranglers.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/en/docs/contribute/participate/pr-wranglers.md b/content/en/docs/contribute/participate/pr-wranglers.md index 446e6e73bd..1b16a1cd12 100644 --- a/content/en/docs/contribute/participate/pr-wranglers.md +++ b/content/en/docs/contribute/participate/pr-wranglers.md @@ -46,7 +46,7 @@ Each day in a week-long shift as PR Wrangler: to help onboard new contributors. {{< note >}} -PR Wrangler duties apply to English language PRs. Localization teams have their own processes and teams for reviewing their language PRs. However if you have time during your shift, its often helpful to make sure language PRs are labeled correctly, review small non-language dependent PRs (like a link update), or tag reviewers or contributors in long running PRs (ones opened more than 6 months ago and have not been updated in a month or more). +PR wrangler duties do not apply to localization PRs (non-English PRs). Localization teams have their own processes and teams for reviewing their language PRs. However, if you have time during your shift, it's often helpful to ensure language PRs are labeled correctly, review small non-language dependent PRs (like a link update), or tag reviewers or contributors in long-running PRs (ones opened more than 6 months ago and have not been updated in a month or more). 
{{< /note >}} From 4dfde3498a09e8e1c315579feffc3653c8c67618 Mon Sep 17 00:00:00 2001 From: Abigail McCarthy <20771501+a-mccarthy@users.noreply.github.com> Date: Thu, 7 Mar 2024 13:30:22 -0500 Subject: [PATCH 13/18] update line wraping --- content/en/docs/contribute/participate/pr-wranglers.md | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/content/en/docs/contribute/participate/pr-wranglers.md b/content/en/docs/contribute/participate/pr-wranglers.md index 1b16a1cd12..759dadb566 100644 --- a/content/en/docs/contribute/participate/pr-wranglers.md +++ b/content/en/docs/contribute/participate/pr-wranglers.md @@ -46,7 +46,11 @@ Each day in a week-long shift as PR Wrangler: to help onboard new contributors. {{< note >}} -PR wrangler duties do not apply to localization PRs (non-English PRs). Localization teams have their own processes and teams for reviewing their language PRs. However, if you have time during your shift, it's often helpful to ensure language PRs are labeled correctly, review small non-language dependent PRs (like a link update), or tag reviewers or contributors in long-running PRs (ones opened more than 6 months ago and have not been updated in a month or more). +PR wrangler duties do not apply to localization PRs (non-English PRs). +Localization teams have their own processes and teams for reviewing their language PRs. +However, it's often helpful to ensure language PRs are labeled correctly, +review small non-language dependent PRs (like a link update), +or tag reviewers or contributors in long-running PRs (ones opened more than 6 months ago and have not been updated in a month or more). {{< /note >}} From 517cd1a72274e7a4b80ee9480ccf4983e2778b1d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20Mudrini=C4=87?= Date: Thu, 7 Mar 2024 14:33:03 +0100 Subject: [PATCH 14/18] Add a note about /etc/apt/keyrings to pkgs.k8s.io migration guide MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Signed-off-by: Marko Mudrinić Co-authored-by: Sean McGinnis --- content/en/blog/_posts/2023-08-15-pkgs-k8s-io-introduction.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/content/en/blog/_posts/2023-08-15-pkgs-k8s-io-introduction.md b/content/en/blog/_posts/2023-08-15-pkgs-k8s-io-introduction.md index 755b4ce04d..ba60140a45 100644 --- a/content/en/blog/_posts/2023-08-15-pkgs-k8s-io-introduction.md +++ b/content/en/blog/_posts/2023-08-15-pkgs-k8s-io-introduction.md @@ -173,6 +173,8 @@ publishing packages to the Google-hosted repository in the future. curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.28/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg ``` + Note: In releases older than Debian 12 and Ubuntu 22.04, the folder `/etc/apt/keyrings` does not exist by default, and it should be created before the curl command. + 3. 
Update the `apt` package index: ```shell From 4f366561a162427d51090c12181cfb28d652d515 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Marko=20Mudrini=C4=87?= Date: Thu, 7 Mar 2024 23:55:58 +0100 Subject: [PATCH 15/18] Update content/en/blog/_posts/2023-08-15-pkgs-k8s-io-introduction.md Co-authored-by: Rey Lejano --- content/en/blog/_posts/2023-08-15-pkgs-k8s-io-introduction.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/content/en/blog/_posts/2023-08-15-pkgs-k8s-io-introduction.md b/content/en/blog/_posts/2023-08-15-pkgs-k8s-io-introduction.md index ba60140a45..6a279073cf 100644 --- a/content/en/blog/_posts/2023-08-15-pkgs-k8s-io-introduction.md +++ b/content/en/blog/_posts/2023-08-15-pkgs-k8s-io-introduction.md @@ -173,7 +173,7 @@ publishing packages to the Google-hosted repository in the future. curl -fsSL https://pkgs.k8s.io/core:/stable:/v1.28/deb/Release.key | sudo gpg --dearmor -o /etc/apt/keyrings/kubernetes-apt-keyring.gpg ``` - Note: In releases older than Debian 12 and Ubuntu 22.04, the folder `/etc/apt/keyrings` does not exist by default, and it should be created before the curl command. +_Update: In releases older than Debian 12 and Ubuntu 22.04, the folder `/etc/apt/keyrings` does not exist by default, and it should be created before the curl command._ 3. Update the `apt` package index: From d488e6d3a3de5f26f33de66397c529a3aa14fc6d Mon Sep 17 00:00:00 2001 From: steve-hardman <132999137+steve-hardman@users.noreply.github.com> Date: Fri, 8 Mar 2024 06:57:48 +0000 Subject: [PATCH 16/18] [hi] Fix page layout for "Using kubectl to Create a Deployment" (#44745) * Fix layout * Fix layout * Fix layout Co-authored-by: Dipesh Rawat * Fix layout Co-authored-by: Dipesh Rawat --------- Co-authored-by: Dipesh Rawat --- .../tutorials/kubernetes-basics/deploy-app/deploy-intro.html | 5 ++--- 1 file changed, 2 insertions(+), 3 deletions(-) diff --git a/content/hi/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html b/content/hi/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html index 10022ef7b6..9a66ea7ac2 100644 --- a/content/hi/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html +++ b/content/hi/docs/tutorials/kubernetes-basics/deploy-app/deploy-intro.html @@ -9,7 +9,7 @@ weight: 10 - +
@@ -65,13 +65,12 @@ weight: 10
-

+

आप कुबेरनेट्स कमांड लाइन इंटरफेस, kubectl का उपयोग करके डिप्लॉयमेंट बना और प्रबंधित कर सकते हैं। kubectl क्लस्टर के साथ बातचीत करने के लिए कुबेरनेट्स एपीआई का उपयोग करता है। इस मॉड्यूल में, आप कुबेरनेट्स क्लस्टर पर आपके एप्लिकेशन चलाने वाले डिप्लॉयमेंट बनाने के लिए आवश्यक सबसे सामान्य kubectl कमांड सीखेंगे।

जब आप कोई डिप्लॉयमेंट बनाते हैं, तो आपको अपने एप्लिकेशन के लिए कंटेनर इमेज और चलाने के लिए इच्छित प्रतिकृतियों की संख्या निर्दिष्ट करने की आवश्यकता होगी। आप अपने कामकाज को अपडेट करके बाद में उस जानकारी को बदल सकते हैं; बूटकैंप के मॉड्यूल 5 और 6 चर्चा करते हैं कि आप अपने डिप्लॉयमेंट को कैसे स्केल और अपडेट कर सकते हैं।

-
From dfc0c6682e1934558cef36e2643c3d6a27817da3 Mon Sep 17 00:00:00 2001 From: "John.C" <155592353+dev-johnn@users.noreply.github.com> Date: Fri, 8 Mar 2024 17:02:27 +0000 Subject: [PATCH 17/18] Add integrity attribute to script used by CNCF Landscape shortcode (#45412) * Add integrity attribute to script * Add integrity attribute to script * Add integrity attribute to script --- layouts/partials/head.html | 4 ++++ layouts/shortcodes/cncf-landscape.html | 1 - 2 files changed, 4 insertions(+), 1 deletion(-) diff --git a/layouts/partials/head.html b/layouts/partials/head.html index ad4fb8aa3a..719142ab70 100644 --- a/layouts/partials/head.html +++ b/layouts/partials/head.html @@ -91,6 +91,10 @@ {{- end -}} +{{- if .HasShortcode "cncf-landscape" -}} + +{{- end -}} + {{- if eq (lower .Params.cid) "community" -}} {{- if eq .Params.community_styles_migrated true -}} diff --git a/layouts/shortcodes/cncf-landscape.html b/layouts/shortcodes/cncf-landscape.html index 5324ea1ee3..10d9fe31dc 100644 --- a/layouts/shortcodes/cncf-landscape.html +++ b/layouts/shortcodes/cncf-landscape.html @@ -57,7 +57,6 @@ document.addEventListener("DOMContentLoaded", function () { {{- end -}}
- {{ if ( .Get "category" ) }}