Merge pull request #48842 from hacktivist123/merged-main-dev-1.32
Merge `main` into `dev-1.32`
commit
f904ca117d
@@ -1 +1,3 @@
**
!package.json
!package-lock.json
@@ -1,7 +1,7 @@
[submodule "themes/docsy"]
  path = themes/docsy
  url = https://github.com/google/docsy.git
  branch = v0.2.0
  branch = v0.3.0
[submodule "api-ref-generator"]
  path = api-ref-generator
  url = https://github.com/kubernetes-sigs/reference-docs
@@ -29,8 +29,7 @@ RUN apk add --no-cache \
    git \
    openssh-client \
    rsync \
    npm && \
    npm install -D autoprefixer postcss-cli
    npm

RUN mkdir -p /var/hugo && \
    addgroup -Sg 1000 hugo && \

@@ -41,6 +40,8 @@ RUN mkdir -p /var/hugo && \
COPY --from=0 /go/bin/hugo /usr/local/bin/hugo

WORKDIR /src
COPY package.json package-lock.json ./
RUN npm ci

USER hugo:hugo
Makefile
@@ -97,11 +97,17 @@ docker-push: ## Build a multi-architecture image and push that into the registry
	rm Dockerfile.cross

container-build: module-check
	$(CONTAINER_RUN) --read-only --mount type=tmpfs,destination=/tmp,tmpfs-mode=01777 $(CONTAINER_IMAGE) sh -c "npm ci && hugo --minify --environment development"
	mkdir -p public
	$(CONTAINER_RUN) --read-only \
		--mount type=tmpfs,destination=/tmp,tmpfs-mode=01777 \
		--mount type=bind,source=$(CURDIR)/public,target=/src/public $(CONTAINER_IMAGE) \
		hugo --cleanDestinationDir --buildDrafts --buildFuture --environment preview --noBuildLock

# no build lock to allow for read-only mounts
container-serve: module-check ## Boot the development server using container.
	$(CONTAINER_RUN) --cap-drop=ALL --cap-add=AUDIT_WRITE --read-only --mount type=tmpfs,destination=/tmp,tmpfs-mode=01777 -p 1313:1313 $(CONTAINER_IMAGE) hugo server --buildFuture --environment development --bind 0.0.0.0 --destination /tmp/hugo --cleanDestinationDir --noBuildLock
	$(CONTAINER_RUN) --cap-drop=ALL --cap-add=AUDIT_WRITE --read-only \
		--mount type=tmpfs,destination=/tmp,tmpfs-mode=01777 -p 1313:1313 $(CONTAINER_IMAGE) \
		hugo server --buildFuture --environment development --bind 0.0.0.0 --destination /tmp/public --cleanDestinationDir --noBuildLock

test-examples:
	scripts/test_examples.sh install
@@ -437,12 +437,6 @@ $ocean-nodes-h3-margin-bottom: 30px;
// video
$video-section-height: 200px;

// features
$features-h3-margin-bottom: 20px;
$feature-box-div-width: 100%;
$feature-box-margin-bottom: 0;
$feature-box-div-margin-bottom: 40px;

// Home-specific

.td-home {
@ -687,47 +681,58 @@ section#cncf {
|
|||
}
|
||||
|
||||
// Features
|
||||
#features {
|
||||
padding-top: 140px;
|
||||
body.td-home section.features-container {
|
||||
padding: 0; // reset
|
||||
padding-top: 140px; // make room for logo
|
||||
|
||||
background-color: $light-grey;
|
||||
background-image: url(/images/wheel.svg);
|
||||
background-position: center 60px;
|
||||
background-repeat: no-repeat;
|
||||
background-size: 60px;
|
||||
}
|
||||
|
||||
.feature-box {
|
||||
//padding: 50px 0
|
||||
width: 100%;
|
||||
overflow: hidden;
|
||||
clear: both;
|
||||
display: flex;
|
||||
justify-content: space-evenly;
|
||||
flex-wrap: wrap;
|
||||
padding-bottom: 2em;
|
||||
|
||||
h4 {
|
||||
line-height: normal;
|
||||
margin-bottom: 15px;
|
||||
.k8s-features-heading {
|
||||
color: $primary;
|
||||
text-align: center;
|
||||
}
|
||||
|
||||
& > div {
|
||||
background-color: #daeaf9;
|
||||
border-radius: 20px;
|
||||
padding: 25px;
|
||||
}
|
||||
}
|
||||
display: flex;
|
||||
flex-direction: row;
|
||||
justify-content: center;
|
||||
flex-wrap: wrap;
|
||||
align-content: flex-start;
|
||||
align-items: stretch;
|
||||
gap: 1.2rem;
|
||||
|
||||
#features {
|
||||
h3 {
|
||||
margin-bottom: $features-h3-margin-bottom;
|
||||
}
|
||||
margin-top: 1.8em;
|
||||
|
||||
.feature-box {
|
||||
margin-bottom: $feature-box-margin-bottom;
|
||||
max-width: clamp(75em, 25cm, 90vw);
|
||||
margin-left: auto;
|
||||
margin-right: auto;
|
||||
|
||||
& > div {
|
||||
width: $feature-box-div-width;
|
||||
margin-bottom: $feature-box-div-margin-bottom;
|
||||
background: initial;
|
||||
|
||||
& > .feature-box {
|
||||
margin: 0;
|
||||
|
||||
h3 {
|
||||
text-align: center;
|
||||
line-height: normal;
|
||||
font-size: 1.3em;
|
||||
margin-bottom: 1rem;
|
||||
}
|
||||
|
||||
flex-basis: calc(min(75vw, 12.5cm, 35em));
|
||||
flex-shrink: 0;
|
||||
|
||||
border-radius: 0.5em;
|
||||
padding: 1em;
|
||||
padding-bottom: 1.2em;
|
||||
|
||||
background-color: #daeaf9; // light blue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -1954,6 +1954,14 @@ body.td-search #search {
  padding: 0.2rem;
}


// handle main page features on narrow viewports
@media screen and (max-width: 768px) {
  .features-container div.feature-box {
    min-width: 80vw;
  }
}

@media screen and (max-aspect-ratio: 9/15) {
  gap: 0.4rem;
}

|
@ -31,12 +31,6 @@ $vendor-strip-font-size: 16px;
|
|||
//video
|
||||
$video-section-height: 400px;
|
||||
|
||||
//features
|
||||
$features-h3-margin-bottom: 40px;
|
||||
$feature-box-margin-bottom: 60px;
|
||||
$feature-box-div-margin-bottom: 0;
|
||||
$feature-box-div-width: 45%;
|
||||
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
////////////////////////////////////////////////////////////////////////////////////////////////////
|
||||
|
@ -156,29 +150,6 @@ $feature-box-div-width: 45%;
|
|||
display: none;
|
||||
}
|
||||
|
||||
#features {
|
||||
padding-bottom: 60px;
|
||||
|
||||
.feature-box {
|
||||
margin-bottom: 30px;
|
||||
|
||||
&:last-child {
|
||||
margin-bottom: 0;
|
||||
}
|
||||
}
|
||||
|
||||
h3 {
|
||||
margin-bottom: $features-h3-margin-bottom;
|
||||
}
|
||||
|
||||
.feature-box {
|
||||
& > div {
|
||||
width: $feature-box-div-width;
|
||||
margin-bottom: $feature-box-div-margin-bottom;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#talkToUs {
|
||||
#bigSocial {
|
||||
div {
|
||||
|
@ -211,4 +182,9 @@ $feature-box-div-width: 45%;
|
|||
width: 48%;
|
||||
}
|
||||
}
|
||||
|
||||
.features-container div.feature-box {
|
||||
min-width: clamp(20rem, 6cm, 90vw);
|
||||
max-width: clamp(90rem, 10cm, 90vw);
|
||||
}
|
||||
}
|
||||
|
|
|
@@ -68,7 +68,7 @@ If you would like to see the feature in action and verify it works fine in your
  name: csi-sc-example
provisioner: pd.csi.storage.gke.io
parameters:
  disk-type: "hyperdisk-balanced"
  type: "hyperdisk-balanced"
volumeBindingMode: WaitForFirstConsumer
```

@@ -174,4 +174,4 @@ Special thanks to all the contributors that provided great reviews, shared valua
* Jordan Liggitt (liggitt)
* Matthew Cary (mattcary)
* Michelle Au (msau42)
* Xing Yang (xing-yang)
* Xing Yang (xing-yang)
@@ -0,0 +1,26 @@
(new file: a 900x250 px SVG image, 12 KiB, which appears to be the Gateway API logo referenced as `gateway-api-logo.svg` by the new blog post below; inline SVG path data omitted)
@@ -0,0 +1,445 @@
---
layout: blog
title: "Gateway API v1.2: WebSockets, Timeouts, Retries, and More"
date: 2024-11-21T09:00:00-08:00
slug: gateway-api-v1-2
author: Gateway API Contributors
---

![Gateway API logo](gateway-api-logo.svg)

Kubernetes SIG Network is delighted to announce the general availability of
[Gateway API](https://gateway-api.sigs.k8s.io/) v1.2! This version of the API
was released on October 3, and we're delighted to report that we now have a
number of conformant implementations of it for you to try out.

Gateway API v1.2 brings a number of new features to the _Standard channel_
(Gateway API's GA release channel), introduces some new experimental features,
and inaugurates our new release process — but it also brings two breaking
changes that you'll want to be careful of.

## Breaking changes

### GRPCRoute and ReferenceGrant `v1alpha2` removal

Now that the `v1` versions of GRPCRoute and ReferenceGrant have graduated to
Standard, the old `v1alpha2` versions have been removed from both the Standard
and Experimental channels, in order to ease the maintenance burden that
perpetually supporting the old versions would place on the Gateway API
community.

Before upgrading to Gateway API v1.2, you'll want to confirm that any
implementations of Gateway API have been upgraded to support the v1 API
version of these resources instead of the v1alpha2 API version. Note that even
if you've been using v1 in your YAML manifests, a controller may still be
using v1alpha2 which would cause it to fail during this upgrade. Additionally,
Kubernetes itself goes to some effort to stop you from removing a CRD version
that it thinks you're using: check out the [release notes] for more
information about what you need to do to safely upgrade.

[release notes]: https://github.com/kubernetes-sigs/gateway-api/releases/tag/v1.2.0

### Change to `.status.supportedFeatures` (experimental) {#status-supported-features}

A much smaller breaking change: `.status.supportedFeatures` in a Gateway is
now a list of objects instead of a list of strings. The objects have a single
`name` field, so the translation from the strings is straightforward, but
moving to objects permits a lot more flexibility for the future. This stanza
is not yet present in the Standard channel.
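
As a rough illustration (the feature name below is only an example; the values
actually reported depend on your implementation), the same status changes shape
like this:

```yaml
# Before (v1.1, experimental): supportedFeatures is a list of strings
status:
  supportedFeatures:
    - HTTPRouteRequestMirror
# After (v1.2, experimental): supportedFeatures is a list of objects
status:
  supportedFeatures:
    - name: HTTPRouteRequestMirror
```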

## Graduations to the standard channel

Gateway API 1.2.0 graduates four features to the Standard channel, meaning
that they can now be considered generally available. Inclusion in the Standard
release channel denotes a high level of confidence in the API surface and
provides guarantees of backward compatibility. Of course, as with any other
Kubernetes API, Standard channel features can continue to evolve with
backward-compatible additions over time, and we certainly expect further
refinements and improvements to these new features in the future. For more
information on how all of this works, refer to the [Gateway API Versioning
Policy](https://gateway-api.sigs.k8s.io/concepts/versioning/).

### HTTPRoute timeouts

[GEP-1742](https://gateway-api.sigs.k8s.io/geps/gep-1742/) introduced the
`timeouts` stanza into HTTPRoute, permitting configuring basic timeouts for
HTTP traffic. This is a simple but important feature for proper resilience
when handling HTTP traffic, and it is now Standard.

For example, this HTTPRoute configuration sets a timeout of 300ms for traffic
to the `/face` path:

```yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: face-with-timeouts
  namespace: faces
spec:
  parentRefs:
    - name: my-gateway
      kind: Gateway
  rules:
    - matches:
        - path:
            type: PathPrefix
            value: /face
      backendRefs:
        - name: face
          port: 80
      timeouts:
        request: 300ms
```

For more information, check out the [HTTP routing] documentation. (Note that
this applies only to HTTPRoute timeouts. GRPCRoute timeouts are not yet part
of Gateway API.)

[HTTP routing]: https://gateway-api.sigs.k8s.io/guides/http-routing/

### Gateway infrastructure labels and annotations

Gateway API implementations are responsible for creating the backing
infrastructure needed to make each Gateway work. For example, implementations
running in a Kubernetes cluster often create Services and Deployments, while
cloud-based implementations may be creating cloud load balancer resources. In
many cases, it can be helpful to be able to propagate labels or annotations to
these generated resources.

In v1.2.0, the Gateway `infrastructure` stanza moves to the Standard channel,
allowing you to specify labels and annotations for the infrastructure created
by the Gateway API controller. For example, if your Gateway infrastructure is
running in-cluster, you can specify both Linkerd and Istio injection using the
following Gateway configuration, making it simpler for the infrastructure to
be incorporated into whichever service mesh you've installed:

```yaml
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: meshed-gateway
  namespace: incoming
spec:
  gatewayClassName: meshed-gateway-class
  listeners:
    - name: http-listener
      protocol: HTTP
      port: 80
  infrastructure:
    labels:
      istio-injection: enabled
    annotations:
      linkerd.io/inject: enabled
```

For more information, check out the
[`infrastructure` API reference](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io/v1.GatewayInfrastructure).

### Backend protocol support

Since Kubernetes v1.20, the Service and EndpointSlice resources have supported
a stable `appProtocol` field to allow users to specify the L7 protocol that the
Service supports. With the adoption of
[KEP 3726](https://github.com/kubernetes/enhancements/tree/master/keps/sig-network/3726-standard-application-protocols),
Kubernetes now supports three new `appProtocol` values:

`kubernetes.io/h2c`
: HTTP/2 over cleartext as described in [RFC7540](https://www.rfc-editor.org/rfc/rfc7540)

`kubernetes.io/ws`
: WebSocket over cleartext as described in [RFC6455](https://www.rfc-editor.org/rfc/rfc6455)

`kubernetes.io/wss`
: WebSocket over TLS as described in [RFC6455](https://www.rfc-editor.org/rfc/rfc6455)

With Gateway API 1.2.0, support for honoring `appProtocol` is now Standard.
For example, given the following Service:

```yaml
apiVersion: v1
kind: Service
metadata:
  name: websocket-service
  namespace: my-namespace
spec:
  selector:
    app.kubernetes.io/name: websocket-app
  ports:
    - name: http
      port: 80
      targetPort: 9376
      protocol: TCP
      appProtocol: kubernetes.io/ws
```

then an HTTPRoute that includes this Service as a `backendRef` will
automatically upgrade the connection to use WebSockets rather than assuming
that the connection is pure HTTP.
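
For illustration only (the route and gateway names here are invented, not part
of the release), a minimal HTTPRoute that sends traffic to the Service above
could look like this:

```yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: websocket-route
  namespace: my-namespace
spec:
  parentRefs:
    - name: my-gateway
      kind: Gateway
  rules:
    - backendRefs:
        # Because the Service declares appProtocol: kubernetes.io/ws, a
        # conformant implementation speaks WebSocket to this backend.
        - name: websocket-service
          port: 80
```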

For more information, check out
[GEP-1911](https://gateway-api.sigs.k8s.io/geps/gep-1911/).

## New additions to experimental channel

### Named rules for *Route resources

The `rules` field in HTTPRoute and GRPCRoute resources can now be named, in
order to make it easier to reference the specific rule, for example:

```yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: multi-color-route
  namespace: faces
spec:
  parentRefs:
    - name: my-gateway
      kind: Gateway
      port: 80
  rules:
    - name: center-rule
      matches:
        - path:
            type: PathPrefix
            value: /color/center
      backendRefs:
        - name: color-center
          port: 80
    - name: edge-rule
      matches:
        - path:
            type: PathPrefix
            value: /color/edge
      backendRefs:
        - name: color-edge
          port: 80
```

Logging or status messages can now refer to these two rules as `center-rule`
or `edge-rule` instead of being forced to refer to them by index. For more
information, see [GEP-995](https://gateway-api.sigs.k8s.io/geps/gep-995/).

### HTTPRoute retry support

Gateway API 1.2.0 introduces experimental support for counted HTTPRoute
retries. For example, the following HTTPRoute configuration retries requests
to the `/face` path up to 3 times with a 500ms delay between retries:

```yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: face-with-retries
  namespace: faces
spec:
  parentRefs:
    - name: my-gateway
      kind: Gateway
      port: 80
  rules:
    - matches:
        - path:
            type: PathPrefix
            value: /face
      backendRefs:
        - name: face
          port: 80
      retry:
        codes: [ 500, 502, 503, 504 ]
        attempts: 3
        backoff: 500ms
```

For more information, check out
[GEP-1731](https://gateway-api.sigs.k8s.io/geps/gep-1731).

### HTTPRoute percentage-based mirroring

Gateway API has long supported the
[Request Mirroring](https://gateway-api.sigs.k8s.io/guides/http-request-mirroring/)
feature, which allows sending the same request to multiple backends. In
Gateway API 1.2.0, we're introducing percentage-based mirroring, which allows
you to specify a percentage of requests to mirror to a different backend. For
example, the following HTTPRoute configuration mirrors 42% of requests to the
`color-mirror` backend:

```yaml
apiVersion: gateway.networking.k8s.io/v1
kind: HTTPRoute
metadata:
  name: color-mirror-route
  namespace: faces
spec:
  parentRefs:
    - name: mirror-gateway
  hostnames:
    - mirror.example
  rules:
    - backendRefs:
        - name: color
          port: 80
      filters:
        - type: RequestMirror
          requestMirror:
            backendRef:
              name: color-mirror
              port: 80
            percent: 42 # This value must be an integer.
```

There's also a `fraction` stanza which can be used in place of `percent`, to
allow for more precise control over exactly what amount of traffic is
mirrored, for example:

```yaml
...
      filters:
        - type: RequestMirror
          requestMirror:
            backendRef:
              name: color-mirror
              port: 80
            fraction:
              numerator: 1
              denominator: 10000
```

This configuration mirrors 1 in 10,000 requests to the `color-mirror` backend,
which may be relevant with very high request rates. For more details, see
[GEP-3171](https://gateway-api.sigs.k8s.io/geps/gep-3171).

### Additional backend TLS configuration

This release includes three additions related to TLS configuration for
communications between a Gateway and a workload (a _backend_):

1. **A new `backendTLS` field on Gateway**

   This new field allows you to specify the client certificate that a Gateway
   should use when connecting to backends.

2. **A new `subjectAltNames` field on BackendTLSPolicy**

   Previously, the `hostname` field was used to configure both the SNI that a
   Gateway should send to a backend _and_ the identity that should be provided
   by a certificate. When the new `subjectAltNames` field is specified, any
   certificate matching at least one of the specified SANs will be considered
   valid. This is particularly critical for SPIFFE where URI-based SANs may
   not be valid SNIs.

3. **A new `options` field on BackendTLSPolicy**

   Similar to the TLS options field on Gateway Listeners, we believe the same
   concept will be broadly useful for TLS-specific configuration for Backend
   TLS.

For more information, check out
[GEP-3155](https://gateway-api.sigs.k8s.io/geps/gep-3155).
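
As a rough sketch of the first of these additions, a Gateway using the new
`backendTLS` field might look something like the example below. This is not
taken from the release itself: the field is experimental, the object names are
invented, and the exact shape may change, so check the current API reference
before relying on it.

```yaml
apiVersion: gateway.networking.k8s.io/v1
kind: Gateway
metadata:
  name: egress-gateway          # hypothetical name
  namespace: incoming
spec:
  gatewayClassName: example-gateway-class
  listeners:
    - name: http-listener
      protocol: HTTP
      port: 80
  backendTLS:
    # Client certificate the Gateway presents when it opens TLS connections
    # to backends (experimental in Gateway API v1.2, per GEP-3155).
    clientCertificateRef:
      kind: Secret
      name: gateway-client-cert
```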

## More changes

For a full list of the changes included in this release, please refer to the
[v1.2.0 release notes](https://github.com/kubernetes-sigs/gateway-api/releases/tag/v1.2.0).

## Project updates

Beyond the technical, the v1.2 release also marks a few milestones in the life
of the Gateway API project itself.

### Release process improvements

Gateway API has never been intended to be a static API, and as more projects
use it as a component to build on, it's become clear that we need to bring
some more predictability to Gateway API releases. To that end, we're pleased -
and a little nervous! - to announce that we've formalized a new release
process:

- **Scoping** (4-6 weeks): maintainers and community determine the set of
  features we want to include in the release. A particular emphasis here is
  getting features _out_ of the Experimental channel — ideally this involves
  moving them to Standard, but it can also mean removing them.

- **GEP Iteration and Review** (5-7 weeks): contributors write or update
  Gateway Enhancement Proposals (GEPs) for features accepted into the release,
  with emphasis on getting consensus around the design and graduation criteria
  of the feature.

- **API Refinement and Documentation** (3-5 weeks): contributors implement the
  features in the Gateway API controllers and write the necessary
  documentation.

- **SIG Network Review and Release Candidates** (2-4 weeks): maintainers get
  the required upstream review, build release candidates, and release the new
  version.

Gateway API 1.2.0 was the first release to use the new process, and although
there are the usual rough edges of anything new, we believe that it went well.
We've already completed the Scoping phase for Gateway API 1.3, with the
release expected around the end of January 2025.

### `gwctl` moves out

The `gwctl` CLI tool has moved into its very own repository,
https://github.com/kubernetes-sigs/gwctl. `gwctl` has proven a valuable tool
for the Gateway API community; moving it into its own repository will, we
believe, make it easier to maintain and develop. As always, we welcome
contributions; while still experimental, `gwctl` already helps make working
with Gateway API a bit easier — especially for newcomers to the project!

### Maintainer changes

Rounding out our changes to the project itself, we're pleased to announce that
[Mattia Lavacca] has joined the ranks of Gateway API Maintainers! We're also
sad to announce that [Keith Mattix] has stepped down as a GAMMA lead —
happily, [Mike Morris] has returned to the role. We're grateful for everything
Keith has done, and excited to have Mattia and Mike on board.

[Mattia Lavacca]: https://github.com/mlavacca
[Keith Mattix]: https://github.com/keithmattix
[Mike Morris]: https://github.com/mikemorris

## Try it out

Unlike other Kubernetes APIs, you don't need to upgrade to the latest version of
Kubernetes to get the latest version of Gateway API. As long as you're running
Kubernetes 1.26 or later, you'll be able to get up and running with this
version of Gateway API.

To try out the API, follow our [Getting Started
Guide](https://gateway-api.sigs.k8s.io/guides/). As of this writing, five
implementations are already conformant with Gateway API v1.2. In alphabetical
order:

* [Cilium v1.17.0-pre.1](https://github.com/cilium/cilium), Experimental channel
* [Envoy Gateway v1.2.0-rc.1](https://github.com/envoyproxy/gateway), Experimental channel
* [Istio v1.24.0-alpha.0](https://istio.io), Experimental channel
* [Kong v3.2.0-244-gea4944bb0](https://github.com/kong/kubernetes-ingress-controller), Experimental channel
* [Traefik v3.2](https://traefik.io), Experimental channel

## Get involved

There are lots of opportunities to get involved and help define the future of
Kubernetes routing APIs for both ingress and service mesh.

* Check out the [user guides](https://gateway-api.sigs.k8s.io/guides) to see what use-cases can be addressed.
* Try out one of the [existing Gateway controllers](https://gateway-api.sigs.k8s.io/implementations/).
* Or [join us in the community](https://gateway-api.sigs.k8s.io/contributing/)
  and help us build the future of Gateway API together!

The maintainers would like to thank _everyone_ who's contributed to Gateway
API, whether in the form of commits to the repo, discussion, ideas, or general
support. We could never have gotten this far without the support of this
dedicated and active community.

## Related Kubernetes blog articles

* [Gateway API v1.1: Service mesh, GRPCRoute, and a whole lot more](https://kubernetes.io/blog/2024/05/09/gateway-api-v1-1/)
* [New Experimental Features in Gateway API v1.0](/blog/2023/11/28/gateway-api-ga/), 11/2023
* [Gateway API v1.0: GA Release](/blog/2023/10/31/gateway-api-ga/), 10/2023
* [Introducing ingress2gateway; Simplifying Upgrades to Gateway API](/blog/2023/10/25/introducing-ingress2gateway/), 10/2023
* [Gateway API v0.8.0: Introducing Service Mesh Support](/blog/2023/08/29/gateway-api-v0-8/), 08/2023
@@ -28,22 +28,37 @@ that system resource specifically for that container to use.

If the node where a Pod is running has enough of a resource available, it's possible (and
allowed) for a container to use more resource than its `request` for that resource specifies.
However, a container is not allowed to use more than its resource `limit`.

For example, if you set a `memory` request of 256 MiB for a container, and that container is in
a Pod scheduled to a Node with 8GiB of memory and no other Pods, then the container can try to use
more RAM.

If you set a `memory` limit of 4GiB for that container, the kubelet (and
{{< glossary_tooltip text="container runtime" term_id="container-runtime" >}}) enforce the limit.
The runtime prevents the container from using more than the configured resource limit. For example:
when a process in the container tries to consume more than the allowed amount of memory,
the system kernel terminates the process that attempted the allocation, with an out of memory
(OOM) error.
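
As a minimal sketch (the Pod name, container name, and image are placeholders),
the request and limit from this example could be expressed like this:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: memory-demo
spec:
  containers:
    - name: app
      image: registry.k8s.io/pause:3.9
      resources:
        requests:
          memory: "256Mi"   # the scheduler reserves at least this much
          cpu: "250m"
        limits:
          memory: "4Gi"     # the kubelet and runtime enforce this ceiling
          cpu: "500m"
```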

Limits are a different story. Both `cpu` and `memory` limits are applied by the kubelet (and
{{< glossary_tooltip text="container runtime" term_id="container-runtime" >}}),
and are ultimately enforced by the kernel. On Linux nodes, the Linux kernel
enforces limits with
{{< glossary_tooltip text="cgroups" term_id="cgroup" >}}.
The behavior of `cpu` and `memory` limit enforcement is slightly different.

Limits can be implemented either reactively (the system intervenes once it sees a violation)
or by enforcement (the system prevents the container from ever exceeding the limit). Different
runtimes can have different ways to implement the same restrictions.
`cpu` limits are enforced by CPU throttling. When a container approaches
its `cpu` limit, the kernel will restrict access to the CPU corresponding to the
container's limit. Thus, a `cpu` limit is a hard limit the kernel enforces.
Containers may not use more CPU than is specified in their `cpu` limit.

`memory` limits are enforced by the kernel with out of memory (OOM) kills. When
a container uses more than its `memory` limit, the kernel may terminate it. However,
terminations only happen when the kernel detects memory pressure. Thus, a
container that over-allocates memory may not be immediately killed. This means
`memory` limits are enforced reactively. A container may use more memory than
its `memory` limit, but if it does, it may get killed.

{{< note >}}
There is an alpha feature `MemoryQoS` which attempts to add more preemptive
limit enforcement for memory (as opposed to reactive enforcement by the OOM
killer). However, this effort is
[stalled](https://github.com/kubernetes/enhancements/tree/a47155b340/keps/sig-node/2570-memory-qos#latest-update-stalled)
due to a potential livelock situation that a memory-hungry workload can cause.
{{< /note >}}

{{< note >}}
If you specify a limit for a resource, but do not specify any request, and no admission-time
@@ -883,5 +898,4 @@ memory limit (and possibly request) for that container.
and its [resource requirements](/docs/reference/kubernetes-api/workload-resources/pod-v1/#resources)
* Read about [project quotas](https://www.linux.org/docs/man8/xfs_quota.html) in XFS
* Read more about the [kube-scheduler configuration reference (v1)](/docs/reference/config-api/kube-scheduler-config.v1/)
* Read more about [Quality of Service classes for Pods](/docs/concepts/workloads/pods/pod-qos/)

* Read more about [Quality of Service classes for Pods](/docs/concepts/workloads/pods/pod-qos/)
@ -9,135 +9,142 @@ weight: 90
|
|||
<!-- overview -->
|
||||
|
||||
Selecting the appropriate authentication mechanism(s) is a crucial aspect of securing your cluster.
|
||||
Kubernetes provides several built-in mechanisms, each with its own strengths and weaknesses that
|
||||
Kubernetes provides several built-in mechanisms, each with its own strengths and weaknesses that
|
||||
should be carefully considered when choosing the best authentication mechanism for your cluster.
|
||||
|
||||
In general, it is recommended to enable as few authentication mechanisms as possible to simplify
|
||||
In general, it is recommended to enable as few authentication mechanisms as possible to simplify
|
||||
user management and prevent cases where users retain access to a cluster that is no longer required.
|
||||
|
||||
It is important to note that Kubernetes does not have an in-built user database within the cluster.
|
||||
Instead, it takes user information from the configured authentication system and uses that to make
|
||||
authorization decisions. Therefore, to audit user access, you need to review credentials from every
|
||||
It is important to note that Kubernetes does not have an in-built user database within the cluster.
|
||||
Instead, it takes user information from the configured authentication system and uses that to make
|
||||
authorization decisions. Therefore, to audit user access, you need to review credentials from every
|
||||
configured authentication source.
|
||||
|
||||
For production clusters with multiple users directly accessing the Kubernetes API, it is
|
||||
recommended to use external authentication sources such as OIDC. The internal authentication
|
||||
mechanisms, such as client certificates and service account tokens, described below, are not
|
||||
suitable for this use-case.
|
||||
For production clusters with multiple users directly accessing the Kubernetes API, it is
|
||||
recommended to use external authentication sources such as OIDC. The internal authentication
|
||||
mechanisms, such as client certificates and service account tokens, described below, are not
|
||||
suitable for this use case.
|
||||
|
||||
<!-- body -->
|
||||
|
||||
## X.509 client certificate authentication {#x509-client-certificate-authentication}
|
||||
|
||||
Kubernetes leverages [X.509 client certificate](/docs/reference/access-authn-authz/authentication/#x509-client-certificates)
|
||||
authentication for system components, such as when the Kubelet authenticates to the API Server.
|
||||
While this mechanism can also be used for user authentication, it might not be suitable for
|
||||
Kubernetes leverages [X.509 client certificate](/docs/reference/access-authn-authz/authentication/#x509-client-certificates)
|
||||
authentication for system components, such as when the kubelet authenticates to the API Server.
|
||||
While this mechanism can also be used for user authentication, it might not be suitable for
|
||||
production use due to several restrictions:
|
||||
|
||||
- Client certificates cannot be individually revoked. Once compromised, a certificate can be used
|
||||
by an attacker until it expires. To mitigate this risk, it is recommended to configure short
|
||||
- Client certificates cannot be individually revoked. Once compromised, a certificate can be used
|
||||
by an attacker until it expires. To mitigate this risk, it is recommended to configure short
|
||||
lifetimes for user authentication credentials created using client certificates.
|
||||
- If a certificate needs to be invalidated, the certificate authority must be re-keyed, which
|
||||
can introduce availability risks to the cluster.
|
||||
- There is no permanent record of client certificates created in the cluster. Therefore, all
|
||||
issued certificates must be recorded if you need to keep track of them.
|
||||
- Private keys used for client certificate authentication cannot be password-protected. Anyone
|
||||
who can read the file containing the key will be able to make use of it.
|
||||
- Using client certificate authentication requires a direct connection from the client to the
|
||||
API server with no intervening TLS termination points, which can complicate network architectures.
|
||||
- Group data is embedded in the `O` value of the client certificate, which means the user's group
|
||||
memberships cannot be changed for the lifetime of the certificate.
|
||||
- If a certificate needs to be invalidated, the certificate authority must be re-keyed, which
|
||||
can introduce availability risks to the cluster.
|
||||
- There is no permanent record of client certificates created in the cluster. Therefore, all
|
||||
issued certificates must be recorded if you need to keep track of them.
|
||||
- Private keys used for client certificate authentication cannot be password-protected. Anyone
|
||||
who can read the file containing the key will be able to make use of it.
|
||||
- Using client certificate authentication requires a direct connection from the client to the
|
||||
API server without any intervening TLS termination points, which can complicate network architectures.
|
||||
- Group data is embedded in the `O` value of the client certificate, which means the user's group
|
||||
memberships cannot be changed for the lifetime of the certificate.
|
||||
|
||||
## Static token file {#static-token-file}
|
||||
|
||||
Although Kubernetes allows you to load credentials from a
|
||||
[static token file](/docs/reference/access-authn-authz/authentication/#static-token-file) located
|
||||
on the control plane node disks, this approach is not recommended for production servers due to
|
||||
Although Kubernetes allows you to load credentials from a
|
||||
[static token file](/docs/reference/access-authn-authz/authentication/#static-token-file) located
|
||||
on the control plane node disks, this approach is not recommended for production servers due to
|
||||
several reasons:
|
||||
|
||||
- Credentials are stored in clear text on control plane node disks, which can be a security risk.
|
||||
- Changing any credential requires a restart of the API server process to take effect, which can
|
||||
impact availability.
|
||||
- There is no mechanism available to allow users to rotate their credentials. To rotate a
|
||||
credential, a cluster administrator must modify the token on disk and distribute it to the users.
|
||||
- Changing any credential requires a restart of the API server process to take effect, which can
|
||||
impact availability.
|
||||
- There is no mechanism available to allow users to rotate their credentials. To rotate a
|
||||
credential, a cluster administrator must modify the token on disk and distribute it to the users.
|
||||
- There is no lockout mechanism available to prevent brute-force attacks.
|
||||
|
||||
## Bootstrap tokens {#bootstrap-tokens}
|
||||
|
||||
[Bootstrap tokens](/docs/reference/access-authn-authz/bootstrap-tokens/) are used for joining
|
||||
[Bootstrap tokens](/docs/reference/access-authn-authz/bootstrap-tokens/) are used for joining
|
||||
nodes to clusters and are not recommended for user authentication due to several reasons:
|
||||
|
||||
- They have hard-coded group memberships that are not suitable for general use, making them
|
||||
unsuitable for authentication purposes.
|
||||
- Manually generating bootstrap tokens can lead to weak tokens that can be guessed by an attacker,
|
||||
which can be a security risk.
|
||||
- There is no lockout mechanism available to prevent brute-force attacks, making it easier for
|
||||
attackers to guess or crack the token.
|
||||
- They have hard-coded group memberships that are not suitable for general use, making them
|
||||
unsuitable for authentication purposes.
|
||||
- Manually generating bootstrap tokens can lead to weak tokens that can be guessed by an attacker,
|
||||
which can be a security risk.
|
||||
- There is no lockout mechanism available to prevent brute-force attacks, making it easier for
|
||||
attackers to guess or crack the token.
|
||||
|
||||
## ServiceAccount secret tokens {#serviceaccount-secret-tokens}
|
||||
|
||||
[Service account secrets](/docs/reference/access-authn-authz/service-accounts-admin/#manual-secret-management-for-serviceaccounts)
|
||||
are available as an option to allow workloads running in the cluster to authenticate to the
|
||||
API server. In Kubernetes < 1.23, these were the default option, however, they are being replaced
|
||||
with TokenRequest API tokens. While these secrets could be used for user authentication, they are
|
||||
[Service account secrets](/docs/reference/access-authn-authz/service-accounts-admin/#manual-secret-management-for-serviceaccounts)
|
||||
are available as an option to allow workloads running in the cluster to authenticate to the
|
||||
API server. In Kubernetes < 1.23, these were the default option, however, they are being replaced
|
||||
with TokenRequest API tokens. While these secrets could be used for user authentication, they are
|
||||
generally unsuitable for a number of reasons:
|
||||
|
||||
- They cannot be set with an expiry and will remain valid until the associated service account is deleted.
|
||||
- The authentication tokens are visible to any cluster user who can read secrets in the namespace
|
||||
that they are defined in.
|
||||
- The authentication tokens are visible to any cluster user who can read secrets in the namespace
|
||||
that they are defined in.
|
||||
- Service accounts cannot be added to arbitrary groups complicating RBAC management where they are used.
|
||||
|
||||
## TokenRequest API tokens {#tokenrequest-api-tokens}
|
||||
|
||||
The TokenRequest API is a useful tool for generating short-lived credentials for service
|
||||
authentication to the API server or third-party systems. However, it is not generally recommended
|
||||
for user authentication as there is no revocation method available, and distributing credentials
|
||||
The TokenRequest API is a useful tool for generating short-lived credentials for service
|
||||
authentication to the API server or third-party systems. However, it is not generally recommended
|
||||
for user authentication as there is no revocation method available, and distributing credentials
|
||||
to users in a secure manner can be challenging.
|
||||
|
||||
When using TokenRequest tokens for service authentication, it is recommended to implement a short
|
||||
When using TokenRequest tokens for service authentication, it is recommended to implement a short
|
||||
lifespan to reduce the impact of compromised tokens.
|
||||
|
||||
## OpenID Connect token authentication {#openid-connect-token-authentication}
|
||||
|
||||
Kubernetes supports integrating external authentication services with the Kubernetes API using
|
||||
[OpenID Connect (OIDC)](/docs/reference/access-authn-authz/authentication/#openid-connect-tokens).
|
||||
There is a wide variety of software that can be used to integrate Kubernetes with an identity
|
||||
provider. However, when using OIDC authentication for Kubernetes, it is important to consider the
|
||||
Kubernetes supports integrating external authentication services with the Kubernetes API using
|
||||
[OpenID Connect (OIDC)](/docs/reference/access-authn-authz/authentication/#openid-connect-tokens).
|
||||
There is a wide variety of software that can be used to integrate Kubernetes with an identity
|
||||
provider. However, when using OIDC authentication in Kubernetes, it is important to consider the
|
||||
following hardening measures:
|
||||
|
||||
- The software installed in the cluster to support OIDC authentication should be isolated from
|
||||
general workloads as it will run with high privileges.
|
||||
- The software installed in the cluster to support OIDC authentication should be isolated from
|
||||
general workloads as it will run with high privileges.
|
||||
- Some Kubernetes managed services are limited in the OIDC providers that can be used.
|
||||
- As with TokenRequest tokens, OIDC tokens should have a short lifespan to reduce the impact of
|
||||
compromised tokens.
|
||||
- As with TokenRequest tokens, OIDC tokens should have a short lifespan to reduce the impact of
|
||||
compromised tokens.
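
As an illustrative sketch only (the issuer URL, client ID, and claim names are
placeholders, not recommendations), OIDC authentication is typically enabled
through kube-apiserver flags, for example in a static Pod manifest:

```yaml
# Excerpt from a kube-apiserver static Pod manifest; values are placeholders.
apiVersion: v1
kind: Pod
metadata:
  name: kube-apiserver
  namespace: kube-system
spec:
  containers:
    - name: kube-apiserver
      image: registry.k8s.io/kube-apiserver:v1.31.0
      command:
        - kube-apiserver
        - --oidc-issuer-url=https://issuer.example.com
        - --oidc-client-id=kubernetes
        - --oidc-username-claim=sub
        - --oidc-groups-claim=groups
        - --oidc-username-prefix=oidc:
        # ... other kube-apiserver flags elided ...
```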
|
||||
|
||||
## Webhook token authentication {#webhook-token-authentication}
|
||||
|
||||
[Webhook token authentication](/docs/reference/access-authn-authz/authentication/#webhook-token-authentication)
|
||||
is another option for integrating external authentication providers into Kubernetes. This mechanism
|
||||
allows for an authentication service, either running inside the cluster or externally, to be
|
||||
contacted for an authentication decision over a webhook. It is important to note that the suitability
|
||||
of this mechanism will likely depend on the software used for the authentication service, and there
|
||||
[Webhook token authentication](/docs/reference/access-authn-authz/authentication/#webhook-token-authentication)
|
||||
is another option for integrating external authentication providers into Kubernetes. This mechanism
|
||||
allows for an authentication service, either running inside the cluster or externally, to be
|
||||
contacted for an authentication decision over a webhook. It is important to note that the suitability
|
||||
of this mechanism will likely depend on the software used for the authentication service, and there
|
||||
are some Kubernetes-specific considerations to take into account.
|
||||
|
||||
To configure Webhook authentication, access to control plane server filesystems is required. This
|
||||
means that it will not be possible with Managed Kubernetes unless the provider specifically makes it
|
||||
available. Additionally, any software installed in the cluster to support this access should be
|
||||
To configure Webhook authentication, access to control plane server filesystems is required. This
|
||||
means that it will not be possible with Managed Kubernetes unless the provider specifically makes it
|
||||
available. Additionally, any software installed in the cluster to support this access should be
|
||||
isolated from general workloads, as it will run with high privileges.
|
||||
|
||||
## Authenticating proxy {#authenticating-proxy}

Another option for integrating external authentication systems into Kubernetes is to use an
[authenticating proxy](/docs/reference/access-authn-authz/authentication/#authenticating-proxy).
With this mechanism, Kubernetes expects to receive requests from the proxy with specific header
values set, indicating the username and group memberships to assign for authorization purposes.
There are specific considerations to take into account when using this mechanism.

Firstly, securely configured TLS must be used between the proxy and the Kubernetes API server to
mitigate the risk of traffic interception or sniffing attacks.

Secondly, be aware that an attacker who is able to modify the headers of a
request may be able to gain unauthorized access to Kubernetes resources. As such, it is important
to ensure that the headers are properly secured and cannot be tampered with.

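One common way to secure those headers is to have the proxy authenticate to the API server with a dedicated client certificate, so that the headers are only honored on that trusted connection. A sketch of the relevant kube-apiserver flags follows; the file path, certificate name, and header names are illustrative:

```none
--requestheader-client-ca-file=/etc/kubernetes/pki/front-proxy-ca.crt  # CA that signs the proxy's client certificate
--requestheader-allowed-names=front-proxy-client                       # only this client certificate may set the headers
--requestheader-username-headers=X-Remote-User
--requestheader-group-headers=X-Remote-Group
--requestheader-extra-headers-prefix=X-Remote-Extra-
```
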
## {{% heading "whatsnext" %}}

- [User Authentication](/docs/reference/access-authn-authz/authentication/)
- [Authenticating with Bootstrap Tokens](/docs/reference/access-authn-authz/bootstrap-tokens/)
- [kubelet Authentication](/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication)
- [Authenticating with Service Account Tokens](/docs/reference/access-authn-authz/service-accounts-admin/#bound-service-account-tokens)

@ -339,7 +339,7 @@ You can restrict the use of `gitRepo` volumes in your cluster using
[ValidatingAdmissionPolicy](/docs/reference/access-authn-authz/validating-admission-policy/).
You can use the following Common Expression Language (CEL) expression as
part of a policy to reject use of `gitRepo` volumes:
`!has(object.spec.volumes) || !object.spec.volumes.exists(v, has(v.gitRepo))`.

{{< /warning >}}

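As a sketch only, that expression could be wired into a cluster-wide policy along the following lines; the policy name and message are placeholders, and a corresponding ValidatingAdmissionPolicyBinding is still required for the policy to take effect:

```yaml
apiVersion: admissionregistration.k8s.io/v1
kind: ValidatingAdmissionPolicy
metadata:
  name: deny-gitrepo-volumes   # illustrative name
spec:
  failurePolicy: Fail
  matchConstraints:
    resourceRules:
      - apiGroups: [""]
        apiVersions: ["v1"]
        operations: ["CREATE", "UPDATE"]
        resources: ["pods"]
  validations:
    - expression: "!has(object.spec.volumes) || !object.spec.volumes.exists(v, has(v.gitRepo))"
      message: "gitRepo volumes are not allowed in this cluster"
```
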
@ -1,15 +0,0 @@
---
title: Mapping from dockercli to crictl
content_type: reference
weight: 10
---

{{< note >}}

This page is being directed to
https://v1-24.docs.kubernetes.io/docs/reference/tools/map-crictl-dockercli/ because of the
[removal of dockershim from crictl in v1.24](https://github.com/kubernetes-sigs/cri-tools/issues/870).
As per our community policy, deprecated documents are not maintained beyond next three versions.
The reason for deprecation is explained in [Dockershim-FAQ](/blog/2020/12/02/dockershim-faq/).

{{</ note >}}

@ -96,6 +96,11 @@ becomes available.

A simple example of an object created using Server-Side Apply could look like this:

{{< note >}}
`kubectl get` omits managed fields by default.
Add `--show-managed-fields` to show `managedFields` when the output format is either `json` or `yaml`.
{{< /note >}}

```yaml
---
apiVersion: v1

@ -249,8 +254,10 @@ metadata:
  managedFields:
  - manager: kubectl
    operation: Apply
    time: '2019-03-30T15:00:00.000Z'
    apiVersion: v1
    fieldsType: FieldsV1
    fieldsV1:
      f:metadata:
        f:labels:
          f:test-label: {}

@ -258,7 +265,8 @@ metadata:
    operation: Update
    apiVersion: v1
    time: '2019-03-30T16:00:00.000Z'
    fieldsType: FieldsV1
    fieldsV1:
      f:data:
        f:key: {}
data:

@ -294,12 +302,12 @@ for fields within Kubernetes objects.
For a {{< glossary_tooltip term_id="CustomResourceDefinition" text="CustomResourceDefinition" >}},
you can set these markers when you define the custom resource.

| Golang marker | OpenAPI extension | Possible values | Description |
| --- | --- | --- | --- |
| `//+listType` | `x-kubernetes-list-type` | `atomic`/`set`/`map` | Applicable to lists. `set` applies to lists that include only scalar elements. These elements must be unique. `map` applies to lists of nested types only. The key values (see `listMapKey`) must be unique in the list. `atomic` can apply to any list. If configured as `atomic`, the entire list is replaced during merge. At any point in time, a single manager owns the list. If `set` or `map`, different managers can manage entries separately. |
| `//+listMapKey` | `x-kubernetes-list-map-keys` | List of field names, e.g. `["port", "protocol"]` | Only applicable when `+listType=map`. A list of field names whose values uniquely identify entries in the list. While there can be multiple keys, `listMapKey` is singular because keys need to be specified individually in the Go type. The key fields must be scalars. |
| `//+mapType` | `x-kubernetes-map-type` | `atomic`/`granular` | Applicable to maps. `atomic` means that the map can only be entirely replaced by a single manager. `granular` means that the map supports separate managers updating individual fields. |
| `//+structType` | `x-kubernetes-map-type` | `atomic`/`granular` | Applicable to structs; otherwise same usage and OpenAPI annotation as `//+mapType`. |

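To make the table above concrete, here is a sketch of how these markers might appear on a hypothetical custom resource's Go types; the `ExampleSpec` and `PortSpec` types are invented for illustration and are not part of any real API:

```go
package example

// ExampleSpec is a hypothetical custom resource spec showing the markers above.
type ExampleSpec struct {
	// +listType=map
	// +listMapKey=port
	// +listMapKey=protocol
	// Entries are identified by (port, protocol); different managers may own different entries.
	Ports []PortSpec `json:"ports,omitempty"`

	// +listType=atomic
	// The whole list is owned by a single manager and replaced wholesale on merge.
	Args []string `json:"args,omitempty"`

	// +mapType=granular
	// Individual keys may be managed by different field managers.
	Annotations map[string]string `json:"annotations,omitempty"`
}

// PortSpec holds the scalar key fields referenced by +listMapKey.
type PortSpec struct {
	Port     int32  `json:"port"`
	Protocol string `json:"protocol"`
}
```
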
If `listType` is missing, the API server interprets a
`patchStrategy=merge` marker as a `listType=map` and the

@ -355,7 +363,8 @@ metadata:
  - manager: "manager-one"
    operation: Apply
    apiVersion: example.com/v1
    fieldsType: FieldsV1
    fieldsV1:
      f:spec:
        f:data: {}
spec:

@ -1,5 +1,5 @@
---
title: Overprovision Node Capacity For A Cluster
content_type: task
weight: 10
---

@ -7,24 +7,29 @@ weight: 10

<!-- overview -->

This page guides you through configuring {{< glossary_tooltip text="Node" term_id="node" >}}
overprovisioning in your Kubernetes cluster. Node overprovisioning is a strategy that proactively
reserves a portion of your cluster's compute resources. This reservation helps reduce the time
required to schedule new pods during scaling events, enhancing your cluster's responsiveness
to sudden spikes in traffic or workload demands.

By maintaining some unused capacity, you ensure that resources are immediately available when
new pods are created, preventing them from entering a pending state while the cluster scales up.

## {{% heading "prerequisites" %}}

- You need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with
  your cluster.
- You should already have a basic understanding of
  [Deployments](/docs/concepts/workloads/controllers/deployment/),
  Pod {{< glossary_tooltip text="priority" term_id="pod-priority" >}},
  and {{< glossary_tooltip text="PriorityClasses" term_id="priority-class" >}}.
- Your cluster must be set up with an [autoscaler](/docs/concepts/cluster-administration/cluster-autoscaling/)
  that manages nodes based on demand.

<!-- steps -->

## Create a PriorityClass

Begin by defining a PriorityClass for the placeholder Pods: create a PriorityClass with a
negative priority value, which you will shortly assign to the placeholder Pods.

@ -43,14 +48,24 @@ When you add this to your cluster, Kubernetes runs those placeholder pods to res
is a capacity shortage, the control plane will pick one of these placeholder pods as the first candidate to
{{< glossary_tooltip text="preempt" term_id="preemption" >}}.

## Run Pods that request node capacity

Review the sample manifest:

{{% code_sample language="yaml" file="deployments/deployment-with-capacity-reservation.yaml" %}}

### Pick a namespace for the placeholder pods

You should select, or create, a {{< glossary_tooltip term_id="namespace" text="namespace" >}}
that the placeholder Pods will go into.

### Create the placeholder deployment

Create a Deployment based on that manifest:

```shell
# Change the namespace name "example"
kubectl --namespace example apply -f https://k8s.io/examples/deployments/deployment-with-capacity-reservation.yaml
```

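You can then confirm that the placeholder Pods are running before moving on. Assuming you used the namespace `example` (an assumption carried over from the command above), something like:

```shell
kubectl --namespace example get deployment capacity-reservation
kubectl --namespace example get pods -o wide
```
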
## Adjust placeholder resource requests

@ -61,7 +76,13 @@ To edit the Deployment, modify the `resources` section in the Deployment manifes
to set appropriate requests and limits. You can download that file locally and then edit it
with whichever text editor you prefer.

You can also edit the Deployment using kubectl:

```shell
kubectl edit deployment capacity-reservation
```

For example, to reserve a total of 0.5 CPU and 1GiB of memory across 5 placeholder pods,
define the resource requests and limits for a single placeholder pod as follows:

```yaml
@ -77,10 +98,10 @@ define the resource requests and limits for a single placeholder pod as follows:

### Calculate the total reserved resources

<!-- trailing whitespace in next paragraph is significant -->
For example, with 5 replicas each reserving 0.1 CPU and 200MiB of memory:  
Total CPU reserved: 5 × 0.1 = 0.5 (in the Pod specification, you'll write the quantity `500m`)  
Total memory reserved: 5 × 200MiB = 1GiB (in the Pod specification, you'll write `1Gi`)

To scale the Deployment, adjust the number of replicas based on your cluster's size and expected workload:
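For example, to scale out the placeholder capacity you could run something like the following; the namespace and replica count here are illustrative, not prescribed by this task:

```shell
kubectl --namespace example scale deployment capacity-reservation --replicas=10
```
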
@ -90,7 +90,7 @@ kubectl expose deployment/my-nginx
service/my-nginx exposed
```

This is equivalent to running `kubectl apply -f` with the following YAML:

{{% code_sample file="service/networking/nginx-svc.yaml" %}}

@ -17,7 +17,7 @@ graceful connection draining.

## Termination process for Pods and their endpoints

There are often cases when you need to terminate a Pod - be it to upgrade or to scale down.
In order to improve application availability, it may be important to implement
proper draining of active connections.

@ -29,12 +29,12 @@ a simple nginx web server to demonstrate the concept.

## Example flow with endpoint termination

The following is the example flow described in the
[Termination of Pods](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination)
document.

Let's say you have a Deployment containing a single `nginx` replica
(just for demonstration purposes) and a Service:

{{% code_sample file="service/pod-with-graceful-termination.yaml" %}}

|
|||
```
|
||||
|
||||
This allows applications to communicate their state during termination
|
||||
and clients (such as load balancers) to implement a connections draining functionality.
|
||||
and clients (such as load balancers) to implement connection draining functionality.
|
||||
These clients may detect terminating endpoints and implement a special logic for them.
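
For example, the conditions reported for a terminating endpoint in an EndpointSlice might look like the following sketch; the address is a placeholder and the exact values depend on your workload:

```yaml
endpoints:
  - addresses:
      - "10.0.0.12"          # placeholder Pod IP
    conditions:
      ready: false           # terminating endpoints are never reported as ready
      serving: true          # the Pod can still serve traffic while it shuts down
      terminating: true      # signals clients that connection draining is in progress
```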

In Kubernetes, endpoints that are terminating always have their `ready` status set as `false`.
This needs to happen for backward
compatibility, so existing load balancers will not use it for regular traffic.
If traffic draining on a terminating pod is needed, the actual readiness can be

@ -2,6 +2,7 @@ apiVersion: apps/v1
kind: Deployment
metadata:
  name: capacity-reservation
  # You should decide what namespace to deploy this into
spec:
  replicas: 1
  selector:

@ -1,7 +1,7 @@
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: placeholder # these Pods represent placeholder capacity
value: -1000
globalDefault: false
description: "Negative priority for placeholder pods to enable overprovisioning."

@ -10,7 +10,7 @@ cid: home
|
|||
{{% blocks/feature image="flower" %}}
|
||||
### Kubernetes (K8s) es una plataforma de código abierto para automatizar la implementación, el escalado y la administración de aplicaciones en contenedores.
|
||||
|
||||
Kubernetes agrupa los contenedores que conforman una aplicación en unidades lógicas para una fácil administración y descubrimiento. Kubernetes se basa en [15 años de experiencia en la ejecución de cargas de trabajo de producción en Google](http://queue.acm.org/detail.cfm?id=2898444), combinada con las mejores ideas y prácticas de la comunidad.
|
||||
Kubernetes agrupa los contenedores que conforman una aplicación en unidades lógicas para una fácil administración y descubrimiento. Kubernetes se basa en [15 años de experiencia en la ejecución de cargas de trabajo de producción en Google](https://queue.acm.org/detail.cfm?id=2898444), combinada con las mejores ideas y prácticas de la comunidad.
|
||||
{{% /blocks/feature %}}
|
||||
|
||||
{{% blocks/feature image="scalable" %}}
|
||||
|
|
|
@ -0,0 +1,414 @@
|
|||
---
|
||||
title: ノードの圧迫による退避
|
||||
content_type: concept
|
||||
weight: 100
|
||||
---
|
||||
|
||||
{{<glossary_definition term_id="node-pressure-eviction" length="short">}}</br>
|
||||
|
||||
{{< feature-state feature_gate_name="KubeletSeparateDiskGC" >}}
|
||||
|
||||
{{<note>}}
|
||||
_分割イメージファイルシステム_ 機能は、`containerfs`ファイルシステムのサポートを有効にし、いくつかの新しい退避シグナル、閾値、メトリクスを追加します。
|
||||
`containerfs`を使用するには、Kubernetesリリース v{{< skew currentVersion >}}で`KubeletSeparateDiskGC`[フィーチャーゲート](/ja/docs/reference/command-line-tools-reference/feature-gates/)を有効にする必要があります。
|
||||
現在、`containerfs`ファイルシステムのサポートを提供しているのはCRI-O(v1.29以降)のみです。
|
||||
{{</note>}}
|
||||
|
||||
{{<glossary_tooltip term_id="kubelet" text="kubelet">}}は、クラスターのノード上のメモリ、ディスク容量、ファイルシステムのinodeといったのリソースを監視します。
|
||||
これらのリソースの1つ以上が特定の消費レベルに達すると、kubeletはリソースの枯渇を防ぐため、ノード上の1つ以上のPodを事前に停止してリソースを回収します。
|
||||
|
||||
ノードのリソース枯渇による退避中に、kubeletは選択された[Podのフェーズ](/ja/docs/concepts/workloads/pods/pod-lifecycle/#pod-phase)を`Failed`に設定し、Podを終了します。
|
||||
|
||||
ノードのリソース枯渇による退避は、[APIを起点とした退避](/ja/docs/concepts/scheduling-eviction/api-eviction/)とは異なります。
|
||||
|
||||
kubeletは、設定した{{<glossary_tooltip term_id="pod-disruption-budget" text="PodDisruptionBudget">}}やPodの`terminationGracePeriodSeconds`を考慮しません。
|
||||
[ソフト退避の閾値](#soft-eviction-thresholds)を使用する場合は、kubeletは設定された`eviction-max-pod-grace-period`を順守します。
|
||||
[ハード退避の閾値](#hard-eviction-thresholds)を使用する場合は、kubeletは終了に`0秒`の猶予期間(即時シャットダウン)を使用します。
|
||||
|
||||
## 自己修復の仕組み
|
||||
|
||||
kubeletは、エンドユーザーのPodを終了する前に[ノードレベルのリソースを回収](#reclaim-node-resources)しようとします。
|
||||
例えば、ディスクリソースが枯渇している場合は未使用のコンテナイメージを削除します。
|
||||
|
||||
失敗したPodを置き換える{{< glossary_tooltip text="ワークロード" term_id="workload" >}}管理オブジェクト({{< glossary_tooltip text="StatefulSet" term_id="statefulset" >}}や{{< glossary_tooltip text="Deployment" term_id="deployment" >}})によってPodが管理されている場合、コントロールプレーン(`kube-controller-manager`)は退避されたPodの代わりに新しいPodを作成します。
|
||||
|
||||
### static Podの自己修復
|
||||
|
||||
リソースが圧迫しているノード上で[static pod](/ja/docs/concepts/workloads/pods/#static-pods)が実行されている場合、kubeletはそのstatic Podを退避することがあります。
|
||||
static Podは常にそのノード上でPodを実行しようとするため、kubeletは代替のPodの作成を試みます。
|
||||
|
||||
kubeletは、代替のPodを作成する際にstatic Podの _priority_ を考慮します。
|
||||
static Podのマニフェストで低い優先度が指定され、クラスターのコントロールプレーン内で定義されたより高い優先度のPodがあります。
|
||||
ノードのリソースが圧迫されている場合、kubeletはそのstatic Podのためにスペースを確保できない可能性があります。
|
||||
kubeletは、ノードのリソースが圧迫されている場合でもすべてのstatic Podの実行を試行し続けます。
|
||||
|
||||
## 退避シグナルと閾値
|
||||
|
||||
kubeletは、退避を決定するために次のようにさまざまなパラメータを使用します:
|
||||
|
||||
- 退避シグナル
|
||||
- 退避閾値
|
||||
- 監視間隔
|
||||
|
||||
### 退避シグナル {#eviction-signals}
|
||||
|
||||
退避シグナルは、ある時点での特定リソースの状態を表します。
|
||||
kubeletは退避シグナルを使用して、シグナルと退避閾値(ノード上で利用可能なリソースの最小量)を比較して退避を決定します。
|
||||
|
||||
kubeletは次の退避シグナルを使用します:
|
||||
|
||||
| 退避シグナル | 説明 | Linux専用 |
|
||||
|--------------------------|---------------------------------------------------------------------------------------|------------|
|
||||
| `memory.available` | `memory.available` := `node.status.capacity[memory]` - `node.stats.memory.workingSet` | |
|
||||
| `nodefs.available` | `nodefs.available` := `node.stats.fs.available` | |
|
||||
| `nodefs.inodesFree` | `nodefs.inodesFree` := `node.stats.fs.inodesFree` | • |
|
||||
| `imagefs.available` | `imagefs.available` := `node.stats.runtime.imagefs.available` | |
|
||||
| `imagefs.inodesFree` | `imagefs.inodesFree` := `node.stats.runtime.imagefs.inodesFree` | • |
|
||||
| `containerfs.available` | `containerfs.available` := `node.stats.runtime.containerfs.available` | |
|
||||
| `containerfs.inodesFree` | `containerfs.inodesFree` := `node.stats.runtime.containerfs.inodesFree` | • |
|
||||
| `pid.available` | `pid.available` := `node.stats.rlimit.maxpid` - `node.stats.rlimit.curproc` | • |
|
||||
|
||||
この表では、**説明**列がシグナルの値の取得方法を示しています。
|
||||
それぞれのシグナルは、パーセンテージまたはリテラル値をサポートします。
|
||||
kubeletは、シグナルに関連付けられた総容量に対する割合を計算します。
|
||||
|
||||
#### メモリシグナル
|
||||
|
||||
Linuxノード上では、`free -m`のようなツールの代わりにcgroupfsから`memory.available`の値が取得されます。
|
||||
これは重要なことであり、`free -m`はコンテナ内で動作せず、ユーザーが[Node Allocatable](/docs/tasks/administer-cluster/reserve-compute-resources/#node-allocatable)機能を使用する場合、リソース不足の判断はルートノードと同様にcgroup階層のエンドユーザーPodの一部に対してローカルに行われるためです。
|
||||
この[スクリプト](/examples/admin/resource/memory-available.sh)または[cgroupv2スクリプト](/examples/admin/resource/memory-available-cgroupv2.sh)は、kubeletが`memory.available`を計算するために実行する一連の手順を再現します。
|
||||
kubeletは、圧迫下でもメモリが再利用可能であると想定しているため、inactive_file(非アクティブなLRUリスト上のファイルベースのメモリのバイト数)を計算から除外します。
|
||||
|
||||
Windowsノードでは、`memory.available`の値は、ノードのグローバルメモリコミットレベル([`GetPerformanceInfo関数`](https://learn.microsoft.com/windows/win32/api/psapi/nf-psapi-getperformanceinfo)システムコールによって参照)から、ノードの[`CommitLimit`](https://learn.microsoft.com/windows/win32/api/psapi/ns-psapi-performance_information)からノードのグローバル[`CommitTotal`](https://learn.microsoft.com/windows/win32/api/psapi/ns-psapi-performance_information)を減算することによって導出されます。
|
||||
ノードのページファイルサイズが変更されると、`CommitLimit`も変更されることに注意してください。
|
||||
|
||||
#### ファイルシステムシグナル
|
||||
|
||||
kubeletは、退避シグナル(`<identifier>.inodesFree`や`<identifier>.available`)で使用できる3つの特定のファイルシステム識別子を認識します:
|
||||
|
||||
1. `nodefs`: ノードのファイルシステムであり、ローカルディスクボリューム、メモリにバックアップされていないemptyDirボリューム、ログストレージ、エフェメラルストレージなどに使用されます。
|
||||
例えば、`nodefs`には`/var/lib/kubelet`が含まれます。
|
||||
|
||||
1. `imagefs`: コンテナランタイムがコンテナイメージ(読み取り専用レイヤー)とコンテナの書き込みレイヤーを格納するために使用できるオプションのファイルシステムです。
|
||||
|
||||
1. `containerfs`: コンテナランタイムが書き込み可能なレイヤーを格納するために使用できるオプションのファイルシステムです。
|
||||
メインファイルシステム(`nodefs`を参照)と同様に、ローカルディスクボリューム、メモリにバックアップされていないemptyDirボリューム、ログストレージ、エフェメラルストレージに使用されますが、コンテナイメージは含まれません。
|
||||
`containerfs`を使用すると、`imagefs`ファイルシステムをコンテナイメージ(読み取り専用レイヤー)のみを格納するように分割できます。
|
||||
|
||||
したがって、kubeletは通常コンテナファイルシステムについて次の3つのオプションを許可します:
|
||||
|
||||
- すべてが単一の`nodefs`にある場合、"rootfs"または単に"root"として参照され、専用のイメージファイルシステムはありません。
|
||||
|
||||
- コンテナストレージ(`nodefs`を参照)は専用のディスクにあり、`imagefs`(書き込み可能レイヤーと読み取り専用レイヤー)はルートファイルシステムから分離されています。
|
||||
これはよく「分割ディスク」(または「分離ディスク」)ファイルシステムと呼ばれます。
|
||||
|
||||
- コンテナファイルシステム`containerfs`(書き込み可能レイヤーを含む`nodefs`と同じ)がルートにあり、コンテナイメージ(読み取り専用レイヤー)は分離された`imagefs`に格納されています。
|
||||
これはよく「分割イメージ」ファイルシステムと呼ばれます。
|
||||
|
||||
kubeletは、これらのファイルシステムを現在の構成に基づいてコンテナランタイムから直接自動検出しようとし、他のローカルノードファイルシステムを無視します。
|
||||
|
||||
kubeletは、他のコンテナファイルシステムやストレージ構成をサポートせず、現在イメージとコンテナに対して複数のファイルシステムをサポートしていません。
|
||||
|
||||
### 非推奨のkubeletガベージコレクション機能
|
||||
|
||||
一部のkubeletガベージコレクション機能は、退避に置き換えられるため非推奨となりました:
|
||||
|
||||
| 既存フラグ | 理由 |
|
||||
| ------------- | --------- |
|
||||
| `--maximum-dead-containers` | 古いログがコンテナのコンテキスト外に保存されると非推奨になります |
|
||||
| `--maximum-dead-containers-per-container` | 古いログがコンテナのコンテキスト外に保存されると非推奨になります |
|
||||
| `--minimum-container-ttl-duration` | 古いログがコンテナのコンテキスト外に保存されると非推奨になります |
|
||||
|
||||
### 退避閾値
|
||||
|
||||
kubeletは、退避の判断を行うときに使用するカスタムの退避閾値を指定できます。
|
||||
[ソフト退避の閾値](#soft-eviction-thresholds)と[ハード退避の閾値](#hard-eviction-thresholds)の退避閾値を構成できます。
|
||||
|
||||
退避閾値は`[eviction-signal][operator][quantity]`の形式を取ります:
|
||||
|
||||
- `eviction-signal`は、使用する[退避シグナル](#eviction-signals)です。
|
||||
- `operator`は、`<`(より小さい)などの[関係演算子](https://ja.wikipedia.org/wiki/%E9%96%A2%E4%BF%82%E6%BC%94%E7%AE%97%E5%AD%90#%E6%A8%99%E6%BA%96%E7%9A%84%E3%81%AA%E9%96%A2%E4%BF%82%E6%BC%94%E7%AE%97%E5%AD%90)です。
|
||||
- `quantity`は、`1Gi`などの退避閾値量です。
|
||||
`quantity`の値はKubernetesで使用される数量表現と一致する必要があります。
|
||||
リテラル値またはパーセンテージ(`%`)を使用できます。
|
||||
|
||||
例えば、ノードの総メモリが10GiBで、利用可能なメモリが1GiB未満になった場合に退避をトリガーする場合、退避閾値を`memory.available<10%`または`memory.available<1Gi`のどちらかで定義できます(両方を使用することはできません)。
|
||||
|
||||
#### ソフト退避の閾値 {#soft-eviction-thresholds}
|
||||
|
||||
ソフト退避閾値は、退避閾値と必須の管理者指定の猶予期間をペアにします。
|
||||
kubeletは猶予期間が経過するまでポッドを退避しません。
|
||||
kubeletは猶予期間を指定しない場合、起動時にエラーを返します。
|
||||
|
||||
ソフト退避閾値の猶予期間と、kubeletが退避中に使用する最大許容Pod終了の猶予期間を両方指定できます。
|
||||
最大許容猶予期間を指定しており、かつソフト退避閾値に達した場合、kubeletは2つの猶予期間のうち短い方を使用します。
|
||||
最大許容猶予期間を指定していない場合、kubeletはグレースフルな終了ではなくPodを即座に終了します。
|
||||
|
||||
ソフト退避閾値を構成するために次のフラグを使用できます:
|
||||
|
||||
- `eviction-soft`: 指定された猶予期間を超えた場合にPodの退避をトリガーする、`memory.available<1.5Gi`のような退避閾値のセット。
|
||||
- `eviction-soft-grace-period`: Podと退避をトリガーする前にソフト退避閾値を保持する必要がある時間を定義する、`memory.available=1m30s`のような退避猶予期間のセット。
|
||||
- `eviction-max-pod-grace-period`: ソフト退避閾値に達した場合、Podを終了する際に使用する最大許容猶予期間(秒)。
|
||||
|
||||
#### ハード退避の閾値 {#hard-eviction-thresholds}
|
||||
|
||||
ハード退避閾値には、猶予期間がありません。
|
||||
ハード退避閾値に達した場合、kubeletはグレースフルな終了ではなく即座にポッドを終了してリソースを回収します。
|
||||
|
||||
`eviction-hard`フラグを使用して、`memory.available<1Gi`のようなハード退避閾値のセットを構成します。
|
||||
|
||||
kubeletには、次のデフォルトのハード退避閾値があります:
|
||||
|
||||
- `memory.available<100Mi`(Linuxノード)
|
||||
- `memory.available<500Mi`(Windowsノード)
|
||||
- `nodefs.available<10%`
|
||||
- `imagefs.available<15%`
|
||||
- `nodefs.inodesFree<5%`(Linuxノード)
|
||||
- `imagefs.inodesFree<5%`(Linuxノード)
|
||||
|
||||
これらのハード退避閾値のデフォルト値は、いずれのパラメーター値も変更されていない場合にのみ設定されます。
|
||||
いずれかのパラメーター値を変更すると、他のパラメーター値はデフォルト値として継承されず、ゼロに設定されます。
|
||||
カスタム値を指定するには、すべての閾値を指定する必要があります。
|
||||
|
||||
`containerfs.available`と`containerfs.inodesFree`(Linuxノード)のデフォルトの退避閾値は次のように設定されます:
|
||||
|
||||
- 単一のファイルシステムがすべてに使用されている場合、`containerfs`の閾値は`nodefs`と同じに設定されます。
|
||||
|
||||
- イメージとコンテナの両方に対して分離されたファイルシステムが構成されている場合、`containerfs`の閾値は`imagefs`と同じに設定されます。
|
||||
|
||||
現在は`containerfs`に関連する閾値のカスタムオーバーライド設定はサポートされていないため、そのような設定を試みると警告が出ます。指定されたカスタム値はすべて無視されます。
|
||||
|
||||
## 退避の監視間隔
|
||||
|
||||
kubeletは、設定された`housekeeping-interval`に基づいて退避閾値を評価しており、デフォルトでは`10s`です。
|
||||
|
||||
### ノードの状態 {#node-conditions}
|
||||
|
||||
kubeletは、猶予期間の構成とは関係なく、ハードまたはソフト退避閾値に達したためにノードが圧迫されていることを示すノードの[Conditions](/ja/docs/concepts/architecture/nodes/#condition)を報告します。
|
||||
|
||||
kubeletは、次のように退避シグナルをノードの状態にマッピングします:
|
||||
|
||||
| ノードのCondition | 退避シグナル | 説明 |
|
||||
|-------------------|---------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------|
|
||||
| `MemoryPressure` | `memory.available` | ノード上の利用可能なメモリが退避閾値に達しています |
|
||||
| `DiskPressure` | `nodefs.available`, `nodefs.inodesFree`, `imagefs.available`, `imagefs.inodesFree`, `containerfs.available`, or `containerfs.inodesFree` | ノードのルートファイルシステム、イメージファイルシステム、またはコンテナファイルシステムのいずれかの利用可能なディスク容量とinodeが退避閾値に達しています |
|
||||
| `PIDPressure` | `pid.available` | (Linux)ノード上で使用可能なプロセス識別子が退避閾値を下回りました |
|
||||
|
||||
コントロールプレーンは、これらのノードの状態をテイントにも[マッピング](/docs/concepts/scheduling-eviction/taint-and-toleration/#taint-nodes-by-condition)します。
|
||||
|
||||
kubeletは、設定された`--node-status-update-frequency`に基づいてノードの状態を更新し、デフォルトでは`10s`です。
|
||||
|
||||
### ノードの状態の振動
|
||||
|
||||
場合によっては、ノードが定義された猶予期間を超えずに、ソフト閾値の上下を振動することがあります。
|
||||
これにより、報告されるノードの状態が`true`と`false`の間で頻繁に切り替わり、不適切な退避の判断をトリガーする可能性があります。
|
||||
|
||||
振動を防ぐために、`eviction-pressure-transition-period`フラグを使用できます。
|
||||
このフラグは、kubeletがノードの状態を別の状態に遷移させるまでの時間を制御します。
|
||||
デフォルトの遷移期間は`5m`です。
|
||||
|
||||
### ノードレベルのリソースの回収 {#reclaim-node-resources}
|
||||
|
||||
kubeletは、エンドユーザーのPodを退避する前にのノードレベルのリソースを回収しようとします。
|
||||
|
||||
ノードの`DiskPressure`状態が報告されると、kubeletはノード上のファイルシステムに基づいてノードレベルのリソースを回収します。
|
||||
|
||||
#### `imagefs`または`containerfs`がない場合
|
||||
|
||||
ノードに`nodefs`ファイルシステムのみがあり、退避閾値に達した場合、kubeletは次の順序でディスク容量を解放します:
|
||||
|
||||
1. deadなPodとコンテナをガベージコレクションします。
|
||||
1. 未使用のイメージを削除します。
|
||||
|
||||
#### `imagefs`を使用する場合
|
||||
|
||||
ノードにコンテナランタイムが使用するための`imagefs`ファイルシステムがある場合、kubeletは次のようにノードレベルのリソースを回収します:
|
||||
|
||||
- `nodefs`ファイルシステムが退避閾値に達した場合、kubeletは終了したPodとコンテナをガベージコレクションします。
|
||||
|
||||
- `imagefs`ファイルシステムが退避閾値に達した場合、kubeletは未使用のイメージをすべて削除します。
|
||||
|
||||
#### `imagefs`と`containerfs`を使用する場合
|
||||
|
||||
ノードにコンテナランタイムが使用するための`containerfs`と`imagefs`ファイルシステムがある場合、kubeletは次のようにノードレベルのリソースを回収します:
|
||||
|
||||
- `containerfs`ファイルシステムが退避閾値に達した場合、kubeletは終了したPodとコンテナをガベージコレクションします。
|
||||
|
||||
- `imagefs`ファイルシステムが退避閾値に達した場合、kubeletはすべての未使用のイメージを削除します。
|
||||
|
||||
### kubeletの退避におけるPodの選択
|
||||
|
||||
kubeletは、ノードレベルのリソースを回収しても退避シグナルが閾値を下回らない場合、エンドユーザーのPodを退避し始めます。
|
||||
|
||||
kubeletは、次のパラメータを使用してPodの退避順序を決定します:
|
||||
|
||||
1. Podのリソース使用量がリクエストを超えているかどうか
|
||||
1. [Podの優先度](/ja/docs/concepts/scheduling-eviction/pod-priority-preemption/)
|
||||
1. Podのリソース使用量がリクエストを下回っているかどうか
|
||||
|
||||
結果として、kubeletは次の順序でPodをランク付けして退避します:
|
||||
|
||||
1. リソース使用量がリクエストを超えている`BestEffort`または`Burstable`Pod。
|
||||
これらのPodは、その優先度に基づき、リクエストを超える使用量に応じて退避されます。
|
||||
|
||||
1. リソース使用量がリクエストを下回っている`Guaranteed`と`Burstable`Podは、その優先度に基づいて最後に退避されます。
|
||||
|
||||
{{<note>}}
|
||||
kubeletは、Podの[QoSクラス](/docs/concepts/workloads/pods/pod-qos/)を使用して退避順序を決定しません。
|
||||
メモリなどのリソースを回収する際に、QoSクラスを使用して最も退避される可能性の高いPodの順序を予測することができます。
|
||||
QoSの分類はEphemeralStorageのリクエストには適用されないため、例えばノードが`DiskPressure`状態にある場合、上記のシナリオは当てはまりません。
|
||||
{{</note>}}
|
||||
|
||||
`Guaranteed`Podは、すべてのコンテナにリクエストとリミットが指定されており、それらが等しい場合にのみ保証されます。
|
||||
これらのPodは、他のPodのリソース消費によって退避されることはありません。
|
||||
(`kubelet`や`journald`のような)システムデーモンが、`system-reserved`や`kube-reserved`の割り当てよりも多くのリソースを消費しており、ノードにはリクエストより少ないリソースを使用している`Guaranteed`または`Burstable`Podしかない場合、kubeletは他のPodへのリソース枯渇の影響を制限してノードの安定性を保つために、これらのPodのなかから退避するPodを選択する必要があります。
|
||||
この場合、最も低い優先度のPodを退避するように選択します。
|
||||
|
||||
[static Pod](/ja/docs/concepts/workloads/pods/#static-pod)を実行しており、リソース圧迫による退避を回避したい場合は、そのPodに直接`priority`フィールドを設定します。
|
||||
Static Podは`priorityClassName`フィールドをサポートしていません。
|
||||
|
||||
kubeletは、inodeまたはプロセスIDの枯渇に応じてPodを退避する場合、inodeとPIDにはリクエストがないため、Podの相対的な優先度を使用して退避順序を決定します。
|
||||
|
||||
kubeletは、ノードが専用の`imagefs`または`containerfs`ファイルシステムを持っているかどうかに基づいて、異なる方法でPodをソートします:
|
||||
|
||||
#### `imagefs`または`containerfs`がない場合(`nodefs`と`imagefs`は同じファイルシステムを使用します) {#without-imagefs}
|
||||
|
||||
- `nodefs`が退避をトリガーした場合、kubeletはそれらの合計ディスク使用量(`ローカルボリューム + すべてのコンテナのログと書き込み可能レイヤー`)に基づいてPodをソートします。
|
||||
|
||||
#### `imagefs`を使用する場合(`nodefs`と`imagefs`ファイルシステムが分離されている) {#with-imagefs}
|
||||
|
||||
- `nodefs`が退避をトリガーした場合、kubeletは`nodefs`使用量(`ローカルボリューム + すべてのコンテナのログ`)に基づいてPodをソートします。
|
||||
|
||||
- `imagefs`が退避をトリガーした場合、kubeletはすべてのコンテナの書き込み可能レイヤーの使用量に基づいてPodをソートします。
|
||||
|
||||
#### `imagefs`と`containerfs`を使用する場合(`imagefs`と`containerfs`は分割されています) {#with-containersfs}
|
||||
|
||||
- `containerfs`が退避をトリガーした場合、kubeletは`containerfs`使用量(`ローカルボリューム + すべてのコンテナのログと書き込み可能レイヤー`)に基づいてPodをソートします。
|
||||
|
||||
- `imagefs`が退避をトリガーした場合、kubeletは特定のイメージのディスク使用量を表す`イメージのストレージ`ランクに基づいてPodをソートします。
|
||||
|
||||
### 退避による最小の回収
|
||||
|
||||
{{<note>}}
|
||||
Kubernetes v{{< skew currentVersion >}}以降、`containerfs.available`メトリクスのカスタム値を設定することはできません。
|
||||
この特定のメトリクスの構成は、構成に応じて、`nodefs`または`imagefs`に設定された値を自動的に反映するように設定されます。
|
||||
{{</note>}}
|
||||
|
||||
場合によっては、Podの退避によって回収されるリソースがごくわずかであることがあります。
|
||||
このため、kubeletが設定された退避閾値に繰り返し達し、複数の退避をトリガーする可能性があります。
|
||||
|
||||
`--eviction-minimum-reclaim`フラグや[kubeletの設定ファイル](/docs/tasks/administer-cluster/kubelet-config-file/)を使用して、各リソースの最小の回収量を構成できます。
|
||||
kubeletがリソース不足を検知すると、指定した値に達するまでリソースを回収し続けます。
|
||||
|
||||
例えば、次の構成は最小回収量を設定します:
|
||||
|
||||
```yaml
|
||||
apiVersion: kubelet.config.k8s.io/v1beta1
|
||||
kind: KubeletConfiguration
|
||||
evictionHard:
|
||||
memory.available: "500Mi"
|
||||
nodefs.available: "1Gi"
|
||||
imagefs.available: "100Gi"
|
||||
evictionMinimumReclaim:
|
||||
memory.available: "0Mi"
|
||||
nodefs.available: "500Mi"
|
||||
imagefs.available: "2Gi"
|
||||
```
|
||||
|
||||
この例では、`nodefs.available`シグナルが退避閾値に達した場合、kubeletはシグナルが1GiBに達するまでリソースを回収します。
|
||||
その後は500MiBの最小量を回収し続け、利用可能なnodefsストレージが1.5GiBに達するまで続行します。
|
||||
|
||||
同様に、kubeletは`imagefs`リソースを回収し、`imagefs.available`の値が`102Gi`に達するまでリソースを回収を試みます。
|
||||
これは、コンテナイメージストレージの102GiBが利用可能であることを示します。
|
||||
kubeletが回収できるストレージ量が2GiB未満の場合、kubeletは何も回収しません。
|
||||
|
||||
`eviction-minimum-reclaim`のデフォルト値は、すべてのリソースに対して`0`です。
|
||||
|
||||
## ノードのメモリ不足の挙動
|
||||
|
||||
kubeletがメモリを回収する前にノードで _メモリ不足_ (OOM)イベントが発生した場合、ノードは[oom_killer](https://lwn.net/Articles/391222/)に依存して対応します。
|
||||
|
||||
kubeletは、PodのQoSに基づいて各コンテナの`oom_score_adj`値を設定します。
|
||||
|
||||
| サービスの品質 | `oom_score_adj` |
|
||||
|--------------------|-----------------------------------------------------------------------------------|
|
||||
| `Guaranteed` | -997 |
|
||||
| `BestEffort` | 1000 |
|
||||
| `Burstable` | _min(max(2, 1000 - (1000 × memoryRequestBytes) / machineMemoryCapacityBytes), 999)_ |
|
||||
|
||||
{{<note>}}
|
||||
またkubeletは、`system-node-critical`{{<glossary_tooltip text="優先度" term_id="pod-priority">}}を持つPodのコンテナに対して`oom_score_adj`値を`-997`に設定します。
|
||||
{{</note>}}
|
||||
|
||||
kubeletがノードでOOMが発生する前にメモリを回収できない場合、`oom_killer`はそのノード上で使用しているメモリの割合に基づいて`oom_score`を計算し、次に`oom_score_adj`を加算して各コンテナの有効な`oom_score`を計算します。
|
||||
その後、`oom_killer`は最も高いスコアを持つコンテナを終了します。
|
||||
|
||||
これは、スケジューリングリクエストに対して多くのメモリを消費する低いQoS Podのコンテナが最初に終了されることを意味します。
|
||||
|
||||
Podの退避とは異なり、コンテナがOOMで強制終了された場合、kubeletは`restartPolicy`に基づいてコンテナを再起動できます。
|
||||
|
||||
## グッドプラクティス {#node-pressure-eviction-good-practices}
|
||||
|
||||
退避の構成に関するグッドプラクティスを次のセクションで説明します。
|
||||
|
||||
### スケジュール可能なリソースと退避ポリシー
|
||||
|
||||
退避ポリシーを使用してkubeletを構成する場合、スケジューラーがPodのスケジュール直後にメモリ圧迫をトリガーして退避を引き起こさないようにする必要があります。
|
||||
|
||||
次のシナリオを考えてみましょう:
|
||||
|
||||
- ノードのメモリキャパシティ: 10GiB
|
||||
- オペレーターはシステムデーモン(kernel、`kubelet`など)に10%のメモリ容量を予約したい
|
||||
- オペレーターはシステムのOOMの発生を減らすために、メモリ使用率が95%に達したときにPodを退避したい
|
||||
|
||||
この場合、kubeletは次のように起動されます:
|
||||
|
||||
```none
|
||||
--eviction-hard=memory.available<500Mi
|
||||
--system-reserved=memory=1.5Gi
|
||||
```
|
||||
|
||||
この構成では、`--system-reserved`フラグによりシステム用に1.5GiBのメモリが予約されます。
|
||||
これは`総メモリの10% + 退避閾値量`です。
|
||||
|
||||
ノードは、Podがリクエスト以上のメモリを使用している場合や、システムが1GiB以上のメモリを使用している場合に、退避閾値に達する可能性があります。
|
||||
これにより、`memory.available`シグナルが500MiBを下回り、閾値がトリガーされます。
|
||||
|
||||
### DaemonSetとノードの圧迫による退避 {#daemonset}
|
||||
|
||||
Podの優先度は、退避の決定において重要な要素です。
|
||||
kubeletは、DaemonSetに属するPodを退避させたくない場合、そのPodのspecに適切な`priorityClassName`を指定して十分な優先度を与えることができます。
|
||||
より低い、またはデフォルトの優先度を使用して、十分なリソースがある場合にのみDaemonSetのPodを実行できるようにすることも可能です。
|
||||
|
||||
## 既知の問題
|
||||
|
||||
リソースの圧迫に関連する既知の問題について次のセクションで説明します。
|
||||
|
||||
### kubeletが即座にメモリ圧迫を検知しないことがある
|
||||
|
||||
デフォルトでは、kubeletはcAdvisorを定期的にポーリングしてメモリ使用量の統計を収集します。
|
||||
メモリ使用量がその間に急速に増加した場合、kubeletは`MemoryPressure`状態を十分な早さで検知できない可能性があり、OOMキラーが呼び出される可能性があります。
|
||||
|
||||
`--kernel-memcg-notification`フラグにより、kubeletの`memcg`通知APIを有効にして、閾値を超えたとき即座に通知を受け取ることができます。
|
||||
|
||||
極端な使用率を達成しようとするのではなく、合理的なオーバーコミットを目指している場合、この問題に対して実行可能な回避策は`--kube-reserved`および`--system-reserved`フラグを使用してシステム用のメモリを割り当てることです。
|
||||
|
||||
### active_fileメモリは使用可能なメモリとして見なされません
|
||||
|
||||
Linuxでは、カーネルがアクティブなLRUリスト上のファイルベースのメモリのバイト数を`active_file`統計として追跡します。
|
||||
kubeletは、`active_file`メモリの領域を回収不可能として扱います。
|
||||
一時的なローカルストレージを含むブロックベースのローカルストレージを集中的に使用するワークロードの場合、カーネルレベルのファイルおよびブロックデータのキャッシュにより、多くの直近アクセスされたキャッシュページが`active_file`としてカウントされる可能性が高いです。
|
||||
これらのカーネルブロックバッファがアクティブなLRUリストに十分に存在すると、kubeletはこれを高いリソース使用として観測し、ノードにメモリ圧迫が発生しているとしてテイントし、Podの退避をトリガーします。
|
||||
|
||||
より詳細については、[https://github.com/kubernetes/kubernetes/issues/43916](https://github.com/kubernetes/kubernetes/issues/43916)を参照してください。
|
||||
|
||||
その動作を回避するためには、集中的なI/Oアクティビティを行う可能性があるコンテナに対してメモリリミットとメモリリクエストを同じ値に設定します。
|
||||
そのコンテナに対して最適なメモリのリミット値を見積もるか、測定する必要があります。
|
||||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
||||
- [APIを起点とした退避](/ja/docs/concepts/scheduling-eviction/api-eviction/)について学ぶ
|
||||
- [Podの優先度とプリエンプション](/ja/docs/concepts/scheduling-eviction/pod-priority-preemption/)について学ぶ
|
||||
- [PodDisruptionBudgets](/docs/tasks/run-application/configure-pdb/)について学ぶ
|
||||
- [Quality of Service](/ja/docs/tasks/configure-pod-container/quality-service-pod/) (QoS)について学ぶ
|
||||
- [Eviction API](/docs/reference/generated/kubernetes-api/{{<param "version">}}/#create-eviction-pod-v1-core)について学ぶ
|
|
@ -0,0 +1,20 @@
|
|||
---
|
||||
title: ノード圧迫による退避
|
||||
id: node-pressure-eviction
|
||||
date: 2021-05-13
|
||||
full_link: /ja/docs/concepts/scheduling-eviction/node-pressure-eviction/
|
||||
short_description: >
|
||||
ノード圧迫による退避は、kubeletがノード上のリソースを回収するためにPodを積極的に失敗させるプロセスです。
|
||||
aka:
|
||||
- kubelet eviction
|
||||
tags:
|
||||
- operation
|
||||
---
|
||||
ノード圧迫による退避は、{{<glossary_tooltip term_id="kubelet" text="kubelet">}}がノード上のリソースを回収するためにPodを積極的に失敗させるプロセスです。
|
||||
|
||||
<!--more-->
|
||||
|
||||
kubeletは、クラスターのノード上のCPU、メモリ、ディスク容量、ファイルシステムのinodeなどのリソースを監視します。
|
||||
これらのリソースの1つ以上が特定の消費レベルに達すると、kubeletはノード上の1つ以上のPodを積極的に失敗させることでリソースを回収し、枯渇を防ぎます。
|
||||
|
||||
ノード圧迫による退避は、[APIを起点とした退避](/ja/docs/concepts/scheduling-eviction/api-eviction/)とは異なります。
|
|
@ -10,7 +10,7 @@ cid: home
|
|||
{{% blocks/feature image="flower" %}}
|
||||
### Kubernetes (K8s) é um produto Open Source utilizado para automatizar a implantação, o dimensionamento e o gerenciamento de aplicativos em contêiner.
|
||||
|
||||
Ele agrupa contêineres que compõem uma aplicação em unidades lógicas para facilitar o gerenciamento e a descoberta de serviço. O Kubernetes se baseia em [15 anos de experiência na execução de containers em produção no Google](http://queue.acm.org/detail.cfm?id=2898444), combinado com as melhores ideias e práticas da comunidade.
|
||||
Ele agrupa contêineres que compõem uma aplicação em unidades lógicas para facilitar o gerenciamento e a descoberta de serviço. O Kubernetes se baseia em [15 anos de experiência na execução de containers em produção no Google](https://queue.acm.org/detail.cfm?id=2898444), combinado com as melhores ideias e práticas da comunidade.
|
||||
{{% /blocks/feature %}}
|
||||
|
||||
{{% blocks/feature image="scalable" %}}
|
||||
|
|
|
@ -0,0 +1,112 @@
|
|||
---
|
||||
title: Executando tarefas automatizadas com CronJob
|
||||
min-kubernetes-server-version: v1.21
|
||||
content_type: task
|
||||
weight: 10
|
||||
---
|
||||
|
||||
<!-- overview -->
|
||||
|
||||
Esta página mostra como executar tarefas automatizadas usando o objeto {{< glossary_tooltip text="CronJob" term_id="cronjob" >}} no kubernetes.
|
||||
|
||||
## {{% heading "prerequisites" %}}
|
||||
|
||||
* {{< include "task-tutorial-prereqs.md" >}}
|
||||
|
||||
<!-- steps -->
|
||||
|
||||
## Criando um CronJob {#creating-a-cron-job}
|
||||
|
||||
Cron jobs requerem um arquivo de configuração.
|
||||
Aqui está um manifesto para CronJob que executa uma tarefa de demonstração simples a cada minuto:
|
||||
|
||||
{{% code_sample file="application/job/cronjob.yaml" %}}
|
||||
|
||||
Execute o exemplo de CronJob usando o seguinte comando:
|
||||
|
||||
```shell
|
||||
kubectl create -f https://k8s.io/examples/application/job/cronjob.yaml
|
||||
```
|
||||
A saída é semelhante a esta:
|
||||
|
||||
```
|
||||
cronjob.batch/hello created
|
||||
```
|
||||
|
||||
Após criar o cron job, obtenha o status usando este comando:
|
||||
|
||||
```shell
|
||||
kubectl get cronjob hello
|
||||
```
|
||||
|
||||
A saída é semelhante a esta:
|
||||
|
||||
```
|
||||
NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE
|
||||
hello */1 * * * * False 0 <none> 10s
|
||||
```
|
||||
|
||||
Como você pode ver pelos resultados do comando, o cron job ainda não agendou ou executou uma tarefa ainda.
|
||||
{{< glossary_tooltip text="Observe" term_id="watch" >}} que a tarefa será criada em cerca de um minuto:
|
||||
|
||||
```shell
|
||||
kubectl get jobs --watch
|
||||
```
|
||||
A saída é semelhante a esta:
|
||||
|
||||
```
|
||||
NAME COMPLETIONS DURATION AGE
|
||||
hello-4111706356 0/1 0s
|
||||
hello-4111706356 0/1 0s 0s
|
||||
hello-4111706356 1/1 5s 5s
|
||||
```
|
||||
|
||||
Agora você viu uma tarefa em execução agendada pelo cron job "hello".
|
||||
Você pode parar de observá-lo e visualizar o cron job novamente para ver que ele agendou a tarefa:
|
||||
|
||||
```shell
|
||||
kubectl get cronjob hello
|
||||
```
|
||||
|
||||
A saída é semelhante a esta:
|
||||
|
||||
```
|
||||
NAME SCHEDULE SUSPEND ACTIVE LAST SCHEDULE AGE
|
||||
hello */1 * * * * False 0 50s 75s
|
||||
```
|
||||
|
||||
Você deve ver que o cron job `hello` agendou uma tarefa com sucesso no tempo especificado em
|
||||
`LAST SCHEDULE`. Existem atualmente 0 tarefas ativas, o que significa que a tarefa foi concluída ou falhou.
|
||||
|
||||
Agora, encontre os pods da última tarefa agendada criada e veja a saída padrão de um dos pods.
|
||||
|
||||
{{< note >}}
|
||||
O nome da tarefa é diferente do nome do pod.
|
||||
{{< /note >}}
|
||||
|
||||
```shell
|
||||
# Replace "hello-4111706356" with the job name in your system
|
||||
pods=$(kubectl get pods --selector=job-name=hello-4111706356 --output=jsonpath={.items[*].metadata.name})
|
||||
```
|
||||
Veja os logs do pod:
|
||||
|
||||
```shell
|
||||
kubectl logs $pods
|
||||
```
|
||||
A saída é semelhante a esta:
|
||||
|
||||
```
|
||||
Fri Feb 22 11:02:09 UTC 2019
|
||||
Hello from the Kubernetes cluster
|
||||
```
|
||||
|
||||
## Deletando um CronJob {#deleting-a-cron-job}
|
||||
|
||||
Quando você não precisar mais de um cron job, exclua-o com `kubectl delete cronjob <cronjob name>`:
|
||||
|
||||
```shell
|
||||
kubectl delete cronjob hello
|
||||
```
|
||||
|
||||
Excluindo o cron job remove todas as tarefas e pods que ele criou e impede a criação de novas tarefas.
|
||||
Você pode ler mais sobre como remover tarefas em [garbage collection](/docs/concepts/architecture/garbage-collection/).
|
|
@ -1,6 +1,6 @@
|
|||
---
|
||||
title: "Autocompletar no bash macOS"
|
||||
description: "Configurações opcionais do auto-completar do bash no macOS."
|
||||
title: "Autocompletar do bash no macOS"
|
||||
description: "Configurações opcionais para habilitar o autocompletar do bash no macOS."
|
||||
headless: true
|
||||
_build:
|
||||
list: never
|
||||
|
@ -14,17 +14,18 @@ O script de autocompletar do kubectl para Bash pode ser gerado com o comando `ku
|
|||
O script permite habilitar o autocompletar do kubectl no seu shell.
|
||||
|
||||
No entanto, o script autocompletar depende do
|
||||
[**bash-completar**](https://github.com/scop/bash-completion), o que significa que você precisa instalar este software primeiro (executando `type _init_completion` você pode testar se tem o bash-completion instalado).
|
||||
[**bash-completion**](https://github.com/scop/bash-completion), o que significa
|
||||
que você precisa instalar este software primeiro.
|
||||
|
||||
{{< warning>}}
|
||||
Existem duas versões de autocompletar do Bash, v1 e v2. V1 é para Bash 3.2
|
||||
(que é padrão no macOS), e v2 que é para Bash 4.1+. O script de autocompletar
|
||||
do kubectl **não funciona** corretamente com o autocompletar do bash v1 e o
|
||||
Existem duas versões do bash-completion, v1 e v2. V1 é para Bash 3.2
|
||||
(que é padrão no macOS), e v2 é para Bash 4.1+. O script de autocompletar
|
||||
do kubectl **não funciona** corretamente com o bash-completion v1 e o
|
||||
Bash 3.2. Ele requer **bash-completion v2** e **Bash 4.1+**. Por isso, para
|
||||
executarmos o autocompletar do kubectl no macOS de forma correta, você precisa
|
||||
instalar e usar o Bash 4.1+([*guia*](https://itnext.io/upgrading-bash-on-macos-7138bd1066ba)).
|
||||
instalar e usar o Bash 4.1+ ([*guia*](https://itnext.io/upgrading-bash-on-macos-7138bd1066ba)).
|
||||
As instruções a seguir, levam em conta que você utilize o Bash 4.1+.
|
||||
(Isso quer dizer, nenhuma versão do Bash 4.1 ou mais recente).
|
||||
(ou seja, a versão 4.1 do Bash ou qualquer outra mais recente).
|
||||
{{< /warning >}}
|
||||
|
||||
### Atualizando Bash
|
||||
|
@ -35,13 +36,13 @@ As instruções abaixo sugerem que você esteja utilizando o Bash 4.1+. Você po
|
|||
echo $BASH_VERSION
|
||||
```
|
||||
|
||||
Se a versão do Bash for antiga, você pode instalar ou atualizar utilizando o Homebrew:
|
||||
Se a versão do Bash for muito antiga, você pode instalar ou atualizar utilizando o Homebrew:
|
||||
|
||||
```bash
|
||||
brew install bash
|
||||
```
|
||||
|
||||
Recarregue seu shell e verifique se a versão desejada foi instalada ou se está em uso:
|
||||
Recarregue seu shell e verifique se a versão desejada foi instalada e está em uso:
|
||||
|
||||
```bash
|
||||
echo $BASH_VERSION $SHELL
|
||||
|
@ -52,12 +53,12 @@ O Homebrew normalmente instala os pacotes em `/usr/local/bin/bash`.
|
|||
### Instalar bash-completar
|
||||
|
||||
{{< note >}}
|
||||
Como mencionado anteriormente, essas instruções levam em consideração que você esteja utilizando o Bash 4.1+, dessa forma você
|
||||
vai instalar o bash-completion v2 (diferentemente do Bash 3.2 e do bash-completion v1,
|
||||
nesses casos, o completar do kubectl não irá funcionar).
|
||||
Como mencionado anteriormente, essas instruções assumem que você esteja utilizando
|
||||
o Bash 4.1+. Por isso, você irá instalar o bash-completion v2 (em contraste ao
|
||||
Bash 3.2 e bash-completion v1, caso em que o autocompletar do kubectl não irá funcionar).
|
||||
{{< /note >}}
|
||||
|
||||
Você pode testar se você tiver o bash-completion v2 instalado, utilizando `type _init_completion`.
|
||||
Você pode testar se o bash-completion v2 está instalado, utilizando `type _init_completion`.
|
||||
Se não, você pode instalar utilizando o Homebrew:
|
||||
|
||||
```bash
|
||||
|
@ -70,7 +71,7 @@ Como indicado na saída deste comando, adicione a seguinte linha em seu arquivo
|
|||
brew_etc="$(brew --prefix)/etc" && [[ -r "${brew_etc}/profile.d/bash_completion.sh" ]] && . "${brew_etc}/profile.d/bash_completion.sh"
|
||||
```
|
||||
|
||||
Recarregue seu shell e verifique que o bash-completion v2 está instalado corretamente, utilizando `type _init_completion`.
|
||||
Recarregue seu shell e verifique que o bash-completion v2 está instalado corretamente utilizando `type _init_completion`.
|
||||
|
||||
### Habilitar autocompletar do kubectl
|
||||
|
||||
|
@ -97,13 +98,13 @@ as suas sessões de shell. Existem várias maneiras de fazer isso:
|
|||
```
|
||||
|
||||
- Se você tiver instalado o kubectl com o Homebrew(conforme explicado
|
||||
[aqui](/docs/tasks/tools/install-kubectl-macos/#install-with-homebrew-on-macos)),
|
||||
[aqui](/docs/tasks/tools/install-kubectl-macos/#instalando-o-kubectl-no-macos)),
|
||||
então o script de autocompletar do kubectl deverá estar pronto em `/usr/local/etc/bash_completion.d/kubectl`.
|
||||
Neste caso, você não precisa fazer mais nada.
|
||||
|
||||
{{< note >}}
|
||||
A instalação do bash-completion v2 via Homebrew carrega todos os arquivos no diretório
|
||||
`BASH_COMPLETION_COMPAT_DIR`, é por isso que os dois últimos métodos funcionam..
|
||||
`BASH_COMPLETION_COMPAT_DIR`, é por isso que os dois últimos métodos funcionam.
|
||||
{{< /note >}}
|
||||
|
||||
De qualquer forma, após recarregar seu shell, o auto-completar do kubectl deve estar funcionando.
|
||||
Em todos os casos, após recarregar seu shell, o autocompletar do kubectl deve estar funcionando.
|
||||
|
|
|
@ -7,21 +7,21 @@ weight: 10
|
|||
## {{% heading "prerequisites" %}}
|
||||
|
||||
Você deve usar uma versão do kubectl que esteja próxima da versão do seu cluster.
|
||||
Por exemplo, um cliente v{{< skew currentVersion >}} pode se comunicar
|
||||
com control planes nas versões v{{< skew currentVersionAddMinor -1 >}}, v{{< skew currentVersionAddMinor 0 >}},
|
||||
e v{{< skew currentVersionAddMinor 1 >}}.
|
||||
Usar a versão compatível e mais recente do kubectl pode evitar imprevistos ou problemas.
|
||||
Por exemplo, um cliente v{{< skew currentVersion >}} pode se comunicar com as
|
||||
versões v{{< skew currentVersionAddMinor -1 >}}, v{{< skew currentVersionAddMinor 0 >}}
|
||||
e v{{< skew currentVersionAddMinor 1 >}} da camada de gerenciamento. Usar a
|
||||
versão compatível mais recente do kubectl ajuda a evitar problemas inesperados.
|
||||
|
||||
## Instalando o kubectl no macOS
|
||||
|
||||
Existem os seguintes métodos para instalar o kubectl no macOS:
|
||||
|
||||
- [Instalar kubectl no macOS](#instalar-kubectl-no-macos)
|
||||
- [Instalando o kubectl no macOS](#instalando-o-kubectl-no-macos)
|
||||
- [Instalar o kubectl com curl no macOS](#instalar-o-kubectl-com-o-curl-no-macos)
|
||||
- [Instalar com Homebrew no macOS](#instalar-com-homebrew-no-macos)
|
||||
- [Instalar com Macports no macOS](#instalar-com-macports-no-macos)
|
||||
- [Verificar a configuração do kubectl](#verificar-a-configuração-do-kubectl)
|
||||
- [Plugins e ajustes opcionais do kubectl](#plugins-e-ajustes-opcionais-do-kubectl)
|
||||
- [Configurações e plugins opcionais do kubectl](#configurações-e-plugins-opcionais-do-kubectl)
|
||||
- [Habilitar o autocompletar no shell](#ative-o-autocompletar-no-shell)
|
||||
- [Instalar o plugin `kubectl convert`](#instalar-kubectl-convert-plugin)
|
||||
|
||||
|
|
|
@ -21,14 +21,14 @@ weight: 50
|
|||
## Владельцы и зависимости {#owners-dependents}
|
||||
|
||||
Многие объекты в Kubernetes ссылаются друг на друга через [*ссылки владельцев*](/docs/concepts/overview/working-with-objects/owners-dependents/).
|
||||
Ссылки владельцев сообщают плоскости управления какие объекты зависят от других.
|
||||
Kubernetes использует ссылки владельцев, чтобы предоставить плоскости управления и другим API
|
||||
клиентам, возможность очистить связанные ресурсы перед удалением объекта. В большинстве случаев, Kubernetes автоматический управляет ссылками владельцев.
|
||||
Ссылки владельцев сообщают управляющему слою, какие объекты зависят от других.
|
||||
Kubernetes использует ссылки владельцев, чтобы предоставить управляющему слою и другим API
|
||||
клиентам, возможность очистить связанные ресурсы перед удалением объекта. В большинстве случаев, Kubernetes автоматически управляет ссылками владельцев.
|
||||
|
||||
Владелец отличается от [меток и селекторов](/docs/concepts/overview/working-with-objects/labels/)
|
||||
которые также используют некоторые ресурсы. Например, рассмотрим
|
||||
{{<glossary_tooltip text="Службу" term_id="service">}} которая создает объект
|
||||
`EndpointSlice`. Служба использует *метки* чтобы позволить плоскости управления определить какие `EndpointSlice` объекты используются для этой службы. В дополнение
|
||||
`EndpointSlice`. Служба использует *метки* чтобы позволить управляющему слою определить, какие `EndpointSlice` объекты используются для этой службы. В дополнение
|
||||
к меткам, каждый `EndpointSlice` управляет ои имени службы, имеет
|
||||
ссылку владельца. Ссылки владельцев помогают различным частям Kubernetes избегать
|
||||
вмешательства в объекты, которые они не контролируют.
|
||||
|
|
|
@ -4,7 +4,7 @@ id: cloud-controller-manager
|
|||
date: 2018-04-12
|
||||
full_link: /docs/concepts/architecture/cloud-controller/
|
||||
short_description: >
|
||||
Компонент плоскости управления, который интегрирует Kubernetes со сторонними облачными провайдерами.
|
||||
Компонент управляющего слоя, который интегрирует Kubernetes со сторонними облачными провайдерами.
|
||||
aka:
|
||||
tags:
|
||||
- core-object
|
||||
|
|
|
@ -20,10 +20,10 @@ tags:
|
|||
|
||||
Контроллеры отсллеживают общее состояние вашего кластера через
|
||||
{{< glossary_tooltip text="API-сервер" term_id="kube-apiserver" >}} (часть
|
||||
{{< glossary_tooltip text="плоскости управления" term_id="control-plane" >}}).
|
||||
{{< glossary_tooltip text="управляющего слоя" term_id="control-plane" >}}).
|
||||
|
||||
Некоторые контроллеры также работают внутри плоскости управления, обеспечивая
|
||||
управляющие циклы, которые являются ядром для операций Kubernetes. Например:
|
||||
Некоторые контроллеры также работают внутри управляющего слоя (control plane),
|
||||
обеспечивая управляющие циклы, которые являются ядром для операций Kubernetes. Например:
|
||||
контроллер развертывания (deployment controller), контроллер daemonset (daemonset controller),
|
||||
контроллер пространства имен (namespace controller) и контроллер постоянных томов (persistent volume
|
||||
controller) (и другие) работают с {{< glossary_tooltip term_id="kube-controller-manager" >}}.
|
||||
|
|
|
@ -4,13 +4,13 @@ id: kube-scheduler
|
|||
date: 2018-04-12
|
||||
full_link: /docs/reference/generated/kube-scheduler/
|
||||
short_description: >
|
||||
Компонент плоскости управления, который отслеживает созданные поды без привязанного узла и выбирает узел, на котором они должны работать.
|
||||
Компонент управляющего слоя, который отслеживает созданные поды без привязанного узла и выбирает узел, на котором они должны работать.
|
||||
|
||||
aka:
|
||||
tags:
|
||||
- architecture
|
||||
---
|
||||
Компонент плоскости управления, который отслеживает созданные поды без привязанного узла и выбирает узел, на котором они должны работать.
|
||||
Компонент управляющего слоя (control plane), который отслеживает созданные поды без привязанного узла и выбирает узел, на котором они должны работать.
|
||||
|
||||
<!--more-->
|
||||
|
||||
|
|
|
@ -0,0 +1,289 @@
|
|||
---
|
||||
layout: blog
|
||||
title: 'Kubernetes v1.32 预览'
|
||||
date: 2024-11-08
|
||||
slug: kubernetes-1-32-upcoming-changes
|
||||
---
|
||||
<!--
|
||||
layout: blog
|
||||
title: 'Kubernetes v1.32 sneak peek'
|
||||
date: 2024-11-08
|
||||
slug: kubernetes-1-32-upcoming-changes
|
||||
author: >
|
||||
Matteo Bianchi,
|
||||
Edith Puclla,
|
||||
William Rizzo,
|
||||
Ryota Sawada,
|
||||
Rashan Smith
|
||||
-->
|
||||
|
||||
<!--
|
||||
As we get closer to the release date for Kubernetes v1.32, the project develops and matures.
|
||||
Features may be deprecated, removed, or replaced with better ones for the project's overall health.
|
||||
|
||||
This blog outlines some of the planned changes for the Kubernetes v1.32 release,
|
||||
that the release team feels you should be aware of, for the continued maintenance
|
||||
of your Kubernetes environment and keeping up to date with the latest changes.
|
||||
Information listed below is based on the current status of the v1.32 release
|
||||
and may change before the actual release date.
|
||||
-->
|
||||
随着 Kubernetes v1.32 发布日期的临近,Kubernetes 项目继续发展和成熟。
|
||||
在这个过程中,某些特性可能会被弃用、移除或被更好的特性取代,以确保项目的整体健康与发展。
|
||||
|
||||
本文概述了 Kubernetes v1.32 发布的一些计划变更,发布团队认为你应该了解这些变更,
|
||||
以确保你的 Kubernetes 环境得以持续维护并跟上最新的变化。以下信息基于 v1.32
|
||||
发布的当前状态,实际发布日期前可能会有所变动。
|
||||
|
||||
<!--
|
||||
### The Kubernetes API removal and deprecation process
|
||||
|
||||
The Kubernetes project has a well-documented [deprecation policy](/docs/reference/using-api/deprecation-policy/)
|
||||
for features. This policy states that stable APIs may only be deprecated when a newer,
|
||||
stable version of that API is available and that APIs have a minimum lifetime for each stability level.
|
||||
A deprecated API has been marked for removal in a future Kubernetes release will continue to function until
|
||||
removal (at least one year from the deprecation). Its usage will result in a warning being displayed.
|
||||
Removed APIs are no longer available in the current version, so you must migrate to use the replacement instead.
|
||||
-->
|
||||
### Kubernetes API 的移除和弃用流程
|
||||
|
||||
Kubernetes 项目对功能特性有一个文档完备的[弃用策略](/zh-cn/docs/reference/using-api/deprecation-policy/)。
|
||||
该策略规定,只有当较新的、稳定的相同 API 可用时,原有的稳定 API 才可能被弃用,每个稳定级别的 API 都有一个最短的生命周期。
|
||||
弃用的 API 指的是已标记为将在后续发行某个 Kubernetes 版本时移除的 API;
|
||||
移除之前该 API 将继续发挥作用(从弃用起至少一年时间),但使用时会显示一条警告。
|
||||
移除的 API 将在当前版本中不再可用,此时你必须迁移以使用替换的 API。
|
||||
|
||||
<!--
|
||||
* Generally available (GA) or stable API versions may be marked as deprecated but must not be removed within a major version of Kubernetes.
|
||||
|
||||
* Beta or pre-release API versions must be supported for 3 releases after the deprecation.
|
||||
|
||||
* Alpha or experimental API versions may be removed in any release without prior deprecation notice;
|
||||
this process can become a withdrawal in cases where a different implementation for the same feature is already in place.
|
||||
-->
|
||||
* 正式发布的(GA)或稳定的 API 版本可被标记为已弃用,但不得在 Kubernetes 主要版本未变时删除。
|
||||
|
||||
* Beta 或预发布 API 版本,必须保持在被弃用后 3 个发布版本中仍然可用。
|
||||
|
||||
* Alpha 或实验性 API 版本可以在任何版本中删除,不必提前通知;
|
||||
如果同一特性已有不同实施方案,则此过程可能会成为撤销。
|
||||
|
||||
<!--
|
||||
Whether an API is removed due to a feature graduating from beta to stable or because that API did not succeed,
|
||||
all removals comply with this deprecation policy. Whenever an API is removed,
|
||||
migration options are communicated in the [deprecation guide](/docs/reference/using-api/deprecation-guide/).
|
||||
-->
|
||||
无论 API 是因为特性从 Beta 升级到稳定状态还是因为未能成功而被移除,
|
||||
所有移除操作都遵守此弃用策略。每当 API 被移除时,
|
||||
迁移选项都会在[弃用指南](/zh-cn/docs/reference/using-api/deprecation-guide/)中进行说明。
|
||||
|
||||
<!--
|
||||
## Note on the withdrawal of the old DRA implementation
|
||||
|
||||
The enhancement [#3063](https://github.com/kubernetes/enhancements/issues/3063)
|
||||
introduced Dynamic Resource Allocation (DRA) in Kubernetes 1.26.
|
||||
-->
|
||||
## 关于撤回 DRA 的旧的实现的说明
|
||||
|
||||
增强特性 [#3063](https://github.com/kubernetes/enhancements/issues/3063) 在 Kubernetes 1.26
|
||||
中引入了动态资源分配(DRA)。
|
||||
|
||||
<!--
|
||||
However, in Kubernetes v1.32, this approach to DRA will be significantly changed.
|
||||
Code related to the original implementation will be removed, leaving KEP
|
||||
[#4381](https://github.com/kubernetes/enhancements/issues/4381) as the "new" base functionality.
|
||||
-->
|
||||
然而,在 Kubernetes v1.32 中,这种 DRA 的实现方法将发生重大变化。与原来实现相关的代码将被删除,
|
||||
只留下 KEP [#4381](https://github.com/kubernetes/enhancements/issues/4381) 作为"新"的基础特性。
|
||||
|
||||
<!--
|
||||
The decision to change the existing approach originated from its incompatibility with cluster autoscaling
|
||||
as resource availability was non-transparent, complicating decision-making for both Cluster Autoscaler and controllers.
|
||||
The newly added Structured Parameter model substitutes the functionality.
|
||||
-->
|
||||
改变现有方法的决定源于其与集群自动伸缩的不兼容性,因为资源可用性是不透明的,
|
||||
这使得 Cluster Autoscaler 和控制器的决策变得复杂。
|
||||
新增的结构化参数模型替换了原有特性。
|
||||
|
||||
<!--
|
||||
This removal will allow Kubernetes to handle new hardware requirements and resource claims more predictably,
|
||||
bypassing the complexities of back and forth API calls to the kube-apiserver.
|
||||
|
||||
Please also see the enhancement issue [#3063](https://github.com/kubernetes/enhancements/issues/3063) to find out more.
|
||||
-->
|
||||
这次移除将使 Kubernetes 能够更可预测地处理新的硬件需求和资源声明,
|
||||
避免了与 kube-apiserver 之间复杂的来回 API 调用。
|
||||
|
||||
请参阅增强问题 [#3063](https://github.com/kubernetes/enhancements/issues/3063) 以了解更多信息。
|
||||
|
||||
<!--
|
||||
## API removal
|
||||
|
||||
There is only a single API removal planned for [Kubernetes v1.32](/docs/reference/using-api/deprecation-guide/#v1-32):
|
||||
-->
|
||||
## API 移除
|
||||
|
||||
在 [Kubernetes v1.32](/zh-cn/docs/reference/using-api/deprecation-guide/#v1-32) 中,计划仅移除一个 API:
|
||||
|
||||
<!--
|
||||
* The `flowcontrol.apiserver.k8s.io/v1beta3` API version of FlowSchema and PriorityLevelConfiguration has been removed.
|
||||
To prepare for this, you can edit your existing manifests and rewrite client software to use the
|
||||
`flowcontrol.apiserver.k8s.io/v1` API version, available since v1.29.
|
||||
All existing persisted objects are accessible via the new API. Notable changes in `flowcontrol.apiserver.k8s.io/v1beta3`
|
||||
include that the PriorityLevelConfiguration `spec.limited.nominalConcurrencyShares` field only defaults to 30 when unspecified,
|
||||
and an explicit value of 0 is not changed to 30.
|
||||
|
||||
For more information, please refer to the [API deprecation guide](/docs/reference/using-api/deprecation-guide/#v1-32).
|
||||
-->
|
||||
* `flowcontrol.apiserver.k8s.io/v1beta3` 版本的 FlowSchema 和 PriorityLevelConfiguration 已被移除。
|
||||
为了对此做好准备,你可以编辑现有的清单文件并重写客户端软件,使用自 v1.29 起可用的 `flowcontrol.apiserver.k8s.io/v1` API 版本。
|
||||
所有现有的持久化对象都可以通过新 API 访问。`flowcontrol.apiserver.k8s.io/v1beta3` 中的重要变化包括:
|
||||
当未指定时,PriorityLevelConfiguration 的 `spec.limited.nominalConcurrencyShares`
|
||||
字段仅默认为 30,而显式设置的 0 值不会被更改为此默认值。
|
||||
|
||||
有关更多信息,请参阅 [API 弃用指南](/zh-cn/docs/reference/using-api/deprecation-guide/#v1-32)。
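
To illustrate the migration, below is a minimal sketch of a PriorityLevelConfiguration written against the `flowcontrol.apiserver.k8s.io/v1` API; the object name and concurrency value are placeholders rather than anything taken from the release notes.

```yaml
# A minimal sketch (assumed name and values) of an object using the v1 API
# that replaces flowcontrol.apiserver.k8s.io/v1beta3.
apiVersion: flowcontrol.apiserver.k8s.io/v1
kind: PriorityLevelConfiguration
metadata:
  name: example-priority-level        # hypothetical name
spec:
  type: Limited
  limited:
    # In v1, set this explicitly if you relied on the old defaulting;
    # an explicit 0 is no longer rewritten to 30.
    nominalConcurrencyShares: 30
    limitResponse:
      type: Reject
```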
|
||||
|
||||
<!--
|
||||
## Sneak peek of Kubernetes v1.32
|
||||
|
||||
The following list of enhancements is likely to be included in the v1.32 release.
|
||||
This is not a commitment and the release content is subject to change.
|
||||
-->
|
||||
## Kubernetes v1.32 的抢先预览
|
||||
|
||||
以下增强特性有可能会被包含在 v1.32 发布版本中。请注意,这并不是最终承诺,发布内容可能会发生变化。
|
||||
|
||||
<!--
|
||||
### Even more DRA enhancements!
|
||||
|
||||
In this release, like the previous one, the Kubernetes project continues proposing a number
|
||||
of enhancements to the Dynamic Resource Allocation (DRA), a key component of the Kubernetes resource management system.
|
||||
These enhancements aim to improve the flexibility and efficiency of resource allocation for workloads that require specialized hardware,
|
||||
such as GPUs, FPGAs and network adapters. This release introduces improvements,
|
||||
including the addition of resource health status in the Pod status, as outlined in
|
||||
KEP [#4680](https://github.com/kubernetes/enhancements/issues/4680).
|
||||
-->
|
||||
### 更多 DRA 增强特性!
|
||||
|
||||
在此次发布中,就像上一次一样,Kubernetes 项目继续提出多项对动态资源分配(DRA)的增强。
|
||||
DRA 是 Kubernetes 资源管理系统的关键组件,这些增强旨在提高对需要专用硬件(如 GPU、FPGA 和网络适配器)
|
||||
的工作负载进行资源分配的灵活性和效率。此次发布引入了多项改进,包括在 Pod 状态中添加资源健康状态,
|
||||
具体内容详见 KEP [#4680](https://github.com/kubernetes/enhancements/issues/4680)。
|
||||
|
||||
<!--
|
||||
#### Add resource health status to the Pod status
|
||||
|
||||
It isn't easy to know when a Pod uses a device that has failed or is temporarily unhealthy.
|
||||
KEP [#4680](https://github.com/kubernetes/enhancements/issues/4680) proposes exposing device
|
||||
health via Pod `status`, making troubleshooting of Pod crashes easier.
|
||||
-->
|
||||
#### 在 Pod 状态中添加资源健康状态
|
||||
|
||||
当 Pod 使用的设备出现故障或暂时不健康时,很难及时发现。
|
||||
KEP [#4680](https://github.com/kubernetes/enhancements/issues/4680)
|
||||
提议通过 Pod 的 `status` 暴露设备健康状态,从而使 Pod 崩溃的故障排除更加容易。
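
The snippet below is only a rough sketch of how such device health could surface in a Pod's `status`, based on the `allocatedResourcesStatus` field discussed in KEP #4680; treat the field layout, names, and values as assumptions and consult the KEP for the authoritative schema.

```yaml
# Rough sketch of a Pod status fragment; the layout is an assumption based on
# KEP #4680 and may differ from the released schema.
status:
  containerStatuses:
  - name: gpu-worker                  # hypothetical container
    allocatedResourcesStatus:
    - name: nvidia.com/gpu            # hypothetical resource name
      resources:
      - resourceID: gpu-0             # device identifier reported by the plugin or driver
        health: Unhealthy             # surfaced when the device fails
```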
|
||||
|
||||
<!--
|
||||
### Windows strikes back!
|
||||
|
||||
KEP [#4802](https://github.com/kubernetes/enhancements/issues/4802) adds support
|
||||
for graceful shutdowns of Windows nodes in Kubernetes clusters.
|
||||
Before this release, Kubernetes provided graceful node shutdown functionality for
|
||||
Linux nodes but lacked equivalent support for Windows.
|
||||
This enhancement enables the kubelet on Windows nodes to handle system shutdown events properly.
|
||||
In doing so, it ensures that Pods running on Windows nodes are gracefully terminated,
|
||||
allowing workloads to be rescheduled without disruption.
|
||||
This improvement enhances the reliability and stability of clusters that include Windows nodes,
|
||||
especially during a planned maintenance or any system updates.
|
||||
-->
|
||||
### Windows 工作继续
|
||||
|
||||
KEP [#4802](https://github.com/kubernetes/enhancements/issues/4802) 为
|
||||
Kubernetes 集群中的 Windows 节点添加了体面关机支持。
|
||||
在此之前,Kubernetes 为 Linux 节点提供了体面关机特性,但缺乏对 Windows 节点的同等支持。
|
||||
这一增强特性使 Windows 节点上的 kubelet 能够正确处理系统关机事件,确保在 Windows 节点上运行的 Pod 能够体面终止,
|
||||
从而允许工作负载在不受干扰的情况下重新调度。这一改进提高了包含 Windows 节点的集群的可靠性和稳定性,
|
||||
特别是在计划维护或系统更新期间。
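
As a hedged sketch, graceful shutdown on a Windows node would be driven by the kubelet configuration, much like on Linux; the feature gate name below is assumed from KEP #4802 and the durations are placeholders.

```yaml
# Sketch of a Windows node's kubelet configuration; the feature gate name is
# assumed from KEP #4802 and the durations are placeholders.
apiVersion: kubelet.config.k8s.io/v1beta1
kind: KubeletConfiguration
featureGates:
  WindowsGracefulNodeShutdown: true
shutdownGracePeriod: "30s"             # total time allowed for Pod termination
shutdownGracePeriodCriticalPods: "10s" # portion of that time reserved for critical Pods
```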
|
||||
|
||||
<!--
|
||||
### Allow special characters in environment variables
|
||||
|
||||
With the graduation of this [enhancement](https://github.com/kubernetes/enhancements/issues/4369) to beta,
|
||||
Kubernetes now allows almost all printable ASCII characters (excluding "=") to be used as environment variable names.
|
||||
This change addresses the limitations previously imposed on variable naming, facilitating a broader adoption of
|
||||
Kubernetes by accommodating various application needs. The relaxed validation will be enabled by default via the
|
||||
`RelaxedEnvironmentVariableValidation` feature gate, ensuring that users can easily utilize environment
|
||||
variables without strict constraints, enhancing flexibility for developers working with applications like
|
||||
.NET Core that require special characters in their configurations.
|
||||
-->
|
||||
### 允许环境变量中使用特殊字符
|
||||
|
||||
随着这一[增强特性](https://github.com/kubernetes/enhancements/issues/4369)升级到 Beta 阶段,
|
||||
Kubernetes 现在允许几乎所有的可打印 ASCII 字符(不包括 `=`)作为环境变量名称。
|
||||
这一变化解决了此前对变量命名的限制,通过适应各种应用需求,促进了 Kubernetes 的更广泛采用。
|
||||
放宽的验证将通过 `RelaxedEnvironmentVariableValidation` 特性门控默认启用,
|
||||
确保用户可以轻松使用环境变量而不受严格限制,增强了开发者在处理需要特殊字符配置的应用(如 .NET Core)时的灵活性。
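
A rough sketch of what the relaxed validation permits, assuming the `RelaxedEnvironmentVariableValidation` feature gate is enabled on the cluster; the Pod, image, and variable name are hypothetical.

```yaml
# Assumes the RelaxedEnvironmentVariableValidation feature gate is enabled;
# Pod name, image, and variable name are hypothetical.
apiVersion: v1
kind: Pod
metadata:
  name: env-demo
spec:
  containers:
  - name: app
    image: registry.example/app:1.0
    env:
    - name: "Config:Section:Key"      # ":" and "." were previously rejected by validation
      value: "some-value"
```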
|
||||
|
||||
<!--
|
||||
### Make Kubernetes aware of the LoadBalancer behavior
|
||||
|
||||
KEP [#1860](https://github.com/kubernetes/enhancements/issues/1860) graduates to GA,
|
||||
introducing the `ipMode` field for a Service of `type: LoadBalancer`, which can be set to either
|
||||
`"VIP"` or `"Proxy"`. This enhancement is aimed at improving how cloud providers load balancers
|
||||
interact with kube-proxy and it is a change transparent to the end user.
|
||||
The existing behavior of kube-proxy is preserved when using `"VIP"`, where kube-proxy handles the load balancing.
|
||||
Using `"Proxy"` results in traffic sent directly to the load balancer,
|
||||
providing cloud providers greater control over relying on kube-proxy;
|
||||
this means that you could see an improvement in the performance of your load balancer for some cloud providers.
|
||||
-->
|
||||
### 使 Kubernetes 感知到 LoadBalancer 的行为
|
||||
|
||||
KEP [#1860](https://github.com/kubernetes/enhancements/issues/1860) 升级到 GA 阶段,
|
||||
为 `type: LoadBalancer` 类型的 Service 引入了 `ipMode` 字段,该字段可以设置为 `"VIP"` 或 `"Proxy"`。
|
||||
这一增强旨在改善云提供商负载均衡器与 kube-proxy 的交互方式,对最终用户来说是透明的。
|
||||
使用 `"VIP"` 时,kube-proxy 会继续处理负载均衡,保持现有的行为。使用 `"Proxy"` 时,
|
||||
流量将直接发送到负载均衡器,提供云提供商对依赖 kube-proxy 的更大控制权;
|
||||
这意味着对于某些云提供商,你可能会看到负载均衡器性能的提升。
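
As a rough illustration (not an official example from the KEP), a `type: LoadBalancer` Service might report the new field in its status as shown below; the address and `ipMode` value are filled in by the cloud provider's controller, not by the user applying the manifest.

```yaml
# Rough illustration only: the status block is written by the cloud
# provider's controller, not by the user.
apiVersion: v1
kind: Service
metadata:
  name: example-lb                    # hypothetical name
spec:
  type: LoadBalancer
  selector:
    app: example
  ports:
  - port: 80
    targetPort: 8080
status:
  loadBalancer:
    ingress:
    - ip: 203.0.113.10                # address assigned by the cloud provider
      ipMode: Proxy                   # or "VIP"; chosen by the cloud provider
```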
|
||||
|
||||
<!--
|
||||
### Retry generate name for resources
|
||||
|
||||
This [enhancement](https://github.com/kubernetes/enhancements/issues/4420)
|
||||
improves how name conflicts are handled for Kubernetes resources created with the `generateName` field.
|
||||
Previously, if a name conflict occurred, the API server returned a 409 HTTP Conflict error and clients
|
||||
had to manually retry the request. With this update, the API server automatically retries generating
|
||||
a new name up to seven times in case of a conflict. This significantly reduces the chances of collision,
|
||||
ensuring smooth generation of up to 1 million names with less than a 0.1% probability of a conflict,
|
||||
providing more resilience for large-scale workloads.
|
||||
-->
|
||||
### 为资源生成名称时重试
|
||||
|
||||
这一[增强特性](https://github.com/kubernetes/enhancements/issues/4420)改进了使用
|
||||
`generateName` 字段创建 Kubernetes 资源时的名称冲突处理。此前,如果发生名称冲突,
|
||||
API 服务器会返回 409 HTTP 冲突错误,客户端需要手动重试请求。通过此次更新,
|
||||
API 服务器在发生冲突时会自动重试生成新名称,最多重试七次。这显著降低了冲突的可能性,
|
||||
确保生成多达 100 万个名称时冲突的概率低于 0.1%,为大规模工作负载提供了更高的弹性。
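
A minimal sketch of how `generateName` is used; the prefix and data are hypothetical, and the server-side retry described above requires no change on the client.

```yaml
# Hypothetical prefix and data; the API server appends a random suffix
# (for example "sample-config-x7k2q") and now retries on a name collision.
apiVersion: v1
kind: ConfigMap
metadata:
  generateName: sample-config-
data:
  key: value
```

Note that an object like this has to be created with a POST (for example `kubectl create -f`), because `kubectl apply` requires a fixed `metadata.name`.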
|
||||
|
||||
<!--
|
||||
## Want to know more?
|
||||
New features and deprecations are also announced in the Kubernetes release notes.
|
||||
We will formally announce what's new in
|
||||
[Kubernetes v1.32](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.32.md)
|
||||
as part of the CHANGELOG for this release.
|
||||
|
||||
You can see the announcements of changes in the release notes for:
|
||||
-->
|
||||
## 想了解更多?
|
||||
|
||||
新特性和弃用特性也会在 Kubernetes 发布说明中宣布。我们将在此次发布的
|
||||
[Kubernetes v1.32](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.32.md)
|
||||
的 CHANGELOG 中正式宣布新内容。
|
||||
|
||||
你可以在以下版本的发布说明中查看变更公告:
|
||||
|
||||
* [Kubernetes v1.31](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.31.md)
|
||||
|
||||
* [Kubernetes v1.30](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.30.md)
|
||||
|
||||
* [Kubernetes v1.29](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.29.md)
|
||||
|
||||
* [Kubernetes v1.28](https://github.com/kubernetes/kubernetes/blob/master/CHANGELOG/CHANGELOG-1.28.md)
@ -221,5 +221,7 @@ Cluster Proportional Autoscaler 扩缩工作负载的副本数量,而 Cluster
|
|||
|
||||
<!--
|
||||
- Read about [workload-level autoscaling](/docs/concepts/workloads/autoscaling/)
|
||||
- Read about [node overprovisioning](/docs/tasks/administer-cluster/node-overprovisioning/)
|
||||
-->
|
||||
- 参阅[工作负载级别自动扩缩容](/zh-cn/docs/concepts/workloads/autoscaling/)
|
||||
- 参阅[节点超分配](/zh-cn/docs/tasks/administer-cluster/node-overprovisioning/)
|
||||
|
|
|
@ -512,7 +512,7 @@ Credentials can be provided in several ways:
|
|||
- all pods can use any images cached on a node
|
||||
- requires root access to all nodes to set up
|
||||
- Specifying ImagePullSecrets on a Pod
|
||||
- only pods which provide own keys can access the private registry
|
||||
- only pods which provide their own keys can access the private registry
|
||||
- Vendor-specific or local extensions
|
||||
- if you're using a custom node configuration, you (or your cloud
|
||||
provider) can implement your mechanism for authenticating the node
|
||||
|
|
|
@ -19,7 +19,7 @@ weight: 90
|
|||
|
||||
<!--
|
||||
Selecting the appropriate authentication mechanism(s) is a crucial aspect of securing your cluster.
|
||||
Kubernetes provides several built-in mechanisms, each with its own strengths and weaknesses that
|
||||
Kubernetes provides several built-in mechanisms, each with its own strengths and weaknesses that
|
||||
should be carefully considered when choosing the best authentication mechanism for your cluster.
|
||||
-->
|
||||
选择合适的身份认证机制是确保集群安全的一个重要方面。
|
||||
|
@ -27,16 +27,16 @@ Kubernetes 提供了多种内置机制,
|
|||
当为你的集群选择最好的身份认证机制时需要谨慎考虑每种机制的优缺点。
|
||||
|
||||
<!--
|
||||
In general, it is recommended to enable as few authentication mechanisms as possible to simplify
|
||||
In general, it is recommended to enable as few authentication mechanisms as possible to simplify
|
||||
user management and prevent cases where users retain access to a cluster that is no longer required.
|
||||
-->
|
||||
通常情况下,建议启用尽可能少的身份认证机制,
|
||||
以简化用户管理,避免用户仍保有对其不再需要的集群的访问权限的情况。
|
||||
|
||||
<!--
|
||||
It is important to note that Kubernetes does not have an in-built user database within the cluster.
|
||||
Instead, it takes user information from the configured authentication system and uses that to make
|
||||
authorization decisions. Therefore, to audit user access, you need to review credentials from every
|
||||
It is important to note that Kubernetes does not have an in-built user database within the cluster.
|
||||
Instead, it takes user information from the configured authentication system and uses that to make
|
||||
authorization decisions. Therefore, to audit user access, you need to review credentials from every
|
||||
configured authentication source.
|
||||
-->
|
||||
值得注意的是 Kubernetes 集群中并没有内置的用户数据库。
|
||||
|
@ -44,10 +44,10 @@ configured authentication source.
|
|||
因此,要审计用户访问,你需要检视来自每个已配置身份认证数据源的凭据。
|
||||
|
||||
<!--
|
||||
For production clusters with multiple users directly accessing the Kubernetes API, it is
|
||||
recommended to use external authentication sources such as OIDC. The internal authentication
|
||||
mechanisms, such as client certificates and service account tokens, described below, are not
|
||||
suitable for this use-case.
|
||||
For production clusters with multiple users directly accessing the Kubernetes API, it is
|
||||
recommended to use external authentication sources such as OIDC. The internal authentication
|
||||
mechanisms, such as client certificates and service account tokens, described below, are not
|
||||
suitable for this use case.
|
||||
-->
|
||||
对于有多个用户直接访问 Kubernetes API 的生产集群来说,
|
||||
建议使用外部身份认证数据源,例如:OIDC。
|
||||
|
@ -61,9 +61,9 @@ suitable for this use-case.
|
|||
## X.509 客户端证书身份认证 {#x509-client-certificate-authentication}
|
||||
|
||||
<!--
|
||||
Kubernetes leverages [X.509 client certificate](/docs/reference/access-authn-authz/authentication/#x509-client-certificates)
|
||||
authentication for system components, such as when the Kubelet authenticates to the API Server.
|
||||
While this mechanism can also be used for user authentication, it might not be suitable for
|
||||
Kubernetes leverages [X.509 client certificate](/docs/reference/access-authn-authz/authentication/#x509-client-certificates)
|
||||
authentication for system components, such as when the Kubelet authenticates to the API Server.
|
||||
While this mechanism can also be used for user authentication, it might not be suitable for
|
||||
production use due to several restrictions:
|
||||
-->
|
||||
Kubernetes 采用 [X.509 客户端证书](/zh-cn/docs/reference/access-authn-authz/authentication/#x509-client-certificates)
|
||||
|
@ -71,54 +71,53 @@ Kubernetes 采用 [X.509 客户端证书](/zh-cn/docs/reference/access-authn-aut
|
|||
例如 Kubelet 对 API 服务器进行身份认证时。
|
||||
虽然这种机制也可以用于用户身份认证,但由于一些限制它可能不太适合在生产中使用:
|
||||
|
||||
|
||||
<!--
|
||||
- Client certificates cannot be individually revoked. Once compromised, a certificate can be used
|
||||
by an attacker until it expires. To mitigate this risk, it is recommended to configure short
|
||||
- Client certificates cannot be individually revoked. Once compromised, a certificate can be used
|
||||
by an attacker until it expires. To mitigate this risk, it is recommended to configure short
|
||||
lifetimes for user authentication credentials created using client certificates.
|
||||
-->
|
||||
- 客户端证书无法独立撤销。
|
||||
证书一旦被泄露,攻击者就可以使用它,直到证书过期。
|
||||
为了降低这种风险,建议为使用客户端证书创建的用户身份认证凭据配置较短的有效期。
|
||||
<!--
|
||||
- If a certificate needs to be invalidated, the certificate authority must be re-keyed, which
|
||||
can introduce availability risks to the cluster.
|
||||
- If a certificate needs to be invalidated, the certificate authority must be re-keyed, which
|
||||
can introduce availability risks to the cluster.
|
||||
-->
|
||||
- 如果证书需要被作废,必须重新为证书机构设置密钥,但这样做可能给集群带来可用性风险。
|
||||
<!--
|
||||
- There is no permanent record of client certificates created in the cluster. Therefore, all
|
||||
issued certificates must be recorded if you need to keep track of them.
|
||||
- There is no permanent record of client certificates created in the cluster. Therefore, all
|
||||
issued certificates must be recorded if you need to keep track of them.
|
||||
-->
|
||||
- 在集群中创建的客户端证书不会被永久记录。
|
||||
因此,如果你要跟踪所有已签发的证书,就必须将它们记录下来。
|
||||
<!--
|
||||
- Private keys used for client certificate authentication cannot be password-protected. Anyone
|
||||
who can read the file containing the key will be able to make use of it.
|
||||
- Private keys used for client certificate authentication cannot be password-protected. Anyone
|
||||
who can read the file containing the key will be able to make use of it.
|
||||
-->
|
||||
- 用于对客户端证书进行身份认证的私钥不可以启用密码保护。
|
||||
任何可以读取包含密钥文件的人都可以利用该密钥。
|
||||
<!--
|
||||
- Using client certificate authentication requires a direct connection from the client to the
|
||||
API server with no intervening TLS termination points, which can complicate network architectures.
|
||||
- Using client certificate authentication requires a direct connection from the client to the
|
||||
API server with no intervening TLS termination points, which can complicate network architectures.
|
||||
-->
|
||||
- 使用客户端证书身份认证需要客户端直连 API 服务器而不允许中间存在 TLS 终止节点,
|
||||
这一约束可能会使网络架构变得复杂。
|
||||
<!--
|
||||
- Group data is embedded in the `O` value of the client certificate, which means the user's group
|
||||
memberships cannot be changed for the lifetime of the certificate.
|
||||
- Group data is embedded in the `O` value of the client certificate, which means the user's group
|
||||
memberships cannot be changed for the lifetime of the certificate.
|
||||
-->
|
||||
- 组数据包含在客户端证书的 `O` 值中,
|
||||
这意味着在证书有效期内无法更改用户的组成员身份。
|
||||
|
||||
<!--
|
||||
## Static token file {#static-token-file}、
|
||||
## Static token file {#static-token-file}
|
||||
-->
|
||||
## 静态令牌文件 {#static-token-file}
|
||||
|
||||
<!--
|
||||
Although Kubernetes allows you to load credentials from a
|
||||
[static token file](/docs/reference/access-authn-authz/authentication/#static-token-file) located
|
||||
on the control plane node disks, this approach is not recommended for production servers due to
|
||||
Although Kubernetes allows you to load credentials from a
|
||||
[static token file](/docs/reference/access-authn-authz/authentication/#static-token-file) located
|
||||
on the control plane node disks, this approach is not recommended for production servers due to
|
||||
several reasons:
|
||||
-->
|
||||
尽管 Kubernetes 允许你从控制平面节点的磁盘中加载
|
||||
|
@ -130,13 +129,13 @@ several reasons:
|
|||
-->
|
||||
- 凭据以明文的方式存储在控制平面节点的磁盘中,这可能是一种安全风险。
|
||||
<!--
|
||||
- Changing any credential requires a restart of the API server process to take effect, which can
|
||||
impact availability.
|
||||
- Changing any credential requires a restart of the API server process to take effect, which can
|
||||
impact availability.
|
||||
-->
|
||||
- 修改任何凭据都需要重启 API 服务进程使其生效,这会影响可用性。
|
||||
<!--
|
||||
- There is no mechanism available to allow users to rotate their credentials. To rotate a
|
||||
credential, a cluster administrator must modify the token on disk and distribute it to the users.
|
||||
- There is no mechanism available to allow users to rotate their credentials. To rotate a
|
||||
credential, a cluster administrator must modify the token on disk and distribute it to the users.
|
||||
-->
|
||||
- 没有现成的机制让用户轮换其凭据数据。
|
||||
要轮换凭据数据,集群管理员必须修改磁盘上的令牌并将其分发给用户。
|
||||
|
@ -151,27 +150,27 @@ credential, a cluster administrator must modify the token on disk and distribute
|
|||
## 启动引导令牌 {#bootstrap-tokens}
|
||||
|
||||
<!--
|
||||
[Bootstrap tokens](/docs/reference/access-authn-authz/bootstrap-tokens/) are used for joining
|
||||
[Bootstrap tokens](/docs/reference/access-authn-authz/bootstrap-tokens/) are used for joining
|
||||
nodes to clusters and are not recommended for user authentication due to several reasons:
|
||||
-->
|
||||
[启动引导令牌](/zh-cn/docs/reference/access-authn-authz/bootstrap-tokens/)用于节点加入集群,
|
||||
因为下列的一些原因,不建议用于用户身份认证:
|
||||
|
||||
<!--
|
||||
- They have hard-coded group memberships that are not suitable for general use, making them
|
||||
unsuitable for authentication purposes.
|
||||
- They have hard-coded group memberships that are not suitable for general use, making them
|
||||
unsuitable for authentication purposes.
|
||||
-->
|
||||
- 启动引导令牌中包含有硬编码的组成员身份,不适合一般使用,
|
||||
因此不适用于身份认证目的。
|
||||
<!--
|
||||
- Manually generating bootstrap tokens can lead to weak tokens that can be guessed by an attacker,
|
||||
which can be a security risk.
|
||||
- Manually generating bootstrap tokens can lead to weak tokens that can be guessed by an attacker,
|
||||
which can be a security risk.
|
||||
-->
|
||||
- 手动生成启动引导令牌有可能使较弱的令牌容易被攻击者猜到,
|
||||
有可能成为安全隐患。
|
||||
<!--
|
||||
- There is no lockout mechanism available to prevent brute-force attacks, making it easier for
|
||||
attackers to guess or crack the token.
|
||||
- There is no lockout mechanism available to prevent brute-force attacks, making it easier for
|
||||
attackers to guess or crack the token.
|
||||
-->
|
||||
- 没有现成的加锁定机制用来防止暴力破解,
|
||||
这使得攻击者更容易猜测或破解令牌。
|
||||
|
@ -182,10 +181,10 @@ attackers to guess or crack the token.
|
|||
## 服务账号令牌 {#serviceaccount-secret-tokens}
|
||||
|
||||
<!--
|
||||
[Service account secrets](/docs/reference/access-authn-authz/service-accounts-admin/#manual-secret-management-for-serviceaccounts)
|
||||
are available as an option to allow workloads running in the cluster to authenticate to the
|
||||
API server. In Kubernetes < 1.23, these were the default option, however, they are being replaced
|
||||
with TokenRequest API tokens. While these secrets could be used for user authentication, they are
|
||||
[Service account secrets](/docs/reference/access-authn-authz/service-accounts-admin/#manual-secret-management-for-serviceaccounts)
|
||||
are available as an option to allow workloads running in the cluster to authenticate to the
|
||||
API server. In Kubernetes < 1.23, these were the default option, however, they are being replaced
|
||||
with TokenRequest API tokens. While these secrets could be used for user authentication, they are
|
||||
generally unsuitable for a number of reasons:
|
||||
-->
|
||||
[服务账号令牌](/zh-cn/docs/reference/access-authn-authz/service-accounts-admin/#manual-secret-management-for-serviceaccounts)
|
||||
|
@ -198,8 +197,8 @@ generally unsuitable for a number of reasons:
|
|||
-->
|
||||
- 服务账号令牌无法设置有效期,在相关的服务账号被删除前一直有效。
|
||||
<!--
|
||||
- The authentication tokens are visible to any cluster user who can read secrets in the namespace
|
||||
that they are defined in.
|
||||
- The authentication tokens are visible to any cluster user who can read secrets in the namespace
|
||||
that they are defined in.
|
||||
-->
|
||||
- 任何集群用户,只要能读取服务账号令牌定义所在的命名空间中的 Secret,就能看到身份认证令牌。
|
||||
<!--
|
||||
|
@ -213,9 +212,9 @@ that they are defined in.
|
|||
## TokenRequest API 令牌 {#tokenrequest-api-tokens}
|
||||
|
||||
<!--
|
||||
The TokenRequest API is a useful tool for generating short-lived credentials for service
|
||||
authentication to the API server or third-party systems. However, it is not generally recommended
|
||||
for user authentication as there is no revocation method available, and distributing credentials
|
||||
The TokenRequest API is a useful tool for generating short-lived credentials for service
|
||||
authentication to the API server or third-party systems. However, it is not generally recommended
|
||||
for user authentication as there is no revocation method available, and distributing credentials
|
||||
to users in a secure manner can be challenging.
|
||||
-->
|
||||
TokenRequest API 是一种可生成短期凭据的有用工具,所生成的凭据可
|
||||
|
@ -224,7 +223,7 @@ TokenRequest API 是一种可生成短期凭据的有用工具,所生成的凭
|
|||
而且,如何以安全的方式向用户分发凭据信息也是挑战。
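
One common way for workloads to consume TokenRequest-issued credentials is a projected service account token volume, where the kubelet requests and rotates a short-lived token on the Pod's behalf; the names, audience, and image in the sketch below are hypothetical.

```yaml
# Names, audience, and image are hypothetical; the kubelet uses the
# TokenRequest API to obtain and rotate this short-lived token.
apiVersion: v1
kind: Pod
metadata:
  name: short-lived-token-demo
spec:
  serviceAccountName: example-sa
  containers:
  - name: app
    image: registry.example/app:1.0
    volumeMounts:
    - name: api-token
      mountPath: /var/run/secrets/tokens
      readOnly: true
  volumes:
  - name: api-token
    projected:
      sources:
      - serviceAccountToken:
          audience: https://audience.example.com
          expirationSeconds: 600      # short lifetime, as recommended here
          path: token
```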
|
||||
|
||||
<!--
|
||||
When using TokenRequest tokens for service authentication, it is recommended to implement a short
|
||||
When using TokenRequest tokens for service authentication, it is recommended to implement a short
|
||||
lifespan to reduce the impact of compromised tokens.
|
||||
-->
|
||||
当使用 TokenRequest 令牌进行服务身份认证时,
|
||||
|
@ -236,10 +235,10 @@ lifespan to reduce the impact of compromised tokens.
|
|||
## OpenID Connect 令牌身份认证 {#openid-connect-token-authentication}
|
||||
|
||||
<!--
|
||||
Kubernetes supports integrating external authentication services with the Kubernetes API using
|
||||
[OpenID Connect (OIDC)](/docs/reference/access-authn-authz/authentication/#openid-connect-tokens).
|
||||
There is a wide variety of software that can be used to integrate Kubernetes with an identity
|
||||
provider. However, when using OIDC authentication for Kubernetes, it is important to consider the
|
||||
Kubernetes supports integrating external authentication services with the Kubernetes API using
|
||||
[OpenID Connect (OIDC)](/docs/reference/access-authn-authz/authentication/#openid-connect-tokens).
|
||||
There is a wide variety of software that can be used to integrate Kubernetes with an identity
|
||||
provider. However, when using OIDC authentication for Kubernetes, it is important to consider the
|
||||
following hardening measures:
|
||||
-->
|
||||
Kubernetes 支持使用 [OpenID Connect (OIDC)](/zh-cn/docs/reference/access-authn-authz/authentication/#openid-connect-tokens)
|
||||
|
@ -249,8 +248,8 @@ Kubernetes 支持使用 [OpenID Connect (OIDC)](/zh-cn/docs/reference/access-aut
|
|||
必须考虑以下加固措施:
|
||||
|
||||
<!--
|
||||
- The software installed in the cluster to support OIDC authentication should be isolated from
|
||||
general workloads as it will run with high privileges.
|
||||
- The software installed in the cluster to support OIDC authentication should be isolated from
|
||||
general workloads as it will run with high privileges.
|
||||
-->
|
||||
- 安装在集群中用于支持 OIDC 身份认证的软件应该与普通的工作负载隔离,
|
||||
因为它要以较高的特权来运行。
|
||||
|
@ -259,8 +258,8 @@ general workloads as it will run with high privileges.
|
|||
-->
|
||||
- 有些 Kubernetes 托管服务对可使用的 OIDC 服务组件有限制。
|
||||
<!--
|
||||
- As with TokenRequest tokens, OIDC tokens should have a short lifespan to reduce the impact of
|
||||
compromised tokens.
|
||||
- As with TokenRequest tokens, OIDC tokens should have a short lifespan to reduce the impact of
|
||||
compromised tokens.
|
||||
-->
|
||||
- 与 TokenRequest 令牌一样,OIDC 令牌的有效期也应较短,以减少被泄露的令牌所带来的影响。
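
As one possible sketch of wiring an OIDC provider into the API server (shown here via kubeadm's `ClusterConfiguration`); the issuer URL, client ID, and claim names are assumptions for an example identity provider, and the exact flags should be confirmed against the authentication reference.

```yaml
# One possible way to pass OIDC settings to the API server (kubeadm shown);
# issuer URL, client ID, and claim names are assumptions for your provider.
apiVersion: kubeadm.k8s.io/v1beta3
kind: ClusterConfiguration
apiServer:
  extraArgs:
    oidc-issuer-url: "https://idp.example.com"
    oidc-client-id: "kubernetes"
    oidc-username-claim: "email"
    oidc-groups-claim: "groups"
```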
|
||||
|
||||
|
@ -270,11 +269,11 @@ compromised tokens.
|
|||
## Webhook 令牌身份认证 {#webhook-token-authentication}
|
||||
|
||||
<!--
|
||||
[Webhook token authentication](/docs/reference/access-authn-authz/authentication/#webhook-token-authentication)
|
||||
is another option for integrating external authentication providers into Kubernetes. This mechanism
|
||||
allows for an authentication service, either running inside the cluster or externally, to be
|
||||
contacted for an authentication decision over a webhook. It is important to note that the suitability
|
||||
of this mechanism will likely depend on the software used for the authentication service, and there
|
||||
[Webhook token authentication](/docs/reference/access-authn-authz/authentication/#webhook-token-authentication)
|
||||
is another option for integrating external authentication providers into Kubernetes. This mechanism
|
||||
allows for an authentication service, either running inside the cluster or externally, to be
|
||||
contacted for an authentication decision over a webhook. It is important to note that the suitability
|
||||
of this mechanism will likely depend on the software used for the authentication service, and there
|
||||
are some Kubernetes-specific considerations to take into account.
|
||||
-->
|
||||
[Webhook 令牌身份认证](/zh-cn/docs/reference/access-authn-authz/authentication/#webhook-token-authentication)
|
||||
|
@ -285,9 +284,9 @@ are some Kubernetes-specific considerations to take into account.
|
|||
而且还需要考虑一些特定于 Kubernetes 的因素。
|
||||
|
||||
<!--
|
||||
To configure Webhook authentication, access to control plane server filesystems is required. This
|
||||
means that it will not be possible with Managed Kubernetes unless the provider specifically makes it
|
||||
available. Additionally, any software installed in the cluster to support this access should be
|
||||
To configure Webhook authentication, access to control plane server filesystems is required. This
|
||||
means that it will not be possible with Managed Kubernetes unless the provider specifically makes it
|
||||
available. Additionally, any software installed in the cluster to support this access should be
|
||||
isolated from general workloads, as it will run with high privileges.
|
||||
-->
|
||||
要配置 Webhook 身份认证的前提是需要提供控制平面服务器文件系统的访问权限。
|
||||
|
@ -301,11 +300,11 @@ isolated from general workloads, as it will run with high privileges.
|
|||
## 身份认证代理 {#authenticating-proxy}
|
||||
|
||||
<!--
|
||||
Another option for integrating external authentication systems into Kubernetes is to use an
|
||||
[authenticating proxy](/docs/reference/access-authn-authz/authentication/#authenticating-proxy).
|
||||
With this mechanism, Kubernetes expects to receive requests from the proxy with specific header
|
||||
values set, indicating the username and group memberships to assign for authorization purposes.
|
||||
It is important to note that there are specific considerations to take into account when using
|
||||
Another option for integrating external authentication systems into Kubernetes is to use an
|
||||
[authenticating proxy](/docs/reference/access-authn-authz/authentication/#authenticating-proxy).
|
||||
With this mechanism, Kubernetes expects to receive requests from the proxy with specific header
|
||||
values set, indicating the username and group memberships to assign for authorization purposes.
|
||||
It is important to note that there are specific considerations to take into account when using
|
||||
this mechanism.
|
||||
-->
|
||||
将外部身份认证系统集成到 Kubernetes 的另一种方式是使用
|
||||
|
@ -315,8 +314,8 @@ this mechanism.
|
|||
值得注意的是,在使用这种机制时有一些特定的注意事项。
|
||||
|
||||
<!--
|
||||
Firstly, securely configured TLS must be used between the proxy and Kubernetes API server to
|
||||
mitigate the risk of traffic interception or sniffing attacks. This ensures that the communication
|
||||
Firstly, securely configured TLS must be used between the proxy and Kubernetes API server to
|
||||
mitigate the risk of traffic interception or sniffing attacks. This ensures that the communication
|
||||
between the proxy and Kubernetes API server is secure.
|
||||
-->
|
||||
首先,在代理和 Kubernetes API 服务器间必须以安全的方式配置 TLS 连接,
|
||||
|
@ -324,9 +323,22 @@ between the proxy and Kubernetes API server is secure.
|
|||
TLS 连接可以确保代理和 Kubernetes API 服务器间的通信是安全的。
|
||||
|
||||
<!--
|
||||
Secondly, it is important to be aware that an attacker who is able to modify the headers of the
|
||||
request may be able to gain unauthorized access to Kubernetes resources. As such, it is important
|
||||
Secondly, it is important to be aware that an attacker who is able to modify the headers of the
|
||||
request may be able to gain unauthorized access to Kubernetes resources. As such, it is important
|
||||
to ensure that the headers are properly secured and cannot be tampered with.
|
||||
-->
|
||||
其次,需要注意的是,能够修改表头的攻击者可能会在未经授权的情况下访问 Kubernetes 资源。
|
||||
因此,确保标头得到妥善保护并且不会被篡改非常重要。
|
||||
因此,确保标头得到妥善保护并且不会被篡改非常重要。
|
||||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
||||
<!--
|
||||
- [User Authentication](/docs/reference/access-authn-authz/authentication/)
|
||||
- [Authenticating with Bootstrap Tokens](/docs/reference/access-authn-authz/bootstrap-tokens/)
|
||||
- [kubelet Authentication](/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication)
|
||||
- [Authenticating with Service Account Tokens](/docs/reference/access-authn-authz/service-accounts-admin/#bound-service-account-tokens)
|
||||
-->
|
||||
- [用户认证](/zh-cn/docs/reference/access-authn-authz/authentication/)
|
||||
- [使用 Bootstrap 令牌进行身份验证](/zh-cn/docs/reference/access-authn-authz/bootstrap-tokens/)
|
||||
- [kubelet 认证](/zh-cn/docs/reference/access-authn-authz/kubelet-authn-authz/#kubelet-authentication)
|
||||
- [使用服务帐户令牌进行身份验证](/zh-cn/docs/reference/access-authn-authz/service-accounts-admin/#bound-service-account-tokens)
@ -1,29 +0,0 @@
|
|||
---
|
||||
title: 从 Docker 命令行映射到 crictl
|
||||
content_type: reference
|
||||
weight: 10
|
||||
---
|
||||
|
||||
<!--
|
||||
title: Mapping from dockercli to crictl
|
||||
content_type: reference
|
||||
weight: 10
|
||||
-->
|
||||
|
||||
{{< note >}}
|
||||
|
||||
<!--
|
||||
This page is being directed to
|
||||
https://v1-24.docs.kubernetes.io/docs/reference/tools/map-crictl-dockercli/ because of the
|
||||
[removal of dockershim from crictl in v1.24](https://github.com/kubernetes-sigs/cri-tools/issues/870).
|
||||
As per our community policy, deprecated documents are not maintained beyond next three versions.
|
||||
The reason for deprecation is explained in [Dockershim-FAQ](/blog/2020/12/02/dockershim-faq/).
|
||||
-->
|
||||
此页面被重定向到
|
||||
https://v1-24.docs.kubernetes.io/zh-cn/docs/reference/tools/map-crictl-dockercli/
|
||||
,原因是
|
||||
[dockershim 在 v1.24 中被从 crictl 中移除](https://github.com/kubernetes-sigs/cri-tools/issues/870)。
|
||||
根据我们的社区政策,弃用的文档超过三个版本后不再维护。
|
||||
弃用的原因在 [Dockershim-FAQ](/zh-cn/docs/blog/2020/12/02/dockershim-faq/) 中进行了说明。
|
||||
|
||||
{{</ note >}}
|
|
@ -18,7 +18,7 @@ weight: 25
|
|||
|
||||
{{< feature-state feature_gate_name="ServerSideApply" >}}
|
||||
|
||||
<!--
|
||||
<!--
|
||||
Kubernetes supports multiple appliers collaborating to manage the fields
|
||||
of a single [object](/docs/concepts/overview/working-with-objects/).
|
||||
|
||||
|
@ -77,7 +77,7 @@ Kubernetes API 服务器跟踪所有新建对象的**受控字段(Managed Fiel
|
|||
这样做是为了表明操作可能会撤消另一个合作者的更改。
|
||||
可以强制写入具有托管字段的对象,在这种情况下,任何冲突字段的值都将被覆盖,并且所有权将被转移。
|
||||
|
||||
<!--
|
||||
<!--
|
||||
Whenever a field's value does change, ownership moves from its current manager to the
|
||||
manager making the change.
|
||||
|
||||
|
@ -106,7 +106,7 @@ by including a value for that field in a Server-Side Apply operation.
|
|||
最后一次对字段值做出断言的用户将被记录到当前字段管理器。
|
||||
这可以通过发送 `POST`(**create**)、`PUT`(**update**)、或非应用的 `PATCH`(**patch**)
|
||||
显式更改字段管理器详细信息来实现。
|
||||
还可以通过在服务器端应用操作中包含字段的值来声明和记录字段管理器。
|
||||
你还可以通过在服务器端应用操作中包含字段的值来声明和记录字段管理器。
|
||||
|
||||
<!--
|
||||
A Server-Side Apply **patch** request requires the client to provide its identity
|
||||
|
@ -115,9 +115,8 @@ field that is controlled by a different manager results in a rejected
|
|||
request unless the client forces an override.
|
||||
For details of overrides, see [Conflicts](#conflicts).
|
||||
-->
|
||||
服务器端应用场景中的 **patch** 请求要求客户端提供自身的标识作为
|
||||
[字段管理器(Field Manager)](#managers)。使用服务器端应用时,
|
||||
如果尝试变更由别的管理器来控制的字段,会导致请求被拒绝,除非客户端强制要求进行覆盖。
|
||||
服务器端应用场景中的 **patch** 请求要求客户端提供自身的标识作为[字段管理器(Field Manager)](#managers)。
|
||||
使用服务器端应用时,如果尝试变更由别的管理器来控制的字段,会导致请求被拒绝,除非客户端强制要求进行覆盖。
|
||||
关于覆盖操作的细节,可参阅[冲突](#conflicts)节。
|
||||
|
||||
<!--
|
||||
|
@ -138,7 +137,7 @@ object's [`metadata`](/docs/reference/kubernetes-api/common-definitions/object-m
|
|||
[`metadata`](/zh-cn/docs/reference/kubernetes-api/common-definitions/object-meta/)
|
||||
中的一部分。
|
||||
|
||||
<!--
|
||||
<!--
|
||||
If you remove a field from a manifest and apply that manifest, Server-Side
|
||||
Apply checks if there are any other field managers that also own the field.
|
||||
If the field is not owned by any other field managers, it is either deleted
|
||||
|
@ -149,7 +148,7 @@ The same rule applies to associative list or map items.
|
|||
如果该字段不属于任何其他字段管理器,则服务器会将其从活动对象中删除,或者重置为其默认值(如果有)。
|
||||
同样的规则也适用于关联列表(list)或键值对(map)。
|
||||
|
||||
<!--
|
||||
<!--
|
||||
Compared to the (legacy)
|
||||
[`kubectl.kubernetes.io/last-applied-configuration`](/docs/reference/labels-annotations-taints/#kubectl-kubernetes-io-last-applied-configuration)
|
||||
annotation managed by `kubectl`, Server-Side Apply uses a more declarative
|
||||
|
@ -164,7 +163,7 @@ becomes available.
|
|||
它跟踪用户(或客户端)的字段管理,而不是用户上次应用的状态。
|
||||
作为服务器端应用的副作用,哪个字段管理器管理的对象的哪个字段的相关信息也会变得可用。
|
||||
|
||||
<!--
|
||||
<!--
|
||||
### Example {#ssa-example-configmap}
|
||||
|
||||
A simple example of an object created using Server-Side Apply could look like this:
|
||||
|
@ -173,6 +172,16 @@ A simple example of an object created using Server-Side Apply could look like th
|
|||
|
||||
服务器端应用创建对象的简单示例如下:
|
||||
|
||||
{{< note >}}
|
||||
<!--
|
||||
`kubectl get` omits managed fields by default.
|
||||
Add `--show-managed-fields` to show `managedFields` when the output format is either `json` or `yaml`.
|
||||
-->
|
||||
`kubectl get` 默认省略 `managedFields`。
|
||||
当输出格式为 `json` 或 `yaml` 时,你可以添加 `--show-managed-fields` 参数以显示 `managedFields`。
|
||||
{{< /note >}}
|
||||
|
||||
<!--
|
||||
```yaml
|
||||
---
|
||||
apiVersion: v1
|
||||
|
@ -184,7 +193,32 @@ metadata:
|
|||
test-label: test
|
||||
managedFields:
|
||||
- manager: kubectl
|
||||
operation: Apply # 注意大写: “Apply” (或者 “Update”)
|
||||
operation: Apply # note capitalization: "Apply" (or "Update")
|
||||
apiVersion: v1
|
||||
time: "2010-10-10T0:00:00Z"
|
||||
fieldsType: FieldsV1
|
||||
fieldsV1:
|
||||
f:metadata:
|
||||
f:labels:
|
||||
f:test-label: {}
|
||||
f:data:
|
||||
f:key: {}
|
||||
data:
|
||||
key: some value
|
||||
```
|
||||
-->
|
||||
```yaml
|
||||
---
|
||||
apiVersion: v1
|
||||
kind: ConfigMap
|
||||
metadata:
|
||||
name: test-cm
|
||||
namespace: default
|
||||
labels:
|
||||
test-label: test
|
||||
managedFields:
|
||||
- manager: kubectl
|
||||
operation: Apply # 注意大写:“Apply” (或者 “Update”)
|
||||
apiVersion: v1
|
||||
time: "2010-10-10T0:00:00Z"
|
||||
fieldsType: FieldsV1
|
||||
|
@ -198,7 +232,7 @@ data:
|
|||
key: some value
|
||||
```
|
||||
|
||||
<!--
|
||||
<!--
|
||||
That example ConfigMap object contains a single field management record in
|
||||
`.metadata.managedFields`. The field management record consists of basic information
|
||||
about the managing entity itself, plus details about the fields being managed and
|
||||
|
@ -208,9 +242,9 @@ otherwise, it is `Update`.
|
|||
-->
|
||||
示例的 ConfigMap 对象在 `.metadata.managedFields` 中包含字段管理记录。
|
||||
字段管理记录包括关于管理实体本身的基本信息,以及关于被管理的字段和相关操作(`Apply` 或 `Update`)的详细信息。
|
||||
如果最后更改该字段的请求是服务器端应用的**patch**操作,则 `operation` 的值为 `Apply`;否则为 `Update`。
|
||||
如果最后更改该字段的请求是服务器端应用的 **patch** 操作,则 `operation` 的值为 `Apply`;否则为 `Update`。
|
||||
|
||||
<!--
|
||||
<!--
|
||||
There is another possible outcome. A client could submit an invalid request
|
||||
body. If the fully specified intent does not produce a valid object, the
|
||||
request fails.
|
||||
|
@ -229,7 +263,7 @@ for example, the `.metadata.managedFields` get into an inconsistent state
|
|||
比如 `managedFields` 进入不一致的状态(显然不应该发生这种情况),
|
||||
这么做也是一个合理的尝试。
|
||||
|
||||
<!--
|
||||
<!--
|
||||
The format of `managedFields` is [described](/docs/reference/kubernetes-api/common-definitions/object-meta/#System)
|
||||
in the Kubernetes API reference.
|
||||
-->
|
||||
|
@ -322,8 +356,7 @@ sets the manager identity to `"kubectl"` by default.
|
|||
|
||||
当你使用 `kubectl` 工具执行服务器端应用操作时,`kubectl` 默认情况下会将管理器标识设置为 `“kubectl”`。
|
||||
|
||||
|
||||
<!--
|
||||
<!--
|
||||
## Serialization
|
||||
|
||||
At the protocol level, Kubernetes represents Server-Side Apply message bodies
|
||||
|
@ -331,11 +364,11 @@ as [YAML](https://yaml.org/), with the media type `application/apply-patch+yaml`
|
|||
-->
|
||||
## 序列化 {#serialization}
|
||||
|
||||
在协议层面,Kubernetes 用 [YAML](https://yaml.org/) 来表示 Server-Side Apply 的消息体,
|
||||
在协议层面,Kubernetes 用 [YAML](https://yaml.org/) 来表示服务器端应用的消息体,
|
||||
媒体类型为 `application/apply-patch+yaml`。
|
||||
|
||||
{{< note >}}
|
||||
<!--
|
||||
<!--
|
||||
Whether you are submitting JSON data or YAML data, use
|
||||
`application/apply-patch+yaml` as the `Content-Type` header value.
|
||||
|
||||
|
@ -368,7 +401,7 @@ Here's an example of a Server-Side Apply message body (fully specified intent):
|
|||
}
|
||||
```
|
||||
|
||||
<!--
|
||||
<!--
|
||||
(this would make a no-change update, provided that it was sent as the body
|
||||
of a **patch** request to a valid `v1/configmaps` resource, and with the
|
||||
appropriate request `Content-Type`).
|
||||
|
@ -376,7 +409,7 @@ appropriate request `Content-Type`).
|
|||
(这个请求将导致无更改的更新,前提是它作为 **patch** 请求的主体发送到有效的 `v1/configmaps` 资源,
|
||||
并且请求中设置了合适的 `Content-Type`)。
|
||||
|
||||
<!--
|
||||
<!--
|
||||
## Operations in scope for field management {#apply-and-update}
|
||||
|
||||
The Kubernetes API operations where field management is considered are:
|
||||
|
@ -391,7 +424,7 @@ The Kubernetes API operations where field management is considered are:
|
|||
1. 服务器端应用(HTTP `PATCH`,内容类型为 `application/apply-patch+yaml`)
|
||||
2. 替换现有对象(对 Kubernetes 而言是 **update**;HTTP 层面表现为 `PUT`)
|
||||
|
||||
<!--
|
||||
<!--
|
||||
Both operations update `.metadata.managedFields`, but behave a little differently.
|
||||
|
||||
Unless you specify a forced override, an apply operation that encounters field-level
|
||||
|
@ -548,7 +581,7 @@ keys are treated the same as struct fields, and all lists are considered atomic.
|
|||
默认情况下,服务器端应用将自定义资源视为无结构的数据。
|
||||
所有键被视为 struct 数据类型的字段,所有列表都被视为 atomic 形式。
|
||||
|
||||
<!--
|
||||
<!--
|
||||
If the CustomResourceDefinition defines a
|
||||
[schema](/docs/reference/generated/kubernetes-api/{{< param "version" >}}#jsonschemaprops-v1-apiextensions-k8s-io)
|
||||
that contains annotations as defined in the previous [Merge Strategy](#merge-strategy)
|
||||
|
@ -612,10 +645,11 @@ kind: Foo
|
|||
metadata:
|
||||
name: foo-sample
|
||||
managedFields:
|
||||
- manager: manager-one
|
||||
- manager: "manager-one"
|
||||
operation: Apply
|
||||
apiVersion: example.com/v1
|
||||
fields:
|
||||
fieldsType: FieldsV1
|
||||
fieldsV1:
|
||||
f:spec:
|
||||
f:data: {}
|
||||
spec:
|
||||
|
@ -725,8 +759,7 @@ This is not what the user wants to happen, even temporarily - it might well degr
|
|||
a running workload.
|
||||
-->
|
||||
现在,用户希望从他们的配置中删除 `replicas`,从而避免与 HorizontalPodAutoscaler(HPA)及其控制器发生冲突。
|
||||
然而,这里存在一个竞态:
|
||||
在 HPA 需要调整 `.spec.replicas` 之前会有一个时间窗口,
|
||||
然而,这里存在一个竞态:在 HPA 需要调整 `.spec.replicas` 之前会有一个时间窗口,
|
||||
如果在 HPA 写入字段并成为新的属主之前,用户删除了 `.spec.replicas`,
|
||||
那 API 服务器就会把 `.spec.replicas` 的值设为 1(Deployment 的默认副本数)。
|
||||
这不是用户希望发生的事情,即使是暂时的——它很可能会导致正在运行的工作负载降级。
|
||||
|
@ -754,6 +787,17 @@ First, the user defines a new manifest containing only the `replicas` field:
|
|||
|
||||
首先,用户新定义一个只包含 `replicas` 字段的新清单:
|
||||
|
||||
<!--
|
||||
```yaml
|
||||
# Save this file as 'nginx-deployment-replicas-only.yaml'.
|
||||
apiVersion: apps/v1
|
||||
kind: Deployment
|
||||
metadata:
|
||||
name: nginx-deployment
|
||||
spec:
|
||||
replicas: 3
|
||||
```
|
||||
-->
|
||||
```yaml
|
||||
# 将此文件另存为 'nginx-deployment-replicas-only.yaml'
|
||||
apiVersion: apps/v1
|
||||
|
@ -848,7 +892,7 @@ field in an object also becomes available.
|
|||
服务器端应用使用一种更具声明性的方法来跟踪对象的字段管理,而不是记录用户最后一次应用的状态。
|
||||
这意味着,使用服务器端应用的副作用,就是字段管理器所管理的对象的每个字段的相关信息也会变得可用。
|
||||
|
||||
<!--
|
||||
<!--
|
||||
A consequence of the conflict detection and resolution implemented by Server-Side
|
||||
Apply is that an applier always has up to date field values in their local
|
||||
state. If they don't, they get a conflict the next time they apply. Any of the
|
||||
|
@ -895,6 +939,7 @@ using server-side apply with the following flag.
|
|||
```shell
|
||||
kubectl apply --server-side [--dry-run=server]
|
||||
```
|
||||
|
||||
<!--
|
||||
By default, field management of the object transfers from client-side apply to
|
||||
kubectl server-side apply, without encountering conflicts.
|
||||
|
@ -987,7 +1032,7 @@ request bodies that are also valid JSON.
|
|||
所有 JSON 消息都是有效的 YAML。一些客户端使用 YAML 请求体指定服务器端应用请求,
|
||||
而这些 YAML 同样是合法的 JSON。
|
||||
|
||||
<!--
|
||||
<!--
|
||||
### Access control and permissions {#rbac-and-permissions}
|
||||
|
||||
Since Server-Side Apply is a type of `PATCH`, a principal (such as a Role for Kubernetes
|
||||
|
@ -1062,7 +1107,7 @@ applier takes ownership of any fields updated in the same request.
|
|||
其结果是,应用者取得了同一个请求中所有字段的所有权。
|
||||
|
||||
{{< note >}}
|
||||
<!--
|
||||
<!--
|
||||
Server-Side Apply does not correctly track ownership on
|
||||
sub-resources that don't receive the resource object type. If you are
|
||||
using Server-Side Apply with such a sub-resource, the changed fields
|
||||
|
@ -1074,7 +1119,7 @@ may not be tracked.
|
|||
|
||||
## {{% heading "whatsnext" %}}
|
||||
|
||||
<!--
|
||||
<!--
|
||||
You can read about `managedFields` within the Kubernetes API reference for the
|
||||
[`metadata`](/docs/reference/kubernetes-api/common-definitions/object-meta/)
|
||||
top level field.
@ -0,0 +1,243 @@
|
|||
---
|
||||
title: 为集群超配节点容量
|
||||
content_type: task
|
||||
weight: 10
|
||||
---
|
||||
<!--
|
||||
title: Overprovision Node Capacity For A Cluster
|
||||
content_type: task
|
||||
weight: 10
|
||||
-->
|
||||
|
||||
<!-- overview -->
|
||||
|
||||
<!--
|
||||
This page guides you through configuring {{< glossary_tooltip text="Node" term_id="node" >}}
|
||||
overprovisioning in your Kubernetes cluster. Node overprovisioning is a strategy that proactively
|
||||
reserves a portion of your cluster's compute resources. This reservation helps reduce the time
|
||||
required to schedule new pods during scaling events, enhancing your cluster's responsiveness
|
||||
to sudden spikes in traffic or workload demands.
|
||||
|
||||
By maintaining some unused capacity, you ensure that resources are immediately available when
|
||||
new pods are created, preventing them from entering a pending state while the cluster scales up.
|
||||
-->
|
||||
本页指导你在 Kubernetes 集群中配置{{< glossary_tooltip text="节点" term_id="node" >}}超配。
|
||||
节点超配是一种主动预留部分集群计算资源的策略。这种预留有助于减少在扩缩容事件期间调度新 Pod 所需的时间,
|
||||
从而增强集群对突发流量或突发工作负载需求的响应能力。
|
||||
|
||||
通过保持一些未使用的容量,确保在新 Pod 被创建时资源可以立即可用,防止 Pod 在集群扩缩容时进入 Pending 状态。
|
||||
|
||||
## {{% heading "prerequisites" %}}
|
||||
|
||||
<!--
|
||||
- You need to have a Kubernetes cluster, and the kubectl command-line tool must be configured to communicate with
|
||||
your cluster.
|
||||
- You should already have a basic understanding of
|
||||
[Deployments](/docs/concepts/workloads/controllers/deployment/),
|
||||
Pod {{< glossary_tooltip text="priority" term_id="pod-priority" >}},
|
||||
and {{< glossary_tooltip text="PriorityClasses" term_id="priority-class" >}}.
|
||||
- Your cluster must be set up with an [autoscaler](/docs/concepts/cluster-administration/cluster-autoscaling/)
|
||||
that manages nodes based on demand.
|
||||
-->
|
||||
- 你需要有一个 Kubernetes 集群,并且 kubectl 命令行工具必须被配置为与你的集群通信。
|
||||
- 你应该已经基本了解了 [Deployment](/zh-cn/docs/concepts/workloads/controllers/deployment/)、Pod
|
||||
{{<glossary_tooltip text="优先级" term_id="pod-priority">}}和
|
||||
{{< glossary_tooltip text="PriorityClass" term_id="priority-class" >}}。
|
||||
- 你的集群必须设置一个基于需求管理节点的[自动扩缩程序](/zh-cn/docs/concepts/cluster-administration/cluster-autoscaling/)。
|
||||
|
||||
<!-- steps -->
|
||||
|
||||
<!--
|
||||
## Create a PriorityClass
|
||||
|
||||
Begin by defining a PriorityClass for the placeholder Pods. First, create a PriorityClass with a
|
||||
negative priority value, that you will shortly assign to the placeholder pods.
|
||||
Later, you will set up a Deployment that uses this PriorityClass
|
||||
-->
|
||||
## 创建 PriorityClass {#create-a-priorityclass}
|
||||
|
||||
首先为占位 Pod 定义一个 PriorityClass。
|
||||
先创建一个优先级值为负数的 PriorityClass,稍后将其分配给占位 Pod。
|
||||
接下来,你将部署使用此 PriorityClass 的 Deployment。
|
||||
|
||||
{{% code_sample language="yaml" file="priorityclass/low-priority-class.yaml" %}}
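
For readers following along without the sample repository, such a PriorityClass might look roughly like the sketch below; the name, value, and description are assumptions, and the canonical manifest is the `low-priority-class.yaml` sample referenced above.

```yaml
# Approximation with assumed name and value; the canonical manifest is the
# low-priority-class.yaml sample referenced above.
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: placeholder
value: -1000                          # negative: always preempted before real workloads
globalDefault: false
description: "Priority class for capacity-reservation placeholder Pods."
```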
|
||||
|
||||
<!--
|
||||
Then create the PriorityClass:
|
||||
-->
|
||||
然后创建 PriorityClass:
|
||||
|
||||
```shell
|
||||
kubectl apply -f https://k8s.io/examples/priorityclass/low-priority-class.yaml
|
||||
```
|
||||
|
||||
<!--
|
||||
You will next define a Deployment that uses the negative-priority PriorityClass and runs a minimal container.
|
||||
When you add this to your cluster, Kubernetes runs those placeholder pods to reserve capacity. Any time there
|
||||
is a capacity shortage, the control plane will pick one these placeholder pods as the first candidate to
|
||||
{{< glossary_tooltip text="preempt" term_id="preemption" >}}.
|
||||
-->
|
||||
接下来,你将定义一个 Deployment,使用优先级值为负数的 PriorityClass 并运行最小的容器。
|
||||
当你将此 Deployment 添加到集群中时,Kubernetes 会运行这些占位 Pod 以预留容量。
|
||||
每当出现容量短缺时,控制面将选择这些占位 Pod
|
||||
中的一个作为第一个候选者进行{{< glossary_tooltip text="抢占" term_id="preemption" >}}。
|
||||
|
||||
<!--
|
||||
## Run Pods that request node capacity
|
||||
|
||||
Review the sample manifest:
|
||||
-->
|
||||
## 运行请求节点容量的 Pod {#run-pods-that-request-node-capacity}
|
||||
|
||||
查看样例清单:
|
||||
|
||||
{{% code_sample language="yaml" file="deployments/deployment-with-capacity-reservation.yaml" %}}
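
Roughly, the referenced manifest amounts to a Deployment of minimal pause containers pinned to the negative-priority class; the sketch below is an approximation with assumed names and sizes, and the canonical version is the `deployment-with-capacity-reservation.yaml` sample above.

```yaml
# Approximation with assumed names and sizes; the canonical manifest is the
# deployment-with-capacity-reservation.yaml sample referenced above.
apiVersion: apps/v1
kind: Deployment
metadata:
  name: capacity-reservation
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: capacity-placeholder
  template:
    metadata:
      labels:
        app.kubernetes.io/name: capacity-placeholder
    spec:
      priorityClassName: placeholder      # the negative-priority class created earlier
      terminationGracePeriodSeconds: 0
      containers:
      - name: pause
        image: registry.k8s.io/pause:3.9  # minimal container that only sleeps
        resources:
          requests:
            cpu: "100m"
            memory: "200Mi"
```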
|
||||
|
||||
<!--
|
||||
### Pick a namespace for the placeholder pods
|
||||
|
||||
You should select, or create, a {{< glossary_tooltip term_id="namespace" text="namespace">}}
|
||||
that the placeholder Pods will go into.
|
||||
-->
|
||||
### 为占位 Pod 挑选一个命名空间 {#pick-a-namespace-for-the-placeholder-pods}
|
||||
|
||||
你应选择或创建占位 Pod 要进入的{{< glossary_tooltip term_id="namespace" text="命名空间">}}。
|
||||
|
||||
<!--
|
||||
### Create the placeholder deployment
|
||||
|
||||
Create a Deployment based on that manifest:
|
||||
|
||||
```shell
|
||||
# Change the namespace name "example"
|
||||
kubectl --namespace example apply -f https://k8s.io/examples/deployments/deployment-with-capacity-reservation.yaml
|
||||
```
|
||||
-->
|
||||
### 创建占位 Deployment {#create-the-placeholder-deployment}
|
||||
|
||||
基于该清单创建 Deployment:
|
||||
|
||||
```shell
|
||||
# 你要更改命名空间名称 "example"
|
||||
kubectl --namespace example apply -f https://k8s.io/examples/deployments/deployment-with-capacity-reservation.yaml
|
||||
```
|
||||
|
||||
<!--
|
||||
## Adjust placeholder resource requests
|
||||
|
||||
Configure the resource requests and limits for the placeholder pods to define the amount of overprovisioned resources you want to maintain. This reservation ensures that a specific amount of CPU and memory is kept available for new pods.
|
||||
-->
|
||||
## 调整占位资源请求 {#adjust-placeholder-resource-requests}
|
||||
|
||||
为占位 Pod 配置资源请求和限制,以定义你希望保持的超配资源量。
|
||||
这种预留确保为新 Pod 保留可以使用的、特定量的 CPU 和内存。
|
||||
|
||||
<!--
|
||||
To edit the Deployment, modify the `resources` section in the Deployment manifest file
|
||||
to set appropriate requests and limits. You can download that file locally and then edit it
|
||||
with whichever text editor you prefer.
|
||||
|
||||
You can also edit the Deployment using kubectl:
|
||||
-->
|
||||
要编辑 Deployment,可以修改 Deployment 清单文件中的 `resources` 一节,
|
||||
设置合适的 `requests` 和 `limits`。
|
||||
你可以将该文件下载到本地,然后用自己喜欢的文本编辑器进行编辑。
|
||||
|
||||
你也可以使用 kubectl 来编辑 Deployment:
|
||||
|
||||
```shell
|
||||
kubectl edit deployment capacity-reservation
|
||||
```
|
||||
|
||||
<!--
|
||||
For example, to reserve 500m CPU and 1Gi memory across 5 placeholder pods,
|
||||
define the resource requests and limits for a single placeholder pod as follows:
|
||||
-->
|
||||
例如,要为 5 个占位 Pod 预留 500m CPU 和 1Gi 内存,请为单个占位 Pod 定义以下资源请求和限制:
|
||||
|
||||
```yaml
|
||||
resources:
|
||||
requests:
|
||||
cpu: "100m"
|
||||
memory: "200Mi"
|
||||
limits:
|
||||
cpu: "100m"
|
||||
```
|
||||
|
||||
<!--
|
||||
## Set the desired replica count
|
||||
|
||||
### Calculate the total reserved resources
|
||||
-->
|
||||
## 设置所需的副本数量 {#set-the-desired-replica-count}
|
||||
|
||||
### 计算总预留资源 {#calculate-the-total-reserved-resources}
|
||||
|
||||
<!-- trailing whitespace in next paragraph is significant -->
|
||||
|
||||
<!--
|
||||
For example, with 5 replicas each reserving 0.1 CPU and 200MiB of memory:
|
||||
Total CPU reserved: 5 × 0.1 = 0.5 (in the Pod specification, you'll write the quantity `500m`)
|
||||
Total memory reserved: 5 × 200MiB = 1GiB (in the Pod specification, you'll write `1Gi`)
|
||||
|
||||
To scale the Deployment, adjust the number of replicas based on your cluster's size and expected workload:
|
||||
-->
|
||||
例如,有 5 个副本,每个预留 0.1 CPU 和 200MiB 内存:
|
||||
CPU 预留总量:5 × 0.1 = 0.5(在 Pod 规约中,你将写入数量 `500m`)
|
||||
内存预留总量：5 × 200MiB = 1GiB（在 Pod 规约中，你将写入 `1Gi`）
|
||||
|
||||
要扩缩容 Deployment,请基于集群的大小和预期的工作负载调整副本数:
|
||||
|
||||
```shell
|
||||
kubectl scale deployment capacity-reservation --replicas=5
|
||||
```
|
||||
|
||||
<!--
|
||||
Verify the scaling:
|
||||
-->
|
||||
验证扩缩容效果:
|
||||
|
||||
```shell
|
||||
kubectl get deployment capacity-reservation
|
||||
```
|
||||
|
||||
<!--
|
||||
The output should reflect the updated number of replicas:
|
||||
-->
|
||||
输出应反映出更新后的副本数:
|
||||
|
||||
```none
|
||||
NAME READY UP-TO-DATE AVAILABLE AGE
|
||||
capacity-reservation 5/5 5 5 2m
|
||||
```
|
||||
|
||||
{{< note >}}
<!--
Some autoscalers, notably [Karpenter](/docs/concepts/cluster-administration/cluster-autoscaling/#autoscaler-karpenter),
treat preferred affinity rules as hard rules when considering node scaling.
If you use Karpenter or another node autoscaler that uses the same heuristic,
the replica count you set here also sets a minimum node count for your cluster.
-->
一些自动扩缩组件,特别是
[Karpenter](/zh-cn/docs/concepts/cluster-administration/cluster-autoscaling/#autoscaler-karpenter),
在考虑节点扩缩容时将偏好的亲和性规则视为硬性规则。如果你使用 Karpenter
或其他使用同样启发式的节点扩缩容组件,你在此处设置的副本数也就是你的集群的最少节点数。
{{< /note >}}

## {{% heading "whatsnext" %}}

<!--
- Learn more about [PriorityClasses](/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass) and how they affect pod scheduling.
- Explore [node autoscaling](/docs/concepts/cluster-administration/cluster-autoscaling/) to dynamically adjust your cluster's size based on workload demands.
- Understand [Pod preemption](/docs/concepts/scheduling-eviction/pod-priority-preemption/), a
  key mechanism for Kubernetes to handle resource contention. The same page covers _eviction_,
  which is less relevant to the placeholder Pod approach, but is also a mechanism for Kubernetes
  to react when resources are contended.
-->
- 进一步了解 [PriorityClass](/zh-cn/docs/concepts/scheduling-eviction/pod-priority-preemption/#priorityclass)
  及其如何影响 Pod 调度。
- 探索[节点自动扩缩容](/zh-cn/docs/concepts/cluster-administration/cluster-autoscaling/),
  以基于工作负载需求动态调整集群的大小。
- 了解 [Pod 抢占](/zh-cn/docs/concepts/scheduling-eviction/pod-priority-preemption/),
  这是 Kubernetes 处理资源竞争的关键机制。这篇文档还涵盖了**驱逐**,
  虽然与占位 Pod 方法相关性较小,但也是 Kubernetes 在资源竞争时做出反应的一种机制。

@ -378,12 +378,9 @@ kubectl describe pod private-reg
<!--
If you then see an event with the reason set to `FailedToRetrieveImagePullSecret`,
Kubernetes can't find a Secret with that name (`regcred`, in this example).
If you specify that a Pod needs image pull credentials, the kubelet checks that it can
access that Secret before attempting to pull the image.
-->
如果你看到一个原因设为 `FailedToRetrieveImagePullSecret` 的事件,
那么 Kubernetes 找不到指定名称(此例中为 `regcred`)的 Secret。
如果你指定 Pod 需要拉取镜像凭据,kubelet 在尝试拉取镜像之前会检查是否可以访问该 Secret。

<!--
Make sure that the Secret you have specified exists, and that its name is spelled properly.
@ -397,6 +394,24 @@ Events:
... FailedToRetrieveImagePullSecret ... Unable to retrieve some image pull secrets (<regcred>); attempting to pull the image may not succeed.
```

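<!--
As a minimal sketch, you can check that the Secret exists in the Pod's namespace and that its
name matches what the Pod references (`regcred` in this example):
-->
作为一个简单示意,你可以检查该 Secret 是否存在于 Pod 所在的命名空间,
并确认其名称与 Pod 中引用的名称一致(此例中为 `regcred`):

```shell
# 确认 Secret 存在且类型为 kubernetes.io/dockerconfigjson
kubectl get secret regcred
# 查看 Secret 的元数据(不会显示其中的敏感数据)
kubectl describe secret regcred
```
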
<!--
## Using images from multiple registries

A pod can have multiple containers, and each container image can be from a different registry.
You can use multiple `imagePullSecrets` with one pod, and each can contain multiple credentials.
-->
## 使用来自多个仓库的镜像

一个 Pod 可以包含多个容器,每个容器的镜像可以来自不同的仓库。
你可以在一个 Pod 中使用多个 `imagePullSecrets`,每个 `imagePullSecrets` 可以包含多个凭证。

<!--
The image pull will be attempted using each credential that matches the registry.
If no credentials match the registry, the image pull will be attempted without authorization or using custom runtime-specific configuration.
-->
kubelet 将使用与仓库匹配的每个凭证尝试拉取镜像。
如果没有凭证匹配仓库,则 kubelet 将尝试在没有授权的情况下拉取镜像,或者使用特定运行时的自定义配置。

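<!--
As a minimal sketch, the Pod below pulls from two different registries using two Secrets.
The image names and the second Secret (`regcred-other`) are hypothetical examples:
-->
作为一个简单示意,下面的 Pod 使用两个 Secret 从两个不同的仓库拉取镜像。
其中的镜像名称和第二个 Secret(`regcred-other`)均为假设的示例:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: multi-registry-demo
spec:
  containers:
  - name: app
    image: registry-1.example.com/team/app:1.0       # 假设的私有仓库镜像
  - name: helper
    image: registry-2.example.com/team/helper:1.0    # 假设的另一仓库中的镜像
  imagePullSecrets:
  - name: regcred          # 与第一个仓库匹配的凭证
  - name: regcred-other    # 假设的、与第二个仓库匹配的 Secret
```
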
## {{% heading "whatsnext" %}}

<!--

@ -1,10 +0,0 @@
|
|||
---
|
||||
title: "示例:配置 java 微服务"
|
||||
weight: 10
|
||||
---
|
||||
<!--
|
||||
---
|
||||
title: "Example: Configuring a Java Microservice"
|
||||
weight: 10
|
||||
---
|
||||
-->
|
|
@ -1,37 +0,0 @@
|
|||
---
|
||||
title: "互动教程 - 配置 java 微服务"
|
||||
weight: 20
|
||||
---
|
||||
<!--
|
||||
---
|
||||
title: "Interactive Tutorial - Configuring a Java Microservice"
|
||||
weight: 20
|
||||
---
|
||||
-->
|
||||
|
||||
<!DOCTYPE html>
|
||||
|
||||
<html lang="zh">
|
||||
|
||||
<body>
|
||||
|
||||
<link href="/docs/tutorials/kubernetes-basics/public/css/styles.css" rel="stylesheet">
|
||||
<link href="/docs/tutorials/kubernetes-basics/public/css/overrides.css" rel="stylesheet">
|
||||
<script src="https://katacoda.com/embed.js"></script>
|
||||
|
||||
<div class="layout" id="top">
|
||||
|
||||
<main class="content katacoda-content">
|
||||
<div class="katacoda">
|
||||
<div class="katacoda__alert">
|
||||
<!-- To interact with the Terminal, please use the desktop/tablet version -->
|
||||
如需要与终端交互,请使用台式机/平板电脑版
|
||||
</div>
|
||||
<div class="katacoda__box" id="inline-terminal-1" data-katacoda-id="kubernetes-bootcamp/9" data-katacoda-color="326de6" data-katacoda-secondary="273d6d" data-katacoda-hideintro="false" data-katacoda-prompt="Kubernetes Bootcamp Terminal" style="height: 600px;"></div>
|
||||
</div>
|
||||
</main>
|
||||
|
||||
</div>
|
||||
|
||||
</body>
|
||||
</html>
|
|
@ -1,121 +0,0 @@
|
|||
---
|
||||
title: "使用 MicroProfile、ConfigMaps、Secrets 实现外部化应用配置"
|
||||
content_type: tutorial
|
||||
weight: 10
|
||||
---
|
||||
<!--
|
||||
---
|
||||
title: "Externalizing config using MicroProfile, ConfigMaps and Secrets"
|
||||
content_type: tutorial
|
||||
weight: 10
|
||||
---
|
||||
-->
|
||||
|
||||
<!-- overview -->
|
||||
|
||||
<!--
|
||||
In this tutorial you will learn how and why to externalize your microservice’s configuration.
|
||||
Specifically, you will learn how to use Kubernetes ConfigMaps and Secrets to set environment
|
||||
variables and then consume them using MicroProfile Config.
|
||||
-->
|
||||
在本教程中,你会学到如何以及为什么要实现外部化微服务应用配置。
|
||||
具体来说,你将学习如何使用 Kubernetes ConfigMaps 和 Secrets 设置环境变量,
|
||||
然后在 MicroProfile config 中使用它们。
|
||||
|
||||
## {{% heading "prerequisites" %}}
|
||||
|
||||
<!--
|
||||
### Creating Kubernetes ConfigMaps & Secrets
|
||||
There are several ways to set environment variables for a Docker container in Kubernetes,
|
||||
including: Dockerfile, kubernetes.yml, Kubernetes ConfigMaps, and Kubernetes Secrets. In the
|
||||
tutorial, you will learn how to use the latter two for setting your environment variables whose
|
||||
values will be injected into your microservices. One of the benefits for using ConfigMaps and
|
||||
Secrets is that they can be re-used across multiple containers, including being assigned to
|
||||
different environment variables for the different containers.
|
||||
-->
|
||||
### 创建 Kubernetes ConfigMaps 和 Secrets {#creating-kubernetes-configmaps-secrets}
|
||||
在 Kubernetes 中,为 Docker 容器设置环境变量有几种不同的方式,比如:
Dockerfile、kubernetes.yml、Kubernetes ConfigMaps 和 Kubernetes Secrets。
|
||||
在本教程中,你将学到怎么用后两个方式去设置你的环境变量,而环境变量的值将注入到你的微服务里。
|
||||
使用 ConfigMaps 和 Secrets 的一个好处是他们能在多个容器间复用,
|
||||
比如赋值给不同的容器中的不同环境变量。
|
||||
|
||||
<!--
|
||||
ConfigMaps are API Objects that store non-confidential key-value pairs. In the Interactive
|
||||
Tutorial you will learn how to use a ConfigMap to store the application's name. For more
|
||||
information regarding ConfigMaps, you can find the documentation
|
||||
[here](/docs/tasks/configure-pod-container/configure-pod-configmap/).
|
||||
|
||||
Although Secrets are also used to store key-value pairs, they differ from ConfigMaps in that
|
||||
they're intended for confidential/sensitive information and are stored using Base64 encoding.
|
||||
This makes secrets the appropriate choice for storing such things as credentials, keys, and
|
||||
tokens, the former of which you'll do in the Interactive Tutorial. For more information on
|
||||
Secrets, you can find the documentation [here](/docs/concepts/configuration/secret/).
|
||||
-->
|
||||
ConfigMaps 是存储非机密键值对的 API 对象。
|
||||
在互动教程中,你会学到如何用 ConfigMap 来保存应用名字。
|
||||
ConfigMap 的更多信息,你可以在[这里](/zh-cn/docs/tasks/configure-pod-container/configure-pod-configmap/)找到文档。
|
||||
|
||||
Secrets 尽管也用来存储键值对,但区别于 ConfigMaps 的是:它针对机密/敏感数据,且存储格式为 Base64 编码。
|
||||
Secret 的这种特性使得它适合于存储凭据、密钥和令牌,其中凭据的存储你将在互动教程中实现。
|
||||
Secrets 的更多信息,你可以在[这里](/zh-cn/docs/concepts/configuration/secret/)找到文档。
|
||||
|
||||
|
||||
<!--
|
||||
### Externalizing Config from Code
|
||||
Externalized application configuration is useful because configuration usually changes depending
|
||||
on your environment. In order to accomplish this, we'll use Java's Contexts and Dependency
|
||||
Injection (CDI) and MicroProfile Config. MicroProfile Config is a feature of MicroProfile, a set
|
||||
of open Java technologies for developing and deploying cloud-native microservices.
|
||||
-->
|
||||
### 从代码外部化配置
|
||||
外部化应用配置之所以有用处,是因为配置常常根据环境的不同而变化。
|
||||
为了实现此功能,我们用到了 Java 上下文和依赖注入(Contexts and Dependency Injection, CDI)、MicroProfile 配置。
|
||||
MicroProfile config 是 MicroProfile 的功能特性,
|
||||
是一组开放 Java 技术,用于开发、部署云原生微服务。
|
||||
|
||||
<!--
|
||||
CDI provides a standard dependency injection capability enabling an application to be assembled
|
||||
from collaborating, loosely-coupled beans. MicroProfile Config provides apps and microservices a
|
||||
standard way to obtain config properties from various sources, including the application, runtime,
|
||||
and environment. Based on the source's defined priority, the properties are automatically
|
||||
combined into a single set of properties that the application can access via an API. Together,
|
||||
CDI & MicroProfile will be used in the Interactive Tutorial to retrieve the externally provided
|
||||
properties from the Kubernetes ConfigMaps and Secrets and get injected into your application code.
|
||||
|
||||
Many open source frameworks and runtimes implement and support MicroProfile Config. Throughout
|
||||
the interactive tutorial, you'll be using Open Liberty, a flexible open-source Java runtime for
|
||||
building and running cloud-native apps and microservices. However, any MicroProfile compatible
|
||||
runtime could be used instead.
|
||||
-->
|
||||
CDI 提供一套标准的依赖注入能力,使得应用程序可以由相互协作的、松耦合的 beans 组装而成。
|
||||
MicroProfile Config 为 app 和微服务提供从各种来源,比如应用、运行时、环境,获取配置参数的标准方法。
|
||||
基于来源定义的优先级,属性可以自动的合并到单独一组应用可以通过 API 访问到的属性。
|
||||
CDI & MicroProfile 都会被用在互动教程中,
|
||||
用来从 Kubernetes ConfigMaps 和 Secrets 获得外部提供的属性,并注入应用程序代码中。
|
||||
|
||||
很多开源框架和运行时都实现并支持 MicroProfile Config。
在整个互动教程中,你将使用 Open Liberty,它是一个灵活的开源 Java 运行时,用于构建和运行云原生应用和微服务。
不过,任何兼容 MicroProfile 的运行时都可以用来替代。
|
||||
|
||||
|
||||
## {{% heading "objectives" %}}
|
||||
|
||||
<!--
|
||||
* Create a Kubernetes ConfigMap and Secret
|
||||
* Inject microservice configuration using MicroProfile Config
|
||||
-->
|
||||
* 创建 Kubernetes ConfigMap 和 Secret
|
||||
* 使用 MicroProfile Config 注入微服务配置
|
||||
|
||||
|
||||
<!-- lessoncontent -->
|
||||
|
||||
<!--
|
||||
## Example: Externalizing config using MicroProfile, ConfigMaps and Secrets
|
||||
|
||||
[Start Interactive Tutorial](/docs/tutorials/configuration/configure-java-microservice/configure-java-microservice-interactive/)
|
||||
-->
|
||||
## 示例:使用 MicroProfile、ConfigMaps、Secrets 实现外部化应用配置
|
||||
|
||||
[启动互动教程](/zh-cn/docs/tutorials/configuration/configure-java-microservice/configure-java-microservice-interactive/)
|
|
@ -148,7 +148,7 @@ service/my-nginx exposed
```

<!--
This is equivalent to `kubectl apply -f` the following yaml:
This is equivalent to `kubectl apply -f` in the following yaml:
-->
这等价于使用 `kubectl apply -f` 命令应用如下的 yaml 文件:
|
||||
|
|
|
@ -27,7 +27,7 @@ Service 连接到了你的应用,你就有了一个持续运行的多副本应
<!--
## Termination process for Pods and their endpoints

There are often cases when you need to terminate a Pod - be it for upgrade or scale down.
There are often cases when you need to terminate a Pod - be it to upgrade or scale down.
In order to improve application availability, it may be important to implement
a proper active connections draining.
|
||||
|
@ -48,12 +48,12 @@ a simple nginx web server to demonstrate the concept.
<!--
## Example flow with endpoint termination

The following is the example of the flow described in the
The following is the example flow described in the
[Termination of Pods](/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination)
document.

Let's say you have a Deployment containing of a single `nginx` replica
(just for demonstration purposes) and a Service:
Let's say you have a Deployment containing a single `nginx` replica
(say just for the sake of demonstration purposes) and a Service:
-->
## 端点终止的示例流程 {#example-flow-with-endpoint-termination}
|
||||
|
@ -223,14 +223,14 @@ The output is similar to this:

<!--
This allows applications to communicate their state during termination
and clients (such as load balancers) to implement a connections draining functionality.
and clients (such as load balancers) to implement connection draining functionality.
These clients may detect terminating endpoints and implement a special logic for them.
-->
这种设计使得应用可以在终止期间公布自己的状态,而客户端(如负载均衡器)则可以实现连接排空功能。
这些客户端可以检测到正在终止的端点,并为这些端点实现特殊的逻辑。

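<!--
As a minimal sketch, you can watch the `ready`, `serving` and `terminating` conditions on the
EndpointSlices for the Service while the Pod shuts down. The Service name `nginx-service` is an
assumed example name:
-->
作为一个简单示意,你可以在 Pod 关闭期间观察 Service 对应 EndpointSlice 上的
`ready`、`serving` 和 `terminating` 状况。这里的 Service 名称 `nginx-service` 只是假设的示例名称:

```shell
# 查看该 Service 的 EndpointSlice,关注 endpoints[].conditions 字段
kubectl get endpointslices -l kubernetes.io/service-name=nginx-service -o yaml
```
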
<!--
In Kubernetes, endpoints that are terminating always have their `ready` status set as as `false`.
In Kubernetes, endpoints that are terminating always have their `ready` status set as `false`.
This needs to happen for backward
compatibility, so existing load balancers will not use it for regular traffic.
If traffic draining on terminating pod is needed, the actual readiness can be

@ -0,0 +1,34 @@
apiVersion: apps/v1
kind: Deployment
metadata:
  name: capacity-reservation
  # 你应决定要将此 Deployment 部署到哪个命名空间
spec:
  replicas: 1
  selector:
    matchLabels:
      app.kubernetes.io/name: capacity-placeholder
  template:
    metadata:
      labels:
        app.kubernetes.io/name: capacity-placeholder
      annotations:
        kubernetes.io/description: "Capacity reservation"
    spec:
      priorityClassName: placeholder
      affinity: # 尽可能将这些占位 Pod 分散到不同的节点
        podAntiAffinity:
          preferredDuringSchedulingIgnoredDuringExecution:
          - weight: 100
            podAffinityTerm:
              labelSelector:
                matchLabels:
                  app.kubernetes.io/name: capacity-placeholder
              topologyKey: "kubernetes.io/hostname"
      containers:
      - name: pause
        image: registry.k8s.io/pause:3.6
        resources:
          requests:
            cpu: "50m"
            memory: "512Mi"
          limits:
            memory: "512Mi"

@ -0,0 +1,7 @@
apiVersion: scheduling.k8s.io/v1
kind: PriorityClass
metadata:
  name: placeholder # 这些 Pod 表示占位容量
value: -1000
globalDefault: false
description: "Negative priority for placeholder pods to enable overprovisioning."

@ -7,10 +7,13 @@ schedules:
|
|||
- endOfLifeDate: "2025-10-28"
|
||||
maintenanceModeStartDate: "2025-08-28"
|
||||
next:
|
||||
cherryPickDeadline: "2024-11-15"
|
||||
cherryPickDeadline: "2024-12-06"
|
||||
release: 1.31.4
|
||||
targetDate: "2024-12-10"
|
||||
previousPatches:
|
||||
- cherryPickDeadline: "2024-11-15"
|
||||
release: 1.31.3
|
||||
targetDate: "2024-11-19"
|
||||
previousPatches:
|
||||
- cherryPickDeadline: "2024-10-11"
|
||||
release: 1.31.2
|
||||
targetDate: "2024-10-22"
|
||||
|
@ -24,10 +27,13 @@ schedules:
|
|||
- endOfLifeDate: "2025-06-28"
|
||||
maintenanceModeStartDate: "2025-04-28"
|
||||
next:
|
||||
cherryPickDeadline: "2024-11-15"
|
||||
cherryPickDeadline: "2024-12-06"
|
||||
release: 1.30.8
|
||||
targetDate: "2024-12-10"
|
||||
previousPatches:
|
||||
- cherryPickDeadline: "2024-11-15"
|
||||
release: 1.30.7
|
||||
targetDate: "2024-11-19"
|
||||
previousPatches:
|
||||
- cherryPickDeadline: "2024-10-11"
|
||||
release: 1.30.6
|
||||
targetDate: "2024-10-22"
|
||||
|
@ -53,10 +59,13 @@ schedules:
|
|||
- endOfLifeDate: "2025-02-28"
|
||||
maintenanceModeStartDate: "2024-12-28"
|
||||
next:
|
||||
cherryPickDeadline: "2024-11-15"
|
||||
cherryPickDeadline: "2024-12-06"
|
||||
release: 1.29.12
|
||||
targetDate: "2024-12-10"
|
||||
previousPatches:
|
||||
- cherryPickDeadline: "2024-11-15"
|
||||
release: 1.29.11
|
||||
targetDate: "2024-11-19"
|
||||
previousPatches:
|
||||
- cherryPickDeadline: "2024-10-11"
|
||||
release: 1.29.10
|
||||
targetDate: "2024-10-22"
|
||||
|
@ -92,9 +101,9 @@ schedules:
|
|||
release: "1.29"
|
||||
releaseDate: "2023-12-13"
|
||||
upcoming_releases:
|
||||
- cherryPickDeadline: "2024-11-15"
|
||||
targetDate: "2024-11-19"
|
||||
- cherryPickDeadline: "2024-12-06"
|
||||
targetDate: "2024-12-10"
|
||||
- cherryPickDeadline: "2025-01-10"
|
||||
targetDate: "2025-01-14"
|
||||
- cherryPickDeadline: "2025-02-07"
|
||||
targetDate: "2025-02-11"
|
||||
|
|
|
@ -218,6 +218,8 @@ url = "https://v1-28.docs.kubernetes.io"
|
|||
[params.ui]
|
||||
# Enable to show the side bar menu in its compact state.
|
||||
sidebar_menu_compact = false
|
||||
# Show this many levels in compact mode
|
||||
ul_show = 3
|
||||
# Show expand/collapse icon for sidebar sections.
|
||||
sidebar_menu_foldable = true
|
||||
# https://github.com/gohugoio/hugo/issues/8918#issuecomment-903314696
|
||||
|
|
|
@ -1,24 +0,0 @@
|
|||
<div id="docsToc">
|
||||
<div class="pi-accordion">
|
||||
{{/* This can be any page in the docs tree. Need to find the correct ancestor.
|
||||
In a roundabout way. This will improve when Go templates gets assignment and break support (both in Go 1.11).
|
||||
*/}}
|
||||
{{ $p := . }}
|
||||
{{ .Scratch.Set "section" .CurrentSection }}
|
||||
{{ .Scratch.Set "sectionFound" false }}
|
||||
{{ $docs := site.GetPage "section" "docs" }}
|
||||
{{ if ne .CurrentSection $docs }}
|
||||
{{ range $docs.Sections }}
|
||||
{{ if not ($.Scratch.Get "sectionFound") }}
|
||||
{{ if $p.IsDescendant . }}
|
||||
{{ $.Scratch.Set "section" . }}
|
||||
{{ $.Scratch.Set "sectionFound" true }}
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
{{ end }}
|
||||
{{ $section := (.Scratch.Get "section") }}
|
||||
{{ partialCached "tree.html" $section $section.RelPermalink }}
|
||||
</div> <!-- /pi-accordion -->
|
||||
<button class="push-menu-close-button" onclick="kub.toggleToc()"></button>
|
||||
</div> <!-- /docsToc -->
|
|
@ -1,36 +1,35 @@
|
|||
{{/* We cache this partial for bigger sites and set the active class client side. */}}
|
||||
{{ $sidebarCacheLimit := cond (isset .Site.Params.ui "sidebar_cache_limit") .Site.Params.ui.sidebar_cache_limit 2000 -}}
|
||||
{{ $shouldDelayActive := ge (len .Site.Pages) $sidebarCacheLimit -}}
|
||||
{{/* Always cache this partial; set the active class client side. */}}
|
||||
{{ $shouldDelayActive := true }}
|
||||
<div id="td-sidebar-menu" class="td-sidebar__inner{{ if $shouldDelayActive }} d-none{{ end }}">
|
||||
{{ if not .Site.Params.ui.sidebar_search_disable -}}
|
||||
<form class="td-sidebar__search d-flex align-items-center">
|
||||
{{ partial "search-input.html" . }}
|
||||
<button class="btn btn-link td-sidebar__toggle d-md-none p-0 ml-3 fas fa-bars" type="button" data-toggle="collapse" data-target="#td-section-nav" aria-controls="td-docs-nav" aria-expanded="false" aria-label="Toggle section navigation">
|
||||
<button class="btn btn-link td-sidebar__toggle d-md-none p-0 ml-3 fas fa-bars" type="button" data-toggle="collapse" data-target="#td-section-nav" aria-controls="td-section-nav" aria-expanded="false" aria-label="Toggle section navigation">
|
||||
</button>
|
||||
</form>
|
||||
{{ else -}}
|
||||
<div id="content-mobile">
|
||||
<form class="td-sidebar__search d-flex align-items-center">
|
||||
{{ partial "search-input.html" . }}
|
||||
<button class="btn btn-link td-sidebar__toggle d-md-none p-0 ml-3 fas fa-bars" type="button" data-toggle="collapse" data-target="#td-section-nav" aria-controls="td-docs-nav" aria-expanded="false" aria-label="Toggle section navigation">
|
||||
<button class="btn btn-link td-sidebar__toggle d-md-none p-0 ml-3 fas fa-bars" type="button" data-toggle="collapse" data-target="#td-section-nav" aria-controls="td-section-nav" aria-expanded="false" aria-label="Toggle section navigation">
|
||||
</button>
|
||||
</form>
|
||||
</div>
|
||||
<div id="content-desktop"></div>
|
||||
{{ end -}}
|
||||
<nav class="collapse td-sidebar-nav{{ if .Site.Params.ui.sidebar_menu_foldable }} foldable-nav{{ end }}" id="td-section-nav">
|
||||
<!-- {{ if (gt (len .Site.Home.Translations) 0) }}
|
||||
{{- if (and false (gt (len .Site.Home.Translations) 0) ) -}}
|
||||
<div class="nav-item dropdown d-block d-lg-none">
|
||||
{{ partial "navbar-lang-selector.html" . }}
|
||||
</div>
|
||||
{{ end }} -->
|
||||
<!-- {{ $navRoot := cond (and (ne .Params.toc_root true) (eq .Site.Home.Type "docs")) .Site.Home .FirstSection }} -->
|
||||
{{ end -}}
|
||||
{{ $navRoot := cond (and (ne .Params.toc_root true) (eq .Site.Home.Type "docs")) .Site.Home .FirstSection -}}
|
||||
{{ $ulNr := 0 -}}
|
||||
{{ $ulShow := cond (isset .Site.Params.ui "ul_show") .Site.Params.ui.ul_show 1 -}}
|
||||
{{ $sidebarMenuTruncate := cond (isset .Site.Params.ui "sidebar_menu_truncate") .Site.Params.ui.sidebar_menu_truncate 50 -}}
|
||||
{{ $currentLang := .Site.Language -}}
|
||||
{{ $currentLang := string .Site.Language -}}
|
||||
<ul class="td-sidebar-nav__section pr-md-3 ul-{{ $ulNr }}">
|
||||
{{ template "section-tree-nav-section" (dict "page" . "section" .FirstSection "shouldDelayActive" $shouldDelayActive "sidebarMenuTruncate" $sidebarMenuTruncate "ulNr" $ulNr "ulShow" (add $ulShow 1) "currentLang" $currentLang) }}
|
||||
{{ template "section-tree-nav-section" (dict "page" . "section" $navRoot "shouldDelayActive" $shouldDelayActive "sidebarMenuTruncate" $sidebarMenuTruncate "ulNr" $ulNr "ulShow" (add $ulShow 1) "currentLang" $currentLang) }}
|
||||
</ul>
|
||||
</nav>
|
||||
</div>
|
||||
|
@ -42,9 +41,10 @@
|
|||
{{ $treeRoot := cond (eq .ulNr 0) true false -}}
|
||||
{{ $ulNr := .ulNr -}}
|
||||
{{ $ulShow := .ulShow -}}
|
||||
{{ $currentLang := .currentLang -}}
|
||||
{{ $active := and (not $shouldDelayActive) (eq $s $p) -}}
|
||||
{{ $activePath := and (not $shouldDelayActive) ($p.IsDescendant $s) -}}
|
||||
{{ $show := cond (or (lt $ulNr $ulShow) $activePath (and (not $shouldDelayActive) (eq $s.Parent $p.Parent)) (and (not $shouldDelayActive) (eq $s.Parent $p)) (and (not $shouldDelayActive) ($p.IsDescendant $s.Parent))) true false -}}
|
||||
{{ $show := cond (or (lt $ulNr $ulShow) $activePath (and (not $shouldDelayActive) (eq $s.Parent $p.Parent)) (and (not $shouldDelayActive) (eq $s.Parent $p)) (not $p.Site.Params.ui.sidebar_menu_compact) (and (not $shouldDelayActive) ($p.IsDescendant $s.Parent))) true false -}}
|
||||
{{ $mid := printf "m-%s" ($s.RelPermalink | anchorize) -}}
|
||||
{{ $pages_tmp := where (union $s.Pages $s.Sections).ByWeight ".Params.toc_hide" "!=" true -}}
|
||||
{{/* We get untranslated subpages below to make sure we build all levels of the sidenav in localizationed docs sets */}}
|
||||
|
@ -60,44 +60,26 @@
|
|||
{{ $withChild := gt (len $pages) 0 -}}
|
||||
{{ $manualLink := cond (isset $s.Params "manuallink") $s.Params.manualLink ( cond (isset $s.Params "manuallinkrelref") (relref $s $s.Params.manualLinkRelref) $s.RelPermalink) -}}
|
||||
{{ $manualLinkTitle := cond (isset $s.Params "manuallinktitle") $s.Params.manualLinkTitle $s.Title -}}
|
||||
|
||||
{{ $isForeignLanguage := (ne (string $s.Lang) ($.currentLang)) -}}
|
||||
<li class="td-sidebar-nav__section-title td-sidebar-nav__section{{ if $withChild }} with-child{{ else }} without-child{{ end }}{{ if $activePath }} active-path{{ end }}{{ if (not (or $show $p.Site.Params.ui.sidebar_menu_foldable )) }} collapse{{ end }}" id="{{ $mid }}-li">
|
||||
{{ if (and $p.Site.Params.ui.sidebar_menu_foldable (ge $ulNr 1)) -}}
|
||||
<input type="checkbox" id="{{ $mid }}-check"{{ if $activePath}} checked{{ end }}/>
|
||||
<label for="{{ $mid }}-check"><a href="{{ $manualLink }}"{{ if ne $s.LinkTitle $manualLinkTitle }} title="{{ $manualLinkTitle }}"{{ end }}{{ with $s.Params.manualLinkTarget }} target="{{ . }}"{{ if eq . "_blank" }} rel="noopener"{{ end }}{{ end }} class="align-left pl-0 {{ if $active}} active{{ end }} td-sidebar-link{{ if $s.IsPage }} td-sidebar-link__page{{ else }} td-sidebar-link__section{{ end }}{{ if $treeRoot }} tree-root{{ end }}" id="{{ $mid }}">{{ with $s.Params.Icon}}<i class="{{ . }}"></i>{{ end }}<span class="{{ if $active }}td-sidebar-nav-active-item{{ end }}">{{ $s.LinkTitle }}</span></a></label>
|
||||
<input type="checkbox" id="{{ $mid }}-check"{{ if $activePath}} checked{{ end }}/>
|
||||
<label for="{{ $mid }}-check"><a href="{{ $manualLink }}"{{ if ne $s.LinkTitle $manualLinkTitle }} title="{{ $manualLinkTitle }}"{{ end }}{{ with $s.Params.manualLinkTarget }} target="{{ . }}"{{ if eq . "_blank" }} rel="noopener"{{ end }}{{ end }} class="align-left pl-0 {{ if $active}} active{{ end }} td-sidebar-link{{ if $s.IsPage }} td-sidebar-link__page{{ else }} td-sidebar-link__section{{ end }}{{ if $treeRoot }} tree-root{{ end }}" id="{{ $mid }}">{{ with $s.Params.Icon}}<i class="{{ . }}"></i>{{ end }}<span class="{{ if $active }}td-sidebar-nav-active-item{{ end }}">{{ $s.LinkTitle }}</span>{{ if $isForeignLanguage }} <small title="{{ T (printf "i18n_language_name_long_%s" $s.Lang ) }}">({{ $s.Lang | upper }})</small>{{ end -}}</a></label>
|
||||
{{ else -}}
|
||||
{{ if not $treeRoot }}
|
||||
<a href="{{ $manualLink }}"{{ if ne $s.LinkTitle $manualLinkTitle }} title="{{ $manualLinkTitle }}"{{ end }}{{ with $s.Params.manualLinkTarget }} target="{{ . }}"{{ if eq . "_blank" }} rel="noopener"{{ end }}{{ end }} class="align-left pl-0{{ if $active}} active{{ end }} td-sidebar-link{{ if $s.IsPage }} td-sidebar-link__page{{ else }} td-sidebar-link__section{{ end }}" id="{{ $mid }}">{{ with $s.Params.Icon}}<i class="{{ . }}"></i>{{ end }}<span class="{{ if $active }}td-sidebar-nav-active-item{{ end }}">{{ $s.LinkTitle }}</span></a>
|
||||
{{ end -}}
|
||||
{{ end -}}
|
||||
{{ if $withChild -}}
|
||||
{{ $ulNr := add $ulNr 1 -}}
|
||||
<ul class="ul-{{ $ulNr }}{{ if (gt $ulNr 1)}} foldable{{end}}">
|
||||
{{ $pages := where (union $s.Pages $s.Sections).ByWeight ".Params.toc_hide" "!=" true -}}
|
||||
{{ with site.Params.language_alternatives -}}
|
||||
{{ range . }}
|
||||
{{ with (where $.section.Translations ".Lang" . ) -}}
|
||||
{{ $p := index . 0 -}}
|
||||
{{ $pages = where ( $pages | lang.Merge (union $p.Pages $p.Sections)) ".Params.toc_hide" "!=" true -}}
|
||||
{{ end -}}
|
||||
{{ end -}}
|
||||
{{ end -}}
|
||||
{{ $pages := $pages | first 50 -}}
|
||||
{{ range $pages -}}
|
||||
{{ if (not (and (eq $s $p.Site.Home) (eq .Params.toc_root true)) ) -}}
|
||||
{{ $mid := printf "m-%s" (.RelPermalink | anchorize) -}}
|
||||
{{ $active := eq . $p -}}
|
||||
{{ $isForeignLanguage := (ne (string .Lang) (string $.currentLang)) -}}
|
||||
{{ if (and $isForeignLanguage ($p.IsDescendant $s)) -}}
|
||||
<a class="td-sidebar-link td-sidebar-link__page {{ if and (not $shouldDelayActive) $active }} active{{ end }}" id="{{ $mid }}" {{ if $isForeignLanguage }}target="_blank"{{ end }} href="{{ .RelPermalink }}">
|
||||
{{ .LinkTitle }}{{ if $isForeignLanguage }} <small>({{ .Lang | upper }})</small>{{ end -}}
|
||||
</a>
|
||||
{{ else -}}
|
||||
{{ template "section-tree-nav-section" (dict "page" $p "section" . "currentLang" $.currentLang "shouldDelayActive" $shouldDelayActive "sidebarMenuTruncate" $sidebarMenuTruncate "ulNr" $ulNr "ulShow" $ulShow) }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
</ul>
|
||||
{{ if not $treeRoot }}
|
||||
<a href="{{ $manualLink }}"{{ if ne $s.LinkTitle $manualLinkTitle }} title="{{ $manualLinkTitle }}"{{ end }}{{ with $s.Params.manualLinkTarget }} target="{{ . }}"{{ if eq . "_blank" }} rel="noopener"{{ end }}{{ end }} class="align-left pl-0{{ if $active}} active{{ end }} td-sidebar-link{{ if $s.IsPage }} td-sidebar-link__page{{ else }} td-sidebar-link__section{{ end }}{{ if $treeRoot }} tree-root{{ end }}" id="{{ $mid }}">{{ with $s.Params.Icon}}<i class="{{ . }}"></i>{{ end }}<span class="{{ if $active }}td-sidebar-nav-active-item{{ end }}">{{ $s.LinkTitle }}</span>{{ if $isForeignLanguage }} <small title="{{ T (printf "i18n_language_name_long_%s" $s.Lang ) }}">({{ $s.Lang | upper }})</small>{{ end -}}</a>
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
{{- if $withChild }}
|
||||
{{- $ulNr := add $ulNr 1 }}
|
||||
<ul class="ul-{{ $ulNr }}{{ if (gt $ulNr 1)}} foldable{{end}}">
|
||||
{{ range $pages -}}
|
||||
{{ $mid := printf "m-%s" (.RelPermalink | anchorize) -}}
|
||||
{{ if (not (and (eq $s $p.Site.Home) (eq .Params.toc_root true))) -}}
|
||||
{{ template "section-tree-nav-section" (dict "page" $p "section" . "shouldDelayActive" $shouldDelayActive "sidebarMenuTruncate" $sidebarMenuTruncate "ulNr" $ulNr "ulShow" $ulShow "currentLang" $currentLang) }}
|
||||
{{- end }}
|
||||
{{- end }}
|
||||
</ul>
|
||||
{{- end }}
|
||||
</li>
|
||||
{{- end }}
|
||||
{{- end }}
|
|
@ -14,23 +14,22 @@ feature:
|
|||
Note that markdown can be used in the description.
|
||||
|
||||
*/}}
|
||||
<section id="features">
|
||||
<div class="main-section">
|
||||
<h3 class="center">{{ T "main_kubernetes_features" }}</h3>
|
||||
<section class="features-container" id="features">
|
||||
{{- with resources.Get "images/wheel.svg" -}}
|
||||
<img class="kubernetes-logo wheel" src="{{ .RelPermalink }}" alt="">
|
||||
{{- end -}}
|
||||
|
||||
<h2 class="k8s-features-heading">{{ T "main_kubernetes_features" }}</h2>
|
||||
<div>
|
||||
{{ $pages := where site.Pages ".Params.feature" "!=" nil }}
|
||||
{{range $i, $p := $pages }}
|
||||
{{ if and (gt $i 0) (modBool $i 2) }}</div>{{ end }}
|
||||
{{ if modBool $i 2 }}
|
||||
<div class="feature-box">
|
||||
{{ end }}
|
||||
<div>
|
||||
{{ range $i, $p := $pages }}
|
||||
<div class="feature-box">
|
||||
{{ with .Params.feature }}
|
||||
<h4><a href="{{ $p.RelPermalink}}{{ with .anchor }}#{{ . | anchorize }}{{ end }}">{{ .title }}</a></h4>
|
||||
<h3><a href="{{ $p.RelPermalink}}{{ with .anchor }}#{{ . | anchorize }}{{ end }}">{{ .title }}</a></h3>
|
||||
{{ $description := .description | default $p.Params.description }}
|
||||
{{ $description | markdownify }}
|
||||
{{ end }}
|
||||
</div>
|
||||
|
||||
{{ end }}
|
||||
</div>
|
||||
</section>
|
||||
|
|
|
@ -5,9 +5,6 @@
|
|||
{{- $glossaryItems := $glossaryBundle.Resources.ByType "page" -}}
|
||||
{{- $term_info := $glossaryItems.GetMatch (printf "%s.md" $id ) -}}
|
||||
{{- $showFullDefinition := false -}}
|
||||
{{- if not $term_info -}}
|
||||
{{- errorf "[%s] %q: %q is not a valid glossary term_id, see ./docs/reference/glossary/* for a full list" site.Language.Lang .Page.Path $id -}}
|
||||
{{- end -}}
|
||||
{{- if or (eq "long" $length) (eq "all" $length) -}}
|
||||
{{- $showFullDefinition = true -}}
|
||||
{{- else if (eq "short" $length) -}}
|
||||
|
@ -15,9 +12,9 @@
|
|||
{{- else -}}
|
||||
{{- errorf "[%s] %q: invalid glossary definition length %q" site.Language.Lang .Page.Path $length -}}
|
||||
{{- end -}}
|
||||
{{- with $term_info.Content -}}
|
||||
{{- with $term_info -}}
|
||||
{{- if not $showFullDefinition -}}
|
||||
{{- $firstPara := index (findRE "(?s)<p>.*?</p>" . 1) 0 -}}
|
||||
{{- $firstPara := index (findRE "(?s)<p>.*?</p>" .Content 1) 0 -}}
|
||||
{{- $firstPara := $firstPara | strings.TrimSuffix "</p>" | strings.TrimPrefix "<p>" -}}
|
||||
{{- $first := slicestr $firstPara 0 1 | lower -}}
|
||||
{{- if $prepend -}}
|
||||
|
@ -28,13 +25,15 @@
|
|||
{{- end -}}
|
||||
{{- else -}}
|
||||
{{- if $prepend -}}
|
||||
{{- $firstPara := index (findRE "(?s)<p>.*?</p>" . 1) 0 -}}
|
||||
{{- $firstPara := index (findRE "(?s)<p>.*?</p>" .Content 1) 0 -}}
|
||||
{{- $firstPara := $firstPara | strings.TrimSuffix "</p>" | strings.TrimPrefix "<p>" -}}
|
||||
{{- $first := slicestr $firstPara 0 1 | lower -}}
|
||||
{{- $prepended := printf "<p>%s %s%s</p>" $prepend $first (slicestr $firstPara 1) -}}
|
||||
{{- replace . $firstPara $prepended | safeHTML -}}
|
||||
{{- replace .Content $firstPara $prepended | safeHTML -}}
|
||||
{{- else -}}
|
||||
{{- . -}}
|
||||
{{- .Content -}}
|
||||
{{- end -}}
|
||||
{{- end -}}
|
||||
{{- else -}}
|
||||
{{- errorf "[%s] %q: %q is not a valid glossary term_id, see ./docs/reference/glossary/* for a full list" site.Language.Lang .Page.Path $id -}}
|
||||
{{- end -}}
|
||||
|
|
|
@ -1 +0,0 @@
|
|||
<a href="{{- printf "/docs/reference/generated/kubernetes-api/%s" site.Params.latest -}}" target="_blank">API reference docs</a>
|
|
@ -1 +1 @@
|
|||
Subproject commit 1c77bb24483946f11c13f882f836a940b55ad019
|
||||
Subproject commit 9f55cf34808d720bcfff9398c9f9bb7fd8fce4ec
|