Compare commits


No commits in common. "v1.13.1-k3s2" and "master" have entirely different histories.

9077 changed files with 81302 additions and 1633688 deletions

7
.clomonitor.yml Normal file

@@ -0,0 +1,7 @@
# CLOMonitor metadata file
# This file must be located at the root of the repository

# Checks exemptions
exemptions:
  - check: artifacthub_badge # Check identifier (see https://github.com/cncf/clomonitor/blob/main/docs/checks.md#exemptions)
    reason: "K3s is a bundled Kubernetes distribution; it is installed via the k3s binary or script, or shipped as part of an OS, and therefore does not appear on Artifact Hub independently."

8
.dockerignore Normal file

@@ -0,0 +1,8 @@
./bin
./etc
./pkg/data/zz_generated_bindata.go
./.vagrant
./.cache
./.dapper
./.trash-cache
./.git/objects/pack

595
.drone.yml Normal file

@@ -0,0 +1,595 @@
---
kind: pipeline
name: amd64

platform:
  os: linux
  arch: amd64

trigger:
  event:
    exclude:
    - cron
    - pull_request

clone:
  retries: 3

steps:
- name: build
  image: rancher/dapper:v0.6.0
  secrets: [ AWS_SECRET_ACCESS_KEY-k3s-ci-uploader, AWS_ACCESS_KEY_ID-k3s-ci-uploader, unprivileged_github_token ]
  environment:
    GITHUB_TOKEN:
      from_secret: unprivileged_github_token
    AWS_SECRET_ACCESS_KEY:
      from_secret: AWS_SECRET_ACCESS_KEY-k3s-ci-uploader
    AWS_ACCESS_KEY_ID:
      from_secret: AWS_ACCESS_KEY_ID-k3s-ci-uploader
  commands:
  - dapper ci
  - echo "${DRONE_TAG}-amd64" | sed -e 's/+/-/g' >.tags
  volumes:
  - name: docker
    path: /var/run/docker.sock

- name: fossa
  image: rancher/drone-fossa:latest
  failure: ignore
  settings:
    api_key:
      from_secret: FOSSA_API_KEY
  when:
    instance:
    - drone-publish.k3s.io
    ref:
      include:
      - "refs/heads/master"
      - "refs/heads/release-*"
    event:
    - push
    - tag

- name: docker-publish
  image: plugins/docker
  settings:
    dockerfile: package/Dockerfile
    password:
      from_secret: docker_password
    repo: "rancher/k3s"
    username:
      from_secret: docker_username
    build_args_from_env:
    - DRONE_TAG
  when:
    instance:
    - drone-publish.k3s.io
    ref:
    - refs/heads/master
    - refs/tags/*
    event:
    - tag

- name: test
  image: rancher/dapper:v0.6.0
  secrets: [ AWS_SECRET_ACCESS_KEY-k3s-ci-uploader, AWS_ACCESS_KEY_ID-k3s-ci-uploader ]
  environment:
    ENABLE_REGISTRY: 'true'
    AWS_SECRET_ACCESS_KEY:
      from_secret: AWS_SECRET_ACCESS_KEY-k3s-ci-uploader
    AWS_ACCESS_KEY_ID:
      from_secret: AWS_ACCESS_KEY_ID-k3s-ci-uploader
  commands:
  - docker build --target test-k3s -t k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT} -f Dockerfile.test .
  - >
    docker run -i -e REPO -e TAG -e DRONE_TAG -e DRONE_BUILD_EVENT -e IMAGE_NAME -e AWS_SECRET_ACCESS_KEY -e AWS_ACCESS_KEY_ID -e SONOBUOY_VERSION -e ENABLE_REGISTRY
    -v /var/run/docker.sock:/var/run/docker.sock --privileged --network host -v /tmp:/tmp k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT}
  volumes:
  - name: docker
    path: /var/run/docker.sock

volumes:
- name: docker
  host:
    path: /var/run/docker.sock

---
kind: pipeline
name: conformance

platform:
  os: linux
  arch: amd64

trigger:
  event:
  - cron
  cron:
  - nightly

steps:
- name: build
  image: rancher/dapper:v0.6.0
  commands:
  - dapper ci
  - echo "${DRONE_TAG}-amd64" | sed -e 's/+/-/g' >.tags
  volumes:
  - name: docker
    path: /var/run/docker.sock

- name: test
  image: rancher/dapper:v0.6.0
  environment:
    ENABLE_REGISTRY: 'true'
  commands:
  - docker build --target test-k3s -t k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT} -f Dockerfile.test .
  - >
    docker run -i -e REPO -e TAG -e DRONE_TAG -e DRONE_BUILD_EVENT -e IMAGE_NAME -e SONOBUOY_VERSION -e ENABLE_REGISTRY
    -v /var/run/docker.sock:/var/run/docker.sock --privileged --network host -v /tmp:/tmp k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT}
  volumes:
  - name: docker
    path: /var/run/docker.sock

volumes:
- name: docker
  host:
    path: /var/run/docker.sock

---
kind: pipeline
name: arm64

platform:
  os: linux
  arch: arm64

trigger:
  event:
    exclude:
    - cron
    - pull_request

clone:
  retries: 3

steps:
- name: build
  image: rancher/dapper:v0.6.0
  secrets: [ AWS_SECRET_ACCESS_KEY-k3s-ci-uploader, AWS_ACCESS_KEY_ID-k3s-ci-uploader ]
  environment:
    AWS_SECRET_ACCESS_KEY:
      from_secret: AWS_SECRET_ACCESS_KEY-k3s-ci-uploader
    AWS_ACCESS_KEY_ID:
      from_secret: AWS_ACCESS_KEY_ID-k3s-ci-uploader
  commands:
  - dapper ci
  - echo "${DRONE_TAG}-arm64" | sed -e 's/+/-/g' >.tags
  volumes:
  - name: docker
    path: /var/run/docker.sock

- name: docker-publish
  image: plugins/docker
  settings:
    dockerfile: package/Dockerfile
    password:
      from_secret: docker_password
    repo: "rancher/k3s"
    username:
      from_secret: docker_username
    build_args_from_env:
    - DRONE_TAG
  when:
    instance:
    - drone-publish.k3s.io
    ref:
    - refs/heads/master
    - refs/tags/*
    event:
    - tag

- name: test
  image: rancher/dapper:v0.6.0
  secrets: [ AWS_SECRET_ACCESS_KEY-k3s-ci-uploader, AWS_ACCESS_KEY_ID-k3s-ci-uploader ]
  environment:
    ENABLE_REGISTRY: 'true'
    AWS_SECRET_ACCESS_KEY:
      from_secret: AWS_SECRET_ACCESS_KEY-k3s-ci-uploader
    AWS_ACCESS_KEY_ID:
      from_secret: AWS_ACCESS_KEY_ID-k3s-ci-uploader
  commands:
  - docker build --target test-k3s -t k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT} -f Dockerfile.test .
  - >
    docker run -i -e REPO -e TAG -e DRONE_TAG -e DRONE_BUILD_EVENT -e IMAGE_NAME -e AWS_SECRET_ACCESS_KEY -e AWS_ACCESS_KEY_ID -e SONOBUOY_VERSION -e ENABLE_REGISTRY
    -v /var/run/docker.sock:/var/run/docker.sock --privileged --network host -v /tmp:/tmp k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT}
  volumes:
  - name: docker
    path: /var/run/docker.sock

volumes:
- name: docker
  host:
    path: /var/run/docker.sock

---
kind: pipeline
name: arm

platform:
  os: linux
  arch: arm

trigger:
  event:
    exclude:
    - cron

clone:
  retries: 3

steps:
- name: skipfiles
  image: plugins/git
  commands:
  - export NAME=$(test $DRONE_BUILD_EVENT = pull_request && echo remotes/origin/${DRONE_COMMIT_BRANCH:-master} || echo ${DRONE_COMMIT_SHA}~)
  - export DIFF=$(git --no-pager diff --name-only $NAME | grep -v -f .droneignore);
  - if [ -z "$DIFF" ]; then
      echo "All files in PR are on ignore list";
      exit 78;
    else
      echo "Some files in PR are not ignored, $DIFF";
    fi;
  when:
    event:
    - pull_request

- name: build
  # Keeping Dapper at v0.5.0 for armv7, as newer versions fail with
  # Bad system call on this architecture. xref:
  #
  # https://github.com/k3s-io/k3s/pull/8959#discussion_r1439736566
  # https://drone-pr.k3s.io/k3s-io/k3s/7922/3/3
  image: rancher/dapper:v0.5.0
  secrets: [ AWS_SECRET_ACCESS_KEY-k3s-ci-uploader, AWS_ACCESS_KEY_ID-k3s-ci-uploader ]
  environment:
    AWS_SECRET_ACCESS_KEY:
      from_secret: AWS_SECRET_ACCESS_KEY-k3s-ci-uploader
    AWS_ACCESS_KEY_ID:
      from_secret: AWS_ACCESS_KEY_ID-k3s-ci-uploader
  commands:
  - dapper ci
  - echo "${DRONE_TAG}-arm" | sed -e 's/+/-/g' >.tags
  volumes:
  - name: docker
    path: /var/run/docker.sock

- name: docker-publish
  image: plugins/docker:linux-arm
  settings:
    dockerfile: package/Dockerfile
    password:
      from_secret: docker_password
    repo: "rancher/k3s"
    username:
      from_secret: docker_username
    build_args_from_env:
    - DRONE_TAG
  when:
    instance:
    - drone-publish.k3s.io
    ref:
    - refs/heads/master
    - refs/tags/*
    event:
    - tag

- name: test
  # Refer to comment for arm/build.
  image: rancher/dapper:v0.5.0
  secrets: [ AWS_SECRET_ACCESS_KEY-k3s-ci-uploader, AWS_ACCESS_KEY_ID-k3s-ci-uploader ]
  environment:
    ENABLE_REGISTRY: 'true'
    AWS_SECRET_ACCESS_KEY:
      from_secret: AWS_SECRET_ACCESS_KEY-k3s-ci-uploader
    AWS_ACCESS_KEY_ID:
      from_secret: AWS_ACCESS_KEY_ID-k3s-ci-uploader
  commands:
  - docker build --target test-k3s -t k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT} -f Dockerfile.test .
  - >
    docker run -i -e REPO -e TAG -e DRONE_TAG -e DRONE_BUILD_EVENT -e IMAGE_NAME -e AWS_SECRET_ACCESS_KEY -e AWS_ACCESS_KEY_ID -e SONOBUOY_VERSION -e ENABLE_REGISTRY
    -v /var/run/docker.sock:/var/run/docker.sock --privileged --network host -v /tmp:/tmp k3s:test-${DRONE_STAGE_ARCH}-${DRONE_COMMIT}
  volumes:
  - name: docker
    path: /var/run/docker.sock

volumes:
- name: docker
  host:
    path: /var/run/docker.sock

---
kind: pipeline
name: manifest

platform:
  os: linux
  arch: amd64

steps:
- name: skipfiles
  image: plugins/git
  commands:
  - export NAME=$(test $DRONE_BUILD_EVENT = pull_request && echo remotes/origin/${DRONE_COMMIT_BRANCH:-master} || echo ${DRONE_COMMIT_SHA}~)
  - export DIFF=$(git --no-pager diff --name-only $NAME | grep -v -f .droneignore);
  - if [ -z "$DIFF" ]; then
      echo "All files in PR are on ignore list";
      exit 78;
    else
      echo "Some files in PR are not ignored, $DIFF";
    fi;
  when:
    event:
    - push
    - pull_request

- name: manifest
  image: plugins/docker
  environment:
    DOCKER_USERNAME:
      from_secret: docker_username
    DOCKER_PASSWORD:
      from_secret: docker_password
  settings:
    dry_run: true
    dockerfile: Dockerfile.manifest
    repo: "rancher/k3s-manifest"
    build_args_from_env:
    - DOCKER_USERNAME
    - DOCKER_PASSWORD
    - DRONE_TAG

trigger:
  instance:
  - drone-publish.k3s.io
  ref:
  - refs/heads/master
  - refs/tags/*
  event:
    include:
    - tag
    exclude:
    - cron

depends_on:
- amd64
- arm64
- arm

---
kind: pipeline
name: dispatch

platform:
  os: linux
  arch: amd64

clone:
  retries: 3

steps:
- name: skipfiles
  image: plugins/git
  commands:
  - export NAME=$(test $DRONE_BUILD_EVENT = pull_request && echo remotes/origin/${DRONE_COMMIT_BRANCH:-master} || echo ${DRONE_COMMIT_SHA}~)
  - export DIFF=$(git --no-pager diff --name-only $NAME | grep -v -f .droneignore);
  - if [ -z "$DIFF" ]; then
      echo "All files in PR are on ignore list";
      exit 78;
    else
      echo "Some files in PR are not ignored, $DIFF";
    fi;
  when:
    event:
    - push
    - pull_request

- name: dispatch
  image: curlimages/curl:7.74.0
  secrets: [ pat_username, github_token, release_token_k3s ]
  user: root
  environment:
    PAT_USERNAME:
      from_secret: pat_username
    PAT_TOKEN:
      from_secret: github_token
    K3S_RELEASE_TOKEN:
      from_secret: release_token_k3s
  commands:
  - apk -U --no-cache add bash
  - scripts/dispatch

trigger:
  instance:
  - drone-publish.k3s.io
  ref:
  - refs/heads/master
  - refs/tags/*
  event:
  - tag

depends_on:
- manifest

---
kind: pipeline
name: e2e
type: docker

platform:
  os: linux
  arch: amd64

clone:
  retries: 3

steps:
- name: skipfiles
  image: plugins/git
  commands:
  - export NAME=$(test $DRONE_BUILD_EVENT = pull_request && echo remotes/origin/${DRONE_COMMIT_BRANCH:-master} || echo ${DRONE_COMMIT_SHA}~)
  - export DIFF=$(git --no-pager diff --name-only $NAME | grep -v -f .droneignore);
  - if [ -z "$DIFF" ]; then
      echo "All files in PR are on ignore list";
      exit 78;
    else
      echo "Some files in PR are not ignored, $DIFF";
    fi;
  when:
    event:
    - push
    - pull_request

- name: build-e2e-image
  image: docker:25.0.5
  commands:
  - DOCKER_BUILDKIT=1 docker build --target test-e2e -t test-e2e -f Dockerfile.test .
  - apk add make git bash
  - GOCOVER=1 make local-binary
  - cp dist/artifacts/* /tmp/artifacts/
  volumes:
  - name: cache
    path: /tmp/artifacts
  - name: docker
    path: /var/run/docker.sock

- name: test-e2e-validatecluster
  depends_on:
  - build-e2e-image
  image: test-e2e
  pull: never
  resources:
    cpu: 6000
    memory: 10Gi
  environment:
    E2E_REGISTRY: 'true'
    E2E_GOCOVER: 'true'
  commands:
  - mkdir -p dist/artifacts
  - cp /tmp/artifacts/* dist/artifacts/
  # Cleanup VMs that are older than 2h. Happens if a previous test panics or is canceled
  - tests/e2e/scripts/cleanup_vms.sh
  - tests/e2e/scripts/drone_registries.sh
  - |
    cd tests/e2e/validatecluster
    ../scripts/cleanup_vms.sh 'validatecluster_([0-9]+)_(server|agent)'
    go test -v -timeout=45m ./validatecluster_test.go -ci -local
    cp ./coverage.out /tmp/artifacts/validate-coverage.out
  volumes:
  - name: libvirt
    path: /var/run/libvirt/
  - name: docker
    path: /var/run/docker.sock
  - name: cache
    path: /tmp/artifacts

- name: test-e2e-splitserver
  depends_on:
  - build-e2e-image
  image: test-e2e
  pull: never
  resources:
    cpu: 6000
    memory: 10Gi
  environment:
    E2E_REGISTRY: 'true'
    E2E_GOCOVER: 'true'
  commands:
  - mkdir -p dist/artifacts
  - cp /tmp/artifacts/* dist/artifacts/
  - tests/e2e/scripts/drone_registries.sh
  - |
    cd tests/e2e/splitserver
    ../scripts/cleanup_vms.sh 'splitserver_([0-9]+)'
    # Stagger the launch of this test with the validatecluster test
    # to prevent conflicts over libvirt network interfaces
    sleep 15
    go test -v -timeout=30m ./splitserver_test.go -ci -local
    cp ./coverage.out /tmp/artifacts/split-coverage.out
  volumes:
  - name: libvirt
    path: /var/run/libvirt/
  - name: docker
    path: /var/run/docker.sock
  - name: cache
    path: /tmp/artifacts

- name: test-e2e-upgradecluster
  depends_on:
  - build-e2e-image
  image: test-e2e
  pull: never
  resources:
    cpu: 6000
    memory: 10Gi
  environment:
    E2E_REGISTRY: 'true'
    E2E_GOCOVER: 'true'
  commands:
  - mkdir -p dist/artifacts
  - cp /tmp/artifacts/* dist/artifacts/
  - tests/e2e/scripts/drone_registries.sh
  - |
    if [ "$DRONE_BUILD_EVENT" = "pull_request" ]; then
      cd tests/e2e/upgradecluster
      # Convert release-1.XX branch to v1.XX channel
      if [ "$DRONE_BRANCH" = "master" ]; then
        UPGRADE_CHANNEL="latest"
      else
        UPGRADE_CHANNEL=$(echo $DRONE_BRANCH | sed 's/release-/v/')
        # Check if the UPGRADE_CHANNEL exists, in the case of new minor releases it won't
        if ! curl --head --silent --fail https://update.k3s.io/v1-release/channels/$UPGRADE_CHANNEL; then
          UPGRADE_CHANNEL="latest"
        fi
      fi
      ../scripts/cleanup_vms.sh 'upgradecluster_([0-9]+)_(server|agent)'
      # Stagger the launch of this test with the splitserver test
      # to prevent conflicts over libvirt network interfaces
      sleep 30
      E2E_RELEASE_CHANNEL=$UPGRADE_CHANNEL go test -v -timeout=45m ./upgradecluster_test.go -ci -local -ginkgo.v
      cp ./coverage.out /tmp/artifacts/upgrade-coverage.out
    fi
  volumes:
  - name: libvirt
    path: /var/run/libvirt/
  - name: docker
    path: /var/run/docker.sock
  - name: cache
    path: /tmp/artifacts

- name: upload to codecov
  depends_on:
  - test-e2e-validatecluster
  - test-e2e-splitserver
  - test-e2e-upgradecluster
  image: robertstettner/drone-codecov
  settings:
    token:
      from_secret: codecov_token
    files:
    - /tmp/artifacts/validate-coverage.out
    - /tmp/artifacts/split-coverage.out
    - /tmp/artifacts/upgrade-coverage.out
    flags:
    - e2etests
  when:
    event:
    - push
  volumes:
  - name: cache
    path: /tmp/artifacts

volumes:
- name: docker
  host:
    path: /var/run/docker.sock
- name: libvirt
  host:
    path: /var/run/libvirt/
- name: cache
  temp: {}

8
.droneignore Normal file

@@ -0,0 +1,8 @@
^.*\.md$
^\.droneignore$
^\.github\/.*$
^MAINTAINERS$
^CODEOWNERS$
^LICENSE$
^DCO$
^channel\.yaml$
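These anchored patterns are consumed by the `skipfiles` steps in the .drone.yml above via `grep -v -f .droneignore`. A minimal local sketch of that filter, assuming a git checkout and using `origin/master` as an illustrative diff base:

  # List changed files that do NOT match any .droneignore pattern.
  # In CI, an empty result makes the step exit 78, Drone's "skip this build" code.
  DIFF=$(git --no-pager diff --name-only origin/master | grep -v -f .droneignore)
  if [ -z "$DIFF" ]; then
    echo "All files are on the ignore list"
  else
    echo "Files not ignored: $DIFF"
  fi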

11
.gitattributes vendored

@ -1,11 +0,0 @@
hack/verify-flags/known-flags.txt merge=union
test/test_owners.csv merge=union
**/zz_generated.*.go linguist-generated=true
**/types.generated.go linguist-generated=true
**/generated.pb.go linguist-generated=true
**/generated.proto
**/types_swagger_doc_generated.go linguist-generated=true
docs/api-reference/** linguist-generated=true
api/swagger-spec/*.json linguist-generated=true
api/openapi-spec/*.json linguist-generated=true

9
.github/.codecov.yml vendored Normal file

@@ -0,0 +1,9 @@
coverage:
  status:
    project:
      default: false # disable the default status that measures entire project
      pkg: # declare a new status context "pkg"
        paths:
          - pkg/* # only include coverage in "pkg/" folder
        informational: true # Always pass check
    patch: off # disable the commit only checks


@ -1,27 +0,0 @@
---
name: Bug Report
about: Report a bug encountered while operating Kubernetes
---
<!-- Please use this template while reporting a bug and provide as much info as possible. Not doing so may result in your bug not being addressed in a timely manner. Thanks!-->
**What happened**:
**What you expected to happen**:
**How to reproduce it (as minimally and precisely as possible)**:
**Anything else we need to know?**:
**Environment**:
- Kubernetes version (use `kubectl version`):
- Cloud provider or hardware configuration:
- OS (e.g. from /etc/os-release):
- Kernel (e.g. `uname -a`):
- Install tools:
- Others:
<!-- DO NOT EDIT BELOW THIS LINE -->
/kind bug

36
.github/ISSUE_TEMPLATE/bug_report.md vendored Normal file

@@ -0,0 +1,36 @@
---
name: Bug report
about: Create a report to help us improve
title: ''
labels: ''
assignees: ''
---

<!-- Thanks for helping us to improve K3S! We welcome all bug reports. Please fill out each area of the template so we can better help you. Comments like this will be hidden when you post but you can delete them if you wish. -->

**Environmental Info:**
K3s Version:
<!-- Provide the output from "k3s -v" -->

Node(s) CPU architecture, OS, and Version:
<!-- Provide the output from "uname -a" on the node(s) -->

Cluster Configuration:
<!-- Provide some basic information on the cluster configuration. For example, "3 servers, 2 agents". -->

**Describe the bug:**
<!-- A clear and concise description of what the bug is. -->

**Steps To Reproduce:**
<!-- Steps to reproduce the behavior. Please include as the first step how you installed K3s on the node(s) (including all flags or environment variables). If you have customized configuration via systemd drop-ins or overrides (https://coreos.com/os/docs/latest/using-systemd-drop-in-units.html) please include those as well. -->
- Installed K3s:

**Expected behavior:**
<!-- A clear and concise description of what you expected to happen. -->

**Actual behavior:**
<!-- A clear and concise description of what actually happened. -->

**Additional context / logs:**
<!-- Add any other context and/or logs about the problem here. -->

37
.github/ISSUE_TEMPLATE/cut_release.md vendored Normal file

@@ -0,0 +1,37 @@
---
name: Cut Release
about: Create a new release checklist
title: 'Cut VERSION'
labels: 'kind/release'
assignees: ''
---

**Summary:**
Task covering patch release work.

Dev Complete: RELEASE_DATE (Typically ~1 week prior to upstream release date)

**List of required releases:**

_To release as soon as able for QA:_
- VERSION

_To release once we have approval from QA:_
- VERSION (Never release on a Friday unless specified otherwise)

**Prep work:**
- [ ] PM: Dev and QA team to be notified of the incoming releases - add event to team calendar
- [ ] PM: Dev and QA team to be notified of the date we will mark the latest release as stable - add event to team calendar [ONLY APPLICABLE FOR LATEST MINOR RELEASE]
- [ ] QA: Review changes and understand testing efforts
- [ ] Release Captain: Prepare release notes in our private release-notes repo (submit PR for changes taking care to carefully check links and the components; once merged, create the release in GitHub, mark it as a draft, check the pre-release box, fill in the title, set the target release branch, and leave the tag version blank for now until we are ready to release)
- [ ] QA: Validate and close out all issues in the release milestone.

**Vendor and release work:**
- [ ] Release Captain: Vendor in the new patch version and release rancher/kubernetes
- [ ] Release Captain: Tag and release any necessary RCs for QA to test K3s and KDM on the Rancher side
- [ ] Release Captain: Tag and release once we have QA approval

**Post-Release work:**
- [ ] Release Captain: Once release is fully complete (CI is all green and all release artifacts exist), edit the release, uncheck "Pre-release", and save.
- [ ] Release Captain: Prepare PRs as needed to update [KDM](https://github.com/rancher/kontainer-driver-metadata/) in the appropriate dev branches.
- [ ] PM: Close the milestone in GitHub.


@ -1,13 +0,0 @@
---
name: Enhancement Request
about: Suggest an enhancement to the Kubernetes project
---
<!-- Please only use this template for submitting enhancement requests -->
**What would you like to be added**:
**Why is this needed**:
<!-- DO NOT EDIT BELOW THIS LINE -->
/kind feature


@ -1,22 +0,0 @@
---
name: Failing Test
about: Report test failures in Kubernetes CI jobs
---
<!-- Please only use this template for submitting reports about failing tests in Kubernetes CI jobs -->
**Which jobs are failing**:
**Which test(s) are failing**:
**Since when has it been failing**:
**Testgrid link**:
**Reason for failure**:
**Anything else we need to know**:
<!-- DO NOT EDIT BELOW THIS LINE -->
/kind failing-test


@@ -0,0 +1,23 @@
---
name: Feature request
about: Suggest an idea for this project
title: ''
labels: ''
assignees: ''
---

<!-- Thanks for helping us to improve K3S! We welcome all feature requests. Please fill out each area of the template so we can better help you. Comments like this will be hidden when you post but you can delete them if you wish. -->

**Is your feature request related to a problem? Please describe.**
<!-- A clear and concise description of what the problem is. Ex. I'm always frustrated when [...] -->

**Describe the solution you'd like**
<!-- A clear and concise description of what you want to happen. -->

**Describe alternatives you've considered**
<!-- A clear and concise description of any alternative solutions or features you've considered. -->

**Additional context**
<!-- Add any other context or screenshots about the feature request here. -->

26
.github/ISSUE_TEMPLATE/os_validation.md vendored Normal file

@@ -0,0 +1,26 @@
---
name: Validate Operating System
about: Request validation of an operating system
title: 'Validate OS VERSION'
labels: ["kind/os-validation"]
assignees: ''
---

<!-- Thanks for helping us to improve K3s! We welcome all OS validation requests. Please fill out each area of the template so we can better help you. Comments like this will be hidden when you post but you can delete them if you wish. -->

**K3s Versions to be Validated**
<!-- A list of released k3s versions to validate the OS for. -->

**Testing Considerations**
<!-- Add/remove test cases that should be considered in addition to, or instead of, the standard ones below. -->
1. Install and run sonobuoy conformance tests on a hardened cluster
2. Validate SUC upgrade
3. Install Rancher Manager
4. Validate snapshot restore via `cluster-reset-restore-path`

**Additional Information**
<!-- Add any other information or context about the OS here. Ex. "Please validate with Selinux [...]" -->


@ -1,22 +0,0 @@
---
name: Support Request
about: Support request or question relating to Kubernetes
---
<!--
STOP -- PLEASE READ!
GitHub is not the right place for support requests.
If you're looking for help, check [Stack Overflow](https://stackoverflow.com/questions/tagged/kubernetes) and the [troubleshooting guide](https://kubernetes.io/docs/tasks/debug-application-cluster/troubleshooting/).
You can also post your question on the [Kubernetes Slack](http://slack.k8s.io/) or the [Discuss Kubernetes](https://discuss.kubernetes.io/) forum.
If the matter is security related, please disclose it privately via https://kubernetes.io/security/.
-->
<!-- DO NOT EDIT BELOW THIS LINE -->
/triage support

36
.github/PULL_REQUEST_TEMPLATE.md vendored Normal file

@@ -0,0 +1,36 @@
<!-- HTML Comments can be left in place or removed. -->
<!-- Please see our contributing guide at https://github.com/k3s-io/k3s/blob/master/CONTRIBUTING.md for guidance on opening pull requests -->

#### Proposed Changes ####

<!-- Describe the big picture of your changes here to communicate to the maintainers why we should accept this pull request. -->

#### Types of Changes ####

<!-- What types of changes does your code introduce to K3s? Bugfix, New Feature, Breaking Change, etc -->

#### Verification ####

<!-- How can the changes be verified? Please provide whatever additional information necessary to help verify the proposed changes. -->

#### Testing ####

<!-- Is this change covered by testing? If not, consider adding a Unit or Integration test. -->
<!-- See https://github.com/k3s-io/k3s/blob/master/tests/TESTING.md for more info -->

#### Linked Issues ####

<!-- Link any related issues, pull-requests, or commit hashes that are relevant to this pull request. If you are opening a PR without a corresponding issue please consider creating one first, at https://github.com/k3s-io/k3s/issues . A functional example will greatly help QA with verifying/reproducing a bug or testing new features. -->

#### User-Facing Change ####

<!--
Does this PR introduce a user-facing change? If no, just write "NONE" in the release-note block below.
If the PR requires additional action from users switching to the new release, include the string "action required".
-->

```release-note

```

#### Further Comments ####

<!-- If this is a relatively large or complex change, kick off the discussion by explaining why you chose the solution you did and what alternatives you considered, etc... -->

5
.github/SECURITY.md vendored Normal file

@@ -0,0 +1,5 @@
# Security Policy

## Reporting a Vulnerability

K3s supports responsible disclosure and endeavors to resolve security issues in a reasonable timeframe. To report a security vulnerability, email security@k3s.io.

29
.github/actions/setup-go/action.yaml vendored Normal file

@@ -0,0 +1,29 @@
name: 'Setup golang with master only caching'
description: 'A composite action that installs golang, but with a caching strategy that only updates the cache on master branch.'

runs:
  using: 'composite'
  steps:
    - uses: actions/setup-go@v5
      with:
        go-version-file: 'go.mod' # Just use whatever version is in the go.mod file
        cache: ${{ github.ref == 'refs/heads/master' || github.ref == 'refs/heads/release-1.32' }}

    - name: Prepare for go cache
      if: github.ref != 'refs/heads/master' && github.ref != 'refs/heads/release-1.32'
      shell: bash
      run: |
        echo "GO_CACHE=$(go env GOCACHE)" | tee -a "$GITHUB_ENV"
        echo "GO_MODCACHE=$(go env GOMODCACHE)" | tee -a "$GITHUB_ENV"
        echo "GO_VERSION=$(go env GOVERSION | tr -d 'go')" | tee -a "$GITHUB_ENV"

    - name: Setup read-only cache
      if: github.ref != 'refs/heads/master' && github.ref != 'refs/heads/release-1.32'
      uses: actions/cache/restore@v4
      with:
        path: |
          ${{ env.GO_MODCACHE }}
          ${{ env.GO_CACHE }}
        # Match the cache key to the setup-go action https://github.com/actions/setup-go/blob/main/src/cache-restore.ts#L34
        key: setup-go-${{ runner.os }}-${{ env.ImageOS }}-go-${{ env.GO_VERSION }}-${{ hashFiles('go.sum') }}
        restore-keys: |
          setup-go-${{ runner.os }}-
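For reference, the values this action tees into $GITHUB_ENV come straight from the Go toolchain and can be reproduced locally; a sketch (paths and version are machine-dependent):

  go env GOCACHE GOMODCACHE         # the two directories restored by the read-only cache
  go env GOVERSION | tr -d 'go'     # e.g. go1.22.5 -> 1.22.5, as used in the cache key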


@@ -0,0 +1,29 @@
name: 'Setup Vagrant and Libvirt'
description: 'A composite action that installs latest versions of vagrant and libvirt for use on ubuntu based runners'

runs:
  using: 'composite'
  steps:
    - name: Add vagrant to apt-get sources
      shell: bash
      run: |
        curl -fsSL https://apt.releases.hashicorp.com/gpg | sudo gpg --dearmor -o /usr/share/keyrings/hashicorp-archive-keyring.gpg
        echo "deb [signed-by=/usr/share/keyrings/hashicorp-archive-keyring.gpg] https://apt.releases.hashicorp.com $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/hashicorp.list

    - name: Install vagrant and libvirt
      shell: bash
      run: |
        sudo apt-get update
        sudo apt-get install -y libvirt-daemon libvirt-daemon-system vagrant=2.4.1-1 ruby-libvirt
        sudo systemctl enable --now libvirtd

    - name: Install vagrant dependencies
      shell: bash
      run: |
        sudo apt-get install -y --no-install-recommends libxslt-dev libxml2-dev libvirt-dev ruby-bundler ruby-dev zlib1g-dev

    # This is a workaround for the libvirt group not being available in the current shell
    # https://github.com/actions/runner-images/issues/7670#issuecomment-1900711711
    - name: Make the libvirt socket rw accessible to everyone
      shell: bash
      run: |
        sudo chmod a+rw /var/run/libvirt/libvirt-sock

    - name: Install vagrant-libvirt plugin
      shell: bash
      run: vagrant plugin install vagrant-libvirt
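A quick way to confirm the socket workaround took effect on a runner, sketched under the assumption of GNU coreutils `stat` (format flags differ on other systems):

  stat -c '%a' /var/run/libvirt/libvirt-sock   # expect 666 after the chmod above
  vagrant plugin list                          # should list vagrant-libvirt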

2
.github/dco.yml vendored Normal file

@@ -0,0 +1,2 @@
require:
  members: false

65
.github/dependabot.yml vendored Normal file

@@ -0,0 +1,65 @@
version: 2
updates:
  - package-ecosystem: "github-actions"
    directory: "/"
    labels:
      - "kind/dependabot"
    reviewers:
      - "k3s-io/k3s-dev"
    schedule:
      interval: "weekly"

  - package-ecosystem: "docker"
    directory: "/"
    labels:
      - "kind/dependabot"
    reviewers:
      - "k3s-io/k3s-dev"
    schedule:
      interval: "weekly"

  - package-ecosystem: "docker"
    directory: "/conformance"
    labels:
      - "kind/dependabot"
    reviewers:
      - "k3s-io/k3s-dev"
    schedule:
      interval: "weekly"

  - package-ecosystem: "docker"
    directory: "/tests/e2e/scripts"
    labels:
      - "kind/dependabot"
    reviewers:
      - "k3s-io/k3s-dev"
    schedule:
      interval: "weekly"

  - package-ecosystem: "docker"
    directory: "/package"
    labels:
      - "kind/dependabot"
    reviewers:
      - "k3s-io/k3s-dev"
    schedule:
      interval: "weekly"

  - package-ecosystem: "docker"
    directory: "/tests/integration"
    labels:
      - "kind/dependabot"
    reviewers:
      - "k3s-io/k3s-dev"
    schedule:
      interval: "weekly"

  - package-ecosystem: "docker"
    directory: "/tests/terraform"
    labels:
      - "kind/dependabot"
    reviewers:
      - "k3s-io/k3s-dev"
    schedule:
      interval: "weekly"

126
.github/workflows/build-k3s.yaml vendored Normal file

@@ -0,0 +1,126 @@
on:
  workflow_call:
    inputs:
      arch:
        type: string
        description: 'Architecture to build (amd64, arm64, or arm)'
        default: 'amd64'
      os:
        type: string
        description: 'Target OS (linux or windows)'
        default: 'linux'
      upload-image:
        type: boolean
        description: 'Build and upload k3s image (only works on arm64 or amd64)'
        required: false
        default: false
      upload-build:
        type: boolean
        description: 'Upload contents of build/out, used to build the k3s image externally'
        required: false
        default: false

permissions:
  contents: read

jobs:
  build:
    name: Build # DO NOT CHANGE THIS NAME, we rely on it for INSTALL_K3S_PR functionality
    runs-on: ${{ contains(inputs.arch, 'arm') && 'ubuntu-24.04-arm' || 'ubuntu-24.04' }}
    timeout-minutes: 20
    env:
      BIN_EXT: ${{ inputs.os == 'windows' && '.exe' || '' }}
      ARCH_EXT: ${{ inputs.os == 'windows' && '-windows' || format('-{0}', inputs.arch) }}
      GOOS: ${{ inputs.os }}
    steps:
      - name: Checkout K3s
        uses: actions/checkout@v5
      - name: Set up QEMU
        if: inputs.arch == 'arm'
        uses: docker/setup-qemu-action@v3
        with:
          cache-image: false
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Determine Git Version Info
        id: git_vars
        run: |
          source ./scripts/git_version.sh
          {
            echo "git_tag=${GIT_TAG}"
            echo "tree_state=${TREE_STATE}"
            echo "commit=${COMMIT}"
            echo "dirty=${DIRTY}"
          } >> "$GITHUB_OUTPUT"
      - name: Build K3s Binary Native
        if: inputs.arch == 'arm64' || inputs.arch == 'amd64'
        env:
          DOCKER_BUILD_SUMMARY: false
        uses: docker/build-push-action@v6
        with:
          context: .
          file: ./Dockerfile.local
          target: result
          # Defined actions like this don't ingest GITHUB_ENV, so use outputs
          # and manually set the build arguments
          build-args: |
            GIT_TAG=${{ steps.git_vars.outputs.git_tag }}
            TREE_STATE=${{ steps.git_vars.outputs.tree_state }}
            COMMIT=${{ steps.git_vars.outputs.commit }}
            DIRTY=${{ steps.git_vars.outputs.dirty }}
          push: false
          provenance: mode=min
          outputs: type=local,dest=.
      - name: Build K3s Binary Emulated
        if: inputs.arch != 'arm64' && inputs.arch != 'amd64'
        env:
          PLATFORM: ${{ inputs.arch == 'arm' && 'linux/arm/v7' || format('linux/{0}', inputs.arch) }}
          DOCKER_BUILD_SUMMARY: false
        uses: docker/build-push-action@v6
        with:
          context: .
          file: ./Dockerfile.local
          target: result
          build-args: |
            GIT_TAG=${{ steps.git_vars.outputs.git_tag }}
            TREE_STATE=${{ steps.git_vars.outputs.tree_state }}
            COMMIT=${{ steps.git_vars.outputs.commit }}
            DIRTY=${{ steps.git_vars.outputs.dirty }}
          push: false
          provenance: mode=min
          platforms: ${{ env.PLATFORM }}
          outputs: type=local,dest=.
      - name: Calculate binary checksum
        run: |
          if [ ${{ inputs.arch }} == 'amd64' ]; then
            sha256sum dist/artifacts/k3s${{ env.BIN_EXT }} | sed 's|dist/artifacts/||' > dist/artifacts/k3s${{ env.BIN_EXT }}.sha256sum
          elif [ ${{ inputs.arch }} == "arm" ]; then
            sha256sum dist/artifacts/k3s-armhf | sed 's|dist/artifacts/||' > dist/artifacts/k3s${{ env.ARCH_EXT }}.sha256sum
          else
            sha256sum dist/artifacts/k3s${{ env.ARCH_EXT }}${{ env.BIN_EXT }} | sed 's|dist/artifacts/||' > dist/artifacts/k3s${{ env.ARCH_EXT }}${{ env.BIN_EXT }}.sha256sum
          fi
      - name: Build K3s image
        if: inputs.upload-image == true && inputs.os == 'linux' && (inputs.arch == 'amd64' || inputs.arch == 'arm64')
        run: ./scripts/package-image
      - name: "Save K3s image"
        if: inputs.upload-image == true && inputs.os == 'linux'
        run: docker image save rancher/k3s -o ./dist/artifacts/k3s-image.tar
      - name: "Save K3s build"
        if: inputs.upload-build == true && inputs.os == 'linux'
        run: |
          mv ./build/out/data-linux.tar.zst ./dist/artifacts/data-linux${{ env.ARCH_EXT }}.tar.zst
      - name: "Upload K3s Artifacts"
        uses: actions/upload-artifact@v4
        with:
          name: k3s${{ env.ARCH_EXT }}
          path: dist/artifacts/
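Because the `sed 's|dist/artifacts/||'` strips the directory prefix, each generated `.sha256sum` file can be checked with `sha256sum -c` from inside the artifacts directory; a sketch using the amd64 naming above:

  cd dist/artifacts
  sha256sum -c k3s.sha256sum   # prints "k3s: OK" on a match; other arches use k3s-<arch>.sha256sum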

49
.github/workflows/codeql.yml vendored Normal file

@@ -0,0 +1,49 @@
name: "CodeQL Static Analysis"

on:
  schedule:
    - cron: '0 4 * * 1'
  workflow_dispatch: {}

permissions:
  contents: read

jobs:
  analyze:
    name: Analyze (${{ matrix.language }})
    runs-on: 'ubuntu-latest'
    permissions:
      security-events: write
    strategy:
      fail-fast: false
      matrix:
        include:
          - language: actions
            build-mode: none
          # - language: go
          #   build-mode: manual
    steps:
      - name: Checkout repository
        uses: actions/checkout@v5
      # Initializes the CodeQL tools for scanning.
      - name: Initialize CodeQL
        uses: github/codeql-action/init@v3
        with:
          languages: ${{ matrix.language }}
          build-mode: ${{ matrix.build-mode }}
      # - if: matrix.build-mode == 'manual'
      #   shell: bash
      #   run: |
      #     sudo apt-get update && sudo apt-get install -y libseccomp-dev
      #     ./scripts/download
      #     ./scripts/build
      #     ./scripts/package-cli
      - name: Perform CodeQL Analysis
        uses: github/codeql-action/analyze@v3
        with:
          category: "/language:${{matrix.language}}"

217
.github/workflows/e2e.yaml vendored Normal file

@@ -0,0 +1,217 @@
name: E2E Test Coverage

on:
  push:
    paths-ignore:
      - "**.md"
      - "channel.yaml"
      - "install.sh"
      - "tests/**"
      - "!tests/e2e**"
      - "!tests/docker**"
      - ".github/**"
      - "!.github/actions/**"
      - "!.github/workflows/e2e.yaml"
  pull_request:
    paths-ignore:
      - "**.md"
      - "channel.yaml"
      - "install.sh"
      - "tests/**"
      - "!tests/e2e**"
      - "!tests/docker**"
      - ".github/**"
      - "!.github/actions/**"
      - "!.github/workflows/e2e.yaml"
  workflow_dispatch: {}

permissions:
  contents: read

jobs:
  build:
    permissions:
      contents: read
      packages: write # permissions cannot be conditional, so we need to set this for all jobs
    uses: ./.github/workflows/build-k3s.yaml
    with:
      upload-image: true
  build-arm64:
    uses: ./.github/workflows/build-k3s.yaml
    permissions:
      contents: read
      packages: write
    with:
      arch: arm64
      upload-image: true
  e2e:
    name: "E2E Tests"
    needs: build
    runs-on: ubuntu-24.04
    timeout-minutes: 40
    strategy:
      fail-fast: false
      matrix:
        etest: [btrfs, embeddedmirror, externalip, privateregistry, rootless, s3, startup, wasm]
      max-parallel: 5
    steps:
      - name: "Checkout"
        uses: actions/checkout@v5
        with: {fetch-depth: 1}
      - name: Set up vagrant and libvirt
        uses: ./.github/actions/vagrant-setup
      - name: Vagrant R/W Cache
        if: matrix.etest != 'btrfs' && github.ref == 'refs/heads/master'
        uses: actions/cache@v4
        with:
          path: |
            ~/.vagrant.d/boxes
          key: ${{ matrix.etest != 'btrfs' && 'vagrant-box-ubuntu-2404' || 'vagrant-box-leap' }}
      - name: Vagrant Read Cache
        if: matrix.etest != 'btrfs' && github.ref != 'refs/heads/master'
        uses: actions/cache/restore@v4
        with:
          path: |
            ~/.vagrant.d/boxes
          key: ${{ matrix.etest != 'btrfs' && 'vagrant-box-ubuntu-2404' || 'vagrant-box-leap' }}
      - name: "Vagrant Plugin(s)"
        run: vagrant plugin install vagrant-k3s vagrant-reload vagrant-scp
      - name: Install Go
        uses: ./.github/actions/setup-go
      - name: Install Kubectl
        run: |
          curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"
          sudo install -o root -g root -m 0755 kubectl /usr/local/bin/kubectl
      - name: "Download k3s binary"
        uses: actions/download-artifact@v5
        with:
          name: k3s-amd64
          path: ./dist/artifacts
      - name: Run ${{ matrix.etest }} Test
        env:
          E2E_GOCOVER: "true"
        run: |
          chmod +x ./dist/artifacts/k3s
          cd tests/e2e/${{ matrix.etest }}
          go test -timeout=45m ./${{ matrix.etest }}_test.go -test.v -ginkgo.v -ci -local
      - name: On Failure, Upload Journald Logs
        uses: actions/upload-artifact@v4
        if: ${{ failure() }}
        with:
          name: e2e-${{ matrix.etest }}-logs
          path: tests/e2e/${{ matrix.etest }}/*log.txt
          retention-days: 30
      - name: On Failure, Launch Debug Session
        uses: lhotari/action-upterm@v1
        if: ${{ failure() }}
        with:
          ## If no one connects after 5 minutes, shut down server.
          wait-timeout-minutes: 5
      - name: Upload Results To Codecov
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: tests/e2e/${{ matrix.etest }}/coverage.out
          flags: e2etests # optional
          verbose: true # optional (default = false)
  build-go-tests:
    name: "Build Go Tests"
    strategy:
      matrix:
        arch: [amd64, arm64]
    runs-on: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-latest' }}
    outputs:
      channel: ${{ steps.channel_step.outputs.channel }}
    steps:
      - name: Checkout
        uses: actions/checkout@v5
      - name: Install Go
        uses: ./.github/actions/setup-go
      - name: Build Go Tests
        run: |
          mkdir -p ./dist/artifacts
          go test -c -ldflags="-w -s" -o ./dist/artifacts ./tests/docker/...
      - name: Upload Go Tests
        uses: actions/upload-artifact@v4
        with:
          name: docker-go-tests-${{ matrix.arch }}
          path: ./dist/artifacts/*.test
          compression-level: 9
          retention-days: 1
      # For upgrade and skew tests, we need to know the channel this run is based off.
      # Since this is predetermined, we can run this step before the actual test job, saving time.
      - name: Determine channel
        id: channel_step
        run: |
          . ./scripts/version.sh
          MINOR_VER=$(echo $VERSION_TAG | cut -d'.' -f1,2)
          echo "CHANNEL=$MINOR_VER" >> $GITHUB_OUTPUT
      # channel name should be v1.XX or latest
      - name: Fail if channel name does not match pattern
        run: |
          if [[ ! ${{ steps.channel_step.outputs.channel }} =~ ^v1\.[0-9]+$|latest$ ]]; then
            echo "Channel name ${{ steps.channel_step.outputs.channel }} does not match pattern"
            exit 1
          fi
  docker-go:
    needs: [build, build-arm64, build-go-tests]
    name: Docker
    timeout-minutes: 30
    strategy:
      fail-fast: false
      matrix:
        dtest: [autoimport, basics, bootstraptoken, cacerts, etcd, hardened, lazypull, skew, secretsencryption, snapshotrestore, svcpoliciesandfirewall, token, upgrade]
        arch: [amd64, arm64]
        exclude:
          - dtest: autoimport
            arch: arm64
          - dtest: secretsencryption
            arch: arm64
          - dtest: snapshotrestore
            arch: arm64
          - dtest: svcpoliciesandfirewall
            arch: arm64
    runs-on: ${{ matrix.arch == 'arm64' && 'ubuntu-24.04-arm' || 'ubuntu-latest' }}
    env:
      CHANNEL: ${{ needs.build-go-tests.outputs.channel }}
    steps:
      - name: Checkout
        uses: actions/checkout@v5
      - name: "Download K3s image"
        uses: actions/download-artifact@v5
        with:
          name: k3s-${{ matrix.arch }}
          path: ./dist/artifacts
      - name: Load and set K3s image
        run: |
          if [ ${{ matrix.arch }} = "arm64" ]; then
            mv ./dist/artifacts/k3s-arm64 ./dist/artifacts/k3s
          fi
          chmod +x ./dist/artifacts/k3s
          docker image load -i ./dist/artifacts/k3s-image.tar
          IMAGE_TAG=$(docker image ls --format '{{.Repository}}:{{.Tag}}' | grep 'rancher/k3s')
          echo "K3S_IMAGE=$IMAGE_TAG" >> $GITHUB_ENV
      - name: Download Go Tests
        uses: actions/download-artifact@v5
        with:
          name: docker-go-tests-${{ matrix.arch }}
          path: ./dist/artifacts
      - name: Run ${{ matrix.dtest }} Test
        # Put the compiled test binary back in the same place as the test source
        run: |
          chmod +x ./dist/artifacts/${{ matrix.dtest }}.test
          mv ./dist/artifacts/${{ matrix.dtest }}.test ./tests/docker/${{ matrix.dtest }}/
          cd ./tests/docker/${{ matrix.dtest }}
          # These tests use rancher/systemd-node and have different flags.
          CI_TESTS="autoimport hardened secretsencryption snapshotrestore svcpoliciesandfirewall token"
          if [ ${{ matrix.dtest }} = "upgrade" ] || [ ${{ matrix.dtest }} = "skew" ]; then
            ./${{ matrix.dtest }}.test -test.timeout=0 -test.v -ginkgo.v -k3sImage=$K3S_IMAGE -channel=$CHANNEL
          elif [[ $CI_TESTS =~ ${{ matrix.dtest }} ]]; then
            ./${{ matrix.dtest }}.test -test.timeout=0 -test.v -ginkgo.v -ci
          else
            ./${{ matrix.dtest }}.test -test.timeout=0 -test.v -ginkgo.v -k3sImage=$K3S_IMAGE
          fi
21
.github/workflows/epic.yaml vendored Normal file

@@ -0,0 +1,21 @@
name: Update epics

on:
  issues:
    types: [opened, closed, reopened]

permissions:
  contents: read

jobs:
  epics:
    runs-on: ubuntu-latest
    name: Update epic issues
    permissions:
      issues: write
    steps:
      - name: Run epics action
        uses: cloudaper/epics-action@v1
        with:
          github-token: ${{ secrets.GITHUB_TOKEN }}
          epic-label-name: epic
          auto-close-epic: false

28
.github/workflows/govulncheck.yml vendored Normal file

@@ -0,0 +1,28 @@
name: govulncheck

on:
  push:
    paths:
      - go.sum
  schedule:
    - cron: "0 0 * * *"
  workflow_dispatch: {}

permissions: read-all

jobs:
  govulncheck:
    name: govulncheck
    runs-on: ubuntu-latest
    steps:
      - name: Checkout
        uses: actions/checkout@v5
      - name: Install Go
        uses: ./.github/actions/setup-go
      - name: Go Generate
        run: |
          ./scripts/download
          ./scripts/generate
      - name: Install govulncheck
        run: go install golang.org/x/vuln/cmd/govulncheck@latest
      - name: Run govulncheck
        run: govulncheck -format=text ./...

107
.github/workflows/install.yaml vendored Normal file

@@ -0,0 +1,107 @@
name: Install Script

on:
  push:
    branches: [main, master]
    paths:
      - "channel.yaml"
      - "install.sh"
      - "tests/install/**"
      - ".github/workflows/install.yaml"
  pull_request:
    branches: [main, master]
    paths:
      - "install.sh"
      - "tests/install/**"
      - ".github/workflows/install.yaml"
  workflow_dispatch: {}

permissions:
  contents: read

jobs:
  build:
    uses: ./.github/workflows/build-k3s.yaml
  test:
    name: "Smoke Test"
    needs: build
    runs-on: ubuntu-latest
    timeout-minutes: 40
    strategy:
      fail-fast: false
      matrix:
        vm: [centos-9, rocky-8, rocky-9, fedora, opensuse-leap, ubuntu-2404]
      max-parallel: 3
    defaults:
      run:
        working-directory: tests/install/${{ matrix.vm }}
    env:
      INSTALL_K3S_SKIP_DOWNLOAD: binary
    steps:
      - name: "Checkout"
        uses: actions/checkout@v5
        with: {fetch-depth: 1}
      - name: Set up vagrant and libvirt
        uses: ./.github/actions/vagrant-setup
      - name: "Vagrant Cache"
        uses: actions/cache@v4
        with:
          path: |
            ~/.vagrant.d/boxes
          key: vagrant-box-${{ matrix.vm }}
      - name: "Vagrant Plugin(s)"
        run: vagrant plugin install vagrant-k3s vagrant-reload vagrant-scp
      - name: "Download k3s binary"
        uses: actions/download-artifact@v5
        with:
          name: k3s-amd64
          path: tests/install/${{ matrix.vm }}
      - name: "Vagrant Up"
        run: vagrant up --no-tty --no-provision
      - name: "Upload k3s binary to VM"
        run: |
          chmod +x k3s
          vagrant scp k3s /tmp/k3s
          vagrant ssh -c "sudo mv /tmp/k3s /usr/local/bin/k3s"
          vagrant provision --provision-with=k3s-upload
      - name: Add binary to PATH
        if: matrix.vm == 'centos-9' || matrix.vm == 'rocky-8' || matrix.vm == 'rocky-9' || matrix.vm == 'opensuse-leap'
        run: vagrant provision --provision-with=add-bin-path
      - name: "⏩ Install K3s"
        run: |
          vagrant provision --provision-with=k3s-prepare
          vagrant provision --provision-with=k3s-install
          if [ ${{ matrix.vm }} = 'opensuse-microos' ]; then vagrant reload --no-provision; fi
      - name: "⏳ Node"
        run: vagrant provision --provision-with=k3s-wait-for-node
      - name: "⏳ CoreDNS"
        run: vagrant provision --provision-with=k3s-wait-for-coredns
      - name: "⏳ Local Storage"
        run: vagrant provision --provision-with=k3s-wait-for-local-storage
        continue-on-error: true
      - name: "⏳ Metrics Server"
        run: vagrant provision --provision-with=k3s-wait-for-metrics-server
        continue-on-error: true
      - name: "⏳ Traefik"
        run: vagrant provision --provision-with=k3s-wait-for-traefik
        continue-on-error: true
      - name: "k3s-status"
        run: vagrant provision --provision-with=k3s-status
      - name: "k3s-procps"
        run: vagrant provision --provision-with=k3s-procps
      - name: "k3s-mount-directory"
        run: vagrant provision --provision-with=k3s-mount-directory
      - name: "k3s-uninstall"
        run: vagrant provision --provision-with=k3s-uninstall
      - name: "k3s-check-mount"
        run: vagrant provision --provision-with=k3s-check-mount
      - name: "k3s-unmount-dir"
        run: vagrant provision --provision-with=k3s-unmount-dir
      - name: Cleanup VM
        run: vagrant destroy -f
      - name: On Failure, launch debug session
        uses: lhotari/action-upterm@v1
        if: ${{ failure() }}
        with:
          ## If no one connects after 5 minutes, shut down server.
          wait-timeout-minutes: 5

136
.github/workflows/integration.yaml vendored Normal file

@@ -0,0 +1,136 @@
name: Integration Test Coverage

on:
  push:
    paths-ignore:
      - "**.md"
      - "channel.yaml"
      - "install.sh"
      - "tests/**"
      - "!tests/integration**"
      - ".github/**"
      - "!.github/workflows/integration.yaml"
  pull_request:
    paths-ignore:
      - "**.md"
      - "channel.yaml"
      - "install.sh"
      - "tests/**"
      - "!tests/integration**"
      - "!tests/e2e**"
      - ".github/**"
      - "!.github/workflows/integration.yaml"
      - "!.github/workflows/build-k3s.yaml"
  workflow_dispatch: {}

permissions:
  contents: read

env:
  GOCOVERDIR: /tmp/k3scov

jobs:
  build:
    uses: ./.github/workflows/build-k3s.yaml
    with:
      os: linux
  build-windows:
    uses: ./.github/workflows/build-k3s.yaml
    with:
      os: windows
  itest:
    needs: build
    name: Integration Tests
    runs-on: ubuntu-latest
    timeout-minutes: 45
    strategy:
      fail-fast: false
      matrix:
        itest: [certrotation, cacertrotation, etcdrestore, localstorage, startup, custometcdargs, etcdsnapshot, kubeflags, longhorn, secretsencryption, flannelnone]
      max-parallel: 3
    steps:
      - name: Checkout
        uses: actions/checkout@v5
        with:
          fetch-depth: 1
      - name: Install Go
        uses: ./.github/actions/setup-go
      - name: "Download k3s binary"
        uses: actions/download-artifact@v5
        with:
          name: k3s-amd64
          path: ./dist/artifacts
      - name: Run Integration Tests
        run: |
          chmod +x ./dist/artifacts/k3s
          mkdir -p $GOCOVERDIR
          cd tests/integration/${{ matrix.itest }}
          sudo -E env "PATH=$PATH" go test -timeout=45m ./... -run Integration -ginkgo.v -test.v
      - name: On Failure, Upload Logs
        uses: actions/upload-artifact@v4
        if: ${{ failure() }}
        with:
          name: integration-${{ matrix.itest }}-logs
          path: tests/integration/${{ matrix.itest }}/*log.txt
          retention-days: 30
      - name: On Failure, Launch Debug Session
        uses: lhotari/action-upterm@v1
        if: ${{ failure() }}
        with:
          ## If no one connects after 5 minutes, shut down server.
          wait-timeout-minutes: 5
      - name: Generate coverage report
        run: go tool covdata textfmt -i $GOCOVERDIR -o ${{ matrix.itest }}.out
      - name: Upload Results To Codecov
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./${{ matrix.itest }}.out
          flags: inttests # optional
          verbose: true # optional (default = false)
  itest-windows:
    name: Integration Tests (windows)
    needs: build-windows
    runs-on: windows-2022
    timeout-minutes: 10
    env:
      GOCOVERDIR: "D:/tmp/k3scov"
    steps:
      - name: Checkout
        uses: actions/checkout@v5
        with: {fetch-depth: 1}
      - name: Install Go
        uses: ./.github/actions/setup-go
      - name: Download k3s binary
        uses: actions/download-artifact@v5
        with:
          name: k3s-windows
          path: dist/artifacts/
      - name: Run K3s
        timeout-minutes: 5
        env:
          CONTAINERD_LOG_LEVEL: "debug"
        run: |
          $ErrorActionPreference = "Continue"
          $PSNativeCommandUseErrorActionPreference = $true
          New-Item -Type Directory -Force $Env:GOCOVERDIR | Out-Null
          $Server = Start-Job -ScriptBlock { ./dist/artifacts/k3s.exe server --token=token --debug --disable=metrics-server }
          Start-Sleep -Seconds 15
          D:/var/lib/rancher/k3s/data/current/bin/k3s.exe kubectl apply -f ./tests/integration/startup/testdata/agnhost.yaml
          D:/var/lib/rancher/k3s/data/current/bin/k3s.exe kubectl wait --for=jsonpath='{.status.phase}'=Running --timeout=5m pod/agnhost
          D:/var/lib/rancher/k3s/data/current/bin/k3s.exe crictl ps
          D:/var/lib/rancher/k3s/data/current/bin/k3s.exe kubectl get pod -A -o wide
          D:/var/lib/rancher/k3s/data/current/bin/k3s.exe kubectl get node -o wide
          $RET = $LASTEXITCODE
          Stop-Job -Job $Server
          Receive-Job -Wait -Job $Server
          Remove-Job -Job $Server
          exit $RET
      - name: Generate coverage report
        run: go tool covdata textfmt -i $Env:GOCOVERDIR -o windows.out
      - name: Upload Results To Codecov
        uses: codecov/codecov-action@v5
        with:
          token: ${{ secrets.CODECOV_TOKEN }}
          files: ./windows.out
          flags: inttests # optional
          verbose: true # optional (default = false)

59
.github/workflows/nightly-install.yaml vendored Normal file

@@ -0,0 +1,59 @@
name: Nightly Install

on:
  schedule:
    - cron: "0 0 * * 1-5"
  workflow_dispatch: {}

permissions:
  contents: read

jobs:
  test:
    name: "Smoke Test"
    runs-on: ubuntu-latest
    timeout-minutes: 40
    strategy:
      fail-fast: false
      matrix:
        channel: [stable, latest]
        vm: [rocky-9, fedora, opensuse-leap, ubuntu-2404]
      max-parallel: 4
    defaults:
      run:
        working-directory: tests/install/${{ matrix.vm }}
    env:
      INSTALL_K3S_CHANNEL: ${{ matrix.channel }}
    steps:
      - name: "Checkout"
        uses: actions/checkout@v5
        with: {fetch-depth: 1}
      - name: Set up vagrant and libvirt
        uses: ./.github/actions/vagrant-setup
      - name: "Vagrant Cache"
        uses: actions/cache@v4
        with:
          path: |
            ~/.vagrant.d/boxes
          key: vagrant-box-${{ matrix.vm }}
        id: vagrant-cache
      - name: "Vagrant Plugin(s)"
        run: vagrant plugin install vagrant-k3s vagrant-reload
      - name: "Vagrant Up ⏩ Install K3s"
        run: vagrant up --no-tty
      - name: "⏳ Node"
        run: vagrant provision --provision-with=k3s-wait-for-node
      - name: "⏳ CoreDNS"
        run: vagrant provision --provision-with=k3s-wait-for-coredns
      - name: "⏳ Local Storage"
        run: vagrant provision --provision-with=k3s-wait-for-local-storage
        continue-on-error: true
      - name: "⏳ Metrics Server"
        run: vagrant provision --provision-with=k3s-wait-for-metrics-server
        continue-on-error: true
      - name: "⏳ Traefik"
        run: vagrant provision --provision-with=k3s-wait-for-traefik
        continue-on-error: true
      - name: "k3s-status"
        run: vagrant provision --provision-with=k3s-status
      - name: "k3s-procps"
        run: vagrant provision --provision-with=k3s-procps

195
.github/workflows/release.yml vendored Normal file

@@ -0,0 +1,195 @@
name: K3s Release

on:
  release:
    types: [published]

permissions:
  contents: read
  packages: read

jobs:
  build-amd64:
    name: Build Binary (amd64)
    uses: ./.github/workflows/build-k3s.yaml
    with:
      upload-build: true
  build-arm64:
    name: Build Binary (arm64)
    uses: ./.github/workflows/build-k3s.yaml
    with:
      arch: arm64
      upload-build: true
  build-arm:
    name: Build Binary (arm)
    uses: ./.github/workflows/build-k3s.yaml
    with:
      arch: arm
      upload-build: true
  push-release-image:
    name: Build and Push Multi-Arch Image
    runs-on: ubuntu-latest
    permissions:
      packages: write # Needed to push images to GHCR
    needs: [build-amd64, build-arm64, build-arm]
    steps:
      - name: Checkout code
        uses: actions/checkout@v5
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Log in to GitHub Container Registry
        uses: docker/login-action@v3
        with:
          registry: ghcr.io
          username: ${{ github.repository_owner }}
          password: ${{ secrets.GITHUB_TOKEN }}
      - name: Configure image tags
        id: tag_config
        run: |
          TAG=${GITHUB_REF#refs/tags/}
          # Base configuration - always transform the main tag
          # Transforms v1.32.4-rc1+k3s1 → v1.32.4-rc1-k3s1
          BASE_CONFIG="type=raw,value=${TAG//+/-}"

          if [[ "${TAG,,}" == *"rc"* ]]; then
            echo "RC release detected: $TAG"
            echo "tag_spec=$BASE_CONFIG" >> $GITHUB_OUTPUT
          else
            echo "Stable release detected: $TAG"
            echo "tag_spec=$BASE_CONFIG
            type=semver,pattern=v{{major}}.{{minor}}" >> $GITHUB_OUTPUT
          fi
      - name: Extract Docker metadata
        id: meta
        uses: docker/metadata-action@v5
        with:
          images: |
            ghcr.io/${{ github.repository_owner }}/k3s
          flavor: latest=false
          tags: ${{ steps.tag_config.outputs.tag_spec }}
      - name: "Download K3s build"
        uses: actions/download-artifact@v5
        with:
          pattern: k3s*
          path: ./dist/artifacts
          merge-multiple: true
      - name: Prepare build folder
        run: |
          mkdir -p ./build/out
          cp ./dist/artifacts/data-* ./build/out
      - name: Build and push K3s runtime image
        uses: docker/build-push-action@v6
        with:
          context: .
          file: ./package/Dockerfile
          platforms: linux/amd64,linux/arm64,linux/arm/v7
          push: true
          tags: ${{ steps.meta.outputs.tags }}
          labels: ${{ steps.meta.outputs.labels }}
          build-args: |
            DRONE_TAG=${{ github.ref_name }}
  upload-release-airgap:
    name: Build Airgap Pkg (${{ matrix.arch }})
    runs-on: ubuntu-latest # Runs on standard runner, docker pulls with --platform
    permissions:
      contents: write # Needed to update release with assets
    strategy:
      matrix:
        arch: [amd64, arm64, arm]
    steps:
      - name: Checkout code
        uses: actions/checkout@v5
      - name: Set up Docker Buildx
        uses: docker/setup-buildx-action@v3
      - name: Install Dependencies
        run: sudo apt-get update -y && sudo apt-get install -y zstd pigz
      - name: Create Airgap Package (${{ matrix.arch }})
        run: |
          mkdir -p ./dist/artifacts
          ./scripts/package-airgap ${{ matrix.arch }}
      - name: Calculate Airgap sha256sum
        run: sha256sum dist/artifacts/k3s-airgap-images-${{ matrix.arch }}* | sed 's|dist/artifacts/||' > dist/artifacts/k3s-airgap-images-${{ matrix.arch }}.sha256sum
      - name: Upload Airgap sha256sum
        uses: actions/upload-artifact@v4
        with:
          name: k3s-airgap-images-${{ matrix.arch }}.sha256sum
          path: dist/artifacts/k3s-airgap-images-${{ matrix.arch }}.sha256sum
      - name: Upload k3s-images.txt to Release
        uses: softprops/action-gh-release@v2
        # This action is recommended by GitHub; they don't support a first-party action for releases.
        # See https://github.com/actions/create-release?tab=readme-ov-file#github-action---releases-api
        if: ${{ matrix.arch == 'amd64' }}
        with:
          files: |
            dist/artifacts/k3s-images.txt
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
      - name: Upload Airgap Assets to Release
        uses: softprops/action-gh-release@v2
        with:
          files: |
            dist/artifacts/k3s-airgap-images*
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
  upload-release-assets:
    name: Prepare and Upload Release Assets
    permissions:
      contents: write # Needed to update release with assets
    runs-on: ubuntu-latest
    needs: [build-amd64, build-arm64, build-arm, upload-release-airgap]
    steps:
      - name: Checkout code
        uses: actions/checkout@v5
      - name: "Download Binaries and Airgap sha256sum"
        uses: actions/download-artifact@v5
        with:
          pattern: k3s*
          path: ./dist/artifacts
          merge-multiple: true
      - name: "Combine and format sha256sum files"
        run: |
          for arch in amd64 arm64 arm; do
            output_file="./dist/artifacts/sha256sum-${arch}.txt"
            cat ./dist/artifacts/k3s-airgap-images-$arch*.sha256sum >> "$output_file"
            rm ./dist/artifacts/k3s-airgap-images-$arch*.sha256sum
            if [[ "$arch" == "amd64" ]]; then
              cat ./dist/artifacts/k3s.sha256sum >> "$output_file"
              rm ./dist/artifacts/k3s.sha256sum # Remove the original file to avoid uploading it
            else
              cat ./dist/artifacts/k3s-${arch}.sha256sum >> "$output_file"
              rm ./dist/artifacts/k3s-${arch}.sha256sum # Remove the original file to avoid uploading it
            fi
          done
      - name: Upload Assets to Release
        uses: softprops/action-gh-release@v2.2.1
        with:
          files: |
            dist/artifacts/k3s*
            dist/artifacts/sha256sum*
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
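The `${TAG//+/-}` expansion in the `tag_config` step replaces every `+` with `-`, since Docker tags cannot contain `+`; for example:

  TAG="v1.32.4-rc1+k3s1"
  echo "${TAG//+/-}"   # -> v1.32.4-rc1-k3s1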

79
.github/workflows/scorecard.yml vendored Normal file

@@ -0,0 +1,79 @@
# This workflow uses actions that are not certified by GitHub. They are provided
# by a third-party and are governed by separate terms of service, privacy
# policy, and support documentation.

name: Scorecard supply-chain security

on:
  # For Branch-Protection check. Only the default branch is supported. See
  # https://github.com/ossf/scorecard/blob/main/docs/checks.md#branch-protection
  branch_protection_rule:
  # To guarantee Maintained check is occasionally updated. See
  # https://github.com/ossf/scorecard/blob/main/docs/checks.md#maintained
  schedule:
    - cron: '25 12 * * 5'
  push:
    branches: [ "master" ]

# Declare default permissions as read only.
permissions: read-all

jobs:
  analysis:
    name: Scorecard analysis
    runs-on: ubuntu-latest
    # `publish_results: true` only works when run from the default branch. The conditional can be removed if that is disabled.
    if: github.event.repository.default_branch == github.ref_name || github.event_name == 'pull_request'
    permissions:
      # Needed to upload the results to code-scanning dashboard.
      security-events: write
      # Needed to publish results and get a badge (see publish_results below).
      id-token: write
      # Uncomment the permissions below if installing in a private repository.
      # contents: read
      # actions: read
    steps:
      - name: "Checkout code"
        uses: actions/checkout@ff7abcd0c3c05ccf6adc123a8cd1fd4fb30fb493 # v4.2.2
        with:
          persist-credentials: false
      - name: "Run analysis"
        uses: ossf/scorecard-action@05b42c624433fc40578a4040d5cf5e36ddca8cde # v2.4.2
        with:
          results_file: results.sarif
          results_format: sarif
          # (Optional) "write" PAT token. Uncomment the `repo_token` line below if:
          # - you want to enable the Branch-Protection check on a *public* repository, or
          # - you are installing Scorecard on a *private* repository
          # To create the PAT, follow the steps in https://github.com/ossf/scorecard-action?tab=readme-ov-file#authentication-with-fine-grained-pat-optional.
          # TODO expires on July 15, 2026
          repo_token: ${{ secrets.SCORECARD_TOKEN }}
          # Public repositories:
          #   - Publish results to OpenSSF REST API for easy access by consumers
          #   - Allows the repository to include the Scorecard badge.
          #   - See https://github.com/ossf/scorecard-action#publishing-results.
          # For private repositories:
          #   - `publish_results` will always be set to `false`, regardless
          #     of the value entered here.
          publish_results: true
          # (Optional) Uncomment file_mode if you have a .gitattributes with files marked export-ignore
          # file_mode: git
      # Upload the results as artifacts (optional). Commenting out will disable uploads of run results in SARIF
      # format to the repository Actions tab.
      - name: "Upload artifact"
        uses: actions/upload-artifact@4cec3d8aa04e39d1a68397de0c4cd6fb9dce8ec1 # v4.6.1
        with:
          name: SARIF file
          path: results.sarif
          retention-days: 5
      # Upload the results to GitHub's code scanning dashboard (optional).
      # Commenting out will disable upload of results to your repo's Code Scanning dashboard
      - name: "Upload to code-scanning"
        uses: github/codeql-action/upload-sarif@v3
        with:
          sarif_file: results.sarif

53
.github/workflows/stale.yml vendored Normal file

@ -0,0 +1,53 @@
name: Stalebot
on:
schedule:
- cron: '0 20 * * *'
workflow_dispatch:
permissions:
contents: read
jobs:
stalebot:
runs-on: ubuntu-latest
permissions:
issues: write
contents: write
steps:
- name: Close Stale Issues
uses: actions/stale@v9.1.0
with:
# ensure PRs are exempt
days-before-pr-stale: -1
days-before-pr-close: -1
days-before-issue-stale: 45
days-before-issue-close: 14
stale-issue-label: status/stale
exempt-all-milestones: true
exempt-all-assignees: true
exempt-issue-labels:
internal,
kind/bug,
kind/bug-qa,
kind/task,
kind/feature,
kind/enhancement,
kind/design,
kind/ci-improvements,
kind/performance,
kind/flaky-test,
kind/documentation,
kind/epic,
kind/upstream-issue,
priority/backlog,
priority/critical-urgent,
priority/important-longterm,
priority/important-soon,
priority/low,
priority/medium,
priority/high,
priority/urgent,
stale-issue-message: >
This repository uses a bot to automatically label issues which have not had any activity (commit/comment/label)
for 45 days. This helps us manage the community issues better. If the issue is still relevant, please add a comment to the
issue so the bot can remove the label and we know it is still valid. If it is no longer relevant (or possibly fixed in the
latest release), the bot will automatically close the issue in 14 days. Thank you for your contributions.
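As a hedged illustration of this policy: any activity clears the label, so a single comment keeps an issue open (the issue number below is hypothetical).
```bash
# Hypothetical issue number; any comment counts as activity and clears status/stale.
gh issue comment 1234 --repo k3s-io/k3s --body "Still reproducible on the latest release"
```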

129
.github/workflows/trivy-scan.yml vendored Normal file
View File

@ -0,0 +1,129 @@
name: Trivy Scan Result
on:
workflow_run:
workflows: ["Trivy Scan Trigger"]
types:
- completed
permissions:
contents: read
jobs:
trivy_scan:
if: github.event.workflow_run.conclusion == 'success'
runs-on: ubuntu-latest
permissions:
contents: write # Required to checkout the PR's head SHA.
outputs:
pr_number: ${{ steps.pr_context.outputs.pr_number }}
steps:
# For some reason with workflow_run.id, download-artifact does not work.
# GitHub Docs explicitly provide an example of using github-script to download artifacts.
- name: 'Download artifact'
uses: actions/github-script@v8
with:
script: |
let allArtifacts = await github.rest.actions.listWorkflowRunArtifacts({
owner: context.repo.owner,
repo: context.repo.repo,
run_id: context.payload.workflow_run.id,
});
let matchArtifact = allArtifacts.data.artifacts.filter((artifact) => {
return artifact.name == "pr-context-for-scan"
})[0];
let download = await github.rest.actions.downloadArtifact({
owner: context.repo.owner,
repo: context.repo.repo,
artifact_id: matchArtifact.id,
archive_format: 'zip',
});
const fs = require('fs');
fs.writeFileSync('pr-context-for-scan.zip', Buffer.from(download.data));
- name: 'Unzip artifact to pr-context'
run: unzip pr-context-for-scan.zip -d pr-context
- name: Setup PR context
id: pr_context
run: |
pr_number=$(cat pr-context/pr_number)
echo "pr_number=$pr_number" >> $GITHUB_OUTPUT
- name: Load K3s Image
run: docker load -i pr-context/k3s.tar
- name: Download Rancher's VEX Hub report
run: curl -fsSO https://raw.githubusercontent.com/rancher/vexhub/refs/heads/main/reports/rancher.openvex.json
- name: Run Trivy vulnerability scanner
uses: aquasecurity/trivy-action@0.33.1
with:
image-ref: 'rancher/k3s:latest'
format: 'table'
severity: "HIGH,CRITICAL"
output: "trivy-report.txt"
env:
TRIVY_VEX: rancher.openvex.json
TRIVY_SHOW_SUPPRESSED: true
- name: Upload Trivy Report
uses: actions/upload-artifact@v4
with:
name: trivy-report
path: trivy-report.txt
retention-days: 2
if-no-files-found: error
report_results:
needs: trivy_scan
if: always() # Run even if the scan fails.
runs-on: ubuntu-latest
permissions:
pull-requests: write # Required to post comments.
steps:
- name: Download Trivy Report artifact
uses: actions/download-artifact@v5
if: needs.trivy_scan.result == 'success'
with:
name: trivy-report
path: .
- name: Add Trivy Report to PR
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GH_REPO: ${{ github.repository }}
SCAN_RESULT: ${{ needs.trivy_scan.result }}
PR_NUMBER: ${{ needs.trivy_scan.outputs.pr_number }}
run: |
if [[ "$SCAN_RESULT" == "failure" ]]; then
gh issue comment $PR_NUMBER -b ":x: Trivy scan action failed, check logs :x:"
exit 0
fi
if [ -s trivy-report.txt ] && [ -n "$(grep -v '^\s*$' trivy-report.txt)" ]; then
echo '```' | cat - trivy-report.txt > temp && mv temp trivy-report.txt
echo '```' >> trivy-report.txt
gh issue comment $PR_NUMBER -F trivy-report.txt
else
echo ':star2: No High or Critical CVEs Found :star2:' > trivy-report.txt
gh issue comment $PR_NUMBER -F trivy-report.txt
fi
remove_label:
if: always() # Run even if the scan fails.
needs: trivy_scan
runs-on: ubuntu-latest
permissions:
pull-requests: write # Required to remove labels from the PR.
steps:
- name: Remove 'scan-with-trivy' label
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
GH_REPO: ${{ github.repository }}
PR_NUMBER: ${{ needs.trivy_scan.outputs.pr_number }}
run: |
gh pr edit $PR_NUMBER --remove-label "scan-with-trivy"

68
.github/workflows/trivy-trigger.yml vendored Normal file
View File

@ -0,0 +1,68 @@
name: Trivy Scan Trigger
# This workflow is triggered when a pull request is labeled with 'scan-with-trivy'.
# This can only be initiated by a user who is a member of the k3s-io organization and has write permissions.
# It isolates the build of k3s within an unprivileged environment.
# The follow-up unprivileged workflow will then use the artifact created here to run the scan
# and report the results back to the PR.
on:
pull_request:
types: [labeled]
permissions:
contents: read
jobs:
trigger-scan:
if: github.event.label.name == 'scan-with-trivy'
runs-on: ubuntu-latest
steps:
- name: Verify actor is a member of k3s-io organization and has write permissions
uses: actions/github-script@v8
with:
script: |
const org = 'k3s-io';
const actor = context.actor;
const { repo, owner } = context.repo;
try {
const result = await github.rest.orgs.checkMembershipForUser({
org,
username: actor,
});
} catch (error) {
core.setFailed(`User ${actor} is not a public member of the ${org} organization`);
}
const { data: { permission } } = await github.rest.repos.getCollaboratorPermissionLevel({
owner,
repo,
username: actor
});
if (permission !== 'admin' && permission !== 'write') {
core.setFailed(`User @${actor} does not have write permission. Scan can only be triggered by repository collaborators with write access.`);
}
- name: Checkout repository
uses: actions/checkout@v5
- name: Build And Save K3s Image
run: |
make local-image
make tag-image-latest
docker save -o k3s.tar rancher/k3s:latest
- name: Create PR context artifact
run: |
mkdir -p pr-context
echo "${{ github.event.pull_request.number }}" > pr-context/pr_number
mv k3s.tar pr-context/k3s.tar
- name: Upload PR context artifact
uses: actions/upload-artifact@v4
with:
name: pr-context-for-scan
path: pr-context/
retention-days: 1
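For reference, a sketch of how a collaborator would trigger this pipeline from the CLI (the PR number is hypothetical); the scan workflow removes the label again once it completes:
```bash
# Hypothetical PR number; requires public k3s-io membership and write access.
gh pr edit 1234 --repo k3s-io/k3s --add-label "scan-with-trivy"
```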

89
.github/workflows/unitcoverage.yaml vendored Normal file
View File

@ -0,0 +1,89 @@
name: Unit Test Coverage
on:
push:
paths-ignore:
- "**.md"
- "channel.yaml"
- "install.sh"
- "tests/snapshotter/**"
- "tests/install/**"
- "tests/cgroup/**"
- ".github/**"
- "!.github/workflows/unitcoverage.yaml"
pull_request:
paths-ignore:
- "**.md"
- "channel.yaml"
- "install.sh"
- "tests/snapshotter/**"
- "tests/install/**"
- "tests/cgroup/**"
- ".github/**"
- "!.github/workflows/unitcoverage.yaml"
workflow_dispatch: {}
permissions:
contents: read
jobs:
test-unit-linux:
name: Unit Tests (linux)
runs-on: ubuntu-24.04
timeout-minutes: 20
steps:
- name: Checkout
uses: actions/checkout@v5
with:
fetch-depth: 1
- name: Install Go
uses: ./.github/actions/setup-go
- name: Run Unit Tests
run: |
go test -coverpkg ./pkg/... -coverprofile coverage.out ./pkg/... -run Unit
go tool cover -func coverage.out
- name: On Failure, Launch Debug Session
if: ${{ failure() }}
uses: lhotari/action-upterm@v1
with:
wait-timeout-minutes: 5
- name: Upload Results To Codecov
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./coverage.out
flags: unittests # optional
verbose: true # optional (default = false)
test-unit-windows:
name: Unit Tests (windows)
runs-on: windows-2022
timeout-minutes: 20
steps:
- name: Checkout
uses: actions/checkout@v5
with:
fetch-depth: 1
- name: Install Go
uses: actions/setup-go@v6
with:
cache: false
- name: Run Unit Tests
run: |
go test -coverpkg ./pkg/... -coverprofile coverage.out ./pkg/... -run Unit
go tool cover -func coverage.out
- name: Upload Results To Codecov
uses: codecov/codecov-action@v5
with:
token: ${{ secrets.CODECOV_TOKEN }}
files: ./coverage.out
flags: unittests # optional
verbose: true # optional (default = false)
test-mods:
name: Test K8s Modules
runs-on: ubuntu-24.04
steps:
- name: Checkout
uses: actions/checkout@v5
- name: Build test-mods
run: docker build --target test-mods -t k3s:mod -f Dockerfile.test .
- name: Run test-mods
run: docker run -i k3s:mod
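To approximate the Linux unit-test job locally (assuming a Go toolchain matching the repository's go.mod), the same commands from the workflow can be run at the repository root:
```bash
# Mirrors the CI "Run Unit Tests" step above.
go test -coverpkg ./pkg/... -coverprofile coverage.out ./pkg/... -run Unit
go tool cover -func coverage.out   # per-function coverage summary
```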

46
.github/workflows/updatecli.yaml vendored Normal file
View File

@ -0,0 +1,46 @@
name: "Updatecli: Dependency Management"
on:
schedule:
# Runs at 06:00 PM UTC every Sunday
- cron: '0 18 * * 0'
# Allows you to run this workflow manually from the Actions tab
workflow_dispatch:
permissions:
contents: read
jobs:
updatecli:
runs-on: ubuntu-latest
permissions:
contents: write
issues: write
pull-requests: write
if: github.ref == 'refs/heads/master'
steps:
- name: Checkout
uses: actions/checkout@v5
- name: Install Go
uses: actions/setup-go@v6
with:
go-version: 'stable'
cache: false
- name: Delete leftover UpdateCLI branches
run: |
gh pr list --search "is:closed is:pr head:updatecli_" --json headRefName --jq ".[].headRefName" | sort -u > closed_prs_branches.txt
gh pr list --search "is:open is:pr head:updatecli_" --json headRefName --jq ".[].headRefName" | sort -u > open_prs_branches.txt
for branch in $(comm -23 closed_prs_branches.txt open_prs_branches.txt); do if (git ls-remote --exit-code --heads origin "$branch"); then echo "Deleting leftover UpdateCLI branch - $branch"; git push origin --delete "$branch"; fi done
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Install Updatecli
uses: updatecli/updatecli-action@v2
- name: Apply Updatecli
# Never use the '--debug' option, because it might leak access tokens.
run: "updatecli apply --clean --config ./updatecli/updatecli.d/ --values ./updatecli/values.yaml"
env:
UPDATECLI_GITHUB_ACTOR: ${{ github.actor }}
UPDATECLI_GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
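As a rough local sketch (assuming `updatecli` is installed and the relevant tokens are exported), pending updates can be previewed without applying them:
```bash
# diff reports what the scheduled job would change, without modifying anything.
updatecli diff --config ./updatecli/updatecli.d/ --values ./updatecli/values.yaml
```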

169
.gitignore vendored
View File

@ -1,131 +1,40 @@
# OSX leaves these everywhere on SMB shares
._*
# OSX trash
*.swp
/.dapper
/.tags
/.idea
/.trash-cache
.vagrant/
/.kube
/.cache
/.docker
/.*_history
/.viminfo
/.lesshst
/*.log
/bin
/etc
/build
/data-dir
/dist
/image/root
/image/agent
/image/go_build_agent
/image/main.squashfs
/package/k3s
/package/data-*
/pkg/deploy/embed/
/pkg/static/embed/
/pkg/data/embed/
__pycache__
/tests/.pytest_cache/
/tests/.tox/
/tests/.vscode
/tests/**/*log.txt
/tests/**/vagrant.log
/sonobuoy-output
*.tmp
config/local.tfvars
*.terraform
*.tfstate
.terraform.lock.hcl
.DS_Store
# Eclipse files
.classpath
.project
.settings/**
# Files generated by JetBrains IDEs, e.g. IntelliJ IDEA
.idea/
*.iml
# Vscode files
.vscode
# This is where the result of the go build goes
/output*/
/_output*/
/_output
# Emacs save files
*~
\#*\#
.\#*
# Vim-related files
[._]*.s[a-w][a-z]
[._]s[a-w][a-z]
*.un~
Session.vim
.netrwhist
# cscope-related files
cscope.*
# Go test binaries
*.test
/hack/.test-cmd-auth
# JUnit test output from ginkgo e2e tests
/junit*.xml
# Mercurial files
**/.hg
**/.hg*
# Vagrant
.vagrant
network_closure.sh
# Local cluster env variables
/cluster/env.sh
# Compiled binaries in third_party
/third_party/pkg
# Also ignore etcd installed by hack/install-etcd.sh
/third_party/etcd*
/default.etcd
# User cluster configs
.kubeconfig
.tags*
# Version file for dockerized build
.dockerized-kube-version-defs
# Web UI
/www/master/node_modules/
/www/master/npm-debug.log
/www/master/shared/config/development.json
# Karma output
/www/test_out
# precommit temporary directories created by ./hack/verify-generated-docs.sh and ./hack/lib/util.sh
/_tmp/
/doc_tmp/
# Test artifacts produced by Jenkins jobs
/_artifacts/
# Go dependencies installed on Jenkins
/_gopath/
# Config directories created by gcloud and gsutil on Jenkins
/.config/gcloud*/
/.gsutil/
# CoreOS stuff
/cluster/libvirt-coreos/coreos_*.img
# Juju Stuff
/cluster/juju/charms/*
/cluster/juju/bundles/local.yaml
# Downloaded Kubernetes binary release
/kubernetes/
# direnv .envrc files
.envrc
# Downloaded kubernetes binary release tar ball
kubernetes.tar.gz
# generated files in any directory
# TODO(thockin): uncomment this when we stop committing the generated files.
#zz_generated.*
zz_generated_*_test.go
# make-related metadata
/.make/
# Just in time generated data in the source, should never be committed
/test/e2e/generated/bindata.go
# This file used by some vendor repos (e.g. github.com/go-openapi/...) to store secret variables and should not be ignored
!\.drone\.sec
# Godeps workspace
/Godeps/_workspace
/bazel-*
*.pyc
# generated by verify-godeps.sh
vendordiff.patch

46
.golangci.json Normal file
View File

@ -0,0 +1,46 @@
{
"linters": {
"disable-all": true,
"enable": [
"govet",
"revive",
"goimports",
"misspell",
"gofmt"
]
},
"run": {
"deadline": "5m"
},
"issues": {
"exclude-dirs": [
"build",
"contrib",
"manifests",
"package",
"scripts",
"vendor"
],
"exclude-files": [
"/zz_generated_"
],
"exclude-rules": [
{
"linters": "typecheck",
"text": "imported but not used"
},
{
"linters": "typecheck",
"text": "build constraints exclude all Go files"
},
{
"linters": "revive",
"text": "should have comment"
},
{
"linters": "revive",
"text": "exported"
}
]
}
}
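Assuming a golangci-lint binary compatible with this config is on PATH (the build images in this repo pin specific v1.x versions), the configured linters can be run locally with:
```bash
# Runs govet, revive, goimports, misspell, and gofmt per .golangci.json.
golangci-lint run ./...
```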

17
ADOPTERS.md Normal file
View File

@ -0,0 +1,17 @@
## k3s Adopters
A non-exhaustive list of k3s adopters is provided below. To add your company to this list, please open a [PR](https://github.com/k3s-io/k3s/pulls).
- [Rocket.Chat](https://rocket.chat)
- [Ayedo.de](https://ayedo.de/)
- [PITS Global Data Recovery Services](https://www.pitsdatarecovery.net/)
- [External Secrets Inc.](https://externalsecrets.com)
- [Uffizzi](https://www.uffizzi.com/)
- [Child Rescue Coalition](https://www.childrescuecoalition.org)
**_Other Projects_** - While the above list provides a number of official adopters, k3s' compact and simple nature provides a clean base for other projects to build off of, or to embed. Some such projects are listed below:
- SUSE's RKE2 (or RKE Government) [RKE2](https://github.com/rancher/rke2/)
- [k3ai](https://k3ai.github.io/)
- SUSE's [Rancher Desktop](https://rancherdesktop.io/)
- [Kairos](https://kairos.io)
- [Getdeck Beiboot](https://github.com/Getdeck/beiboot)

43
BUILDING.md Normal file
View File

@ -0,0 +1,43 @@
**Note:** In case you are looking for pre-built releases, see the [release page](https://github.com/k3s-io/k3s/releases/latest).
## Build k3s from source
Before getting started, bear in mind that this repository includes all of Kubernetes history, so consider a shallow clone (`--depth 1`) to speed up the process.
```bash
git clone --depth 1 https://github.com/k3s-io/k3s.git
```
To build the full release binary, you may now run `make`, which will create `./dist/artifacts/k3s`.
To build the binaries using `make` without running linting (i.e., if you have uncommitted changes):
```bash
SKIP_VALIDATE=true make
```
In case you make any changes to [go.mod](go.mod), you should run `go mod tidy` before running `make`.
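Putting the pieces above together, a minimal build session might look like the following (paths as produced by `make`):
```bash
git clone --depth 1 https://github.com/k3s-io/k3s.git && cd k3s
go mod tidy                    # only needed if go.mod was changed
SKIP_VALIDATE=true make        # skip linting while changes are uncommitted
ls -lh ./dist/artifacts/k3s    # the full release binary
```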
### macOS considerations
The shell scripts in charge of the build process (the ones behind `make`) rely on GNU utils (e.g., `sed`), [which differ slightly on macOS](https://unix.stackexchange.com/a/79357). So, if you need to build k3s on a macOS environment, it is suggested to use the virtual machine defined in this repository's [Vagrantfile](Vagrantfile) to perform the tasks mentioned above.
To start the virtual machine, you will need [Vagrant](https://www.vagrantup.com/) and [VirtualBox](https://www.virtualbox.org/) installed. Then run:
```bash
$ vagrant up
[... vm provisioning logs ...]
```
Once the virtual machine is provisioned, you should be able to ssh into it by running `vagrant ssh` and perform any build task there:
```bash
$ vagrant ssh
[... ssh connection logs ..]
$ uname -a
Linux k3s-0-alpine312 5.11.0-41-generic
$ make
[... k3s build logs ...]
```
All the artifacts built within the VM will be synchronized with the directory where the `vagrant up` command was issued. For Vagrant-related commands, please refer to [its CLI documentation](https://www.vagrantup.com/docs/cli).

File diff suppressed because it is too large Load Diff

2
CODEOWNERS Normal file
View File

@ -0,0 +1,2 @@
* @k3s-io/k3s-dev

2
CODE_OF_CONDUCT.md Normal file
View File

@ -0,0 +1,2 @@
# Community Code of Conduct
k3s observes the [CNCF Community Code of Conduct](https://github.com/cncf/foundation/blob/main/code-of-conduct.md).

77
CONTRIBUTING.md Normal file
View File

@ -0,0 +1,77 @@
# Contributing to K3s #
Thanks for taking the time to contribute to K3s!
Please review and follow the [Code of Conduct](CODE_OF_CONDUCT.md).
Contributing is not limited to writing code and submitting a PR. Feel free to submit an [issue](https://github.com/k3s-io/k3s/issues/new/choose) or comment on an existing one to report a bug, provide feedback, or suggest a new feature. You can also join the discussion on [slack](https://rancher-users.slack.com/channels/k3s).
Of course, contributing code is more than welcome! To keep things simple, if you're fixing a small issue, you can simply submit a PR and we will pick it up. However, if you're planning to submit a bigger PR to implement a new feature or fix a relatively complex bug, please open an issue that explains the change and the motivation for it. If you're addressing a bug, please explain how to reproduce it.
If you're interested in contributing documentation, please note the following:
- Doc issues are raised in this repository, and they are tracked under the `kind/documentation` label.
- Pull requests are submitted to the K3s documentation source in the [k3s-io docs repository](https://github.com/k3s-io/docs).
If you're interested in contributing new tests, please see the [TESTING.md](./tests/TESTING.md).
## Code Convention
See the [code conventions documentation](./docs/contrib/code_conventions.md) for more information on how to write code for K3s.
### Opening PRs and organizing commits
PRs should generally address only 1 issue at a time. If you need to fix two bugs, open two separate PRs. This will keep the scope of your pull requests smaller and allow them to be reviewed and merged more quickly.
When possible, fill out as much detail in the pull request template as is reasonable. Most important is to reference the GitHub issue that you are addressing with the PR.
**NOTE:** GitHub has [a feature](https://docs.github.com/en/github/managing-your-work-on-github/linking-a-pull-request-to-an-issue#linking-a-pull-request-to-an-issue-using-a-keyword) that will automatically close issues referenced with a keyword (such as "Fixes") by a PR or commit once the PR/commit is merged. Don't use these keywords. We don't want issues to be automatically closed. We want our testers to independently verify and close them.
Generally, pull requests should consist of a single logical commit. However, if your PR is for a large feature, you may need a more logical breakdown of commits. This is fine as long as each commit is a single logical unit.
The other exception to this single-commit rule is if your PR includes a change to a vendored dependency or generated code. To make reviewing easier, these changes should be segregated into their own commit. Note that as we migrate from using the vendor directory to a pure go module model for our projects, this will be less of an issue.
As the issue and the PR already include all the required information, commit messages are normally empty. The title of the commit should summarize in a few words what the commit is trying to do.
For each commit, please ensure you sign off as mentioned below in the [Developer Certificate Of Origin section](#developer-certificate-of-origin).
### Reviewing, addressing feedback, and merging
Generally, pull requests need two approvals from maintainers to be merged. One exception to this is when a PR is simply a "pull through" that is just updating a dependency from other Rancher-managed vendor packages or any minor third-party vendor update. In this case, only one approval is needed.
When addressing review feedback, it is helpful to the reviewer if additional changes are made in new commits. This allows the reviewer to easily see the delta between what they previously reviewed and the changes you added to address their feedback.
Once a PR has the necessary approvals, it can be merged. Here's how the merge should be handled:
- If the PR is a single logical commit, the merger should use the “Rebase and merge” option. This keeps the git commit history very clean and simple and eliminates noise from "merge commits."
- If the PR is more than one logical commit, the merger should use the “Create a merge commit” option.
- If the PR consists of more than one commit because the author added commits to address feedback, the commits should be squashed into a single commit (or more than one logical commit, if it is a big feature that needs more commits). This can be achieved in one of two ways:
- The merger can use the “Squash and merge” option. If they do this, the merger is responsible for cleaning up the commit message according to the previously stated commit message guidance.
- The pull request author, after getting the requisite approvals, can reorganize the commits as they see fit (using, for example, `git rebase -i`) and re-push.
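For example, a sketch of the author-side squash flow described above (branch name and commit count are hypothetical):
```bash
# Interactively squash the last three commits into one logical commit.
git rebase -i HEAD~3
# Re-push the reorganized branch to update the PR.
git push --force-with-lease origin my-feature-branch
```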
## Developer Certificate Of Origin ##
To contribute to this project, you must agree to the Developer Certificate of Origin (DCO) for each commit you make. The DCO is a simple statement that you, as a contributor, have the legal right to make the contribution.
See the [DCO](DCO) file for the full text of what you must agree to.
To signify that you agree to the DCO for a commit, you add a line to the git
commit message:
```txt
Signed-off-by: Jane Smith <jane.smith@example.com>
```
In most cases, you can add this signoff to your commit automatically with the
`-s` flag to `git commit`. Please use your real name and a reachable email address.
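For instance (the commit message is a placeholder):
```bash
# -s appends the Signed-off-by trailer using your git user.name and user.email.
git commit -s -m "Fix typo in agent docs"
```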
## Golangci-lint ##
There is a CI check for formatting on our code. You'll need to install `goimports` to pass this check; you can do so by running:
```
go install golang.org/x/tools/cmd/goimports@latest
```
then run:
```
make format
```

38
DCO Normal file
View File

@ -0,0 +1,38 @@
Developer Certificate of Origin
Version 1.1
Copyright (C) 2004, 2006 The Linux Foundation and its contributors.
1 Letterman Drive
Suite D4700
San Francisco, CA, 94129
Everyone is permitted to copy and distribute verbatim copies of this
license document, but changing it is not allowed.
Developer's Certificate of Origin 1.1
By making a contribution to this project, I certify that:
(a) The contribution was created in whole or in part by me and I
have the right to submit it under the open source license
indicated in the file; or
(b) The contribution is based upon previous work that, to the best
of my knowledge, is covered under an appropriate open source
license and I have the right under that license to submit that
work with modifications, whether created in whole or in part
by me, under the same open source license (unless I am
permitted to submit under a different license), as indicated
in the file; or
(c) The contribution was provided directly to me by some other
person who certified (a), (b) or (c) and I have not modified
it.
(d) I understand and agree that this project and the contribution
are public and that a record of the contribution (including all
personal information I submit with it, including my sign-off) is
maintained indefinitely and may be redistributed consistent with
this project or the open source license(s) involved.

59
Dockerfile.dapper Normal file
View File

@ -0,0 +1,59 @@
ARG GOLANG=golang:1.24.6-alpine3.22
FROM ${GOLANG}
# Install necessary packages
RUN apk -U --no-cache add \
bash git gcc musl-dev docker vim less file curl wget ca-certificates jq linux-headers \
zlib-dev tar zip squashfs-tools npm coreutils python3 py3-pip openssl-dev libffi-dev libseccomp \
libseccomp-dev libseccomp-static make libuv-static sqlite-dev sqlite-static libselinux \
libselinux-dev zlib-dev zlib-static zstd pigz alpine-sdk binutils-gold btrfs-progs-dev \
btrfs-progs-static gawk yq pipx \
&& [ "$(go env GOARCH)" = "amd64" ] && apk -U --no-cache add mingw-w64-gcc || true
# Install AWS CLI
RUN PIPX_BIN_DIR=/usr/local/bin pipx install awscli
# Install Trivy
ENV TRIVY_VERSION="0.59.0"
RUN case "$(go env GOARCH)" in \
arm64) TRIVY_ARCH="ARM64" ;; \
amd64) TRIVY_ARCH="64bit" ;; \
s390x) TRIVY_ARCH="s390x" ;; \
*) TRIVY_ARCH="" ;; \
esac && \
if [ -n "${TRIVY_ARCH}" ]; then \
wget --no-verbose "https://github.com/aquasecurity/trivy/releases/download/v${TRIVY_VERSION}/trivy_${TRIVY_VERSION}_Linux-${TRIVY_ARCH}.tar.gz" \
&& tar -zxvf "trivy_${TRIVY_VERSION}_Linux-${TRIVY_ARCH}.tar.gz" \
&& mv trivy /usr/local/bin; \
fi
# Install goimports
RUN GOPROXY=direct go install golang.org/x/tools/cmd/goimports@gopls/v0.16.0
# Cleanup
RUN rm -rf /go/src /go/pkg
# Install golangci-lint for amd64
RUN if [ "$(go env GOARCH)" = "amd64" ]; then \
curl -sL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.55.2; \
fi
# Set SELINUX environment variable
ARG SELINUX=true
ENV SELINUX=${SELINUX}
# Set Dapper configuration variables
ENV DAPPER_RUN_ARGS="--privileged -v k3s-cache:/go/src/github.com/k3s-io/k3s/.cache -v trivy-cache:/root/.cache/trivy" \
DAPPER_ENV="REPO TAG DRONE_TAG IMAGE_NAME SKIP_VALIDATE SKIP_IMAGE SKIP_AIRGAP AWS_SECRET_ACCESS_KEY AWS_ACCESS_KEY_ID GITHUB_TOKEN GOLANG GOCOVER GOOS DEBUG" \
DAPPER_SOURCE="/go/src/github.com/k3s-io/k3s/" \
DAPPER_OUTPUT="./bin ./dist ./build/out ./build/static ./pkg/static ./pkg/deploy" \
DAPPER_DOCKER_SOCKET=true \
CROSS=true \
STATIC_BUILD=true
# Set $HOME separately because it refers to $DAPPER_SOURCE, set above
ENV HOME=${DAPPER_SOURCE}
WORKDIR ${DAPPER_SOURCE}
ENTRYPOINT ["./scripts/entry.sh"]
CMD ["ci"]

78
Dockerfile.local Normal file
View File

@ -0,0 +1,78 @@
ARG GOLANG=golang:1.24.6-alpine3.22
FROM ${GOLANG} AS infra
RUN apk -U --no-cache add bash git gcc musl-dev docker vim less file curl wget ca-certificates jq linux-headers \
zlib-dev tar zip squashfs-tools npm coreutils openssl-dev libffi-dev libseccomp libseccomp-dev \
libseccomp-static make libuv-static sqlite-dev sqlite-static libselinux libselinux-dev zlib-dev zlib-static \
zstd pigz alpine-sdk binutils-gold btrfs-progs-dev btrfs-progs-static gawk yq pipx \
&& \
if [ "$(go env GOARCH)" = "amd64" ]; then \
apk -U --no-cache add mingw-w64-gcc; \
fi
# Install goimports
RUN GOPROXY=direct go install golang.org/x/tools/cmd/goimports@gopls/v0.16.0
RUN rm -rf /go/src /go/pkg
RUN if [ "$(go env GOARCH)" = "amd64" ]; then \
curl -sL https://raw.githubusercontent.com/golangci/golangci-lint/master/install.sh | sh -s v1.51.2; \
fi
ARG SELINUX=true
ENV SELINUX=$SELINUX
ENV STATIC_BUILD=true
ENV SRC_DIR=/go/src/github.com/k3s-io/k3s
WORKDIR ${SRC_DIR}/
FROM infra AS manifests
ARG GIT_TAG
ARG TREE_STATE
ARG COMMIT
ARG DIRTY
ARG GOOS
ENV NO_DAPPER=true
# Used by both build and validate stages; better caching if we do this in a separate stage
COPY ./scripts/ ./scripts
COPY ./go.mod ./go.sum ./main.go ./
COPY ./manifests ./manifests
RUN mkdir -p bin dist
RUN --mount=type=cache,id=gomod,target=/go/pkg/mod \
./scripts/download
FROM manifests AS validate
ARG SKIP_VALIDATE
COPY . .
RUN --mount=type=cache,id=gomod,target=/go/pkg/mod \
--mount=type=cache,id=gobuild,target=/root/.cache/go-build \
--mount=type=cache,id=lint,target=/root/.cache/golangci-lint \
./scripts/validate
FROM manifests AS build
ARG GOCOVER
ARG DEBUG
COPY ./cmd ./cmd
COPY ./tests ./tests
COPY ./pkg ./pkg
RUN --mount=type=cache,id=gomod,target=/go/pkg/mod \
--mount=type=cache,id=gobuild,target=/root/.cache/go-build \
./scripts/build
COPY ./contrib ./contrib
RUN --mount=type=cache,id=gomod,target=/go/pkg/mod \
--mount=type=cache,id=gobuild,target=/root/.cache/go-build \
./scripts/package-cli
RUN ./scripts/binary_size_check.sh
FROM scratch AS result
ENV SRC_DIR=/go/src/github.com/k3s-io/k3s
COPY --from=build ${SRC_DIR}/dist /dist
COPY --from=build ${SRC_DIR}/bin /bin
COPY --from=build ${SRC_DIR}/build/out /build/out
COPY --from=build ${SRC_DIR}/build/static /build/static
COPY --from=build ${SRC_DIR}/pkg/static /pkg/static
COPY --from=build ${SRC_DIR}/pkg/deploy /pkg/deploy

19
Dockerfile.manifest Normal file
View File

@ -0,0 +1,19 @@
ARG GOLANG=golang:1.24.6-alpine3.22
FROM ${GOLANG}
COPY --from=plugins/manifest:1.2.3 /bin/* /bin/
RUN apk -U --no-cache add bash
ARG DOCKER_USERNAME
ENV DOCKER_USERNAME $DOCKER_USERNAME
ARG DOCKER_PASSWORD
ENV DOCKER_PASSWORD $DOCKER_PASSWORD
ARG DRONE_TAG
ENV DRONE_TAG $DRONE_TAG
COPY ./scripts/manifest /bin/
RUN manifest

61
Dockerfile.test Normal file
View File

@ -0,0 +1,61 @@
ARG GOLANG=golang:1.24.6-alpine3.22
FROM ${GOLANG} AS test-base
RUN apk -U --no-cache add bash jq
ENV K3S_SOURCE=/go/src/github.com/k3s-io/k3s/
WORKDIR ${K3S_SOURCE}
COPY . ${K3S_SOURCE}
FROM test-base AS test-mods
COPY ./scripts/test-mods /bin/
ENTRYPOINT ["/bin/test-mods"]
FROM test-base AS test-k3s
RUN apk -U --no-cache add git gcc musl-dev docker curl coreutils openssl procps findutils yq
ENV SONOBUOY_VERSION=0.57.2
RUN OS=linux; \
ARCH=$(go env GOARCH); \
RELEASE=$(curl -fs https://storage.googleapis.com/kubernetes-release/release/stable.txt); \
if [ "${ARCH}" == "amd64" ] || [ "${ARCH}" == "arm64" ] || [ "${ARCH}" == "s390x" ]; then \
curl -sL "https://github.com/vmware-tanzu/sonobuoy/releases/download/v${SONOBUOY_VERSION}/sonobuoy_${SONOBUOY_VERSION}_${OS}_${ARCH}.tar.gz" | \
tar -xzf - -C /usr/local/bin; \
fi; \
curl -fsL https://storage.googleapis.com/kubernetes-release/release/${RELEASE}/bin/linux/${ARCH}/kubectl -o /usr/local/bin/kubectl; \
chmod a+x /usr/local/bin/kubectl;
ENV TEST_CLEANUP=true
ENTRYPOINT ["./scripts/entry.sh"]
CMD ["test"]
FROM vagrantlibvirt/vagrant-libvirt:sha-a94ce0d AS test-e2e
RUN apt-get update && apt-get install -y docker.io wget
ENV VAGRANT_DISABLE_STRICT_DEPENDENCY_ENFORCEMENT=1
RUN vagrant plugin install vagrant-k3s --plugin-version 0.4.0
RUN vagrant plugin install vagrant-reload vagrant-scp
# Workaround for older vagrant-libvirt image and new Vagrant infra websites
# See https://github.com/hashicorp/vagrant/issues/13571 and
# https://github.com/vagrant-libvirt/vagrant-libvirt/issues/1840
RUN wget https://app.vagrantup.com/bento/boxes/ubuntu-24.04/versions/202404.26.0/providers/libvirt.box -O bento-ubuntu24.04-202404.26.0.box
RUN vagrant box add bento/ubuntu-24.04 bento-ubuntu24.04-202404.26.0.box
RUN cd /.vagrant.d/boxes/bento-VAGRANTSLASH-ubuntu-24.04/ && mv 0 202404.26.0 && echo -n "https://app.vagrantup.com/bento/boxes/ubuntu-24.04" > metadata_url
RUN curl -LO "https://dl.k8s.io/release/$(curl -L -s https://dl.k8s.io/release/stable.txt)/bin/linux/amd64/kubectl"; \
chmod +x ./kubectl; \
mv ./kubectl /usr/local/bin/kubectl
RUN GO_VERSION=go1.23.6; \
curl -O -L "https://golang.org/dl/${GO_VERSION}.linux-amd64.tar.gz"; \
rm -rf /usr/local/go; \
tar -C /usr/local -xzf ${GO_VERSION}.linux-amd64.tar.gz;
ENV PATH="${PATH}:/usr/local/go/bin"

243
GOVERNANCE.md Normal file
View File

@ -0,0 +1,243 @@
# K3s Project Governance
This governance explains how the K3s project is run. As such, it is a living document that can be updated at any time.
- [Values](#values)
- [Maintainers](#maintainers)
- [Becoming a Maintainer](#becoming-a-maintainer)
- [Meetings](#meetings)
- [CNCF Resources](#cncf-resources)
- [Code of Conduct Enforcement](#code-of-conduct)
- [Security Response Team](#security-response-team)
- [Voting](#voting)
- [Modifications](#modifying-this-charter)
## Values
K3s and its leadership embrace the following values:
* Openness: Communication and decision-making happens in the open and is discoverable for future reference. As much as possible, all discussions and work take place in public forums and open repositories.
* Fairness: All stakeholders have the opportunity to provide feedback and submit contributions, which will be considered on their merits.
* Community over Product or Company: Sustaining and growing our community takes priority over shipping code or sponsors' organizational goals. Each contributor participates in the project as an individual.
* Inclusivity: We innovate through different perspectives and skill sets, which can only be accomplished in a welcoming and respectful environment.
* Participation: Responsibilities within the project are earned through participation, and there is a clear path up the contributor ladder into leadership positions.
## Community Roles
* **Users**: Members that engage with the K3s community via any medium (Slack, GitHub, mailing lists, etc.).
* **Contributors**: Members who make regular contributions to the project (documentation, code reviews, responding to issues, participation in proposal discussions, contributing code, etc.). Contributors are potential Maintainers.
* **Reviewers**: Review contributions from other members.
* **Maintainers**: The K3s project leaders. They are responsible for the overall health and direction of the project; final reviewers of PRs and responsible for releases. Some Maintainers are responsible for one or more components within a project, acting as technical leads for that component. Maintainers are expected to contribute code and documentation, review PRs including ensuring quality of code, triage issues, proactively fix bugs, and perform maintenance tasks for these components.
## Maintainers
K3s Maintainers have write access to the [project GitHub repository](https://github.com/k3s-io/k3s/). They can merge their own patches (after the approval process) or patches from others. The current Maintainers can be found in [MAINTAINERS.md](https://github.com/k3s-io/k3s/blob/master/MAINTAINERS). Maintainers collectively manage the project's resources and contributors.
This privilege carries specific responsibilities: Maintainers are people who care about the K3s project and want to help it grow and improve. A Maintainer is not just someone who can make changes, but someone who has demonstrated their ability to collaborate with the team, get the most knowledgeable people to review code and docs, contribute high-quality code, and follow through to fix issues (in code or tests).
A Maintainer is a contributor to the project's success and a citizen helping the project succeed.
The collective team of all Maintainers is known as the Maintainer Council, which is the governing body for the project.
### Becoming a Maintainer
Anyone is eligible to become a Maintainer; to do so, you need to demonstrate a few or more of the following:
* demonstrate availability and capability to meet the Maintainer expectations above
* commitment to the project:
* participate in discussions, community meetings, contributions, and code and documentation reviews for a period of 6 months or more,
* perform reviews for 15 non-trivial pull requests,
* contribute 30 non-trivial pull requests and have them merged,
* ability to write quality code and/or documentation,
* ability to collaborate with the team,
* understanding of how the team works (policies, processes for testing and code review, etc),
* understanding of the project's code base and coding and documentation style.
A new Maintainer must be proposed by an existing Maintainer by sending a message to the [developer mailing list](mailto:k3s-maintainers@lists.cncf.io) and opening a PR in [MAINTAINERS](https://github.com/k3s-io/k3s/blob/master/MAINTAINERS). A [supermajority](#Supermajority) vote of existing Maintainers approves the application. Maintainer nominations will be evaluated without prejudice to employer or demographics.
Maintainers who are selected will be granted the necessary GitHub rights, and invited to the [private Maintainer mailing list](mailto:k3s-maintainers@lists.cncf.io).
Maintainers responsibilities:
* **Code Quality & Reviews**: Review and merge pull requests, ensure adherence to project standards, and manage the codebase
* **Issue Management**: Triage incoming issues, provide guidance, and close or categorize as needed
* **Releases**: Oversee versioning, changelogs, and publication of new releases
* **Community Stewardship**: Foster a welcoming and inclusive environment, respond to contributor questions, and enforce the Code of Conduct
* **Documentation**: Maintain clear user and contributor documentation
* **Project Direction**: Set and communicate project goals, prioritize features and fixes
* **Security & Dependencies**: Keep dependencies secure and up to date, monitor for vulnerabilities
* **Onboarding & Mentorship**: Support new contributors and encourage diverse forms of contribution
* **Advocacy & Outreach**: Promote the project externally and collaborate with other communities when appropriate
### Removing a Maintainer
Maintainers may resign at any time if they feel that they will not be able to continue fulfilling their project duties.
Maintainers may also be removed for inactivity, failure to fulfill their Maintainer responsibilities, violations of the Code of Conduct, or other reasons. Inactivity is defined as a period of very low or no activity in the project for a year or more, with no definite schedule to return to full Maintainer activity.
A Maintainer may be removed at any time by a [supermajority](#Supermajority) vote of the remaining Maintainers.
Depending on the reason for removal, a Maintainer may be converted to Emeritus status. Emeritus Maintainers will still be consulted on some project matters, and can be rapidly returned
to Maintainer status if their availability changes.
## Reviewer
Reviewers are able to review code for quality and correctness on some part of a subproject. They are knowledgeable about both the codebase and software engineering principles.
**Requirements**
* Knowledgeable about the codebase
* Sponsored by a Maintainer
* New reviewer must be nominated by an existing Maintainer or reviewer or self-nominated and must be elected by a [supermajority](#Supermajority) of existing Maintainers
**Responsibilities and privileges**
* Code reviewer status may be a precondition to accepting large code contributions
* Responsible for project quality control via code reviews
* Focus on code quality and correctness, including testing and factoring
* May also review for more holistic issues, but not a requirement
* Expected to be responsive to review requests
* Assigned PRs to review related to subproject of expertise
* Assigned test bugs related to subproject of expertise
## Community Ladder
User -> Contributor -> Reviewer -> Maintainer
## Supermajority
A **supermajority** is defined as two-thirds (2/3) of the votes cast by active Maintainers. The supermajority is calculated based on the number of votes cast, excluding abstentions.
Maintainers may vote "agree / yes / +1", "disagree / no / -1", or "abstain". An "abstain" vote does not count toward the total used to calculate the supermajority — it is equivalent to not voting.
A vote passes when at least two-thirds of the non-abstaining votes are in favor within the voting period.
Votes must be cast within a defined voting period (e.g., 7 calendar days). If a quorum (more than 50% of active Maintainers) is not met during this period, the vote is considered invalid and may be rescheduled.
Failure to vote on major decisions will be considered a sign of inactivity and may indicate that a Maintainer is not fulfilling their responsibilities. This behavior will be taken into account during periodic reviews of Maintainer status.
Examples:
| Votes Cast | Yes Votes | No Votes | Required Yes Votes (≥2/3) | Result |
|------------|-----------|----------|----------------------------|----------------------|
| 9 | 6 | 3 | 6 | ✅ Passes (6 = 2/3) |
| 9 | 5 | 4 | 6 | ❌ Fails (5 < 2/3) |
| 6 | 4 | 2 | 4 | ✅ Passes (4 = 2/3) |
| 6 | 3 | 3 | 4 | ❌ Fails (3 < 2/3) |
| 12 | 8 | 4 | 8 | ✅ Passes (8 = 2/3) |
| 12 | 7 | 5 | 8 | ❌ Fails (7 < 2/3) |
## Simple majority
A **simple majority** is defined as more than half of the votes cast in a decision-making process.
Examples:
| Votes Cast | Yes Votes | No Votes | Result |
|------------|-----------|----------|----------------------|
| 10 | 6 | 4 | ✅ Passes (6 > 5) |
| 10 | 5 | 5 | ❌ Fails (not > 50%) |
| 7 | 4 | 3 | ✅ Passes (4 > 3) |
| 7 | 3 | 3 | ❌ Fails (tie; not >) |
## Voting and Decision Making
While most business in K3s is conducted by "[lazy consensus](https://community.apache.org/committers/lazyConsensus.html)", periodically the Maintainers may need to vote on specific actions or changes. A vote can be taken on [the developer mailing list](mailto:cncf-k3s-dev@lists.cncf.io) or [the private Maintainer mailing list](mailto:cncf-k3s-maintainers@lists.cncf.io) for security or conduct matters.
Votes may also be taken at [the community meeting](https://k3s.io/community/#community-meetings). Any Maintainer may demand a vote be taken.
Most votes require a [simple majority](#simple-majority) of all Maintainers to succeed, except where otherwise noted. [Supermajority](#Supermajority) votes mean at least two-thirds of all existing Maintainers.
Ideally, all project decisions are resolved by consensus. If impossible, any Maintainer may call a vote. Unless otherwise specified in this document, any vote will be decided by a [supermajority](#Supermajority) of Maintainers.
In situations without enough Maintainer participation, the threshold for **non-critical** decisions may be lowered from a supermajority to a [**simple majority**](#simple-majority).
For any **critical** decisions, the [CNCF TOC](https://www.cncf.io/people/technical-oversight-committee/) should be consulted for approval before moving forward.
## Voting requirements
* Adding a Maintainer: [Supermajority](#Supermajority)
* Removing a Maintainer: [Supermajority](#Supermajority)
* Requesting CNCF resources: [Simple majority](#simple-majority)
* Charter and Governance: [Supermajority](#Supermajority)
If a vote does not meet quorum (e.g., fewer than 50% of Maintainers vote), the vote may be postponed or escalated to a follow-up meeting.
## Proposal Process (ADRs)
One of the most important aspects in any open source community is the concept of proposals. Large changes to the codebase and/or new features should be preceded by a proposal in the [ADRs](https://github.com/k3s-io/k3s/tree/master/docs/adrs) directory of this repository. This process allows for all members of the community to weigh in on the concept (including the technical details), share their comments and ideas, and offer to help. It also ensures that members are not duplicating work or inadvertently stepping on toes by making large conflicting changes.
The project roadmap is defined by accepted proposals.
Proposals should cover the high-level objectives, use cases, and technical recommendations on how to implement. In general, the community member(s) interested in implementing the proposal should be either deeply engaged in the proposal process or be an author of the proposal.
The proposal should be documented as a separate markdown file pushed to the [ADRs](https://github.com/k3s-io/k3s/tree/master/docs/adrs) directory in the [k3s](https://github.com/k3s-io/k3s) repository via PR. The name of the file should follow the name pattern `<short meaningful words joined by '-'>.md`, e.g: `clear-old-tags-with-policies.md`.
Use the [Proposal Template](https://github.com/k3s-io/k3s/tree/master/docs/adrs/template.md) as a starting point. (Note: a PR is still needed to add this template.)
### Proposal Lifecycle
The proposal PR can be marked with different status labels to represent the status of the proposal:
* Proposed: Proposal is just created.
* Reviewing: Proposal is under review and discussion.
* Accepted: Proposal is reviewed and accepted (either by consensus or vote).
* Rejected: Proposal is reviewed and rejected (either by consensus or vote).
A proposal may only be accepted and merged after receiving approval from at least two maintainers who are not the original author of the proposal.
### Proposal Threshold
The need for a proposal (in the form of an ADR or design document) is determined primarily by scope. Pull requests that introduce major changes — such as architectural overhauls, system-wide patterns, or significant new features — may prompt maintainers to request a proposal for further discussion and alignment.
Maintainers may comment on a PR with a request to "submit an ADR" when a change is deemed too substantial for review in isolation.
Smaller, self-contained changes (e.g., bug fixes, minor enhancements, or localized refactors) typically do not require a proposal and can proceed through the standard PR workflow.
## Meetings
Time zones permitting, Maintainers are expected to participate in the public developer meeting; see [Community meetings](https://k3s.io/community/#community-meetings) for the schedule.
Maintainers will also have closed meetings in order to discuss security reports or [Code of Conduct](https://github.com/k3s-io/k3s/blob/master/CODE_OF_CONDUCT.md) violations. Such meetings should be scheduled by any Maintainer on receipt of a security issue or CoC report. All current Maintainers must be invited to such closed meetings, except for any Maintainer who is accused of a CoC violation.
## CNCF Resources
Any Maintainer may suggest a request for CNCF resources, either on the [mailing list](mailto:k3s-maintainers@lists.cncf.io) or during a meeting. A [simple majority](#simple-majority) of Maintainers approves the request. The Maintainers may also choose to delegate working with the CNCF to non-Maintainer community members, who will then be added to the [CNCF's Maintainer List](https://github.com/cncf/foundation/blob/main/project-maintainers.csv) for that purpose.
## Code of Conduct
[Code of Conduct](https://github.com/k3s-io/k3s/blob/master/CODE_OF_CONDUCT.md) violations by community members will be discussed and resolved on the [private Maintainer mailing list](https://lists.cncf.io/g/cncf-k3s-maintainers). If a Maintainer is directly involved in the report, the Maintainers will instead designate two Maintainers to work with the CNCF Code of Conduct Committee in resolving it.
## Security Response Team
The Maintainers will appoint a Security Response Team to handle security reports. This committee may simply consist of the Maintainer Council themselves. If this responsibility is delegated, the Maintainers will appoint a team of at least two contributors to handle it. The Maintainers will review who is assigned to this at least once a year.
The Security Response Team is responsible for handling all reports of security holes and breaches according to the [security policy](https://github.com/k3s-io/k3s?tab=security-ov-file#readme).
## Modifying this Charter
Changes to this Governance and its supporting documents may be approved by a [supermajority](#Supermajority) vote of the Maintainers.
## Thanks
Many thanks in advance to everyone who contributes their time and effort to making K3s both a successful project as well as a successful community. The strength of our software shines in the strengths of each individual community member. Thank YOU!
Some content in this document was built upon (and inspired by) the work in the [Kubernetes](https://github.com/kubernetes/community), [Linkerd](https://github.com/linkerd/linkerd2/blob/main/GOVERNANCE.md), [Helm](https://github.com/helm/community/blob/main/governance), [Harbor](https://github.com/goharbor/community/blob/main/GOVERNANCE.md), and [Contour](https://github.com/projectcontour/community/blob/main/GOVERNANCE.md) communities! KUDOs to all of them!

25
LICENSE
View File

@ -175,28 +175,3 @@
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "[]"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright [yyyy] [name of copyright owner]
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

23
MAINTAINERS Normal file
View File

@ -0,0 +1,23 @@
# The following is the list of current K3s maintainers
# Github ID, Name, Email Address
brandond, Brad Davidson, brad.davidson@suse.com
briandowns, Brian Downs, brian.downs@suse.com
brooksn, Brooks Newberry, brooks.newberry@suse.com
caroline-suse-rancher, Caroline Davis, caroline.davis@suse.com
cwayne18, Chris Wayne, chris.wayne@suse.com
dereknola, Derek Nola, derek.nola@suse.com
galal-hussein, Hussein Galal, hussein.galalabdelazizahmed@suse.com
manuelbuil, Manuel Buil, mbuil@suse.com
matttrach, Matt Trachier, matt.trachier@suse.com
mdrahman-suse, MD Rahman, md.rahman@suse.com
Oats87, Chris Kim, chris.kim@suse.com
rancher-max, Max Ross, max.ross@suse.com
rbrtbnfgl, Roberto Bonafiglia, roberto.bonafiglia@suse.com
ShylajaDevadiga, Shylaja Devadiga, shylaja.devadiga@suse.com
thomasferrandiz, Thomas Ferrandiz, thomas.ferrandiz@suse.com
VestigeJ, Justin Janes, justin.janes@suse.com
# Community Management
OrlinVasilev, Orlin Vasilev, orlin.vasilev@suse.com
robertsirc, Robert Sirchia, robert.sirchia@suse.com

80
Makefile Normal file
View File

@ -0,0 +1,80 @@
TARGETS := $(shell ls scripts | grep -v \\.sh)
GO_FILES ?= $$(find . -name '*.go')
SHELL := /bin/bash
.dapper:
@echo Downloading dapper
@curl -sL https://releases.rancher.com/dapper/v0.6.0/dapper-$$(uname -s)-$$(uname -m) > .dapper.tmp
@chmod +x .dapper.tmp
@./.dapper.tmp -v
@mv .dapper.tmp .dapper
.PHONY: docker.sock
docker.sock:
while ! docker version 1>/dev/null; do sleep 1; done
$(TARGETS): .dapper docker.sock
./.dapper $@
.PHONY: deps
deps:
go mod tidy
release:
./scripts/release.sh
.DEFAULT_GOAL := ci
.PHONY: $(TARGETS)
build/data:
mkdir -p $@
.PHONY: binary-size-check
binary-size-check:
scripts/binary_size_check.sh
.PHONY: image-scan
image-scan:
scripts/image_scan.sh $(IMAGE)
format:
gofmt -s -l -w $(GO_FILES)
goimports -w $(GO_FILES)
.PHONY: local-validate
local-validate:
DOCKER_BUILDKIT=1 docker build \
--build-arg="SKIP_VALIDATE=$(SKIP_VALIDATE)" \
--build-arg="DEBUG=$(DEBUG)" \
--progress=plain \
-f Dockerfile.local --target=validate .
.PHONY: local-binary
local-binary:
@echo "INFO: Building K3s binaries and assets..."
. ./scripts/git_version.sh && \
DOCKER_BUILDKIT=1 docker build \
--build-arg "GIT_TAG=$$GIT_TAG" \
--build-arg "TREE_STATE=$$TREE_STATE" \
--build-arg "COMMIT=$$COMMIT" \
--build-arg "DIRTY=$$DIRTY" \
--build-arg="GOCOVER=$(GOCOVER)" \
--build-arg="GOOS=$(GOOS)" \
--build-arg="DEBUG=$(DEBUG)" \
-f Dockerfile.local --target=result --output=. .
.PHONY: local-image
local-image: local-binary
@echo "INFO: Building K3s image..."
./scripts/package-image
.PHONY: local-airgap
local-airgap:
@echo "INFO: Building K3s airgap tarball..."
./scripts/package-airgap
.PHONY: local-ci
local-ci: local-binary local-image local-airgap

266
README.md
View File

@ -1,101 +1,195 @@
# Kubernetes
K3s - Lightweight Kubernetes
===============================================
[![FOSSA Status](https://app.fossa.com/api/projects/custom%2B25850%2Fgithub.com%2Fk3s-io%2Fk3s.svg?type=shield)](https://app.fossa.com/projects/custom%2B25850%2Fgithub.com%2Fk3s-io%2Fk3s?ref=badge_shield)
[![Nightly CI](https://github.com/k3s-io/k3s/actions/workflows/nightly-install.yaml/badge.svg)](https://github.com/k3s-io/k3s/actions/workflows/nightly-install.yaml)
[![Build Status](https://drone-publish.k3s.io/api/badges/k3s-io/k3s/status.svg)](https://drone-publish.k3s.io/k3s-io/k3s)
[![Integration Test Coverage](https://github.com/k3s-io/k3s/actions/workflows/integration.yaml/badge.svg)](https://github.com/k3s-io/k3s/actions/workflows/integration.yaml)
[![Unit Test Coverage](https://github.com/k3s-io/k3s/actions/workflows/unitcoverage.yaml/badge.svg)](https://github.com/k3s-io/k3s/actions/workflows/unitcoverage.yaml)
[![OpenSSF Best Practices](https://www.bestpractices.dev/projects/6835/badge)](https://www.bestpractices.dev/projects/6835)
[![OpenSSF Scorecard](https://api.scorecard.dev/projects/github.com/k3s-io/k3s/badge)](https://scorecard.dev/viewer/?uri=github.com/k3s-io/k3s)
[![Releases](https://img.shields.io/github/downloads/k3s-io/k3s/total.svg)](https://github.com/k3s-io/k3s/tags?label=Downloads)
[![CLOMonitor](https://img.shields.io/endpoint?url=https://clomonitor.io/api/projects/cncf/k3s/badge)](https://clomonitor.io/projects/cncf/k3s)
<img src="https://github.com/kubernetes/kubernetes/raw/master/logo/logo.png" width="100">
Lightweight Kubernetes. Production ready, easy to install, half the memory, all in a binary less than 100 MB.
----
Great for:
Kubernetes without the features I don't care about.
* Edge
* IoT
* CI
* Development
* ARM
* Embedding k8s
* Situations where a PhD in k8s clusterology is infeasible
Some of the removed features
* OpenAPI/Swagger
* cloud-controller-manager
* kube aggregation
* APIs (NOTE: most of these are old APIs that have been replaced)
* admissionregistration/v1alpha1, authentication/v1beta1, authorization/v1beta1, certificates/v1beta1, events/v1beta1, imagepolicy/v1alpha1, rbac/v1alpha1, settings/v1alpha1, storage/v1alpha1,
* Authentication
* bootstrap token, oidc
* Authorization
* ABAC
* All alpha features
* Cloud Providers (all of them)
* Controllers
* Bootstrap
* Certificates
* Cloud
* Cloud based node IPAM
* Route
* Credential Providers AWS/GCP/Azure/Rancher
* Kubelet
* Device Plugin
* Certificates
* Checkpoint
* Device Manager
* Custom Metrics
* Dockershim (IMPORTANT: no Docker support; the only runtime that works is containerd)
* GPU
* Mount Pod
* Network
* Hairpin
* Kubenet
* rkt
* Volume Drivers
* aws_ebs, azure_dd, azure_file, cephfs, cinder, fc, flocker, gce_pd, glusterfs, iscsi, photon_pd, portworx, quobyte, rbd, scaleio, storageos, vsphere_volume
* Admission Controllers
* admin, alwayspullimages, antiaffinity, defaulttolerationseconds, deny, eventratelimit, exec, extendedresourcetoleration, gc, imagepolicy, initialresources, limitranger, namespace, noderestriction, persistentvolume, podnodeselector, podpreset, podtolerationrestriction, priority, resourcequota, security, securitycontext, storageobjectinuseprotection
What is left? A lot. Basically all your normal pod/deployment/service stuff is there. Most apps for kubernetes will run just fine.
Full Build
----------
# Setup your GOPATH. Note that this code should be at k8s.io/kubernetes in your GOPATH, not github.com/ibuildthecloud/k3s
go build -o kubectl ./cmd/kubectl
go build -o hyperkube ./cmd/hyperkube
Now just run hyperkube as you normally would.
Super Opinionated Approach that probably won't work for you
-----------------------------------------------------------
# Setup your GOPATH. Note that this code should be at k8s.io/kubernetes in your GOPATH, not github.com/ibuildthecloud/k3s
go build -o k3s
go build -o kubectl ./cmd/kubectl
Run
What is this?
---
Run containerd
K3s is a [fully conformant](https://github.com/cncf/k8s-conformance/pulls?q=is%3Apr+k3s) production-ready Kubernetes distribution with the following changes:
1. It is packaged as a single binary.
1. It adds support for sqlite3 as the default storage backend. Etcd3, MariaDB, MySQL, and Postgres are also supported.
1. It wraps Kubernetes and other components in a single, simple launcher.
1. It is secure by default with reasonable defaults for lightweight environments.
1. It has minimal to no OS dependencies (just a sane kernel and cgroup mounts needed).
1. It eliminates the need to expose a port on Kubernetes worker nodes for the kubelet API by exposing this API to the Kubernetes control plane nodes over a websocket tunnel.
K3s bundles the following technologies together into a single cohesive distribution:
* [Containerd](https://containerd.io/) & [runc](https://github.com/opencontainers/runc)
* [Flannel](https://github.com/flannel-io/flannel) for CNI
* [CoreDNS](https://coredns.io/)
* [Metrics Server](https://github.com/kubernetes-sigs/metrics-server)
* [Traefik](https://containo.us/traefik/) for ingress
* [Klipper-lb](https://github.com/k3s-io/klipper-lb) as an embedded service load balancer provider
* [Kube-router](https://www.kube-router.io/) netpol controller for network policy
* [Helm-controller](https://github.com/k3s-io/helm-controller) to allow for CRD-driven deployment of helm manifests
* [Kine](https://github.com/k3s-io/kine) as a datastore shim that allows etcd to be replaced with other databases
* [Local-path-provisioner](https://github.com/rancher/local-path-provisioner) for provisioning volumes using local storage
* [Host utilities](https://github.com/k3s-io/k3s-root) such as iptables/nftables, ebtables, ethtool, & socat
These technologies can be disabled or swapped out for technologies of your choice.
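For example, packaged components can be turned off with server flags when you want to bring your own; a minimal sketch (flag names as used by recent K3s releases):

```bash
# Replace the bundled ingress, service LB, CNI, and network policy controller
k3s server \
  --disable traefik \
  --disable servicelb \
  --flannel-backend=none \
  --disable-network-policy
```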
Additionally, K3s simplifies Kubernetes operations by maintaining functionality for:
* Managing the TLS certificates of Kubernetes components
* Managing the connection between worker and server nodes
* Auto-deploying Kubernetes resources from local manifests, in real time as they are changed (see the sketch after this list)
* Managing an embedded etcd cluster
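As a sketch of the manifest auto-deployment: any file placed in the server's manifests directory is applied to the cluster and re-applied whenever it changes (the path below is the default for a standard install):

```bash
# k3s watches this directory and applies changes automatically
sudo tee /var/lib/rancher/k3s/server/manifests/hello-config.yaml <<'EOF'
apiVersion: v1
kind: ConfigMap
metadata:
  name: hello
  namespace: default
data:
  greeting: world
EOF
```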
What's with the name?
--------------------
We wanted an installation of Kubernetes that was half the size in terms of memory footprint. Kubernetes is a
10 letter word stylized as k8s. So something half as big as Kubernetes would be a 5 letter word stylized as
K3s. A '3' is also an '8' cut in half vertically. There is no long form of K3s, and no official pronunciation.
Is this a fork?
---------------
No, it's a distribution. A fork implies continued divergence from the original. This is not K3s's goal or practice. K3s explicitly intends not to change any core Kubernetes functionality. We seek to remain as close to upstream Kubernetes as possible. However, we maintain a small set of patches (well under 1000 lines) important to K3s's use case and deployment model. We maintain patches for other components as well. When possible, we contribute these changes back to the upstream projects, for example, with [SELinux support in containerd](https://github.com/containerd/cri/pull/1487/commits/24209b91bf361e131478d15cfea1ab05694dc3eb). This is a common practice amongst software distributions.
K3s is a distribution because it packages additional components and services necessary for a fully functional cluster that go beyond vanilla Kubernetes. These are opinionated choices on technologies for components like ingress, storage class, network policy, service load balancer, and even container runtime. These choices and technologies are touched on in more detail in the [What is this?](#what-is-this) section.
How is this lightweight or smaller than upstream Kubernetes?
---
There are two major ways that K3s is lighter weight than upstream Kubernetes:
1. The memory footprint to run it is smaller
2. The binary, which contains all the non-containerized components needed to run a cluster, is smaller
The memory footprint is reduced primarily by running many components inside of a single process. This eliminates significant overhead that would otherwise be duplicated for each component.
The binary is smaller by removing third-party storage drivers and cloud providers, explained in more detail below.
What have you removed from upstream Kubernetes?
---
This is a common point of confusion because it has changed over time. Early versions of K3s had much more removed than the current version. K3s currently removes two things:
1. In-tree storage drivers
1. In-tree cloud provider
Both of these have out-of-tree alternatives in the form of [CSI](https://github.com/container-storage-interface/spec/blob/master/spec.md) and [CCM](https://kubernetes.io/docs/tasks/administer-cluster/running-cloud-controller/), which work in K3s and which upstream is moving towards.
We remove these to achieve a smaller binary size. They can be removed while remaining conformant because neither affects core Kubernetes functionality. They are also dependent on third-party cloud or data center technologies/services, which may not be available in many K3s use cases.
Getting Started
---
- [Quick Install](https://docs.k3s.io/quick-start)
- [Architecture](https://docs.k3s.io/architecture)
- [FAQ](https://docs.k3s.io/faq)
- [Contribute](CONTRIBUTING.md)
Community
---
- ### Slack
Join [Slack](https://slack.rancher.io/) to chat with K3s developers and other K3s users. It's a great place to learn and ask questions: see the [#k3s](https://rancher-users.slack.com/archives/CGGQEHPPW) and [#k3s-contributor](https://rancher-users.slack.com/archives/CGXR87T8B) channels, as well as the [#k3s](https://cloud-native.slack.com/archives/C0196ULKX8S) channel in [CNCF Slack](https://cloud-native.slack.com).
- ### Getting involved
[GitHub Issues](https://github.com/k3s-io/k3s/issues) - Submit your issues and feature requests via GitHub.
- ### Community Meetings and Office hours
The K3s developer community hangs out on Zoom to chat. Everybody is welcome.
**Add the [Linux Foundation iCal](https://webcal.prod.itx.linuxfoundation.org/lfx/a092M00001IkYIjQAN) to your calendar**:
- AMS/EMEA TZ friendly: 10:00 am PST, every *second* Tuesday of the month
- EMEA/APAC TZ friendly: every *third* Tuesday of the month
**Meeting notes and agenda**: https://hackmd.io/@k3s/meet-notes/
**Meeting recordings**: [K3s Channel](https://www.youtube.com/watch?v=HRuJROA6Z3k&list=PLlBG85HKlLE9KFDqJ_K6NOpup-zVw8ANl&pp=gAQB)
You can also find the full details on the website: https://k3s.io/community
What's next?
---
Check out our [roadmap](ROADMAP.md) to see what we have planned moving forward.
Release cadence
---
K3s maintains pace with upstream Kubernetes releases. Our goal is to release patch releases within one week, and new minors within 30 days.
Our release versioning reflects the version of upstream Kubernetes that is being released. For example, the K3s release [v1.27.4+k3s1](https://github.com/k3s-io/k3s/releases/tag/v1.27.4%2Bk3s1) maps to the `v1.27.4` Kubernetes release. We add a postfix in the form of `+k3s<number>` to allow us to make additional releases using the same version of upstream Kubernetes while remaining [semver](https://semver.org/) compliant. For example, if we discovered a high severity bug in `v1.27.4+k3s1` and needed to release an immediate fix for it, we would release `v1.27.4+k3s2`.
Documentation
-------------
Please see [the official docs site](https://docs.k3s.io) for complete documentation.
Quick-Start - Install Script
--------------
The `install.sh` script provides a convenient way to download K3s and add a service to systemd or openrc.
To install k3s as a service, run:
```bash
# Download and install containerd and runc
sudo curl -fL -o /usr/local/bin/runc https://github.com/opencontainers/runc/releases/download/v1.0.0-rc5/runc.amd64
sudo chmod +x /usr/local/bin/runc
curl -fsL https://github.com/containerd/containerd/releases/download/v1.1.1/containerd-1.1.1.linux-amd64.tar.gz | sudo tar xzvf - -C /usr/local/bin --strip-components=1 bin/
# Some CNI
sudo mkdir -p /opt/cni/bin
curl -fsL https://github.com/containernetworking/plugins/releases/download/v0.7.1/cni-plugins-amd64-v0.7.1.tgz | sudo tar xvzf - -C /opt/cni/bin ./loopback
sudo containerd &
curl -sfL https://get.k3s.io | sh -
```
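The install script can also be tuned through environment variables; a hedged example using the documented `INSTALL_K3S_*` variables:

```bash
# Pin a release channel and pass flags through to `k3s server`
curl -sfL https://get.k3s.io | \
  INSTALL_K3S_CHANNEL=stable \
  INSTALL_K3S_EXEC="server --disable traefik" \
  sh -
```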
A kubeconfig file is written to `/etc/rancher/k3s/k3s.yaml` and the service is automatically started or restarted.
The install script will install K3s and additional utilities, such as `kubectl`, `crictl`, `k3s-killall.sh`, and `k3s-uninstall.sh`.
To run Kubernetes from the standalone binary directly instead, for example:
```bash
# Server
./k3s
# Agent (If doing this on another host copy the ./data folder)
sudo ./k3s agent
# Install Networking
export KUBECONFIG=./data/cred/kubeconfig.yaml
curl -s "https://cloud.weave.works/k8s/net?k8s-version=$(./kubectl version | base64 | tr -d '\n')" | ./kubectl apply -f -
./kubectl get nodes
```
Your kubeconfig file is in `./data/cred/kubeconfig.yaml`.
`K3S_TOKEN` is created at `/var/lib/rancher/k3s/server/node-token` on your server.
To install on worker nodes, pass the `K3S_URL` and `K3S_TOKEN` environment variables, for example:
```bash
curl -sfL https://get.k3s.io | K3S_URL=https://myserver:6443 K3S_TOKEN=XXX sh -
```
Manual Download
---------------
1. Download `k3s` from the latest [release](https://github.com/k3s-io/k3s/releases/latest); x86_64, armhf, arm64, and s390x are supported.
1. Run the server.
```bash
sudo k3s server &
# Kubeconfig is written to /etc/rancher/k3s/k3s.yaml
sudo k3s kubectl get nodes
# On a different node run the below. NODE_TOKEN comes from
# /var/lib/rancher/k3s/server/node-token on your server
sudo k3s agent --server https://myserver:6443 --token ${NODE_TOKEN}
```
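Once the server is up, the generated kubeconfig can also be used with an external `kubectl` (a minimal sketch; the file is root-owned by default, hence sudo):

```bash
sudo kubectl --kubeconfig /etc/rancher/k3s/k3s.yaml get nodes
```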
Contributing
------------
Please check out our [contributing guide](CONTRIBUTING.md) if you're interested in contributing to K3s.
Security
--------
Security issues in K3s can be reported by sending an email to [security@k3s.io](mailto:security@k3s.io).
Please do not report security vulnerabilities through public GitHub issues.

7
ROADMAP.md Normal file
View File

@ -0,0 +1,7 @@
Roadmap
---
The k3s project uses [GitHub Milestones](http://github.com/k3s-io/k3s/milestones) to track the progress of changes going into the project.
The k3s release cycle moves in cadence with upstream Kubernetes, with an aim to have new minor releases out within 30 days of upstream .0 releases. To follow incoming changes, watching the [Backlog](https://github.com/orgs/k3s-io/projects/5) and [Current Development](https://github.com/orgs/k3s-io/projects/6) GitHub Projects is the most up-to-date way to see what's coming in upcoming releases.
The development of k3s itself happens in the `master` branch, which correlates to the most recent Kubernetes minor release. These changes are then backported to the active release lines (at this time, `release-[N]`, `release-[N-1]`, and `release-[N-2]`).

77
channel.yaml Normal file
View File

@ -0,0 +1,77 @@
# Example channels config
channels:
- name: stable
latest: v1.33.4+k3s1
- name: latest
latestRegexp: .*
excludeRegexp: (^[^+]+-|v1\.25\.5\+k3s1|v1\.26\.0\+k3s1)
- name: testing
latestRegexp: -(alpha|beta|rc)
- name: v1.16
latestRegexp: v1\.16\..*
excludeRegexp: ^[^+]+-
- name: v1.16-testing
latestRegexp: v1\.16\.[0-9]*-(alpha|beta|rc)
- name: v1.17
latestRegexp: v1\.17\..*
excludeRegexp: ^[^+]+-
- name: v1.17-testing
latestRegexp: v1\.17\.[0-9]*-(alpha|beta|rc)
- name: v1.18
latestRegexp: v1\.18\..*
excludeRegexp: ^[^+]+-
- name: v1.18-testing
latestRegexp: v1\.18\.[0-9]*-(alpha|beta|rc)
# Starting with 1.19, we aren't going to add a *-testing channel for minor releases
- name: v1.19
latestRegexp: v1\.19\..*
excludeRegexp: ^[^+]+-
- name: v1.20
latestRegexp: v1\.20\..*
excludeRegexp: ^[^+]+-
- name: v1.21
latestRegexp: v1\.21\..*
excludeRegexp: ^[^+]+-
- name: v1.22
latestRegexp: v1\.22\..*
excludeRegexp: ^[^+]+-
- name: v1.23
latestRegexp: v1\.23\..*
excludeRegexp: ^[^+]+-
- name: v1.24
latestRegexp: v1\.24\..*
excludeRegexp: (^[^+]+-|v1\.24\.9\+k3s1)
- name: v1.25
latestRegexp: v1\.25\..*
excludeRegexp: (^[^+]+-|v1\.25\.5\+k3s1)
- name: v1.26
latestRegexp: v1\.26\..*
excludeRegexp: (^[^+]+-|v1\.26\.0\+k3s1)
- name: v1.27
latestRegexp: v1\.27\..*
excludeRegexp: ^[^+]+-
- name: v1.28
latestRegexp: v1\.28\..*
excludeRegexp: ^[^+]+-
- name: v1.29
latestRegexp: v1\.29\..*
excludeRegexp: ^[^+]+-
- name: v1.30
latestRegexp: v1\.30\..*
excludeRegexp: ^[^+]+-
- name: v1.31
latestRegexp: v1\.31\..*
excludeRegexp: ^[^+]+-
- name: v1.32
latestRegexp: v1\.32\..*
excludeRegexp: ^[^+]+-
- name: v1.33
latestRegexp: v1\.33\..*
excludeRegexp: ^[^+]+-
- name: v1.34
latestRegexp: v1\.34\..*
excludeRegexp: ^[^+]+-
github:
owner: k3s-io
repo: k3s
redirectBase: https://github.com/k3s-io/k3s/releases/tag/
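As a sketch of how this config is consumed: the K3s channel server resolves each channel to a release and redirects to it, which you can observe with a plain HTTP request (the `update.k3s.io` endpoint is the one used by the install script's `INSTALL_K3S_CHANNEL`):

```bash
# The Location header points at the release tag the channel resolves to
curl -sI https://update.k3s.io/v1-release/channels/stable | grep -i '^location'
```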

View File

@ -1,36 +0,0 @@
package(default_visibility = ["//visibility:public"])
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//cmd/clicheck:all-srcs",
"//cmd/cloud-controller-manager:all-srcs",
"//cmd/controller-manager/app:all-srcs",
"//cmd/gendocs:all-srcs",
"//cmd/genkubedocs:all-srcs",
"//cmd/genman:all-srcs",
"//cmd/genswaggertypedocs:all-srcs",
"//cmd/genutils:all-srcs",
"//cmd/genyaml:all-srcs",
"//cmd/hyperkube:all-srcs",
"//cmd/importverifier:all-srcs",
"//cmd/kube-apiserver:all-srcs",
"//cmd/kube-controller-manager:all-srcs",
"//cmd/kube-proxy:all-srcs",
"//cmd/kube-scheduler:all-srcs",
"//cmd/kubeadm:all-srcs",
"//cmd/kubectl:all-srcs",
"//cmd/kubelet:all-srcs",
"//cmd/kubemark:all-srcs",
"//cmd/linkcheck:all-srcs",
],
tags = ["automanaged"],
)

View File

@ -1,10 +0,0 @@
reviewers:
- dchen1107
- lavalamp
- mikedanese
- thockin
approvers:
- dchen1107
- lavalamp
- mikedanese
- thockin

25
cmd/agent/main.go Normal file
View File

@ -0,0 +1,25 @@
package main
import (
"context"
"errors"
"os"
"github.com/k3s-io/k3s/pkg/cli/agent"
"github.com/k3s-io/k3s/pkg/cli/cmds"
"github.com/k3s-io/k3s/pkg/configfilearg"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
func main() {
app := cmds.NewApp()
app.DisableSliceFlagSeparator = true
app.Commands = []*cli.Command{
cmds.NewAgentCommand(agent.Run),
}
if err := app.Run(configfilearg.MustParse(os.Args)); err != nil && !errors.Is(err, context.Canceled) {
logrus.Fatalf("Error: %v", err)
}
}

28
cmd/cert/main.go Normal file
View File

@ -0,0 +1,28 @@
package main
import (
"context"
"errors"
"os"
"github.com/k3s-io/k3s/pkg/cli/cert"
"github.com/k3s-io/k3s/pkg/cli/cmds"
"github.com/k3s-io/k3s/pkg/configfilearg"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
func main() {
app := cmds.NewApp()
app.Commands = []*cli.Command{
cmds.NewCertCommands(
cert.Check,
cert.Rotate,
cert.RotateCA,
),
}
if err := app.Run(configfilearg.MustParse(os.Args)); err != nil && !errors.Is(err, context.Canceled) {
logrus.Fatal(err)
}
}

26
cmd/completion/main.go Normal file
View File

@ -0,0 +1,26 @@
package main
import (
"context"
"errors"
"os"
"github.com/k3s-io/k3s/pkg/cli/cmds"
"github.com/k3s-io/k3s/pkg/cli/completion"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
func main() {
app := cmds.NewApp()
app.Commands = []*cli.Command{
cmds.NewCompletionCommand(
completion.Bash,
completion.Zsh,
),
}
if err := app.Run(os.Args); err != nil && !errors.Is(err, context.Canceled) {
logrus.Fatal(err)
}
}

11
cmd/containerd/main.go Normal file
View File

@ -0,0 +1,11 @@
package main
import (
"github.com/k3s-io/k3s/pkg/containerd"
"k8s.io/klog/v2"
)
func main() {
klog.InitFlags(nil)
containerd.Main()
}

View File

@ -1,14 +0,0 @@
approvers:
- deads2k
- cheftako
- sttts
reviewers:
- caesarxuchao
- cheftako
- deads2k
- lavalamp
- liggitt
- sttts
- stewart-yu
labels:
- sig/api-machinery

View File

@ -1,44 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"helper.go",
"serve.go",
],
importpath = "k8s.io/kubernetes/cmd/controller-manager/app",
visibility = ["//visibility:public"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/util/configz:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/config:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/filters:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/request:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/filters:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/healthz:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/mux:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/routes:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//vendor/github.com/prometheus/client_golang/prometheus:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//cmd/controller-manager/app/options:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

View File

@ -1,55 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"fmt"
"net/http"
"time"
"k8s.io/apimachinery/pkg/util/wait"
clientset "k8s.io/client-go/kubernetes"
"k8s.io/klog"
)
// WaitForAPIServer waits for the API Server's /healthz endpoint to report "ok" with timeout.
func WaitForAPIServer(client clientset.Interface, timeout time.Duration) error {
var lastErr error
err := wait.PollImmediate(time.Second, timeout, func() (bool, error) {
healthStatus := 0
result := client.Discovery().RESTClient().Get().AbsPath("/healthz").Do().StatusCode(&healthStatus)
if result.Error() != nil {
lastErr = fmt.Errorf("failed to get apiserver /healthz status: %v", result.Error())
return false, nil
}
if healthStatus != http.StatusOK {
content, _ := result.Raw()
lastErr = fmt.Errorf("APIServer isn't healthy: %v", string(content))
klog.Warningf("APIServer isn't healthy yet: %v. Waiting a little while.", string(content))
return false, nil
}
return true, nil
})
if err != nil {
return fmt.Errorf("%v: %v", err, lastErr)
}
return nil
}

View File

@ -1,52 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"cloudprovider.go",
"debugging.go",
"generic.go",
"globalflags.go",
"kubecloudshared.go",
"servicecontroller.go",
],
importpath = "k8s.io/kubernetes/cmd/controller-manager/app/options",
visibility = ["//visibility:public"],
deps = [
"//pkg/client/leaderelectionconfig:go_default_library",
"//pkg/cloudprovider/providers:go_default_library",
"//pkg/controller/apis/config:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/config:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/apis/config:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/globalflag:go_default_library",
"//vendor/github.com/spf13/pflag:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)
go_test(
name = "go_default_test",
srcs = ["globalflags_test.go"],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/globalflag:go_default_library",
"//vendor/github.com/spf13/pflag:go_default_library",
],
)

View File

@ -1,56 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"github.com/spf13/pflag"
kubectrlmgrconfig "k8s.io/kubernetes/pkg/controller/apis/config"
)
// CloudProviderOptions holds the cloudprovider options.
type CloudProviderOptions struct {
CloudConfigFile string
Name string
}
// Validate checks validation of cloudprovider options.
func (s *CloudProviderOptions) Validate() []error {
allErrors := []error{}
return allErrors
}
// AddFlags adds flags related to cloudprovider for controller manager to the specified FlagSet.
func (s *CloudProviderOptions) AddFlags(fs *pflag.FlagSet) {
fs.StringVar(&s.Name, "cloud-provider", s.Name,
"The provider for cloud services. Empty string for no provider.")
fs.StringVar(&s.CloudConfigFile, "cloud-config", s.CloudConfigFile,
"The path to the cloud provider configuration file. Empty string for no configuration file.")
}
// ApplyTo fills up cloudprovider config with options.
func (s *CloudProviderOptions) ApplyTo(cfg *kubectrlmgrconfig.CloudProviderConfiguration) error {
if s == nil {
return nil
}
cfg.Name = s.Name
cfg.CloudConfigFile = s.CloudConfigFile
return nil
}

View File

@ -1,63 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"github.com/spf13/pflag"
apiserverconfig "k8s.io/apiserver/pkg/apis/config"
)
// DebuggingOptions holds the Debugging options.
type DebuggingOptions struct {
EnableProfiling bool
EnableContentionProfiling bool
}
// AddFlags adds flags related to debugging for controller manager to the specified FlagSet.
func (o *DebuggingOptions) AddFlags(fs *pflag.FlagSet) {
if o == nil {
return
}
fs.BoolVar(&o.EnableProfiling, "profiling", o.EnableProfiling,
"Enable profiling via web interface host:port/debug/pprof/")
fs.BoolVar(&o.EnableContentionProfiling, "contention-profiling", o.EnableContentionProfiling,
"Enable lock contention profiling, if profiling is enabled")
}
// ApplyTo fills up Debugging config with options.
func (o *DebuggingOptions) ApplyTo(cfg *apiserverconfig.DebuggingConfiguration) error {
if o == nil {
return nil
}
cfg.EnableProfiling = o.EnableProfiling
cfg.EnableContentionProfiling = o.EnableContentionProfiling
return nil
}
// Validate checks validation of DebuggingOptions.
func (o *DebuggingOptions) Validate() []error {
if o == nil {
return nil
}
errs := []error{}
return errs
}

View File

@ -1,128 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"fmt"
"strings"
apimachineryconfig "k8s.io/apimachinery/pkg/apis/config"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/util/sets"
apiserverconfig "k8s.io/apiserver/pkg/apis/config"
apiserverflag "k8s.io/apiserver/pkg/util/flag"
"k8s.io/kubernetes/pkg/client/leaderelectionconfig"
kubectrlmgrconfig "k8s.io/kubernetes/pkg/controller/apis/config"
)
// GenericControllerManagerConfigurationOptions holds the options which are generic.
type GenericControllerManagerConfigurationOptions struct {
Port int32
Address string
MinResyncPeriod metav1.Duration
ClientConnection apimachineryconfig.ClientConnectionConfiguration
ControllerStartInterval metav1.Duration
LeaderElection apiserverconfig.LeaderElectionConfiguration
Debugging *DebuggingOptions
Controllers []string
}
// NewGenericControllerManagerConfigurationOptions returns generic configuration default values for both
// the kube-controller-manager and the cloud-contoller-manager. Any common changes should
// be made here. Any individual changes should be made in that controller.
func NewGenericControllerManagerConfigurationOptions(cfg kubectrlmgrconfig.GenericControllerManagerConfiguration) *GenericControllerManagerConfigurationOptions {
o := &GenericControllerManagerConfigurationOptions{
Port: cfg.Port,
Address: cfg.Address,
MinResyncPeriod: cfg.MinResyncPeriod,
ClientConnection: cfg.ClientConnection,
ControllerStartInterval: cfg.ControllerStartInterval,
LeaderElection: cfg.LeaderElection,
Debugging: &DebuggingOptions{},
Controllers: cfg.Controllers,
}
return o
}
// AddFlags adds flags related to generic for controller manager to the specified FlagSet.
func (o *GenericControllerManagerConfigurationOptions) AddFlags(fss *apiserverflag.NamedFlagSets, allControllers, disabledByDefaultControllers []string) {
if o == nil {
return
}
o.Debugging.AddFlags(fss.FlagSet("debugging"))
genericfs := fss.FlagSet("generic")
genericfs.DurationVar(&o.MinResyncPeriod.Duration, "min-resync-period", o.MinResyncPeriod.Duration, "The resync period in reflectors will be random between MinResyncPeriod and 2*MinResyncPeriod.")
genericfs.StringVar(&o.ClientConnection.ContentType, "kube-api-content-type", o.ClientConnection.ContentType, "Content type of requests sent to apiserver.")
genericfs.Float32Var(&o.ClientConnection.QPS, "kube-api-qps", o.ClientConnection.QPS, "QPS to use while talking with kubernetes apiserver.")
genericfs.Int32Var(&o.ClientConnection.Burst, "kube-api-burst", o.ClientConnection.Burst, "Burst to use while talking with kubernetes apiserver.")
genericfs.DurationVar(&o.ControllerStartInterval.Duration, "controller-start-interval", o.ControllerStartInterval.Duration, "Interval between starting controller managers.")
// TODO: complete the work of the cloud-controller-manager (and possibly other consumers of this code) respecting the --controllers flag
genericfs.StringSliceVar(&o.Controllers, "controllers", o.Controllers, fmt.Sprintf(""+
"A list of controllers to enable. '*' enables all on-by-default controllers, 'foo' enables the controller "+
"named 'foo', '-foo' disables the controller named 'foo'.\nAll controllers: %s\nDisabled-by-default controllers: %s",
strings.Join(allControllers, ", "), strings.Join(disabledByDefaultControllers, ", ")))
leaderelectionconfig.BindFlags(&o.LeaderElection, genericfs)
}
// ApplyTo fills up generic config with options.
func (o *GenericControllerManagerConfigurationOptions) ApplyTo(cfg *kubectrlmgrconfig.GenericControllerManagerConfiguration) error {
if o == nil {
return nil
}
if err := o.Debugging.ApplyTo(&cfg.Debugging); err != nil {
return err
}
cfg.Port = o.Port
cfg.Address = o.Address
cfg.MinResyncPeriod = o.MinResyncPeriod
cfg.ClientConnection = o.ClientConnection
cfg.ControllerStartInterval = o.ControllerStartInterval
cfg.LeaderElection = o.LeaderElection
cfg.Controllers = o.Controllers
return nil
}
// Validate checks validation of GenericOptions.
func (o *GenericControllerManagerConfigurationOptions) Validate(allControllers []string, disabledByDefaultControllers []string) []error {
if o == nil {
return nil
}
errs := []error{}
errs = append(errs, o.Debugging.Validate()...)
allControllersSet := sets.NewString(allControllers...)
for _, controller := range o.Controllers {
if controller == "*" {
continue
}
if strings.HasPrefix(controller, "-") {
controller = controller[1:]
}
if !allControllersSet.Has(controller) {
errs = append(errs, fmt.Errorf("%q is not in the list of known controllers", controller))
}
}
return errs
}

View File

@ -1,120 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"github.com/spf13/pflag"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
kubectrlmgrconfig "k8s.io/kubernetes/pkg/controller/apis/config"
)
// KubeCloudSharedOptions holds the options shared between kube-controller-manager
// and cloud-controller-manager.
type KubeCloudSharedOptions struct {
CloudProvider *CloudProviderOptions
ExternalCloudVolumePlugin string
UseServiceAccountCredentials bool
AllowUntaggedCloud bool
RouteReconciliationPeriod metav1.Duration
NodeMonitorPeriod metav1.Duration
ClusterName string
ClusterCIDR string
AllocateNodeCIDRs bool
CIDRAllocatorType string
ConfigureCloudRoutes bool
NodeSyncPeriod metav1.Duration
}
// NewKubeCloudSharedOptions returns common/default configuration values for both
// the kube-controller-manager and the cloud-contoller-manager. Any common changes should
// be made here. Any individual changes should be made in that controller.
func NewKubeCloudSharedOptions(cfg kubectrlmgrconfig.KubeCloudSharedConfiguration) *KubeCloudSharedOptions {
o := &KubeCloudSharedOptions{
CloudProvider: &CloudProviderOptions{},
ExternalCloudVolumePlugin: cfg.ExternalCloudVolumePlugin,
UseServiceAccountCredentials: cfg.UseServiceAccountCredentials,
RouteReconciliationPeriod: cfg.RouteReconciliationPeriod,
NodeMonitorPeriod: cfg.NodeMonitorPeriod,
ClusterName: cfg.ClusterName,
ConfigureCloudRoutes: cfg.ConfigureCloudRoutes,
}
return o
}
// AddFlags adds flags related to shared variable for controller manager to the specified FlagSet.
func (o *KubeCloudSharedOptions) AddFlags(fs *pflag.FlagSet) {
if o == nil {
return
}
o.CloudProvider.AddFlags(fs)
fs.StringVar(&o.ExternalCloudVolumePlugin, "external-cloud-volume-plugin", o.ExternalCloudVolumePlugin, "The plugin to use when cloud provider is set to external. Can be empty, should only be set when cloud-provider is external. Currently used to allow node and volume controllers to work for in tree cloud providers.")
fs.BoolVar(&o.UseServiceAccountCredentials, "use-service-account-credentials", o.UseServiceAccountCredentials, "If true, use individual service account credentials for each controller.")
fs.BoolVar(&o.AllowUntaggedCloud, "allow-untagged-cloud", false, "Allow the cluster to run without the cluster-id on cloud instances. This is a legacy mode of operation and a cluster-id will be required in the future.")
fs.MarkDeprecated("allow-untagged-cloud", "This flag is deprecated and will be removed in a future release. A cluster-id will be required on cloud instances.")
fs.DurationVar(&o.RouteReconciliationPeriod.Duration, "route-reconciliation-period", o.RouteReconciliationPeriod.Duration, "The period for reconciling routes created for Nodes by cloud provider.")
fs.DurationVar(&o.NodeMonitorPeriod.Duration, "node-monitor-period", o.NodeMonitorPeriod.Duration,
"The period for syncing NodeStatus in NodeController.")
fs.StringVar(&o.ClusterName, "cluster-name", o.ClusterName, "The instance prefix for the cluster.")
fs.StringVar(&o.ClusterCIDR, "cluster-cidr", o.ClusterCIDR, "CIDR Range for Pods in cluster. Requires --allocate-node-cidrs to be true")
fs.BoolVar(&o.AllocateNodeCIDRs, "allocate-node-cidrs", false, "Should CIDRs for Pods be allocated and set on the cloud provider.")
fs.StringVar(&o.CIDRAllocatorType, "cidr-allocator-type", "RangeAllocator", "Type of CIDR allocator to use")
fs.BoolVar(&o.ConfigureCloudRoutes, "configure-cloud-routes", true, "Should CIDRs allocated by allocate-node-cidrs be configured on the cloud provider.")
fs.DurationVar(&o.NodeSyncPeriod.Duration, "node-sync-period", 0, ""+
"This flag is deprecated and will be removed in future releases. See node-monitor-period for Node health checking or "+
"route-reconciliation-period for cloud provider's route configuration settings.")
fs.MarkDeprecated("node-sync-period", "This flag is currently no-op and will be deleted.")
}
// ApplyTo fills up KubeCloudShared config with options.
func (o *KubeCloudSharedOptions) ApplyTo(cfg *kubectrlmgrconfig.KubeCloudSharedConfiguration) error {
if o == nil {
return nil
}
if err := o.CloudProvider.ApplyTo(&cfg.CloudProvider); err != nil {
return err
}
cfg.ExternalCloudVolumePlugin = o.ExternalCloudVolumePlugin
cfg.UseServiceAccountCredentials = o.UseServiceAccountCredentials
cfg.AllowUntaggedCloud = o.AllowUntaggedCloud
cfg.RouteReconciliationPeriod = o.RouteReconciliationPeriod
cfg.NodeMonitorPeriod = o.NodeMonitorPeriod
cfg.ClusterName = o.ClusterName
cfg.ClusterCIDR = o.ClusterCIDR
cfg.AllocateNodeCIDRs = o.AllocateNodeCIDRs
cfg.CIDRAllocatorType = o.CIDRAllocatorType
cfg.ConfigureCloudRoutes = o.ConfigureCloudRoutes
cfg.NodeSyncPeriod = o.NodeSyncPeriod
return nil
}
// Validate checks validation of KubeCloudSharedOptions.
func (o *KubeCloudSharedOptions) Validate() []error {
if o == nil {
return nil
}
errs := []error{}
errs = append(errs, o.CloudProvider.Validate()...)
return errs
}

View File

@ -1,58 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"github.com/spf13/pflag"
kubectrlmgrconfig "k8s.io/kubernetes/pkg/controller/apis/config"
)
// ServiceControllerOptions holds the ServiceController options.
type ServiceControllerOptions struct {
ConcurrentServiceSyncs int32
}
// AddFlags adds flags related to ServiceController for controller manager to the specified FlagSet.
func (o *ServiceControllerOptions) AddFlags(fs *pflag.FlagSet) {
if o == nil {
return
}
fs.Int32Var(&o.ConcurrentServiceSyncs, "concurrent-service-syncs", o.ConcurrentServiceSyncs, "The number of services that are allowed to sync concurrently. Larger number = more responsive service management, but more CPU (and network) load")
}
// ApplyTo fills up ServiceController config with options.
func (o *ServiceControllerOptions) ApplyTo(cfg *kubectrlmgrconfig.ServiceControllerConfiguration) error {
if o == nil {
return nil
}
cfg.ConcurrentServiceSyncs = o.ConcurrentServiceSyncs
return nil
}
// Validate checks validation of ServiceControllerOptions.
func (o *ServiceControllerOptions) Validate() []error {
if o == nil {
return nil
}
errs := []error{}
return errs
}

View File

@ -1,68 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package app
import (
"github.com/prometheus/client_golang/prometheus"
"net/http"
goruntime "runtime"
apiserverconfig "k8s.io/apiserver/pkg/apis/config"
genericapifilters "k8s.io/apiserver/pkg/endpoints/filters"
apirequest "k8s.io/apiserver/pkg/endpoints/request"
apiserver "k8s.io/apiserver/pkg/server"
genericfilters "k8s.io/apiserver/pkg/server/filters"
"k8s.io/apiserver/pkg/server/healthz"
"k8s.io/apiserver/pkg/server/mux"
"k8s.io/apiserver/pkg/server/routes"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/util/configz"
)
// BuildHandlerChain builds a handler chain with a base handler and CompletedConfig.
func BuildHandlerChain(apiHandler http.Handler, authorizationInfo *apiserver.AuthorizationInfo, authenticationInfo *apiserver.AuthenticationInfo) http.Handler {
requestInfoResolver := &apirequest.RequestInfoFactory{}
failedHandler := genericapifilters.Unauthorized(legacyscheme.Codecs, false)
handler := apiHandler
if authorizationInfo != nil {
handler = genericapifilters.WithAuthorization(apiHandler, authorizationInfo.Authorizer, legacyscheme.Codecs)
}
if authenticationInfo != nil {
handler = genericapifilters.WithAuthentication(handler, authenticationInfo.Authenticator, failedHandler, nil)
}
handler = genericapifilters.WithRequestInfo(handler, requestInfoResolver)
handler = genericfilters.WithPanicRecovery(handler)
return handler
}
// NewBaseHandler takes in CompletedConfig and returns a handler.
func NewBaseHandler(c *apiserverconfig.DebuggingConfiguration, checks ...healthz.HealthzChecker) *mux.PathRecorderMux {
mux := mux.NewPathRecorderMux("controller-manager")
healthz.InstallHandler(mux, checks...)
if c.EnableProfiling {
routes.Profiling{}.Install(mux)
if c.EnableContentionProfiling {
goruntime.SetBlockProfileRate(1)
}
}
configz.InstallHandler(mux)
mux.Handle("/metrics", prometheus.Handler())
return mux
}

7
cmd/ctr/main.go Normal file
View File

@ -0,0 +1,7 @@
package main
import "github.com/k3s-io/k3s/pkg/ctr"
func main() {
ctr.Main()
}

32
cmd/encrypt/main.go Normal file
View File

@ -0,0 +1,32 @@
package main
import (
"context"
"errors"
"os"
"github.com/k3s-io/k3s/pkg/cli/cmds"
"github.com/k3s-io/k3s/pkg/cli/secretsencrypt"
"github.com/k3s-io/k3s/pkg/configfilearg"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
func main() {
app := cmds.NewApp()
app.Commands = []*cli.Command{
cmds.NewSecretsEncryptCommands(
secretsencrypt.Status,
secretsencrypt.Enable,
secretsencrypt.Disable,
secretsencrypt.Prepare,
secretsencrypt.Rotate,
secretsencrypt.Reencrypt,
secretsencrypt.RotateKeys,
),
}
if err := app.Run(configfilearg.MustParse(os.Args)); err != nil && !errors.Is(err, context.Canceled) {
logrus.Fatal(err)
}
}

29
cmd/etcdsnapshot/main.go Normal file
View File

@ -0,0 +1,29 @@
package main
import (
"context"
"errors"
"os"
"github.com/k3s-io/k3s/pkg/cli/cmds"
"github.com/k3s-io/k3s/pkg/cli/etcdsnapshot"
"github.com/k3s-io/k3s/pkg/configfilearg"
"github.com/sirupsen/logrus"
"github.com/urfave/cli/v2"
)
func main() {
app := cmds.NewApp()
app.Commands = []*cli.Command{
cmds.NewEtcdSnapshotCommands(
etcdsnapshot.Delete,
etcdsnapshot.List,
etcdsnapshot.Prune,
etcdsnapshot.Save,
),
}
if err := app.Run(configfilearg.MustParse(os.Args)); err != nil && !errors.Is(err, context.Canceled) {
logrus.Fatal(err)
}
}

View File

@ -1,49 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
)
load("//pkg/version:def.bzl", "version_x_defs")
go_binary(
name = "hyperkube",
embed = [":go_default_library"],
x_defs = version_x_defs(),
)
go_library(
name = "go_default_library",
srcs = ["main.go"],
importpath = "k8s.io/kubernetes/cmd/hyperkube",
deps = [
"//cmd/cloud-controller-manager/app:go_default_library",
"//cmd/kube-apiserver/app:go_default_library",
"//cmd/kube-controller-manager/app:go_default_library",
"//cmd/kube-proxy/app:go_default_library",
"//cmd/kube-scheduler/app:go_default_library",
"//cmd/kubelet/app:go_default_library",
"//pkg/client/metrics/prometheus:go_default_library",
"//pkg/kubectl/cmd:go_default_library",
"//pkg/version/prometheus:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/logs:go_default_library",
"//vendor/github.com/spf13/cobra:go_default_library",
"//vendor/github.com/spf13/pflag:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

View File

@ -1,2 +0,0 @@
labels:
- sig/release

View File

@ -1,173 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// A binary that can morph into all of the other kubernetes binaries. You can
// also soft-link to it busybox style.
//
package main
import (
"errors"
goflag "flag"
"fmt"
"math/rand"
"os"
"path"
"path/filepath"
"time"
"github.com/spf13/cobra"
"github.com/spf13/pflag"
"k8s.io/apiserver/pkg/server"
utilflag "k8s.io/apiserver/pkg/util/flag"
"k8s.io/apiserver/pkg/util/logs"
kubeapiserver "k8s.io/kubernetes/cmd/kube-apiserver/app"
kubecontrollermanager "k8s.io/kubernetes/cmd/kube-controller-manager/app"
kubeproxy "k8s.io/kubernetes/cmd/kube-proxy/app"
kubescheduler "k8s.io/kubernetes/cmd/kube-scheduler/app"
kubelet "k8s.io/kubernetes/cmd/kubelet/app"
_ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration
kubectl "k8s.io/kubernetes/pkg/kubectl/cmd"
_ "k8s.io/kubernetes/pkg/version/prometheus" // for version metric registration
)
func main() {
rand.Seed(time.Now().UnixNano())
hyperkubeCommand, allCommandFns := NewHyperKubeCommand(server.SetupSignalHandler())
// TODO: once we switch everything over to Cobra commands, we can go back to calling
// utilflag.InitFlags() (by removing its pflag.Parse() call). For now, we have to set the
// normalize func and add the go flag set by hand.
pflag.CommandLine.SetNormalizeFunc(utilflag.WordSepNormalizeFunc)
pflag.CommandLine.AddGoFlagSet(goflag.CommandLine)
// utilflag.InitFlags()
logs.InitLogs()
defer logs.FlushLogs()
basename := filepath.Base(os.Args[0])
if err := commandFor(basename, hyperkubeCommand, allCommandFns).Execute(); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err)
os.Exit(1)
}
}
func commandFor(basename string, defaultCommand *cobra.Command, commands []func() *cobra.Command) *cobra.Command {
for _, commandFn := range commands {
command := commandFn()
if command.Name() == basename {
return command
}
for _, alias := range command.Aliases {
if alias == basename {
return command
}
}
}
return defaultCommand
}
// NewHyperKubeCommand is the entry point for hyperkube
func NewHyperKubeCommand(stopCh <-chan struct{}) (*cobra.Command, []func() *cobra.Command) {
// these have to be functions since the command is polymorphic. Cobra wants you to be top level
// command to get executed
apiserver := func() *cobra.Command {
ret := kubeapiserver.NewAPIServerCommand(stopCh)
// add back some unfortunate aliases that should be removed
ret.Aliases = []string{"apiserver"}
return ret
}
controller := func() *cobra.Command {
ret := kubecontrollermanager.NewControllerManagerCommand()
// add back some unfortunate aliases that should be removed
ret.Aliases = []string{"controller-manager"}
return ret
}
proxy := func() *cobra.Command {
ret := kubeproxy.NewProxyCommand()
// add back some unfortunate aliases that should be removed
ret.Aliases = []string{"proxy"}
return ret
}
scheduler := func() *cobra.Command {
ret := kubescheduler.NewSchedulerCommand()
// add back some unfortunate aliases that should be removed
ret.Aliases = []string{"scheduler"}
return ret
}
kubectlCmd := func() *cobra.Command { return kubectl.NewDefaultKubectlCommand() }
kubelet := func() *cobra.Command { return kubelet.NewKubeletCommand(stopCh) }
commandFns := []func() *cobra.Command{
apiserver,
controller,
proxy,
scheduler,
kubectlCmd,
kubelet,
}
makeSymlinksFlag := false
cmd := &cobra.Command{
Use: "hyperkube",
Short: "Request a new project",
Run: func(cmd *cobra.Command, args []string) {
if len(args) != 0 || !makeSymlinksFlag {
cmd.Help()
os.Exit(1)
}
if err := makeSymlinks(os.Args[0], commandFns); err != nil {
fmt.Fprintf(os.Stderr, "%v\n", err.Error())
}
},
}
cmd.Flags().BoolVar(&makeSymlinksFlag, "make-symlinks", makeSymlinksFlag, "create a symlink for each server in current directory")
cmd.Flags().MarkHidden("make-symlinks") // hide this flag from appearing in servers' usage output
for i := range commandFns {
cmd.AddCommand(commandFns[i]())
}
return cmd, commandFns
}
// makeSymlinks will create a symlink for each command in the local directory.
func makeSymlinks(targetName string, commandFns []func() *cobra.Command) error {
wd, err := os.Getwd()
if err != nil {
return err
}
var errs bool
for _, commandFn := range commandFns {
command := commandFn()
link := path.Join(wd, command.Name())
err := os.Symlink(targetName, link)
if err != nil {
errs = true
fmt.Println(err)
}
}
if errs {
return errors.New("Error creating one or more symlinks.")
}
return nil
}

408
cmd/k3s/main.go Normal file
View File

@ -0,0 +1,408 @@
package main
import (
"bytes"
"context"
"errors"
"io"
"io/fs"
"os"
"os/exec"
"path/filepath"
"slices"
"strconv"
"strings"
"github.com/k3s-io/k3s/pkg/cli/cmds"
"github.com/k3s-io/k3s/pkg/configfilearg"
"github.com/k3s-io/k3s/pkg/data"
"github.com/k3s-io/k3s/pkg/datadir"
"github.com/k3s-io/k3s/pkg/dataverify"
"github.com/k3s-io/k3s/pkg/flock"
"github.com/k3s-io/k3s/pkg/untar"
"github.com/k3s-io/k3s/pkg/version"
pkgerrors "github.com/pkg/errors"
"github.com/rancher/wrangler/v3/pkg/resolvehome"
"github.com/sirupsen/logrus"
"github.com/spf13/pflag"
"github.com/urfave/cli/v2"
)
var criDefaultConfigPath = "/etc/crictl.yaml"
var externalCLIActions = []string{"crictl", "ctr", "kubectl"}
// main entrypoint for the k3s multicall binary
func main() {
if findDebug(os.Args) {
logrus.SetLevel(logrus.DebugLevel)
}
dataDir := findDataDir(os.Args)
// Handle direct invocation via symlink alias (multicall binary behavior)
if runCLIs(dataDir) {
return
}
tokenCommand := internalCLIAction(version.Program+"-"+cmds.TokenCommand, dataDir, os.Args)
etcdsnapshotCommand := internalCLIAction(version.Program+"-"+cmds.EtcdSnapshotCommand, dataDir, os.Args)
secretsencryptCommand := internalCLIAction(version.Program+"-"+cmds.SecretsEncryptCommand, dataDir, os.Args)
certCommand := internalCLIAction(version.Program+"-"+cmds.CertCommand, dataDir, os.Args)
// Handle subcommand invocation (k3s server, k3s crictl, etc)
app := cmds.NewApp()
app.EnableBashCompletion = true
app.DisableSliceFlagSeparator = true
app.Commands = []*cli.Command{
cmds.NewServerCommand(internalCLIAction(version.Program+"-server"+programPostfix, dataDir, os.Args)),
cmds.NewAgentCommand(internalCLIAction(version.Program+"-agent"+programPostfix, dataDir, os.Args)),
cmds.NewKubectlCommand(externalCLIAction("kubectl", dataDir)),
cmds.NewCRICTL(externalCLIAction("crictl", dataDir)),
cmds.NewCtrCommand(externalCLIAction("ctr", dataDir)),
cmds.NewCheckConfigCommand(externalCLIAction("check-config", dataDir)),
cmds.NewTokenCommands(
tokenCommand,
tokenCommand,
tokenCommand,
tokenCommand,
tokenCommand,
),
cmds.NewEtcdSnapshotCommands(
etcdsnapshotCommand,
etcdsnapshotCommand,
etcdsnapshotCommand,
etcdsnapshotCommand,
),
cmds.NewSecretsEncryptCommands(
secretsencryptCommand,
secretsencryptCommand,
secretsencryptCommand,
secretsencryptCommand,
secretsencryptCommand,
secretsencryptCommand,
secretsencryptCommand,
),
cmds.NewCertCommands(
certCommand,
certCommand,
certCommand,
),
cmds.NewCompletionCommand(
internalCLIAction(version.Program+"-completion", dataDir, os.Args),
internalCLIAction(version.Program+"-completion", dataDir, os.Args),
),
}
if err := app.Run(os.Args); err != nil && !errors.Is(err, context.Canceled) {
logrus.Fatalf("Error: %v", err)
}
}
// findDebug reads debug settings from the environment, CLI args, and config file.
func findDebug(args []string) bool {
debug, _ := strconv.ParseBool(os.Getenv(version.ProgramUpper + "_DEBUG"))
if debug {
return debug
}
fs := pflag.NewFlagSet("debug-set", pflag.ContinueOnError)
fs.ParseErrorsWhitelist.UnknownFlags = true
fs.SetOutput(io.Discard)
fs.BoolVarP(&debug, "debug", "", false, "(logging) Turn on debug logs")
fs.Parse(args)
if debug {
return debug
}
debug, _ = strconv.ParseBool(configfilearg.MustFindString(args, "debug", externalCLIActions...))
return debug
}
// findDataDir reads data-dir settings from the environment, CLI args, and config file.
// If not found, the default will be used, which varies depending on whether
// k3s is being run as root or not.
func findDataDir(args []string) string {
dataDir := os.Getenv(version.ProgramUpper + "_DATA_DIR")
if dataDir != "" {
return dataDir
}
fs := pflag.NewFlagSet("data-dir-set", pflag.ContinueOnError)
fs.ParseErrorsWhitelist.UnknownFlags = true
fs.SetOutput(io.Discard)
fs.StringVarP(&dataDir, "data-dir", "d", "", "Data directory")
fs.Parse(args)
if dataDir != "" {
return dataDir
}
dataDir = configfilearg.MustFindString(args, "data-dir", externalCLIActions...)
if d, err := datadir.Resolve(dataDir); err == nil {
dataDir = d
} else {
logrus.Warnf("Failed to resolve user home directory: %s", err)
}
return dataDir
}
// findPreferBundledBin searches for prefer-bundled-bin from the config file, then CLI args.
// we use pflag to process the args because we not yet parsed flags bound to the cli.Context
func findPreferBundledBin(args []string) bool {
var preferBundledBin bool
fs := pflag.NewFlagSet("prefer-set", pflag.ContinueOnError)
fs.ParseErrorsWhitelist.UnknownFlags = true
fs.SetOutput(io.Discard)
fs.BoolVar(&preferBundledBin, "prefer-bundled-bin", false, "Prefer bundled binaries")
preferRes := configfilearg.MustFindString(args, "prefer-bundled-bin", externalCLIActions...)
if preferRes != "" {
preferBundledBin, _ = strconv.ParseBool(preferRes)
}
fs.Parse(args)
return preferBundledBin
}
// runCLIs handles the case where the binary is being executed as a symlink alias,
// /usr/local/bin/crictl for example. If the executable name is one of the external
// binaries, it calls it directly and returns true. If it's not an external binary,
// it returns false so that standard CLI wrapping can occur.
func runCLIs(dataDir string) bool {
progName := filepath.Base(os.Args[0])
if slices.Contains(externalCLIActions, progName) {
if err := externalCLI(progName, dataDir, os.Args[1:]); err != nil && !errors.Is(err, context.Canceled) {
logrus.Fatal(err)
}
return true
}
return false
}
// externalCLIAction returns a function that will call an external binary, be used as the Action of a cli.Command.
func externalCLIAction(cmd, dataDir string) func(cli *cli.Context) error {
return func(cli *cli.Context) error {
return externalCLI(cmd, dataDir, cli.Args().Slice())
}
}
// externalCLI calls an external binary, fixing up argv[0] to the correct name.
// crictl needs extra help to find its config file so we do that here too.
func externalCLI(cli, dataDir string, args []string) error {
if cli == "crictl" {
if os.Getenv("CRI_CONFIG_FILE") == "" {
os.Setenv("CRI_CONFIG_FILE", findCriConfig(dataDir))
}
}
return stageAndRun(dataDir, cli, append([]string{cli}, args...), false)
}
// internalCLIAction returns a function that will call a K3s internal command, be used as the Action of a cli.Command.
func internalCLIAction(cmd, dataDir string, args []string) func(ctx *cli.Context) error {
return func(ctx *cli.Context) error {
// We don't want the Info logs seen when printing the autocomplete script
if cmd == "k3s-completion" {
logrus.SetLevel(logrus.ErrorLevel)
}
return stageAndRunCLI(ctx, cmd, dataDir, args)
}
}
// stageAndRunCLI calls an external binary.
func stageAndRunCLI(cli *cli.Context, cmd string, dataDir string, args []string) error {
return stageAndRun(dataDir, cmd, args, true)
}
// stageAndRun does the actual work of setting up and calling an external binary.
func stageAndRun(dataDir, cmd string, args []string, calledAsInternal bool) error {
dir, err := extract(dataDir)
if err != nil {
return pkgerrors.WithMessage(err, "extracting data")
}
logrus.Debugf("Asset dir %s", dir)
pathList := []string{
filepath.Clean(filepath.Join(dir, "..", "cni")),
filepath.Join(dir, "bin"),
}
if findPreferBundledBin(args) {
pathList = append(
pathList,
filepath.Join(dir, "bin", "aux"),
os.Getenv("PATH"),
)
} else {
pathList = append(
pathList,
os.Getenv("PATH"),
filepath.Join(dir, "bin", "aux"),
)
}
if err := os.Setenv("PATH", strings.Join(pathList, string(os.PathListSeparator))); err != nil {
return err
}
cmd, err = exec.LookPath(cmd)
if err != nil {
return err
}
logrus.Debugf("Running %s %v", cmd, args)
return runExec(cmd, args, calledAsInternal)
}
// getAssetAndDir returns the name of the bindata asset, along with a directory path
// derived from the data-dir and bindata asset name.
func getAssetAndDir(dataDir string) (string, string) {
asset := data.AssetNames()[0]
dir := filepath.Join(dataDir, "data", strings.SplitN(filepath.Base(asset), ".", 2)[0])
return asset, dir
}
// extract checks for and if necessary unpacks the bindata archive, returning the unique path
// to the extracted bindata asset.
func extract(dataDir string) (string, error) {
// check if content already exists in requested data-dir
asset, dir := getAssetAndDir(dataDir)
if _, err := os.Stat(filepath.Join(dir, "bin", "k3s"+programPostfix)); err == nil {
return dir, nil
}
// check if content exists in default path as a fallback, prior
// to extracting. This will prevent re-extracting into the user's home
// dir if the assets already exist in the default path.
if dataDir != datadir.DefaultDataDir {
_, defaultDir := getAssetAndDir(datadir.DefaultDataDir)
if _, err := os.Stat(filepath.Join(defaultDir, "bin", "k3s"+programPostfix)); err == nil {
return defaultDir, nil
}
}
// acquire a data directory lock
os.MkdirAll(filepath.Join(dataDir, "data"), 0755)
lockFile := filepath.Join(dataDir, "data", ".lock")
logrus.Infof("Acquiring lock file %s", lockFile)
lock, err := flock.Acquire(lockFile)
if err != nil {
return "", err
}
defer flock.Release(lock)
// check again if target directory exists
if _, err := os.Stat(dir); err == nil {
return dir, nil
}
logrus.Infof("Preparing data dir %s", dir)
content, err := data.Asset(asset)
if err != nil {
return "", err
}
buf := bytes.NewBuffer(content)
tempDest := dir + "-tmp"
defer os.RemoveAll(tempDest)
os.RemoveAll(tempDest)
if err := untar.Untar(buf, tempDest); err != nil {
return "", err
}
if err := dataverify.Verify(filepath.Join(tempDest, "bin")); err != nil {
return "", err
}
// Rotate 'current' symlink into 'previous', and create a new 'current' that points
// at the new directory.
currentSymLink := filepath.Join(dataDir, "data", "current")
previousSymLink := filepath.Join(dataDir, "data", "previous")
if _, err := os.Lstat(currentSymLink); err == nil {
if err := os.Rename(currentSymLink, previousSymLink); err != nil {
return "", err
}
}
if err := os.Symlink(dir, currentSymLink); err != nil {
return "", err
}
// Rename the new directory into place after updating symlinks, so that the k3s binary check at the start
// of this function only succeeds if everything else has been completed successfully.
if err := os.Rename(tempDest, dir); err != nil {
return "", err
}
// Create a stable CNI bin dir and place it first in the path so that users have a
// consistent location to drop their own CNI plugin binaries.
cniPath := filepath.Join(dataDir, "data", "cni")
cniBin := filepath.Join(dir, "bin", "cni")
if err := os.MkdirAll(cniPath, 0755); err != nil {
return "", err
}
// Create symlink that points at the cni multicall binary itself
logrus.Debugf("Creating symlink %s -> %s", filepath.Join(cniPath, "cni"), cniBin)
os.Remove(filepath.Join(cniPath, "cni"))
if err := os.Symlink(cniBin, filepath.Join(cniPath, "cni")); err != nil {
return "", err
}
// Find symlinks that point to the cni multicall binary, and clone them in the stable CNI bin dir.
// Non-symlink plugins in the stable CNI bin dir will not be overwritten, to allow users to replace our
// CNI plugins with their own versions if they want. Note that the cni multicall binary itself is always
// symlinked into the stable bin dir and should not be replaced.
ents, err := os.ReadDir(filepath.Join(dir, "bin"))
if err != nil {
return "", err
}
for _, ent := range ents {
if info, err := ent.Info(); err == nil && info.Mode()&fs.ModeSymlink != 0 {
if target, err := os.Readlink(filepath.Join(dir, "bin", ent.Name())); err == nil && target == "cni" {
src := filepath.Join(cniPath, ent.Name())
// Check if plugin already exists in stable CNI bin dir
if info, err := os.Lstat(src); err == nil {
if info.Mode()&fs.ModeSymlink != 0 {
// Exists and is a symlink, remove it so we can create a new symlink for the new bin.
os.Remove(src)
} else {
// Not a symlink, leave it alone
logrus.Debugf("Not replacing non-symlink CNI plugin %s with mode %O", src, info.Mode())
continue
}
}
logrus.Debugf("Creating symlink %s -> %s", src, cniBin)
if err := os.Symlink(cniBin, src); err != nil {
return "", err
}
}
}
}
return dir, nil
}
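The stat / flock / stat sequence above is a check-lock-check: concurrent k3s invocations race to extract, the winner does the work under the data-dir lock, and everyone re-checks the target after acquiring so the extraction happens at most once while the common already-extracted case never blocks. A compressed sketch of just that pattern, reusing the same pkg/flock helpers called above (ensureExtracted is a hypothetical name, and the import path is assumed from the k3s source tree):

package main

import (
	"os"

	"github.com/k3s-io/k3s/pkg/flock" // import path assumed from the k3s tree
)

// ensureExtracted is a hypothetical distillation of the pattern above:
// cheap stat first, then serialize on the lock, then re-check before
// doing the expensive extraction exactly once.
func ensureExtracted(dir, lockFile string, extract func() error) error {
	if _, err := os.Stat(dir); err == nil {
		return nil // fast path: already extracted, no lock taken
	}
	lock, err := flock.Acquire(lockFile)
	if err != nil {
		return err
	}
	defer flock.Release(lock)
	// Re-check: another process may have finished while we waited.
	if _, err := os.Stat(dir); err == nil {
		return nil
	}
	return extract()
}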
// findCriConfig returns the path to crictl.yaml
// crictl won't search multiple locations for a config file. It will fall back to looking in
// the same directory as the crictl binary, but that's it. We need to check the various possible
// data-dir locations ourselves and then point it at the right one. We check:
// - the configured data-dir
// - the default user data-dir (assuming we can find the user's home directory)
// - the default system data-dir
// - the default path from upstream crictl
func findCriConfig(dataDir string) string {
searchList := []string{filepath.Join(dataDir, "agent", criDefaultConfigPath)}
if homeDataDir, err := resolvehome.Resolve(datadir.DefaultHomeDataDir); err == nil {
searchList = append(searchList, filepath.Join(homeDataDir, "agent", criDefaultConfigPath))
} else {
logrus.Warnf("Failed to resolve user home directory: %s", err)
}
searchList = append(searchList, filepath.Join(datadir.DefaultDataDir, "agent", criDefaultConfigPath))
searchList = append(searchList, criDefaultConfigPath)
for _, path := range searchList {
_, err := os.Stat(path)
if err == nil {
return path
}
if !errors.Is(err, os.ErrNotExist) {
logrus.Warnf("Failed to %s", err)
}
}
return ""
}
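A hedged sketch of how the resolved path might be consumed (runCrictl is illustrative, not part of this changeset; it assumes os and os/exec are imported, and relies on crictl's documented CRI_CONFIG_FILE environment variable as the alternative to passing --config):

// runCrictl is an illustrative wrapper showing one way to wire the
// resolved config path into a crictl invocation.
func runCrictl(dataDir string, args []string) error {
	cmd := exec.Command("crictl", args...)
	cmd.Env = os.Environ()
	if cfg := findCriConfig(dataDir); cfg != "" {
		// crictl reads CRI_CONFIG_FILE when --config is not given.
		cmd.Env = append(cmd.Env, "CRI_CONFIG_FILE="+cfg)
	}
	cmd.Stdout, cmd.Stderr, cmd.Stdin = os.Stdout, os.Stderr, os.Stdin
	return cmd.Run()
}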

20
cmd/k3s/main_linux.go Normal file
View File

@ -0,0 +1,20 @@
//go:build linux
// +build linux

package main
import (
"os"
"syscall"
pkgerrors "github.com/pkg/errors"
)
const programPostfix = ""
func runExec(cmd string, args []string, calledAsInternal bool) (err error) {
if err := syscall.Exec(cmd, args, os.Environ()); err != nil {
return pkgerrors.WithMessagef(err, "exec %s failed", cmd)
}
return nil
}
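Note that on success syscall.Exec never returns: the k3s wrapper process is replaced wholesale by the target binary, inheriting its PID, stdio, and environment, which is why the error return here is only reachable on failure. A standalone illustration of that property (not from the diff; assumes /bin/true exists on the host):

package main

import (
	"fmt"
	"os"
	"syscall"
)

func main() {
	// On success this call does not return; the process image is replaced.
	err := syscall.Exec("/bin/true", []string{"true"}, os.Environ())
	// Reachable only if the exec itself failed (e.g. missing binary).
	fmt.Fprintln(os.Stderr, "exec failed:", err)
	os.Exit(1)
}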

59
cmd/k3s/main_test.go Normal file
View File

@ -0,0 +1,59 @@
package main
import "testing"
func Test_UnitFindPreferBundledBin(t *testing.T) {
tests := []struct {
name string
args []string
want bool
}{
{
name: "Single argument",
args: []string{"--prefer-bundled-bin"},
want: true,
},
{
name: "no argument",
args: []string{""},
want: false,
},
{
name: "Argument with equal true",
args: []string{"--prefer-bundled-bin=true"},
want: true,
},
{
name: "Argument with equal false",
args: []string{"--prefer-bundled-bin=false"},
want: false,
},
{
name: "Argument with equal 1",
args: []string{"--prefer-bundled-bin=1"},
want: true,
},
{
name: "Argument with equal 0",
args: []string{"--prefer-bundled-bin=0"},
want: false,
},
{
name: "Multiple arguments",
args: []string{"--abcd", "--prefer-bundled-bin", "--efgh"},
want: true,
},
{
name: "Repeated arguments",
args: []string{"--abcd", "--prefer-bundled-bin=false", "--prefer-bundled-bin"},
want: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
if got := findPreferBundledBin(tt.args); got != tt.want {
t.Errorf("findPreferBundledBin() = %+v\nWant = %+v", got, tt.want)
}
})
}
}
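The table pins down the flag's semantics: bare --prefer-bundled-bin means true, --prefer-bundled-bin=<v> is parsed as a boolean, and the repeated-arguments case rules out first-occurrence-wins. One sketch consistent with those expectations, reading it as last-occurrence-wins (the shipped implementation in main.go may differ; strconv and strings imports assumed):

// Sketch only: last occurrence wins, a bare flag counts as true, and
// values after '=' parse like strconv.ParseBool ("1", "0", "true", ...).
func findPreferBundledBin(args []string) bool {
	prefer := false
	for _, arg := range args {
		switch {
		case arg == "--prefer-bundled-bin":
			prefer = true
		case strings.HasPrefix(arg, "--prefer-bundled-bin="):
			if v, err := strconv.ParseBool(strings.TrimPrefix(arg, "--prefer-bundled-bin=")); err == nil {
				prefer = v
			}
		}
	}
	return prefer
}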

24
cmd/k3s/main_windows.go Normal file
View File

@ -0,0 +1,24 @@
//go:build windows
// +build windows

package main
import (
"os"
"os/exec"
)
const programPostfix = ".exe"
func runExec(cmd string, args []string, calledAsInternal bool) (err error) {
// syscall.Exec: not supported by windows
if calledAsInternal {
args = args[1:]
}
cmdObj := exec.Command(cmd, args...)
cmdObj.Stdout = os.Stdout
cmdObj.Stderr = os.Stderr
cmdObj.Stdin = os.Stdin
cmdObj.Env = os.Environ()
return cmdObj.Run()
}
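Because Windows has no exec(2), the child here runs as an ordinary subprocess and runExec returns once it exits, so a caller that wants the wrapper to mirror the child's status has to unwrap the *exec.ExitError itself. An illustrative helper (not part of this changeset; errors, os, and os/exec imports assumed):

// exitWith is illustrative only: propagate the child's exit code from the
// error returned by cmd.Run(), defaulting to 1 for non-exit errors.
func exitWith(err error) {
	if err == nil {
		os.Exit(0)
	}
	var ee *exec.ExitError
	if errors.As(err, &ee) {
		os.Exit(ee.ExitCode())
	}
	os.Exit(1)
}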

cmd/kube-apiserver/BUILD
View File

@ -1,44 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
)
load("//pkg/version:def.bzl", "version_x_defs")
go_binary(
name = "kube-apiserver",
embed = [":go_default_library"],
pure = "on",
x_defs = version_x_defs(),
)
go_library(
name = "go_default_library",
srcs = ["apiserver.go"],
importpath = "k8s.io/kubernetes/cmd/kube-apiserver",
deps = [
"//cmd/kube-apiserver/app:go_default_library",
"//pkg/client/metrics/prometheus:go_default_library",
"//pkg/version/prometheus:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/logs:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//cmd/kube-apiserver/app:all-srcs",
],
tags = ["automanaged"],
)

cmd/kube-apiserver/OWNERS
View File

@ -1,27 +0,0 @@
approvers:
- caesarxuchao
- deads2k
- lavalamp
- liggitt
- mml
- nikhiljindal
- smarterclayton
- sttts
reviewers:
- lavalamp
- smarterclayton
- wojtek-t
- deads2k
- derekwaynecarr
- caesarxuchao
- mikedanese
- liggitt
- nikhiljindal
- ncdc
- sttts
- hzxuzhonghu
- CaoShuFeng
- yue9944882
labels:
- sig/api-machinery
- area/apiserver

cmd/kube-apiserver/apiserver.go
View File

@ -1,50 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// apiserver is the main api server and master for the cluster.
// it is responsible for serving the cluster management API.
package main
import (
"fmt"
"math/rand"
"os"
"time"
"k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/util/logs"
"k8s.io/kubernetes/cmd/kube-apiserver/app"
_ "k8s.io/kubernetes/pkg/client/metrics/prometheus" // for client metric registration
_ "k8s.io/kubernetes/pkg/version/prometheus" // for version metric registration
)
func main() {
rand.Seed(time.Now().UnixNano())
command := app.NewAPIServerCommand(server.SetupSignalHandler())
// TODO: once we switch everything over to Cobra commands, we can go back to calling
// utilflag.InitFlags() (by removing its pflag.Parse() call). For now, we have to set the
// normalize func and add the go flag set by hand.
// utilflag.InitFlags()
logs.InitLogs()
defer logs.FlushLogs()
if err := command.Execute(); err != nil {
fmt.Fprintf(os.Stderr, "error: %v\n", err)
os.Exit(1)
}
}

cmd/kube-apiserver/app/BUILD
View File

@ -1,98 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = [
"aggregator.go",
"apiextensions.go",
"server.go",
],
importpath = "k8s.io/kubernetes/cmd/kube-apiserver/app",
visibility = ["//visibility:public"],
deps = [
"//cmd/kube-apiserver/app/options:go_default_library",
"//pkg/api/legacyscheme:go_default_library",
"//pkg/capabilities:go_default_library",
"//pkg/controller/serviceaccount:go_default_library",
"//pkg/generated/openapi:go_default_library",
"//pkg/kubeapiserver:go_default_library",
"//pkg/kubeapiserver/admission:go_default_library",
"//pkg/kubeapiserver/authenticator:go_default_library",
"//pkg/kubeapiserver/authorizer/modes:go_default_library",
"//pkg/kubeapiserver/options:go_default_library",
"//pkg/kubeapiserver/server:go_default_library",
"//pkg/master:go_default_library",
"//pkg/master/controller/crdregistration:go_default_library",
"//pkg/master/reconcilers:go_default_library",
"//pkg/master/tunneler:go_default_library",
"//pkg/registry/cachesize:go_default_library",
"//pkg/registry/rbac/rest:go_default_library",
"//pkg/serviceaccount:go_default_library",
"//pkg/util/flag:go_default_library",
"//pkg/util/reflector/prometheus:go_default_library",
"//pkg/util/workqueue/prometheus:go_default_library",
"//pkg/version:go_default_library",
"//pkg/version/verflag:go_default_library",
"//plugin/pkg/auth/authenticator/token/bootstrap:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/cmd/server/options:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/apis/meta/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/errors:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/admission:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authentication/authenticator:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/authorization/authorizer:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/endpoints/openapi:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/features:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/filters:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/healthz:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/options:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/storage:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/etcd3/preflight:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/globalflag:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/webhook:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/tools/cache:go_default_library",
"//staging/src/k8s.io/client-go/util/cert:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/apiserver:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/apiserver/scheme:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/controllers/autoregister:go_default_library",
"//vendor/github.com/go-openapi/spec:go_default_library",
"//vendor/github.com/spf13/cobra:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/kube-openapi/pkg/common:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//cmd/kube-apiserver/app/options:all-srcs",
"//cmd/kube-apiserver/app/testing:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

cmd/kube-apiserver/app/aggregator.go
View File

@ -1,304 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app does all of the work necessary to create a Kubernetes
// APIServer by binding together the API, master and APIServer infrastructure.
// It can be configured and called directly or via the hyperkube framework.
package app
import (
"fmt"
"io/ioutil"
"net/http"
"strings"
"sync"
"k8s.io/klog"
apiextensionsinformers "k8s.io/apiextensions-apiserver/pkg/client/informers/internalversion"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/features"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/healthz"
genericoptions "k8s.io/apiserver/pkg/server/options"
utilfeature "k8s.io/apiserver/pkg/util/feature"
kubeexternalinformers "k8s.io/client-go/informers"
"k8s.io/client-go/tools/cache"
"k8s.io/kube-aggregator/pkg/apis/apiregistration"
"k8s.io/kube-aggregator/pkg/apis/apiregistration/v1"
"k8s.io/kube-aggregator/pkg/apis/apiregistration/v1beta1"
aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver"
aggregatorscheme "k8s.io/kube-aggregator/pkg/apiserver/scheme"
apiregistrationclient "k8s.io/kube-aggregator/pkg/client/clientset_generated/internalclientset/typed/apiregistration/internalversion"
informers "k8s.io/kube-aggregator/pkg/client/informers/internalversion/apiregistration/internalversion"
"k8s.io/kube-aggregator/pkg/controllers/autoregister"
"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
"k8s.io/kubernetes/pkg/master/controller/crdregistration"
)
func createAggregatorConfig(
kubeAPIServerConfig genericapiserver.Config,
commandOptions *options.ServerRunOptions,
externalInformers kubeexternalinformers.SharedInformerFactory,
serviceResolver aggregatorapiserver.ServiceResolver,
proxyTransport *http.Transport,
pluginInitializers []admission.PluginInitializer,
) (*aggregatorapiserver.Config, error) {
// make a shallow copy to let us twiddle a few things
// most of the config actually remains the same. We only need to mess with a couple items related to the particulars of the aggregator
genericConfig := kubeAPIServerConfig
// override genericConfig.AdmissionControl with kube-aggregator's scheme,
// because aggregator apiserver should use its own scheme to convert its own resources.
commandOptions.Admission.ApplyTo(
&genericConfig,
externalInformers,
genericConfig.LoopbackClientConfig,
aggregatorscheme.Scheme,
pluginInitializers...)
// copy the etcd options so we don't mutate originals.
etcdOptions := *commandOptions.Etcd
etcdOptions.StorageConfig.Paging = utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)
etcdOptions.StorageConfig.Codec = aggregatorscheme.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion, v1.SchemeGroupVersion)
genericConfig.RESTOptionsGetter = &genericoptions.SimpleRestOptionsFactory{Options: etcdOptions}
// override MergedResourceConfig with aggregator defaults and registry
if err := commandOptions.APIEnablement.ApplyTo(
&genericConfig,
aggregatorapiserver.DefaultAPIResourceConfigSource(),
aggregatorscheme.Scheme); err != nil {
return nil, err
}
var err error
var certBytes, keyBytes []byte
if len(commandOptions.ProxyClientCertFile) > 0 && len(commandOptions.ProxyClientKeyFile) > 0 {
certBytes, err = ioutil.ReadFile(commandOptions.ProxyClientCertFile)
if err != nil {
return nil, err
}
keyBytes, err = ioutil.ReadFile(commandOptions.ProxyClientKeyFile)
if err != nil {
return nil, err
}
}
aggregatorConfig := &aggregatorapiserver.Config{
GenericConfig: &genericapiserver.RecommendedConfig{
Config: genericConfig,
SharedInformerFactory: externalInformers,
},
ExtraConfig: aggregatorapiserver.ExtraConfig{
ProxyClientCert: certBytes,
ProxyClientKey: keyBytes,
ServiceResolver: serviceResolver,
ProxyTransport: proxyTransport,
},
}
return aggregatorConfig, nil
}
func createAggregatorServer(aggregatorConfig *aggregatorapiserver.Config, delegateAPIServer genericapiserver.DelegationTarget, apiExtensionInformers apiextensionsinformers.SharedInformerFactory) (*aggregatorapiserver.APIAggregator, error) {
aggregatorServer, err := aggregatorConfig.Complete().NewWithDelegate(delegateAPIServer)
if err != nil {
return nil, err
}
// create controllers for auto-registration
apiRegistrationClient, err := apiregistrationclient.NewForConfig(aggregatorConfig.GenericConfig.LoopbackClientConfig)
if err != nil {
return nil, err
}
autoRegistrationController := autoregister.NewAutoRegisterController(aggregatorServer.APIRegistrationInformers.Apiregistration().InternalVersion().APIServices(), apiRegistrationClient)
apiServices := apiServicesToRegister(delegateAPIServer, autoRegistrationController)
crdRegistrationController := crdregistration.NewAutoRegistrationController(
apiExtensionInformers.Apiextensions().InternalVersion().CustomResourceDefinitions(),
autoRegistrationController)
aggregatorServer.GenericAPIServer.AddPostStartHook("kube-apiserver-autoregistration", func(context genericapiserver.PostStartHookContext) error {
go crdRegistrationController.Run(5, context.StopCh)
go func() {
// let the CRD controller process the initial set of CRDs before starting the autoregistration controller.
// this prevents the autoregistration controller's initial sync from deleting APIServices for CRDs that still exist.
// we only need to do this if CRDs are enabled on this server. We can't use discovery because we are the source for discovery.
if aggregatorConfig.GenericConfig.MergedResourceConfig.AnyVersionForGroupEnabled("apiextensions.k8s.io") {
crdRegistrationController.WaitForInitialSync()
}
autoRegistrationController.Run(5, context.StopCh)
}()
return nil
})
aggregatorServer.GenericAPIServer.AddHealthzChecks(
makeAPIServiceAvailableHealthzCheck(
"autoregister-completion",
apiServices,
aggregatorServer.APIRegistrationInformers.Apiregistration().InternalVersion().APIServices(),
),
)
return aggregatorServer, nil
}
func makeAPIService(gv schema.GroupVersion) *apiregistration.APIService {
apiServicePriority, ok := apiVersionPriorities[gv]
if !ok {
// if we aren't found, then we shouldn't register ourselves because it could result in a CRD group version
// being permanently stuck in the APIServices list.
klog.Infof("Skipping APIService creation for %v", gv)
return nil
}
return &apiregistration.APIService{
ObjectMeta: metav1.ObjectMeta{Name: gv.Version + "." + gv.Group},
Spec: apiregistration.APIServiceSpec{
Group: gv.Group,
Version: gv.Version,
GroupPriorityMinimum: apiServicePriority.group,
VersionPriority: apiServicePriority.version,
},
}
}
// makeAPIServiceAvailableHealthzCheck returns a healthz check that returns healthy
// once all of the specified services have been observed to be available at least once.
func makeAPIServiceAvailableHealthzCheck(name string, apiServices []*apiregistration.APIService, apiServiceInformer informers.APIServiceInformer) healthz.HealthzChecker {
// Track the auto-registered API services that have not been observed to be available yet
pendingServiceNamesLock := &sync.RWMutex{}
pendingServiceNames := sets.NewString()
for _, service := range apiServices {
pendingServiceNames.Insert(service.Name)
}
// When an APIService in the list is seen as available, remove it from the pending list
handleAPIServiceChange := func(service *apiregistration.APIService) {
pendingServiceNamesLock.Lock()
defer pendingServiceNamesLock.Unlock()
if !pendingServiceNames.Has(service.Name) {
return
}
if apiregistration.IsAPIServiceConditionTrue(service, apiregistration.Available) {
pendingServiceNames.Delete(service.Name)
}
}
// Watch add/update events for APIServices
apiServiceInformer.Informer().AddEventHandler(cache.ResourceEventHandlerFuncs{
AddFunc: func(obj interface{}) { handleAPIServiceChange(obj.(*apiregistration.APIService)) },
UpdateFunc: func(old, new interface{}) { handleAPIServiceChange(new.(*apiregistration.APIService)) },
})
// Don't return healthy until the pending list is empty
return healthz.NamedCheck(name, func(r *http.Request) error {
pendingServiceNamesLock.RLock()
defer pendingServiceNamesLock.RUnlock()
if pendingServiceNames.Len() > 0 {
return fmt.Errorf("missing APIService: %v", pendingServiceNames.List())
}
return nil
})
}
// priority defines group priority that is used in discovery. This controls
// group position in the kubectl output.
type priority struct {
// group indicates the order of the group relative to other groups.
group int32
// version indicates the relative order of the version inside of its group.
version int32
}
// The proper way to resolve this, letting the aggregator know the desired group and version-within-group order of the underlying servers,
// is to refactor the genericapiserver.DelegationTarget to include a list of priorities based on which APIs were installed.
// This requires the APIGroupInfo struct to evolve to include the concept of priorities, and, to avoid mistakes, the core storage map there needs to be updated.
// That ripples out every bit as far as you'd expect, so for 1.7 we'll include the list here instead of being built up during storage.
var apiVersionPriorities = map[schema.GroupVersion]priority{
{Group: "", Version: "v1"}: {group: 18000, version: 1},
// extensions is above the rest for CLI compatibility, though the level of unqualified resource compatibility we
// can reasonably expect seems questionable.
{Group: "extensions", Version: "v1beta1"}: {group: 17900, version: 1},
// to my knowledge, nothing below here collides
{Group: "apps", Version: "v1beta1"}: {group: 17800, version: 1},
{Group: "apps", Version: "v1beta2"}: {group: 17800, version: 9},
{Group: "apps", Version: "v1"}: {group: 17800, version: 15},
{Group: "events.k8s.io", Version: "v1beta1"}: {group: 17750, version: 5},
{Group: "authentication.k8s.io", Version: "v1"}: {group: 17700, version: 15},
{Group: "authentication.k8s.io", Version: "v1beta1"}: {group: 17700, version: 9},
{Group: "authorization.k8s.io", Version: "v1"}: {group: 17600, version: 15},
{Group: "authorization.k8s.io", Version: "v1beta1"}: {group: 17600, version: 9},
{Group: "autoscaling", Version: "v1"}: {group: 17500, version: 15},
{Group: "autoscaling", Version: "v2beta1"}: {group: 17500, version: 9},
{Group: "autoscaling", Version: "v2beta2"}: {group: 17500, version: 1},
{Group: "batch", Version: "v1"}: {group: 17400, version: 15},
{Group: "batch", Version: "v1beta1"}: {group: 17400, version: 9},
{Group: "batch", Version: "v2alpha1"}: {group: 17400, version: 9},
{Group: "certificates.k8s.io", Version: "v1beta1"}: {group: 17300, version: 9},
{Group: "networking.k8s.io", Version: "v1"}: {group: 17200, version: 15},
{Group: "policy", Version: "v1beta1"}: {group: 17100, version: 9},
{Group: "rbac.authorization.k8s.io", Version: "v1"}: {group: 17000, version: 15},
{Group: "rbac.authorization.k8s.io", Version: "v1beta1"}: {group: 17000, version: 12},
{Group: "rbac.authorization.k8s.io", Version: "v1alpha1"}: {group: 17000, version: 9},
{Group: "settings.k8s.io", Version: "v1alpha1"}: {group: 16900, version: 9},
{Group: "storage.k8s.io", Version: "v1"}: {group: 16800, version: 15},
{Group: "storage.k8s.io", Version: "v1beta1"}: {group: 16800, version: 9},
{Group: "storage.k8s.io", Version: "v1alpha1"}: {group: 16800, version: 1},
{Group: "apiextensions.k8s.io", Version: "v1beta1"}: {group: 16700, version: 9},
{Group: "admissionregistration.k8s.io", Version: "v1"}: {group: 16700, version: 15},
{Group: "admissionregistration.k8s.io", Version: "v1beta1"}: {group: 16700, version: 12},
{Group: "admissionregistration.k8s.io", Version: "v1alpha1"}: {group: 16700, version: 9},
{Group: "scheduling.k8s.io", Version: "v1beta1"}: {group: 16600, version: 12},
{Group: "scheduling.k8s.io", Version: "v1alpha1"}: {group: 16600, version: 9},
{Group: "coordination.k8s.io", Version: "v1beta1"}: {group: 16500, version: 9},
{Group: "auditregistration.k8s.io", Version: "v1alpha1"}: {group: 16400, version: 1},
// Append a new group to the end of the list if unsure.
// You can use min(existing group)-100 as the initial value for a group.
// Version can be set to 9 (to have space around) for a new group.
}
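Following that rule, a hypothetical new group (example.k8s.io is made up here, purely for illustration) would take min(existing group) - 100 = 16400 - 100 = 16300 as its group priority, with version priority 9:

{Group: "example.k8s.io", Version: "v1alpha1"}: {group: 16300, version: 9},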
func apiServicesToRegister(delegateAPIServer genericapiserver.DelegationTarget, registration autoregister.AutoAPIServiceRegistration) []*apiregistration.APIService {
apiServices := []*apiregistration.APIService{}
for _, curr := range delegateAPIServer.ListedPaths() {
if curr == "/api/v1" {
apiService := makeAPIService(schema.GroupVersion{Group: "", Version: "v1"})
registration.AddAPIServiceToSyncOnStart(apiService)
apiServices = append(apiServices, apiService)
continue
}
if !strings.HasPrefix(curr, "/apis/") {
continue
}
// this comes back in a list that looks like /apis/rbac.authorization.k8s.io/v1alpha1
tokens := strings.Split(curr, "/")
if len(tokens) != 4 {
continue
}
apiService := makeAPIService(schema.GroupVersion{Group: tokens[2], Version: tokens[3]})
if apiService == nil {
continue
}
registration.AddAPIServiceToSyncOnStart(apiService)
apiServices = append(apiServices, apiService)
}
return apiServices
}

cmd/kube-apiserver/app/apiextensions.go
View File

@ -1,90 +0,0 @@
/*
Copyright 2017 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app does all of the work necessary to create a Kubernetes
// APIServer by binding together the API, master and APIServer infrastructure.
// It can be configured and called directly or via the hyperkube framework.
package app
import (
"k8s.io/apiextensions-apiserver/pkg/apis/apiextensions/v1beta1"
apiextensionsapiserver "k8s.io/apiextensions-apiserver/pkg/apiserver"
apiextensionsoptions "k8s.io/apiextensions-apiserver/pkg/cmd/server/options"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/features"
genericapiserver "k8s.io/apiserver/pkg/server"
genericoptions "k8s.io/apiserver/pkg/server/options"
utilfeature "k8s.io/apiserver/pkg/util/feature"
"k8s.io/apiserver/pkg/util/webhook"
kubeexternalinformers "k8s.io/client-go/informers"
"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
)
func createAPIExtensionsConfig(
kubeAPIServerConfig genericapiserver.Config,
externalInformers kubeexternalinformers.SharedInformerFactory,
pluginInitializers []admission.PluginInitializer,
commandOptions *options.ServerRunOptions,
masterCount int,
serviceResolver webhook.ServiceResolver,
authResolverWrapper webhook.AuthenticationInfoResolverWrapper,
) (*apiextensionsapiserver.Config, error) {
// make a shallow copy to let us twiddle a few things
// most of the config actually remains the same. We only need to mess with a couple items related to the particulars of the apiextensions
genericConfig := kubeAPIServerConfig
// override genericConfig.AdmissionControl with apiextensions' scheme,
// because apiextensions apiserver should use its own scheme to convert resources.
commandOptions.Admission.ApplyTo(
&genericConfig,
externalInformers,
genericConfig.LoopbackClientConfig,
apiextensionsapiserver.Scheme,
pluginInitializers...)
// copy the etcd options so we don't mutate originals.
etcdOptions := *commandOptions.Etcd
etcdOptions.StorageConfig.Paging = utilfeature.DefaultFeatureGate.Enabled(features.APIListChunking)
etcdOptions.StorageConfig.Codec = apiextensionsapiserver.Codecs.LegacyCodec(v1beta1.SchemeGroupVersion)
genericConfig.RESTOptionsGetter = &genericoptions.SimpleRestOptionsFactory{Options: etcdOptions}
// override MergedResourceConfig with apiextensions defaults and registry
if err := commandOptions.APIEnablement.ApplyTo(
&genericConfig,
apiextensionsapiserver.DefaultAPIResourceConfigSource(),
apiextensionsapiserver.Scheme); err != nil {
return nil, err
}
apiextensionsConfig := &apiextensionsapiserver.Config{
GenericConfig: &genericapiserver.RecommendedConfig{
Config: genericConfig,
SharedInformerFactory: externalInformers,
},
ExtraConfig: apiextensionsapiserver.ExtraConfig{
CRDRESTOptionsGetter: apiextensionsoptions.NewCRDRESTOptionsGetter(etcdOptions),
MasterCount: masterCount,
AuthResolverWrapper: authResolverWrapper,
ServiceResolver: serviceResolver,
},
}
return apiextensionsConfig, nil
}
func createAPIExtensionsServer(apiextensionsConfig *apiextensionsapiserver.Config, delegateAPIServer genericapiserver.DelegationTarget) (*apiextensionsapiserver.CustomResourceDefinitions, error) {
return apiextensionsConfig.Complete().New(delegateAPIServer)
}

cmd/kube-apiserver/app/options/BUILD
View File

@ -1,76 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_library",
"go_test",
)
go_library(
name = "go_default_library",
srcs = [
"globalflags.go",
"options.go",
"validation.go",
],
importpath = "k8s.io/kubernetes/cmd/kube-apiserver/app/options",
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/cloudprovider/providers:go_default_library",
"//pkg/features:go_default_library",
"//pkg/kubeapiserver/options:go_default_library",
"//pkg/kubelet/client:go_default_library",
"//pkg/master/ports:go_default_library",
"//pkg/master/reconcilers:go_default_library",
"//pkg/serviceaccount:go_default_library",
"//staging/src/k8s.io/apiextensions-apiserver/pkg/apiserver:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/net:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/admission:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/options:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/globalflag:go_default_library",
"//staging/src/k8s.io/kube-aggregator/pkg/apiserver/scheme:go_default_library",
"//vendor/github.com/spf13/pflag:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = [
"globalflags_test.go",
"options_test.go",
],
embed = [":go_default_library"],
deps = [
"//pkg/api/legacyscheme:go_default_library",
"//pkg/apis/core:go_default_library",
"//pkg/kubeapiserver/options:go_default_library",
"//pkg/kubelet/client:go_default_library",
"//pkg/master/reconcilers:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/diff:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/options:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/storage/storagebackend:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/globalflag:go_default_library",
"//staging/src/k8s.io/apiserver/plugin/pkg/audit/buffered:go_default_library",
"//staging/src/k8s.io/apiserver/plugin/pkg/audit/truncate:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//vendor/github.com/spf13/pflag:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
)

cmd/kube-apiserver/app/options/globalflags.go
View File

@ -1,37 +0,0 @@
/*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"github.com/spf13/pflag"
"k8s.io/apiserver/pkg/util/globalflag"
// ensure libs have a chance to globally register their flags
_ "k8s.io/apiserver/pkg/admission"
)
// AddCustomGlobalFlags explicitly registers flags that internal packages register
// against the global flagsets from "flag". We do this in order to prevent
// unwanted flags from leaking into the kube-apiserver's flagset.
func AddCustomGlobalFlags(fs *pflag.FlagSet) {
// Lookup flags in global flag set and re-register the values with our flagset.
// Adds flags from k8s.io/apiserver/pkg/admission.
globalflag.Register(fs, "default-not-ready-toleration-seconds")
globalflag.Register(fs, "default-unreachable-toleration-seconds")
}

cmd/kube-apiserver/app/options/options.go
View File

@ -1,240 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package options contains flags and options for initializing an apiserver
package options
import (
"net"
"strings"
"time"
utilnet "k8s.io/apimachinery/pkg/util/net"
genericoptions "k8s.io/apiserver/pkg/server/options"
"k8s.io/apiserver/pkg/storage/storagebackend"
apiserverflag "k8s.io/apiserver/pkg/util/flag"
api "k8s.io/kubernetes/pkg/apis/core"
_ "k8s.io/kubernetes/pkg/features" // add the kubernetes feature gates
kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options"
kubeletclient "k8s.io/kubernetes/pkg/kubelet/client"
"k8s.io/kubernetes/pkg/master/ports"
"k8s.io/kubernetes/pkg/master/reconcilers"
"k8s.io/kubernetes/pkg/serviceaccount"
)
// ServerRunOptions runs a kubernetes api server.
type ServerRunOptions struct {
GenericServerRunOptions *genericoptions.ServerRunOptions
Etcd *genericoptions.EtcdOptions
SecureServing *genericoptions.SecureServingOptionsWithLoopback
InsecureServing *genericoptions.DeprecatedInsecureServingOptionsWithLoopback
Audit *genericoptions.AuditOptions
Features *genericoptions.FeatureOptions
Admission *kubeoptions.AdmissionOptions
Authentication *kubeoptions.BuiltInAuthenticationOptions
Authorization *kubeoptions.BuiltInAuthorizationOptions
CloudProvider *kubeoptions.CloudProviderOptions
StorageSerialization *kubeoptions.StorageSerializationOptions
APIEnablement *genericoptions.APIEnablementOptions
AllowPrivileged bool
EnableLogsHandler bool
EventTTL time.Duration
KubeletConfig kubeletclient.KubeletClientConfig
KubernetesServiceNodePort int
MaxConnectionBytesPerSec int64
ServiceClusterIPRange net.IPNet // TODO: make this a list
ServiceNodePortRange utilnet.PortRange
SSHKeyfile string
SSHUser string
ProxyClientCertFile string
ProxyClientKeyFile string
EnableAggregatorRouting bool
MasterCount int
EndpointReconcilerType string
ServiceAccountSigningKeyFile string
ServiceAccountIssuer serviceaccount.TokenGenerator
ServiceAccountTokenMaxExpiration time.Duration
}
// NewServerRunOptions creates a new ServerRunOptions object with default parameters
func NewServerRunOptions() *ServerRunOptions {
s := ServerRunOptions{
GenericServerRunOptions: genericoptions.NewServerRunOptions(),
Etcd: genericoptions.NewEtcdOptions(storagebackend.NewDefaultConfig(kubeoptions.DefaultEtcdPathPrefix, nil)),
SecureServing: kubeoptions.NewSecureServingOptions(),
InsecureServing: kubeoptions.NewInsecureServingOptions(),
Audit: genericoptions.NewAuditOptions(),
Features: genericoptions.NewFeatureOptions(),
Admission: kubeoptions.NewAdmissionOptions(),
Authentication: kubeoptions.NewBuiltInAuthenticationOptions().WithAll(),
Authorization: kubeoptions.NewBuiltInAuthorizationOptions(),
CloudProvider: kubeoptions.NewCloudProviderOptions(),
StorageSerialization: kubeoptions.NewStorageSerializationOptions(),
APIEnablement: genericoptions.NewAPIEnablementOptions(),
EnableLogsHandler: true,
EventTTL: 1 * time.Hour,
MasterCount: 1,
EndpointReconcilerType: string(reconcilers.LeaseEndpointReconcilerType),
KubeletConfig: kubeletclient.KubeletClientConfig{
Port: ports.KubeletPort,
ReadOnlyPort: ports.KubeletReadOnlyPort,
PreferredAddressTypes: []string{
// --override-hostname
string(api.NodeHostName),
// internal, preferring DNS if reported
string(api.NodeInternalDNS),
string(api.NodeInternalIP),
// external, preferring DNS if reported
string(api.NodeExternalDNS),
string(api.NodeExternalIP),
},
EnableHttps: true,
HTTPTimeout: time.Duration(5) * time.Second,
},
ServiceNodePortRange: kubeoptions.DefaultServiceNodePortRange,
}
s.ServiceClusterIPRange = kubeoptions.DefaultServiceIPCIDR
// Overwrite the default for storage data format.
//s.Etcd.DefaultStorageMediaType = "application/vnd.kubernetes.protobuf"
return &s
}
// Flags returns flags for a specific APIServer by section name
func (s *ServerRunOptions) Flags() (fss apiserverflag.NamedFlagSets) {
// Add the generic flags.
s.GenericServerRunOptions.AddUniversalFlags(fss.FlagSet("generic"))
s.Etcd.AddFlags(fss.FlagSet("etcd"))
s.SecureServing.AddFlags(fss.FlagSet("secure serving"))
s.InsecureServing.AddFlags(fss.FlagSet("insecure serving"))
s.InsecureServing.AddUnqualifiedFlags(fss.FlagSet("insecure serving")) // TODO: remove it until kops stops using `--address`
s.Audit.AddFlags(fss.FlagSet("auditing"))
s.Features.AddFlags(fss.FlagSet("features"))
s.Authentication.AddFlags(fss.FlagSet("authentication"))
s.Authorization.AddFlags(fss.FlagSet("authorization"))
s.CloudProvider.AddFlags(fss.FlagSet("cloud provider"))
s.StorageSerialization.AddFlags(fss.FlagSet("storage"))
s.APIEnablement.AddFlags(fss.FlagSet("api enablement"))
s.Admission.AddFlags(fss.FlagSet("admission"))
// Note: the weird ""+ in below lines seems to be the only way to get gofmt to
// arrange these text blocks sensibly. Grrr.
fs := fss.FlagSet("misc")
fs.DurationVar(&s.EventTTL, "event-ttl", s.EventTTL,
"Amount of time to retain events.")
fs.BoolVar(&s.AllowPrivileged, "allow-privileged", s.AllowPrivileged,
"If true, allow privileged containers. [default=false]")
fs.BoolVar(&s.EnableLogsHandler, "enable-logs-handler", s.EnableLogsHandler,
"If true, install a /logs handler for the apiserver logs.")
// Deprecated in release 1.9
fs.StringVar(&s.SSHUser, "ssh-user", s.SSHUser,
"If non-empty, use secure SSH proxy to the nodes, using this user name")
fs.MarkDeprecated("ssh-user", "This flag will be removed in a future version.")
// Deprecated in release 1.9
fs.StringVar(&s.SSHKeyfile, "ssh-keyfile", s.SSHKeyfile,
"If non-empty, use secure SSH proxy to the nodes, using this user keyfile")
fs.MarkDeprecated("ssh-keyfile", "This flag will be removed in a future version.")
fs.Int64Var(&s.MaxConnectionBytesPerSec, "max-connection-bytes-per-sec", s.MaxConnectionBytesPerSec, ""+
"If non-zero, throttle each user connection to this number of bytes/sec. "+
"Currently only applies to long-running requests.")
fs.IntVar(&s.MasterCount, "apiserver-count", s.MasterCount,
"The number of apiservers running in the cluster, must be a positive number. (In use when --endpoint-reconciler-type=master-count is enabled.)")
fs.StringVar(&s.EndpointReconcilerType, "endpoint-reconciler-type", string(s.EndpointReconcilerType),
"Use an endpoint reconciler ("+strings.Join(reconcilers.AllTypes.Names(), ", ")+")")
// See #14282 for details on how to test/try this option out.
// TODO: remove this comment once this option is tested in CI.
fs.IntVar(&s.KubernetesServiceNodePort, "kubernetes-service-node-port", s.KubernetesServiceNodePort, ""+
"If non-zero, the Kubernetes master service (which apiserver creates/maintains) will be "+
"of type NodePort, using this as the value of the port. If zero, the Kubernetes master "+
"service will be of type ClusterIP.")
fs.IPNetVar(&s.ServiceClusterIPRange, "service-cluster-ip-range", s.ServiceClusterIPRange, ""+
"A CIDR notation IP range from which to assign service cluster IPs. This must not "+
"overlap with any IP ranges assigned to nodes for pods.")
fs.Var(&s.ServiceNodePortRange, "service-node-port-range", ""+
"A port range to reserve for services with NodePort visibility. "+
"Example: '30000-32767'. Inclusive at both ends of the range.")
// Kubelet related flags:
fs.BoolVar(&s.KubeletConfig.EnableHttps, "kubelet-https", s.KubeletConfig.EnableHttps,
"Use https for kubelet connections.")
fs.StringSliceVar(&s.KubeletConfig.PreferredAddressTypes, "kubelet-preferred-address-types", s.KubeletConfig.PreferredAddressTypes,
"List of the preferred NodeAddressTypes to use for kubelet connections.")
fs.UintVar(&s.KubeletConfig.Port, "kubelet-port", s.KubeletConfig.Port,
"DEPRECATED: kubelet port.")
fs.MarkDeprecated("kubelet-port", "kubelet-port is deprecated and will be removed.")
fs.UintVar(&s.KubeletConfig.ReadOnlyPort, "kubelet-read-only-port", s.KubeletConfig.ReadOnlyPort,
"DEPRECATED: kubelet port.")
fs.DurationVar(&s.KubeletConfig.HTTPTimeout, "kubelet-timeout", s.KubeletConfig.HTTPTimeout,
"Timeout for kubelet operations.")
fs.StringVar(&s.KubeletConfig.CertFile, "kubelet-client-certificate", s.KubeletConfig.CertFile,
"Path to a client cert file for TLS.")
fs.StringVar(&s.KubeletConfig.KeyFile, "kubelet-client-key", s.KubeletConfig.KeyFile,
"Path to a client key file for TLS.")
fs.StringVar(&s.KubeletConfig.CAFile, "kubelet-certificate-authority", s.KubeletConfig.CAFile,
"Path to a cert file for the certificate authority.")
// TODO: delete this flag in 1.13
repair := false
fs.BoolVar(&repair, "repair-malformed-updates", false, "deprecated")
fs.MarkDeprecated("repair-malformed-updates", "This flag will be removed in a future version")
fs.StringVar(&s.ProxyClientCertFile, "proxy-client-cert-file", s.ProxyClientCertFile, ""+
"Client certificate used to prove the identity of the aggregator or kube-apiserver "+
"when it must call out during a request. This includes proxying requests to a user "+
"api-server and calling out to webhook admission plugins. It is expected that this "+
"cert includes a signature from the CA in the --requestheader-client-ca-file flag. "+
"That CA is published in the 'extension-apiserver-authentication' configmap in "+
"the kube-system namespace. Components receiving calls from kube-aggregator should "+
"use that CA to perform their half of the mutual TLS verification.")
fs.StringVar(&s.ProxyClientKeyFile, "proxy-client-key-file", s.ProxyClientKeyFile, ""+
"Private key for the client certificate used to prove the identity of the aggregator or kube-apiserver "+
"when it must call out during a request. This includes proxying requests to a user "+
"api-server and calling out to webhook admission plugins.")
fs.BoolVar(&s.EnableAggregatorRouting, "enable-aggregator-routing", s.EnableAggregatorRouting,
"Turns on aggregator routing requests to endpoints IP rather than cluster IP.")
fs.StringVar(&s.ServiceAccountSigningKeyFile, "service-account-signing-key-file", s.ServiceAccountSigningKeyFile, ""+
"Path to the file that contains the current private key of the service account token issuer. The issuer will sign issued ID tokens with this private key. (Requires the 'TokenRequest' feature gate.)")
return fss
}

cmd/kube-apiserver/app/options/validation.go
View File

@ -1,97 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package options
import (
"errors"
"fmt"
apiextensionsapiserver "k8s.io/apiextensions-apiserver/pkg/apiserver"
utilfeature "k8s.io/apiserver/pkg/util/feature"
aggregatorscheme "k8s.io/kube-aggregator/pkg/apiserver/scheme"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/features"
)
// TODO: Longer term we should read this from some config store, rather than a flag.
func validateClusterIPFlags(options *ServerRunOptions) []error {
var errs []error
if options.ServiceClusterIPRange.IP == nil {
errs = append(errs, errors.New("no --service-cluster-ip-range specified"))
}
var ones, bits = options.ServiceClusterIPRange.Mask.Size()
if bits-ones > 20 {
errs = append(errs, errors.New("specified --service-cluster-ip-range is too large"))
}
return errs
}
func validateServiceNodePort(options *ServerRunOptions) []error {
var errs []error
if options.KubernetesServiceNodePort < 0 || options.KubernetesServiceNodePort > 65535 {
errs = append(errs, fmt.Errorf("--kubernetes-service-node-port %v must be between 0 and 65535, inclusive. If 0, the Kubernetes master service will be of type ClusterIP", options.KubernetesServiceNodePort))
}
if options.KubernetesServiceNodePort > 0 && !options.ServiceNodePortRange.Contains(options.KubernetesServiceNodePort) {
errs = append(errs, fmt.Errorf("kubernetes service port range %v doesn't contain %v", options.ServiceNodePortRange, (options.KubernetesServiceNodePort)))
}
return errs
}
func validateTokenRequest(options *ServerRunOptions) []error {
var errs []error
enableAttempted := options.ServiceAccountSigningKeyFile != "" ||
options.Authentication.ServiceAccounts.Issuer != "" ||
len(options.Authentication.APIAudiences) != 0
enableSucceeded := options.ServiceAccountIssuer != nil
if enableAttempted && !utilfeature.DefaultFeatureGate.Enabled(features.TokenRequest) {
errs = append(errs, errors.New("the TokenRequest feature is not enabled but --service-account-signing-key-file, --service-account-issuer and/or --api-audiences flags were passed"))
}
if enableAttempted && !enableSucceeded {
errs = append(errs, errors.New("--service-account-signing-key-file, --service-account-issuer, and --api-audiences should be specified together"))
}
return errs
}
// Validate checks ServerRunOptions and return a slice of found errs.
func (s *ServerRunOptions) Validate() []error {
var errs []error
if s.MasterCount <= 0 {
errs = append(errs, fmt.Errorf("--apiserver-count should be a positive number, but value '%d' provided", s.MasterCount))
}
errs = append(errs, s.Etcd.Validate()...)
errs = append(errs, validateClusterIPFlags(s)...)
errs = append(errs, validateServiceNodePort(s)...)
errs = append(errs, s.SecureServing.Validate()...)
errs = append(errs, s.Authentication.Validate()...)
errs = append(errs, s.Authorization.Validate()...)
errs = append(errs, s.Audit.Validate()...)
errs = append(errs, s.Admission.Validate()...)
errs = append(errs, s.InsecureServing.Validate()...)
errs = append(errs, s.APIEnablement.Validate(legacyscheme.Scheme, apiextensionsapiserver.Scheme, aggregatorscheme.Scheme)...)
errs = append(errs, validateTokenRequest(s)...)
return errs
}

cmd/kube-apiserver/app/server.go
View File

@ -1,568 +0,0 @@
/*
Copyright 2014 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app does all of the work necessary to create a Kubernetes
// APIServer by binding together the API, master and APIServer infrastructure.
// It can be configured and called directly or via the hyperkube framework.
package app
import (
"fmt"
"io/ioutil"
"net"
"net/http"
"net/url"
"os"
"strings"
"time"
"github.com/spf13/cobra"
utilerrors "k8s.io/apimachinery/pkg/util/errors"
utilnet "k8s.io/apimachinery/pkg/util/net"
"k8s.io/apimachinery/pkg/util/sets"
"k8s.io/apiserver/pkg/admission"
"k8s.io/apiserver/pkg/authentication/authenticator"
"k8s.io/apiserver/pkg/authorization/authorizer"
genericapiserver "k8s.io/apiserver/pkg/server"
"k8s.io/apiserver/pkg/server/filters"
serveroptions "k8s.io/apiserver/pkg/server/options"
serverstorage "k8s.io/apiserver/pkg/server/storage"
apiserverflag "k8s.io/apiserver/pkg/util/flag"
"k8s.io/apiserver/pkg/util/globalflag"
"k8s.io/apiserver/pkg/util/webhook"
clientgoinformers "k8s.io/client-go/informers"
clientgoclientset "k8s.io/client-go/kubernetes"
certutil "k8s.io/client-go/util/cert"
"k8s.io/klog"
aggregatorapiserver "k8s.io/kube-aggregator/pkg/apiserver"
"k8s.io/kubernetes/cmd/kube-apiserver/app/options"
"k8s.io/kubernetes/pkg/api/legacyscheme"
"k8s.io/kubernetes/pkg/capabilities"
serviceaccountcontroller "k8s.io/kubernetes/pkg/controller/serviceaccount"
"k8s.io/kubernetes/pkg/kubeapiserver"
kubeapiserveradmission "k8s.io/kubernetes/pkg/kubeapiserver/admission"
kubeauthenticator "k8s.io/kubernetes/pkg/kubeapiserver/authenticator"
"k8s.io/kubernetes/pkg/kubeapiserver/authorizer/modes"
kubeoptions "k8s.io/kubernetes/pkg/kubeapiserver/options"
kubeserver "k8s.io/kubernetes/pkg/kubeapiserver/server"
"k8s.io/kubernetes/pkg/master"
"k8s.io/kubernetes/pkg/master/reconcilers"
"k8s.io/kubernetes/pkg/registry/cachesize"
rbacrest "k8s.io/kubernetes/pkg/registry/rbac/rest"
"k8s.io/kubernetes/pkg/serviceaccount"
utilflag "k8s.io/kubernetes/pkg/util/flag"
_ "k8s.io/kubernetes/pkg/util/reflector/prometheus" // for reflector metric registration
_ "k8s.io/kubernetes/pkg/util/workqueue/prometheus" // for workqueue metric registration
"k8s.io/kubernetes/pkg/version"
"k8s.io/kubernetes/pkg/version/verflag"
)
const etcdRetryLimit = 60
const etcdRetryInterval = 1 * time.Second
var (
DefaultProxyDialerFn utilnet.DialFunc
)
// NewAPIServerCommand creates a *cobra.Command object with default parameters
func NewAPIServerCommand(stopCh <-chan struct{}) *cobra.Command {
s := options.NewServerRunOptions()
cmd := &cobra.Command{
Use: "kube-apiserver",
Long: `The Kubernetes API server validates and configures data
for the api objects which include pods, services, replicationcontrollers, and
others. The API Server services REST operations and provides the frontend to the
cluster's shared state through which all other components interact.`,
RunE: func(cmd *cobra.Command, args []string) error {
verflag.PrintAndExitIfRequested()
utilflag.PrintFlags(cmd.Flags())
// set default options
completedOptions, err := Complete(s)
if err != nil {
return err
}
// validate options
if errs := completedOptions.Validate(); len(errs) != 0 {
return utilerrors.NewAggregate(errs)
}
return Run(completedOptions, stopCh)
},
}
fs := cmd.Flags()
namedFlagSets := s.Flags()
verflag.AddFlags(namedFlagSets.FlagSet("global"))
globalflag.AddGlobalFlags(namedFlagSets.FlagSet("global"), cmd.Name())
options.AddCustomGlobalFlags(namedFlagSets.FlagSet("generic"))
for _, f := range namedFlagSets.FlagSets {
fs.AddFlagSet(f)
}
usageFmt := "Usage:\n %s\n"
cols, _, _ := apiserverflag.TerminalSize(cmd.OutOrStdout())
cmd.SetUsageFunc(func(cmd *cobra.Command) error {
fmt.Fprintf(cmd.OutOrStderr(), usageFmt, cmd.UseLine())
apiserverflag.PrintSections(cmd.OutOrStderr(), namedFlagSets, cols)
return nil
})
cmd.SetHelpFunc(func(cmd *cobra.Command, args []string) {
fmt.Fprintf(cmd.OutOrStdout(), "%s\n\n"+usageFmt, cmd.Long, cmd.UseLine())
apiserverflag.PrintSections(cmd.OutOrStdout(), namedFlagSets, cols)
})
return cmd
}
type startupConfig struct {
Handler http.Handler
Authenticator authenticator.Request
}
var StartupConfig = make(chan startupConfig, 1)
// Run runs the specified APIServer. This should never exit.
func Run(completeOptions completedServerRunOptions, stopCh <-chan struct{}) error {
// To help debugging, immediately log version
klog.Infof("Version: %+v", version.Get())
config, server, err := CreateServerChain(completeOptions, stopCh)
if err != nil {
return err
}
StartupConfig <- startupConfig{
Handler: server.Handler,
Authenticator: config.GenericConfig.Authentication.Authenticator,
}
return server.PrepareRun().Run(stopCh)
}
// CreateServerChain creates the apiservers connected via delegation.
func CreateServerChain(completedOptions completedServerRunOptions, stopCh <-chan struct{}) (*master.Config, *genericapiserver.GenericAPIServer, error) {
if DefaultProxyDialerFn != nil {
completedOptions.KubeletConfig.Dial = DefaultProxyDialerFn
}
kubeAPIServerConfig, insecureServingInfo, serviceResolver, pluginInitializer, admissionPostStartHook, err := CreateKubeAPIServerConfig(completedOptions)
if err != nil {
return nil, nil, err
}
// If additional API servers are added, they should be gated.
apiExtensionsConfig, err := createAPIExtensionsConfig(*kubeAPIServerConfig.GenericConfig, kubeAPIServerConfig.ExtraConfig.VersionedInformers, pluginInitializer, completedOptions.ServerRunOptions, completedOptions.MasterCount,
serviceResolver, webhook.NewDefaultAuthenticationInfoResolverWrapper(nil, kubeAPIServerConfig.GenericConfig.LoopbackClientConfig))
if err != nil {
return nil, nil, err
}
apiExtensionsServer, err := createAPIExtensionsServer(apiExtensionsConfig, genericapiserver.NewEmptyDelegate())
if err != nil {
return nil, nil, err
}
kubeAPIServer, err := CreateKubeAPIServer(kubeAPIServerConfig, apiExtensionsServer.GenericAPIServer, admissionPostStartHook)
if err != nil {
return nil, nil, err
}
// otherwise go down the normal path of standing the aggregator up in front of the API server
// this wires up openapi
kubeAPIServer.GenericAPIServer.PrepareRun()
// This will wire up openapi for extension api server
apiExtensionsServer.GenericAPIServer.PrepareRun()
// aggregator comes last in the chain
aggregatorConfig, err := createAggregatorConfig(*kubeAPIServerConfig.GenericConfig, completedOptions.ServerRunOptions, kubeAPIServerConfig.ExtraConfig.VersionedInformers, serviceResolver, nil, pluginInitializer)
if err != nil {
return nil, nil, err
}
aggregatorServer, err := createAggregatorServer(aggregatorConfig, kubeAPIServer.GenericAPIServer, apiExtensionsServer.Informers)
if err != nil {
// we don't need special handling for innerStopCh because the aggregator server doesn't create any go routines
return nil, nil, err
}
if insecureServingInfo != nil {
insecureHandlerChain := kubeserver.BuildInsecureHandlerChain(aggregatorServer.GenericAPIServer.UnprotectedHandler(), kubeAPIServerConfig.GenericConfig)
if err := insecureServingInfo.Serve(insecureHandlerChain, kubeAPIServerConfig.GenericConfig.RequestTimeout, stopCh); err != nil {
return nil, nil, err
}
}
return kubeAPIServerConfig, aggregatorServer.GenericAPIServer, nil
}
// CreateKubeAPIServer creates and wires a workable kube-apiserver
func CreateKubeAPIServer(kubeAPIServerConfig *master.Config, delegateAPIServer genericapiserver.DelegationTarget, admissionPostStartHook genericapiserver.PostStartHookFunc) (*master.Master, error) {
kubeAPIServer, err := kubeAPIServerConfig.Complete().New(delegateAPIServer)
if err != nil {
return nil, err
}
kubeAPIServer.GenericAPIServer.AddPostStartHookOrDie("start-kube-apiserver-admission-initializer", admissionPostStartHook)
return kubeAPIServer, nil
}
// CreateKubeAPIServerConfig creates all the resources for running the API server, but runs none of them
func CreateKubeAPIServerConfig(
s completedServerRunOptions,
) (
config *master.Config,
insecureServingInfo *genericapiserver.DeprecatedInsecureServingInfo,
serviceResolver aggregatorapiserver.ServiceResolver,
pluginInitializers []admission.PluginInitializer,
admissionPostStartHook genericapiserver.PostStartHookFunc,
lastErr error,
) {
var genericConfig *genericapiserver.Config
var storageFactory *serverstorage.DefaultStorageFactory
var versionedInformers clientgoinformers.SharedInformerFactory
genericConfig, versionedInformers, insecureServingInfo, serviceResolver, pluginInitializers, admissionPostStartHook, storageFactory, lastErr = buildGenericConfig(s.ServerRunOptions)
if lastErr != nil {
return
}
capabilities.Initialize(capabilities.Capabilities{
AllowPrivileged: s.AllowPrivileged,
// TODO(vmarmol): Implement support for HostNetworkSources.
PrivilegedSources: capabilities.PrivilegedSources{
HostNetworkSources: []string{},
HostPIDSources: []string{},
HostIPCSources: []string{},
},
PerConnectionBandwidthLimitBytesPerSec: s.MaxConnectionBytesPerSec,
})
serviceIPRange, apiServerServiceIP, lastErr := master.DefaultServiceIPRange(s.ServiceClusterIPRange)
if lastErr != nil {
return
}
config = &master.Config{
GenericConfig: genericConfig,
ExtraConfig: master.ExtraConfig{
APIResourceConfigSource: storageFactory.APIResourceConfigSource,
StorageFactory: storageFactory,
EventTTL: s.EventTTL,
KubeletClientConfig: s.KubeletConfig,
EnableLogsSupport: s.EnableLogsHandler,
ServiceIPRange: serviceIPRange,
APIServerServiceIP: apiServerServiceIP,
APIServerServicePort: 443,
ServiceNodePortRange: s.ServiceNodePortRange,
KubernetesServiceNodePort: s.KubernetesServiceNodePort,
EndpointReconcilerType: reconcilers.Type(s.EndpointReconcilerType),
MasterCount: s.MasterCount,
ServiceAccountIssuer: s.ServiceAccountIssuer,
ServiceAccountMaxExpiration: s.ServiceAccountTokenMaxExpiration,
VersionedInformers: versionedInformers,
},
}
return
}
// BuildGenericConfig takes the master server options and produces the genericapiserver.Config associated with it
func buildGenericConfig(
s *options.ServerRunOptions,
) (
genericConfig *genericapiserver.Config,
versionedInformers clientgoinformers.SharedInformerFactory,
insecureServingInfo *genericapiserver.DeprecatedInsecureServingInfo,
serviceResolver aggregatorapiserver.ServiceResolver,
pluginInitializers []admission.PluginInitializer,
admissionPostStartHook genericapiserver.PostStartHookFunc,
storageFactory *serverstorage.DefaultStorageFactory,
lastErr error,
) {
genericConfig = genericapiserver.NewConfig(legacyscheme.Codecs)
genericConfig.MergedResourceConfig = master.DefaultAPIResourceConfigSource()
if lastErr = s.GenericServerRunOptions.ApplyTo(genericConfig); lastErr != nil {
return
}
if lastErr = s.InsecureServing.ApplyTo(&insecureServingInfo, &genericConfig.LoopbackClientConfig); lastErr != nil {
return
}
if lastErr = s.SecureServing.ApplyTo(&genericConfig.SecureServing, &genericConfig.LoopbackClientConfig); lastErr != nil {
return
}
if lastErr = s.Authentication.ApplyTo(genericConfig); lastErr != nil {
return
}
if lastErr = s.Features.ApplyTo(genericConfig); lastErr != nil {
return
}
if lastErr = s.APIEnablement.ApplyTo(genericConfig, master.DefaultAPIResourceConfigSource(), legacyscheme.Scheme); lastErr != nil {
return
}
genericConfig.LongRunningFunc = filters.BasicLongRunningRequestCheck(
sets.NewString("watch", "proxy"),
sets.NewString("attach", "exec", "proxy", "log", "portforward"),
)
kubeVersion := version.Get()
genericConfig.Version = &kubeVersion
storageFactoryConfig := kubeapiserver.NewStorageFactoryConfig()
storageFactoryConfig.ApiResourceConfig = genericConfig.MergedResourceConfig
completedStorageFactoryConfig, err := storageFactoryConfig.Complete(s.Etcd, s.StorageSerialization)
if err != nil {
lastErr = err
return
}
storageFactory, lastErr = completedStorageFactoryConfig.New()
if lastErr != nil {
return
}
if lastErr = s.Etcd.ApplyWithStorageFactoryTo(storageFactory, genericConfig); lastErr != nil {
return
}
// Use protobufs for self-communication.
// Since not every generic apiserver has to support protobufs, we
// cannot default to it in generic apiserver and need to explicitly
// set it in kube-apiserver.
genericConfig.LoopbackClientConfig.ContentConfig.ContentType = "application/vnd.kubernetes.protobuf"
kubeClientConfig := genericConfig.LoopbackClientConfig
clientgoExternalClient, err := clientgoclientset.NewForConfig(kubeClientConfig)
if err != nil {
lastErr = fmt.Errorf("failed to create real external clientset: %v", err)
return
}
versionedInformers = clientgoinformers.NewSharedInformerFactory(clientgoExternalClient, 10*time.Minute)
genericConfig.Authentication.Authenticator, err = BuildAuthenticator(s, clientgoExternalClient, versionedInformers)
if err != nil {
lastErr = fmt.Errorf("invalid authentication config: %v", err)
return
}
genericConfig.Authorization.Authorizer, genericConfig.RuleResolver, err = BuildAuthorizer(s, versionedInformers)
if err != nil {
lastErr = fmt.Errorf("invalid authorization config: %v", err)
return
}
if !sets.NewString(s.Authorization.Modes...).Has(modes.ModeRBAC) {
genericConfig.DisabledPostStartHooks.Insert(rbacrest.PostStartHookName)
}
admissionConfig := &kubeapiserveradmission.Config{
ExternalInformers: versionedInformers,
LoopbackClientConfig: genericConfig.LoopbackClientConfig,
CloudConfigFile: s.CloudProvider.CloudConfigFile,
}
serviceResolver = buildServiceResolver(s.EnableAggregatorRouting, genericConfig.LoopbackClientConfig.Host, versionedInformers)
authInfoResolverWrapper := webhook.NewDefaultAuthenticationInfoResolverWrapper(nil, genericConfig.LoopbackClientConfig)
lastErr = s.Audit.ApplyTo(
genericConfig,
genericConfig.LoopbackClientConfig,
versionedInformers,
serveroptions.NewProcessInfo("kube-apiserver", "kube-system"),
&serveroptions.WebhookOptions{
AuthInfoResolverWrapper: authInfoResolverWrapper,
ServiceResolver: serviceResolver,
},
)
if lastErr != nil {
return
}
pluginInitializers, admissionPostStartHook, err = admissionConfig.New(nil, serviceResolver)
if err != nil {
lastErr = fmt.Errorf("failed to create admission plugin initializer: %v", err)
return
}
err = s.Admission.ApplyTo(
genericConfig,
versionedInformers,
kubeClientConfig,
legacyscheme.Scheme,
pluginInitializers...)
if err != nil {
lastErr = fmt.Errorf("failed to initialize admission: %v", err)
}
return
}
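// A minimal sketch of the protobuf opt-in performed on the loopback client
// config above; restclient is assumed to alias k8s.io/client-go/rest. Only
// ContentType is set above; AcceptContentTypes is added here purely for
// illustration of the related fallback knob.
func sketchUseProtobuf(cfg *restclient.Config) {
	// Prefer protobuf on the wire, but accept JSON for resources (such as
	// custom resources) that a server can only serve as JSON.
	cfg.ContentConfig.AcceptContentTypes = "application/vnd.kubernetes.protobuf,application/json"
	cfg.ContentConfig.ContentType = "application/vnd.kubernetes.protobuf"
}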
// BuildAuthenticator constructs the authenticator
func BuildAuthenticator(s *options.ServerRunOptions, extclient clientgoclientset.Interface, versionedInformer clientgoinformers.SharedInformerFactory) (authenticator.Request, error) {
authenticatorConfig := s.Authentication.ToAuthenticationConfig()
if s.Authentication.ServiceAccounts.Lookup {
authenticatorConfig.ServiceAccountTokenGetter = serviceaccountcontroller.NewGetterFromClient(extclient)
}
return authenticatorConfig.New()
}
// BuildAuthorizer constructs the authorizer
func BuildAuthorizer(s *options.ServerRunOptions, versionedInformers clientgoinformers.SharedInformerFactory) (authorizer.Authorizer, authorizer.RuleResolver, error) {
authorizationConfig := s.Authorization.ToAuthorizationConfig(versionedInformers)
return authorizationConfig.New()
}
// completedServerRunOptions is a private wrapper that enforces a call of Complete() before Run can be invoked.
type completedServerRunOptions struct {
*options.ServerRunOptions
}
// Complete sets default ServerRunOptions.
// Should be called after the kube-apiserver flags are parsed.
func Complete(s *options.ServerRunOptions) (completedServerRunOptions, error) {
var options completedServerRunOptions
// set defaults
if err := s.GenericServerRunOptions.DefaultAdvertiseAddress(s.SecureServing.SecureServingOptions); err != nil {
return options, err
}
if err := kubeoptions.DefaultAdvertiseAddress(s.GenericServerRunOptions, s.InsecureServing.DeprecatedInsecureServingOptions); err != nil {
return options, err
}
serviceIPRange, apiServerServiceIP, err := master.DefaultServiceIPRange(s.ServiceClusterIPRange)
if err != nil {
return options, fmt.Errorf("error determining service IP ranges: %v", err)
}
s.ServiceClusterIPRange = serviceIPRange
if err := s.SecureServing.MaybeDefaultWithSelfSignedCerts(s.GenericServerRunOptions.AdvertiseAddress.String(), []string{"kubernetes.default.svc", "kubernetes.default", "kubernetes"}, []net.IP{apiServerServiceIP}); err != nil {
return options, fmt.Errorf("error creating self-signed certificates: %v", err)
}
if len(s.GenericServerRunOptions.ExternalHost) == 0 {
if len(s.GenericServerRunOptions.AdvertiseAddress) > 0 {
s.GenericServerRunOptions.ExternalHost = s.GenericServerRunOptions.AdvertiseAddress.String()
} else {
if hostname, err := os.Hostname(); err == nil {
s.GenericServerRunOptions.ExternalHost = hostname
} else {
return options, fmt.Errorf("error finding host name: %v", err)
}
}
klog.Infof("external host was not specified, using %v", s.GenericServerRunOptions.ExternalHost)
}
s.Authentication.ApplyAuthorization(s.Authorization)
// Use (ServiceAccountSigningKeyFile != "") as a proxy to the user enabling
// TokenRequest functionality. This defaulting was convenient, but messed up
// a lot of people when they rotated their serving cert with no idea it was
// connected to their service account keys. We are taking this opportunity to
// remove this problematic defaulting.
if s.ServiceAccountSigningKeyFile == "" {
// Default to the private server key for service account token signing
if len(s.Authentication.ServiceAccounts.KeyFiles) == 0 && s.SecureServing.ServerCert.CertKey.KeyFile != "" {
if kubeauthenticator.IsValidServiceAccountKeyFile(s.SecureServing.ServerCert.CertKey.KeyFile) {
s.Authentication.ServiceAccounts.KeyFiles = []string{s.SecureServing.ServerCert.CertKey.KeyFile}
} else {
klog.Warning("No TLS key provided, service account token authentication disabled")
}
}
}
if s.ServiceAccountSigningKeyFile != "" && s.Authentication.ServiceAccounts.Issuer != "" {
sk, err := certutil.PrivateKeyFromFile(s.ServiceAccountSigningKeyFile)
if err != nil {
return options, fmt.Errorf("failed to parse service-account-issuer-key-file: %v", err)
}
if s.Authentication.ServiceAccounts.MaxExpiration != 0 {
lowBound := time.Hour
upBound := time.Duration(1<<32) * time.Second
if s.Authentication.ServiceAccounts.MaxExpiration < lowBound ||
s.Authentication.ServiceAccounts.MaxExpiration > upBound {
return options, fmt.Errorf("the serviceaccount max expiration must be between 1 hour to 2^32 seconds")
}
}
s.ServiceAccountIssuer, err = serviceaccount.JWTTokenGenerator(s.Authentication.ServiceAccounts.Issuer, sk)
if err != nil {
return options, fmt.Errorf("failed to build token generator: %v", err)
}
s.ServiceAccountTokenMaxExpiration = s.Authentication.ServiceAccounts.MaxExpiration
}
if s.Etcd.EnableWatchCache {
klog.V(2).Infof("Initializing cache sizes based on %dMB limit", s.GenericServerRunOptions.TargetRAMMB)
sizes := cachesize.NewHeuristicWatchCacheSizes(s.GenericServerRunOptions.TargetRAMMB)
if userSpecified, err := serveroptions.ParseWatchCacheSizes(s.Etcd.WatchCacheSizes); err == nil {
for resource, size := range userSpecified {
sizes[resource] = size
}
}
s.Etcd.WatchCacheSizes, err = serveroptions.WriteWatchCacheSizes(sizes)
if err != nil {
return options, err
}
}
// TODO: remove when we stop supporting the legacy group version.
if s.APIEnablement.RuntimeConfig != nil {
for key, value := range s.APIEnablement.RuntimeConfig {
if key == "v1" || strings.HasPrefix(key, "v1/") ||
key == "api/v1" || strings.HasPrefix(key, "api/v1/") {
delete(s.APIEnablement.RuntimeConfig, key)
s.APIEnablement.RuntimeConfig["/v1"] = value
}
if key == "api/legacy" {
delete(s.APIEnablement.RuntimeConfig, key)
}
}
}
options.ServerRunOptions = s
return options, nil
}
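// A minimal sketch of the pattern completedServerRunOptions enforces (all
// names here are invented): anything that requires completed options takes
// the wrapper type, so a call path that skips Complete() fails to compile.
type sketchOptions struct{ host string }

type sketchCompleted struct{ *sketchOptions }

func sketchComplete(o *sketchOptions) sketchCompleted {
	if o.host == "" {
		o.host = "localhost" // defaulting happens exactly once, here
	}
	return sketchCompleted{o}
}

func sketchRun(o sketchCompleted) string {
	// sketchRun(&sketchOptions{}) would be a compile error, which is the point.
	return "serving on " + o.host
}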
func buildServiceResolver(enabledAggregatorRouting bool, hostname string, informer clientgoinformers.SharedInformerFactory) webhook.ServiceResolver {
var serviceResolver webhook.ServiceResolver
if enabledAggregatorRouting {
serviceResolver = aggregatorapiserver.NewEndpointServiceResolver(
informer.Core().V1().Services().Lister(),
informer.Core().V1().Endpoints().Lister(),
)
} else {
serviceResolver = aggregatorapiserver.NewClusterIPServiceResolver(
informer.Core().V1().Services().Lister(),
)
}
// resolve kubernetes.default.svc locally
if localHost, err := url.Parse(hostname); err == nil {
serviceResolver = aggregatorapiserver.NewLoopbackServiceResolver(serviceResolver, localHost)
}
return serviceResolver
}
func readCAorNil(file string) ([]byte, error) {
if len(file) == 0 {
return nil, nil
}
return ioutil.ReadFile(file)
}


@@ -1,45 +0,0 @@
package(default_visibility = ["//visibility:public"])
load(
"@io_bazel_rules_go//go:def.bzl",
"go_binary",
"go_library",
)
load("//pkg/version:def.bzl", "version_x_defs")
go_binary(
name = "kube-controller-manager",
embed = [":go_default_library"],
pure = "on",
x_defs = version_x_defs(),
)
go_library(
name = "go_default_library",
srcs = ["controller-manager.go"],
importpath = "k8s.io/kubernetes/cmd/kube-controller-manager",
deps = [
"//cmd/kube-controller-manager/app:go_default_library",
"//pkg/client/metrics/prometheus:go_default_library",
"//pkg/util/reflector/prometheus:go_default_library",
"//pkg/util/workqueue/prometheus:go_default_library",
"//pkg/version/prometheus:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/logs:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//cmd/kube-controller-manager/app:all-srcs",
],
tags = ["automanaged"],
)


@@ -1,68 +0,0 @@
approvers:
- deads2k
- lavalamp
- mikedanese
- sttts
reviewers:
- '249043822'
- a-robinson
- brendandburns
- caesarxuchao
- cjcullen
- dalanlan
- david-mcmahon
- davidopp
- ddysher
- deads2k
- derekwaynecarr
- eparis
- erictune
- errordeveloper
- feiskyer
- fgrzadkowski
- ghodss
- girishkalele
- gmarek
- goltermann
- humblec
- ingvagabund
- janetkuo
- jayunit100
- jbeda
- jdef
- jlowdermilk
- johscheuer
- jsafrane
- jszczepkowski
- justinsb
- lavalamp
- liggitt
- luxas
- madhusudancs
- markturansky
- mfanjie
- mikedanese
- mml
- mqliang
- mwielgus
- nikhiljindal
- ping035627
- piosz
- pmorie
- quinton-hoole
- resouer
- roberthbailey
- rootfs
- rrati
- saad-ali
- screeley44
- sjenning
- smarterclayton
- soltysh
- spiffxp
- sttts
- thockin
- timothysc
- wojtek-t
labels:
- sig/api-machinery


@@ -1,171 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library", "go_test")
go_library(
name = "go_default_library",
srcs = [
"apps.go",
"autoscaling.go",
"batch.go",
"bootstrap.go",
"certificates.go",
"cloudproviders.go",
"controllermanager.go",
"core.go",
"import_known_versions.go",
"plugins.go",
"policy.go",
"rbac.go",
],
importpath = "k8s.io/kubernetes/cmd/kube-controller-manager/app",
visibility = ["//visibility:public"],
deps = [
"//cmd/controller-manager/app:go_default_library",
"//cmd/controller-manager/app/options:go_default_library",
"//cmd/kube-controller-manager/app/config:go_default_library",
"//cmd/kube-controller-manager/app/options:go_default_library",
"//pkg/apis/apps/install:go_default_library",
"//pkg/apis/authentication/install:go_default_library",
"//pkg/apis/authorization/install:go_default_library",
"//pkg/apis/autoscaling/install:go_default_library",
"//pkg/apis/batch/install:go_default_library",
"//pkg/apis/certificates/install:go_default_library",
"//pkg/apis/coordination/install:go_default_library",
"//pkg/apis/core/install:go_default_library",
"//pkg/apis/events/install:go_default_library",
"//pkg/apis/extensions/install:go_default_library",
"//pkg/apis/policy/install:go_default_library",
"//pkg/apis/rbac/install:go_default_library",
"//pkg/apis/scheduling/install:go_default_library",
"//pkg/apis/settings/install:go_default_library",
"//pkg/apis/storage/install:go_default_library",
"//pkg/cloudprovider/providers:go_default_library",
"//pkg/controller:go_default_library",
"//pkg/controller/apis/config:go_default_library",
"//pkg/controller/bootstrap:go_default_library",
"//pkg/controller/certificates/approver:go_default_library",
"//pkg/controller/certificates/cleaner:go_default_library",
"//pkg/controller/certificates/rootcacertpublisher:go_default_library",
"//pkg/controller/certificates/signer:go_default_library",
"//pkg/controller/clusterroleaggregation:go_default_library",
"//pkg/controller/cronjob:go_default_library",
"//pkg/controller/daemon:go_default_library",
"//pkg/controller/deployment:go_default_library",
"//pkg/controller/disruption:go_default_library",
"//pkg/controller/endpoint:go_default_library",
"//pkg/controller/garbagecollector:go_default_library",
"//pkg/controller/job:go_default_library",
"//pkg/controller/namespace:go_default_library",
"//pkg/controller/nodeipam:go_default_library",
"//pkg/controller/nodeipam/ipam:go_default_library",
"//pkg/controller/nodelifecycle:go_default_library",
"//pkg/controller/podautoscaler:go_default_library",
"//pkg/controller/podautoscaler/metrics:go_default_library",
"//pkg/controller/podgc:go_default_library",
"//pkg/controller/replicaset:go_default_library",
"//pkg/controller/replication:go_default_library",
"//pkg/controller/resourcequota:go_default_library",
"//pkg/controller/route:go_default_library",
"//pkg/controller/service:go_default_library",
"//pkg/controller/serviceaccount:go_default_library",
"//pkg/controller/statefulset:go_default_library",
"//pkg/controller/ttl:go_default_library",
"//pkg/controller/ttlafterfinished:go_default_library",
"//pkg/controller/volume/attachdetach:go_default_library",
"//pkg/controller/volume/expand:go_default_library",
"//pkg/controller/volume/persistentvolume:go_default_library",
"//pkg/controller/volume/pvcprotection:go_default_library",
"//pkg/controller/volume/pvprotection:go_default_library",
"//pkg/features:go_default_library",
"//pkg/quota/v1/generic:go_default_library",
"//pkg/quota/v1/install:go_default_library",
"//pkg/serviceaccount:go_default_library",
"//pkg/util/configz:go_default_library",
"//pkg/util/flag:go_default_library",
"//pkg/util/metrics:go_default_library",
"//pkg/version:go_default_library",
"//pkg/version/verflag:go_default_library",
"//pkg/volume:go_default_library",
"//pkg/volume/awsebs:go_default_library",
"//pkg/volume/azure_dd:go_default_library",
"//pkg/volume/azure_file:go_default_library",
"//pkg/volume/cinder:go_default_library",
"//pkg/volume/csi:go_default_library",
"//pkg/volume/fc:go_default_library",
"//pkg/volume/flexvolume:go_default_library",
"//pkg/volume/flocker:go_default_library",
"//pkg/volume/gcepd:go_default_library",
"//pkg/volume/glusterfs:go_default_library",
"//pkg/volume/host_path:go_default_library",
"//pkg/volume/iscsi:go_default_library",
"//pkg/volume/local:go_default_library",
"//pkg/volume/nfs:go_default_library",
"//pkg/volume/photon_pd:go_default_library",
"//pkg/volume/portworx:go_default_library",
"//pkg/volume/quobyte:go_default_library",
"//pkg/volume/rbd:go_default_library",
"//pkg/volume/scaleio:go_default_library",
"//pkg/volume/storageos:go_default_library",
"//pkg/volume/util:go_default_library",
"//pkg/volume/vsphere_volume:go_default_library",
"//staging/src/k8s.io/api/core/v1:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/runtime/schema:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/runtime:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/uuid:go_default_library",
"//staging/src/k8s.io/apimachinery/pkg/util/wait:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/healthz:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server/mux:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/feature:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/flag:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/util/globalflag:go_default_library",
"//staging/src/k8s.io/client-go/discovery/cached:go_default_library",
"//staging/src/k8s.io/client-go/dynamic:go_default_library",
"//staging/src/k8s.io/client-go/informers:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/restmapper:go_default_library",
"//staging/src/k8s.io/client-go/scale:go_default_library",
"//staging/src/k8s.io/client-go/tools/leaderelection:go_default_library",
"//staging/src/k8s.io/client-go/tools/leaderelection/resourcelock:go_default_library",
"//staging/src/k8s.io/client-go/util/cert:go_default_library",
"//staging/src/k8s.io/client-go/util/flowcontrol:go_default_library",
"//staging/src/k8s.io/cloud-provider:go_default_library",
"//staging/src/k8s.io/csi-api/pkg/client/clientset/versioned:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/custom_metrics:go_default_library",
"//staging/src/k8s.io/metrics/pkg/client/external_metrics:go_default_library",
"//vendor/github.com/spf13/cobra:go_default_library",
"//vendor/k8s.io/klog:go_default_library",
"//vendor/k8s.io/utils/exec:go_default_library",
],
)
go_test(
name = "go_default_test",
srcs = ["controller_manager_test.go"],
embed = [":go_default_library"],
deps = [
"//staging/src/k8s.io/apimachinery/pkg/util/sets:go_default_library",
"//vendor/github.com/stretchr/testify/assert:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [
":package-srcs",
"//cmd/kube-controller-manager/app/config:all-srcs",
"//cmd/kube-controller-manager/app/options:all-srcs",
"//cmd/kube-controller-manager/app/testing:all-srcs",
],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)


@@ -1,97 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app implements a server that runs a set of active
// components. This includes replication controllers, service endpoints and
// nodes.
//
package app
import (
"fmt"
"net/http"
"time"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/util/flowcontrol"
"k8s.io/kubernetes/pkg/controller/daemon"
"k8s.io/kubernetes/pkg/controller/deployment"
"k8s.io/kubernetes/pkg/controller/replicaset"
"k8s.io/kubernetes/pkg/controller/statefulset"
)
func startDaemonSetController(ctx ControllerContext) (http.Handler, bool, error) {
if !ctx.AvailableResources[schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "daemonsets"}] {
return nil, false, nil
}
dsc, err := daemon.NewDaemonSetsController(
ctx.InformerFactory.Apps().V1().DaemonSets(),
ctx.InformerFactory.Apps().V1().ControllerRevisions(),
ctx.InformerFactory.Core().V1().Pods(),
ctx.InformerFactory.Core().V1().Nodes(),
ctx.ClientBuilder.ClientOrDie("daemon-set-controller"),
flowcontrol.NewBackOff(1*time.Second, 15*time.Minute),
)
if err != nil {
return nil, true, fmt.Errorf("error creating DaemonSets controller: %v", err)
}
go dsc.Run(int(ctx.ComponentConfig.DaemonSetController.ConcurrentDaemonSetSyncs), ctx.Stop)
return nil, true, nil
}
func startStatefulSetController(ctx ControllerContext) (http.Handler, bool, error) {
if !ctx.AvailableResources[schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "statefulsets"}] {
return nil, false, nil
}
go statefulset.NewStatefulSetController(
ctx.InformerFactory.Core().V1().Pods(),
ctx.InformerFactory.Apps().V1().StatefulSets(),
ctx.InformerFactory.Core().V1().PersistentVolumeClaims(),
ctx.InformerFactory.Apps().V1().ControllerRevisions(),
ctx.ClientBuilder.ClientOrDie("statefulset-controller"),
).Run(1, ctx.Stop)
return nil, true, nil
}
func startReplicaSetController(ctx ControllerContext) (http.Handler, bool, error) {
if !ctx.AvailableResources[schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "replicasets"}] {
return nil, false, nil
}
go replicaset.NewReplicaSetController(
ctx.InformerFactory.Apps().V1().ReplicaSets(),
ctx.InformerFactory.Core().V1().Pods(),
ctx.ClientBuilder.ClientOrDie("replicaset-controller"),
replicaset.BurstReplicas,
).Run(int(ctx.ComponentConfig.ReplicaSetController.ConcurrentRSSyncs), ctx.Stop)
return nil, true, nil
}
func startDeploymentController(ctx ControllerContext) (http.Handler, bool, error) {
if !ctx.AvailableResources[schema.GroupVersionResource{Group: "apps", Version: "v1", Resource: "deployments"}] {
return nil, false, nil
}
dc, err := deployment.NewDeploymentController(
ctx.InformerFactory.Apps().V1().Deployments(),
ctx.InformerFactory.Apps().V1().ReplicaSets(),
ctx.InformerFactory.Core().V1().Pods(),
ctx.ClientBuilder.ClientOrDie("deployment-controller"),
)
if err != nil {
return nil, true, fmt.Errorf("error creating Deployment controller: %v", err)
}
go dc.Run(int(ctx.ComponentConfig.DeploymentController.ConcurrentDeploymentSyncs), ctx.Stop)
return nil, true, nil
}


@@ -1,109 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app implements a server that runs a set of active
// components. This includes replication controllers, service endpoints and
// nodes.
//
package app
import (
"net/http"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/client-go/dynamic"
"k8s.io/client-go/scale"
"k8s.io/kubernetes/pkg/controller/podautoscaler"
"k8s.io/kubernetes/pkg/controller/podautoscaler/metrics"
resourceclient "k8s.io/metrics/pkg/client/clientset/versioned/typed/metrics/v1beta1"
"k8s.io/metrics/pkg/client/custom_metrics"
"k8s.io/metrics/pkg/client/external_metrics"
)
func startHPAController(ctx ControllerContext) (http.Handler, bool, error) {
if !ctx.AvailableResources[schema.GroupVersionResource{Group: "autoscaling", Version: "v1", Resource: "horizontalpodautoscalers"}] {
return nil, false, nil
}
if ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerUseRESTClients {
// use the new-style clients if support for custom metrics is enabled
return startHPAControllerWithRESTClient(ctx)
}
return startHPAControllerWithLegacyClient(ctx)
}
func startHPAControllerWithRESTClient(ctx ControllerContext) (http.Handler, bool, error) {
clientConfig := ctx.ClientBuilder.ConfigOrDie("horizontal-pod-autoscaler")
hpaClient := ctx.ClientBuilder.ClientOrDie("horizontal-pod-autoscaler")
apiVersionsGetter := custom_metrics.NewAvailableAPIsGetter(hpaClient.Discovery())
// invalidate the discovery information roughly once per resync interval so
// our API information is *at most* two resync intervals old.
go custom_metrics.PeriodicallyInvalidate(
apiVersionsGetter,
ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerSyncPeriod.Duration,
ctx.Stop)
metricsClient := metrics.NewRESTMetricsClient(
resourceclient.NewForConfigOrDie(clientConfig),
custom_metrics.NewForConfig(clientConfig, ctx.RESTMapper, apiVersionsGetter),
external_metrics.NewForConfigOrDie(clientConfig),
)
return startHPAControllerWithMetricsClient(ctx, metricsClient)
}
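// A worked illustration of the staleness bound asserted in the comment above
// (plain ints, to avoid assuming extra imports in this file): invalidating
// once per resync interval keeps cached API information at most two
// intervals old.
func sketchMaxStalenessSeconds(resyncSeconds int) int {
	return 2 * resyncSeconds // e.g. a 15s resync bounds staleness at ~30s
}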
func startHPAControllerWithLegacyClient(ctx ControllerContext) (http.Handler, bool, error) {
hpaClient := ctx.ClientBuilder.ClientOrDie("horizontal-pod-autoscaler")
metricsClient := metrics.NewHeapsterMetricsClient(
hpaClient,
metrics.DefaultHeapsterNamespace,
metrics.DefaultHeapsterScheme,
metrics.DefaultHeapsterService,
metrics.DefaultHeapsterPort,
)
return startHPAControllerWithMetricsClient(ctx, metricsClient)
}
func startHPAControllerWithMetricsClient(ctx ControllerContext, metricsClient metrics.MetricsClient) (http.Handler, bool, error) {
hpaClient := ctx.ClientBuilder.ClientOrDie("horizontal-pod-autoscaler")
hpaClientConfig := ctx.ClientBuilder.ConfigOrDie("horizontal-pod-autoscaler")
// we don't use cached discovery because DiscoveryScaleKindResolver does its own caching,
// so we want to re-fetch every time we actually ask for it
scaleKindResolver := scale.NewDiscoveryScaleKindResolver(hpaClient.Discovery())
scaleClient, err := scale.NewForConfig(hpaClientConfig, ctx.RESTMapper, dynamic.LegacyAPIPathResolverFunc, scaleKindResolver)
if err != nil {
return nil, false, err
}
go podautoscaler.NewHorizontalController(
hpaClient.CoreV1(),
scaleClient,
hpaClient.AutoscalingV1(),
ctx.RESTMapper,
metricsClient,
ctx.InformerFactory.Autoscaling().V1().HorizontalPodAutoscalers(),
ctx.InformerFactory.Core().V1().Pods(),
ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerSyncPeriod.Duration,
ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerDownscaleStabilizationWindow.Duration,
ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerTolerance,
ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerCPUInitializationPeriod.Duration,
ctx.ComponentConfig.HPAController.HorizontalPodAutoscalerInitialReadinessDelay.Duration,
).Run(ctx.Stop)
return nil, true, nil
}


@@ -1,57 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app implements a server that runs a set of active
// components. This includes replication controllers, service endpoints and
// nodes.
//
package app
import (
"fmt"
"net/http"
"k8s.io/apimachinery/pkg/runtime/schema"
"k8s.io/kubernetes/pkg/controller/cronjob"
"k8s.io/kubernetes/pkg/controller/job"
)
func startJobController(ctx ControllerContext) (http.Handler, bool, error) {
if !ctx.AvailableResources[schema.GroupVersionResource{Group: "batch", Version: "v1", Resource: "jobs"}] {
return nil, false, nil
}
go job.NewJobController(
ctx.InformerFactory.Core().V1().Pods(),
ctx.InformerFactory.Batch().V1().Jobs(),
ctx.ClientBuilder.ClientOrDie("job-controller"),
).Run(int(ctx.ComponentConfig.JobController.ConcurrentJobSyncs), ctx.Stop)
return nil, true, nil
}
func startCronJobController(ctx ControllerContext) (http.Handler, bool, error) {
if !ctx.AvailableResources[schema.GroupVersionResource{Group: "batch", Version: "v1beta1", Resource: "cronjobs"}] {
return nil, false, nil
}
cjc, err := cronjob.NewCronJobController(
ctx.ClientBuilder.ClientOrDie("cronjob-controller"),
)
if err != nil {
return nil, true, fmt.Errorf("error creating CronJob controller: %v", err)
}
go cjc.Run(ctx.Stop)
return nil, true, nil
}


@@ -1,126 +0,0 @@
/*
Copyright 2016 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
// Package app implements a server that runs a set of active
// components. This includes replication controllers, service endpoints and
// nodes.
//
package app
import (
"fmt"
"os"
"k8s.io/klog"
"net/http"
"k8s.io/apimachinery/pkg/runtime/schema"
kubeoptions "k8s.io/kubernetes/cmd/kube-controller-manager/app/options"
"k8s.io/kubernetes/pkg/controller/certificates/approver"
"k8s.io/kubernetes/pkg/controller/certificates/cleaner"
"k8s.io/kubernetes/pkg/controller/certificates/signer"
)
func startCSRSigningController(ctx ControllerContext) (http.Handler, bool, error) {
if !ctx.AvailableResources[schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"}] {
return nil, false, nil
}
if ctx.ComponentConfig.CSRSigningController.ClusterSigningCertFile == "" || ctx.ComponentConfig.CSRSigningController.ClusterSigningKeyFile == "" {
return nil, false, nil
}
// Deprecation warning for old defaults.
//
// * If the signing cert and key are the default paths but the files
// exist, warn that the paths need to be specified explicitly in a
// later release and the defaults will be removed. We don't expect this
// to be the case.
//
// * If the signing cert and key are default paths but the files don't exist,
// bail out of startController without logging.
var keyFileExists, keyUsesDefault, certFileExists, certUsesDefault bool
_, err := os.Stat(ctx.ComponentConfig.CSRSigningController.ClusterSigningCertFile)
certFileExists = !os.IsNotExist(err)
certUsesDefault = (ctx.ComponentConfig.CSRSigningController.ClusterSigningCertFile == kubeoptions.DefaultClusterSigningCertFile)
_, err = os.Stat(ctx.ComponentConfig.CSRSigningController.ClusterSigningKeyFile)
keyFileExists = !os.IsNotExist(err)
keyUsesDefault = (ctx.ComponentConfig.CSRSigningController.ClusterSigningKeyFile == kubeoptions.DefaultClusterSigningKeyFile)
switch {
case (keyFileExists && keyUsesDefault) || (certFileExists && certUsesDefault):
klog.Warningf("You might be using flag defaulting for --cluster-signing-cert-file and" +
" --cluster-signing-key-file. These defaults are deprecated and will be removed" +
" in a subsequent release. Please pass these options explicitly.")
case (!keyFileExists && keyUsesDefault) && (!certFileExists && certUsesDefault):
// This is what we expect right now if people aren't
// setting up the signing controller. This isn't
// actually a problem since the signer is not a
// required controller.
return nil, false, nil
default:
// Note that '!filesExist && !usesDefaults' is obviously
// operator error. We don't handle this case here and instead
// allow it to be handled by NewCSR... below.
}
c := ctx.ClientBuilder.ClientOrDie("certificate-controller")
signer, err := signer.NewCSRSigningController(
c,
ctx.InformerFactory.Certificates().V1beta1().CertificateSigningRequests(),
ctx.ComponentConfig.CSRSigningController.ClusterSigningCertFile,
ctx.ComponentConfig.CSRSigningController.ClusterSigningKeyFile,
ctx.ComponentConfig.CSRSigningController.ClusterSigningDuration.Duration,
)
if err != nil {
return nil, false, fmt.Errorf("failed to start certificate controller: %v", err)
}
go signer.Run(1, ctx.Stop)
return nil, true, nil
}
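// A standalone sketch of the three-way decision above; the default paths and
// helper are illustrative (the real defaults live in kubeoptions).
func sketchSigningFileCheck(certFile, keyFile, defaultCert, defaultKey string) string {
	exists := func(p string) bool {
		_, err := os.Stat(p)
		return !os.IsNotExist(err)
	}
	certDefault, keyDefault := certFile == defaultCert, keyFile == defaultKey
	switch {
	case (exists(keyFile) && keyDefault) || (exists(certFile) && certDefault):
		return "warn: relying on deprecated default signing paths; pass the flags explicitly"
	case !exists(keyFile) && keyDefault && !exists(certFile) && certDefault:
		return "skip: signer not configured, which is not an error"
	default:
		return "proceed: let NewCSRSigningController validate the explicit paths"
	}
}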
func startCSRApprovingController(ctx ControllerContext) (http.Handler, bool, error) {
if !ctx.AvailableResources[schema.GroupVersionResource{Group: "certificates.k8s.io", Version: "v1beta1", Resource: "certificatesigningrequests"}] {
return nil, false, nil
}
approver := approver.NewCSRApprovingController(
ctx.ClientBuilder.ClientOrDie("certificate-controller"),
ctx.InformerFactory.Certificates().V1beta1().CertificateSigningRequests(),
)
go approver.Run(1, ctx.Stop)
return nil, true, nil
}
func startCSRCleanerController(ctx ControllerContext) (http.Handler, bool, error) {
cleaner := cleaner.NewCSRCleanerController(
ctx.ClientBuilder.ClientOrDie("certificate-controller").CertificatesV1beta1().CertificateSigningRequests(),
ctx.InformerFactory.Certificates().V1beta1().CertificateSigningRequests(),
)
go cleaner.Run(1, ctx.Stop)
return nil, true, nil
}
func startRootCACertPublisher(ctx ControllerContext) (http.Handler, bool, error) {
return nil, false, nil
}


@@ -1,29 +0,0 @@
load("@io_bazel_rules_go//go:def.bzl", "go_library")
go_library(
name = "go_default_library",
srcs = ["config.go"],
importpath = "k8s.io/kubernetes/cmd/kube-controller-manager/app/config",
visibility = ["//visibility:public"],
deps = [
"//pkg/controller/apis/config:go_default_library",
"//staging/src/k8s.io/apiserver/pkg/server:go_default_library",
"//staging/src/k8s.io/client-go/kubernetes:go_default_library",
"//staging/src/k8s.io/client-go/rest:go_default_library",
"//staging/src/k8s.io/client-go/tools/record:go_default_library",
],
)
filegroup(
name = "package-srcs",
srcs = glob(["**"]),
tags = ["automanaged"],
visibility = ["//visibility:private"],
)
filegroup(
name = "all-srcs",
srcs = [":package-srcs"],
tags = ["automanaged"],
visibility = ["//visibility:public"],
)

Some files were not shown because too many files have changed in this diff.