Merge branch 'master' into spell_fixes

pull/6771/head
Madhuri Kumari 2017-12-27 09:49:06 +05:30 committed by GitHub
commit bf283d4d41
4083 changed files with 128909 additions and 543989 deletions

@@ -1,11 +1,5 @@
> ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> For 1.9 Features: set Milestone to `1.9` and Base Branch to `release-1.9`
> ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
> NOTE: Please check the “Allow edits from maintainers” box (see image below) to
> [allow reviewers to fix problems](https://help.github.com/articles/allowing-changes-to-a-pull-request-branch-created-from-a-fork/) on your patch and speed up the review process.
> NOTE: After opening the PR, please *un-check and re-check* the ["Allow edits from maintainers"](https://help.github.com/articles/allowing-changes-to-a-pull-request-branch-created-from-a-fork/) box so that maintainers can work on your patch and speed up the review process. This is a temporary workaround to address a known issue with GitHub.
>
> Please delete this note before submitting the pull request.
>
> NOTE: After opening the PR, please *un-check and re-check* the "Allow edits from maintainers" box. This is a temporary workaround to address a known issue with GitHub.
![Allow edits from maintainers checkbox](https://help.github.com/assets/images/help/pull_requests/allow-maintainers-to-make-edits-sidebar-checkbox.png)

@@ -18,16 +18,21 @@ defaults:
scope:
path: ""
values:
fullversion: "v1.8.0"
version: "v1.8"
fullversion: "v1.9.0"
version: "v1.9"
githubbranch: "master"
docsbranch: "master"
versions:
- fullversion: "v1.8.0"
version: "v1.8"
githubbranch: "v1.8.0"
docsbranch: "release-1.8"
- fullversion: "v1.9.0"
version: "v1.9"
githubbranch: "v1.9.0"
docsbranch: "release-1.9"
url: https://kubernetes.io/docs/home/
- fullversion: "v1.8.4"
version: "v1.8"
githubbranch: "v1.8.4"
docsbranch: "release-1.8"
url: https://v1-8.docs.kubernetes.io/docs/home/
- fullversion: "v1.7.6"
version: "v1.7"
githubbranch: "v1.7.6"
@@ -43,11 +48,6 @@ defaults:
githubbranch: "v1.5.7"
docsbranch: "release-1.5"
url: https://v1-5.docs.kubernetes.io/docs/
- fullversion: "v1.4.12"
version: "v1.4"
githubbranch: "v1.4.12"
docsbranch: "release-1.4"
url: https://v1-4.docs.kubernetes.io/docs/
deprecated: false
currentUrl: https://kubernetes.io/docs/home/
nextUrl: http://kubernetes-io-vnext-staging.netlify.com/

@@ -24,6 +24,7 @@ toc:
- title: Extending Kubernetes
section:
- docs/concepts/overview/extending.md
- title: Extending the Kubernetes API
section:
- docs/concepts/api-extension/apiserver-aggregation.md

@@ -0,0 +1,9 @@
id: annotation
name: Annotation
full-link: docs/concepts/overview/working-with-objects/annotations
tags:
- fundamental
short-description: >
A key-value pair that is used to attach arbitrary non-identifying metadata to objects.
long-description: >
The metadata in an annotation can be small or large, structured or unstructured, and can include characters not permitted by labels. Clients such as tools and libraries can retrieve this metadata.

@@ -0,0 +1,13 @@
id: configmap
name: ConfigMap
full-link: /docs/tasks/configure-pod-container/configmap/
related:
- pod
- secret
tags:
- core-object
short-description: >
An API object used to store non-confidential data in key-value pairs. Can be consumed as environment variables, command-line arguments, or config files in a {% glossary_tooltip text="volume" term_id="volume" %}.
long-description: >
Allows you to decouple environment-specific configuration from your {% glossary_tooltip text="container images" term_id="container" %}, so that your applications are easily portable.
When storing confidential data use a [Secret](https://kubernetes.io/docs/concepts/configuration/secret/).
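As a minimal sketch of that consumption path (all names here are illustrative, not part of this change), a ConfigMap and a Pod that reads one of its keys into an environment variable might look like:

```yaml
apiVersion: v1
kind: ConfigMap
metadata:
  name: app-config          # hypothetical name
data:
  LOG_LEVEL: "debug"
---
apiVersion: v1
kind: Pod
metadata:
  name: app
spec:
  containers:
  - name: app
    image: k8s.gcr.io/busybox
    command: ["sh", "-c", "echo $LOG_LEVEL && sleep 3600"]
    env:
    - name: LOG_LEVEL        # exposed to the container as an env var
      valueFrom:
        configMapKeyRef:
          name: app-config   # the ConfigMap above
          key: LOG_LEVEL
```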

@@ -0,0 +1,10 @@
id: daemonset
name: DaemonSet
full-link: /docs/concepts/workloads/controllers/daemonset
tags:
- fundamental
- workload
short-description: >
Ensures a copy of a {% glossary_tooltip term_id="pod" %} is running across a set of nodes in a {% glossary_tooltip term_id="cluster" %}.
long-description: >
Used to deploy system daemons such as log collectors and monitoring agents that typically must run on every {% glossary_tooltip term_id="node" %}.
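For illustration (names are hypothetical; `apps/v1` is the workloads API version that went GA in the 1.9 release this change targets), a log-collector DaemonSet has this shape:

```yaml
apiVersion: apps/v1
kind: DaemonSet
metadata:
  name: log-collector
spec:
  selector:
    matchLabels:
      name: log-collector
  template:
    metadata:
      labels:
        name: log-collector   # must match spec.selector
    spec:
      containers:
      - name: collector
        image: k8s.gcr.io/fluentd-gcp:1.16   # a log-collection image referenced elsewhere in this change
```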

@@ -0,0 +1,15 @@
id: horizontal-pod-autoscaler
name: Horizontal Pod Autoscaler
full-link: /docs/tasks/run-application/horizontal-pod-autoscale/
aka:
- HPA
related:
- pod
tags:
- operation
short-description: >
An API resource that automatically scales the number of pod replicas based on targeted CPU
utilization or custom metric targets.
long-description: >
HPA is typically used with replication controllers, deployments or replica sets and cannot be
applied to objects that cannot be scaled, for example DaemonSets.
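A minimal sketch of such a resource (the target Deployment name is hypothetical), using the `autoscaling/v1` API:

```yaml
apiVersion: autoscaling/v1
kind: HorizontalPodAutoscaler
metadata:
  name: web-hpa
spec:
  scaleTargetRef:            # the scalable object to resize
    apiVersion: apps/v1
    kind: Deployment
    name: web
  minReplicas: 2
  maxReplicas: 10
  targetCPUUtilizationPercentage: 80   # scale to hold average CPU near 80%
```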

@@ -0,0 +1,9 @@
id: image
name: Image
tags:
- fundamental
short-description: >
Stored instance of a container that holds a set of software needed to run an application.
long-description: >
A way of packaging software that allows it to be stored in a container registry, pulled to a local system, and run as an application. Metadata is included in the image that can indicate what executable to run, who built it, and other information.

_data/glossary/job.yaml (new file)

@@ -0,0 +1,9 @@
id: job
name: Job
full-link: /docs/concepts/workloads/controllers/jobs-run-to-completion
tags:
- core-object
short-description: >
A finite or batch task that runs to completion.
long-description: >
Creates one or more {% glossary_tooltip term_id="pod" %} objects and ensures that a specified number of them successfully terminate. As Pods successfully complete, the Job tracks the successful completions.
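A minimal sketch of such a batch task (the classic pi computation; names are illustrative):

```yaml
apiVersion: batch/v1
kind: Job
metadata:
  name: pi
spec:
  template:
    spec:
      containers:
      - name: pi
        image: perl
        command: ["perl", "-Mbignum=bpi", "-wle", "print bpi(2000)"]
      restartPolicy: Never   # completed Pods are not restarted
```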

@@ -0,0 +1,10 @@
id: kubelet
name: Kubelet
full-link: docs/reference/generated/kubelet
tags:
- fundamental
- core-object
short-description: >
An agent that runs on each node in the cluster. It makes sure that containers are running in a pod.
long-description: >
The kubelet takes a set of PodSpecs that are provided through various mechanisms and ensures that the containers described in those PodSpecs are running and healthy. The kubelet doesn't manage containers that were not created by Kubernetes.

@@ -0,0 +1,9 @@
id: labels
name: Labels
full-link: /docs/concepts/overview/working-with-objects/labels
tags:
- fundamental
short-description: >
Used to tag objects with identifying attributes that are meaningful and relevant to users.
long-description: >
Labels are key/value pairs that are attached to objects, such as pods. They can be used to organize and to select subsets of objects.
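For example (hypothetical labels), a Pod carries labels in its metadata, and subsets of objects can then be selected with commands such as `kubectl get pods -l app=web`:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: web
  labels:
    app: web          # identifying attributes, meaningful to users
    tier: frontend
spec:
  containers:
  - name: web
    image: nginx
```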

@@ -0,0 +1,9 @@
id: namespace
name: Namespace
full-link: /docs/concepts/overview/working-with-objects/namespaces
tags:
- fundamental
short-description: >
An abstraction used by Kubernetes to support virtual clusters on the same physical {% glossary_tooltip term_id="cluster" %}.
long-description: >
Namespaces are used to organize objects in a cluster and provide a way to divide cluster resources. Names of resources need to be unique within a namespace, but not across namespaces.

@@ -0,0 +1,13 @@
id: network-policy
name: Network Policy
full-link: /docs/concepts/services-networking/network-policies/
aka:
- NetworkPolicy
tags:
- networking
- architecture
- extension
short-description: >
A specification of how groups of Pods are allowed to communicate with each other and with other network endpoints.
long-description: >
Network Policies help you declaratively configure which Pods are allowed to connect to each other, which namespaces are allowed to communicate, and more specifically which port numbers to enforce each policy on. `NetworkPolicy` resources use labels to select Pods and define rules which specify what traffic is allowed to the selected Pods. Network Policies are implemented by a supported network plugin provided by a network provider. Be aware that creating a network resource without a controller to implement it will have no effect.
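A minimal sketch (labels, port, and names are hypothetical): a policy that admits traffic to `app: db` Pods only from `app: frontend` Pods, and only on one TCP port:

```yaml
apiVersion: networking.k8s.io/v1
kind: NetworkPolicy
metadata:
  name: allow-frontend
spec:
  podSelector:            # the Pods this policy protects
    matchLabels:
      app: db
  ingress:
  - from:
    - podSelector:        # the Pods allowed to connect
        matchLabels:
          app: frontend
    ports:
    - protocol: TCP
      port: 5432
```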

_data/glossary/node.yaml (new file)

@@ -0,0 +1,13 @@
id: node
name: Node
full-link: /docs/concepts/architecture/nodes/
aka:
- Minion
related:
- deployment
tags:
- fundamental
short-description: >
A node is a worker machine in Kubernetes.
long-description: >
A worker machine may be a VM or physical machine, depending on the cluster. It has the {% glossary_tooltip text="Services" term_id="service" %} necessary to run {% glossary_tooltip text="Pods" term_id="pod" %} and is managed by the master components. The {% glossary_tooltip text="Services" term_id="service" %} on a node include Docker, kubelet and kube-proxy.

@@ -0,0 +1,15 @@
id: persistent-volume-claim
name: Persistent Volume Claim
full-link: /docs/concepts/storage/persistent-volumes/
related:
- persistent-volume
- statefulset
- deployment
- pod
tags:
- core-object
- storage
short-description: >
Claims storage resources defined in a {% glossary_tooltip text="PersistentVolume (PV)" term_id="persistent-volume" %} so that it can be mounted as a volume in a container.
long-description: |
Specifies the amount of storage, how the storage will be accessed (read-only, read-write and/or exclusive) and how it is reclaimed (retained, recycled or deleted). Details of the storage itself are in the PersistentVolume specification.
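A minimal sketch of a claim (the name and size are illustrative):

```yaml
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: data-claim
spec:
  accessModes:
  - ReadWriteOnce      # how the storage will be accessed
  resources:
    requests:
      storage: 8Gi     # the amount of storage requested
```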

@@ -0,0 +1,17 @@
id: persistent-volume
name: Persistent Volume
full-link: /docs/concepts/storage/persistent-volumes/
related:
- statefulset
- deployment
- persistent-volume-claim
- pod
tags:
- core-object
- storage
short-description: >
An API object that represents a piece of storage in the cluster. Available as a general, pluggable resource that persists beyond the lifecycle of any individual {% glossary_tooltip term_id="pod" %}.
long-description: |
PersistentVolumes (PVs) provide an API that abstracts details of how storage is provided from how it is consumed.
PVs are used directly in scenarios where storage can be created ahead of time (static provisioning).
For scenarios that require on-demand storage (dynamic provisioning), PersistentVolumeClaims (PVCs) are used instead.
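For illustration, a statically provisioned PV might be declared like this (the name is hypothetical, and the `hostPath` backend is for demonstration only; real clusters typically use a networked storage backend):

```yaml
apiVersion: v1
kind: PersistentVolume
metadata:
  name: pv0001
spec:
  capacity:
    storage: 8Gi
  accessModes:
  - ReadWriteOnce
  persistentVolumeReclaimPolicy: Retain
  hostPath:
    path: /mnt/data
```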

@@ -2,9 +2,10 @@ id: platform-developer
name: Platform Developer
aka:
- Kubernetes Developer
- Extension Developer
tags:
- user-type
short-description: >
A person who customizes the Kubernetes platform to fit the needs of their project.
long-description: >
A platform developer may, for example, use [Custom Resources](/docs/concepts/api-extension/custom-resources/) or [Extend the Kubernetes API with the aggregation layer](/docs/concepts/api-extension/apiserver-aggregation/) to add functionality to their instance of Kubernetes, specifically for their application.
A platform developer may, for example, use [Custom Resources](/docs/concepts/api-extension/custom-resources/) or [Extend the Kubernetes API with the aggregation layer](/docs/concepts/api-extension/apiserver-aggregation/) to add functionality to their instance of Kubernetes, specifically for their application. Some Platform Developers are also {% glossary_tooltip text="contributors" term_id="contributor" %} and develop extensions which are contributed to the Kubernetes community. Others develop closed-source commercial or site-specific extensions.

@@ -0,0 +1,19 @@
id: pod-security-policy
name: Pod Security Policy
full-link: /docs/concepts/policy/pod-security-policy/
related:
- pod
- container
- sidecar
- deployment
- statefulset
- security
tags:
- core-object
- fundamental
short-description: >
Enables fine-grained authorization of {% glossary_tooltip term_id="pod" %} creation and updates.
long-description: >
A cluster-level resource that controls security-sensitive aspects of the Pod specification.
The `PodSecurityPolicy` objects define a set of conditions that a Pod must run with in order to be accepted into the system, as well as defaults for the related fields.
Pod Security Policy control is implemented as an optional admission controller.
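As a sketch (the policy name and rules are illustrative; at the time of this change, PodSecurityPolicy lived in the `extensions/v1beta1` API group):

```yaml
apiVersion: extensions/v1beta1
kind: PodSecurityPolicy
metadata:
  name: restricted
spec:
  privileged: false            # disallow privileged Pods
  runAsUser:
    rule: MustRunAsNonRoot
  seLinux:
    rule: RunAsAny
  fsGroup:
    rule: RunAsAny
  supplementalGroups:
    rule: RunAsAny
  volumes:                     # only these volume types are permitted
  - configMap
  - secret
  - persistentVolumeClaim
```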

@@ -0,0 +1,13 @@
id: replica-set
name: ReplicaSet
full-link: https://kubernetes.io/docs/concepts/workloads/controllers/replicaset/
related:
- replication-controller
tags:
- core-object
- architecture
short-description: >
ReplicaSet is the next-generation Replication Controller.
long-description: >
ReplicaSet, like ReplicationController, ensures that a specified number of pod replicas are running at one time.
ReplicaSet supports the new set-based selector requirements as described in the labels user guide, whereas a Replication Controller only supports equality-based selector requirements.
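To make the selector difference concrete, here is a sketch of a ReplicaSet using a set-based `matchExpressions` requirement, which a Replication Controller cannot express (labels and names are hypothetical):

```yaml
apiVersion: apps/v1
kind: ReplicaSet
metadata:
  name: frontend
spec:
  replicas: 3
  selector:
    matchLabels:
      tier: frontend
    matchExpressions:                 # set-based requirement
    - {key: environment, operator: In, values: [production, staging]}
  template:
    metadata:
      labels:
        tier: frontend
        environment: production       # satisfies both requirements
    spec:
      containers:
      - name: nginx
        image: nginx
```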

@@ -0,0 +1,9 @@
id: replication-controller
name: Replication Controller
tags:
- fundamental
short-description: >
Kubernetes service that ensures a specific number of instances of a pod are always running.
long-description: >
Will automatically add or remove running instances of a pod, based on a set value for that pod. Allows the pod to return to the defined number of instances if pods are deleted or if too many are started by mistake.

@@ -0,0 +1,11 @@
id: resource-quota
name: Resource Quotas
full-link: /docs/concepts/policy/resource-quotas/
tags:
- fundamental
- operation
- architecture
short-description: >
Provides constraints that limit aggregate resource consumption per {% glossary_tooltip term_id="namespace" %}.
long-description: >
Limits the quantity of objects that can be created in a namespace by type, as well as the total amount of compute resources that may be consumed by resources in that project.
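A minimal sketch of such a quota (the namespace and limits are hypothetical):

```yaml
apiVersion: v1
kind: ResourceQuota
metadata:
  name: team-quota
  namespace: team-a
spec:
  hard:
    pods: "20"              # cap on object count by type
    requests.cpu: "4"       # cap on aggregate compute requests
    requests.memory: 8Gi
```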

_data/glossary/secret.yml (new file)

@@ -0,0 +1,15 @@
id: secret
name: Secret
full-link: /docs/concepts/configuration/secret/
related:
- pods
- volume
tags:
- core-object
- security
short-description: >
Stores sensitive information, such as passwords, OAuth tokens, and ssh keys.
long-description: >
Allows for more control over how sensitive information is used and reduces the risk of accidental exposure, including [encryption](https://kubernetes.io/docs/tasks/administer-cluster/encrypt-data/#ensure-all-secrets-are-encrypted) at rest.
A {% glossary_tooltip text="Pod" term_id="pod" %} references the secret as a file in a volume mount or by the kubelet pulling images for a pod.
Secrets are great for confidential data and [ConfigMaps](https://kubernetes.io/docs/tasks/configure-pod-container/configmap/) for non-confidential data.
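A minimal sketch of a Secret (the name is hypothetical; `data` values are base64-encoded):

```yaml
apiVersion: v1
kind: Secret
metadata:
  name: db-credentials
type: Opaque
data:
  password: cGFzc3dvcmQ=   # base64 for "password"
```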

@@ -0,0 +1,9 @@
id: security-context
name: Security Context
full-link: /docs/tasks/configure-pod-container/security-context/
tags:
- security
short-description: >
The securityContext field defines privilege and access control settings for a Pod or Container, including the runtime UID and GID.
long-description: >
The securityContext field in a {% glossary_tooltip term_id="pod" %} (applying to all containers) or container is used to set the user (runAsUser) and group (fsGroup), capabilities, privilege settings, and security policies (SELinux/AppArmor/Seccomp) that container processes use.
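A sketch showing both levels (all values are illustrative):

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: secured
spec:
  securityContext:       # Pod-level: applies to all containers
    runAsUser: 1000
    fsGroup: 2000
  containers:
  - name: app
    image: k8s.gcr.io/busybox
    command: ["sh", "-c", "id && sleep 3600"]
    securityContext:     # container-level settings
      allowPrivilegeEscalation: false
```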

@@ -0,0 +1,10 @@
id: service-account
name: Service Account
full-link: /docs/tasks/configure-pod-container/configure-service-account/
tags:
- fundamental
- core-object
short-description: >
Provides an identity for processes that run in a {% glossary_tooltip text="Pod" term_id="pod" %}.
long-description: >
When processes inside Pods access the cluster, they are authenticated by the API server as a particular service account, for example, `default`. When you create a Pod, if you do not specify a service account, it is automatically assigned the default service account in the same {% glossary_tooltip text="namespace" term_id="namespace" %}.
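For illustration (the account name is hypothetical and must already exist in the Pod's namespace), a Pod opts into a specific identity like this:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: app
spec:
  serviceAccountName: build-robot   # omit this field to get "default"
  containers:
  - name: app
    image: k8s.gcr.io/busybox
    command: ["sleep", "3600"]
```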

@@ -5,4 +5,4 @@ tags:
short-description: >
An extension API that enables applications running in Kubernetes clusters to easily use external managed software offerings, such as a datastore service offered by a cloud provider.
long-description: >
Service Catalog provides a way to list, provision, and bind with external {% glossary_tooltip text="Managed Services" term_id="managed-service" %} from {% glossary_tooltip text="Service Brokers" term_id="service-broker" %} without needing detailed knowledge about how those services are created or managed.
It provides a way to list, provision, and bind with external {% glossary_tooltip text="Managed Services" term_id="managed-service" %} from {% glossary_tooltip text="Service Brokers" term_id="service-broker" %} without needing detailed knowledge about how those services are created or managed.

@@ -0,0 +1,17 @@
id: volume
name: Volume
full-link: /docs/concepts/storage/volumes/
related:
- pod
- container
- secret
tags:
- core-object
- fundamental
short-description: >
A directory containing data, accessible to the containers in a {% glossary_tooltip text="pod" term_id="pod" %}.
long-description: >
A Kubernetes volume lives as long as the {% glossary_tooltip text="pod" term_id="pod" %} that encloses it.
Consequently, a volume outlives any {% glossary_tooltip text="containers" term_id="container" %} that run within the
{% glossary_tooltip text="pod" term_id="pod" %}, and data is preserved across
{% glossary_tooltip text="container" term_id="container" %} restarts.
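A sketch of the simplest case (names are illustrative): an `emptyDir` volume that survives container restarts but is removed with the Pod:

```yaml
apiVersion: v1
kind: Pod
metadata:
  name: cache-pod
spec:
  containers:
  - name: app
    image: k8s.gcr.io/busybox
    command: ["sleep", "3600"]
    volumeMounts:
    - name: scratch
      mountPath: /cache    # where the volume appears in the container
  volumes:
  - name: scratch
    emptyDir: {}           # data persists across restarts of the container above
```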

@@ -28,8 +28,8 @@ toc:
- title: API Reference
section:
- title: v1.8
path: /docs/api-reference/v1.8/
- title: v1.9
path: /docs/reference/generated/kubernetes-api/v1.9/
- docs/reference/labels-annotations-taints.md
- title: OpenAPI and Swagger
section:
@@ -40,37 +40,35 @@ toc:
- title: Federation API
section:
- docs/reference/federation/v1/operations.html
- docs/reference/federation/v1/definitions.html
- docs/reference/federation/v1beta1/operations.html
- docs/reference/federation/v1beta1/definitions.html
- docs/reference/federation/extensions/v1beta1/operations.html
- docs/reference/federation/extensions/v1beta1/definitions.html
- docs/reference/generated/federation/v1/operations.html
- docs/reference/generated/federation/v1/definitions.html
- docs/reference/generated/federation/extensions/v1beta1/operations.html
- docs/reference/generated/federation/extensions/v1beta1/definitions.html
- title: kubectl CLI
section:
- docs/user-guide/kubectl-overview.md
- docs/user-guide/kubectl/index.md
- title: v1.8 Commands
path: /docs/user-guide/kubectl/v1.8/
- title: v1.7 Commands
path: /docs/user-guide/kubectl/v1.7/
- title: v1.6 Commands
path: /docs/user-guide/kubectl/v1.6/
- title: v1.5 Commands
path: /docs/user-guide/kubectl/v1.5/
- docs/user-guide/docker-cli-to-kubectl.md
- docs/user-guide/kubectl-conventions.md
- docs/user-guide/jsonpath.md
- docs/user-guide/kubectl-cheatsheet.md
- docs/reference/kubectl/overview.md
- docs/reference/generated/kubectl/kubectl.md
- title: kubectl Commands
path: /docs/reference/generated/kubectl/kubectl-commands.html
- docs/reference/kubectl/docker-cli-to-kubectl.md
- docs/reference/kubectl/conventions.md
- docs/reference/kubectl/jsonpath.md
- docs/reference/kubectl/cheatsheet.md
- title: Cloud Controller Manager
- title: Setup Tools Reference
section:
- docs/reference/generated/cloud-controller-manager.md
- title: Setup Tools
section:
- docs/reference/generated/kubeadm.md
- title: Kubeadm
section:
- docs/reference/setup-tools/kubeadm/kubeadm.md
- docs/reference/setup-tools/kubeadm/kubeadm-init.md
- docs/reference/setup-tools/kubeadm/kubeadm-join.md
- docs/reference/setup-tools/kubeadm/kubeadm-upgrade.md
- docs/reference/setup-tools/kubeadm/kubeadm-config.md
- docs/reference/setup-tools/kubeadm/kubeadm-reset.md
- docs/reference/setup-tools/kubeadm/kubeadm-token.md
- docs/reference/setup-tools/kubeadm/kubeadm-version.md
- docs/reference/setup-tools/kubeadm/kubeadm-alpha.md
- title: Kubefed
section:
- docs/reference/generated/kubefed.md
@@ -80,7 +78,7 @@ toc:
- docs/reference/generated/kubefed_unjoin.md
- docs/reference/generated/kubefed_version.md
- title: Config Reference
- title: Command-line Tools Reference
section:
- docs/reference/generated/kubelet.md
- docs/admin/kubelet-authentication-authorization.md
@@ -89,6 +87,7 @@ toc:
- docs/reference/generated/kube-proxy.md
- docs/reference/generated/kube-scheduler.md
- docs/admin/kubelet-tls-bootstrapping.md
- docs/reference/generated/cloud-controller-manager.md
- docs/reference/generated/federation-apiserver.md
- docs/reference/generated/federation-controller-manager.md

@@ -15,7 +15,7 @@ toc:
- docs/tasks/configure-pod-container/assign-cpu-resource.md
- docs/tasks/configure-pod-container/quality-service-pod.md
- docs/tasks/configure-pod-container/assign-cpu-ram-container.md
- docs/tasks/configure-pod-container/opaque-integer-resource.md
- docs/tasks/configure-pod-container/extended-resource.md
- docs/tasks/configure-pod-container/configure-volume-storage.md
- docs/tasks/configure-pod-container/configure-persistent-volume-storage.md
- docs/tasks/configure-pod-container/configure-projected-volume-storage.md
@@ -128,7 +128,7 @@ toc:
- docs/tasks/administer-cluster/quota-memory-cpu-namespace.md
- docs/tasks/administer-cluster/quota-pod-namespace.md
- docs/tasks/administer-cluster/quota-api-object.md
- docs/tasks/administer-cluster/opaque-integer-resource-node.md
- docs/tasks/administer-cluster/extended-resource-node.md
- docs/tasks/administer-cluster/cpu-management-policies.md
- docs/tasks/administer-cluster/access-cluster-api.md
- docs/tasks/administer-cluster/access-cluster-services.md
@@ -140,9 +140,11 @@ toc:
- docs/tasks/administer-cluster/upgrade-1-6.md
- docs/tasks/administer-cluster/kubeadm-upgrade-1-7.md
- docs/tasks/administer-cluster/kubeadm-upgrade-1-8.md
- docs/tasks/administer-cluster/kubeadm-upgrade-1-9.md
- docs/tasks/administer-cluster/namespaces.md
- docs/tasks/administer-cluster/namespaces-walkthrough.md
- docs/tasks/administer-cluster/dns-horizontal-autoscaling.md
- docs/tasks/administer-cluster/coredns.md
- docs/tasks/administer-cluster/safely-drain-node.md
- docs/tasks/administer-cluster/cpu-memory-limit.md
- docs/tasks/administer-cluster/out-of-resource.md
@@ -168,6 +170,7 @@ toc:
- docs/tasks/administer-cluster/configure-multiple-schedulers.md
- docs/tasks/administer-cluster/ip-masq-agent.md
- docs/tasks/administer-cluster/dns-custom-nameservers.md
- docs/tasks/administer-cluster/pvc-protection.md
- title: Federation - Run an App on Multiple Clusters
section:

@@ -6,7 +6,7 @@ toc:
- title: Native Tools
section:
- title: Kubectl
path: /docs/user-guide/kubectl/
path: /docs/reference/kubectl/overview/
- title: Kubeadm
path: /docs/getting-started-guides/kubeadm
- title: Kubefed

@@ -11,8 +11,8 @@
type: 0,
name: 'Puppet',
logo: 'puppet',
link: 'https://puppet.com/blog/managing-kubernetes-configuration-puppet',
blurb: 'The Puppet module for Kubernetes makes it easy to manage Pods, Replication Controllers, Services and more in Kubernetes, and to build domain-specific interfaces to one\'s Kubernetes configuration.'
link: 'https://puppet.com/blog/announcing-kream-and-new-kubernetes-helm-and-docker-modules',
blurb: 'We\'ve developed tools and products to make your adoption of Kubernetes as efficient as possible, covering your full workflow cycle from development to production. And now Puppet Pipelines for Containers is your complete DevOps dashboard for Kubernetes.'
},
{
type: 0,
@@ -104,35 +104,56 @@
logo: 'diamanti',
link: 'https://www.diamanti.com/products/',
blurb: 'Diamanti deploys containers with guaranteed performance using Kubernetes in the first hyperconverged appliance purpose built for containerized applications.'
},
},
{
type: 0,
name: 'Aporeto',
logo: 'aporeto',
link: 'https://aporeto.com/trireme',
blurb: 'Aporeto makes cloud-native applications secure by default without impacting developer velocity and works at any scale, on any cloud.'
},
},
{
type: 2,
name: 'Giant Swarm',
logo: 'giant_swarm',
link: 'https://giantswarm.io',
blurb: 'Giant Swarm provides fully-managed Kubernetes Clusters in your location of choice, so you can focus on your product.'
},
},
{
type: 3,
name: 'Giant Swarm',
logo: 'giant_swarm',
link: 'https://giantswarm.io/product/',
blurb: 'Giant Swarm - Managed Kubernetes on AWS'
},
{
type: 3,
name: 'Hasura',
logo: 'hasura',
link: 'https://hasura.io',
blurb: 'Hasura - Hasura'
},
{
type: 3,
name: 'Mirantis',
logo: 'mirantis',
link: 'https://www.mirantis.com/software/kubernetes/',
blurb: 'Mirantis - Mirantis Cloud Platform'
},
},
{
type: 2,
name: 'Mirantis',
logo: 'mirantis',
link: 'https://content.mirantis.com/Containerizing-OpenStack-on-Kubernetes-Video-Landing-Page.html',
blurb: 'Mirantis builds and manages private clouds with open source software such as OpenStack, deployed as containers orchestrated by Kubernetes.'
},
{
type: 0,
name: 'Kubernetic',
logo: 'kubernetic',
link: 'https://kubernetic.com/',
blurb: 'Kubernetic is a Kubernetes Desktop client that simplifies and democratizes cluster management for DevOps.'
},
},
{
type: 1,
name: 'Reactive Ops',
@@ -196,6 +217,13 @@
link: 'http://www.inwinstack.com/index.php/en/solutions-en/',
blurb: 'Our container service leverages OpenStack-based infrastructure and its container orchestration engine Magnum to manage Kubernetes clusters.'
},
{
type: 3,
name: 'InwinSTACK',
logo: 'inwinstack',
link: 'https://github.com/inwinstack/kube-ansible',
blurb: 'inwinSTACK - kube-ansible'
},
{
type: 1,
name: 'Semantix',
@@ -428,12 +456,33 @@
blurb: 'Kenzan is a software engineering and full-service consulting firm that provides customized, end-to-end solutions that drive change through digital transformation.'
},
{
type: 0,
type: 3,
name: 'Kublr',
logo: 'kublr',
link: 'http://kublr.com',
blurb: 'SSimplify and speed up the management of your containerized applications at scale.'
blurb: 'Kublr - Accelerate and control the deployment, scaling, monitoring and management of your containerized applications.'
},
{
type: 3,
name: 'Nirmata',
logo: 'nirmata',
link: 'https://www.nirmata.com/',
blurb: 'Nirmata - Nirmata Managed Kubernetes'
},
{
type: 3,
name: 'TenxCloud',
logo: 'tenxcloud',
link: 'https://tenxcloud.com',
blurb: 'TenxCloud - TenxCloud Container Engine (TCE)'
},
{
type: 3,
name: 'Twistlock',
logo: 'twistlock',
link: 'https://www.twistlock.com/',
blurb: 'Twistlock - Twistlock'
},
{
type: 0,
name: 'Endocode AG',
@@ -614,7 +663,7 @@
name: 'Sphere Software, LLC',
logo: 'spheresoftware',
link: 'https://sphereinc.com/kubernetes/',
blurb: 'Architect and implement scalable applications using Kubernetes in Google Cloud, AWS, and Azure with our team of experts.'
blurb: 'The Sphere Software team of experts allows customers to architect and implement scalable applications using Kubernetes in Google Cloud, AWS, and Azure.'
},
{
type: 1,
@@ -684,6 +733,13 @@
name: 'Canonical',
logo: 'canonical',
link: 'https://www.ubuntu.com/kubernetes',
blurb: 'The Canonical Distribution of Kubernetes enables you to operate Kubernetes clusters on demand on any major public cloud and private infrastructure.'
},
{
type: 2,
name: 'Canonical',
logo: 'canonical',
link: 'https://www.ubuntu.com/kubernetes',
blurb: 'Canonical Ltd. - Canonical Distribution of Kubernetes'
},
{
@@ -707,6 +763,13 @@
link: 'https://www.ibm.com/cloud/container-service',
blurb: 'IBM - IBM Cloud Container Service'
},
{
type: 2,
name: 'IBM',
logo: 'ibm',
link: 'https://www.ibm.com/cloud-computing/bluemix/containers',
blurb: 'The IBM Bluemix Container Service combines Docker and Kubernetes to deliver powerful tools, an intuitive user experience, and built-in security and isolation to enable rapid delivery of applications, all while leveraging Cloud Services including cognitive capabilities from Watson.'
},
{
type: 3,
name: 'Samsung',
@@ -721,6 +784,13 @@
link: 'https://www.ibm.com/cloud-computing/products/ibm-cloud-private/',
blurb: 'IBM - IBM Cloud Private'
},
{
type: 3,
name: 'Kinvolk',
logo: 'kinvolk',
link: 'https://github.com/kinvolk/kube-spawn',
blurb: 'Kinvolk - kube-spawn'
},
{
type: 3,
name: 'Heptio',
@@ -728,6 +798,13 @@
link: 'https://aws.amazon.com/quickstart/architecture/heptio-kubernetes',
blurb: 'Heptio - AWS-Quickstart'
},
{
type: 2,
name: 'Heptio',
logo: 'heptio',
link: 'http://heptio.com',
blurb: 'Heptio helps businesses of all sizes get closer to the vibrant Kubernetes community.'
},
{
type: 3,
name: 'StackPointCloud',
@@ -735,6 +812,13 @@
link: 'https://stackpoint.io',
blurb: 'StackPointCloud - StackPointCloud'
},
{
type: 2,
name: 'StackPointCloud',
logo: 'stackpoint',
link: 'https://stackpoint.io',
blurb: 'StackPointCloud offers a wide range of support plans for managed Kubernetes clusters built through its universal control plane for Kubernetes Anywhere.'
},
{
type: 3,
name: 'Caicloud',
@@ -742,6 +826,13 @@
link: 'https://caicloud.io/products/compass',
blurb: 'Caicloud - Compass'
},
{
type: 2,
name: 'Caicloud',
logo: 'caicloud',
link: 'https://caicloud.io/',
blurb: 'Founded by ex-Googlers and early Kubernetes contributors, Caicloud leverages Kubernetes to provide container products which have successfully served Fortune 500 enterprises, and further utilizes Kubernetes as a vehicle to deliver ultra-speed deep learning experience.'
},
{
type: 3,
name: 'Alibaba',
@@ -763,6 +854,13 @@
link: 'http://www.huaweicloud.com/product/cce.html',
blurb: 'Huawei - Huawei Cloud Container Engine'
},
{
type: 2,
name: 'Huawei',
logo: 'huawei',
link: 'http://developer.huawei.com/ict/en/site-paas',
blurb: 'FusionStage is an enterprise-grade Platform as a Service product, the core of which is based on mainstream open source container technology including Kubernetes and Docker.'
},
{
type: 3,
name: 'Google',
@@ -788,7 +886,7 @@
type: 3,
name: 'Loodse',
logo: 'loodse',
link: 'https://loodse.io',
link: 'https://loodse.com',
blurb: 'Loodse - Kubermatic Container Engine'
},
{
@@ -798,6 +896,13 @@
link: 'https://github.com/Azure/acs-engine',
blurb: 'Microsoft - Azure acs-engine'
},
{
type: 3,
name: 'Microsoft',
logo: 'microsoft',
link: 'https://docs.microsoft.com/en-us/azure/aks/',
blurb: 'Microsoft - Azure Container Service AKS'
},
{
type: 3,
name: 'Oracle',
@@ -847,6 +952,13 @@
link: 'https://github.com/kubernetes-incubator/bootkube',
blurb: 'CoreOS - bootkube'
},
{
type: 2,
name: 'CoreOS',
logo: 'coreos',
link: 'https://coreos.com/',
blurb: 'Tectonic is the enterprise-ready Kubernetes product, by CoreOS. It adds key features to allow you to manage, update, and control clusters in production.'
},
{
type: 3,
name: 'Weaveworks',
@@ -861,6 +973,13 @@
link: 'http://www.wise2c.com/solution',
blurb: 'Wise2C Technology - WiseCloud'
},
{
type: 2,
name: 'Wise2c',
logo: 'wise2c',
link: 'http://www.wise2c.com',
blurb: 'Using Kubernetes to provide continuous IT delivery and an enterprise-grade container management solution to the financial industry.'
},
{
type: 3,
name: 'Docker',
@@ -896,6 +1015,20 @@
link: 'https://cloud.vmware.com/pivotal-container-service',
blurb: 'Pivotal/VMware - Pivotal Container Service (PKS)'
},
{
type: 3,
name: 'Alauda',
logo: 'alauda',
link: 'http://www.alauda.cn/product/detail/id/68.html',
blurb: 'Alauda - Alauda EE'
},
{
type: 3,
name: 'EasyStack',
logo: 'easystack',
link: 'https://easystack.cn/eks/',
blurb: 'EasyStack - EasyStack Kubernetes Service (EKS)'
},
{
type: 3,
name: 'CoreOS',
@@ -910,6 +1043,20 @@
link: 'https://gopaddle.io',
blurb: 'goPaddle is a DevOps platform for Kubernetes developers. It simplifies the Kubernetes Service creation and maintenance through source to image conversion, build & version management, team management, access controls and audit logs, single click provision of Kubernetes Clusters across multiple clouds from a single console.'
},
{
type: 0,
name: 'Vexxhost',
logo: 'vexxhost',
link: 'https://vexxhost.com/public-cloud/container-services/kubernetes/',
blurb: 'VEXXHOST offers a high-performance container management service powered by Kubernetes and OpenStack Magnum.'
},
{
type: 1,
name: 'Component Soft',
logo: 'componentsoft',
link: 'https://www.componentsoft.eu/?p=3925',
blurb: 'Component Soft offers training, consultation and support around open cloud technologies like Kubernetes, Docker, Openstack and Ceph.'
},
{
type: 0,
name: 'Datera',
@@ -945,6 +1092,41 @@
link: 'https://docs.portworx.com/scheduler/kubernetes/install.html',
blurb: 'With Portworx, you can manage any database or stateful service on any infrastructure using Kubernetes. You get a single data management layer for all of your stateful services, no matter where they run.'
},
{
type: 1,
name: 'Object Computing, Inc.',
logo: 'objectcomputing',
link: 'https://objectcomputing.com/services/software-engineering/devops/kubernetes-services',
blurb: 'Our portfolio of DevOps consulting services includes Kubernetes support, development, and training.'
},
{
type: 1,
name: 'Isotoma',
logo: 'isotoma',
link: 'https://www.isotoma.com/blog/2017/10/24/containerisation-tips-for-using-kubernetes-with-aws/',
blurb: 'Based in the North of England, Amazon partners who are delivering Kubernetes solutions on AWS for replatforming and native development.'
},
{
type: 1,
name: 'Servian',
logo: 'servian',
link: 'https://www.servian.com/cloud-and-technology/',
blurb: 'Based in Australia, Servian provides advisory, consulting and managed services to support both application and data centric kubernetes use cases.'
},
{
type: 1,
name: 'Redzara',
logo: 'redzara',
link: 'http://redzara.com/cloud-service',
blurb: 'Redzara has wide and in-depth experience in Cloud automation, now taking one giant step by providing container service offering and services to our customers.'
},
{
type: 0,
name: 'Dataspine',
logo: 'dataspine',
link: 'http://dataspine.xyz/',
blurb: 'Dataspine is building a secure, elastic and serverless deployment platform for production ML/AI workloads on top of k8s.'
},
{
type: 0,
name: 'Logdna',

@@ -6415,7 +6415,7 @@ The resulting set of endpoints can be viewed as:<br>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">names</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Names by which this image is known. e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Names by which this image is known. e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">true</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">string array</p></td>
<td class="tableblock halign-left valign-top"></td>

@@ -6671,7 +6671,7 @@ The resulting set of endpoints can be viewed as:<br>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">names</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Names by which this image is known. e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Names by which this image is known. e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">true</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">string array</p></td>
<td class="tableblock halign-left valign-top"></td>

@@ -6850,7 +6850,7 @@ The resulting set of endpoints can be viewed as:<br>
<tbody>
<tr>
<td class="tableblock halign-left valign-top"><p class="tableblock">names</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Names by which this image is known. e.g. ["gcr.io/google_containers/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">Names by which this image is known. e.g. ["k8s.gcr.io/hyperkube:v1.0.7", "dockerhub.io/google_containers/hyperkube:v1.0.7"]</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">true</p></td>
<td class="tableblock halign-left valign-top"><p class="tableblock">string array</p></td>
<td class="tableblock halign-left valign-top"></td>

@@ -25,6 +25,7 @@ This renders the definition of the glossary term inside a `<div>`, preserving Ma
| --- | --- | --- |
| `term_id` | N/A (Required) | The `id` of the glossary term whose definition will be used. (This `id` is the same as the filename of the term, i.e. `_data/glossary/<ID>.yml`.) |
| `length` | "short" | Specifies which term definition should be used ("short" for the `short-definition`, "long" for `long-description`, "all" when both should be included). |
| `prepend` | "Service Catalog is" | A prefix which can be attached in front of a term's short definition (which is one or more sentence fragments). |
#### (2) `glossary_tooltip` tag

@@ -55,11 +55,17 @@ module Jekyll
class Definition < Base
VALID_PARAM_NAMES = [
:term_id,
:length
:length,
:prepend,
].freeze
def render(context)
include_snippet(context)
text = include_snippet(context)
if @args[:prepend]
text.sub(/<p>(.)/) { "<p>#{@args[:prepend]} #{$1.downcase}" }
else
text
end
end
end
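As a usage sketch matching the parameter table above (using a term id that exists in this change), a page could render `{% glossary_definition term_id="service-catalog" length="short" prepend="Service Catalog is" %}` to turn the term's sentence-fragment definition into a complete sentence; the `sub` call above lowercases the first letter of the included snippet so that the prefix reads naturally.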

@@ -76,8 +76,9 @@
/docs/api-reference/v1.5/* https://v1-5.docs.kubernetes.io/docs/reference/ 301
/docs/api-reference/v1.6/* https://v1-6.docs.kubernetes.io/docs/reference/ 301
/docs/api-reference/v1.7/* https://v1-7.docs.kubernetes.io/docs/reference/ 301
/docs/api-reference/v1/definitions/ /docs/api-reference/v1.8/ 301
/docs/api-reference/v1/operations/ /docs/api-reference/v1.8/ 301
/docs/api-reference/v1.8/* https://v1-8.docs.kubernetes.io/docs/api-reference/v1.8/:splat 301
/docs/api-reference/v1/definitions/ /docs/api-reference/v1.9/ 301
/docs/api-reference/v1/operations/ /docs/api-reference/v1.9/ 301
/docs/concepts/abstractions/controllers/garbage-collection/ /docs/concepts/workloads/controllers/garbage-collection/ 301
/docs/concepts/abstractions/controllers/petsets/ /docs/concepts/workloads/controllers/statefulset/ 301
@@ -138,8 +139,8 @@
/docs/contribute/style-guide/ /docs/home/contribute/style-guide/ 301
/docs/contribute/write-new-topic/ /docs/home/contribute/write-new-topic/ 301
/docs/deprecate/ /ddocs/reference/deprecation-policy/ 301
/docs/deprecated/ /ddocs/reference/deprecation-policy/ 301
/docs/deprecate/ /docs/reference/deprecation-policy/ 301
/docs/deprecated/ /docs/reference/deprecation-policy/ 301
/docs/deprecation-policy/ /docs/reference/deprecation-policy/ 301
/docs/federation/api-reference/ /docs/reference/federation/v1/operations/ 301
@@ -165,6 +166,7 @@
/docs/getting-started-guides/juju/ /docs/getting-started-guides/ubuntu/installation/ 301
/docs/getting-started-guides/kargo/ /docs/getting-started-guides/kubespray/ 301
/docs/getting-started-guides/kubeadm/ /docs/setup/independent/create-cluster-kubeadm/ 301
/docs/getting-started-guides/kubectl/ /docs/reference/kubectl/overview/ 301
/docs/getting-started-guides/logging/ /docs/concepts/cluster-administration/logging/ 301
/docs/getting-started-guides/logging-elasticsearch/ /docs/tasks/debug-application-cluster/logging-elasticsearch-kibana/ 301
/docs/getting-started-guides/meanstack/ https://medium.com/google-cloud/running-a-mean-stack-on-google-cloud-platform-with-kubernetes-149ca81c2b5d/ 301
@@ -183,16 +185,20 @@
/docs/hellonode/ /docs/tutorials/stateless-application/hello-minikube/ 301
/docs/home/coreos/ /docs/getting-started-guides/coreos/ 301
/docs/home/deprecation-policy/ /docs/reference/deprecation-policy/ 301
/docs/reference/federation/extensions/v1beta1/definitions/ /docs/reference/generated/federation/extensions/v1beta1/definitions/ 301
/docs/reference/federation/extensions/v1beta1/operations/ /docs/reference/generated/federation/extensions/v1beta1/operations/ 301
/docs/reference/federation/v1/definitions/ /docs/reference/generated/federation/v1/definitions/ 301
/docs/reference/federation/v1/operations/ /docs/reference/generated/federation/v1/operations/ 301
/docs/reference/federation/v1beta1/definitions/ /docs/reference/federation/extensions/v1beta1/definitions/ 301
/docs/reference/federation/v1beta1/operations/ /docs/reference/federation/extensions/v1beta1/operations/ 301
/docs/reference/generated/kubectl/kubectl-options/ /docs/reference/generated/kubectl/kubectl/ 301
/docs/reporting-security-issues/ /security/ 301
/docs/resources-reference/1_5/* /docs/resources-reference/v1.5/ 301
/docs/resources-reference/1_5/* https://v1-5.docs.kubernetes.io/docs/resources-reference/v1.5/ 301
/docs/resources-reference/v1.5/node_modules/* https://v1-5.docs.kubernetes.io/docs/resources-reference/v1.5/ 301
/docs/resources-reference/1_6/* /docs/resources-reference/v1.6/ 301
/docs/resources-reference/1_7/* /docs/resources-reference/v1.7/ 301
/docs/resources-reference/v1.5/node_modules/* https://v1-5.docs.kubernetes.io/docs/resources-reference/v1.5/ 301
/docs/resources-reference/v1.8/* /docs/api-reference/v1.8/:splat 301
/docs/roadmap/ https://github.com/kubernetes/kubernetes/milestones/ 301
@@ -229,6 +235,7 @@
/docs/tasks/configure-pod-container/downward-api-volume-expose-pod-information/ /docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/ 301
/docs/tasks/configure-pod-container/environment-variable-expose-pod-information/ /docs/tasks/inject-data-application/environment-variable-expose-pod-information/ 301
/docs/tasks/configure-pod-container/limit-range/ /docs/tasks/administer-cluster/cpu-memory-limit/ 301
/docs/tasks/configure-pod-container/opaque-integer-resource/ /docs/concepts/configuration/manage-compute-resources-container/#opaque-integer-resources-alpha-feature 301
/docs/tasks/configure-pod-container/projected-volume/ /docs/tasks/configure-pod-container/configure-projected-volume-storage/ 301
/docs/tasks/configure-pod-container/romana-network-policy/ /docs/tasks/administer-cluster/romana-network-policy/ 301
/docs/tasks/configure-pod-container/weave-network-policy/ /docs/tasks/administer-cluster/weave-network-policy/ 301
@@ -292,6 +299,7 @@
/docs/user-guide/debugging-services/ /docs/tasks/debug-application-cluster/debug-service/ 301
/docs/user-guide/deploying-applications/ /docs/tasks/run-application/run-stateless-application-deployment/ 301
/docs/user-guide/deployments/ /docs/concepts/workloads/controllers/deployment/ 301
/docs/user-guide/docker-cli-to-kubectl/ /docs/reference/kubectl/docker-cli-to-kubectl/
/docs/user-guide/downward-api/ /docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/ 301
/docs/user-guide/downward-api/README/ /docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/ 301
/docs/user-guide/downward-api/volume/ /docs/tasks/inject-data-application/downward-api-volume-expose-pod-information/ 301
@@ -324,13 +332,20 @@
/docs/user-guide/ingress/ /docs/concepts/services-networking/ingress/ 301
/docs/user-guide/ingress.md /docs/concepts/services-networking/ingress/ 301
/docs/user-guide/introspection-and-debugging/ /docs/tasks/debug-application-cluster/debug-application-introspection/ 301
/docs/user-guide/jsonpath/ /docs/reference/kubectl/jsonpath/
/docs/user-guide/jobs/ /docs/concepts/workloads/controllers/jobs-run-to-completion/ 301
/docs/user-guide/jobs/expansions/ /docs/tasks/job/parallel-processing-expansion/ 301
/docs/user-guide/jobs/work-queue-1/ /docs/tasks/job/coarse-parallel-processing-work-queue/ 301
/docs/user-guide/jobs/work-queue-2/ /docs/tasks/job/fine-parallel-processing-work-queue/ 301
/docs/user-guide/kubeconfig-file/ /docs/tasks/access-application-cluster/authenticate-across-clusters-kubeconfig/ 301
/docs/user-guide/kubectl-overview/ /docs/reference/kubectl/overview/
/docs/user-guide/kubectl/ /docs/reference/generated/kubectl/kubectl-options/
/docs/user-guide/kubectl/v1.8/* https://v1-8.docs.kubernetes.io/docs/reference/generated/kubectl/kubectl-commands/:splat 301
/docs/user-guide/kubectl/v1.9/* /docs/reference/generated/kubectl/kubectl-commands/:splat 301
/docs/user-guide/kubectl-conventions/ /docs/reference/kubectl/conventions/
/docs/user-guide/kubectl-cheatsheet/ /docs/reference/kubectl/cheatsheet/
/docs/user-guide/kubectl/1_5/* https://v1-5.docs.kubernetes.io/docs/user-guide/kubectl/v1.5/ 301
/docs/user-guide/kubectl/kubectl_*/ /docs/user-guide/kubectl/v1.7/#:splat 301
/docs/user-guide/kubectl/kubectl_*/ /docs/reference/generated/kubectl/kubectl-commands#:splat 301
/docs/user-guide/kubectl/v1.5/node_modules/* https://v1-5.docs.kubernetes.io/docs/user-guide/kubectl/v1.5/ 301
/docs/user-guide/kubectl/v1.6/node_modules/* https://v1-6.docs.kubernetes.io/docs/user-guide/kubectl/v1.6/ 301
/docs/user-guide/labels/ /docs/concepts/overview/working-with-objects/labels/ 301
@@ -409,7 +424,7 @@
/v1.1/docs/admin/networking.html /docs/concepts/cluster-administration/networking/ 301
/v1.1/docs/getting-started-guides/ /docs/tutorials/kubernetes-basics/ 301
https://kubernetes-io-v1-7.netlify.com/* https://v1-7.docs.kubernetes.io/"splat 301
https://kubernetes-io-v1-7.netlify.com/* https://v1-7.docs.kubernetes.io/:splat 301
/docs/admin/cloud-controller-manager/ /docs/reference/generated/cloud-controller-manager/ 301
/docs/admin/kube-apiserver/ /docs/reference/generated/kube-apiserver/ 301
@@ -428,3 +443,4 @@ https://kubernetes-io-v1-7.netlify.com/* https://v1-7.docs.kubernetes.io/"spl
/docs/admin/kubefed_unjoin/ /docs/reference/generated/kubefed_unjoin/ 301
/docs/admin/kubefed_version/ /docs/reference/generated/kubefed_version/ 301
/docs/reference/generated/kubeadm/ /docs/reference/setup-tools/kubeadm/kubeadm/ 301

@@ -85,7 +85,7 @@ The specification used by AWS is
```yaml
containers:
- name: fluentd-cloud-logging
image: gcr.io/google_containers/fluentd-gcp:1.16
image: k8s.gcr.io/fluentd-gcp:1.16
resources:
limits:
cpu: 100m

@@ -40,7 +40,7 @@ title: Validate node setup
# $LOG_DIR is the path where the test results are written.
sudo docker run -it --rm --privileged --net=host \
-v /:/rootfs -v $CONFIG_DIR:$CONFIG_DIR -v $LOG_DIR:/var/result \
gcr.io/google_containers/node-test:0.2
k8s.gcr.io/node-test:0.2
```
## Running node conformance tests for other hardware architectures
@@ -61,7 +61,7 @@ Kubernetes also provides node conformance tests for systems with other hardware architectures
sudo docker run -it --rm --privileged --net=host \
-v /:/rootfs:ro -v $CONFIG_DIR:$CONFIG_DIR -v $LOG_DIR:/var/result \
-e FOCUS=MirrorPod \ # run only the MirrorPod test
gcr.io/google_containers/node-test:0.2
k8s.gcr.io/node-test:0.2
```
To skip a specified test, override the `SKIP` environment variable with a regular expression describing the tests to skip.
@@ -70,7 +70,7 @@ sudo docker run -it --rm --privileged --net=host \
sudo docker run -it --rm --privileged --net=host \
-v /:/rootfs:ro -v $CONFIG_DIR:$CONFIG_DIR -v $LOG_DIR:/var/result \
-e SKIP=MirrorPod \ # run all tests except the MirrorPod test
gcr.io/google_containers/node-test:0.2
k8s.gcr.io/node-test:0.2
```
The node conformance test is a containerized version of the [node e2e tests](https://github.com/kubernetes/community/blob/{{page.githubbranch}}/contributors/devel/e2e-node-tests.md).

@@ -0,0 +1,248 @@
title: Concepts Underlying the Cloud Controller Manager

## The cloud controller manager

The cloud controller manager (CCM) concept was created so that cloud-vendor-specific code and the Kubernetes core can evolve independently of each other. The cloud controller manager runs alongside other master components such as the Kubernetes controller manager, the API server, and the scheduler. It can also be started as a Kubernetes addon, in which case the CCM runs on top of Kubernetes.

The cloud controller manager is designed around a plugin mechanism that allows new cloud providers to integrate with Kubernetes easily using plugins. There are plans for on-boarding new cloud providers on Kubernetes, and for providing cloud providers a migration path from the original, older model to the new CCM model.

This document discusses the concepts behind the cloud controller manager and gives details about its associated functions.

The following diagram shows the architecture of a Kubernetes cluster without the cloud controller manager:

![Pre-CCM Kubernetes cluster architecture](/images/docs/pre-ccm-arch.png)

## Design

In the preceding architecture, Kubernetes and the cloud provider are integrated through several different components:

* Kubelet
* Kubernetes controller manager
* Kubernetes API server

The CCM consolidates all of the cloud-dependent logic from those three components to create a single point of integration with the cloud. The new architecture looks like this:

![CCM Kubernetes cluster architecture](/images/docs/post-ccm-arch.png)

## Components of the CCM

The CCM breaks away some of the functionality of the Kubernetes controller manager (KCM) and runs it as a separate process. Specifically, it breaks away the controllers in the KCM that are cloud dependent. The KCM has the following cloud-dependent controller loops:

* Node controller
* Volume controller
* Route controller
* Service controller

In version 1.8, the currently running CCM runs the following controllers from the preceding list:

* Node controller
* Route controller
* Service controller

Additionally, it runs another controller called the PersistentVolumeLabels controller. This controller is responsible for setting the zone and region labels on PersistentVolumes created in the GCP and AWS clouds.

**Note**: The volume controller was deliberately designed to not be a part of the CCM. Due to the complexity involved, and the effort required to abstract away the existing vendor-specific volume logic, it was decided that the volume controller would not be moved into the CCM.

The original plan to support volumes through the CCM was to introduce FlexVolumes for pluggable volume support. However, a more competitive effort known as CSI is being planned to replace FlexVolumes.

Considering these changes in progress, it was decided to pause the current work until CSI is ready.

The cloud provider working group (wg-cloud-provider) is working on enabling PersistentVolume support through the CCM. For details, see [kubernetes/kubernetes#52371](https://github.com/kubernetes/kubernetes/pull/52371).

## Functions of the CCM

The CCM inherits its cloud-provider-related functions from other Kubernetes components. This section is organized around the components whose functions the CCM inherits.

### 1. Kubernetes controller manager

The majority of the CCM's functions are derived from the KCM. As mentioned in the previous section, the CCM runs the following control loops:

* Node controller
* Route controller
* Service controller
* PersistentVolumeLabels controller

#### Node controller

The node controller is responsible for initializing a node by obtaining information from the cloud provider about the nodes running in the cluster. The node controller performs the following functions:

1. Initialize the node with cloud-specific zone/region labels.
1. Initialize the node with cloud-specific instance details, for example, type and size.
1. Obtain the node's network addresses and hostname.
1. In case a node becomes unresponsive, check whether the node has been deleted from the cloud. If it has been deleted from the cloud, delete the Kubernetes node object.

#### Route controller

The route controller is responsible for configuring routes in the cloud appropriately so that containers on different nodes in the Kubernetes cluster can communicate with each other. The route controller is only applicable to the Google Compute Engine platform.

#### Service controller

The service controller is responsible for listening to service create, update, and delete events. Based on the current state of the individual services in Kubernetes, it configures cloud load balancers (such as an ELB or a Google LB) to reflect the state of the services in Kubernetes. Additionally, it ensures that the service backends of cloud load balancers are kept up to date.

#### PersistentVolumeLabels controller

The PersistentVolumeLabels controller applies labels to AWS EBS volumes and GCE PD volumes when they are created, which removes the need for users to set these volume labels manually.

These labels are essential for the scheduling of pods, because the volumes only work within the zone/region that they are located in; any pod using such a volume must therefore be scheduled in the same zone/region for scheduling to succeed.

The PersistentVolumeLabels controller was created specifically for the CCM; that is, it did not exist before the CCM was created. This was done to move the PV labelling logic from the Kubernetes API server (where it was an admission controller) into the CCM. It does not run on the KCM.

### 2. Kubelet

The node controller contains the cloud-dependent functionality of the kubelet. Before the CCM component was introduced, the kubelet was responsible for initializing a node with cloud-specific details such as IP addresses, region/zone labels, and instance type information; with the introduction of the CCM, this initialization work has moved from the kubelet into the CCM.

In the new model introduced with the CCM, the kubelet initializes a node without cloud-specific information. However, it adds a taint to the newly created node that makes the node unschedulable until the CCM has initialized the node with cloud-specific information; only then does it remove the taint, making the node schedulable.

### 3. Kubernetes API server

The PersistentVolumeLabels controller moves the cloud-dependent functionality of the Kubernetes API server into the CCM, as described in the preceding sections.

## Plugin mechanism

The cloud controller manager uses Go interfaces to allow external implementations to be plugged in for extending its functionality. Specifically, it uses the CloudProvider interface defined [here](https://github.com/kubernetes/kubernetes/blob/master/pkg/cloudprovider/cloud.go).

The implementation of the four shared controllers highlighted above, along with some scaffolding and the shared cloud-provider interface, will remain in the Kubernetes core, but cloud-vendor-specific implementations will be built outside of the core and implement the interfaces defined in the core.

For more information about developing plugins, see
[Developing Cloud Controller Manager](/docs/tasks/administrators-cluster/developing-cloud-controller-manager/).

## Authorization

This section breaks down the access that the CCM requires on various API objects in order to perform its operations.

### Node controller

The node controller only works with node objects. It requires full access to get, list, create, update, patch, watch, and delete node objects.

v1/Node:
- Get
- List
- Create
- Update
- Patch
- Watch

### Route controller

The route controller listens for node object creation and configures routes appropriately. It requires get access to node objects.

v1/Node:
- Get

### Service controller

The service controller listens for service object create, update, and delete events and then configures endpoints for those services appropriately.

To access services, it requires list and watch access. To update services, it requires patch and update access.

To set up endpoints for the services, it requires create, list, get, watch, and update access.

v1/Service:
- List
- Get
- Watch
- Patch
- Update

### PersistentVolumeLabels controller

The PersistentVolumeLabels controller listens for PersistentVolume (PV) creation events and then updates them. This controller requires access to list, watch, get, and update PVs.

v1/PersistentVolume:
- Get
- List
- Watch
- Update

### Others

The implementation of the core of the CCM requires access to create events, and to ensure secure operation it requires access to create ServiceAccounts.

v1/Event:
- Create
- Patch
- Update

v1/ServiceAccount:
- Create

The RBAC ClusterRole for the CCM looks like this:

```yaml
apiVersion: rbac.authorization.k8s.io/v1beta1
kind: ClusterRole
metadata:
  name: cloud-controller-manager
rules:
- apiGroups:
  - ""
  resources:
  - events
  verbs:
  - create
  - patch
  - update
- apiGroups:
  - ""
  resources:
  - nodes
  verbs:
  - '*'
- apiGroups:
  - ""
  resources:
  - nodes/status
  verbs:
  - patch
- apiGroups:
  - ""
  resources:
  - services
  verbs:
  - list
  - patch
  - update
  - watch
- apiGroups:
  - ""
  resources:
  - serviceaccounts
  verbs:
  - create
- apiGroups:
  - ""
  resources:
  - persistentvolumes
  verbs:
  - get
  - list
  - update
  - watch
- apiGroups:
  - ""
  resources:
  - endpoints
  verbs:
  - create
  - get
  - list
  - watch
  - update
```

## Vendor implementations

The following cloud vendors have implemented CCMs for their own clouds:

* [Digital Ocean]()
* [Oracle]()
* [Azure]()
* [GCE]()
* [AWS]()

## Cluster administration

Complete instructions for configuring and running the CCM are provided [here](/docs/tasks/administer-cluster/running-cloud-controller/#cloud-controller-manager).

@@ -39,7 +39,7 @@ redirect_from:
### Addresses

The usage of these field combinations depends on your cloud provider or bare-metal configuration.
The usage of these field combinations depends on your cloud provider or bare configuration.

* HostName: the same as reported by the node's kernel. It can be overridden via the kubelet's `--hostname-override` parameter.
* ExternalIP: typically the IP address of the node that is externally routable (reachable from outside the cluster).

@@ -115,7 +115,7 @@ Node conditions are represented as a JSON object. For example, the following response describes a
```
Kubernetes creates a node object internally (the representation of a node), and validates the node by health checking based on the `metadata.name` field (we assume `metadata.name` can be resolved). If the node is valid, that is, all the necessary services are running, it is then eligible to run a pod; otherwise, it is ignored by all cluster activity guidance it becomes valid. Note that Kubernetes keeps the object for an invalid node unless it is explicitly deleted by the client, and it keeps checking to see whether the node becomes valid.

Kubernetes creates a node object internally (the representation of a node), and validates the node by health checking based on the `metadata.name` field (we assume `metadata.name` can be resolved). If the node is valid, that is, all the necessary services are running, it is then eligible to run a pod; otherwise, it is ignored by all cluster activity until it becomes valid. Note that Kubernetes keeps the object for an invalid node unless it is explicitly deleted by the client, and it keeps checking to see whether the node becomes valid.

Currently, there are three components that interact with the Kubernetes node interface: the node controller, the kubelet, and kubectl.
@@ -216,7 +216,7 @@ metadata:
spec:
containers:
- name: sleep-forever
image: gcr.io/google_containers/pause:0.8.0
image: k8s.gcr.io/pause:0.8.0
resources:
requests:
cpu: 100m

@@ -0,0 +1,248 @@
---
cn-approvers:
- lichuqiang
title: 证书
---
* TOC
{:toc}
## 创建证书
当使用客户端证书进行认证时,用户可以使用现有部署脚本,或者通过 `easyrsa`、`openssl` 或
`cfssl` 手动生成证书。
### 使用现有部署脚本
**现有部署脚本** 位于
`cluster/saltbase/salt/generate-cert/make-ca-cert.sh`
执行该脚本时需传入两个参数。 第一个参数为 API 服务器的 IP 地址,第二个参数为对象的候补名称列表,
形如 `IP:<ip地址> 或 DNS:<dns名称>`
脚本生成三个文件: `ca.crt`、`server.crt` 和 `server.key`
最后,将以下参数加入到 API 服务器的启动参数中:
```
--client-ca-file=/srv/kubernetes/ca.crt
--tls-cert-file=/srv/kubernetes/server.crt
--tls-private-key-file=/srv/kubernetes/server.key
```
### easyrsa
使用 **easyrsa** 能够手动地为集群生成证书。
1. 下载、解压并初始化 easyrsa3 的补丁版本。
curl -L -O https://storage.googleapis.com/kubernetes-release/easy-rsa/easy-rsa.tar.gz
tar xzf easy-rsa.tar.gz
cd easy-rsa-master/easyrsa3
./easyrsa init-pki
1. 生成 CA通过 `--batch` 参数设置自动模式。 通过 `--req-cn` 设置默认使用的 CN
./easyrsa --batch "--req-cn=${MASTER_IP}@`date +%s`" build-ca nopass
1. 生成服务器证书和密钥。
参数 `--subject-alt-name` 设置了访问 API 服务器时可能使用的 IP 和 DNS 名称。 `MASTER_CLUSTER_IP`
通常为 `--service-cluster-ip-range` 参数中指定的服务 CIDR 的 首个 IP 地址,`--service-cluster-ip-range`同时用于
API 服务器和控制器管理器组件。 `--days` 参数用于设置证书的有效期限。
下面的示例还假设用户使用 `cluster.local` 作为默认的 DNS 域名。
./easyrsa --subject-alt-name="IP:${MASTER_IP}"\
"IP:${MASTER_CLUSTER_IP},"\
"DNS:kubernetes,"\
"DNS:kubernetes.default,"\
"DNS:kubernetes.default.svc,"\
"DNS:kubernetes.default.svc.cluster,"\
"DNS:kubernetes.default.svc.cluster.local" \
--days=10000 \
build-server-full server nopass
1. 拷贝 `pki/ca.crt``pki/issued/server.crt``pki/private/server.key` 至您的目录。
1. 填充并在 API 服务器的启动参数中添加以下参数:
--client-ca-file=/yourdirectory/ca.crt
--tls-cert-file=/yourdirectory/server.crt
--tls-private-key-file=/yourdirectory/server.key
### openssl
使用 **openssl** 能够手动地为集群生成证书。
1. 生成密钥位数为 2048 的 ca.key
openssl genrsa -out ca.key 2048
1. 依据 ca.key 生成 ca.crt (使用 -days 参数来设置证书有效时间):
openssl req -x509 -new -nodes -key ca.key -subj "/CN=${MASTER_IP}" -days 10000 -out ca.crt
1. 生成密钥位数为 2048 的 server.key
openssl genrsa -out server.key 2048
1. 创建用于生成证书签名请求CSR的配置文件。
确保在将其保存至文件(如`csr.conf`)之前将尖括号标记的值(如`<MASTER_IP>`
替换为你想使用的真实值。 注意:`MASTER_CLUSTER_IP` 是前面小节中描述的 API 服务器的服务集群 IP
(service cluster IP)。 下面的示例也假设用户使用 `cluster.local` 作为默认的 DNS 域名。
[ req ]
default_bits = 2048
prompt = no
default_md = sha256
req_extensions = req_ext
distinguished_name = dn
[ dn ]
C = <country>
ST = <state>
L = <city>
O = <organization>
OU = <organization unit>
CN = <MASTER_IP>
[ req_ext ]
subjectAltName = @alt_names
[ alt_names ]
DNS.1 = kubernetes
DNS.2 = kubernetes.default
DNS.3 = kubernetes.default.svc
DNS.4 = kubernetes.default.svc.cluster
DNS.5 = kubernetes.default.svc.cluster.local
IP.1 = <MASTER_IP>
IP.2 = <MASTER_CLUSTER_IP>
[ v3_ext ]
authorityKeyIdentifier=keyid,issuer:always
basicConstraints=CA:FALSE
keyUsage=keyEncipherment,dataEncipherment
extendedKeyUsage=serverAuth,clientAuth
subjectAltName=@alt_names
1. 基于配置文件生成证书签名请求:
openssl req -new -key server.key -out server.csr -config csr.conf
1. 使用 ca.key、ca.crt 和 server.csr 生成服务器证书:
openssl x509 -req -in server.csr -CA ca.crt -CAkey ca.key \
-CAcreateserial -out server.crt -days 10000 \
-extensions v3_ext -extfile csr.conf
1. 查看证书:
openssl x509 -noout -text -in ./server.crt
最后,添加同样的参数到 API 服务器的启动参数中。
### cfssl
**cfssl** 是另一种用来生成证书的工具。
1. 按如下所示的方式下载、解压并准备命令行工具。
注意:你可能需要基于硬件架构和你所使用的 cfssl 版本对示例命令进行修改。
curl -LO https://pkg.cfssl.org/R1.2/cfssl_linux-amd64 -o cfssl
chmod +x cfssl
curl -LO https://pkg.cfssl.org/R1.2/cfssljson_linux-amd64 -o cfssljson
chmod +x cfssljson
curl -LO https://pkg.cfssl.org/R1.2/cfssl-certinfo_linux-amd64 -o cfssl-certinfo
chmod +x cfssl-certinfo
1. 创建目录来存放物料,并初始化 cfssl
mkdir cert
cd cert
../cfssl print-defaults config > config.json
../cfssl print-defaults csr > csr.json
1. 创建用来生成 CA 文件的 JSON 配置文件,例如 `ca-config.json`
{
"signing": {
"default": {
"expiry": "8760h"
},
"profiles": {
"kubernetes": {
"usages": [
"signing",
"key encipherment",
"server auth",
"client auth",
],
"expiry": "8760h"
}
}
}
}
1. 创建用来生成 CA 证书签名请求CSR的 JSON 配置文件,例如 `ca-csr.json`
确保将尖括号标记的值替换为你想使用的真实值。
{
"CN": "kubernetes",
"key": {
"algo": "rsa",
"size": 2048
},
"names":[{
"C": "<country>",
"ST": "<state>",
"L": "<city>",
"O": "<organization>",
"OU": "<organization unit>",
}]
}
1. 生成 CA 密钥(`ca-key.pem`)和证书(`ca.pem`
../cfssl gencert -initca ca-csr.json | ../cfssljson -bare ca
1. 按如下所示的方式创建用来为 API 服务器生成密钥和证书的 JSON 配置文件。
确保将尖括号标记的值替换为你想使用的真实值。 `MASTER_CLUSTER_IP` 是前面小节中描述的
API 服务器的服务集群 IP。 下面的示例也假设用户使用 `cluster.local` 作为默认的 DNS 域名。
{
"CN": "kubernetes",
"hosts": [
"127.0.0.1",
"<MASTER_IP>",
"<MASTER_CLUSTER_IP>",
"kubernetes",
"kubernetes.default",
"kubernetes.default.svc",
"kubernetes.default.svc.cluster",
"kubernetes.default.svc.cluster.local"
],
"key": {
"algo": "rsa",
"size": 2048
},
"names": [{
"C": "<country>",
"ST": "<state>",
"L": "<city>",
"O": "<organization>",
"OU": "<organization unit>"
}]
}
1. Generate the key and certificate for the API server, which are by default saved into files `server-key.pem`
and `server.pem` respectively:
../cfssl gencert -ca=ca.pem -ca-key=ca-key.pem \
--config=ca-config.json -profile=kubernetes \
server-csr.json | ../cfssljson -bare server
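The `cfssl-certinfo` tool downloaded earlier can be used to inspect the result. A minimal sketch, run from the same `cert` directory:

```shell
# Dump the parsed certificate (subject, SANs, expiry) as JSON
../cfssl-certinfo -cert server.pem
```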
## Distributing Self-Signed CA Certificate
A client node may refuse to recognize a self-signed CA certificate as valid.
For a non-production deployment, or for a deployment that runs behind a company firewall, you can distribute
a self-signed CA certificate to all clients and refresh the local list of valid certificates.
On each client, perform the following operations:
```bash
$ sudo cp ca.crt /usr/local/share/ca-certificates/kubernetes.crt
$ sudo update-ca-certificates
Updating certificates in /etc/ssl/certs...
1 added, 0 removed; done.
Running hooks in /etc/ca-certificates/update.d....
done.
```
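The commands above are for Debian-based distributions. On Red Hat based systems the analogous refresh is handled by `update-ca-trust`; a sketch (the anchor directory may vary by distribution):

```shell
sudo cp ca.crt /etc/pki/ca-trust/source/anchors/kubernetes.crt
sudo update-ca-trust extract
```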
## Certificates API
You can use the `certificates.k8s.io` API to provision x509 certificates to use for
authentication as documented [here](/docs/tasks/tls/managing-tls-in-a-cluster).
View File
@ -15,7 +15,7 @@ title: Managing Compute Resources for Containers
*CPU* and *memory* are each a *resource type*. A resource type has a base unit. The unit of CPU is the core, and the unit of memory is the byte.
CPU and memory are collectively referred to as *compute resources*, or simply *resources*. The quantity of a compute resource is measurable and can be requested, allocated, and consumed. They are distinct from [API resources](/docs/api/). API resources, such as Pods and [Services](/docs/user-guide/services), are objects that can be read and modified through the Kubernetes API server.
CPU and memory are collectively referred to as *compute resources*, or simply *resources*. The quantity of a compute resource can be requested, allocated, consumed, and measured. They are distinct from [API resources](/docs/api/). API resources, such as Pods and [Services](/docs/user-guide/services), are objects that can be read and modified through the Kubernetes API server.
## Resource requests and limits of Pods and Containers
@ -199,7 +199,7 @@ Conditions:
Events:
FirstSeen LastSeen Count From SubobjectPath Reason Message
Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {scheduler } scheduled Successfully assigned simmemleak-hra99 to kubernetes-node-tf0f
Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD pulled Pod container image "gcr.io/google_containers/pause:0.8.0" already present on machine
Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD pulled Pod container image "k8s.gcr.io/pause:0.8.0" already present on machine
Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD created Created with docker id 6a41280f516d
Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} implicitly required container POD started Started with docker id 6a41280f516d
Tue, 07 Jul 2015 12:53:51 -0700 Tue, 07 Jul 2015 12:53:51 -0700 1 {kubelet kubernetes-node-tf0f} spec.containers{simmemleak} created Created with docker id 87348f12526a
View File
@ -23,4 +23,4 @@ spec:
- another-node-label-value
containers:
- name: with-node-affinity
image: gcr.io/google_containers/pause:2.0
image: k8s.gcr.io/pause:2.0
View File
@ -26,4 +26,4 @@ spec:
topologyKey: kubernetes.io/hostname
containers:
- name: with-pod-affinity
image: gcr.io/google_containers/pause:2.0
image: k8s.gcr.io/pause:2.0
View File
@ -518,7 +518,7 @@ spec:
secretName: dotfile-secret
containers:
- name: dotfile-test-container
image: gcr.io/google_containers/busybox
image: k8s.gcr.io/busybox
command:
- ls
- "-l"
View File
@ -39,7 +39,7 @@ Master components can be run on any node in the cluster. However, for simplicity
### Cloud Controller Manager (cloud-controller-manager)
cloud-controller-manager is a controller that interacts with the underlying cloud providers. The cloud-controller-manager binary is an alpha feature introduced in Kubernetes v1.6.
cloud-controller-manager is a controller that interacts with the underlying cloud providers. The cloud-controller-manager executable component is an alpha feature introduced in Kubernetes v1.6.
cloud-controller-manager runs cloud-provider-specific controller loops only. You must disable these controller loops in kube-controller-manager; you can do so by setting the `--cloud-provider` flag to `external` when starting kube-controller-manager.
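A minimal sketch of the corresponding flags (all other required flags omitted; the binary names assume a default installation):

```shell
# Hand the cloud-specific control loops over to an external manager ...
kube-controller-manager --cloud-provider=external <other flags>
# ... and run them in the cloud controller manager itself
cloud-controller-manager --cloud-provider=<your-provider> <other flags>
```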
View File
@ -70,7 +70,7 @@ Kubernetes satisfies a number of common needs of applications running in production, such as:
* [Pods](/docs/user-guide/pods/) provide a composite application model while preserving the one-application-per-container model,
* [mounting external storage](/docs/user-guide/volumes/),
* [Secret management](/docs/user-guide/secrets/),
* [application health checking](/docs/user-guide/production-pods/#liveness-and-readiness-probes-aka-health-checks),
* [application health checking](/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/),
* [replicating application instances](/docs/user-guide/replication-controller/),
* [horizontal auto-scaling](/docs/user-guide/horizontal-pod-autoscaling/),
* [service discovery](/docs/user-guide/connecting-applications/),
View File
@ -366,16 +366,7 @@ LoadBalancer Ingress: a320587ffd19711e5a37606cf4a74574-1142138393.us-east-1.el
...
```
## Further reading
Kubernetes also supports federated Services, which can span multiple clusters and cloud providers, to provide increased availability, better fault tolerance, and greater scalability for Services.
See the [Federated Services User Guide](/docs/concepts/cluster-administration/federation-service-discovery/) for more information.
## What's next
[Learn about more Kubernetes features that will help you run containers reliably in production.](/docs/user-guide/production-pods)
View File
@ -85,7 +85,7 @@ Init containers support all of the fields and features of application containers, including resource limits, volumes,
* Clone a Git repository into a volume.
* Place values into a configuration file and run a template tool to dynamically generate a configuration file for the main application container. For example, place the POD_IP value in a configuration and generate the main application configuration file using Jinja.
More detailed usage examples can be found in the [StatefulSet documentation](/docs/concepts/abstractions/controllers/statefulsets/) and the [Production Pods guide](/docs/user-guide/production-pods.md#handling-initialization).
More detailed usage examples can be found in the [StatefulSet documentation](/docs/concepts/abstractions/controllers/statefulsets/) and the [Pod initialization](/docs/tasks/configure-pod-container/configure-pod-initialization) documentation.
View File
@ -108,7 +108,7 @@ spec:
containers:
- args:
- /server
image: gcr.io/google_containers/liveness
image: k8s.gcr.io/liveness
livenessProbe:
httpGet:
# when "host" is not defined, "PodIP" will be used
View File
@ -0,0 +1,104 @@
---
title: Well-Known Labels, Annotations and Taints
---
Kubernetes reserves all labels and annotations in the kubernetes.io namespace. This document describes the
well-known kubernetes.io labels and annotations.
This document serves both as a reference to the values of these labels and annotations, and as a coordination point for assigning them.
**Table of contents:**
<!-- BEGIN MUNGE: GENERATED_TOC -->
- [Well-Known Labels, Annotations and Taints](#well-known-labels-annotations-and-taints)
- [beta.kubernetes.io/arch](#betakubernetesioarch)
- [beta.kubernetes.io/os](#betakubernetesioos)
- [kubernetes.io/hostname](#kubernetesiohostname)
- [beta.kubernetes.io/instance-type](#betakubernetesioinstance-type)
- [failure-domain.beta.kubernetes.io/region](#failure-domainbetakubernetesioregion)
- [failure-domain.beta.kubernetes.io/zone](#failure-domainbetakubernetesiozone)
<!-- END MUNGE: GENERATED_TOC -->
## beta.kubernetes.io/arch
Example: `beta.kubernetes.io/arch=amd64`
Used on: Node
The Kubelet populates this with the `runtime.GOARCH` value as defined by Go. This can be handy in cases such as mixing arm and x86 nodes.
## beta.kubernetes.io/os
Example: `beta.kubernetes.io/os=linux`
Used on: Node
The Kubelet populates this with the `runtime.GOOS` value as defined by Go. This can be handy if you have nodes running different operating systems in your cluster (although currently Kubernetes only supports Linux).
## kubernetes.io/hostname
Example: `kubernetes.io/hostname=ip-172-20-114-199.ec2.internal`
Used on: Node
The Kubelet populates this with the hostname. Note that the "actual" hostname can be changed by
passing the `--hostname-override` flag to the kubelet.
## beta.kubernetes.io/instance-type
Example: `beta.kubernetes.io/instance-type=m3.medium`
Used on: Node
The Kubelet populates this with the instance type as defined by the `cloudprovider`. The label is not set when no `cloudprovider` is used.
This can be handy if you want to target certain workloads to certain instance types, but typically you want to rely on the Kubernetes scheduler to perform resource-based scheduling, so you should aim to schedule based on properties rather than on instance types (for example: require a CPU, instead of a `g2.2xlarge`).
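For example, listing the nodes of one instance type is a one-liner (a sketch; the label value is the example from above):

```shell
kubectl get nodes -l beta.kubernetes.io/instance-type=m3.medium
```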
## failure-domain.beta.kubernetes.io/region
See [failure-domain.beta.kubernetes.io/zone](#failure-domainbetakubernetesiozone).
## failure-domain.beta.kubernetes.io/zone
Example:
`failure-domain.beta.kubernetes.io/region=us-east-1`
`failure-domain.beta.kubernetes.io/zone=us-east-1c`
Used on: Node, PersistentVolume
On Node: The Kubelet populates this with the zone information as defined by the `cloudprovider`. The label is not set
when no `cloudprovider` is used, but you should consider setting it on your nodes if it makes sense in your topology.
On PersistentVolume: The `PersistentVolumeLabel` admission controller automatically adds zone labels to PersistentVolumes on GCE and AWS.
In a single-zone cluster, Kubernetes automatically spreads the pods in a replication controller or service across nodes (to reduce the impact of failures).
In a multi-zone cluster, this spreading behavior is extended across zones (to reduce the impact of zone failures). Spreading across zones is achieved via
SelectorSpreadPriority.
This is a best-effort placement. If the zones in your cluster are heterogeneous (for example: different numbers of nodes,
different node types, or different pod resource requirements across zones), pods might not spread evenly across zones. If desired, you can use homogeneous zones
(same number and types of nodes) to reduce the probability of unequal spreading.
Because volumes cannot be attached across zones, the scheduler (via the VolumeZonePredicate predicate) also ensures that pods
that claim a given volume are placed into the same zone as that volume.
The actual values of zone and region don't matter, and neither is the meaning of the hierarchy rigidly defined. The expectation is that,
unless the entire region fails, a node failure in one zone should not affect nodes in other zones. For example, zones should typically avoid sharing a single network switch.
The exact planning depends on your particular infrastructure: a three-rack installation will choose a very different setup from a multi-datacenter one.
If the `PersistentVolumeLabel` admission controller does not support automatic labeling of your PersistentVolumes, and you want to prevent pods
from mounting volumes across zones, consider adding the labels manually (or adding support to `PersistentVolumeLabel`). If your infrastructure doesn't have this constraint, you don't need to add the zone labels to the volumes at all.
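To see which zone and region labels your cloud provider set, you can add them as columns when listing nodes; a sketch:

```shell
kubectl get nodes -L failure-domain.beta.kubernetes.io/region -L failure-domain.beta.kubernetes.io/zone
```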
<!-- BEGIN MUNGE: GENERATED_ANALYTICS -->
[![Analytics](https://kubernetes-site.appspot.com/UA-36037335-10/GitHub/docs/api-reference/labels-annotations-taints.md?pixel)]()
<!-- END MUNGE: GENERATED_ANALYTICS -->
View File
@ -9,7 +9,7 @@ metadata:
spec:
containers:
- name: master
image: gcr.io/google_containers/redis:v1
image: k8s.gcr.io/redis:v1
env:
- name: MASTER
value: "true"
View File
@ -178,7 +178,7 @@ $ kubectl get pods valid-pod --namespace=limit-example -o yaml | grep -C 6 resou
uid: 3b1bfd7a-f53c-11e5-b066-64510658e388
spec:
containers:
- image: gcr.io/google_containers/serve_hostname
- image: k8s.gcr.io/serve_hostname
imagePullPolicy: Always
name: kubernetes-serve-hostname
resources:
View File
@ -13,7 +13,7 @@ spec:
spec:
containers:
- name: autoscaler
image: gcr.io/google_containers/cluster-proportional-autoscaler-amd64:1.0.0
image: k8s.gcr.io/cluster-proportional-autoscaler-amd64:1.0.0
resources:
requests:
cpu: "20m"
View File
@ -7,4 +7,4 @@ metadata:
spec:
containers:
- name: pod-with-no-annotation-container
image: gcr.io/google_containers/pause:2.0
image: k8s.gcr.io/pause:2.0
View File
@ -8,4 +8,4 @@ spec:
schedulerName: default-scheduler
containers:
- name: pod-with-default-annotation-container
image: gcr.io/google_containers/pause:2.0
image: k8s.gcr.io/pause:2.0
View File
@ -8,4 +8,4 @@ spec:
schedulerName: my-scheduler
containers:
- name: pod-with-second-annotation-container
image: gcr.io/google_containers/pause:2.0
image: k8s.gcr.io/pause:2.0
View File
@ -15,7 +15,7 @@ spec:
- -c
- touch /tmp/healthy; sleep 30; rm -rf /tmp/healthy; sleep 600
image: gcr.io/google_containers/busybox
image: k8s.gcr.io/busybox
livenessProbe:
exec:
View File
@ -9,7 +9,7 @@ spec:
- name: liveness
args:
- /server
image: gcr.io/google_containers/liveness
image: k8s.gcr.io/liveness
livenessProbe:
httpGet:
path: /healthz
View File
@ -7,7 +7,7 @@ metadata:
spec:
containers:
- name: goproxy
image: gcr.io/google_containers/goproxy:0.1
image: k8s.gcr.io/goproxy:0.1
ports:
- containerPort: 8080
readinessProbe:
View File
@ -5,7 +5,7 @@ metadata:
spec:
containers:
- name: test-container
image: gcr.io/google_containers/busybox:1.24
image: k8s.gcr.io/busybox:1.24
command: [ "sh", "-c"]
args:
- while true; do
View File
@ -5,7 +5,7 @@ metadata:
spec:
containers:
- name: test-container
image: gcr.io/google_containers/busybox
image: k8s.gcr.io/busybox
command: [ "sh", "-c"]
args:
- while true; do
View File
@ -5,7 +5,7 @@ metadata:
spec:
containers:
- name: client-container
image: gcr.io/google_containers/busybox:1.24
image: k8s.gcr.io/busybox:1.24
command: ["sh", "-c"]
args:
- while true; do
View File
@ -12,7 +12,7 @@ metadata:
spec:
containers:
- name: client-container
image: gcr.io/google_containers/busybox
image: k8s.gcr.io/busybox
command: ["sh", "-c"]
args:
- while true; do
View File
@ -41,13 +41,13 @@ spec:
containers:
-
name: gpu-container-1
image: gcr.io/google_containers/pause:2.0
image: k8s.gcr.io/pause:2.0
resources:
limits:
alpha.kubernetes.io/nvidia-gpu: 2 # requesting 2 GPUs
-
name: gpu-container-2
image: gcr.io/google_containers/pause:2.0
image: k8s.gcr.io/pause:2.0
resources:
limits:
alpha.kubernetes.io/nvidia-gpu: 3 # requesting 3 GPUs
@ -141,7 +141,7 @@ metadata:
spec:
containers:
- name: gpu-container-1
image: gcr.io/google_containers/pause:2.0
image: k8s.gcr.io/pause:2.0
resources:
limits:
alpha.kubernetes.io/nvidia-gpu: 1
View File
@ -13,9 +13,9 @@ title: Manage HugePages
{% capture prerequisites %}
1. Kubernetes nodes must pre-allocate huge pages in order for the node to report its huge page capacity.
1. Kubernetes nodes must pre-allocate huge pages in order for the node to report its huge page capacity.
A node may only pre-allocate huge pages of a single specific size.
1. You must set the dedicated **alpha** feature gate `HugePages` to true across the system: `--feature-gates="HugePages=true"`.
1. You must set the dedicated **alpha** feature gate `HugePages` to true across the system: `--feature-gates=HugePages=true`.
The nodes automatically discover all huge page resources and report them as schedulable resources.
@ -54,7 +54,7 @@
- Huge page requests must equal the limits. This is the default if limits are specified, but requests are not.
- Huge pages are isolated at the pod scope; container-level isolation is planned in a future iteration.
- Huge pages are supported for EmptyDir volumes; an EmptyDir volume backed by huge pages may not consume more huge page memory than the pod requests.
- Applications that consume huge pages via `shmget()` with `SHM_HUGETLB` must run with a supplemental group that matches
- Applications that consume huge pages via `shmget()` with `SHM_HUGETLB` must run with a supplemental group that matches
`proc/sys/vm/hugetlb_shm_group`.
## Features to Be Implemented
View File
@ -33,7 +33,7 @@ Applications running in a Kubernetes cluster find each other, and communicate with each other, through the Service abstraction
You must have a working Kubernetes 1.5 cluster to run the examples in this document. The examples use a small nginx webserver that echoes back the source IP of requests it receives through an HTTP header. You can create it as follows:
```console
$ kubectl run source-ip-app --image=gcr.io/google_containers/echoserver:1.4
$ kubectl run source-ip-app --image=k8s.gcr.io/echoserver:1.4
deployment "source-ip-app" created
```
View File
@ -434,7 +434,7 @@ The StatefulSet controller in Kubernetes 1.7 supports automated updates. The update strategy
Patch the container image of the `web` StatefulSet:
```shell
kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"gcr.io/google_containers/nginx-slim:0.7"}]'
kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"k8s.gcr.io/nginx-slim:0.7"}]'
"web" patched
```
@ -470,9 +470,9 @@ web-0 1/1 Running 0 3s
```shell{% raw %}
kubectl get pod -l app=nginx -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[0].image}{"\n"}{end}'
web-0 gcr.io/google_containers/nginx-slim:0.7
web-1 gcr.io/google_containers/nginx-slim:0.8
web-2 gcr.io/google_containers/nginx-slim:0.8
web-0 k8s.gcr.io/nginx-slim:0.7
web-1 k8s.gcr.io/nginx-slim:0.8
web-2 k8s.gcr.io/nginx-slim:0.8
{% endraw %}```
`web-0` has had its image updated, but `web-1` and `web-2` still have the original
@ -513,9 +513,9 @@ web-2 1/1 Running 0 36s
```shell{% raw %}
kubectl get pod -l app=nginx -o jsonpath='{range .items[*]}{.metadata.name}{"\t"}{.spec.containers[0].image}{"\n"}{end}'
web-0 gcr.io/google_containers/nginx-slim:0.7
web-1 gcr.io/google_containers/nginx-slim:0.7
web-2 gcr.io/google_containers/nginx-slim:0.7
web-0 k8s.gcr.io/nginx-slim:0.7
web-1 k8s.gcr.io/nginx-slim:0.7
web-2 k8s.gcr.io/nginx-slim:0.7
{% endraw %}
```
@ -539,7 +539,7 @@ statefulset "web" patched
In one terminal window, patch the `web` StatefulSet to change the container image again:
```shell
kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"gcr.io/google_containers/nginx-slim:0.8"}]'
kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"k8s.gcr.io/nginx-slim:0.8"}]'
statefulset "web" patched
```
@ -589,9 +589,9 @@ The Pods in the StatefulSet are updated in reverse ordinal order. Before updating the next
```shell{% raw %}
for p in 0 1 2; do kubectl get po web-$p --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'; echo; done
gcr.io/google_containers/nginx-slim:0.8
gcr.io/google_containers/nginx-slim:0.8
gcr.io/google_containers/nginx-slim:0.8
k8s.gcr.io/nginx-slim:0.8
k8s.gcr.io/nginx-slim:0.8
k8s.gcr.io/nginx-slim:0.8
{% endraw %}
```
@ -617,7 +617,7 @@ statefulset "web" patched
Patch the StatefulSet again to change the container image:
```shell
kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"gcr.io/google_containers/nginx-slim:0.7"}]'
kubectl patch statefulset web --type='json' -p='[{"op": "replace", "path": "/spec/template/spec/containers/0/image", "value":"k8s.gcr.io/nginx-slim:0.7"}]'
statefulset "web" patched
```
@ -646,7 +646,7 @@ web-2 1/1 Running 0 18s
```shell{% raw %}
kubectl get po web-2 --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'
gcr.io/google_containers/nginx-slim:0.8
k8s.gcr.io/nginx-slim:0.8
{% endraw %}
```
@ -683,7 +683,7 @@ web-2 1/1 Running 0 18s
```shell{% raw %}
kubectl get po web-2 --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'
gcr.io/google_containers/nginx-slim:0.7
k8s.gcr.io/nginx-slim:0.7
{% endraw %}
```
@ -721,7 +721,7 @@ web-1 1/1 Running 0 18s
```shell{% raw %}
kubectl get po web-1 --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'
gcr.io/google_containers/nginx-slim:0.8
k8s.gcr.io/nginx-slim:0.8
{% endraw %}
```
@ -767,9 +767,9 @@ web-0 1/1 Running 0 3s
```shell{% raw %}
for p in 0 1 2; do kubectl get po web-$p --template '{{range $i, $c := .spec.containers}}{{$c.image}}{{end}}'; echo; done
gcr.io/google_containers/nginx-slim:0.7
gcr.io/google_containers/nginx-slim:0.7
gcr.io/google_containers/nginx-slim:0.7
k8s.gcr.io/nginx-slim:0.7
k8s.gcr.io/nginx-slim:0.7
k8s.gcr.io/nginx-slim:0.7
{% endraw %}
```
View File
@ -1,4 +1,3 @@
---
apiVersion: v1
kind: Service
metadata:
@ -27,7 +26,7 @@ spec:
spec:
containers:
- name: nginx
image: gcr.io/google_containers/nginx-slim:0.8
image: k8s.gcr.io/nginx-slim:0.8
ports:
- containerPort: 80
name: web
View File
@ -1,4 +1,3 @@
---
apiVersion: v1
kind: Service
metadata:
@ -28,7 +27,7 @@ spec:
spec:
containers:
- name: nginx
image: gcr.io/google_containers/nginx-slim:0.8
image: k8s.gcr.io/nginx-slim:0.8
ports:
- containerPort: 80
name: web
@ -42,4 +41,4 @@ spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: 1Gi
storage: 1Gi
View File
@ -1,4 +1,3 @@
---
apiVersion: v1
kind: Pod
metadata:
View File
@ -7,7 +7,7 @@ cid: community
<section id="hero" class="light-text">
<h1>Community</h1>
</section>
<a href="https://goo.gl/nhbhXw"><img src="/images/KubeCon_NA_Community.png" alt="KubeConNA" width="100%" align="center"></a>
<a href="http://events.linuxfoundation.org/events/kubecon-and-cloudnativecon-europe"><img src="/images/KubeCon_EU_Community.jpg" alt="KubeConEU" width="100%" align="center"></a>
<section id="mainContent">
<main>
<div class="content">
View File
@ -1,6 +1,6 @@
---
approvers:
- bprashanth
- lavalamp
- davidopp
- derekwaynecarr
- erictune
@ -14,44 +14,62 @@ title: Using Admission Controllers
## What are they?
An admission control plug-in is a piece of code that intercepts requests to the Kubernetes
API server prior to persistence of the object, but after the request is authenticated
and authorized. The plug-in code is in the API server process
and must be compiled into the binary in order to be used at this time.
An admission controller is a piece of code that intercepts requests to the
Kubernetes API server prior to persistence of the object, but after the request
is authenticated and authorized. The controllers consist of the
[list](#what-does-each-admission-controller-do) below, are compiled into the
`kube-apiserver` binary, and may only be configured by the cluster
administrator. In that list, there are two special controllers:
MutatingAdmissionWebhook and ValidatingAdmissionWebhook. These execute the
mutating and validating (respectively) [admission control
webhooks](/docs/admin/extensible-admission-controllers.md#external-admission-webhooks)
which are configured in the API.
Each admission control plug-in is run in sequence before a request is accepted into the cluster. If
any of the plug-ins in the sequence reject the request, the entire request is rejected immediately
and an error is returned to the end-user.
Admission controllers may be "validating", "mutating", or both. Mutating
controllers may modify the objects they admit; validating controllers may not.
Admission control plug-ins may mutate the incoming object in some cases to apply system configured
defaults. In addition, admission control plug-ins may mutate related resources as part of request
processing to do things like increment quota usage.
The admission control process proceeds in two phases. In the first phase,
mutating admission controllers are run. In the second phase, validating
admission controllers are run. Note again that some of the controllers are
both. In both phases, the controllers are run in the order specified by the
`--admission-control` flag of `kube-apiserver`.
If any of the controllers in either phase reject the request, the entire
request is rejected immediately and an error is returned to the end-user.
Finally, in addition to sometimes mutating the object in question, admission
controllers may sometimes have side effects, that is, mutate related
resources as part of request processing. Incrementing quota usage is the
canonical example of why this is necessary. Any such side-effect needs a
corresponding reclamation or reconciliation process, as a given admission
controller does not know for sure that a given request will pass all of the
other admission controllers.
## Why do I need them?
Many advanced features in Kubernetes require an admission control plug-in to be enabled in order
Many advanced features in Kubernetes require an admission controller to be enabled in order
to properly support the feature. As a result, a Kubernetes API server that is not properly
configured with the right set of admission control plug-ins is an incomplete server and will not
configured with the right set of admission controllers is an incomplete server and will not
support all the features you expect.
## How do I turn on an admission control plug-in?
## How do I turn on an admission controller?
The Kubernetes API server supports a flag, `admission-control`, that takes a comma-delimited,
ordered list of admission controllers to invoke prior to modifying objects in the cluster, as sketched below.
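For example, a sketch enabling two controllers (all other API server flags omitted):

```shell
kube-apiserver --admission-control=NamespaceLifecycle,LimitRanger <other flags>
```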
## What does each plug-in do?
## What does each admission controller do?
### AlwaysAdmit
Use this plugin by itself to pass-through all requests.
Use this admission controller by itself to pass through all requests.
### AlwaysPullImages
This plug-in modifies every new Pod to force the image pull policy to Always. This is useful in a
This admission controller modifies every new Pod to force the image pull policy to Always. This is useful in a
multitenant cluster so that users can be assured that their private images can only be used by those
who have the credentials to pull them. Without this plug-in, once an image has been pulled to a
who have the credentials to pull them. Without this admission controller, once an image has been pulled to a
node, any pod from any user can use it simply by knowing the image's name (assuming the Pod is
scheduled onto the right node), without any authorization check against the image. When this plug-in
scheduled onto the right node), without any authorization check against the image. When this admission controller
is enabled, images are always pulled prior to starting containers, which means valid credentials are
required.
@ -61,22 +79,22 @@ Rejects all requests. Used for testing.
### DefaultStorageClass
This plug-in observes creation of `PersistentVolumeClaim` objects that do not request any specific storage class
This admission controller observes creation of `PersistentVolumeClaim` objects that do not request any specific storage class
and automatically adds a default storage class to them.
This way, users that do not request any special storage class do not need to care about them at all and they
will get the default one.
This plug-in does not do anything when no default storage class is configured. When more than one storage
This admission controller does not do anything when no default storage class is configured. When more than one storage
class is marked as default, it rejects any creation of `PersistentVolumeClaim` with an error and administrator
must revisit `StorageClass` objects and mark only one as default.
This plugin ignores any `PersistentVolumeClaim` updates; it acts only on creation.
This admission controller ignores any `PersistentVolumeClaim` updates; it acts only on creation.
See [persistent volume](/docs/concepts/storage/persistent-volumes/) documentation about persistent volume claims and
storage classes and how to mark a storage class as default.
### DefaultTolerationSeconds
This plug-in sets the default forgiveness toleration for pods to tolerate
This admission controller sets the default forgiveness toleration for pods to tolerate
the taints `notready:NoExecute` and `unreachable:NoExecute` for 5 minutes,
if the pods don't already have toleration for taints
`node.kubernetes.io/not-ready:NoExecute` or
@ -84,26 +102,26 @@ if the pods don't already have toleration for taints
### DenyExecOnPrivileged (deprecated)
This plug-in will intercept all requests to exec a command in a pod if that pod has a privileged container.
This admission controller will intercept all requests to exec a command in a pod if that pod has a privileged container.
If your cluster supports privileged containers, and you want to restrict the ability of end-users to exec
commands in those containers, we strongly encourage enabling this plug-in.
commands in those containers, we strongly encourage enabling this admission controller.
This functionality has been merged into [DenyEscalatingExec](#denyescalatingexec).
### DenyEscalatingExec
This plug-in will deny exec and attach commands to pods that run with escalated privileges that
This admission controller will deny exec and attach commands to pods that run with escalated privileges that
allow host access. This includes pods that run as privileged, have access to the host IPC namespace, and
have access to the host PID namespace.
If your cluster supports containers that run with escalated privileges, and you want to
restrict the ability of end-users to exec commands in those containers, we strongly encourage
enabling this plug-in.
enabling this admission controller.
### EventRateLimit (alpha)
This plug-in is introduced in v1.9 to mitigate the problem where the API server gets flooded by
This admission controller is introduced in v1.9 to mitigate the problem where the API server gets flooded by
event requests. The cluster admin can specify event rate limits by:
* Ensuring that `eventratelimit.admission.k8s.io/v1alpha1=true` is included in the
@ -137,18 +155,17 @@ EventRateLimit:
See the [EventRateLimit proposal](https://git.k8s.io/community/contributors/design-proposals/api-machinery/admission_control_event_rate_limit.md)
for more details.
### GenericAdmissionWebhook (alpha)
### ExtendedResourceToleration
This plug-in is related to the [Dynamic Admission Control](/docs/admin/extensible-admission-controllers)
introduced in v1.7.
The plug-in calls the webhooks configured via `ExternalAdmissionHookConfiguration`,
and only admits the operation if all the webhooks admit it.
Currently, the plug-in always fails open.
In other words, it ignores the failed calls to a webhook.
This admission controller is introduced in v1.9 to facilitate creation of dedicated nodes with extended resources.
If operators want to create dedicated nodes with extended resources (like GPUs, FPGAs etc.), they are expected to
taint the node with the extended resource name as the key. This admission controller, if enabled, automatically
adds tolerations for such taints to pods requesting extended resources, so users don't have to manually
add these tolerations.
### ImagePolicyWebhook
The ImagePolicyWebhook plug-in allows a backend webhook to make admission decisions. You enable this plug-in by setting the admission-control option as follows:
The ImagePolicyWebhook admission controller allows a backend webhook to make admission decisions. You enable this admission controller by setting the admission-control option as follows:
```shell
--admission-control=ImagePolicyWebhook
@ -185,7 +202,7 @@ clusters:
users:
- name: name-of-api-server
user:
client-certificate: /path/to/cert.pem # cert for the webhook plugin to use
client-certificate: /path/to/cert.pem # cert for the webhook admission controller to use
client-key: /path/to/key.pem # key matching the cert
```
For additional HTTP configuration, refer to the [kubeconfig](/docs/concepts/cluster-administration/authenticate-across-clusters-kubeconfig/) documentation.
@ -260,18 +277,18 @@ In any case, the annotations are provided by the user and are not validated by K
### Initializers (alpha)
This plug-in is introduced in v1.7.
The plug-in determines the initializers of a resource based on the existing
This admission controller is introduced in v1.7.
The admission controller determines the initializers of a resource based on the existing
`InitializerConfiguration`s. It sets the pending initializers by modifying the
metadata of the resource to be created.
For more information, please check [Dynamic Admission Control](/docs/admin/extensible-admission-controllers).
For more information, please check [Dynamic Admission Control](/docs/admin/extensible-admission-controllers.md).
### InitialResources (experimental)
This plug-in observes pod creation requests. If a container omits compute resource requests and limits,
then the plug-in auto-populates a compute resource request based on historical usage of containers running the same image.
This admission controller observes pod creation requests. If a container omits compute resource requests and limits,
then the admission controller auto-populates a compute resource request based on historical usage of containers running the same image.
If there is not enough data to make a decision the Request is left unchanged.
When the plug-in sets a compute resource request, it does this by *annotating* the
When the admission controller sets a compute resource request, it does this by *annotating*
the pod spec rather than mutating the `container.resources` fields.
The annotations added contain the information on what compute resources were auto-populated.
@ -279,69 +296,104 @@ See the [InitialResouces proposal](https://git.k8s.io/community/contributors/des
### LimitPodHardAntiAffinity
This plug-in denies any pod that defines `AntiAffinity` topology key other than
This admission controller denies any pod that defines `AntiAffinity` topology key other than
`kubernetes.io/hostname` in `requiredDuringSchedulingRequiredDuringExecution`.
### LimitRanger
This plug-in will observe the incoming request and ensure that it does not violate any of the constraints
This admission controller will observe the incoming request and ensure that it does not violate any of the constraints
enumerated in the `LimitRange` object in a `Namespace`. If you are using `LimitRange` objects in
your Kubernetes deployment, you MUST use this plug-in to enforce those constraints. LimitRanger can also
your Kubernetes deployment, you MUST use this admission controller to enforce those constraints. LimitRanger can also
be used to apply default resource requests to Pods that don't specify any; currently, the default LimitRanger
applies a 0.1 CPU requirement to all Pods in the `default` namespace.
See the [limitRange design doc](https://git.k8s.io/community/contributors/design-proposals/resource-management/admission_control_limit_range.md) and the [example of Limit Range](/docs/tasks/configure-pod-container/limit-range/) for more details.
### MutatingAdmissionWebhook (beta in 1.9)
This admission controller calls any mutating webhooks which match the request. Matching
webhooks are called in serial; each one may modify the object if it desires.
This admission controller (as implied by the name) only runs in the mutating phase.
If a webhook called by this has side effects (for example, decrementing quota) it
*must* have a reconciliation system, as it is not guaranteed that subsequent
webhooks or validating admission controllers will permit the request to finish.
If you disable the MutatingAdmissionWebhook, you must also disable the
`MutatingWebhookConfiguration` object in the `admissionregistration/v1beta1`
group/version via the `--runtime-config` flag (both are on by default in
versions >= 1.9).
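A sketch of the corresponding flag, assuming the fully qualified group name `admissionregistration.k8s.io` (this disables the whole group/version, covering both webhook configuration kinds):

```shell
kube-apiserver --runtime-config=admissionregistration.k8s.io/v1beta1=false <other flags>
```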
#### Use caution when authoring and installing mutating webhooks
* Users may be confused when the objects they try to create are different from
what they get back.
* Built in control loops may break when the objects they try to create are
different when read back.
* Setting originally unset fields is less likely to cause problems than
overwriting fields set in the original request. Avoid doing the latter.
* This is a beta feature. Future versions of Kubernetes may restrict the types of
mutations these webhooks can make.
* Future changes to control loops for built-in resources or third-party resources
may break webhooks that work well today. Even when the webhook installation API
is finalized, not all possible webhook behaviors will be guaranteed to be supported
indefinitely.
### NamespaceAutoProvision
This plug-in examines all incoming requests on namespaced resources and checks
This admission controller examines all incoming requests on namespaced resources and checks
if the referenced namespace does exist.
It creates a namespace if it cannot be found.
This plug-in is useful in deployments that do not want to restrict creation of
This admission controller is useful in deployments that do not want to restrict creation of
a namespace prior to its usage.
### NamespaceExists
This plug-in checks all requests on namespaced resources other than `Namespace` itself.
This admission controller checks all requests on namespaced resources other than `Namespace` itself.
If the namespace referenced from a request doesn't exist, the request is rejected.
### NamespaceLifecycle
This plug-in enforces that a `Namespace` that is undergoing termination cannot have new objects created in it,
and ensures that requests in a non-existent `Namespace` are rejected. This plug-in also prevents deletion of
This admission controller enforces that a `Namespace` that is undergoing termination cannot have new objects created in it,
and ensures that requests in a non-existent `Namespace` are rejected. This admission controller also prevents deletion of
three system reserved namespaces `default`, `kube-system`, `kube-public`.
A `Namespace` deletion kicks off a sequence of operations that remove all objects (pods, services, etc.) in that
namespace. In order to enforce integrity of that process, we strongly recommend running this plug-in.
namespace. In order to enforce integrity of that process, we strongly recommend running this admission controller.
### NodeRestriction
This plug-in limits the `Node` and `Pod` objects a kubelet can modify. In order to be limited by this admission plugin,
This admission controller limits the `Node` and `Pod` objects a kubelet can modify. In order to be limited by this admission controller,
kubelets must use credentials in the `system:nodes` group, with a username in the form `system:node:<nodeName>`.
Such kubelets will only be allowed to modify their own `Node` API object, and only modify `Pod` API objects that are bound to their node.
Future versions may add additional restrictions to ensure kubelets have the minimal set of permissions required to operate correctly.
### OwnerReferencesPermissionEnforcement
This plug-in protects the access to the `metadata.ownerReferences` of an object
This admission controller protects the access to the `metadata.ownerReferences` of an object
so that only users with "delete" permission to the object can change it.
This plug-in also protects the access to `metadata.ownerReferences[x].blockOwnerDeletion`
This admission controller also protects the access to `metadata.ownerReferences[x].blockOwnerDeletion`
of an object, so that only users with "update" permission to the `finalizers`
subresource of the referenced *owner* can change it.
### Persistent Volume Claim Protection (alpha)
{% assign for_k8s_version="v1.9" %}{% include feature-state-alpha.md %}
The `PVCProtection` admission controller adds the `kubernetes.io/pvc-protection` finalizer to newly created Persistent Volume Claims (PVCs). If a user deletes a PVC, it is not removed until the finalizer is removed from the PVC by the PVC Protection Controller. Refer to [PVC Protection](/docs/concepts/storage/persistent-volumes/#persistent-volume-claim-protection) for more detailed information.
### PersistentVolumeLabel
This plug-in automatically attaches region or zone labels to PersistentVolumes
as defined by the cloud provider, e.g. GCE and AWS.
This admission controller automatically attaches region or zone labels to PersistentVolumes
as defined by the cloud provider (for example, GCE or AWS).
It helps ensure the Pods and the PersistentVolumes mounted are in the same
region and/or zone.
If the plug-in doesn't support automatic labelling your PersistentVolumes, you
If the admission controller doesn't support automatically labelling your PersistentVolumes, you
may need to add the labels manually to prevent pods from mounting volumes from
a different zone.
### PodNodeSelector
This plug-in defaults and limits what node selectors may be used within a namespace by reading a namespace annotation and a global configuration.
This admission controller defaults and limits what node selectors may be used within a namespace by reading a namespace annotation and a global configuration.
#### Configuration File Format
PodNodeSelector uses the admission config file `--admission-control-config-file` to set configuration options for the behavior of the backend.
@ -371,13 +423,13 @@ metadata:
### PersistentVolumeClaimResize
This plug-in implements additional validations for checking incoming `PersistentVolumeClaim` resize requests.
This admission controller implements additional validations for checking incoming `PersistentVolumeClaim` resize requests.
**Note:** Support for volume resizing is available as an alpha feature. Admins must set the feature gate `ExpandPersistentVolumes`
to `true` to enable resizing.
{: .note}
After enabling the `ExpandPersistentVolumes` feature gate, enabling the `PersistentVolumeClaimResize` admission
plug-in is recommended, too. This plug-in prevents resizing of all claims by default unless a claim's `StorageClass`
controller is recommended, too. This admission controller prevents resizing of all claims by default unless a claim's `StorageClass`
explicitly enables resizing by setting `allowVolumeExpansion` to `true`.
For example: all `PersistentVolumeClaim`s created from the following `StorageClass` support volume expansion:
@ -400,14 +452,14 @@ For more information about persistent volume claims, see ["PersistentVolumeClaim
### PodPreset
This plug-in injects a pod with the fields specified in a matching PodPreset.
This admission controller injects a pod with the fields specified in a matching PodPreset.
See also [PodPreset concept](/docs/concepts/workloads/pods/podpreset/) and
[Inject Information into Pods Using a PodPreset](/docs/tasks/inject-data-application/podpreset)
for more information.
### PodSecurityPolicy
This plug-in acts on creation and modification of the pod and determines if it should be admitted
This admission controller acts on creation and modification of the pod and determines if it should be admitted
based on the requested security context and the available Pod Security Policies.
For Kubernetes < 1.6.0, the API Server must enable the extensions/v1beta1/podsecuritypolicy API
@ -418,7 +470,7 @@ for more information.
### PodTolerationRestriction
This plug-in first verifies any conflict between a pod's tolerations and its
This admission controller first verifies any conflict between a pod's tolerations and its
namespace's tolerations, and rejects the pod request if there is a conflict.
It then merges the namespace's tolerations into the pod's tolerations.
The resulting tolerations are checked against the namespace's whitelist of
@ -440,47 +492,78 @@ The priority admission controller uses the `priorityClassName` field and populat
### ResourceQuota
This plug-in will observe the incoming request and ensure that it does not violate any of the constraints
This admission controller will observe the incoming request and ensure that it does not violate any of the constraints
enumerated in the `ResourceQuota` object in a `Namespace`. If you are using `ResourceQuota`
objects in your Kubernetes deployment, you MUST use this plug-in to enforce quota constraints.
objects in your Kubernetes deployment, you MUST use this admission controller to enforce quota constraints.
See the [resourceQuota design doc](https://git.k8s.io/community/contributors/design-proposals/resource-management/admission_control_resource_quota.md) and the [example of Resource Quota](/docs/concepts/policy/resource-quotas/) for more details.
It is strongly encouraged that this plug-in is configured last in the sequence of admission control plug-ins. This is
It is strongly encouraged that this admission controller is configured last in the sequence of admission controllers. This is
so that quota is not prematurely incremented only for the request to be rejected later in admission control.
### SecurityContextDeny
This plug-in will deny any pod that attempts to set certain escalating [SecurityContext](/docs/user-guide/security-context) fields. This should be enabled if a cluster doesn't utilize [pod security policies](/docs/user-guide/pod-security-policy) to restrict the set of values a security context can take.
This admission controller will deny any pod that attempts to set certain escalating [SecurityContext](/docs/user-guide/security-context) fields. This should be enabled if a cluster doesn't utilize [pod security policies](/docs/user-guide/pod-security-policy) to restrict the set of values a security context can take.
### ServiceAccount
This plug-in implements automation for [serviceAccounts](/docs/user-guide/service-accounts).
We strongly recommend using this plug-in if you intend to make use of Kubernetes `ServiceAccount` objects.
This admission controller implements automation for [serviceAccounts](/docs/user-guide/service-accounts).
We strongly recommend using this admission controller if you intend to make use of Kubernetes `ServiceAccount` objects.
### ValidatingAdmissionWebhook (alpha in 1.8; beta in 1.9)
This admission controller calls any validating webhooks which match the request. Matching
webhooks are called in parallel; if any of them rejects the request, the request
fails. This admission controller only runs in the validation phase; the webhooks it calls may not
mutate the object, as opposed to the webhooks called by the `MutatingAdmissionWebhook` admission controller.
If a webhook called by this has side effects (for example, decrementing quota) it
*must* have a reconciliation system, as it is not guaranteed that subsequent
webhooks or other validating admission controllers will permit the request to finish.
If you disable the ValidatingAdmissionWebhook, you must also disable the
`ValidatingWebhookConfiguration` object in the `admissionregistration/v1beta1`
group/version via the `--runtime-config` flag (both are on by default in
versions >= 1.9).
## Is there a recommended set of plug-ins to use?
## Is there a recommended set of admission controllers to use?
Yes.
For Kubernetes >= 1.6.0, we strongly recommend running the following set of admission control plug-ins (order matters):
For Kubernetes >= 1.9.0, we strongly recommend running the following set of admission controllers (order matters):
```shell
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ValidatingAdmissionWebhook,ResourceQuota,DefaultTolerationSeconds,MutatingAdmissionWebhook
```
It's worth reiterating that in 1.9 and up, these run in a mutating phase
followed by a validating phase, and that, for example, `ResourceQuota` runs in the
validating phase and is therefore the last admission controller to run.
`DefaultTolerationSeconds` and `MutatingAdmissionWebhook` appear after it in this
list, but they run in the mutating phase.
For earlier versions, there was no concept of validating vs mutating and the
admission controllers ran in the exact order specified.
For Kubernetes >= 1.6.0, we strongly recommend running the following set of admission controllers (order matters):
```shell
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,PersistentVolumeLabel,DefaultStorageClass,ResourceQuota,DefaultTolerationSeconds
```
For Kubernetes >= 1.4.0, we strongly recommend running the following set of admission control plug-ins (order matters):
For Kubernetes >= 1.4.0, we strongly recommend running the following set of admission controllers (order matters):
```shell
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,DefaultStorageClass,ResourceQuota
```
For Kubernetes >= 1.2.0, we strongly recommend running the following set of admission control plug-ins (order matters):
For Kubernetes >= 1.2.0, we strongly recommend running the following set of admission controllers (order matters):
```shell
--admission-control=NamespaceLifecycle,LimitRanger,ServiceAccount,ResourceQuota
```
For Kubernetes >= 1.0.0, we strongly recommend running the following set of admission control plug-ins (order matters):
For Kubernetes >= 1.0.0, we strongly recommend running the following set of admission controllers (order matters):
```shell
--admission-control=NamespaceLifecycle,LimitRanger,SecurityContextDeny,ServiceAccount,PersistentVolumeLabel,ResourceQuota
View File
@ -196,10 +196,10 @@ spec:
metadata:
# ...
spec:
serviceAccountName: bob-the-bot
containers:
- name: nginx
image: nginx:1.7.9
serviceAccountName: bob-the-bot
```
Service account bearer tokens are perfectly valid to use outside the cluster and
@ -317,7 +317,7 @@ For an identity provider to work with Kubernetes it must:
3. Have a CA signed certificate (even if the CA is not a commercial CA or is self signed)
A note about requirement #3 above, requiring a CA signed certificate. If you deploy your own identity provider (as opposed to one of the cloud providers like Google or Microsoft) you MUST have your identity provider's web server certificate signed by a certificate with the `CA` flag set to `TRUE`, even if it is self signed. This is due to Go's TLS client implementation being very strict about the standards around certificate validation. If you don't have a CA handy, you can use [this script](https://github.com/coreos/dex/blob/1ee5920c54f5926d6468d2607c728b71cfe98092/examples/k8s/gencert.sh) from the CoreOS team to create a simple CA and a signed certificate and key pair.
Or you can use [this similar script](https://raw.githubusercontent.com/TremoloSecurity/openunison-qs-kubernetes/master/makecerts.sh) that generates SHA256 certs with a longer life and larger key size.
Or you can use [this similar script](https://raw.githubusercontent.com/TremoloSecurity/openunison-qs-kubernetes/master/src/main/bash/makessl.sh) that generates SHA256 certs with a longer life and larger key size.
Setup instructions for specific systems:
View File
@ -8,20 +8,34 @@ title: Overview
---
{% capture overview %}
Learn more about Kubernetes authorization, including details about creating policies using the supported authorization modules.
Learn more about Kubernetes authorization, including details about creating
policies using the supported authorization modules.
{% endcapture %}
{% capture body %}
In Kubernetes, you must be authenticated (logged in) before your request can be authorized (granted permission to access). For information about authentication, see [Accessing Control Overview](/docs/admin/accessing-the-api/).
In Kubernetes, you must be authenticated (logged in) before your request can be
authorized (granted permission to access). For information about authentication,
see [Access Control Overview](/docs/admin/accessing-the-api/).
Kubernetes expects attributes that are common to REST API requests. This means that Kubernetes authorization works with existing organization-wide or cloud-provider-wide access control systems which may handle other APIs besides the Kubernetes API.
Kubernetes expects attributes that are common to REST API requests. This means
that Kubernetes authorization works with existing organization-wide or
cloud-provider-wide access control systems which may handle other APIs besides
the Kubernetes API.
## Determine Whether a Request is Allowed or Denied
Kubernetes authorizes API requests using the API server. It evaluates all of the request attributes against all policies and allows or denies the request. All parts of an API request must be allowed by some policy in order to proceed. This means that permissions are denied by default.
Kubernetes authorizes API requests using the API server. It evaluates all of the
request attributes against all policies and allows or denies the request. All
parts of an API request must be allowed by some policy in order to proceed. This
means that permissions are denied by default.
(Although Kubernetes uses the API server, access controls and policies that depend on specific fields of specific kinds of objects are handled by Admission Controllers.)
(Although Kubernetes uses the API server, access controls and policies that
depend on specific fields of specific kinds of objects are handled by Admission
Controllers.)
When multiple authorization modules are configured, each is checked in sequence, and if any module authorizes the request, then the request can proceed. If all modules deny the request, then the request is denied (HTTP status code 403).
When multiple authorization modules are configured, each is checked in sequence.
If any authorizer approves or denies a request, that decision is immediately
returned and no other authorizer is consulted. If all modules have no opinion on
the request, then the request is denied. A deny returns an HTTP status code 403.
## Review Your Request Attributes
Kubernetes reviews only the following API request attributes:
@ -33,14 +47,15 @@ Kubernetes reviews only the following API request attributes:
* **Request path** - Path to miscellaneous non-resource endpoints like `/api` or `/healthz`.
* **API request verb** - API verbs `get`, `list`, `create`, `update`, `patch`, `watch`, `proxy`, `redirect`, `delete`, and `deletecollection` are used for resource requests. To determine the request verb for a resource API endpoint, see **Determine the request verb** below.
* **HTTP request verb** - HTTP verbs `get`, `post`, `put`, and `delete` are used for non-resource requests.
* **Resource** - The ID or name of the resource that is being accessed (for resource requests only)
--* For resource requests using `get`, `update`, `patch`, and `delete` verbs, you must provide the resource name.
* **Resource** - The ID or name of the resource that is being accessed (for resource requests only) -- For resource requests using `get`, `update`, `patch`, and `delete` verbs, you must provide the resource name.
* **Subresource** - The subresource that is being accessed (for resource requests only).
* **Namespace** - The namespace of the object that is being accessed (for namespaced resource requests only).
* **API group** - The API group being accessed (for resource requests only). An empty string designates the [core API group](/docs/concepts/overview/kubernetes-api/).
## Determine the Request Verb
To determine the request verb for a resource API endpoint, review the HTTP verb used and whether or not the request acts on an individual resource or a collection of resources:
To determine the request verb for a resource API endpoint, review the HTTP verb
used and whether or not the request acts on an individual resource or a
collection of resources:
HTTP verb | request verb
----------|---------------
@ -87,8 +102,9 @@ $ kubectl auth can-i list secrets --namespace dev --as dave
no
```
`SelfSubjectAccessReview` is part of the `authorization.k8s.io` API group, which exposes the
API server authorization to external services. Other resources in this group include:
`SelfSubjectAccessReview` is part of the `authorization.k8s.io` API group, which
exposes the API server authorization to external services. Other resources in
this group include:
* `SubjectAccessReview` - Access review for any user, not just the current one. Useful for delegating authorization decisions to the API server. For example, the kubelet and extension API servers use this to determine user access to their own APIs.
* `LocalSubjectAccessReview` - Like `SubjectAccessReview` but restricted to a specific namespace.
@ -121,11 +137,13 @@ spec:
verb: create
status:
allowed: true
denied: false
```
## Using Flags for Your Authorization Module
You must include a flag in your policy to indicate which authorization module your policies include:
You must include a flag in your policy to indicate which authorization module
your policies include:
The following flags can be used:
@ -136,12 +154,8 @@ The following flags can be used:
* `--authorization-mode=AlwaysDeny` This flag blocks all requests. Use this flag only for testing.
* `--authorization-mode=AlwaysAllow` This flag allows all requests. Use this flag only if you do not require authorization for your API requests.
You can choose more than one authorization module. If one of the modes is `AlwaysAllow`, then it overrides the other modes and all API requests are allowed.
## Versioning
For version 1.2, clusters created by kube-up.sh are configured so that no authorization is required for any request.
As of version 1.3, clusters created by kube-up.sh are configured so that the ABAC authorization modules are enabled. However, its input file is initially set to allow all users to do all operations. The cluster administrator needs to edit that file, or configure a different authorizer to restrict what users can do.
You can choose more than one authorization module. Modules are checked in order
so an earlier module has higher priority to allow or deny a request.
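For example, a sketch chaining two authorizers so that RBAC is consulted first and the webhook only sees requests RBAC has no opinion on (all other flags omitted):

```shell
kube-apiserver --authorization-mode=RBAC,Webhook <other flags>
```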
{% endcapture %}
{% capture whatsnext %}
@ -153,7 +167,16 @@ As of version 1.3, clusters created by kube-up.sh are configured so that the A
## Privilege escalation via pod creation
Users who have ability to create pods in a namespace can potentially escalate their privileges within that namespace. They can create pods that access secrets the user cannot themselves read, or that run under a service account with different/greater permissions.
Users who have the ability to create pods in a namespace can potentially
escalate their privileges within that namespace. They can create pods that access
secrets the user cannot themselves read, or that run under a service account
with different/greater permissions.
**Caution:** System administrators, use care when granting access to pod creation. A user granted permission to create pods (or controllers that create pods) in the namespace can: read all secrets in the namespace; read all config maps in the namespace; and impersonate any service account in the namespace and take any action the account could take. This applies regardless of authorization mode.
**Caution:** System administrators, use care when granting access to pod
creation. A user granted permission to create pods (or controllers that create
pods) in the namespace can: read all secrets in the namespace; read all config
maps in the namespace; and impersonate any service account in the namespace and
take any action the account could take. This applies regardless of authorization
mode.
{: .caution}
View File
@ -191,6 +191,76 @@ Because resource names are not present in the URL for create, list, watch, and d
those verbs would not be allowed by a rule with resourceNames set, since the resourceNames portion of the
rule would not match the request.
### Aggregated ClusterRoles
As of 1.9, ClusterRoles can be created by combining other ClusterRoles using an `aggregationRule`. The
permissions of aggregated ClusterRoles are controller-managed, and filled in by unioning the rules of any
ClusterRole that matches the provided label selector. An example aggregated ClusterRole:
```yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: monitoring
aggregationRule:
clusterRoleSelectors:
- matchLabels:
rbac.example.com/aggregate-to-monitoring: "true"
rules: [] # Rules are automatically filled in by the controller manager.
```
Creating a ClusterRole that matches the label selector will add rules to the aggregated ClusterRole. In this case
rules can be added to the "monitoring" ClusterRole by creating another ClusterRole that has the label
`rbac.example.com/aggregate-to-monitoring: "true"`.
```yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: monitoring-endpoints
labels:
rbac.example.com/aggregate-to-monitoring: "true"
# These rules will be added to the "monitoring" role.
rules:
- apiGroups: [""]
resources: ["services", "endpoints", "pods"]
verbs: ["get", "list", "watch"]
```
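Once the controller manager has reconciled the selector, the union shows up in the aggregated role itself; a quick check (a sketch, using the role name from the example above):

```shell
kubectl get clusterrole monitoring -o yaml
```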
The default user-facing roles (described below) use ClusterRole aggregation. This lets admins include rules
for custom resources, such as those served by CustomResourceDefinitions or Aggregated API servers, on the
default roles.
For example, the following ClusterRoles let the "admin" and "edit" default roles manage the custom resource
"CronTabs" and the "view" role perform read-only actions on the resource.
```yaml
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: aggregate-cron-tabs-edit
labels:
# Add these permissions to the "admin" and "edit" default roles.
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rules:
- apiGroups: ["stable.example.com"]
resources: ["crontabs"]
verbs: ["get", "list", "watch", "create", "update", "patch", "delete"]
---
kind: ClusterRole
apiVersion: rbac.authorization.k8s.io/v1
metadata:
name: aggregate-cron-tabs-view
labels:
# Add these permissions to the "view" default role.
rbac.authorization.k8s.io/aggregate-to-view: "true"
rules:
- apiGroups: ["stable.example.com"]
resources: ["crontabs"]
verbs: ["get", "list", "watch"]
```
#### Role Examples
Only the `rules` section is shown in the following examples.
@ -402,6 +472,18 @@ They include super-user roles (`cluster-admin`),
roles intended to be granted cluster-wide using ClusterRoleBindings (`cluster-status`),
and roles intended to be granted within particular namespaces using RoleBindings (`admin`, `edit`, `view`).
As of 1.9, user-facing roles use [ClusterRole Aggregation](#aggregated-clusterroles) to allow admins to include
rules for custom resources on these roles. To add rules to the "admin", "edit", or "view" role, create a
ClusterRole with one or more of the following labels:
```yaml
metadata:
labels:
rbac.authorization.k8s.io/aggregate-to-admin: "true"
rbac.authorization.k8s.io/aggregate-to-edit: "true"
rbac.authorization.k8s.io/aggregate-to-view: "true"
```
<table>
<colgroup><col width="25%"><col width="25%"><col></colgroup>
<tr>
@ -546,6 +628,7 @@ These roles include:
* system:controller:node-controller
* system:controller:persistent-volume-binder
* system:controller:pod-garbage-collector
* system:controller:pvc-protection-controller
* system:controller:replicaset-controller
* system:controller:replication-controller
* system:controller:resourcequota-controller

View File

@ -31,8 +31,10 @@ A configuration example which uses HTTPS client auth:
clusters:
- name: name-of-remote-authz-service
cluster:
certificate-authority: /path/to/ca.pem # CA for verifying the remote service.
server: https://authz.example.com/authorize # URL of remote service to query. Must use 'https'.
# CA for verifying the remote service.
certificate-authority: /path/to/ca.pem
# URL of remote service to query. Must use 'https'. May not include parameters.
server: https://authz.example.com/authorize
# users refers to the API Server's webhook configuration.
users:

View File

@ -86,7 +86,7 @@ For example:
```yaml
containers:
- name: fluentd-cloud-logging
image: gcr.io/google_containers/fluentd-gcp:1.16
image: k8s.gcr.io/fluentd-gcp:1.16
resources:
limits:
cpu: 100m

View File

@ -131,7 +131,7 @@ seconds to honor the new configuration. Then, `"podimage.example.com"` will be
appended to the `metadata.initializers.pending` field of newly created pods. You
should already have a ready "podimage" initializer controller that handles pods
whose `metadata.initializers.pending[0].name="podimage.example.com"`. Otherwise
the pods will stuck uninitialized.
the pods will be stuck in an uninitialized state.
Make sure that all expansions of the `<apiGroup, apiVersions, resources>` tuple
in a `rule` are valid. If they are not, separate them in different `rules`.
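For reference, these rules live in an InitializerConfiguration object. A minimal sketch, assuming the `podimage.example.com` initializer from the surrounding text and illustrative group/version/resource values:

```yaml
apiVersion: admissionregistration.k8s.io/v1alpha1
kind: InitializerConfiguration
metadata:
  name: podimage-config   # illustrative name
initializers:
  # Initializer names must be fully qualified.
  - name: podimage.example.com
    rules:
      # Every expansion of <apiGroups, apiVersions, resources> must be valid.
      - apiGroups: [""]
        apiVersions: ["v1"]
        resources: ["pods"]
```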

View File

@ -87,9 +87,9 @@ images or you can build them yourself from HEAD.
### Using official release images
As part of every Kubernetes release, official release images are pushed to
`gcr.io/google_containers`. To use the images in this repository, you can
`k8s.gcr.io`. To use the images in this repository, you can
set the container image fields in the following configs to point to the
images in this repository. `gcr.io/google_containers/hyperkube` image
images in this repository. `k8s.gcr.io/hyperkube` image
includes the federation-apiserver and federation-controller-manager
binaries, so you can point the corresponding configs for those components
to the hyperkube image.
@ -315,8 +315,8 @@ official release images or you can build from HEAD.
#### Using official release images
As part of every release, images are pushed to `gcr.io/google_containers`. To use
these images, set env var `FEDERATION_PUSH_REPO_BASE=gcr.io/google_containers`
As part of every release, images are pushed to `k8s.gcr.io`. To use
these images, set env var `FEDERATION_PUSH_REPO_BASE=k8s.gcr.io`
This will always use the latest image.
To use the hyperkube image which includes federation-apiserver and
federation-controller-manager from a specific release, set the
@ -345,7 +345,7 @@ Once you have the images, you can run these as pods on your existing kubernetes
The command to run these pods on an existing GCE cluster will look like:
```shell
$ KUBERNETES_PROVIDER=gce FEDERATION_DNS_PROVIDER=google-clouddns FEDERATION_NAME=myfederation DNS_ZONE_NAME=myfederation.example FEDERATION_PUSH_REPO_BASE=gcr.io/google_containers ./federation/cluster/federation-up.sh
$ KUBERNETES_PROVIDER=gce FEDERATION_DNS_PROVIDER=google-clouddns FEDERATION_NAME=myfederation DNS_ZONE_NAME=myfederation.example FEDERATION_PUSH_REPO_BASE=k8s.gcr.io ./federation/cluster/federation-up.sh
```
`KUBERNETES_PROVIDER` is the cloud provider.

View File

@ -5,7 +5,7 @@ metadata:
spec:
hostNetwork: true
containers:
- image: gcr.io/google_containers/etcd:3.0.17
- image: k8s.gcr.io/etcd:3.0.17
name: etcd-container
command:
- /usr/local/bin/etcd

View File

@ -175,6 +175,57 @@ For pods that you deploy into the cluster, the `kubernetes` service/dns name sho
For external users of the API (e.g. the `kubectl` command line interface, continuous build pipelines, or other clients) you will want to configure
them to talk to the external load balancer's IP address.
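For instance, a client-side kubeconfig entry pointing at the load balancer might look like the following sketch (the server address and file paths are illustrative placeholders):

```yaml
apiVersion: v1
kind: Config
clusters:
- name: ha-cluster
  cluster:
    # Address of the external load balancer, not an individual replica.
    server: https://lb.example.com:6443
    certificate-authority: /path/to/ca.pem
users:
- name: admin
  user:
    client-certificate: /path/to/admin.pem
    client-key: /path/to/admin-key.pem
contexts:
- name: ha-context
  context:
    cluster: ha-cluster
    user: admin
current-context: ha-context
```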
### Endpoint reconciler
As mentioned in the previous section, the apiserver is exposed through a
service called `kubernetes`. The endpoints for this service correspond to
the apiserver replicas that we just deployed.
Since updating endpoints and services requires the apiserver to be up, there
is special code in the apiserver to let it update its own endpoints directly.
This code is called the "reconciler," because it reconciles the list of
endpoints stored in etcd, and the list of endpoints that are actually up
and running.
Prior to Kubernetes 1.9, the reconciler expects you to provide the
number of endpoints (i.e., the number of apiserver replicas) through
a command-line flag (e.g. `--apiserver-count=3`). If more replicas
are available, the reconciler trims down the list of endpoints.
As a result, if a node running a replica of the apiserver crashes
and gets replaced, the list of endpoints is eventually updated.
However, until the replica gets replaced, its endpoint stays in
the list. During that time, a fraction of the API requests sent
to the `kubernetes` service will fail, because they will be sent
to a down endpoint.
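You can inspect the endpoint list that the reconciler maintains at any time, using only standard kubectl commands:

```shell
# The `kubernetes` service lives in the default namespace; its endpoints
# are the apiserver replicas currently listed by the reconciler.
kubectl get endpoints kubernetes --namespace default -o yaml
```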
This is why the previous section advises you to deploy a load
balancer, and access the API through that load balancer. The
load balancer will directly assess the health of the apiserver
replicas, and make sure that requests are not sent to crashed
instances.
If you do not add the `--apiserver-count` flag, the value defaults to 1.
Your cluster will work correctly, but each apiserver replica will
continuously try to add itself to the list of endpoints while removing
the other ones, causing a lot of extraneous updates in kube-proxy
and other components.
Starting with Kubernetes 1.9, a new reconciler implementation is available.
It uses a *lease* that is regularly renewed by each apiserver
replica. When a replica is down, it stops renewing its lease, and
the other replicas notice that the lease expired and remove it
from the list of endpoints. You can switch to the new reconciler
by adding the flag `--endpoint-reconciler-type=lease` when starting
your apiserver replicas.
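A sketch of what this looks like when starting a replica (every flag other than the reconciler type is an illustrative placeholder):

```shell
kube-apiserver \
  --etcd-servers=https://etcd0.example.com:2379 \
  --service-cluster-ip-range=10.0.0.0/16 \
  --endpoint-reconciler-type=lease
```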
If you want to know more, you can check the following resources:
- [issue kubernetes/kubernetes#22609](https://github.com/kubernetes/kubernetes/issues/22609),
which gives additional context
- [master/reconcilers/mastercount.go](https://github.com/kubernetes/kubernetes/blob/dd9981d038012c120525c9e6df98b3beb3ef19e1/pkg/master/reconcilers/mastercount.go#L63),
the implementation of the master count reconciler
- [PR kubernetes/kubernetes#51698](https://github.com/kubernetes/kubernetes/pull/51698),
which adds support for the lease reconciler
## Master elected components
So far we have set up state storage, and we have set up the API server, but we haven't run anything that actually modifies

View File

@ -6,7 +6,7 @@ spec:
hostNetwork: true
containers:
- name: kube-apiserver
image: gcr.io/google_containers/kube-apiserver:9680e782e08a1a1c94c656190011bd02
image: k8s.gcr.io/kube-apiserver:9680e782e08a1a1c94c656190011bd02
command:
- /bin/sh
- -c

View File

@ -10,7 +10,7 @@ spec:
- /usr/local/bin/kube-controller-manager --master=127.0.0.1:8080 --cluster-name=e2e-test-bburns
--cluster-cidr=10.245.0.0/16 --allocate-node-cidrs=true --cloud-provider=gce --service-account-private-key-file=/srv/kubernetes/server.key
--v=2 --leader-elect=true 1>>/var/log/kube-controller-manager.log 2>&1
image: gcr.io/google_containers/kube-controller-manager:fda24638d51a48baa13c35337fcd4793
image: k8s.gcr.io/kube-controller-manager:fda24638d51a48baa13c35337fcd4793
livenessProbe:
httpGet:
path: /healthz

View File

@ -6,7 +6,7 @@ spec:
hostNetwork: true
containers:
- name: kube-scheduler
image: gcr.io/google_containers/kube-scheduler:34d0b8f8b31e27937327961528739bc9
image: k8s.gcr.io/kube-scheduler:34d0b8f8b31e27937327961528739bc9
command:
- /bin/sh
- -c

View File

@ -6,7 +6,7 @@ spec:
hostNetwork: true
containers:
- name: scheduler-elector
image: gcr.io/google_containers/podmaster:1.1
image: k8s.gcr.io/podmaster:1.1
command:
- /podmaster
- --etcd-servers=http://127.0.0.1:4001
@ -20,7 +20,7 @@ spec:
- mountPath: /manifests
name: manifests
- name: controller-manager-elector
image: gcr.io/google_containers/podmaster:1.1
image: k8s.gcr.io/podmaster:1.1
command:
- /podmaster
- --etcd-servers=http://127.0.0.1:4001

View File

@ -5,7 +5,7 @@ metadata:
spec:
containers:
- name: kubernetes-serve-hostname
image: gcr.io/google_containers/serve_hostname
image: k8s.gcr.io/serve_hostname
resources:
limits:
cpu: "3"

View File

@ -7,7 +7,7 @@ metadata:
spec:
containers:
- name: kubernetes-serve-hostname
image: gcr.io/google_containers/serve_hostname
image: k8s.gcr.io/serve_hostname
resources:
limits:
cpu: "1"

View File

@ -7,4 +7,4 @@ metadata:
spec:
containers:
- name: pod-with-no-annotation-container
image: gcr.io/google_containers/pause:2.0
image: k8s.gcr.io/pause:2.0

View File

@ -8,4 +8,4 @@ spec:
schedulerName: default-scheduler
containers:
- name: pod-with-default-annotation-container
image: gcr.io/google_containers/pause:2.0
image: k8s.gcr.io/pause:2.0

View File

@ -8,4 +8,4 @@ spec:
schedulerName: my-scheduler
containers:
- name: pod-with-second-annotation-container
image: gcr.io/google_containers/pause:2.0
image: k8s.gcr.io/pause:2.0

View File

@ -43,7 +43,7 @@ placement, and so if the zones in your cluster are heterogeneous
(e.g. different numbers of nodes, different types of nodes, or
different pod resource requirements), this might prevent perfectly
even spreading of your pods across zones. If desired, you can use
homogenous zones (same number and types of nodes) to reduce the
homogeneous zones (same number and types of nodes) to reduce the
probability of unequal spreading.
When persistent volumes are created, the `PersistentVolumeLabel`

View File

@ -1,4 +0,0 @@
approvers:
- derekwaynecarr
- janetkuo

View File

@ -1,10 +0,0 @@
{
"kind": "Namespace",
"apiVersion": "v1",
"metadata": {
"name": "development",
"labels": {
"name": "development"
}
}
}

View File

@ -48,7 +48,7 @@ other Kubelet flags you may care:
# $LOG_DIR is the test output path.
sudo docker run -it --rm --privileged --net=host \
-v /:/rootfs -v $CONFIG_DIR:$CONFIG_DIR -v $LOG_DIR:/var/result \
gcr.io/google_containers/node-test:0.2
k8s.gcr.io/node-test:0.2
```
## Running Node Conformance Test for Other Architectures
@ -71,7 +71,7 @@ regular expression of tests you want to run.
sudo docker run -it --rm --privileged --net=host \
-v /:/rootfs:ro -v $CONFIG_DIR:$CONFIG_DIR -v $LOG_DIR:/var/result \
-e FOCUS=MirrorPod \ # Only run MirrorPod test
gcr.io/google_containers/node-test:0.2
k8s.gcr.io/node-test:0.2
```
To skip specific tests, overwrite the environment variable `SKIP` with the
@ -81,7 +81,7 @@ regular expression of tests you want to skip.
sudo docker run -it --rm --privileged --net=host \
-v /:/rootfs:ro -v $CONFIG_DIR:$CONFIG_DIR -v $LOG_DIR:/var/result \
-e SKIP=MirrorPod \ # Run all conformance tests but skip MirrorPod test
gcr.io/google_containers/node-test:0.2
k8s.gcr.io/node-test:0.2
```
Node conformance test is a containerized version of [node e2e test](https://github.com/kubernetes/community/blob/{{page.githubbranch}}/contributors/devel/e2e-node-tests.md).

View File

@ -1,201 +0,0 @@
Apache License
Version 2.0, January 2004
http://www.apache.org/licenses/
TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION
1. Definitions.
"License" shall mean the terms and conditions for use, reproduction,
and distribution as defined by Sections 1 through 9 of this document.
"Licensor" shall mean the copyright owner or entity authorized by
the copyright owner that is granting the License.
"Legal Entity" shall mean the union of the acting entity and all
other entities that control, are controlled by, or are under common
control with that entity. For the purposes of this definition,
"control" means (i) the power, direct or indirect, to cause the
direction or management of such entity, whether by contract or
otherwise, or (ii) ownership of fifty percent (50%) or more of the
outstanding shares, or (iii) beneficial ownership of such entity.
"You" (or "Your") shall mean an individual or Legal Entity
exercising permissions granted by this License.
"Source" form shall mean the preferred form for making modifications,
including but not limited to software source code, documentation
source, and configuration files.
"Object" form shall mean any form resulting from mechanical
transformation or translation of a Source form, including but
not limited to compiled object code, generated documentation,
and conversions to other media types.
"Work" shall mean the work of authorship, whether in Source or
Object form, made available under the License, as indicated by a
copyright notice that is included in or attached to the work
(an example is provided in the Appendix below).
"Derivative Works" shall mean any work, whether in Source or Object
form, that is based on (or derived from) the Work and for which the
editorial revisions, annotations, elaborations, or other modifications
represent, as a whole, an original work of authorship. For the purposes
of this License, Derivative Works shall not include works that remain
separable from, or merely link (or bind by name) to the interfaces of,
the Work and Derivative Works thereof.
"Contribution" shall mean any work of authorship, including
the original version of the Work and any modifications or additions
to that Work or Derivative Works thereof, that is intentionally
submitted to Licensor for inclusion in the Work by the copyright owner
or by an individual or Legal Entity authorized to submit on behalf of
the copyright owner. For the purposes of this definition, "submitted"
means any form of electronic, verbal, or written communication sent
to the Licensor or its representatives, including but not limited to
communication on electronic mailing lists, source code control systems,
and issue tracking systems that are managed by, or on behalf of, the
Licensor for the purpose of discussing and improving the Work, but
excluding communication that is conspicuously marked or otherwise
designated in writing by the copyright owner as "Not a Contribution."
"Contributor" shall mean Licensor and any individual or Legal Entity
on behalf of whom a Contribution has been received by Licensor and
subsequently incorporated within the Work.
2. Grant of Copyright License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
copyright license to reproduce, prepare Derivative Works of,
publicly display, publicly perform, sublicense, and distribute the
Work and such Derivative Works in Source or Object form.
3. Grant of Patent License. Subject to the terms and conditions of
this License, each Contributor hereby grants to You a perpetual,
worldwide, non-exclusive, no-charge, royalty-free, irrevocable
(except as stated in this section) patent license to make, have made,
use, offer to sell, sell, import, and otherwise transfer the Work,
where such license applies only to those patent claims licensable
by such Contributor that are necessarily infringed by their
Contribution(s) alone or by combination of their Contribution(s)
with the Work to which such Contribution(s) was submitted. If You
institute patent litigation against any entity (including a
cross-claim or counterclaim in a lawsuit) alleging that the Work
or a Contribution incorporated within the Work constitutes direct
or contributory patent infringement, then any patent licenses
granted to You under this License for that Work shall terminate
as of the date such litigation is filed.
4. Redistribution. You may reproduce and distribute copies of the
Work or Derivative Works thereof in any medium, with or without
modifications, and in Source or Object form, provided that You
meet the following conditions:
(a) You must give any other recipients of the Work or
Derivative Works a copy of this License; and
(b) You must cause any modified files to carry prominent notices
stating that You changed the files; and
(c) You must retain, in the Source form of any Derivative Works
that You distribute, all copyright, patent, trademark, and
attribution notices from the Source form of the Work,
excluding those notices that do not pertain to any part of
the Derivative Works; and
(d) If the Work includes a "NOTICE" text file as part of its
distribution, then any Derivative Works that You distribute must
include a readable copy of the attribution notices contained
within such NOTICE file, excluding those notices that do not
pertain to any part of the Derivative Works, in at least one
of the following places: within a NOTICE text file distributed
as part of the Derivative Works; within the Source form or
documentation, if provided along with the Derivative Works; or,
within a display generated by the Derivative Works, if and
wherever such third-party notices normally appear. The contents
of the NOTICE file are for informational purposes only and
do not modify the License. You may add Your own attribution
notices within Derivative Works that You distribute, alongside
or as an addendum to the NOTICE text from the Work, provided
that such additional attribution notices cannot be construed
as modifying the License.
You may add Your own copyright statement to Your modifications and
may provide additional or different license terms and conditions
for use, reproduction, or distribution of Your modifications, or
for any such Derivative Works as a whole, provided Your use,
reproduction, and distribution of the Work otherwise complies with
the conditions stated in this License.
5. Submission of Contributions. Unless You explicitly state otherwise,
any Contribution intentionally submitted for inclusion in the Work
by You to the Licensor shall be under the terms and conditions of
this License, without any additional terms or conditions.
Notwithstanding the above, nothing herein shall supersede or modify
the terms of any separate license agreement you may have executed
with Licensor regarding such Contributions.
6. Trademarks. This License does not grant permission to use the trade
names, trademarks, service marks, or product names of the Licensor,
except as required for reasonable and customary use in describing the
origin of the Work and reproducing the content of the NOTICE file.
7. Disclaimer of Warranty. Unless required by applicable law or
agreed to in writing, Licensor provides the Work (and each
Contributor provides its Contributions) on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
implied, including, without limitation, any warranties or conditions
of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A
PARTICULAR PURPOSE. You are solely responsible for determining the
appropriateness of using or redistributing the Work and assume any
risks associated with Your exercise of permissions under this License.
8. Limitation of Liability. In no event and under no legal theory,
whether in tort (including negligence), contract, or otherwise,
unless required by applicable law (such as deliberate and grossly
negligent acts) or agreed to in writing, shall any Contributor be
liable to You for damages, including any direct, indirect, special,
incidental, or consequential damages of any character arising as a
result of this License or out of the use or inability to use the
Work (including but not limited to damages for loss of goodwill,
work stoppage, computer failure or malfunction, or any and all
other commercial damages or losses), even if such Contributor
has been advised of the possibility of such damages.
9. Accepting Warranty or Additional Liability. While redistributing
the Work or Derivative Works thereof, You may choose to offer,
and charge a fee for, acceptance of support, warranty, indemnity,
or other liability obligations and/or rights consistent with this
License. However, in accepting such obligations, You may act only
on Your own behalf and on Your sole responsibility, not on behalf
of any other Contributor, and only if You agree to indemnify,
defend, and hold each Contributor harmless for any liability
incurred by, or claims asserted against, such Contributor by reason
of your accepting any such warranty or additional liability.
END OF TERMS AND CONDITIONS
APPENDIX: How to apply the Apache License to your work.
To apply the Apache License to your work, attach the following
boilerplate notice, with the fields enclosed by brackets "{}"
replaced with your own identifying information. (Don't include
the brackets!) The text should be enclosed in the appropriate
comment syntax for the file format. We also recommend that a
file or class name and description of purpose be included on the
same "printed page" as the copyright notice for easier
identification within third-party archives.
Copyright {yyyy} {name of copyright owner}
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.

View File

@ -1,69 +0,0 @@
## Synopsis
Static compilation of HTML from Markdown, including processing that groups code snippets into arbitrary tabs.
## Code Example
\> bdocs-tab:kubectl Deployment Config to run 3 nginx instances (max rollback set to 10 revisions).
bdocs-tab:tab will be stripped during rendering and used with CSS to show or hide the preferred tab. kubectl indicates the desired tab, since blockquotes have no specific syntax highlighting.
\`\`\`bdocs-tab:kubectl_yaml
apiVersion: extensions/v1beta1
kind: Deployment
metadata:
name: deployment-example
spec:
replicas: 3
revisionHistoryLimit: 10
template:
metadata:
labels:
app: nginx
spec:
containers:
- name: nginx
image: nginx:1.10
\`\`\`
bdocs-tab:tab_lang will be used to indicate which tab these code snippets belong to. The tab section of the string indicates the tab, while the language follows the underscore. During rendering, the language will be properly highlighted as if the bdoc token were omitted.
## Motivation
This is a project to extend markdown documents and render them in html with a table of contents and code snippet pane. Most projects of this variety lean heavily on front end parsing with JavaScript/jQuery. This project uses NodeJS, Marked, and highlight.js to output syntax highlighted code blocks.
With specific tokens on blockquotes and code blocks, the chunks can be placed according to their relevance. Ex: Multiple language code blocks that should be grouped under an arbitrary tab.
## Installation
Clone the repository, then add documents into documents directory. Modify the manifest.json to contain the document filenames in the order desired. The docs field is an array of objects with a filename key.
Because this is a NodeJS program, a valid installation of node is required. Once node is installed, verify it can be run from the command line.
```
node --version
```
Next, dependencies need to be installed via npm from the root of the project directory.
```
npm install
```
Once dependencies are installed, run
```
node brodoc.js
```
This will generate the index.html file, which can be opened in a browser or served.
The included node-static server can be run from the project root via
```
npm start
```
## License
Apache License Version 2.0
## FAQ
Q: Why is it named brodocs?
A: This project was born out of a collaboration with my brother to create a suitable docs app for his purposes. It was a fun name for the two of us to use as actual brothers.

View File

@ -1,58 +0,0 @@
// https://jsfiddle.net/upqwhou2/
$(document).ready(function() {
var navigationLinks = $('#sidebar-wrapper > ul li a');
var navigationSections = $('#sidebar-wrapper > ul > ul');
var sectionIdTonavigationLink = {};
var sections = $('#page-content-wrapper').find('h1, h2').map(function(index, node) {
if (node.id) {
sectionIdTonavigationLink[node.id] = $('#sidebar-wrapper > ul li a[href="#' + node.id + '"]');
return node;
}
});
var sectionIdToNavContainerLink = {};
var topLevelSections = $('#page-content-wrapper').find('h1').map(function(index, node) {
if (node.id) {
sectionIdToNavContainerLink[node.id] = $('#sidebar-wrapper > ul > ul[id="' + node.id + '-nav' +'"]');
return node;
}
});
var firstLevelNavs = $('#sidebar-wrapper > li');
var secondLevelNavs = $('#sidebar-wrapper > ul > ul');
var secondLevelNavContents = $('#sidebar-wrapper > ul > ul > li');
var thirdLevelNavs = null; // TODO: When compile provides 3 level nav, implement
var sectionsReversed = $(sections.get().reverse());
function checkScroll(event) {
var scrollPosition = $(window).scrollTop();
var offset = 50;
scrollPosition += offset;
sections.each(function() {
var currentSection = $(this);
var sectionTop = $(this).offset().top;
var id = $(this).attr('id');
if (scrollPosition >= sectionTop) {
navigationLinks.removeClass('selected');
sectionIdTonavigationLink[id].addClass('selected');
var sectionNavContainer = sectionIdToNavContainerLink[id];
var sectionNavContainerDisplay;
if (sectionNavContainer) {
sectionNavContainerDisplay = sectionNavContainer.css('display');
}
if (sectionNavContainer && sectionNavContainerDisplay === 'none') {
navigationSections.toggle(false);
sectionNavContainer.toggle(true);
}
}
if (($(this).offset().top < window.pageYOffset + 50) && $(this).offset().top + $(this).height() > window.pageYOffset) {
window.location.hash = id;
}
});
}
checkScroll();
$(window).on('scroll', function(event) {
checkScroll(event);
});
});

View File

@ -1,211 +0,0 @@
const docFolder = './documents/';
const fs = require('fs');
const marked = require('marked');
const highlight = require('highlight.js');
const renderer = new marked.Renderer();
const brodocDec = require('./markedDecorations.js');
marked.setOptions({
renderer: renderer,
gfm: true,
tables: true,
breaks: false,
pedantic: false,
sanitize: false,
smartLists: true,
smartypants: false,
highlight: function (code, lang) {
return highlight.highlightAuto(code).value;
}
});
brodocDec.decorateMarked(renderer);
var config = require('./manifest');
var docs = config.docs;
var files = [];
var fileArray = [];
docs.forEach(file => {
files.push(file.filename);
fileArray.push(file);
});
var bodyContent = '';
var navIds = brodocDec.navIds;
var codeTabs = brodocDec.codeTabs;
// const lexer = new marked.Lexer();
// lexer.rules.bdoc = /^(\/{4} )(\w+).*$/;
var path = docFolder;
var fIndex = 0;
var rIndex = 0;
var fileObj = {toc: [], content: [], tabs: []};
fileArray.forEach((file, index) => {
fs.readFile(path + file.filename, 'utf8', (err, data) => {
rIndex++;
file.content = data;
if (rIndex >= files.length) {
// do the things
parseFileContent(fileArray);
var navData = generateNavItems(navIds);
var navContent = navData.content;
var navDataArray = navData.navDataArray;
var codeTabContent = generateCodeTabItems(codeTabs);
var bodyContent = flattenContent(parsedContentArray);
generateDoc(navContent, bodyContent, codeTabContent);
generateNavJson(navDataArray);
}
});
});
function flattenContent(content) {
var flattenedContent = content.reduce(function(accum, val) {
return accum + val;
});
return flattenedContent;
}
var parsedContentArray = [];
function parseFileContent(files) {
files.forEach((file, index) => {
parsedContentArray[index] = parseDoc(file.content);
});
}
function parseDoc(doc) {
return marked(doc, { renderer: renderer });
}
function generateNavItems(navObjs) {
var reversedNavs = navObjs.reverse();
var currentNestArray = [];
var currentStrongArray = [];
var flattenedNest = '';
var nestedNavArray = []; // Array containing generated html menu items - is flattened into a string.
var navArrayInvert = []; // Deals with data layer of navigation;
var navSectionArray = [];
var navStrongSectionArray = [];
var navSectionArrayClone;
var flatNavArrayInvert = [];
reversedNavs.forEach(obj => {
flatNavArrayInvert.push(obj.id);
var strong = (obj.id.indexOf('-strong-') !== -1);
if (obj.level !== 1) {
if (strong && currentNestArray.length !== 0) {
flattenedNest = flattenContent(currentNestArray.reverse());
currentStrongArray.push(generateNestedNav(obj, flattenedNest));
currentNestArray.length = 0;
navSectionArrayClone = Object.assign([], navSectionArray);
navStrongSectionArray.push({section: obj.id, subsections: navSectionArrayClone});
navSectionArray.length = 0;
} else {
currentNestArray.push(generateNav(obj));
navSectionArray.push({section: obj.id});
}
} else if (obj.level === 1) {
if (currentStrongArray.length !== 0) {
currentNestArray.forEach(obj => {
currentStrongArray.push(obj);
});
flattenedNest = flattenContent(currentStrongArray.reverse());
} else if (currentNestArray.length !== 0) {
flattenedNest = flattenContent(currentNestArray.reverse());
}
nestedNavArray.push(generateNestedNav(obj, flattenedNest));
currentNestArray.length = 0;
currentStrongArray.length = 0;
flattenedNest = '';
navSectionArray.forEach(obj => {
navStrongSectionArray.push(obj);
});
navSectionArrayClone = Object.assign([], navStrongSectionArray);
navStrongSectionArray.length = 0;
navArrayInvert.push({section: obj.id, subsections: navSectionArrayClone});
navSectionArray.length = 0;
}
});
var navContent = flattenContent(nestedNavArray.reverse());
return {content: navContent, navDataArray: {toc: navArrayInvert, flatToc: flatNavArrayInvert}};
}
function generateNav(obj) {
var classString = 'nav-level-' + obj.level;
var isStrong = obj.id.indexOf('-strong-') !== -1;
if (isStrong) {
classString += ' strong-nav';
}
return '<li class="' + classString + '">' + '<a href="#' + obj.id + '" class="nav-item">' + obj.text + '</a></li>';
}
function generateNestedNav(parent, nest) {
var nestContent = '';
if (nest.length > 0) {
nestContent = nest ? '<ul id="' + parent.id + '-nav" style="display: none;">' + nest + '</ul>' : '';
}
return '<ul>' + generateNav(parent) + nestContent + '</ul>';
}
function generateNavJson(data) {
var navJson = JSON.stringify(data);
navScript = `(function(){navData = ${navJson};})();`;
fs.writeFile('./navData.js', navScript, function(err) {
if (err) {
return console.log(err);
}
console.log("navData.js saved!");
});
}
function generateCodeTabItems(tabs) {
var codeTabList = '';
tabs.forEach(tab => {
codeTabList += generateCodeTab(tab);
});
return codeTabList;
}
function generateCodeTab(tab) {
return '<li class="code-tab" id="' + tab + '">' + tab + '</li>';
}
function generateDoc(navContent, bodyContent, codeTabContent) {
var doc =
`<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<title>${config.title}</title>
<link rel="shortcut icon" href="favicon.ico" type="image/vnd.microsoft.icon">
<!-- Latest compiled and minified CSS -->
<link rel="stylesheet" href="node_modules/bootstrap/dist/css/bootstrap.min.css">
<link rel="stylesheet" href="node_modules/font-awesome/css/font-awesome.min.css" type="text/css">
<link rel="stylesheet" href="node_modules/highlight.js/styles/default.css" type="text/css">
<link rel="stylesheet" href="stylesheet.css" type="text/css">
</head>
<body>
<div id="sidebar-wrapper" class="side-nav side-bar-nav">${navContent}<br/><div class="copyright">${config.copyright}</div></div>
<div id="wrapper">
<div id="code-tabs-wrapper" class="code-tabs"><ul class="code-tab-list">${codeTabContent}</ul></div>
<div id="page-content-wrapper" class="body-content container-fluid">${bodyContent}</div>
</div>
<script src="node_modules/jquery/dist/jquery.min.js"></script>
<script src="node_modules/jquery.scrollto/jquery.scrollTo.min.js"></script>
<script src="navData.js"></script>
<script src="scroll.js"></script>
<!--<script src="actions.js"></script>-->
<script src="tabvisibility.js"></script>
</body>
</html>`;
fs.writeFile('./index.html', doc, function (err) {
if (err) {
return console.log(err);
}
console.log("index.html saved!");
});
}

View File

@ -1,6 +0,0 @@
{
"compilerOptions": {
"target": "ES6",
"module": "commonjs"
}
}

Some files were not shown because too many files have changed in this diff.